diff --git a/libmysqld/CMakeLists.txt b/libmysqld/CMakeLists.txt
index d2f7d40c1b4..5b8c08178cc 100644
--- a/libmysqld/CMakeLists.txt
+++ b/libmysqld/CMakeLists.txt
@@ -134,6 +134,7 @@ SET(SQL_EMBEDDED_SOURCES emb_qcache.cc libmysqld.c lib_sql.cc
 ../sql/item_vers.cc
 ../sql/opt_trace.cc
 ../sql/xa.cc
+ ../sql/json_table.cc
 ${GEN_SOURCES}
 ${MYSYS_LIBWRAP_SOURCE}
 )
diff --git a/mysql-test/main/opt_trace.result b/mysql-test/main/opt_trace.result
index f9560b46e3e..fef7b4d3e09 100644
--- a/mysql-test/main/opt_trace.result
+++ b/mysql-test/main/opt_trace.result
@@ -9186,4 +9186,32 @@ json_detailed(json_extract(trace, '$**.lateral_derived'))
 }
 ]
 drop table t1,t2;
+#
+# Test table functions.
+#
+CREATE TABLE t1(id INT, f1 JSON);
+INSERT INTO t1 VALUES (1, '{\"1\": 1}'), (2, '{\"1\": 2}'), (3, '{\"1\": 3}'),
+(4, '{\"1\": 4}'), (5, '{\"1\": 5}'), (6, '{\"1\": 6}');
+SELECT * FROM t1 WHERE id IN
+(SELECT id FROM t1 as tt2,
+JSON_TABLE(f1, "$" COLUMNS (jf FOR ORDINALITY)) AS tbl);
+id f1
+1 {"1": 1}
+2 {"1": 2}
+3 {"1": 3}
+4 {"1": 4}
+5 {"1": 5}
+6 {"1": 6}
+select json_detailed(json_extract(trace, '$**.best_join_order'))
+from information_schema.OPTIMIZER_TRACE;
+json_detailed(json_extract(trace, '$**.best_join_order'))
+[
+
+ [
+ "t1",
+ ""
+ ]
+]
+DROP TABLE t1;
+# End of 10.6 tests
 set optimizer_trace='enabled=off';
diff --git a/mysql-test/main/opt_trace.test b/mysql-test/main/opt_trace.test
index 13be2d6f604..8a9a618635b 100644
--- a/mysql-test/main/opt_trace.test
+++ b/mysql-test/main/opt_trace.test
@@ -837,4 +837,22 @@ from information_schema.optimizer_trace;
 drop table t1,t2;
+
+--echo #
+--echo # Test table functions.
+--echo #
+
+CREATE TABLE t1(id INT, f1 JSON);
+INSERT INTO t1 VALUES (1, '{\"1\": 1}'), (2, '{\"1\": 2}'), (3, '{\"1\": 3}'),
+ (4, '{\"1\": 4}'), (5, '{\"1\": 5}'), (6, '{\"1\": 6}');
+
+SELECT * FROM t1 WHERE id IN
+ (SELECT id FROM t1 as tt2,
+ JSON_TABLE(f1, "$" COLUMNS (jf FOR ORDINALITY)) AS tbl);
+
+select json_detailed(json_extract(trace, '$**.best_join_order'))
+ from information_schema.OPTIMIZER_TRACE;
+DROP TABLE t1;
+
+--echo # End of 10.6 tests
 set optimizer_trace='enabled=off';
diff --git a/mysql-test/main/query_cache.result b/mysql-test/main/query_cache.result
index bff04de6520..c835e25e396 100644
--- a/mysql-test/main/query_cache.result
+++ b/mysql-test/main/query_cache.result
@@ -2211,6 +2211,22 @@ drop table t1;
 # MDEV-24858 SIGABRT in DbugExit from my_malloc in Query_cache::init_cache Regression
 #
 set global Query_cache_size=18446744073709547520;
+#
+# MDEV-22301 JSON_TABLE: Queries are not inserted into query cache.
+#
+create table t1 (a text);
+insert into t1 values ('{"a":"foo"}');
+flush status;
+SHOW STATUS LIKE 'Qcache_inserts';
+Variable_name Value
+Qcache_inserts 0
+select * from t1, json_table(t1.a, '$' columns (f varchar(20) path '$.a')) as jt;
+a f
+{"a":"foo"} foo
+SHOW STATUS LIKE 'Qcache_inserts';
+Variable_name Value
+Qcache_inserts 0
+drop table t1;
 restore defaults
 SET GLOBAL query_cache_type= default;
 SET GLOBAL query_cache_size=@save_query_cache_size;
diff --git a/mysql-test/main/query_cache.test b/mysql-test/main/query_cache.test
index 4d769b46213..0d747b97acc 100644
--- a/mysql-test/main/query_cache.test
+++ b/mysql-test/main/query_cache.test
@@ -1807,6 +1807,17 @@ drop table t1;
 set global Query_cache_size=18446744073709547520;
 --enable_warnings
+--echo #
+--echo # MDEV-22301 JSON_TABLE: Queries are not inserted into query cache.
+--echo #
+create table t1 (a text);
+insert into t1 values ('{"a":"foo"}');
+flush status;
+SHOW STATUS LIKE 'Qcache_inserts';
+select * from t1, json_table(t1.a, '$' columns (f varchar(20) path '$.a')) as jt;
+SHOW STATUS LIKE 'Qcache_inserts';
+drop table t1;
+
 --echo restore defaults
 SET GLOBAL query_cache_type= default;
 SET GLOBAL query_cache_size=@save_query_cache_size;
diff --git a/mysql-test/main/view_grant.result b/mysql-test/main/view_grant.result
index f091d4de2d1..c31ba882e76 100644
--- a/mysql-test/main/view_grant.result
+++ b/mysql-test/main/view_grant.result
@@ -1954,3 +1954,18 @@ connection default;
 drop user user_11766767;
 drop database mysqltest1;
 drop database mysqltest2;
+# Check that a user without access to the schema 'foo' cannot query
+# a JSON_TABLE view in that schema.
+CREATE SCHEMA foo;
+CREATE VIEW foo.v AS SELECT * FROM JSON_TABLE('[1,2,3]', '$[*]' COLUMNS (num INT PATH '$[0]')) AS jt;
+CREATE USER foo@localhost;
+connect con1,localhost,foo,,;
+SELECT * FROM foo.v;
+ERROR 42000: SELECT command denied to user 'foo'@'localhost' for table 'v'
+#
+# Clean-up.
+#
+connection default;
+disconnect con1;
+drop user foo@localhost;
+drop schema foo;
diff --git a/mysql-test/main/view_grant.test b/mysql-test/main/view_grant.test
index c9bb9569145..18ff8aaf4fc 100644
--- a/mysql-test/main/view_grant.test
+++ b/mysql-test/main/view_grant.test
@@ -2205,5 +2205,24 @@ drop user user_11766767;
 drop database mysqltest1;
 drop database mysqltest2;
+--echo # Check that a user without access to the schema 'foo' cannot query
+--echo # a JSON_TABLE view in that schema.
+CREATE SCHEMA foo;
+CREATE VIEW foo.v AS SELECT * FROM JSON_TABLE('[1,2,3]', '$[*]' COLUMNS (num INT PATH '$[0]')) AS jt;
+
+CREATE USER foo@localhost;
+connect (con1,localhost,foo,,);
+--error ER_TABLEACCESS_DENIED_ERROR
+SELECT * FROM foo.v;
+
+--echo #
+--echo # Clean-up.
+--echo # +connection default; +disconnect con1; +drop user foo@localhost; +drop schema foo; + # Wait till we reached the initial number of concurrent sessions --source include/wait_until_count_sessions.inc + diff --git a/mysql-test/suite/json/r/json_table.result b/mysql-test/suite/json/r/json_table.result new file mode 100644 index 00000000000..f636326b4f4 --- /dev/null +++ b/mysql-test/suite/json/r/json_table.result @@ -0,0 +1,466 @@ +select * from json_table('[{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}]', '$[*]' COLUMNS( a INT PATH '$.a')) as tt; +a +1 +2 +select * from JSON_TABLE( '[ {"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a', NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt; +a b +1 11 +1 111 +2 22 +2 222 +3 NULL +SELECT * FROM JSON_TABLE( '[ {"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a', NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'), NESTED PATH '$.b[*]' COLUMNS (c INT PATH '$') ) ) jt; +a b c +1 11 NULL +1 111 NULL +1 NULL 11 +1 NULL 111 +2 22 NULL +2 222 NULL +2 NULL 22 +2 NULL 222 +3 NULL NULL +create table t1 (id varchar(5), json varchar(1024)); +insert into t1 values ('j1', '[{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}]'); +insert into t1 values ('j2', '[{"a": 3, "b": [11,111]}, {"a": 4, "b": [22,222]}, {"a": 5, "b": [22,222]}]'); +select id, json, a from t1, json_table(t1.json, '$[*]' COLUMNS(js_id FOR ORDINALITY, a INT PATH '$.a')) as tt; +id json a +j1 [{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}] 1 +j1 [{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}] 2 +j2 [{"a": 3, "b": [11,111]}, {"a": 4, "b": [22,222]}, {"a": 5, "b": [22,222]}] 3 +j2 [{"a": 3, "b": [11,111]}, {"a": 4, "b": [22,222]}, {"a": 5, "b": [22,222]}] 4 +j2 [{"a": 3, "b": [11,111]}, {"a": 4, "b": [22,222]}, {"a": 5, "b": [22,222]}] 5 +select * from t1, JSON_TABLE(t1.json, '$[*]' COLUMNS(js_id FOR ORDINALITY, a INT PATH '$.a', NESTED PATH '$.b[*]' COLUMNS (l_js_id FOR ORDINALITY, b INT PATH '$'))) as jt; +id json js_id a l_js_id b +j1 [{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}] 1 1 1 11 +j1 [{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}] 1 1 2 111 +j1 [{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}] 2 2 1 22 +j1 [{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}] 2 2 2 222 +j2 [{"a": 3, "b": [11,111]}, {"a": 4, "b": [22,222]}, {"a": 5, "b": [22,222]}] 1 3 1 11 +j2 [{"a": 3, "b": [11,111]}, {"a": 4, "b": [22,222]}, {"a": 5, "b": [22,222]}] 1 3 2 111 +j2 [{"a": 3, "b": [11,111]}, {"a": 4, "b": [22,222]}, {"a": 5, "b": [22,222]}] 2 4 1 22 +j2 [{"a": 3, "b": [11,111]}, {"a": 4, "b": [22,222]}, {"a": 5, "b": [22,222]}] 2 4 2 222 +j2 [{"a": 3, "b": [11,111]}, {"a": 4, "b": [22,222]}, {"a": 5, "b": [22,222]}] 3 5 1 22 +j2 [{"a": 3, "b": [11,111]}, {"a": 4, "b": [22,222]}, {"a": 5, "b": [22,222]}] 3 5 2 222 +select * from t1, JSON_TABLE(t1.no_field, '$[*]' COLUMNS(js_id FOR ORDINALITY, a INT PATH '$.a', NESTED PATH '$.b[*]' COLUMNS (l_js_id FOR ORDINALITY, b INT PATH '$'))) as jt; +ERROR 42S22: Unknown column 't1.no_field' in 'JSON_TABLE argument' +select * from t1, JSON_TABLE(t1.no_field, '$[*]' COLUMNS(js_id FOR ORDINALITY, a INT PATH '$.a', NESTED PATH '$.b[*]' COLUMNS (l_js_id FOR ORDINALITY, a INT PATH '$'))) as jt; +ERROR 42S21: Duplicate column name 'a' +DROP TABLE t1; +create table t1 (item_name varchar(32), item_props varchar(1024)); +insert into t1 values ('Laptop', '{"color": "black", "price": 1000}'); +insert into t1 values ('Jeans', '{"color": "blue", "price": 50}'); +select * 
from t1 left join json_table(t1.item_props,'$' columns( color varchar(100) path '$.color')) as T on 1; +item_name item_props color +Laptop {"color": "black", "price": 1000} black +Jeans {"color": "blue", "price": 50} blue +select * from t1 right join json_table(t1.item_props,'$' columns( color varchar(100) path '$.color')) as T on 1; +ERROR 42000: Cross dependency found in OUTER JOIN; examine your ON conditions +DROP TABLE t1; +select * from JSON_TABLE( '[ {"xa": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a' default '101' on empty, NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt; +a b +101 11 +101 111 +2 22 +2 222 +3 NULL +select * from JSON_TABLE( '[ {"xa": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a' default '202' on error, NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt; +a b +NULL 11 +NULL 111 +2 22 +2 222 +3 NULL +select * from JSON_TABLE( '[ {"a": [1, 2], "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a' default '101' on empty, NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt; +a b +NULL 11 +NULL 111 +2 22 +2 222 +3 NULL +select * from JSON_TABLE( '[ {"a": [1, 2], "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a' default '202' on error default '101' on empty, NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt; +a b +202 11 +202 111 +2 22 +2 222 +3 NULL +select * from JSON_TABLE( '[{"a": [1, 2], "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3} xx YY]', '$[*]' COLUMNS( a INT PATH '$.a' default '202' on error default '101' on empty, NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt; +ERROR HY000: Syntax error in JSON text in argument 1 to function 'JSON_TABLE' at position 65 +select * from JSON_TABLE( '[{"a": [1, 2], "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a' error on error default '101' on empty, NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt; +ERROR HY000: Field 'a' can't be set for JSON_TABLE 'jt'. 
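The ON EMPTY/ON ERROR cases above can be read as: DEFAULT ... ON EMPTY applies when the path matches nothing in the current row, while DEFAULT ... ON ERROR applies when the matched value cannot be stored in the column type (for example an array into INT). A minimal standalone sketch of that behaviour, illustrative only and not part of the patch or of the recorded results; the literals are invented:
# Hypothetical illustration: expected rows are 1 (value found),
# 0 (ON EMPTY default fires) and 999 (ON ERROR default fires, array -> INT fails).
select * from json_table('[{"a": 1}, {"b": 2}, {"a": [3]}]', '$[*]'
  columns (a int path '$.a' default '0' on empty default '999' on error)) as jt;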
+select * from json_table('{"a":0}',"$" columns(a decimal(1,1) path '$.a')) foo; +a +0.0 +connect con1,localhost,root,,; +select a from json_table('{"a":0}',"$" columns(a for ordinality)) foo; +a +1 +connection default; +disconnect con1; +create database db; +use db; +create table t (a text); +insert into t values ('{"foo":"bar"}'); +create user u@localhost; +grant select (a) on db.t to u@localhost; +connect con1,localhost,u,,db; +select a from t; +a +{"foo":"bar"} +select * from t, json_table(t.a, '$' columns(f varchar(20) path '$.foo')) as jt; +a f +{"foo":"bar"} bar +connection default; +disconnect con1; +drop user u@localhost; +drop database db; +use test; +create table t1 ( +color varchar(32), +price int +); +insert into t1 values ("red", 100), ("blue", 50); +insert into t1 select * from t1; +insert into t1 select * from t1; +set @save_optimizer_switch=@@optimizer_switch; +set optimizer_switch='firstmatch=off'; +select * from +json_table('[{"color": "blue", "price": 50}, + {"color": "red", "price": 100}]', +'$[*]' columns( color varchar(100) path '$.color', +price text path '$.price' + ) +) as T +where +T.color in (select color from t1 where t1.price=T.price); +color price +blue 50 +red 100 +set @@optimizer_switch=@save_optimizer_switch; +drop table t1; +select * from +json_table(' [ {"color": "blue", "sizes": [1,2,3,4], "prices" : [10,20]}, + {"color": "red", "sizes": [10,11,12,13,14], "prices" : [100,200,300]} ]', +'$[*]' columns( +color varchar(4) path '$.color', +seq0 for ordinality, +nested path '$.sizes[*]' + columns (seq1 for ordinality, +size int path '$'), +nested path '$.prices[*]' + columns (seq2 for ordinality, +price int path '$') +) +) as T; +color seq0 seq1 size seq2 price +blue 1 1 1 NULL NULL +blue 1 2 2 NULL NULL +blue 1 3 3 NULL NULL +blue 1 4 4 NULL NULL +blue 1 NULL NULL 1 10 +blue 1 NULL NULL 2 20 +red 2 1 10 NULL NULL +red 2 2 11 NULL NULL +red 2 3 12 NULL NULL +red 2 4 13 NULL NULL +red 2 5 14 NULL NULL +red 2 NULL NULL 1 100 +red 2 NULL NULL 2 200 +red 2 NULL NULL 3 300 +select * from json_table('[{"color": "blue", "price": 50}, + {"color": "red", "price": 100}, + {"color": "rojo", "price": 10.0}, + {"color": "blanco", "price": 11.0}]', +'$[*]' columns( color varchar(100) path '$.color', +price text path '$.price', seq for ordinality)) as T order by color desc; +color price seq +rojo 10.0 3 +red 100 2 +blue 50 1 +blanco 11.0 4 +create view v as select * from json_table('{"as":"b", "x":123}',"$" columns(a varchar(8) path '$.a' default '-' on empty, x int path '$.x')) x; +select * from v; +a x +- 123 +show create table v; +View Create View character_set_client collation_connection +v CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v` AS select `x`.`a` AS `a`,`x`.`x` AS `x` from JSON_TABLE('{"as":"b", "x":123}', '$' COLUMNS (`a` varchar(8) PATH '$.a' DEFAULT '-' ON EMPTY, `x` int(11) PATH '$.x')) `x` latin1 latin1_swedish_ci +drop view v; +select * from json_table('{"as":"b", "x":123}', +"$" columns(a varchar(8) path '$.a' default '-' on empty null on error null on empty, x int path '$.x')) x; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'null on empty, x int path '$.x')) x' at line 2 +select * from json_table('{"a":"foo","b":"bar"}', '$' + columns (v varchar(20) path '$.*')) as jt; +v +NULL +select * from json_table('{"a":"foo","b":"bar"}', '$' + columns (v varchar(20) path '$.*' default '-' on error)) as jt; +v +- +select * 
from json_table('{"b":"bar"}', '$' + columns (v varchar(20) path '$.*' default '-' on error)) as jt; +v +bar +create table t1 (a varchar(100)); +insert into t1 values ('1'); +select * from t1 as T, json_table(T.a, '$[*]' columns(color varchar(100) path '$.nonexistent', seq for ordinality)) as T; +ERROR 42000: Not unique table/alias: 'T' +drop table t1; +prepare s from 'select * from +json_table(?, + \'$[*]\' columns( color varchar(100) path \'$.color\', + price text path \'$.price\', + seq for ordinality)) as T +order by color desc; '; +execute s using '[{"color": "red", "price":1}, {"color":"brown", "price":2}]'; +color price seq +red 1 1 +brown 2 2 +deallocate prepare s; +create view v2 as select * from json_table('[{"co\\\\lor": "blue", "price": 50}]', '$[*]' columns( color varchar(100) path '$.co\\\\lor') ) as T; +select * from v2; +color +blue +drop view v2; +explain format=json select * from +json_table('[{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}]', '$[*]' COLUMNS( a INT PATH '$.a')) as tt; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "tt", + "access_type": "ALL", + "rows": 40, + "filtered": 100, + "table_function": "json_table" + } + } +} +explain select * from +json_table('[{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}]', '$[*]' COLUMNS( a INT PATH '$.a')) as tt; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE tt ALL NULL NULL NULL NULL 40 Table function: json_table +create view v1 as select * from +json_table('[{"color": "blue", "price": 50}]', +'$[*]' columns(color text path '$.nonexistent', +seq for ordinality)) as `ALIAS NOT QUOTED`; +select * from v1; +color seq +NULL 1 +drop view v1; +create view v1 as select * from +json_table('[{"color": "blue", "price": 50}, + {"color": "red", "price": 100}]', +'$[*]' columns( +color text path "$.QUOTES \" HERE \"", +color1 text path '$.QUOTES " HERE "', +color2 text path "$.QUOTES ' HERE '", +seq for ordinality)) as T; +select * from v1; +color color1 color2 seq +NULL NULL NULL 1 +NULL NULL NULL 2 +drop view v1; +CREATE TABLE t1 (x INT); +INSERT INTO t1 VALUES (1), (2), (3); +SELECT t1.x*2 m, jt.* FROM t1, +JSON_TABLE(m, '$[*]' COLUMNS (i INT PATH '$')) jt; +ERROR 42S22: Unknown column 'm' in 'JSON_TABLE argument' +DROP TABLE t1; +select * from json_table(JS3.size, '$' columns (size INT PATH '$.size')) as JS1, json_table(JS1.size, '$' columns (size INT PATH '$.size')) as JS2, json_table(JS1.size, '$' columns (size INT PATH '$.size')) as JS3 where 1; +ERROR 42S02: Unknown table 'JS3' in JSON_TABLE argument +create table t1 (json varchar(100) character set utf8); +insert into t1 values ('{"value":"АБВ"}'); +create table tj1 as +select T.value +from t1, json_table(t1.json, '$' columns (value varchar(32) PATH '$.value')) T; +show create table tj1; +Table Create Table +tj1 CREATE TABLE `tj1` ( + `value` varchar(32) CHARACTER SET utf8 DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +drop table t1; +drop table tj1; +CREATE TABLE t1(id INT, f1 JSON); +INSERT INTO t1 VALUES +(1, '{\"1\": 1}'), +(2, '{\"1\": 2}'), +(3, '{\"1\": 3}'), +(4, '{\"1\": 4}'), +(5, '{\"1\": 5}'), +(6, '{\"1\": 6}'); +ANALYZE TABLE t1; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze Warning Engine-independent statistics are not collected for column 'f1' +test.t1 analyze status OK +SELECT * FROM JSON_TABLE(tt3.f1, "$" COLUMNS (id FOR ORDINALITY)) AS tbl STRAIGHT_JOIN t1 AS tt3; +ERROR 42S02: Unknown table 'tt3' in JSON_TABLE argument 
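These "Unknown table" errors come from the same rule: the first argument of JSON_TABLE may only refer to columns of tables that precede the table function in the join list, so a reference to tt3, which is joined after the function, is rejected. The following tests exercise the working order through tt2; a minimal hedged sketch of that working form (illustrative only, not part of the recorded output):
# Hypothetical illustration: t1 is listed before the table function,
# so the JSON_TABLE argument tt3.f1 is resolvable; one row per t1 row.
SELECT tbl.id FROM t1 AS tt3,
  JSON_TABLE(tt3.f1, "$" COLUMNS (id FOR ORDINALITY)) AS tbl;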
+SELECT * FROM t1 as jj1, +(SELECT tt2.* +FROM +t1 as tt2, +JSON_TABLE(tt3.f1, "$" COLUMNS (id FOR ORDINALITY)) AS tbl +STRAIGHT_JOIN +t1 AS tt3 +) dt +ORDER BY 1,3 LIMIT 10; +ERROR 42S22: Unknown column 'tt3.f1' in 'JSON_TABLE argument' +drop table t1; +select collation(x) from +JSON_TABLE('["abc"]', '$[*]' COLUMNS (x VARCHAR(10) CHARSET latin1 PATH '$')) tbl; +collation(x) +latin1_swedish_ci +SELECT * FROM JSON_TABLE('{"x":1, "y":2}', _utf8mb4'$' COLUMNS (NESTED PATH _utf8mb4'$.x' + COLUMNS(y INT PATH _utf8mb4'$.y' DEFAULT _utf8mb4'1' ON EMPTY DEFAULT _utf8mb4'2' ON ERROR))) jt; +y +1 +select * from json_table( +'{"name":"t-shirt", "colors": ["yellow", "blue"],"sizes": ["small", "medium", "large"]}', +'$' columns(name varchar(32) path '$.name', +nested path '$.colors[*]' columns ( +color varchar(32) path '$', +nested path '$.sizes[*]' columns ( +size varchar(32) path '$' +)))) as t; +name color size +t-shirt yellow NULL +t-shirt blue NULL +SELECT x, length(x) FROM +JSON_TABLE('{}', '$' COLUMNS (x VARCHAR(10) PATH '$.x' DEFAULT 'abcdefg' ON EMPTY)) jt; +x length(x) +abcdefg 7 +select * from +json_table('[{"a":"aa"}, {"b":null}]', '$[*]' + columns (col1 int path '$.b' default '456' on empty)) as tt; +col1 +456 +NULL +select * from +json_table('[{"a":"aa"}, {"b":true}]', '$[*]' + columns (col1 int path '$.b' default '456' on empty)) as tt; +col1 +456 +1 +select * from +json_table('[{"a":"aa"}, {"b":false}]', '$[*]' + columns (col1 int path '$.b' default '456' on empty)) as tt; +col1 +456 +0 +select * from +json_table('[{"a":"aa"}, {"b":null}]', '$[*]' + columns (col1 varchar(100) path '$.b' default '456' on empty)) as tt; +col1 +456 +NULL +select * from +json_table('[{"a":"aa"}, {"b":true}]', '$[*]' + columns (col1 varchar(100) path '$.b' default '456' on empty)) as tt; +col1 +456 +true +select * from +json_table('[{"a":"aa"}, {"b":false}]', '$[*]' + columns (col1 varchar(100) path '$.b' default '456' on empty)) as tt; +col1 +456 +false +select * from +json_table( '[{"a":"asd"}, {"a":123}, {"a":[]}, {"a":{}} ]', '$[*]' + columns (id for ordinality, +intcol int path '$.a' default '1234' on empty default '5678' on error) +) as tt; +id intcol +1 5678 +2 123 +3 5678 +4 5678 +SELECT COUNT(*) FROM JSON_TABLE('[1, 2]', '$[*]' COLUMNS( I INT PATH '$')) tt; +COUNT(*) +2 +create table t1 (a int); +insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t2 (js json, b int); +insert into t2 select '[1,2,3]',A.a from t1 A, t1 B; +explain select * from t1, +(select * from t2, json_table(t2.js, '$[*]' columns (o for ordinality)) as jt) as TT2 +where 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 10 +1 SIMPLE t2 ALL NULL NULL NULL NULL 100 Using join buffer (flat, BNL join) +1 SIMPLE jt ALL NULL NULL NULL NULL 40 Table function: json_table +drop table t1, t2; +CREATE TABLE t1 (x INT); +INSERT INTO t1 VALUES (1); +CREATE TABLE t2 (j JSON); +INSERT INTO t2 (j) VALUES ('[1,2,3]'); +SELECT * FROM t1 RIGHT JOIN +(SELECT o FROM t2, JSON_TABLE(j, '$[*]' COLUMNS (o FOR ORDINALITY)) AS jt) AS t3 ON (t3.o = t1.x); +x o +1 1 +NULL 2 +NULL 3 +DROP TABLE t1, t2; +create table t20 (a int not null); +create table t21 (a int not null primary key, js varchar(100)); +insert into t20 values (1),(2); +insert into t21 values (1, '{"a":100}'); +explain select t20.a, jt1.ab +from t20 left join t21 on t20.a=t21.a +join JSON_TABLE(t21.js,'$' COLUMNS (ab INT PATH '$.a')) AS jt1; +id select_type table type possible_keys key key_len ref rows Extra +1 
SIMPLE t20 ALL NULL NULL NULL NULL 2 +1 SIMPLE t21 eq_ref PRIMARY PRIMARY 4 test.t20.a 1 +1 SIMPLE jt1 ALL NULL NULL NULL NULL 40 Table function: json_table +drop table t20, t21; +select * from +json_table( +'[ + {"name": "X", + "colors":["blue"], "sizes": [1,2,3,4], "prices" : [10,20]}, + {"name": "Y", + "colors":["red"], "sizes": [10,11], "prices" : [100,200,300]} +]', +'$[*]' columns +( +seq0 for ordinality, +name varchar(4) path '$.name', +nested path '$.colors[*]' columns ( +seq1 for ordinality, +color text path '$' + ), +nested path '$.sizes[*]' columns ( +seq2 for ordinality, +size int path '$' + ), +nested path '$.prices[*]' columns ( +seq3 for ordinality, +price int path '$' + ) +) +) as T order by seq0, name; +seq0 name seq1 color seq2 size seq3 price +1 X NULL NULL NULL NULL 1 10 +1 X NULL NULL NULL NULL 2 20 +1 X NULL NULL 1 1 NULL NULL +1 X NULL NULL 2 2 NULL NULL +1 X NULL NULL 3 3 NULL NULL +1 X NULL NULL 4 4 NULL NULL +1 X 1 blue NULL NULL NULL NULL +2 Y NULL NULL NULL NULL 1 100 +2 Y NULL NULL NULL NULL 2 200 +2 Y NULL NULL NULL NULL 3 300 +2 Y NULL NULL 1 10 NULL NULL +2 Y NULL NULL 2 11 NULL NULL +2 Y 1 red NULL NULL NULL NULL +select * from json_table('[]', '$' COLUMNS(x FOR ORDINALITY)); +ERROR HY000: Every table function must have an alias. +select min(x) from json_table('[]', '$' COLUMNS(x FOR ORDINALITY)) a; +min(x) +1 +# +# End of 10.5 tests +# diff --git a/mysql-test/suite/json/r/json_table_mysql.result b/mysql-test/suite/json/r/json_table_mysql.result new file mode 100644 index 00000000000..9475ed930b2 --- /dev/null +++ b/mysql-test/suite/json/r/json_table_mysql.result @@ -0,0 +1,1622 @@ +# +# WL#8867: Add JSON_TABLE table function +# +select * from +json_table( +'[{"a":"3"},{"a":2},{"b":1},{"a":0}]', +'$[*]' columns (id for ordinality, +jpath varchar(100) path '\$.a', +jexst int exists path '\$.b') +) as tt; +id jpath jexst +1 3 0 +2 2 0 +3 NULL 1 +4 0 0 +select * from +json_table( +'[{"x":"3"},{"a":2},{"b":1},{"a":0}]', +'$[*]' columns (id for ordinality, +jpath varchar(100) path '$.a' default '[99]' on error, +jexst int exists path '$.b') +) as tt; +id jpath jexst +1 NULL 0 +2 2 0 +3 NULL 1 +4 0 0 +select * from +json_table( +'[{"x":"3"},{"a":2},{"b":1},{"a":0},{"a":[1,2]}]', +'$[*]' columns (id for ordinality, +jpath varchar(100) path '$.a' + default '33' on empty +default '66' on error, +jsn_path json path '\$.a' default '{"x":33}' on empty, +jexst int exists path '\$.b') +) as tt; +id jpath jsn_path jexst +1 33 {"x":33} 0 +2 2 2 0 +3 33 {"x":33} 1 +4 0 0 0 +5 66 NULL 0 +select * from +json_table( +'[{"a":"3"},{"a":2},{"b":1},{"a":0.33},{"a":"asd"}]', +'$[*]' columns (id for ordinality, +jpath_i int path '$.a' + default '33' on empty +default '66' on error, +jpath_r real path '$.a' + default '33.3' on empty +default '77.7' on error, +jsn_path json path '$.a' default '{"x":33}' on empty, +jexst int exists path '$.b') +) as tt; +id jpath_i jpath_r jsn_path jexst +1 3 3 3 0 +2 2 2 2 0 +3 33 33.3 {"x":33} 1 +4 0 0.33 0.33 0 +5 66 0 asd 0 +select * from +json_table( +'[{"x":"3"},{"a":2},{"b":1},{"a":0},{"a":[1,2]}]', +'$[*]' columns (id for ordinality, +jpath varchar(100) path '$.a' + default '33' on empty +default '66' on error, +jsn_path json path '$.a' default '{"x":33}' on empty, +jexst int exists path '$.b') +) as tt; +id jpath jsn_path jexst +1 33 {"x":33} 0 +2 2 2 0 +3 33 {"x":33} 1 +4 0 0 0 +5 66 NULL 0 +select * from +json_table( +'[{"a":"3"},{"a":2},{"b":1},{"a":0}]', +'$[*]' columns (id for ordinality, +jpath varchar(100) path '$.a', +json_path json 
path '$.a', +jexst int exists path '$.b') +) as tt; +id jpath json_path jexst +1 3 3 0 +2 2 2 0 +3 NULL NULL 1 +4 0 0 0 +select * from +json_table( +'[{"a":"3"},{"a":2},{"b":1},{"a":0}]', +'$[*]' columns (id for ordinality, +jpath varchar(100) path '$.a', +json_path json path '$.a', +jexst int exists path '$.b') +) as tt +where id = 3; +id jpath json_path jexst +3 NULL NULL 1 +select * from +json_table( +'[{"a":"3"},{"a":2},{"b":1},{"a":0}]', +'$[*]' columns (id for ordinality, +jpath varchar(100) path '$.a' error on empty, +jexst int exists path '$.b') +) as tt; +ERROR HY000: Field 'jpath' can't be set for JSON_TABLE 'tt'. +select * from +json_table( +'[{"a":"3"},{"a":2},{"a":1},{"a":[0,1]}]', +'$[*]' columns (id for ordinality, +jpath varchar(100) path '$.a' error on error, +jexst int exists path '$.b') +) as tt; +ERROR HY000: Field 'jpath' can't be set for JSON_TABLE 'tt'. +select * from +json_table( +'!#@$!@#$', +'$[*]' columns (id for ordinality, +jpath varchar(100) path '$.a', +jexst int exists path '$.b') +) as tt; +ERROR HY000: Syntax error in JSON text in argument 1 to function 'JSON_TABLE' at position 1 +select * from +json_table( +'[{"a":"3"},{"a":2},{"b":1},{"a":0}]', +'$[*]' columns (id for ordinality, +id for ordinality) +) as tt; +ERROR 42S21: Duplicate column name 'id' +select * from +json_table( +'[{"a":"3"},{"a":2},{"b":1},{"a":0}]', +'$[*]' columns (id for ordinality, +_id for ordinality) +) as tt; +id _id +1 1 +2 2 +3 3 +4 4 +select * from +json_table( +'[ + {"a":"3", "n": { "l": 1}}, + {"a":2, "n": { "l": 1}}, + {"b":1, "n": { "l": 1}}, + {"a":0, "n": { "l": 1}} + ]', +'$[*]' columns ( +id for ordinality, +jpath varchar(100) path '$.a', +jexst int exists path '$.b', +nested path '$.n' columns ( +id_n for ordinality, +jpath_n varchar(50) path '$.l') +) +) as tt; +id jpath jexst id_n jpath_n +1 3 0 1 1 +2 2 0 1 1 +3 NULL 1 1 1 +4 0 0 1 1 +explain format=json +select * from +json_table( +'[ + {"a":"3", "n": { "l": 1}}, + {"a":2, "n": { "l": 1}}, + {"b":1, "n": { "l": 1}}, + {"a":0, "n": { "l": 1}} + ]', +'$[*]' columns ( +id for ordinality, +jpath varchar(100) path '$.a', +jexst int exists path '$.b', +nested path '$.n' columns ( +id_n for ordinality, +jpath_n varchar(50) path '$.l') +) +) as tt; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "tt", + "access_type": "ALL", + "rows": 40, + "filtered": 100, + "table_function": "json_table" + } + } +} +select * from +json_table( +'[ + {"a":2, "n": [{ "l": 1}, {"l": 11}]}, + {"a":1, "n": [{ "l": 2}, {"l": 22}]} + ]', +'$[*]' columns ( +id for ordinality, +jpath varchar(50) path '$.a', +nested path '$.n[*]' columns ( +id_n for ordinality, +jpath_n varchar(50) path '$.l'), +nested path '$.n[*]' columns ( +id_m for ordinality, +jpath_m varchar(50) path '$.l') +) +) as tt; +id jpath id_n jpath_n id_m jpath_m +1 2 1 1 NULL NULL +1 2 2 11 NULL NULL +1 2 NULL NULL 1 1 +1 2 NULL NULL 2 11 +2 1 1 2 NULL NULL +2 1 2 22 NULL NULL +2 1 NULL NULL 1 2 +2 1 NULL NULL 2 22 +select * from json_table( +'[ + {"a":"3", "n": ["b","a","c"]}, + {"a":2, "n": [1,2]}, + {"b":1, "n": ["zzz"]}, + {"a":0, "n": [0.1, 0.02]} + ]', +'$[*]' columns ( +id for ordinality, +jpath varchar(100) path '$.a', +jexst int exists path '$.b', +nested path '$.n[*]' columns ( +id_n for ordinality, +jpath_n varchar(50) path '$') +) +) as tt; +id jpath jexst id_n jpath_n +1 3 0 1 b +1 3 0 2 a +1 3 0 3 c +2 2 0 1 1 +2 2 0 2 2 +3 NULL 1 1 zzz +4 0 0 1 0.1 +4 0 0 2 0.02 +select * from json_table( +'[ + {"a":"3", "n": ["b","a","c"]}, + {"a":2, "n": 
[1,2]}, + {"b":1, "n": ["zzz"]}, + {"a":0, "n": [0.1, 0.02]} + ]', +'$[*]' columns ( +id for ordinality, +jpath varchar(100) path '$.a', +jexst int exists path '$.b', +nested path '$.n[*]' columns ( +id_n1 for ordinality, +jpath_n1 varchar(50) path '$') , +nested path '$.n[*]' columns ( +id_n2 for ordinality, +jpath_n2 varchar(50) path '$') +) +) as tt; +id jpath jexst id_n1 jpath_n1 id_n2 jpath_n2 +1 3 0 1 b NULL NULL +1 3 0 2 a NULL NULL +1 3 0 3 c NULL NULL +1 3 0 NULL NULL 1 b +1 3 0 NULL NULL 2 a +1 3 0 NULL NULL 3 c +2 2 0 1 1 NULL NULL +2 2 0 2 2 NULL NULL +2 2 0 NULL NULL 1 1 +2 2 0 NULL NULL 2 2 +3 NULL 1 1 zzz NULL NULL +3 NULL 1 NULL NULL 1 zzz +4 0 0 1 0.1 NULL NULL +4 0 0 2 0.02 NULL NULL +4 0 0 NULL NULL 1 0.1 +4 0 0 NULL NULL 2 0.02 +select * from json_table( +'[ + {"a":"3", "n": [ {"ll":["b1","b2","b3"]}, {"ll": ["a1","a2"]}, + {"ll":["c"]} ]}, + {"a":2, "n": [{"ll":[1,11,111]},{"ll":[2]}]}, + {"b":1, "n": [{"ll":["zzz"]}]}, + {"a":0, "n": [{"ll":[0.1,0.01]}, {"ll":[0.02,0.002,0.0002]}]} + ]', +'$[*]' columns ( +id1 for ordinality, +jpath varchar(100) path '$.a', +jexst int exists path '$.b', +nested path '$.n[*]' columns ( +id2 for ordinality, +nested path '$.ll[*]' columns ( +id3 for ordinality, +jpath_3 varchar(50) path '$') +), +nested path '$.n[*]' columns ( +id4 for ordinality, +jpath_4 json path '$') +) +) as tt; +id1 jpath jexst id2 id3 jpath_3 id4 jpath_4 +1 3 0 1 1 b1 NULL NULL +1 3 0 1 2 b2 NULL NULL +1 3 0 1 3 b3 NULL NULL +1 3 0 2 1 a1 NULL NULL +1 3 0 2 2 a2 NULL NULL +1 3 0 3 1 c NULL NULL +1 3 0 NULL NULL NULL 1 NULL +1 3 0 NULL NULL NULL 2 NULL +1 3 0 NULL NULL NULL 3 NULL +2 2 0 1 1 1 NULL NULL +2 2 0 1 2 11 NULL NULL +2 2 0 1 3 111 NULL NULL +2 2 0 2 1 2 NULL NULL +2 2 0 NULL NULL NULL 1 NULL +2 2 0 NULL NULL NULL 2 NULL +3 NULL 1 1 1 zzz NULL NULL +3 NULL 1 NULL NULL NULL 1 NULL +4 0 0 1 1 0.1 NULL NULL +4 0 0 1 2 0.01 NULL NULL +4 0 0 2 1 0.02 NULL NULL +4 0 0 2 2 0.002 NULL NULL +4 0 0 2 3 0.0002 NULL NULL +4 0 0 NULL NULL NULL 1 NULL +4 0 0 NULL NULL NULL 2 NULL +ord should be 1,1,1,2, which tells that first two values of 'l' are +from the same object, and next two are from different objects +SELECT * +FROM JSON_TABLE( +'[{"a": "a_val", + "b": [ + {"c": "c_val", + "l": [1,2]} + ] + }, {"a": "a_val", + "b": [ + {"c": "c_val", + "l": [11]}, + {"c": "c_val", + "l": [22]} + ] + }]', +'$[*]' COLUMNS ( +apath VARCHAR(10) PATH '$.a', +NESTED PATH '$.b[*]' COLUMNS ( +bpath VARCHAR(10) PATH '$.c', +ord FOR ORDINALITY, +NESTED PATH '$.l[*]' COLUMNS ( +lpath varchar(10) PATH '$' + ) +) +)) as jt; +apath bpath ord lpath +a_val c_val 1 1 +a_val c_val 1 2 +a_val c_val 1 11 +a_val c_val 2 22 +CREATE TABLE jt( i JSON ); +SELECT * FROM jt, JSON_TABLE(jt.i, '$' COLUMNS (a INT PATH '$')) AS tt +WHERE a=1; +i a +EXPLAIN SELECT * FROM jt, JSON_TABLE(jt.i, '$' COLUMNS (a INT PATH '$')) AS tt +WHERE a=1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables +SELECT * FROM ( +SELECT * FROM jt, JSON_TABLE(jt.i, '$' COLUMNS (a INT PATH '$')) AS tt +WHERE a=1) AS ttt; +i a +EXPLAIN SELECT * FROM ( +SELECT * FROM jt, JSON_TABLE(jt.i, '$' COLUMNS (a INT PATH '$')) AS tt +WHERE a=1) AS ttt; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables +DROP TABLE jt; +CREATE VIEW v AS +SELECT * FROM JSON_TABLE('[1,2,3]', +'$[*]' COLUMNS (num INT PATH '$.a' + DEFAULT 
'123' ON EMPTY +DEFAULT '456' ON ERROR)) AS jt; +SELECT * FROM v; +num +123 +123 +123 +SHOW CREATE VIEW v; +View Create View character_set_client collation_connection +v CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v` AS select `jt`.`num` AS `num` from JSON_TABLE('[1,2,3]', '$[*]' COLUMNS (`num` int(11) PATH '$.a' DEFAULT '123' ON EMPTY DEFAULT '456' ON ERROR)) `jt` latin1 latin1_swedish_ci +DROP VIEW v; +SELECT * FROM JSON_TABLE('"asdf"', +'$' COLUMNS (a INT PATH '$' ERROR ON ERROR)) AS jt; +ERROR HY000: Field 'a' can't be set for JSON_TABLE 'jt'. +SELECT * FROM +JSON_TABLE('[{"a":1},{"a":2}]', +'$' COLUMNS (a INT PATH '$[*].a' ERROR ON ERROR)) AS jt; +ERROR HY000: Field 'a' can't be set for JSON_TABLE 'jt'. +SELECT * FROM +JSON_TABLE('[{"a":1},{"a":2}]', +'$' COLUMNS (a JSON PATH '$[*].a' ERROR ON ERROR)) AS jt; +ERROR HY000: Field 'a' can't be set for JSON_TABLE 'jt'. +SELECT * FROM +JSON_TABLE('123.456', '$' COLUMNS (a DECIMAL(2,1) PATH '$' ERROR ON ERROR)) AS jt; +ERROR HY000: Field 'a' can't be set for JSON_TABLE 'jt'. +SELECT * FROM +JSON_TABLE('123.456', '$' COLUMNS (a DECIMAL(2,1) PATH '$')) AS jt; +a +NULL +SELECT * FROM +JSON_TABLE('{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{}}}}}}}}}}}}}}}}}}}', +'$' COLUMNS (i0 INT PATH '$.a', +NESTED PATH '$.b' COLUMNS (i1 INT PATH '$.a', +NESTED PATH '$.b' COLUMNS (i2 INT PATH '$.a', +NESTED PATH '$.b' COLUMNS (i3 INT PATH '$.a', +NESTED PATH '$.b' COLUMNS (i4 INT PATH '$.a', +NESTED PATH '$.b' COLUMNS (i5 INT PATH '$.a', +NESTED PATH '$.b' COLUMNS (i6 INT PATH '$.a', +NESTED PATH '$.b' COLUMNS (i7 INT PATH '$.a', +NESTED PATH '$.b' COLUMNS (i8 INT PATH '$.a', +NESTED PATH '$.b' COLUMNS (i9 INT PATH '$.a', +NESTED PATH '$.b' COLUMNS (i10 INT PATH '$.a', +NESTED PATH '$.b' COLUMNS (i11 INT PATH '$.a', +NESTED PATH '$.b' COLUMNS (i12 INT PATH '$.a', +NESTED PATH '$.b' COLUMNS (i13 INT PATH '$.a', +NESTED PATH '$.b' COLUMNS (i14 INT PATH '$.a', +NESTED PATH '$.b' COLUMNS (i15 INT PATH '$.a', +NESTED PATH '$.b' COLUMNS (i16 INT PATH '$.a', +NESTED PATH '$.b' COLUMNS (i17 INT PATH '$.a', +NESTED PATH '$.b' COLUMNS (i18 INT PATH '$.a', +NESTED PATH '$.b' COLUMNS (i19 INT PATH '$.a', +NESTED PATH '$.b' COLUMNS (i20 INT PATH '$.a' + )))))))))))))))))))))) jt; +i0 i1 i2 i3 i4 i5 i6 i7 i8 i9 i10 i11 i12 i13 i14 i15 i16 i17 i18 i19 i20 +1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 NULL NULL NULL +CREATE TABLE t1(id int, jd JSON); +INSERT INTO t1 values (1, '[1,3,5]'),(2,'[2,4,6]'); +SELECT id, jt.* FROM t1, +JSON_TABLE(jd, '$[*]' COLUMNS (jid FOR ORDINALITY, +val INT PATH '$')) AS jt; +id jid val +1 1 1 +1 2 3 +1 3 5 +2 1 2 +2 2 4 +2 3 6 +SELECT /*+ JOIN_ORDER(jt, t1) */ id, jt.* +FROM t1, +JSON_TABLE(jd, '$[*]' COLUMNS (jid FOR ORDINALITY, +val INT PATH '$')) AS jt; +id jid val +1 1 1 +1 2 3 +1 3 5 +2 1 2 +2 2 4 +2 3 6 +EXPLAIN SELECT /*+ JOIN_ORDER(jt, t1) */ id, jt.* +FROM t1, +JSON_TABLE(jd, '$[*]' COLUMNS (jid FOR ORDINALITY, +val INT PATH '$')) AS jt; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 2 +1 SIMPLE jt ALL NULL NULL NULL NULL 40 Table function: json_table +SELECT /*+ JOIN_ORDER(t2,jt) */ t1.id, t2.id, jt.* +FROM t1, +JSON_TABLE(t1.jd, '$[*]' COLUMNS (jid FOR ORDINALITY, +val INT PATH '$')) AS jt, +t1 AS t2; +id id jid val +1 1 1 1 +1 1 2 3 +1 1 3 5 +1 2 1 1 +1 2 2 3 +1 2 3 5 +2 1 1 2 +2 1 2 4 +2 1 3 
6 +2 2 1 2 +2 2 2 4 +2 2 3 6 +EXPLAIN SELECT t1.id, t2.id, jt.* +FROM t1, +JSON_TABLE(t1.jd, '$[*]' COLUMNS (jid FOR ORDINALITY, +val INT PATH '$')) AS jt, +t1 AS t2; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 2 +1 SIMPLE t2 ALL NULL NULL NULL NULL 2 Using join buffer (flat, BNL join) +1 SIMPLE jt ALL NULL NULL NULL NULL 40 Table function: json_table +EXPLAIN SELECT /*+ JOIN_ORDER(t2,jt) */ t1.id, t2.id, jt.* +FROM t1, +JSON_TABLE(t1.jd, '$[*]' COLUMNS (jid FOR ORDINALITY, +val INT PATH '$')) AS jt, +t1 AS t2; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 2 +1 SIMPLE t2 ALL NULL NULL NULL NULL 2 Using join buffer (flat, BNL join) +1 SIMPLE jt ALL NULL NULL NULL NULL 40 Table function: json_table +SELECT * FROM t1 WHERE id IN +(SELECT * FROM JSON_TABLE('[1,2]', '$[*]' COLUMNS +(id INT PATH '$')) AS jt); +id jd +1 [1,3,5] +2 [2,4,6] +EXPLAIN SELECT * FROM t1 WHERE id IN +(SELECT * FROM JSON_TABLE('[1,2]', '$[*]' COLUMNS +(id INT PATH '$')) AS jt); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 2 +1 PRIMARY eq_ref distinct_key distinct_key 4 func 1 +2 MATERIALIZED jt ALL NULL NULL NULL NULL 40 Table function: json_table +DROP TABLE t1; +SELECT * FROM JSON_TABLE('"asdf"', '$' COLUMNS( +tm TIME PATH '$', +dt DATE PATH '$', +i INT PATH '$', +f FLOAT PATH '$', +d DECIMAL PATH '$')) AS jt; +tm dt i f d +NULL NULL NULL 0 0 +SELECT * FROM +JSON_TABLE('{}', '$' COLUMNS (x INT PATH '$.x' DEFAULT NULL ON EMPTY)) jt; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'NULL ON EMPTY)) jt' at line 2 +SELECT * FROM +JSON_TABLE('{}', '$' COLUMNS (x INT PATH '$.x' DEFAULT NULL ON ERROR)) jt; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'NULL ON ERROR)) jt' at line 2 +SELECT * FROM +JSON_TABLE('{}', '$' COLUMNS (x INT PATH '$.x' DEFAULT 0 ON EMPTY)) jt; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '0 ON EMPTY)) jt' at line 2 +SELECT * FROM +JSON_TABLE('{}', '$' COLUMNS (x INT PATH '$.x' DEFAULT 0 ON ERROR)) jt; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '0 ON ERROR)) jt' at line 2 +SELECT * FROM +JSON_TABLE('{}', '$' COLUMNS (x DATE +PATH '$.x' + DEFAULT DATE'2020-01-01' ON EMPTY)) jt; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'DATE'2020-01-01' ON EMPTY)) jt' at line 4 +SELECT * FROM +JSON_TABLE('{}', '$' COLUMNS (x DATE +PATH '$.x' + DEFAULT DATE'2020-01-01' ON ERROR)) jt; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'DATE'2020-01-01' ON ERROR)) jt' at line 4 +# +# Bug#25413069: SIG11 IN CHECK_COLUMN_GRANT_IN_TABLE_REF +# +SELECT a FROM JSON_TABLE(abc, '$[*]' COLUMNS ( a int path '$.a')) AS jt; +ERROR 42S22: Unknown column 'abc' in 'JSON_TABLE argument' +# +# Bug#25420680: ASSERTION `THD->IS_ERROR()' FAILED IN SQL/SQL_SELECT.CC +# +SELECT * FROM JSON_TABLE('{"a":"2017-11-1"}', +'$' COLUMNS (jpath DATE PATH '$.a')) 
AS jt; +jpath +2017-11-01 +# +# Bug#25413826: ASSERTION `TABLE_LIST->ALIAS' FAILED +# +SELECT * FROM JSON_TABLE('[{"a": 1, "b": 2}]', +'$[*]' COLUMNS ( a int path '$.b')); +ERROR HY000: Every table function must have an alias. +# +# Bug#25421464: ASSERTION `!STRCMP(TABLE_REF->TABLE_NAME, ... +# +CREATE VIEW v1 AS +SELECT * FROM JSON_TABLE('[{"a": 1, "b": 2}]', +'$[*]' COLUMNS ( a INT PATH '$.b')) AS jt; +SELECT * FROM v1; +a +2 +SHOW CREATE VIEW v1; +View Create View character_set_client collation_connection +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `jt`.`a` AS `a` from JSON_TABLE('[{"a": 1, "b": 2}]', '$[*]' COLUMNS (`a` int(11) PATH '$.b')) `jt` latin1 latin1_swedish_ci +DROP VIEW v1; +# +# Bug#25427457: ASSERTION `!((*REG_FIELD)->FLAGS & 16)' +# +SELECT * FROM JSON_TABLE('{"a":"1"}', +'$' COLUMNS (jpath JSON PATH '$.a', +o FOR ORDINALITY)) AS jt +WHERE o = 1; +jpath o +1 1 +# +# Bug#25427982: ASSERTION `DERIVED' FAILED IN SQL/TABLE.H +# +SELECT je,o FROM JSON_TABLE('{"a":"1"}', +'$' COLUMNS (o FOR ORDINALITY, +je BIGINT EXISTS PATH '$.a')) AS jt +GROUP BY je; +je o +1 1 +SELECT je,COUNT(o) FROM JSON_TABLE('{"a":"1"}', +'$' COLUMNS (o FOR ORDINALITY, +je BIGINT EXISTS PATH '$.a')) AS jt +GROUP BY je; +je COUNT(o) +1 1 +# +# Bug#25413194: ASSERTION `!(WANT_PRIVILEGE & ~(GRANT->WANT_PRIVILEGE +# +CREATE TABLE t1 (j JSON); +SELECT * FROM t1,JSON_TABLE(t1.j, '$[*]' COLUMNS ( a int path '$.b')) AS jt; +j a +DROP TABLE t1; +# +# Bug#25460537:SIG 11 IN NEXT_FAST AT SQL/SQL_LIST.H +# +PREPARE STMT FROM +"SELECT * FROM JSON_TABLE( + \'[{\"a\":\"3\"},{\"a\":2},{\"b\":1},{\"a\":0}]\', + \'$[*]\' COLUMNS (id + FOR ORDINALITY, + jpath VARCHAR(100) PATH \'$.a\', + jexst INT EXISTS PATH \'$.b\') + ) as tt"; +EXECUTE STMT; +id jpath jexst +1 3 0 +2 2 0 +3 NULL 1 +4 0 0 +EXECUTE STMT; +id jpath jexst +1 3 0 +2 2 0 +3 NULL 1 +4 0 0 +DEALLOCATE PREPARE stmt; +# +# Bug#25522353: SIG 11 IN JOIN::MAKE_JOIN_PLAN | SQL/SQL_OPTIMIZER.CC +# +CREATE TABLE t1 (id INT, jc JSON); +SELECT * FROM t1 RIGHT JOIN JSON_TABLE(t1.jc, '$' COLUMNS +(id FOR ORDINALITY)) as jt ON t1.jc=jt.id; +id jc id +SELECT * FROM JSON_TABLE(t1.jc, '$' COLUMNS +(id FOR ORDINALITY)) as jt LEFT JOIN t1 ON t1.jc=jt.id; +ERROR 42S02: Unknown table 't1' in JSON_TABLE argument +SELECT * FROM JSON_TABLE(t1.jc, '$' COLUMNS +(id FOR ORDINALITY)) as jt RIGHT JOIN t1 ON t1.jc=jt.id; +ERROR 42S02: Unknown table 't1' in JSON_TABLE argument +SELECT * FROM t1 LEFT JOIN JSON_TABLE(t1.jc, '$' COLUMNS +(id FOR ORDINALITY)) as jt ON t1.jc=jt.id; +id jc id +EXPLAIN SELECT * FROM t1 LEFT JOIN JSON_TABLE(t1.jc, '$' COLUMNS +(id FOR ORDINALITY)) as jt ON t1.jc=jt.id; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables +SELECT * FROM t1 t1o RIGHT JOIN t1 ON t1o.id=t1.id +LEFT JOIN JSON_TABLE(t1.jc, '$' COLUMNS +(id FOR ORDINALITY)) as jt ON t1.jc=jt.id; +id jc id jc id +EXPLAIN SELECT * FROM t1 t1o RIGHT JOIN t1 ON t1o.id=t1.id +LEFT JOIN JSON_TABLE(t1.jc, '$' COLUMNS +(id FOR ORDINALITY)) as jt ON t1.jc=jt.id; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables +SELECT * FROM t1 AS t1o RIGHT JOIN +(t1 AS t1i JOIN JSON_TABLE(t1o.jc, '$' COLUMNS +(id FOR ORDINALITY)) as jt ON t1i.jc=jt.id) +ON t1o.id=t1i.id; +id jc id jc id +SELECT * FROM t1 AS t1o RIGHT JOIN +(t1 AS t1i RIGHT JOIN 
JSON_TABLE(t1o.jc, '$' COLUMNS +(id FOR ORDINALITY)) as jt ON t1i.jc=jt.id) +ON t1o.id=t1i.id; +id jc id jc id +WITH qn AS +(SELECT jt.* FROM t1 RIGHT JOIN JSON_TABLE(t1.jc, '$' COLUMNS +(id FOR ORDINALITY)) as jt ON t1.jc=jt.id) +SELECT * from qn; +id +WITH qn AS +(SELECT 1 UNION +SELECT jt.id FROM t1 RIGHT JOIN JSON_TABLE(t1.jc, '$' COLUMNS +(id FOR ORDINALITY)) as jt ON t1.jc=jt.id) +SELECT * from qn; +1 +1 +SELECT * FROM t1 AS t1o RIGHT JOIN +(t1 AS t1i JOIN JSON_TABLE(t1o.jc, '$' COLUMNS +(id FOR ORDINALITY)) as jt ON t1i.jc=jt.id) +ON t1o.id=t1i.id; +id jc id jc id +SELECT * FROM t1 AS t1o RIGHT JOIN +(t1 AS t1i RIGHT JOIN JSON_TABLE(t1o.jc, '$' COLUMNS +(id FOR ORDINALITY)) as jt ON t1i.jc=jt.id) +ON t1o.id=t1i.id; +id jc id jc id +INSERT INTO t1 VALUES(1,"1"),(2,"4"),(3,"3"); +ANALYZE TABLE t1; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze Warning Engine-independent statistics are not collected for column 'jc' +test.t1 analyze status OK +SELECT * FROM t1 LEFT JOIN JSON_TABLE(t1.jc, '$' COLUMNS +(id INT PATH '$')) as jt ON t1.id=jt.id; +id jc id +1 1 1 +2 4 NULL +3 3 3 +EXPLAIN SELECT * FROM t1 LEFT JOIN JSON_TABLE(t1.jc, '$' COLUMNS +(id INT PATH '$')) as jt ON t1.id=jt.id; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 3 +1 SIMPLE jt ALL NULL NULL NULL NULL 40 Table function: json_table; Using where +SELECT * FROM t1 +LEFT JOIN +JSON_TABLE(t1.jc, '$' COLUMNS (id FOR ORDINALITY)) as jt ON t1.jc=jt.id +RIGHT JOIN +JSON_TABLE(jt.id, '$' COLUMNS (id FOR ORDINALITY)) as jt1 ON jt.id=jt1.id; +ERROR 42000: Cross dependency found in OUTER JOIN; examine your ON conditions +DROP TABLE t1; +# +# Bug#25525409: ASSERTION `TABLE_LIST->TABLE' FAILED IN SQL/SQL_BASE.CC +# +SELECT * FROM JSON_TABLE( ( SELECT a ) , '$.*' COLUMNS (col1 FOR ORDINALITY) ) +AS alias1; +ERROR 42S22: Unknown column 'a' in 'field list' +SELECT * FROM JSON_TABLE( ( SELECT 1 ) , '$.*' COLUMNS (col1 FOR ORDINALITY) ) +AS alias1; +col1 +SELECT * FROM JSON_TABLE( ( SUM(1) ) , '$.*' COLUMNS (col1 FOR ORDINALITY) ) +AS alias1; +ERROR HY000: Invalid use of group function +# +# Bug# #25472875: ERROR SHOULD BE THROWN FOR INCORRECT VALUES +# +SELECT * +FROM JSON_TABLE('{"a":"1993-01-01"}', +'$' COLUMNS (jp DATE PATH '$.b' DEFAULT '1000' ON EMPTY)) +AS jt; +jp +0000-00-00 +# +# Bug#25532429: INVALID JSON ERROR NOT THROWN WITH EMPTY TABLES JOIN +# +CREATE TABLE t1(j JSON); +SELECT * FROM t1, +JSON_TABLE( 'dqwfjqjf' , '$[*]' COLUMNS (col5 FOR ORDINALITY) ) AS alias7; +j col5 +DROP TABLE t1; +# +# Bug#25540370: SIG 11 IN SHOW_SQL_TYPE|SQL/SQL_SHOW.CC:7063 +# +SELECT * FROM +JSON_TABLE('[3.14159]', +'$[*]' COLUMNS (col18 CHAR(70) PATH '$') +) AS alias2; +col18 +3.14159 +# Too short field causes truncation, error and triggers ON ERROR clause +SELECT * FROM +JSON_TABLE('["3.14159"]', +'$[*]' COLUMNS (col18 CHAR(6) PATH '$') +) AS alias2; +col18 +3.1415 +#Truncated space doesn't trigger ON ERROR +SELECT * FROM +JSON_TABLE('["3.14159 "]', +'$[*]' COLUMNS (col18 CHAR(7) PATH '$') +) AS alias2; +col18 +3.14159 +SELECT * FROM +JSON_TABLE('[3.14159]', +'$[*]' COLUMNS (col18 CHAR(255) PATH '$') +) AS alias2; +col18 +3.14159 +SELECT * FROM +JSON_TABLE('[3.14159]', +'$[*]' COLUMNS (col18 VARCHAR(70) PATH '$') +) AS alias2; +col18 +3.14159 +SELECT * FROM +JSON_TABLE('[3.14159]', +'$[*]' COLUMNS (col18 VARCHAR(255) PATH '$') +) AS alias2; +col18 +3.14159 +SELECT * FROM +JSON_TABLE('[3.14159]', +'$[*]' COLUMNS (col18 FLOAT PATH '$') 
+) AS alias2; +col18 +3.14159 +SELECT * FROM +JSON_TABLE('[3.14159]', +'$[*]' COLUMNS (col18 DOUBLE PATH '$') +) AS alias2; +col18 +3.14159 +SELECT * FROM +JSON_TABLE('[3.14159]', +'$[*]' COLUMNS (col18 DECIMAL(3,3) PATH '$') +) AS alias2; +col18 +NULL +SELECT * FROM +JSON_TABLE('[3.14159]', +'$[*]' COLUMNS (col18 DECIMAL(3,3) PATH '$' ERROR ON ERROR) +) AS alias2; +ERROR HY000: Field 'col18' can't be set for JSON_TABLE 'alias2'. +SELECT * FROM +JSON_TABLE('[0.9]', +'$[*]' COLUMNS (col18 DECIMAL(3,3) PATH '$') +) AS alias2; +col18 +0.900 +SELECT * FROM +JSON_TABLE('["asdf","ghjk"]', +'$[*]' COLUMNS (col18 DECIMAL(4,3) PATH '$' +DEFAULT "3.14159" ON ERROR) +) AS alias2; +col18 +0.000 +0.000 +CREATE TABLE t1(jd JSON); +INSERT INTO t1 VALUES('["asdf"]'),('["ghjk"]'); +SELECT * FROM t1, +JSON_TABLE(jd, +'$[*]' COLUMNS (col18 DECIMAL(4,3) PATH '$' +DEFAULT "3.14159" ON ERROR) +) AS alias2; +jd col18 +["asdf"] 0.000 +["ghjk"] 0.000 +DROP TABLE t1; +# +# Bug#25540027: SIG 11 IN FIND_FIELD_IN_TABLE | SQL/SQL_BASE.CC +# +CREATE TABLE t1(c1 JSON); +UPDATE t1, JSON_TABLE(t1.c1,'$[*]' COLUMNS (a INT PATH '$.a')) AS jt1 +SET jt1.a=1; +ERROR HY000: The target table jt1 of the UPDATE is not updatable +DELETE JSON_TABLE(t1.c1,'$[*]' COLUMNS (a INT PATH '$.a')) AS jt1 +FROM t1; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'JSON_TABLE(t1.c1,'$[*]' COLUMNS (a INT PATH '$.a')) AS jt1 +FROM t1' at line 1 +DELETE t1, JSON_TABLE(t1.c1,'$[*]' COLUMNS (a INT PATH '$.a')) AS jt1 +USING t1; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'JSON_TABLE(t1.c1,'$[*]' COLUMNS (a INT PATH '$.a')) AS jt1 +USING t1' at line 1 +DROP TABLE t1; +# +# Bug#25547244: ASSERTION `!TABLE || (!TABLE->READ_SET || BITMAP_IS_SET( +# +CREATE TABLE t1(i INT); +INSERT INTO t1 VALUES(1); +DROP TABLE t1; +# +# Bug#25540675: ASSERTION `!COL->CHILD_JDS->PRODUCING_RECORDS' FAILED +# +CREATE TABLE j1(j JSON); +INSERT INTO j1 VALUES('[1,2,3]'),('[1,2,4]'); +SELECT * FROM j1, +JSON_TABLE( JSON_OBJECT('key1', j) , +'$.*' COLUMNS (NESTED PATH '$.*' COLUMNS (col11 FOR ORDINALITY))) AS alias2; +j col11 +[1,2,3] NULL +[1,2,4] NULL +DROP TABLE j1; +# +# Bug#25584335: SIG 11 IN TABLE_LIST::FETCH_NUMBER_OF_ROWS +# +CREATE TABLE t1(i INT); +PREPARE stmt FROM "SELECT alias1.i AS field1 FROM ( + t1 AS alias1, + (SELECT * FROM + JSON_TABLE('[1,2,3]' , + '$[*]' COLUMNS (`col_varchar` FOR ORDINALITY)) AS SQ1_alias1 + ) AS alias2 )"; +EXECUTE stmt; +field1 +DEALLOCATE PREPARE stmt; +DROP TABLE t1; +# +# Bug#25604048: COLUMN NAMES WITH SAME 33-CHAR PREFIX ARE EQUAL +# +SELECT * FROM JSON_TABLE( '{"key1": "test"}' , '$' COLUMNS( +column_name_is_thirty_four_or_more VARCHAR(17) PATH '$.key1', +column_name_is_thirty_four_or_more_yes_indeed VARCHAR(17) PATH '$.key1' +) ) AS alias1; +column_name_is_thirty_four_or_more column_name_is_thirty_four_or_more_yes_indeed +test test +SELECT * FROM JSON_TABLE( '{"key1": "test"}' , '$' COLUMNS( +`column_name_is_thirty_four_or_more ` VARCHAR(17) PATH '$.key1' +) ) AS alias1; +column_name_is_thirty_four_or_more +test +# +# Bug#25604404: JSON_TABLE MORE RESTRICTIVE WITH IDENTIFIERS THAN +# CREATE TABLE +# +SELECT * FROM JSON_TABLE( '[1, 2]', '$' COLUMNS( +one INT PATH '$[0]', two INT PATH '$[1]' +)) AS jt; +one two +1 2 +# +# Bug#25588450: SIG 6 IN JSON_WRAPPER::SEEK|SQL/JSON_DOM.CC +# +CREATE TABLE t1(c VARCHAR(10)) 
ENGINE=MEMORY; +INSERT INTO t1 VALUES('fiheife'); +SELECT * FROM `t1` AS alias1, JSON_TABLE( `c` , '$[*]' COLUMNS (jtcol1 JSON +PATH '$.*')) AS alias2 WHERE jtcol1 <= 'kjfh'; +ERROR HY000: Syntax error in JSON text in argument 1 to function 'JSON_TABLE' at position 1 +DROP TABLE t1; +# +# Bug#25587754: ASSERTION `FIXED == 0 || BASIC_CONST_ITEM()' FAILED +# +PREPARE stmt FROM +"SELECT * FROM JSON_TABLE( '[1,2]', '$[*]' + COLUMNS (jtcol1 JSON PATH '$.*')) AS alias2"; +EXECUTE stmt; +jtcol1 +NULL +NULL +DEALLOCATE PREPARE stmt; +# +# Bug#25584593: UNABLE TO USE JSON_TABLE() ON TEXT/BLOB JSON DATA +# +SELECT * FROM JSON_TABLE(NULL, '$.k' COLUMNS (id FOR ORDINALITY)) AS aLias; +id +SELECT * FROM JSON_TABLE(@myjson, '$.k' COLUMNS (id FOR ORDINALITY)) AS alias; +id +SET @myjson = '{"k": 42}'; +SELECT * FROM JSON_TABLE(@myjson, '$.k' COLUMNS (id FOR ORDINALITY)) AS alias; +id +1 +CREATE TABLE t1( +txt TEXT, ty TINYTEXT, tm MEDIUMTEXT, tl LONGTEXT); +INSERT INTO t1 values ( +'{"k": "text"}','{"k": "tinytext"}','{"k": "mediumtext"}','{"k": "longtext"}'); +SELECT alias.* FROM t1, JSON_TABLE(t1.txt,'$.k' COLUMNS (id VARCHAR(10) PATH '$')) AS alias; +id +text +SELECT alias.* FROM t1, JSON_TABLE(t1.ty, '$.k' COLUMNS (id VARCHAR(10) PATH '$')) AS alias; +id +tinytext +SELECT alias.* FROM t1, JSON_TABLE(t1.tm, '$.k' COLUMNS (id VARCHAR(10) PATH '$')) AS alias; +id +mediumtext +SELECT alias.* FROM t1, JSON_TABLE(t1.tl, '$.k' COLUMNS (id VARCHAR(10) PATH '$')) AS alias; +id +longtext +SELECT alias.* FROM t1, JSON_TABLE(t1.txt,'$.k' COLUMNS (id TEXT PATH '$')) AS alias; +id +text +SELECT alias.* FROM t1, JSON_TABLE(t1.txt,'$.k' COLUMNS (id TINYTEXT PATH '$')) AS alias; +id +text +SELECT alias.* FROM t1, JSON_TABLE(t1.txt,'$.k' COLUMNS (id MEDIUMTEXT PATH '$')) AS alias; +id +text +SELECT alias.* FROM t1, JSON_TABLE(t1.txt,'$.k' COLUMNS (id LONGTEXT PATH '$')) AS alias; +id +text +SELECT alias.* FROM JSON_TABLE('"asd123"', '$' COLUMNS (blb BLOB PATH '$')) as alias; +blb +asd123 +EXPLAIN SELECT alias.* FROM JSON_TABLE('"asd123"', '$' COLUMNS (blb BLOB PATH '$')) as alias; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE alias ALL NULL NULL NULL NULL 40 Table function: json_table +EXPLAIN SELECT alias.* FROM JSON_TABLE('"asd123"', '$' COLUMNS (blb TINYBLOB PATH '$')) as alias; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE alias ALL NULL NULL NULL NULL 40 Table function: json_table +EXPLAIN SELECT alias.* FROM JSON_TABLE('"asd123"', '$' COLUMNS (blb MEDIUMBLOB PATH '$')) as alias; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE alias ALL NULL NULL NULL NULL 40 Table function: json_table +EXPLAIN SELECT alias.* FROM JSON_TABLE('"asd123"', '$' COLUMNS (blb LONGBLOB PATH '$')) as alias; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE alias ALL NULL NULL NULL NULL 40 Table function: json_table +EXPLAIN SELECT alias.* FROM JSON_TABLE('"asd123"', '$' COLUMNS (blb TEXT PATH '$')) as alias; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE alias ALL NULL NULL NULL NULL 40 Table function: json_table +EXPLAIN SELECT alias.* FROM JSON_TABLE('"asd123"', '$' COLUMNS (blb TINYTEXT PATH '$')) as alias; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE alias ALL NULL NULL NULL NULL 40 Table function: json_table +EXPLAIN SELECT alias.* FROM JSON_TABLE('"asd123"', '$' COLUMNS (blb MEDIUMTEXT PATH '$')) as alias; +id select_type table type possible_keys key 
key_len ref rows Extra +1 SIMPLE alias ALL NULL NULL NULL NULL 40 Table function: json_table +EXPLAIN SELECT alias.* FROM JSON_TABLE('"asd123"', '$' COLUMNS (blb LONGTEXT PATH '$')) as alias; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE alias ALL NULL NULL NULL NULL 40 Table function: json_table +SELECT * FROM +(SELECT CAST(blb AS JSON) jf FROM +JSON_TABLE('"asd123"', '$' COLUMNS (blb BLOB PATH '$')) AS jti) AS dt, +JSON_TABLE(jf, '$' COLUMNS (blb BLOB PATH '$')) AS jto; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'JSON) jf FROM +JSON_TABLE('"asd123"', '$' COLUMNS (blb BLOB PATH '$')) AS jti)...' at line 2 +DROP TABLE t1; +# +# Bug#26500384: ASSERT FAILURE IN QUERY WITH WINDOW FUNCTION AND +# JSON_TABLE +# +CREATE TABLE t (x INT); +INSERT INTO t VALUES (1), (2), (3); +SELECT MAX(t.x) OVER () m, jt.* FROM t, +JSON_TABLE(JSON_ARRAY(m), '$[*]' COLUMNS (i INT PATH '$')) jt; +ERROR 42S22: Unknown column 'm' in 'JSON_TABLE argument' +DROP TABLE t; +# +# Bug#26583283: ASSERTION `!THD->IS_ERROR()' FAILED IN SQL_RESOLVER.CC +# +EXPLAIN SELECT * FROM JSON_TABLE('null', '$' COLUMNS(AA DECIMAL PATH '$')) tt; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE tt ALL NULL NULL NULL NULL 40 Table function: json_table +CREATE VIEW v1 AS SELECT * FROM +JSON_TABLE( 'null', '$' COLUMNS (c1 DECIMAL PATH '$' ) ) AS jt; +SELECT * FROM v1; +c1 +NULL +EXPLAIN SELECT * FROM v1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE jt ALL NULL NULL NULL NULL 40 Table function: json_table +DROP VIEW v1; +# +# Bug#25822408: ASSERTION `!COL->CHILD_JDS->PRODUCING_RECORDS' FAILED +# +PREPARE stmt FROM "SELECT * FROM + JSON_TABLE('{\"a\":1}','$' COLUMNS (c1 CHAR(20) PATH '$.b' ERROR ON EMPTY)) jt"; +EXECUTE stmt; +ERROR HY000: Field 'c1' can't be set for JSON_TABLE 'jt'. +EXECUTE stmt; +ERROR HY000: Field 'c1' can't be set for JSON_TABLE 'jt'. +# +# Bug#25594571: CRASH AT ITEM::CONST_ITEM|SQL/ITEM.H +# +CREATE TABLE t1 (i INT); +INSERT INTO t1 VALUES(1),(2),(3),(4),(5),(6),(7); +PREPARE stmt FROM "SELECT * FROM t1 AS alias1 LEFT JOIN t1 AS alias2 +LEFT JOIN JSON_TABLE('[1,2,3]', '$[*]' COLUMNS (i FOR ORDINALITY )) AS +alias3 ON alias2 . `i` = alias3 . `i` ON alias1 . `i` = alias2 . `i`"; +EXECUTE stmt; +i i i +1 1 1 +2 2 2 +3 3 3 +4 4 NULL +5 5 NULL +6 6 NULL +7 7 NULL +EXECUTE stmt; +i i i +1 1 1 +2 2 2 +3 3 3 +4 4 NULL +5 5 NULL +6 6 NULL +7 7 NULL +DROP TABLE t1; +# +# Bug#26648617: ASSERTION `IS_VIEW_OR_DERIVED() && +# USES_MATERIALIZATION()' FAILED. 
+# +CREATE TABLE t1 ( +col_varchar_key varchar(1) DEFAULT NULL +) ENGINE=InnoDB; +INSERT INTO t1 VALUES(1),(4); +SELECT * FROM t1 WHERE col_varchar_key NOT IN ( +SELECT col_varchar_key FROM JSON_TABLE( +'[{"col_key": 1},{"col_key": 2}]', "$[*]" COLUMNS +(col_varchar_key VARCHAR(10) PATH "$.col_key")) AS innr1); +col_varchar_key +4 +EXPLAIN SELECT * FROM t1 WHERE col_varchar_key NOT IN ( +SELECT col_varchar_key FROM JSON_TABLE( +'[{"col_key": 1},{"col_key": 2}]', "$[*]" COLUMNS +(col_varchar_key VARCHAR(10) PATH "$.col_key")) AS innr1); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using where +2 MATERIALIZED innr1 ALL NULL NULL NULL NULL 40 Table function: json_table +SELECT * FROM t1 WHERE col_varchar_key IN ( +SELECT col_varchar_key FROM JSON_TABLE( +'[{"col_key": 1},{"col_key": 2}]', "$[*]" COLUMNS +(col_varchar_key VARCHAR(10) PATH "$.col_key")) AS innr1); +col_varchar_key +1 +EXPLAIN SELECT * FROM t1 WHERE col_varchar_key IN ( +SELECT col_varchar_key FROM JSON_TABLE( +'[{"col_key": 1},{"col_key": 2}]', "$[*]" COLUMNS +(col_varchar_key VARCHAR(10) PATH "$.col_key")) AS innr1); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 2 +1 PRIMARY eq_ref distinct_key distinct_key 13 func 1 Using where +2 MATERIALIZED innr1 ALL NULL NULL NULL NULL 40 Table function: json_table +DROP TABLE t1; +# +# Bug#26711551: WL8867:CONDITIONAL JUMP IN JSON_TABLE_COLUMN::CLEANUP +# +CREATE TABLE t(x int, y int); +INSERT INTO t(x) VALUES (1); +UPDATE t t1, JSON_TABLE('[2]', '$[*]' COLUMNS (x INT PATH '$')) t2 +SET t1.y = t2.x; +SELECT * FROM t; +x y +1 2 +DROP TABLE t; +# +# Bug#26679671: SIG 11 IN JSON_BINARY::PARSE_BINARY() +# +CREATE TABLE t1(id INT, f1 JSON); +INSERT INTO t1 VALUES (1, '{\"1\": 1}'), (2, '{\"1\": 2}'), (3, '{\"1\": 3}'), +(4, '{\"1\": 4}'), (5, '{\"1\": 5}'), (6, '{\"1\": 6}'); +ANALYZE TABLE t1; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze Warning Engine-independent statistics are not collected for column 'f1' +test.t1 analyze status OK +ANALYZE TABLE t1; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze Warning Engine-independent statistics are not collected for column 'f1' +test.t1 analyze status Table is already up to date +SELECT * FROM t1 as jj1, +(SELECT tt2.* FROM t1 as tt2, +JSON_TABLE(f1, "$" COLUMNS (id FOR ORDINALITY)) AS tbl) dt; +id f1 id f1 +1 {"1": 1} 1 {"1": 1} +1 {"1": 1} 2 {"1": 2} +1 {"1": 1} 3 {"1": 3} +1 {"1": 1} 4 {"1": 4} +1 {"1": 1} 5 {"1": 5} +1 {"1": 1} 6 {"1": 6} +2 {"1": 2} 1 {"1": 1} +2 {"1": 2} 2 {"1": 2} +2 {"1": 2} 3 {"1": 3} +2 {"1": 2} 4 {"1": 4} +2 {"1": 2} 5 {"1": 5} +2 {"1": 2} 6 {"1": 6} +3 {"1": 3} 1 {"1": 1} +3 {"1": 3} 2 {"1": 2} +3 {"1": 3} 3 {"1": 3} +3 {"1": 3} 4 {"1": 4} +3 {"1": 3} 5 {"1": 5} +3 {"1": 3} 6 {"1": 6} +4 {"1": 4} 1 {"1": 1} +4 {"1": 4} 2 {"1": 2} +4 {"1": 4} 3 {"1": 3} +4 {"1": 4} 4 {"1": 4} +4 {"1": 4} 5 {"1": 5} +4 {"1": 4} 6 {"1": 6} +5 {"1": 5} 1 {"1": 1} +5 {"1": 5} 2 {"1": 2} +5 {"1": 5} 3 {"1": 3} +5 {"1": 5} 4 {"1": 4} +5 {"1": 5} 5 {"1": 5} +5 {"1": 5} 6 {"1": 6} +6 {"1": 6} 1 {"1": 1} +6 {"1": 6} 2 {"1": 2} +6 {"1": 6} 3 {"1": 3} +6 {"1": 6} 4 {"1": 4} +6 {"1": 6} 5 {"1": 5} +6 {"1": 6} 6 {"1": 6} +EXPLAIN SELECT * FROM t1 as jj1, +(SELECT tt2.* FROM t1 as tt2, +JSON_TABLE(f1, "$" COLUMNS (id FOR ORDINALITY)) AS tbl) dt; +id select_type table type possible_keys key key_len ref rows 
Extra +1 SIMPLE jj1 ALL NULL NULL NULL NULL 6 +1 SIMPLE tt2 ALL NULL NULL NULL NULL 6 Using join buffer (flat, BNL join) +1 SIMPLE tbl ALL NULL NULL NULL NULL 40 Table function: json_table +SELECT * FROM t1 as jj1, +(SELECT tt2.* FROM t1 as tt2, +JSON_TABLE(f1, "$" COLUMNS (id FOR ORDINALITY)) AS tbl STRAIGHT_JOIN +t1 AS tt3) dt ORDER BY 1,3 LIMIT 10; +id f1 id f1 +1 {"1": 1} 1 {"1": 1} +1 {"1": 1} 1 {"1": 1} +1 {"1": 1} 1 {"1": 1} +1 {"1": 1} 1 {"1": 1} +1 {"1": 1} 1 {"1": 1} +1 {"1": 1} 1 {"1": 1} +1 {"1": 1} 2 {"1": 2} +1 {"1": 1} 2 {"1": 2} +1 {"1": 1} 2 {"1": 2} +1 {"1": 1} 2 {"1": 2} +EXPLAIN SELECT * FROM t1 as jj1, +(SELECT tt2.* FROM t1 as tt2, +JSON_TABLE(f1, "$" COLUMNS (id FOR ORDINALITY)) AS tbl STRAIGHT_JOIN +t1 AS tt3) dt ORDER BY 1,3 LIMIT 11; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE jj1 ALL NULL NULL NULL NULL 6 Using temporary; Using filesort +1 SIMPLE tt2 ALL NULL NULL NULL NULL 6 Using join buffer (flat, BNL join) +1 SIMPLE tbl ALL NULL NULL NULL NULL 40 Table function: json_table +1 SIMPLE tt3 ALL NULL NULL NULL NULL 6 Using join buffer (flat, BNL join) +SELECT * FROM t1 WHERE id IN +(SELECT id FROM t1 as tt2, +JSON_TABLE(f1, "$" COLUMNS (jf FOR ORDINALITY)) AS tbl); +id f1 +1 {"1": 1} +2 {"1": 2} +3 {"1": 3} +4 {"1": 4} +5 {"1": 5} +6 {"1": 6} +EXPLAIN SELECT * FROM t1 WHERE id IN +(SELECT id FROM t1 as tt2, +JSON_TABLE(f1, "$" COLUMNS (jf FOR ORDINALITY)) AS tbl); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 6 +1 PRIMARY eq_ref distinct_key distinct_key 4 func 1 +2 MATERIALIZED tt2 ALL NULL NULL NULL NULL 6 +2 MATERIALIZED tbl ALL NULL NULL NULL NULL 40 Table function: json_table +DROP TABLE t1; +# +# Bug#26760811: WL#8867: MEMORY LEAK REPORTED BY ASAN AND VALGRIND +# +CREATE TABLE t (j JSON); +INSERT INTO t VALUES +('[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17]'), +('[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17]'); +SELECT COUNT(*) FROM t, JSON_TABLE(j, '$[*]' COLUMNS (i INT PATH '$')) AS jt; +COUNT(*) +34 +PREPARE ps FROM +'SELECT COUNT(*) FROM t, JSON_TABLE(j, ''$[*]'' COLUMNS (i INT PATH ''$'')) AS jt'; +EXECUTE ps; +COUNT(*) +34 +EXECUTE ps; +COUNT(*) +34 +DROP PREPARE ps; +DROP TABLE t; +# +# Bug #26781759: NON-UNIQUE ALIAS ERROR NOT BEING THROWN +# +SELECT * FROM JSON_TABLE(NULL, '$' COLUMNS(j1 FOR ORDINALITY)) AS jalias, +JSON_TABLE(NULL, '$' COLUMNS(j1 FOR ORDINALITY)) AS jalias; +ERROR 42000: Not unique table/alias: 'jalias' +# +# Bug#27152428 JSON_TABLE + PREPARED STATEMENT + VIEW HAS PROBLEM IN DURING RESOLUTION +# +CREATE TABLE t1 (a INT, b INT); +CREATE VIEW v2 AS SELECT * FROM t1 LIMIT 2; +DROP TABLE t1; +DROP VIEW v2; +# +# Bug#27189940: CREATE VIEW FAILS ON JSON_TABLE() IN SCHEMA-LESS CONNECTIONS +# BUG#27217897: JSON_TABLE() FAILS IF NO DATABASE SELECTED +# +connect conn1,localhost,root,,*NO-ONE*; +connection conn1; +CREATE VIEW test.v AS SELECT * FROM JSON_TABLE('[1,2,3]', '$[*]' COLUMNS (num INT PATH '$[0]')) AS jt; +SELECT * FROM JSON_TABLE('[1,2,3]', '$[*]' COLUMNS (num INT PATH '$[0]')) AS jt; +num +1 +2 +3 +connection default; +disconnect conn1; +use test; +SHOW CREATE VIEW test.v; +View Create View character_set_client collation_connection +v CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v` AS select `jt`.`num` AS `num` from JSON_TABLE('[1,2,3]', '$[*]' COLUMNS (`num` int(11) PATH '$[0]')) `jt` latin1 latin1_swedish_ci +SELECT * FROM test.v; +num +1 +2 +3 +DROP VIEW test.v; +# +# Bug#27729112 JSON_TABLE SHOULD 
DECODE BASE64-ENCODED STRINGS +# +SELECT v +FROM JSON_TABLE(JSON_OBJECT('foo', _binary'bar'), '$' + COLUMNS(v VARCHAR(255) PATH '$.foo')) tbl; +v +bar +# +# Bug#28254268: JSON_TABLE() FUNCTION REJECTS SELECT PERMISSIONS +# +CREATE DATABASE db2; +USE db2; +CREATE TABLE t1 (c JSON); +INSERT INTO t1 VALUES('[1,2,3]'); +CREATE USER user1@localhost; +GRANT SELECT ON db2.t1 TO user1@localhost; +connect conn1,localhost,user1,,; +connection conn1; +USE db2; +SELECT t1.c FROM t1; +c +[1,2,3] +SELECT jt.* FROM t1, JSON_TABLE(t1.c, '$[*]' COLUMNS (num INT PATH '$[0]')) +AS jt; +num +1 +2 +3 +disconnect conn1; +connection default; +DROP USER user1@localhost; +DROP DATABASE db2; +# +# Bug#27856835 JSON_TABLE RETURNS WRONG DATATYPE WHEN INT-VALUE IS GRATER +# THAN (2^31-1) +# +SELECT id FROM JSON_TABLE('[{"id":"9223372036854775807"}]', '$[*]' COLUMNS +(id BIGINT PATH '$.id')) AS json; +id +9223372036854775807 +# As we currently have no way of telling if a JSON string value is +# signed or unsigned, this value will overflow. +SELECT id FROM JSON_TABLE('[{"id":"9223372036854775808"}]', '$[*]' COLUMNS +(id BIGINT PATH '$.id')) AS json; +id +NULL +# Here the JSON value is a NUMERIC value, and we thus know if the value +# is signed or unsigned. +SELECT id FROM JSON_TABLE('[{"id":9223372036854775808}]', '$[*]' COLUMNS +(id BIGINT PATH '$.id')) AS json; +id +NULL +# If we tell the JSON table column to be unsigned, we get to store the +# full value correctly. +SELECT id FROM JSON_TABLE('[{"id":"9223372036854775808"}]', '$[*]' COLUMNS +(id BIGINT UNSIGNED PATH '$.id')) AS json; +id +9223372036854775808 +SELECT id FROM JSON_TABLE('[{"id":"2147483648"}]', '$[*]' COLUMNS +(id INT UNSIGNED PATH '$.id')) AS json; +id +2147483648 +# Check that we preserve the signedness of the columns. +USE test; +CREATE TABLE t1 AS SELECT id, value FROM +JSON_TABLE('[{"id":9223372036854775808, "value":9223372036854775807}]', +'$[*]' COLUMNS (id BIGINT UNSIGNED PATH '$.id', +value BIGINT PATH '$.value')) +AS json; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `id` bigint(20) unsigned DEFAULT NULL, + `value` bigint(20) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE t1; +# +# Bug#28255453: VIEW USING JSON_TABLE FAILS IF NO SCHEMA IS SELECTED +# +connect conn1,localhost,root,,*NO-ONE*; +CREATE VIEW test.v AS SELECT * FROM JSON_TABLE('[1,2,3]', '$[*]' COLUMNS (num INT PATH '$[0]')) AS jt; +SELECT * FROM test.v; +num +1 +2 +3 +DROP VIEW test.v; +# Check that a user with access to the schema 'foo' can do a SELECT with +# a JSON_TABLE function. 
+CREATE SCHEMA foo; +CREATE USER foo@localhost; +GRANT EXECUTE ON foo.* TO foo@localhost; +connect con1,localhost,foo,,foo; +SELECT 1 FROM JSON_TABLE('[1,2,3]', '$[*]' COLUMNS (num INT PATH '$.a')) AS jt; +1 +1 +1 +1 +connection default; +disconnect con1; +DROP USER foo@localhost; +DROP SCHEMA foo; +# +# Bug#27923406 ERROR 1142 (42000) WHEN USING JSON_TABLE +# +CREATE SCHEMA my_schema; +CREATE USER foo@localhost; +GRANT EXECUTE ON my_schema.* TO foo@localhost; +connect con1,localhost,foo,,my_schema; +SELECT +* +FROM +JSON_TABLE( +'[{"x":2,"y":"8"},{"x":"3","y":"7"},{"x":"4","y":6}]', +"$[*]" COLUMNS( +xval VARCHAR(100) PATH "$.x", +yval VARCHAR(100) PATH "$.y" + ) +) AS jt1; +xval yval +2 8 +3 7 +4 6 +connection default; +disconnect con1; +DROP USER foo@localhost; +DROP SCHEMA my_schema; +# +# Bug#28538315: JSON_TABLE() COLUMN TYPES DON'T SUPPORT COLLATE CLAUSE +# +CREATE TABLE t1 SELECT * +FROM JSON_TABLE('"test"', '$' COLUMNS(x VARCHAR(10) +CHARSET utf8mb4 +PATH '$')) AS jt1; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `x` varchar(10) CHARACTER SET utf8mb4 DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE t1; +# +# Bug#28643862 JSON_TABLE'S "COLUMNS" CLAUSE USES +# GLOBAL.CHARACTER_SET_RESULTS DEFAULT CHARSET +# +SET @@SESSION.character_set_connection = ascii; +CREATE TABLE t1 SELECT a.col +FROM JSON_TABLE('"test"', '$' COLUMNS(col VARCHAR(10) PATH '$')) AS a; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `col` varchar(10) CHARACTER SET ascii DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +SET @@SESSION.collation_connection = latin1_bin; +CREATE TABLE t2 SELECT a.col +FROM JSON_TABLE('"test"', '$' COLUMNS(col VARCHAR(10) PATH '$')) AS a; +SHOW CREATE TABLE t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `col` varchar(10) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE t1, t2; +SET @@SESSION.character_set_connection = DEFAULT; +# +# Bug#28851656: JSON_TABLE RETURN "UNKNOWN DATABASE ''" FROM A FUNCTION +# +CREATE FUNCTION FN_COUNT_ROWS(X JSON) +RETURNS INT DETERMINISTIC +RETURN ( +SELECT COUNT(*) FROM JSON_TABLE( X, '$[*]' COLUMNS( I INT PATH '$')) der +); +SELECT FN_COUNT_ROWS('[1, 2]') CNT; +CNT +2 +SELECT FN_COUNT_ROWS('[1, 2, 3]') CNT; +CNT +3 +SELECT FN_COUNT_ROWS('[1, 2, 3, 4]') CNT; +CNT +4 +DROP FUNCTION FN_COUNT_ROWS; +# +# Bug#30310265: VIEWS LOSE THE CHARACTER SET OF JSON_TABLE'S +# PATH ARGUMENTS +# +CREATE VIEW v1 AS +SELECT * FROM JSON_TABLE('{"å":{"å":1}}', '$.å' COLUMNS (x INT PATH '$.å')) t; +SELECT * FROM v1; +x +1 +SET NAMES latin1; +SELECT * FROM v1; +x +1 +SET NAMES DEFAULT; +DROP VIEW v1; +CREATE VIEW v2 AS +SELECT * FROM JSON_TABLE('{}', '$' COLUMNS ( +x VARCHAR(10) PATH '$.a' DEFAULT '"isn''t here"' ON EMPTY) +) t; +DROP VIEW v2; +# +# Bug#30382156: STORED PROCEDURE, JSON_TABLE AND "CONST" JOIN TYPE +# +CREATE TABLE t (id INT PRIMARY KEY, j JSON); +INSERT INTO t VALUES (1, '{"x":1}'); +CREATE PROCEDURE p() +SELECT * FROM t, JSON_TABLE(j, '$' COLUMNS (x INT PATH '$.x')) jt +WHERE id = 1; +CALL p(); +id j x +1 {"x":1} 1 +CALL p(); +id j x +1 {"x":1} 1 +CALL p(); +id j x +1 {"x":1} 1 +DROP PROCEDURE p; +DROP TABLE t; +# +# WL#13512: Deprecate ON ERROR before ON EMPTY in JSON_TABLE syntax +# +SELECT * FROM +JSON_TABLE('{}', '$' COLUMNS (x VARCHAR(10) PATH '$.x' + DEFAULT '"a"' ON ERROR DEFAULT '"b"' ON EMPTY)) jt; +x +"b" +SELECT * FROM +JSON_TABLE('{}', '$' COLUMNS (x VARCHAR(10) PATH '$.x' + NULL ON ERROR NULL ON EMPTY)) jt; +x +NULL +SELECT 
* FROM +JSON_TABLE('{"x":"c"}', '$' COLUMNS (x VARCHAR(10) PATH '$.x' + ERROR ON ERROR ERROR ON EMPTY)) jt; +x +c +# +# Bug#30628330: INSERT WITH JSON_TABLE FAILS DUE TO WARNING INVOKED +# WITH ON ERROR CLAUSE +# +CREATE TABLE t(id INT, a TINYINT, b VARCHAR(3), c DATE, d DECIMAL); +INSERT INTO t SELECT * FROM +JSON_TABLE(JSON_ARRAY(JSON_OBJECT('a', 1, 'b', 'abc'), +JSON_OBJECT('a', 2, 'b', 'abcd'), +JSON_OBJECT('a', 1000, 'b', 'xyz'), +JSON_OBJECT('c', TIME'12:00:00', 'd', 1e308)), +'$[*]' COLUMNS (id FOR ORDINALITY, +a TINYINT PATH '$.a' DEFAULT '111' ON ERROR, +b VARCHAR(3) PATH '$.b' DEFAULT '"ERR"' ON ERROR, +c DATE PATH '$.c' DEFAULT '"2001-01-01"' ON ERROR, +d DECIMAL PATH '$.c' DEFAULT '999' ON ERROR) +) AS jt; +SELECT * FROM t ORDER BY id; +id a b c d +1 1 abc NULL NULL +2 2 abc NULL NULL +3 111 xyz NULL NULL +4 NULL NULL 2012-00-00 12 +DROP TABLE t; +# +# Bug#30263373: INCORRECT OUTPUT FROM TABLE_FUNCTION_JSON::PRINT() +# +CREATE VIEW v AS SELECT * FROM +JSON_TABLE('[123]', '$[*]' COLUMNS (`name with space 1` INT PATH '$', +`name with space 2` FOR ORDINALITY)) jt; +SELECT * FROM v; +name with space 1 name with space 2 +123 1 +DROP VIEW v; +CREATE VIEW v AS SELECT HEX(x), HEX(y) FROM +JSON_TABLE('["abc"]', '$[*]' COLUMNS (x BINARY(10) PATH '$', +y VARBINARY(10) PATH '$')) jt; +SELECT VIEW_DEFINITION FROM INFORMATION_SCHEMA.VIEWS +WHERE TABLE_SCHEMA = 'test' AND TABLE_NAME = 'v'; +VIEW_DEFINITION +select hex(`jt`.`x`) AS `HEX(x)`,hex(`jt`.`y`) AS `HEX(y)` from JSON_TABLE('["abc"]', '$[*]' COLUMNS (`x` binary(10) PATH '$', `y` varbinary(10) PATH '$')) `jt` +SELECT * FROM v; +HEX(x) HEX(y) +61626300000000000000 616263 +DROP VIEW v; +CREATE VIEW v AS SELECT * FROM +JSON_TABLE('[123]', '$[*]' COLUMNS(ti TINYINT PATH '$', +si SMALLINT PATH '$', +mi MEDIUMINT PATH '$', +i INT PATH '$', +bi BIGINT PATH '$', +tiu TINYINT UNSIGNED PATH '$', +siu SMALLINT UNSIGNED PATH '$', +miu MEDIUMINT UNSIGNED PATH '$', +iu INT UNSIGNED PATH '$', +biu BIGINT UNSIGNED PATH '$')) jt; +SELECT VIEW_DEFINITION FROM INFORMATION_SCHEMA.VIEWS +WHERE TABLE_SCHEMA = 'test' AND TABLE_NAME = 'v'; +VIEW_DEFINITION +select `jt`.`ti` AS `ti`,`jt`.`si` AS `si`,`jt`.`mi` AS `mi`,`jt`.`i` AS `i`,`jt`.`bi` AS `bi`,`jt`.`tiu` AS `tiu`,`jt`.`siu` AS `siu`,`jt`.`miu` AS `miu`,`jt`.`iu` AS `iu`,`jt`.`biu` AS `biu` from JSON_TABLE('[123]', '$[*]' COLUMNS (`ti` tinyint(4) PATH '$', `si` smallint(6) PATH '$', `mi` mediumint(9) PATH '$', `i` int(11) PATH '$', `bi` bigint(20) PATH '$', `tiu` tinyint(3) unsigned PATH '$', `siu` smallint(5) unsigned PATH '$', `miu` mediumint(8) unsigned PATH '$', `iu` int(10) unsigned PATH '$', `biu` bigint(20) unsigned PATH '$')) `jt` +SELECT * FROM v; +ti si mi i bi tiu siu miu iu biu +123 123 123 123 123 123 123 123 123 123 +DROP VIEW v; +# +# Bug#31345503 JSON_TABLE SHOULD RETURN SQL NULL FOR JSON NULL +# +SELECT * +FROM +JSON_TABLE( +'[ + {"c1": null, + "c2": [{"c": "c_val", "l": [1,2]}], + "c3": null}, + {"c1": true, + "c2": [{"c": "c_val","l": [11]}, {"c": "c_val", "l": [false]}], + "c3": true}, + {"c1": false, + "c2": [{"c": null,"l": [true]}, {"c": "c_val", "l": [null]}], + "c3": false} + ]', +'$[*]' COLUMNS( +top_ord FOR ORDINALITY, +c1path VARCHAR(10) PATH '$.c1' ERROR ON ERROR, +NESTED PATH '$.c2[*]' COLUMNS ( +c2path VARCHAR(10) PATH '$.c' ERROR ON ERROR, +ord FOR ORDINALITY, +NESTED PATH '$.l[*]' COLUMNS (lpath_c VARCHAR(10) PATH '$' ERROR ON ERROR, +lpath_i INT PATH '$' ERROR ON ERROR) +), +c3path INT PATH '$.c3' ERROR ON ERROR +) +) as jt; +top_ord c1path c2path ord lpath_c lpath_i 
c3path +1 NULL c_val 1 1 1 NULL +1 NULL c_val 1 2 2 NULL +2 true c_val 1 11 11 1 +2 true c_val 2 false 0 1 +3 false NULL 1 true 1 0 +3 false c_val 2 NULL NULL 0 +# +# Bug #31327187 UBSAN: JSON_TABLE: NULL POINTER PASSED AS ARGUMENT 2, +# WHICH IS DECLARED TO NEVER BE NULL +# +SELECT HEX(a) FROM JSON_TABLE( +'[{"E":{"e":true,"~":1,"S3":"sTa"},"r":3,"":6.7},"",6.5]', +'$' + COLUMNS(a BINARY(5) PATH '$[1]' NULL ON EMPTY) +) e; +HEX(a) +0000000000 diff --git a/mysql-test/suite/json/t/json_table.test b/mysql-test/suite/json/t/json_table.test new file mode 100644 index 00000000000..4a7f120c3dc --- /dev/null +++ b/mysql-test/suite/json/t/json_table.test @@ -0,0 +1,351 @@ +select * from json_table('[{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}]', '$[*]' COLUMNS( a INT PATH '$.a')) as tt; + +select * from JSON_TABLE( '[ {"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a', NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt; + +SELECT * FROM JSON_TABLE( '[ {"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a', NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'), NESTED PATH '$.b[*]' COLUMNS (c INT PATH '$') ) ) jt; + +create table t1 (id varchar(5), json varchar(1024)); +insert into t1 values ('j1', '[{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}]'); +insert into t1 values ('j2', '[{"a": 3, "b": [11,111]}, {"a": 4, "b": [22,222]}, {"a": 5, "b": [22,222]}]'); +select id, json, a from t1, json_table(t1.json, '$[*]' COLUMNS(js_id FOR ORDINALITY, a INT PATH '$.a')) as tt; +select * from t1, JSON_TABLE(t1.json, '$[*]' COLUMNS(js_id FOR ORDINALITY, a INT PATH '$.a', NESTED PATH '$.b[*]' COLUMNS (l_js_id FOR ORDINALITY, b INT PATH '$'))) as jt; +--error ER_BAD_FIELD_ERROR +select * from t1, JSON_TABLE(t1.no_field, '$[*]' COLUMNS(js_id FOR ORDINALITY, a INT PATH '$.a', NESTED PATH '$.b[*]' COLUMNS (l_js_id FOR ORDINALITY, b INT PATH '$'))) as jt; + +--error ER_DUP_FIELDNAME +select * from t1, JSON_TABLE(t1.no_field, '$[*]' COLUMNS(js_id FOR ORDINALITY, a INT PATH '$.a', NESTED PATH '$.b[*]' COLUMNS (l_js_id FOR ORDINALITY, a INT PATH '$'))) as jt; + +DROP TABLE t1; + +create table t1 (item_name varchar(32), item_props varchar(1024)); +insert into t1 values ('Laptop', '{"color": "black", "price": 1000}'); +insert into t1 values ('Jeans', '{"color": "blue", "price": 50}'); + +select * from t1 left join json_table(t1.item_props,'$' columns( color varchar(100) path '$.color')) as T on 1; + +--error ER_WRONG_OUTER_JOIN +select * from t1 right join json_table(t1.item_props,'$' columns( color varchar(100) path '$.color')) as T on 1; + +DROP TABLE t1; + +select * from JSON_TABLE( '[ {"xa": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a' default '101' on empty, NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt; + +select * from JSON_TABLE( '[ {"xa": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a' default '202' on error, NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt; + +select * from JSON_TABLE( '[ {"a": [1, 2], "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a' default '101' on empty, NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt; + +select * from JSON_TABLE( '[ {"a": [1, 2], "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a' default '202' on error default '101' on empty, NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt; + +--error ER_JSON_SYNTAX +select * from 
JSON_TABLE( '[{"a": [1, 2], "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3} xx YY]', '$[*]' COLUMNS( a INT PATH '$.a' default '202' on error default '101' on empty, NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt; + +--error ER_JSON_TABLE_ERROR_ON_FIELD +select * from JSON_TABLE( '[{"a": [1, 2], "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a' error on error default '101' on empty, NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt; + +# +# MDEV-22290 JSON_TABLE: Decimal type with M equal D causes Assertion +# `scale <= precision' failure +# +select * from json_table('{"a":0}',"$" columns(a decimal(1,1) path '$.a')) foo; + + +# +# MDEV-22291 JSON_TABLE: SELECT from json_table does not work without default database +# +connect (con1,localhost,root,,); +select a from json_table('{"a":0}',"$" columns(a for ordinality)) foo; +connection default; +disconnect con1; + +# +# MDEV-22302 JSON_TABLE: Column privilege is insufficient for query with json_table +# + +create database db; +use db; +create table t (a text); +insert into t values ('{"foo":"bar"}'); +create user u@localhost; +grant select (a) on db.t to u@localhost; + +--connect (con1,localhost,u,,db) +select a from t; +select * from t, json_table(t.a, '$' columns(f varchar(20) path '$.foo')) as jt; + +connection default; +disconnect con1; + +drop user u@localhost; +drop database db; + +use test; +create table t1 ( + color varchar(32), + price int + ); +insert into t1 values ("red", 100), ("blue", 50); +insert into t1 select * from t1; +insert into t1 select * from t1; + +set @save_optimizer_switch=@@optimizer_switch; +set optimizer_switch='firstmatch=off'; +select * from + json_table('[{"color": "blue", "price": 50}, + {"color": "red", "price": 100}]', + '$[*]' columns( color varchar(100) path '$.color', + price text path '$.price' + ) + ) as T + where + T.color in (select color from t1 where t1.price=T.price); +set @@optimizer_switch=@save_optimizer_switch; + +drop table t1; + +select * from +json_table(' [ {"color": "blue", "sizes": [1,2,3,4], "prices" : [10,20]}, + {"color": "red", "sizes": [10,11,12,13,14], "prices" : [100,200,300]} ]', + '$[*]' columns( + color varchar(4) path '$.color', + seq0 for ordinality, + nested path '$.sizes[*]' + columns (seq1 for ordinality, + size int path '$'), + nested path '$.prices[*]' + columns (seq2 for ordinality, + price int path '$') + ) + ) as T; + + +select * from json_table('[{"color": "blue", "price": 50}, + {"color": "red", "price": 100}, + {"color": "rojo", "price": 10.0}, + {"color": "blanco", "price": 11.0}]', + '$[*]' columns( color varchar(100) path '$.color', + price text path '$.price', seq for ordinality)) as T order by color desc; + +create view v as select * from json_table('{"as":"b", "x":123}',"$" columns(a varchar(8) path '$.a' default '-' on empty, x int path '$.x')) x; +select * from v; +show create table v; +drop view v; + +--error ER_PARSE_ERROR +select * from json_table('{"as":"b", "x":123}', + "$" columns(a varchar(8) path '$.a' default '-' on empty null on error null on empty, x int path '$.x')) x; + +select * from json_table('{"a":"foo","b":"bar"}', '$' + columns (v varchar(20) path '$.*')) as jt; + +select * from json_table('{"a":"foo","b":"bar"}', '$' + columns (v varchar(20) path '$.*' default '-' on error)) as jt; + +select * from json_table('{"b":"bar"}', '$' + columns (v varchar(20) path '$.*' default '-' on error)) as jt; + +create table t1 (a varchar(100)); +insert into t1 values ('1'); +--error ER_NONUNIQ_TABLE +select 
* from t1 as T, json_table(T.a, '$[*]' columns(color varchar(100) path '$.nonexistent', seq for ordinality)) as T; + +drop table t1; + +prepare s from 'select * from +json_table(?, + \'$[*]\' columns( color varchar(100) path \'$.color\', + price text path \'$.price\', + seq for ordinality)) as T +order by color desc; '; + +execute s using '[{"color": "red", "price":1}, {"color":"brown", "price":2}]'; +deallocate prepare s; + +create view v2 as select * from json_table('[{"co\\\\lor": "blue", "price": 50}]', '$[*]' columns( color varchar(100) path '$.co\\\\lor') ) as T; +select * from v2; +drop view v2; + +explain format=json select * from + json_table('[{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}]', '$[*]' COLUMNS( a INT PATH '$.a')) as tt; +explain select * from + json_table('[{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}]', '$[*]' COLUMNS( a INT PATH '$.a')) as tt; + +create view v1 as select * from + json_table('[{"color": "blue", "price": 50}]', + '$[*]' columns(color text path '$.nonexistent', + seq for ordinality)) as `ALIAS NOT QUOTED`; +select * from v1; +drop view v1; +create view v1 as select * from + json_table('[{"color": "blue", "price": 50}, + {"color": "red", "price": 100}]', + '$[*]' columns( + color text path "$.QUOTES \" HERE \"", + color1 text path '$.QUOTES " HERE "', + color2 text path "$.QUOTES ' HERE '", + seq for ordinality)) as T; +select * from v1; +drop view v1; + +CREATE TABLE t1 (x INT); +INSERT INTO t1 VALUES (1), (2), (3); +--error ER_BAD_FIELD_ERROR +SELECT t1.x*2 m, jt.* FROM t1, + JSON_TABLE(m, '$[*]' COLUMNS (i INT PATH '$')) jt; +DROP TABLE t1; + +--error ER_UNKNOWN_TABLE +select * from json_table(JS3.size, '$' columns (size INT PATH '$.size')) as JS1, json_table(JS1.size, '$' columns (size INT PATH '$.size')) as JS2, json_table(JS1.size, '$' columns (size INT PATH '$.size')) as JS3 where 1; + +create table t1 (json varchar(100) character set utf8); +insert into t1 values ('{"value":"АБВ"}'); +create table tj1 as +select T.value + from t1, json_table(t1.json, '$' columns (value varchar(32) PATH '$.value')) T; +show create table tj1; +drop table t1; +drop table tj1; + +CREATE TABLE t1(id INT, f1 JSON); +INSERT INTO t1 VALUES + (1, '{\"1\": 1}'), + (2, '{\"1\": 2}'), + (3, '{\"1\": 3}'), + (4, '{\"1\": 4}'), + (5, '{\"1\": 5}'), + (6, '{\"1\": 6}'); +ANALYZE TABLE t1; + +--error ER_UNKNOWN_TABLE +SELECT * FROM JSON_TABLE(tt3.f1, "$" COLUMNS (id FOR ORDINALITY)) AS tbl STRAIGHT_JOIN t1 AS tt3; +--error ER_BAD_FIELD_ERROR +SELECT * FROM t1 as jj1, + (SELECT tt2.* + FROM + t1 as tt2, + JSON_TABLE(tt3.f1, "$" COLUMNS (id FOR ORDINALITY)) AS tbl + STRAIGHT_JOIN + t1 AS tt3 + ) dt +ORDER BY 1,3 LIMIT 10; + +drop table t1; + +select collation(x) from + JSON_TABLE('["abc"]', '$[*]' COLUMNS (x VARCHAR(10) CHARSET latin1 PATH '$')) tbl; + +SELECT * FROM JSON_TABLE('{"x":1, "y":2}', _utf8mb4'$' COLUMNS (NESTED PATH _utf8mb4'$.x' + COLUMNS(y INT PATH _utf8mb4'$.y' DEFAULT _utf8mb4'1' ON EMPTY DEFAULT _utf8mb4'2' ON ERROR))) jt; + +select * from json_table( +'{"name":"t-shirt", "colors": ["yellow", "blue"],"sizes": ["small", "medium", "large"]}', + '$' columns(name varchar(32) path '$.name', + nested path '$.colors[*]' columns ( + color varchar(32) path '$', + nested path '$.sizes[*]' columns ( + size varchar(32) path '$' +)))) as t; + +SELECT x, length(x) FROM + JSON_TABLE('{}', '$' COLUMNS (x VARCHAR(10) PATH '$.x' DEFAULT 'abcdefg' ON EMPTY)) jt; + +# check how conversion works for JSON NULL, TRUE and FALSE + +select * from + json_table('[{"a":"aa"}, 
{"b":null}]', '$[*]' + columns (col1 int path '$.b' default '456' on empty)) as tt; + +select * from + json_table('[{"a":"aa"}, {"b":true}]', '$[*]' + columns (col1 int path '$.b' default '456' on empty)) as tt; + +select * from + json_table('[{"a":"aa"}, {"b":false}]', '$[*]' + columns (col1 int path '$.b' default '456' on empty)) as tt; + +select * from + json_table('[{"a":"aa"}, {"b":null}]', '$[*]' + columns (col1 varchar(100) path '$.b' default '456' on empty)) as tt; + +select * from + json_table('[{"a":"aa"}, {"b":true}]', '$[*]' + columns (col1 varchar(100) path '$.b' default '456' on empty)) as tt; + +select * from + json_table('[{"a":"aa"}, {"b":false}]', '$[*]' + columns (col1 varchar(100) path '$.b' default '456' on empty)) as tt; + + +select * from + json_table( '[{"a":"asd"}, {"a":123}, {"a":[]}, {"a":{}} ]', '$[*]' + columns (id for ordinality, + intcol int path '$.a' default '1234' on empty default '5678' on error) + ) as tt; + +SELECT COUNT(*) FROM JSON_TABLE('[1, 2]', '$[*]' COLUMNS( I INT PATH '$')) tt; + +create table t1 (a int); +insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t2 (js json, b int); +insert into t2 select '[1,2,3]',A.a from t1 A, t1 B; + +explain select * from t1, + (select * from t2, json_table(t2.js, '$[*]' columns (o for ordinality)) as jt) as TT2 + where 1; + +drop table t1, t2; + +CREATE TABLE t1 (x INT); +INSERT INTO t1 VALUES (1); +CREATE TABLE t2 (j JSON); +INSERT INTO t2 (j) VALUES ('[1,2,3]'); +--sorted_result +SELECT * FROM t1 RIGHT JOIN + (SELECT o FROM t2, JSON_TABLE(j, '$[*]' COLUMNS (o FOR ORDINALITY)) AS jt) AS t3 ON (t3.o = t1.x); +DROP TABLE t1, t2; + +create table t20 (a int not null); +create table t21 (a int not null primary key, js varchar(100)); +insert into t20 values (1),(2); +insert into t21 values (1, '{"a":100}'); + +explain select t20.a, jt1.ab + from t20 left join t21 on t20.a=t21.a + join JSON_TABLE(t21.js,'$' COLUMNS (ab INT PATH '$.a')) AS jt1; + +drop table t20, t21; + +select * from +json_table( +'[ + {"name": "X", + "colors":["blue"], "sizes": [1,2,3,4], "prices" : [10,20]}, + {"name": "Y", + "colors":["red"], "sizes": [10,11], "prices" : [100,200,300]} +]', +'$[*]' columns +( + seq0 for ordinality, + name varchar(4) path '$.name', + nested path '$.colors[*]' columns ( + seq1 for ordinality, + color text path '$' + ), + nested path '$.sizes[*]' columns ( + seq2 for ordinality, + size int path '$' + ), + nested path '$.prices[*]' columns ( + seq3 for ordinality, + price int path '$' + ) +) +) as T order by seq0, name; + +# MDEV-25140 Success of query execution depends on the outcome of previous queries. 
+--error ER_JSON_TABLE_ALIAS_REQUIRED +select * from json_table('[]', '$' COLUMNS(x FOR ORDINALITY)); +select min(x) from json_table('[]', '$' COLUMNS(x FOR ORDINALITY)) a; + +--echo # +--echo # End of 10.5 tests +--echo # diff --git a/mysql-test/suite/json/t/json_table_mysql.test b/mysql-test/suite/json/t/json_table_mysql.test new file mode 100644 index 00000000000..0d322c8b286 --- /dev/null +++ b/mysql-test/suite/json/t/json_table_mysql.test @@ -0,0 +1,1488 @@ +# For stable statistics +#--source include/have_innodb_16k.inc +--source include/have_innodb.inc +--echo # +--echo # WL#8867: Add JSON_TABLE table function +--echo # +select * from + json_table( + '[{"a":"3"},{"a":2},{"b":1},{"a":0}]', + '$[*]' columns (id for ordinality, + jpath varchar(100) path '\$.a', + jexst int exists path '\$.b') + ) as tt; + +#--error ER_INVALID_DEFAULT +select * from + json_table( + '[{"x":"3"},{"a":2},{"b":1},{"a":0}]', + '$[*]' columns (id for ordinality, + jpath varchar(100) path '$.a' default '[99]' on error, + jexst int exists path '$.b') + ) as tt; + +select * from + json_table( + '[{"x":"3"},{"a":2},{"b":1},{"a":0},{"a":[1,2]}]', + '$[*]' columns (id for ordinality, + jpath varchar(100) path '$.a' + default '33' on empty + default '66' on error, + jsn_path json path '\$.a' default '{"x":33}' on empty, + jexst int exists path '\$.b') + ) as tt; + +select * from + json_table( + '[{"a":"3"},{"a":2},{"b":1},{"a":0.33},{"a":"asd"}]', + '$[*]' columns (id for ordinality, + jpath_i int path '$.a' + default '33' on empty + default '66' on error, + jpath_r real path '$.a' + default '33.3' on empty + default '77.7' on error, + jsn_path json path '$.a' default '{"x":33}' on empty, + jexst int exists path '$.b') + ) as tt; +#eval $query; +#eval explain $query; + +select * from + json_table( + '[{"x":"3"},{"a":2},{"b":1},{"a":0},{"a":[1,2]}]', + '$[*]' columns (id for ordinality, + jpath varchar(100) path '$.a' + default '33' on empty + default '66' on error, + jsn_path json path '$.a' default '{"x":33}' on empty, + jexst int exists path '$.b') + ) as tt; +#eval $query; +#eval explain $query; + +select * from + json_table( + '[{"a":"3"},{"a":2},{"b":1},{"a":0}]', + '$[*]' columns (id for ordinality, + jpath varchar(100) path '$.a', + json_path json path '$.a', + jexst int exists path '$.b') + ) as tt; + +select * from + json_table( + '[{"a":"3"},{"a":2},{"b":1},{"a":0}]', + '$[*]' columns (id for ordinality, + jpath varchar(100) path '$.a', + json_path json path '$.a', + jexst int exists path '$.b') + ) as tt + where id = 3; +#eval $query; +#eval explain $query; + +--error ER_JSON_TABLE_ERROR_ON_FIELD +select * from + json_table( + '[{"a":"3"},{"a":2},{"b":1},{"a":0}]', + '$[*]' columns (id for ordinality, + jpath varchar(100) path '$.a' error on empty, + jexst int exists path '$.b') + ) as tt; + +--error ER_JSON_TABLE_ERROR_ON_FIELD +select * from + json_table( + '[{"a":"3"},{"a":2},{"a":1},{"a":[0,1]}]', + '$[*]' columns (id for ordinality, + jpath varchar(100) path '$.a' error on error, + jexst int exists path '$.b') + ) as tt; + +--error ER_JSON_SYNTAX +select * from + json_table( + '!#@$!@#$', + '$[*]' columns (id for ordinality, + jpath varchar(100) path '$.a', + jexst int exists path '$.b') + ) as tt; + +--error ER_JSON_SYNTAX +# psergey-done: CRASH1 +--disable_parsing +select * from + json_table( + '[{"a":"3"},{"a":2},{"b":1},{"a":0}]', + "!@#$!@#$" columns (id for ordinality, + jpath varchar(100) path '$.a', + jexst int exists path '$.b') + ) as tt; +--enable_parsing + +--error ER_JSON_SYNTAX 
+--disable_parsing +select * from + json_table( + '[{"a":"3"},{"a":2},{"b":1},{"a":0}]', + '$[*]' columns (id for ordinality, + jpath varchar(100) path "!@#$!@#$", + jexst int exists path '$.b') + ) as tt; +--enable_parsing + +--error ER_DUP_FIELDNAME +select * from + json_table( + '[{"a":"3"},{"a":2},{"b":1},{"a":0}]', + '$[*]' columns (id for ordinality, + id for ordinality) + ) as tt; + +select * from + json_table( + '[{"a":"3"},{"a":2},{"b":1},{"a":0}]', + '$[*]' columns (id for ordinality, + _id for ordinality) + ) as tt; + +select * from + json_table( + '[ + {"a":"3", "n": { "l": 1}}, + {"a":2, "n": { "l": 1}}, + {"b":1, "n": { "l": 1}}, + {"a":0, "n": { "l": 1}} + ]', + '$[*]' columns ( + id for ordinality, + jpath varchar(100) path '$.a', + jexst int exists path '$.b', + nested path '$.n' columns ( + id_n for ordinality, + jpath_n varchar(50) path '$.l') + ) + ) as tt; + +#eval $query; +#eval explain $query; +explain format=json +select * from + json_table( + '[ + {"a":"3", "n": { "l": 1}}, + {"a":2, "n": { "l": 1}}, + {"b":1, "n": { "l": 1}}, + {"a":0, "n": { "l": 1}} + ]', + '$[*]' columns ( + id for ordinality, + jpath varchar(100) path '$.a', + jexst int exists path '$.b', + nested path '$.n' columns ( + id_n for ordinality, + jpath_n varchar(50) path '$.l') + ) + ) as tt; + +select * from + json_table( + '[ + {"a":2, "n": [{ "l": 1}, {"l": 11}]}, + {"a":1, "n": [{ "l": 2}, {"l": 22}]} + ]', + '$[*]' columns ( + id for ordinality, + jpath varchar(50) path '$.a', + nested path '$.n[*]' columns ( + id_n for ordinality, + jpath_n varchar(50) path '$.l'), + nested path '$.n[*]' columns ( + id_m for ordinality, + jpath_m varchar(50) path '$.l') + ) + ) as tt; + + +select * from json_table( + '[ + {"a":"3", "n": ["b","a","c"]}, + {"a":2, "n": [1,2]}, + {"b":1, "n": ["zzz"]}, + {"a":0, "n": [0.1, 0.02]} + ]', + '$[*]' columns ( + id for ordinality, + jpath varchar(100) path '$.a', + jexst int exists path '$.b', + nested path '$.n[*]' columns ( + id_n for ordinality, + jpath_n varchar(50) path '$') + ) + ) as tt; + +select * from json_table( + '[ + {"a":"3", "n": ["b","a","c"]}, + {"a":2, "n": [1,2]}, + {"b":1, "n": ["zzz"]}, + {"a":0, "n": [0.1, 0.02]} + ]', + '$[*]' columns ( + id for ordinality, + jpath varchar(100) path '$.a', + jexst int exists path '$.b', + nested path '$.n[*]' columns ( + id_n1 for ordinality, + jpath_n1 varchar(50) path '$') , + nested path '$.n[*]' columns ( + id_n2 for ordinality, + jpath_n2 varchar(50) path '$') + ) + ) as tt; + +select * from json_table( + '[ + {"a":"3", "n": [ {"ll":["b1","b2","b3"]}, {"ll": ["a1","a2"]}, + {"ll":["c"]} ]}, + {"a":2, "n": [{"ll":[1,11,111]},{"ll":[2]}]}, + {"b":1, "n": [{"ll":["zzz"]}]}, + {"a":0, "n": [{"ll":[0.1,0.01]}, {"ll":[0.02,0.002,0.0002]}]} + ]', + '$[*]' columns ( + id1 for ordinality, + jpath varchar(100) path '$.a', + jexst int exists path '$.b', + nested path '$.n[*]' columns ( + id2 for ordinality, + nested path '$.ll[*]' columns ( + id3 for ordinality, + jpath_3 varchar(50) path '$') + ), + nested path '$.n[*]' columns ( + id4 for ordinality, + jpath_4 json path '$') + ) + ) as tt; + + +--echo ord should be 1,1,1,2, which tells that first two values of 'l' are +--echo from the same object, and next two are from different objects +SELECT * +FROM JSON_TABLE( + '[{"a": "a_val", + "b": [ + {"c": "c_val", + "l": [1,2]} + ] + }, {"a": "a_val", + "b": [ + {"c": "c_val", + "l": [11]}, + {"c": "c_val", + "l": [22]} + ] + }]', + '$[*]' COLUMNS ( + apath VARCHAR(10) PATH '$.a', + NESTED PATH '$.b[*]' COLUMNS ( + bpath 
VARCHAR(10) PATH '$.c', + ord FOR ORDINALITY, + NESTED PATH '$.l[*]' COLUMNS ( + lpath varchar(10) PATH '$' + ) + ) + )) as jt; + +CREATE TABLE jt( i JSON ); +SELECT * FROM jt, JSON_TABLE(jt.i, '$' COLUMNS (a INT PATH '$')) AS tt + WHERE a=1; +EXPLAIN SELECT * FROM jt, JSON_TABLE(jt.i, '$' COLUMNS (a INT PATH '$')) AS tt + WHERE a=1; + +SELECT * FROM ( + SELECT * FROM jt, JSON_TABLE(jt.i, '$' COLUMNS (a INT PATH '$')) AS tt + WHERE a=1) AS ttt; +EXPLAIN SELECT * FROM ( + SELECT * FROM jt, JSON_TABLE(jt.i, '$' COLUMNS (a INT PATH '$')) AS tt + WHERE a=1) AS ttt; +DROP TABLE jt; + +--disable_parsing +SELECT * FROM JSON_TABLE(CAST(CAST("11:22:33" AS TIME) AS JSON), + '$' COLUMNS (dt DATE PATH '$')) as tt; +SELECT * FROM JSON_TABLE(CAST(CAST("11:22:33" AS TIME) AS JSON), + '$' COLUMNS (dt TIME PATH '$')) as tt; +SELECT * FROM JSON_TABLE(CAST(CAST("2001.02.03" AS DATE) AS JSON), + '$' COLUMNS (dt DATE PATH '$')) as tt; +--enable_parsing + +CREATE VIEW v AS + SELECT * FROM JSON_TABLE('[1,2,3]', + '$[*]' COLUMNS (num INT PATH '$.a' + DEFAULT '123' ON EMPTY + DEFAULT '456' ON ERROR)) AS jt; +SELECT * FROM v; +SHOW CREATE VIEW v; +DROP VIEW v; + +--error ER_JSON_TABLE_ERROR_ON_FIELD +SELECT * FROM JSON_TABLE('"asdf"', + '$' COLUMNS (a INT PATH '$' ERROR ON ERROR)) AS jt; +#--error ER_WRONG_JSON_TABLE_VALUE +--error ER_JSON_TABLE_ERROR_ON_FIELD +SELECT * FROM + JSON_TABLE('[{"a":1},{"a":2}]', + '$' COLUMNS (a INT PATH '$[*].a' ERROR ON ERROR)) AS jt; +# psergey-added: +--error ER_JSON_TABLE_ERROR_ON_FIELD +SELECT * FROM + JSON_TABLE('[{"a":1},{"a":2}]', + '$' COLUMNS (a JSON PATH '$[*].a' ERROR ON ERROR)) AS jt; +--error ER_JSON_TABLE_ERROR_ON_FIELD +SELECT * FROM + JSON_TABLE('123.456', '$' COLUMNS (a DECIMAL(2,1) PATH '$' ERROR ON ERROR)) AS jt; +SELECT * FROM + JSON_TABLE('123.456', '$' COLUMNS (a DECIMAL(2,1) PATH '$')) AS jt; + +#--error ER_JT_MAX_NESTED_PATH +SELECT * FROM + JSON_TABLE('{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{}}}}}}}}}}}}}}}}}}}', + '$' COLUMNS (i0 INT PATH '$.a', + NESTED PATH '$.b' COLUMNS (i1 INT PATH '$.a', + NESTED PATH '$.b' COLUMNS (i2 INT PATH '$.a', + NESTED PATH '$.b' COLUMNS (i3 INT PATH '$.a', + NESTED PATH '$.b' COLUMNS (i4 INT PATH '$.a', + NESTED PATH '$.b' COLUMNS (i5 INT PATH '$.a', + NESTED PATH '$.b' COLUMNS (i6 INT PATH '$.a', + NESTED PATH '$.b' COLUMNS (i7 INT PATH '$.a', + NESTED PATH '$.b' COLUMNS (i8 INT PATH '$.a', + NESTED PATH '$.b' COLUMNS (i9 INT PATH '$.a', + NESTED PATH '$.b' COLUMNS (i10 INT PATH '$.a', + NESTED PATH '$.b' COLUMNS (i11 INT PATH '$.a', + NESTED PATH '$.b' COLUMNS (i12 INT PATH '$.a', + NESTED PATH '$.b' COLUMNS (i13 INT PATH '$.a', + NESTED PATH '$.b' COLUMNS (i14 INT PATH '$.a', + NESTED PATH '$.b' COLUMNS (i15 INT PATH '$.a', + NESTED PATH '$.b' COLUMNS (i16 INT PATH '$.a', + NESTED PATH '$.b' COLUMNS (i17 INT PATH '$.a', + NESTED PATH '$.b' COLUMNS (i18 INT PATH '$.a', + NESTED PATH '$.b' COLUMNS (i19 INT PATH '$.a', + NESTED PATH '$.b' COLUMNS (i20 INT PATH '$.a' + )))))))))))))))))))))) jt; + +CREATE TABLE t1(id int, jd JSON); +INSERT INTO t1 values (1, '[1,3,5]'),(2,'[2,4,6]'); +SELECT id, jt.* FROM t1, + JSON_TABLE(jd, '$[*]' COLUMNS (jid FOR ORDINALITY, + val INT PATH '$')) AS jt; +SELECT /*+ JOIN_ORDER(jt, t1) */ id, jt.* + FROM t1, + JSON_TABLE(jd, '$[*]' COLUMNS (jid FOR ORDINALITY, + val INT PATH '$')) AS jt; + +EXPLAIN SELECT /*+ JOIN_ORDER(jt, t1) */ id, jt.* 
+ FROM t1, + JSON_TABLE(jd, '$[*]' COLUMNS (jid FOR ORDINALITY, + val INT PATH '$')) AS jt; + +--sorted_result +SELECT /*+ JOIN_ORDER(t2,jt) */ t1.id, t2.id, jt.* + FROM t1, + JSON_TABLE(t1.jd, '$[*]' COLUMNS (jid FOR ORDINALITY, + val INT PATH '$')) AS jt, + t1 AS t2; + +EXPLAIN SELECT t1.id, t2.id, jt.* + FROM t1, + JSON_TABLE(t1.jd, '$[*]' COLUMNS (jid FOR ORDINALITY, + val INT PATH '$')) AS jt, + t1 AS t2; + +EXPLAIN SELECT /*+ JOIN_ORDER(t2,jt) */ t1.id, t2.id, jt.* + FROM t1, + JSON_TABLE(t1.jd, '$[*]' COLUMNS (jid FOR ORDINALITY, + val INT PATH '$')) AS jt, + t1 AS t2; + +SELECT * FROM t1 WHERE id IN + (SELECT * FROM JSON_TABLE('[1,2]', '$[*]' COLUMNS + (id INT PATH '$')) AS jt); +EXPLAIN SELECT * FROM t1 WHERE id IN + (SELECT * FROM JSON_TABLE('[1,2]', '$[*]' COLUMNS + (id INT PATH '$')) AS jt); + +--disable_parsing +(psergey:!) +# JSON_TABLE referring outer scope +SELECT * FROM t1 WHERE id IN + (SELECT * FROM JSON_TABLE(t1.jd, '$[*]' COLUMNS + (id INT PATH '$')) AS jt); +EXPLAIN SELECT * FROM t1 WHERE id IN + (SELECT * FROM JSON_TABLE(t1.jd, '$[*]' COLUMNS + (id INT PATH '$')) AS jt); +--enable_parsing + +# JSON_TABLE referring another JSON_TABLE +--disable_parsing +(psergey:!) +SELECT id, jt1.*, jt2.* + FROM t1, JSON_TABLE(jd, '$' COLUMNS (data1 JSON PATH '$')) AS jt1, + JSON_TABLE(data1, '$[*]' COLUMNS (id2 INT PATH '$')) AS jt2; + +EXPLAIN SELECT id, jt1.*, jt2.* + FROM t1, JSON_TABLE(jd, '$' COLUMNS (data1 JSON PATH '$')) AS jt1, + JSON_TABLE(data1, '$[*]' COLUMNS (id2 INT PATH '$')) AS jt2; +--enable_parsing + +DROP TABLE t1; + + +SELECT * FROM JSON_TABLE('"asdf"', '$' COLUMNS( + tm TIME PATH '$', + dt DATE PATH '$', + i INT PATH '$', + f FLOAT PATH '$', + d DECIMAL PATH '$')) AS jt; + +# DEFAULT NULL is not accepted syntax. +--error ER_PARSE_ERROR +SELECT * FROM + JSON_TABLE('{}', '$' COLUMNS (x INT PATH '$.x' DEFAULT NULL ON EMPTY)) jt; +--error ER_PARSE_ERROR +SELECT * FROM + JSON_TABLE('{}', '$' COLUMNS (x INT PATH '$.x' DEFAULT NULL ON ERROR)) jt; + +# The DEFAULT value must be a string on JSON format for now. +--error 1064 +SELECT * FROM + JSON_TABLE('{}', '$' COLUMNS (x INT PATH '$.x' DEFAULT 0 ON EMPTY)) jt; +--error 1064 +SELECT * FROM + JSON_TABLE('{}', '$' COLUMNS (x INT PATH '$.x' DEFAULT 0 ON ERROR)) jt; +--error 1064 +SELECT * FROM + JSON_TABLE('{}', '$' COLUMNS (x DATE + PATH '$.x' + DEFAULT DATE'2020-01-01' ON EMPTY)) jt; +--error 1064 +SELECT * FROM + JSON_TABLE('{}', '$' COLUMNS (x DATE + PATH '$.x' + DEFAULT DATE'2020-01-01' ON ERROR)) jt; + +--echo # +--echo # Bug#25413069: SIG11 IN CHECK_COLUMN_GRANT_IN_TABLE_REF +--echo # +--error ER_BAD_FIELD_ERROR +SELECT a FROM JSON_TABLE(abc, '$[*]' COLUMNS ( a int path '$.a')) AS jt; + +--echo # +--echo # Bug#25420680: ASSERTION `THD->IS_ERROR()' FAILED IN SQL/SQL_SELECT.CC +--echo # +SELECT * FROM JSON_TABLE('{"a":"2017-11-1"}', + '$' COLUMNS (jpath DATE PATH '$.a')) AS jt; +--echo # +--echo # Bug#25413826: ASSERTION `TABLE_LIST->ALIAS' FAILED +--echo # +--error ER_JSON_TABLE_ALIAS_REQUIRED +SELECT * FROM JSON_TABLE('[{"a": 1, "b": 2}]', + '$[*]' COLUMNS ( a int path '$.b')); + +--echo # +--echo # Bug#25421464: ASSERTION `!STRCMP(TABLE_REF->TABLE_NAME, ... 
+--echo # +CREATE VIEW v1 AS + SELECT * FROM JSON_TABLE('[{"a": 1, "b": 2}]', + '$[*]' COLUMNS ( a INT PATH '$.b')) AS jt; +SELECT * FROM v1; +SHOW CREATE VIEW v1; +DROP VIEW v1; + +--echo # +--echo # Bug#25427457: ASSERTION `!((*REG_FIELD)->FLAGS & 16)' +--echo # +SELECT * FROM JSON_TABLE('{"a":"1"}', + '$' COLUMNS (jpath JSON PATH '$.a', + o FOR ORDINALITY)) AS jt +WHERE o = 1; + +--echo # +--echo # Bug#25427982: ASSERTION `DERIVED' FAILED IN SQL/TABLE.H +--echo # +#--error ER_WRONG_FIELD_WITH_GROUP +SELECT je,o FROM JSON_TABLE('{"a":"1"}', + '$' COLUMNS (o FOR ORDINALITY, + je BIGINT EXISTS PATH '$.a')) AS jt +GROUP BY je; +SELECT je,COUNT(o) FROM JSON_TABLE('{"a":"1"}', + '$' COLUMNS (o FOR ORDINALITY, + je BIGINT EXISTS PATH '$.a')) AS jt +GROUP BY je; + +--echo # +--echo # Bug#25413194: ASSERTION `!(WANT_PRIVILEGE & ~(GRANT->WANT_PRIVILEGE +--echo # +CREATE TABLE t1 (j JSON); +SELECT * FROM t1,JSON_TABLE(t1.j, '$[*]' COLUMNS ( a int path '$.b')) AS jt; +DROP TABLE t1; + +--echo # +--echo # Bug#25460537:SIG 11 IN NEXT_FAST AT SQL/SQL_LIST.H +--echo # +PREPARE STMT FROM + "SELECT * FROM JSON_TABLE( + \'[{\"a\":\"3\"},{\"a\":2},{\"b\":1},{\"a\":0}]\', + \'$[*]\' COLUMNS (id + FOR ORDINALITY, + jpath VARCHAR(100) PATH \'$.a\', + jexst INT EXISTS PATH \'$.b\') + ) as tt"; +EXECUTE STMT; +EXECUTE STMT; +DEALLOCATE PREPARE stmt; + +--echo # +--echo # Bug#25522353: SIG 11 IN JOIN::MAKE_JOIN_PLAN | SQL/SQL_OPTIMIZER.CC +--echo # +CREATE TABLE t1 (id INT, jc JSON); + +# psergey! +#--error ER_UNKNOWN_TABLE +SELECT * FROM t1 RIGHT JOIN JSON_TABLE(t1.jc, '$' COLUMNS + (id FOR ORDINALITY)) as jt ON t1.jc=jt.id; + +# psergey! +--error ER_UNKNOWN_TABLE +SELECT * FROM JSON_TABLE(t1.jc, '$' COLUMNS + (id FOR ORDINALITY)) as jt LEFT JOIN t1 ON t1.jc=jt.id; + +--error ER_UNKNOWN_TABLE +SELECT * FROM JSON_TABLE(t1.jc, '$' COLUMNS + (id FOR ORDINALITY)) as jt RIGHT JOIN t1 ON t1.jc=jt.id; + +SELECT * FROM t1 LEFT JOIN JSON_TABLE(t1.jc, '$' COLUMNS + (id FOR ORDINALITY)) as jt ON t1.jc=jt.id; + +EXPLAIN SELECT * FROM t1 LEFT JOIN JSON_TABLE(t1.jc, '$' COLUMNS + (id FOR ORDINALITY)) as jt ON t1.jc=jt.id; + +SELECT * FROM t1 t1o RIGHT JOIN t1 ON t1o.id=t1.id + LEFT JOIN JSON_TABLE(t1.jc, '$' COLUMNS + (id FOR ORDINALITY)) as jt ON t1.jc=jt.id; + +EXPLAIN SELECT * FROM t1 t1o RIGHT JOIN t1 ON t1o.id=t1.id + LEFT JOIN JSON_TABLE(t1.jc, '$' COLUMNS + (id FOR ORDINALITY)) as jt ON t1.jc=jt.id; + +# Error code can be ER_UNKNOWN_TABLE or ER_BAD_FIELD_ERROR +# (see find_field_in_tables()), both are acceptable. 
+ +# psergey: +#--error ER_BAD_FIELD_ERROR +SELECT * FROM t1 AS t1o RIGHT JOIN + (t1 AS t1i JOIN JSON_TABLE(t1o.jc, '$' COLUMNS + (id FOR ORDINALITY)) as jt ON t1i.jc=jt.id) + ON t1o.id=t1i.id; + +# psergey: +#--error ER_UNKNOWN_TABLE +SELECT * FROM t1 AS t1o RIGHT JOIN + (t1 AS t1i RIGHT JOIN JSON_TABLE(t1o.jc, '$' COLUMNS + (id FOR ORDINALITY)) as jt ON t1i.jc=jt.id) + ON t1o.id=t1i.id; + +# psergey: +#--error ER_UNKNOWN_TABLE +WITH qn AS + (SELECT jt.* FROM t1 RIGHT JOIN JSON_TABLE(t1.jc, '$' COLUMNS + (id FOR ORDINALITY)) as jt ON t1.jc=jt.id) + SELECT * from qn; + +#--error ER_UNKNOWN_TABLE +WITH qn AS + (SELECT 1 UNION + SELECT jt.id FROM t1 RIGHT JOIN JSON_TABLE(t1.jc, '$' COLUMNS + (id FOR ORDINALITY)) as jt ON t1.jc=jt.id) + SELECT * from qn; + +#--error ER_BAD_FIELD_ERROR +SELECT * FROM t1 AS t1o RIGHT JOIN + (t1 AS t1i JOIN JSON_TABLE(t1o.jc, '$' COLUMNS + (id FOR ORDINALITY)) as jt ON t1i.jc=jt.id) + ON t1o.id=t1i.id; + +#--error ER_UNKNOWN_TABLE +SELECT * FROM t1 AS t1o RIGHT JOIN + (t1 AS t1i RIGHT JOIN JSON_TABLE(t1o.jc, '$' COLUMNS + (id FOR ORDINALITY)) as jt ON t1i.jc=jt.id) + ON t1o.id=t1i.id; + +INSERT INTO t1 VALUES(1,"1"),(2,"4"),(3,"3"); + +ANALYZE TABLE t1; + +SELECT * FROM t1 LEFT JOIN JSON_TABLE(t1.jc, '$' COLUMNS + (id INT PATH '$')) as jt ON t1.id=jt.id; + +EXPLAIN SELECT * FROM t1 LEFT JOIN JSON_TABLE(t1.jc, '$' COLUMNS + (id INT PATH '$')) as jt ON t1.id=jt.id; + +--error ER_WRONG_OUTER_JOIN +SELECT * FROM t1 + LEFT JOIN + JSON_TABLE(t1.jc, '$' COLUMNS (id FOR ORDINALITY)) as jt ON t1.jc=jt.id + RIGHT JOIN + JSON_TABLE(jt.id, '$' COLUMNS (id FOR ORDINALITY)) as jt1 ON jt.id=jt1.id; + +DROP TABLE t1; + +--echo # +--echo # Bug#25525409: ASSERTION `TABLE_LIST->TABLE' FAILED IN SQL/SQL_BASE.CC +--echo # +--error ER_BAD_FIELD_ERROR +SELECT * FROM JSON_TABLE( ( SELECT a ) , '$.*' COLUMNS (col1 FOR ORDINALITY) ) +AS alias1; + +SELECT * FROM JSON_TABLE( ( SELECT 1 ) , '$.*' COLUMNS (col1 FOR ORDINALITY) ) +AS alias1; + +# psergey: check this +--error ER_INVALID_GROUP_FUNC_USE +SELECT * FROM JSON_TABLE( ( SUM(1) ) , '$.*' COLUMNS (col1 FOR ORDINALITY) ) +AS alias1; + +--echo # +--echo # Bug# #25472875: ERROR SHOULD BE THROWN FOR INCORRECT VALUES +--echo # +#--error ER_JT_VALUE_OUT_OF_RANGE +SELECT * + FROM JSON_TABLE('{"a":"1993-01-01"}', + '$' COLUMNS (jp DATE PATH '$.b' DEFAULT '1000' ON EMPTY)) + AS jt; + +--echo # +--echo # Bug#25532429: INVALID JSON ERROR NOT THROWN WITH EMPTY TABLES JOIN +--echo # +CREATE TABLE t1(j JSON); +#--error ER_INVALID_JSON_TEXT_IN_PARAM +SELECT * FROM t1, + JSON_TABLE( 'dqwfjqjf' , '$[*]' COLUMNS (col5 FOR ORDINALITY) ) AS alias7; +DROP TABLE t1; + + +--echo # +--echo # Bug#25540370: SIG 11 IN SHOW_SQL_TYPE|SQL/SQL_SHOW.CC:7063 +--echo # + SELECT * FROM + JSON_TABLE('[3.14159]', + '$[*]' COLUMNS (col18 CHAR(70) PATH '$') + ) AS alias2; + +--echo # Too short field causes truncation, error and triggers ON ERROR clause + SELECT * FROM + JSON_TABLE('["3.14159"]', + '$[*]' COLUMNS (col18 CHAR(6) PATH '$') + ) AS alias2; + +--echo #Truncated space doesn't trigger ON ERROR + SELECT * FROM + JSON_TABLE('["3.14159 "]', + '$[*]' COLUMNS (col18 CHAR(7) PATH '$') + ) AS alias2; +SELECT * FROM + JSON_TABLE('[3.14159]', + '$[*]' COLUMNS (col18 CHAR(255) PATH '$') + ) AS alias2; + + SELECT * FROM + JSON_TABLE('[3.14159]', + '$[*]' COLUMNS (col18 VARCHAR(70) PATH '$') + ) AS alias2; + + SELECT * FROM + JSON_TABLE('[3.14159]', + '$[*]' COLUMNS (col18 VARCHAR(255) PATH '$') + ) AS alias2; + + SELECT * FROM + JSON_TABLE('[3.14159]', + '$[*]' COLUMNS (col18 
FLOAT PATH '$') + ) AS alias2; + + SELECT * FROM + JSON_TABLE('[3.14159]', + '$[*]' COLUMNS (col18 DOUBLE PATH '$') + ) AS alias2; + + SELECT * FROM + JSON_TABLE('[3.14159]', + '$[*]' COLUMNS (col18 DECIMAL(3,3) PATH '$') + ) AS alias2; + +#--error ER_JT_VALUE_OUT_OF_RANGE +--error ER_JSON_TABLE_ERROR_ON_FIELD +SELECT * FROM + JSON_TABLE('[3.14159]', + '$[*]' COLUMNS (col18 DECIMAL(3,3) PATH '$' ERROR ON ERROR) + ) AS alias2; + + SELECT * FROM + JSON_TABLE('[0.9]', + '$[*]' COLUMNS (col18 DECIMAL(3,3) PATH '$') + ) AS alias2; + +SELECT * FROM + JSON_TABLE('["asdf","ghjk"]', + '$[*]' COLUMNS (col18 DECIMAL(4,3) PATH '$' + DEFAULT "3.14159" ON ERROR) + ) AS alias2; + +CREATE TABLE t1(jd JSON); +INSERT INTO t1 VALUES('["asdf"]'),('["ghjk"]'); +SELECT * FROM t1, + JSON_TABLE(jd, + '$[*]' COLUMNS (col18 DECIMAL(4,3) PATH '$' + DEFAULT "3.14159" ON ERROR) + ) AS alias2; +DROP TABLE t1; +--echo # +--echo # Bug#25540027: SIG 11 IN FIND_FIELD_IN_TABLE | SQL/SQL_BASE.CC +--echo # +CREATE TABLE t1(c1 JSON); +--error ER_NON_UPDATABLE_TABLE +UPDATE t1, JSON_TABLE(t1.c1,'$[*]' COLUMNS (a INT PATH '$.a')) AS jt1 + SET jt1.a=1; +--error ER_PARSE_ERROR +DELETE JSON_TABLE(t1.c1,'$[*]' COLUMNS (a INT PATH '$.a')) AS jt1 + FROM t1; +--error ER_PARSE_ERROR +DELETE t1, JSON_TABLE(t1.c1,'$[*]' COLUMNS (a INT PATH '$.a')) AS jt1 + USING t1; + +DROP TABLE t1; + +--echo # +--echo # Bug#25547244: ASSERTION `!TABLE || (!TABLE->READ_SET || BITMAP_IS_SET( +--echo # +CREATE TABLE t1(i INT); +INSERT INTO t1 VALUES(1); +--disable_parsing +# Crash #3 (not reproducible anymore?) +WITH cte_query AS + (SELECT * FROM t1, JSON_TABLE( JSON_OBJECT('ISSKF',i) , + '$[*]' COLUMNS (jtcol1 INT EXISTS PATH '$[*]') ) AS alias2) +SELECT jtcol1 AS field1 FROM cte_query; +--enable_parsing + +DROP TABLE t1; + +--echo # +--echo # Bug#25540675: ASSERTION `!COL->CHILD_JDS->PRODUCING_RECORDS' FAILED +--echo # +CREATE TABLE j1(j JSON); +INSERT INTO j1 VALUES('[1,2,3]'),('[1,2,4]'); +SELECT * FROM j1, + JSON_TABLE( JSON_OBJECT('key1', j) , + '$.*' COLUMNS (NESTED PATH '$.*' COLUMNS (col11 FOR ORDINALITY))) AS alias2; +DROP TABLE j1; + +--echo # +--echo # Bug#25584335: SIG 11 IN TABLE_LIST::FETCH_NUMBER_OF_ROWS +--echo # +CREATE TABLE t1(i INT); + +PREPARE stmt FROM "SELECT alias1.i AS field1 FROM ( + t1 AS alias1, + (SELECT * FROM + JSON_TABLE('[1,2,3]' , + '$[*]' COLUMNS (`col_varchar` FOR ORDINALITY)) AS SQ1_alias1 + ) AS alias2 )"; + +EXECUTE stmt; +DEALLOCATE PREPARE stmt; +DROP TABLE t1; + +--echo # +--echo # Bug#25604048: COLUMN NAMES WITH SAME 33-CHAR PREFIX ARE EQUAL +--echo # +SELECT * FROM JSON_TABLE( '{"key1": "test"}' , '$' COLUMNS( + column_name_is_thirty_four_or_more VARCHAR(17) PATH '$.key1', + column_name_is_thirty_four_or_more_yes_indeed VARCHAR(17) PATH '$.key1' +) ) AS alias1; + +#--error ER_WRONG_COLUMN_NAME +SELECT * FROM JSON_TABLE( '{"key1": "test"}' , '$' COLUMNS( + `column_name_is_thirty_four_or_more ` VARCHAR(17) PATH '$.key1' +) ) AS alias1; + +--echo # +--echo # Bug#25604404: JSON_TABLE MORE RESTRICTIVE WITH IDENTIFIERS THAN +--echo # CREATE TABLE +--echo # +SELECT * FROM JSON_TABLE( '[1, 2]', '$' COLUMNS( + one INT PATH '$[0]', two INT PATH '$[1]' +)) AS jt; + +--echo # +--echo # Bug#25588450: SIG 6 IN JSON_WRAPPER::SEEK|SQL/JSON_DOM.CC +--echo # +CREATE TABLE t1(c VARCHAR(10)) ENGINE=MEMORY; +INSERT INTO t1 VALUES('fiheife'); +#--error ER_INVALID_JSON_TEXT_IN_PARAM +--error ER_JSON_SYNTAX +SELECT * FROM `t1` AS alias1, JSON_TABLE( `c` , '$[*]' COLUMNS (jtcol1 JSON +PATH '$.*')) AS alias2 WHERE jtcol1 <= 'kjfh'; +DROP 
TABLE t1; + +--echo # +--echo # Bug#25587754: ASSERTION `FIXED == 0 || BASIC_CONST_ITEM()' FAILED +--echo # +PREPARE stmt FROM + "SELECT * FROM JSON_TABLE( '[1,2]', '$[*]' + COLUMNS (jtcol1 JSON PATH '$.*')) AS alias2"; +EXECUTE stmt; +DEALLOCATE PREPARE stmt; + +--echo # +--echo # Bug#25584593: UNABLE TO USE JSON_TABLE() ON TEXT/BLOB JSON DATA +--echo # +SELECT * FROM JSON_TABLE(NULL, '$.k' COLUMNS (id FOR ORDINALITY)) AS aLias; +SELECT * FROM JSON_TABLE(@myjson, '$.k' COLUMNS (id FOR ORDINALITY)) AS alias; +SET @myjson = '{"k": 42}'; +SELECT * FROM JSON_TABLE(@myjson, '$.k' COLUMNS (id FOR ORDINALITY)) AS alias; + +CREATE TABLE t1( + txt TEXT, ty TINYTEXT, tm MEDIUMTEXT, tl LONGTEXT); +INSERT INTO t1 values ( + '{"k": "text"}','{"k": "tinytext"}','{"k": "mediumtext"}','{"k": "longtext"}'); +SELECT alias.* FROM t1, JSON_TABLE(t1.txt,'$.k' COLUMNS (id VARCHAR(10) PATH '$')) AS alias; +SELECT alias.* FROM t1, JSON_TABLE(t1.ty, '$.k' COLUMNS (id VARCHAR(10) PATH '$')) AS alias; +SELECT alias.* FROM t1, JSON_TABLE(t1.tm, '$.k' COLUMNS (id VARCHAR(10) PATH '$')) AS alias; +SELECT alias.* FROM t1, JSON_TABLE(t1.tl, '$.k' COLUMNS (id VARCHAR(10) PATH '$')) AS alias; +SELECT alias.* FROM t1, JSON_TABLE(t1.txt,'$.k' COLUMNS (id TEXT PATH '$')) AS alias; +SELECT alias.* FROM t1, JSON_TABLE(t1.txt,'$.k' COLUMNS (id TINYTEXT PATH '$')) AS alias; +SELECT alias.* FROM t1, JSON_TABLE(t1.txt,'$.k' COLUMNS (id MEDIUMTEXT PATH '$')) AS alias; +SELECT alias.* FROM t1, JSON_TABLE(t1.txt,'$.k' COLUMNS (id LONGTEXT PATH '$')) AS alias; + +# BLOB can store data from JSON +SELECT alias.* FROM JSON_TABLE('"asd123"', '$' COLUMNS (blb BLOB PATH '$')) as alias; +# Check that type is printed correctly +EXPLAIN SELECT alias.* FROM JSON_TABLE('"asd123"', '$' COLUMNS (blb BLOB PATH '$')) as alias; +EXPLAIN SELECT alias.* FROM JSON_TABLE('"asd123"', '$' COLUMNS (blb TINYBLOB PATH '$')) as alias; +EXPLAIN SELECT alias.* FROM JSON_TABLE('"asd123"', '$' COLUMNS (blb MEDIUMBLOB PATH '$')) as alias; +EXPLAIN SELECT alias.* FROM JSON_TABLE('"asd123"', '$' COLUMNS (blb LONGBLOB PATH '$')) as alias; +EXPLAIN SELECT alias.* FROM JSON_TABLE('"asd123"', '$' COLUMNS (blb TEXT PATH '$')) as alias; +EXPLAIN SELECT alias.* FROM JSON_TABLE('"asd123"', '$' COLUMNS (blb TINYTEXT PATH '$')) as alias; +EXPLAIN SELECT alias.* FROM JSON_TABLE('"asd123"', '$' COLUMNS (blb MEDIUMTEXT PATH '$')) as alias; +EXPLAIN SELECT alias.* FROM JSON_TABLE('"asd123"', '$' COLUMNS (blb LONGTEXT PATH '$')) as alias; +# But can't be used as a data source +#--error ER_WRONG_ARGUMENTS +--error ER_PARSE_ERROR +SELECT * FROM + (SELECT CAST(blb AS JSON) jf FROM + JSON_TABLE('"asd123"', '$' COLUMNS (blb BLOB PATH '$')) AS jti) AS dt, + JSON_TABLE(jf, '$' COLUMNS (blb BLOB PATH '$')) AS jto; +DROP TABLE t1; + +--echo # +--echo # Bug#26500384: ASSERT FAILURE IN QUERY WITH WINDOW FUNCTION AND +--echo # JSON_TABLE +--echo # +CREATE TABLE t (x INT); +INSERT INTO t VALUES (1), (2), (3); +--error ER_BAD_FIELD_ERROR +SELECT MAX(t.x) OVER () m, jt.* FROM t, + JSON_TABLE(JSON_ARRAY(m), '$[*]' COLUMNS (i INT PATH '$')) jt; +DROP TABLE t; + +--echo # +--echo # Bug#26583283: ASSERTION `!THD->IS_ERROR()' FAILED IN SQL_RESOLVER.CC +--echo # +EXPLAIN SELECT * FROM JSON_TABLE('null', '$' COLUMNS(AA DECIMAL PATH '$')) tt; + +CREATE VIEW v1 AS SELECT * FROM + JSON_TABLE( 'null', '$' COLUMNS (c1 DECIMAL PATH '$' ) ) AS jt; +SELECT * FROM v1; +EXPLAIN SELECT * FROM v1; +DROP VIEW v1; + +--echo # +--echo # Bug#25822408: ASSERTION `!COL->CHILD_JDS->PRODUCING_RECORDS' FAILED +--echo # 
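+# The ERROR ON EMPTY column below never finds '$.b' in the input document,
+# so every execution of the prepared statement, not only the first one, is
+# expected to fail with ER_JSON_TABLE_ERROR_ON_FIELD.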
+PREPARE stmt FROM "SELECT * FROM + JSON_TABLE('{\"a\":1}','$' COLUMNS (c1 CHAR(20) PATH '$.b' ERROR ON EMPTY)) jt"; +--error ER_JSON_TABLE_ERROR_ON_FIELD +EXECUTE stmt; +--error ER_JSON_TABLE_ERROR_ON_FIELD +EXECUTE stmt; + +--echo # +--echo # Bug#25594571: CRASH AT ITEM::CONST_ITEM|SQL/ITEM.H +--echo # +CREATE TABLE t1 (i INT); + +INSERT INTO t1 VALUES(1),(2),(3),(4),(5),(6),(7); + +PREPARE stmt FROM "SELECT * FROM t1 AS alias1 LEFT JOIN t1 AS alias2 +LEFT JOIN JSON_TABLE('[1,2,3]', '$[*]' COLUMNS (i FOR ORDINALITY )) AS +alias3 ON alias2 . `i` = alias3 . `i` ON alias1 . `i` = alias2 . `i`"; + +EXECUTE stmt; +EXECUTE stmt; + +DROP TABLE t1; + +--echo # +--echo # Bug#26648617: ASSERTION `IS_VIEW_OR_DERIVED() && +--echo # USES_MATERIALIZATION()' FAILED. +--echo # +CREATE TABLE t1 ( + col_varchar_key varchar(1) DEFAULT NULL +) ENGINE=InnoDB; + +INSERT INTO t1 VALUES(1),(4); + +SELECT * FROM t1 WHERE col_varchar_key NOT IN ( + SELECT col_varchar_key FROM JSON_TABLE( + '[{"col_key": 1},{"col_key": 2}]', "$[*]" COLUMNS + (col_varchar_key VARCHAR(10) PATH "$.col_key")) AS innr1); + +EXPLAIN SELECT * FROM t1 WHERE col_varchar_key NOT IN ( + SELECT col_varchar_key FROM JSON_TABLE( + '[{"col_key": 1},{"col_key": 2}]', "$[*]" COLUMNS + (col_varchar_key VARCHAR(10) PATH "$.col_key")) AS innr1); + +SELECT * FROM t1 WHERE col_varchar_key IN ( + SELECT col_varchar_key FROM JSON_TABLE( + '[{"col_key": 1},{"col_key": 2}]', "$[*]" COLUMNS + (col_varchar_key VARCHAR(10) PATH "$.col_key")) AS innr1); + +EXPLAIN SELECT * FROM t1 WHERE col_varchar_key IN ( + SELECT col_varchar_key FROM JSON_TABLE( + '[{"col_key": 1},{"col_key": 2}]', "$[*]" COLUMNS + (col_varchar_key VARCHAR(10) PATH "$.col_key")) AS innr1); + +DROP TABLE t1; + +--echo # +--echo # Bug#26711551: WL8867:CONDITIONAL JUMP IN JSON_TABLE_COLUMN::CLEANUP +--echo # +CREATE TABLE t(x int, y int); +INSERT INTO t(x) VALUES (1); +UPDATE t t1, JSON_TABLE('[2]', '$[*]' COLUMNS (x INT PATH '$')) t2 +SET t1.y = t2.x; +SELECT * FROM t; +DROP TABLE t; + +--echo # +--echo # Bug#26679671: SIG 11 IN JSON_BINARY::PARSE_BINARY() +--echo # +CREATE TABLE t1(id INT, f1 JSON); +INSERT INTO t1 VALUES (1, '{\"1\": 1}'), (2, '{\"1\": 2}'), (3, '{\"1\": 3}'), +(4, '{\"1\": 4}'), (5, '{\"1\": 5}'), (6, '{\"1\": 6}'); +ANALYZE TABLE t1; + +ANALYZE TABLE t1; + +--sorted_result +SELECT * FROM t1 as jj1, + (SELECT tt2.* FROM t1 as tt2, + JSON_TABLE(f1, "$" COLUMNS (id FOR ORDINALITY)) AS tbl) dt; +EXPLAIN SELECT * FROM t1 as jj1, + (SELECT tt2.* FROM t1 as tt2, + JSON_TABLE(f1, "$" COLUMNS (id FOR ORDINALITY)) AS tbl) dt; + +# psergey:name resolution +# HF --error ER_NON_UNIQ_ERROR +SELECT * FROM t1 as jj1, + (SELECT tt2.* FROM t1 as tt2, + JSON_TABLE(f1, "$" COLUMNS (id FOR ORDINALITY)) AS tbl STRAIGHT_JOIN + t1 AS tt3) dt ORDER BY 1,3 LIMIT 10; +# psergey:name resolution +# HF --error ER_NON_UNIQ_ERROR +EXPLAIN SELECT * FROM t1 as jj1, + (SELECT tt2.* FROM t1 as tt2, + JSON_TABLE(f1, "$" COLUMNS (id FOR ORDINALITY)) AS tbl STRAIGHT_JOIN + t1 AS tt3) dt ORDER BY 1,3 LIMIT 11; + +SELECT * FROM t1 WHERE id IN + (SELECT id FROM t1 as tt2, + JSON_TABLE(f1, "$" COLUMNS (jf FOR ORDINALITY)) AS tbl); +EXPLAIN SELECT * FROM t1 WHERE id IN + (SELECT id FROM t1 as tt2, + JSON_TABLE(f1, "$" COLUMNS (jf FOR ORDINALITY)) AS tbl); + +DROP TABLE t1; + +--echo # +--echo # Bug#26760811: WL#8867: MEMORY LEAK REPORTED BY ASAN AND VALGRIND +--echo # +CREATE TABLE t (j JSON); +INSERT INTO t VALUES +('[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17]'), +('[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17]'); 
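+# Each of the two rows above holds a 17-element array, so the join below is
+# expected to produce 2 * 17 = 34 rows.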
+SELECT COUNT(*) FROM t, JSON_TABLE(j, '$[*]' COLUMNS (i INT PATH '$')) AS jt; +PREPARE ps FROM +'SELECT COUNT(*) FROM t, JSON_TABLE(j, ''$[*]'' COLUMNS (i INT PATH ''$'')) AS jt'; +EXECUTE ps; +EXECUTE ps; +DROP PREPARE ps; +DROP TABLE t; + +--echo # +--echo # Bug #26781759: NON-UNIQUE ALIAS ERROR NOT BEING THROWN +--echo # +--error ER_NONUNIQ_TABLE +SELECT * FROM JSON_TABLE(NULL, '$' COLUMNS(j1 FOR ORDINALITY)) AS jalias, +JSON_TABLE(NULL, '$' COLUMNS(j1 FOR ORDINALITY)) AS jalias; + +# --echo # +# --echo # Bug #26761470: WL#8867: JOIN::MAKE_JOIN_PLAN(): +# --echo # ASSERTION `SELECT_LEX->IS_RECURSIVE()' FAILED +# --echo # +# CREATE TABLE t1 (x INT); +# INSERT INTO t1 VALUES (1); +# CREATE TABLE t2 (j JSON); +# INSERT INTO t2 (j) VALUES ('[1,2,3]'); +# --sorted_result +# # psergey:name resolution +# --error ER_WRONG_OUTER_JOIN +# SELECT * FROM t1 RIGHT JOIN +# (SELECT o FROM t2, JSON_TABLE(j, '$[*]' COLUMNS (o FOR ORDINALITY)) AS jt) AS t3 +# ON (t3.o = t1.x); +# DROP TABLE t1, t2; + +--echo # +--echo # Bug#27152428 JSON_TABLE + PREPARED STATEMENT + VIEW HAS PROBLEM IN DURING RESOLUTION +--echo # + +CREATE TABLE t1 (a INT, b INT); + +CREATE VIEW v2 AS SELECT * FROM t1 LIMIT 2; + +--disable_parsing +# psergey-done: crash in name resolution: +SELECT b + FROM (SELECT * FROM v2) vq1, + JSON_TABLE(CONCAT(vq1.b,'[{\"a\":\"3\"}]'), + '$[*]' COLUMNS (id FOR ORDINALITY, + jpath VARCHAR(100) PATH '$.a', + JEXST INT EXISTS PATH '$.b') + ) AS dt; +--enable_parsing + +DROP TABLE t1; +DROP VIEW v2; + + +--echo # +--echo # Bug#27189940: CREATE VIEW FAILS ON JSON_TABLE() IN SCHEMA-LESS CONNECTIONS +--echo # BUG#27217897: JSON_TABLE() FAILS IF NO DATABASE SELECTED +--echo # + + +# Connect without a schema name: +connect (conn1,localhost,root,,*NO-ONE*); +connection conn1; + +CREATE VIEW test.v AS SELECT * FROM JSON_TABLE('[1,2,3]', '$[*]' COLUMNS (num INT PATH '$[0]')) AS jt; +SELECT * FROM JSON_TABLE('[1,2,3]', '$[*]' COLUMNS (num INT PATH '$[0]')) AS jt; + +connection default; +disconnect conn1; +use test; + +SHOW CREATE VIEW test.v; +SELECT * FROM test.v; +DROP VIEW test.v; + +--echo # +--echo # Bug#27729112 JSON_TABLE SHOULD DECODE BASE64-ENCODED STRINGS +--echo # +SELECT v +FROM JSON_TABLE(JSON_OBJECT('foo', _binary'bar'), '$' + COLUMNS(v VARCHAR(255) PATH '$.foo')) tbl; + +--disable_parsing +# not supported +CREATE TABLE t1 (col1 VARCHAR(255) CHARACTER SET ucs2, + col2 VARCHAR(255) CHARACTER SET utf8mb4 COLLATE + utf8mb4_unicode_cs); +INSERT INTO t1 VALUES ("æ", "ハ"), ("å", "ø"), ("ø", "パ"), ("@", "バ"); + +ANALYZE TABLE t1 UPDATE HISTOGRAM ON col1, col2; +SELECT v value, c cumulfreq +FROM INFORMATION_SCHEMA.COLUMN_STATISTICS, + JSON_TABLE(histogram->'$.buckets', '$[*]' + COLUMNS(v VARCHAR(255) CHARACTER SET ucs2 PATH '$[0]', + c double PATH '$[1]')) hist +WHERE column_name = "col1"; +SELECT v value, c cumulfreq +FROM INFORMATION_SCHEMA.COLUMN_STATISTICS, + JSON_TABLE(histogram->'$.buckets', '$[*]' + COLUMNS(v VARCHAR(255) CHARACTER SET utf8mb4 PATH '$[0]', + c double PATH '$[1]')) hist +WHERE column_name = "col2"; +DROP TABLE t1; + +--enable_parsing +--echo # +--echo # Bug#28254268: JSON_TABLE() FUNCTION REJECTS SELECT PERMISSIONS +--echo # +CREATE DATABASE db2; +USE db2; +CREATE TABLE t1 (c JSON); +INSERT INTO t1 VALUES('[1,2,3]'); + +CREATE USER user1@localhost; +GRANT SELECT ON db2.t1 TO user1@localhost; + +connect (conn1,localhost,user1,,); +connection conn1; +USE db2; +SELECT t1.c FROM t1; +SELECT jt.* FROM t1, JSON_TABLE(t1.c, '$[*]' COLUMNS (num INT PATH '$[0]')) +AS jt; + +disconnect conn1; 
+connection default; + +DROP USER user1@localhost; +DROP DATABASE db2; + +--echo # +--echo # Bug#27856835 JSON_TABLE RETURNS WRONG DATATYPE WHEN INT-VALUE IS GRATER +--echo # THAN (2^31-1) +--echo # +SELECT id FROM JSON_TABLE('[{"id":"9223372036854775807"}]', '$[*]' COLUMNS +(id BIGINT PATH '$.id')) AS json; + +--echo # As we currently have no way of telling if a JSON string value is +--echo # signed or unsigned, this value will overflow. +SELECT id FROM JSON_TABLE('[{"id":"9223372036854775808"}]', '$[*]' COLUMNS +(id BIGINT PATH '$.id')) AS json; + +--echo # Here the JSON value is a NUMERIC value, and we thus know if the value +--echo # is signed or unsigned. +SELECT id FROM JSON_TABLE('[{"id":9223372036854775808}]', '$[*]' COLUMNS +(id BIGINT PATH '$.id')) AS json; + +--echo # If we tell the JSON table column to be unsigned, we get to store the +--echo # full value correctly. +SELECT id FROM JSON_TABLE('[{"id":"9223372036854775808"}]', '$[*]' COLUMNS +(id BIGINT UNSIGNED PATH '$.id')) AS json; + +SELECT id FROM JSON_TABLE('[{"id":"2147483648"}]', '$[*]' COLUMNS +(id INT UNSIGNED PATH '$.id')) AS json; + +--echo # Check that we preserve the signedness of the columns. +USE test; +CREATE TABLE t1 AS SELECT id, value FROM + JSON_TABLE('[{"id":9223372036854775808, "value":9223372036854775807}]', + '$[*]' COLUMNS (id BIGINT UNSIGNED PATH '$.id', + value BIGINT PATH '$.value')) + AS json; +SHOW CREATE TABLE t1; +DROP TABLE t1; + +--echo # +--echo # Bug#28255453: VIEW USING JSON_TABLE FAILS IF NO SCHEMA IS SELECTED +--echo # +connect (conn1,localhost,root,,*NO-ONE*); +CREATE VIEW test.v AS SELECT * FROM JSON_TABLE('[1,2,3]', '$[*]' COLUMNS (num INT PATH '$[0]')) AS jt; +SELECT * FROM test.v; +DROP VIEW test.v; + +--echo # Check that a user with access to the schema 'foo' can do a SELECT with +--echo # a JSON_TABLE function. 
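+# The user below is granted only EXECUTE on the schema; since JSON_TABLE does
+# not read any base table, that is expected to be enough for the SELECT.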
+CREATE SCHEMA foo; +CREATE USER foo@localhost; +GRANT EXECUTE ON foo.* TO foo@localhost; +connect (con1,localhost,foo,,foo); +SELECT 1 FROM JSON_TABLE('[1,2,3]', '$[*]' COLUMNS (num INT PATH '$.a')) AS jt; +connection default; +disconnect con1; +DROP USER foo@localhost; +DROP SCHEMA foo; + +--echo # +--echo # Bug#27923406 ERROR 1142 (42000) WHEN USING JSON_TABLE +--echo # +CREATE SCHEMA my_schema; + +CREATE USER foo@localhost; +GRANT EXECUTE ON my_schema.* TO foo@localhost; +connect (con1,localhost,foo,,my_schema); + +SELECT + * +FROM + JSON_TABLE( + '[{"x":2,"y":"8"},{"x":"3","y":"7"},{"x":"4","y":6}]', + "$[*]" COLUMNS( + xval VARCHAR(100) PATH "$.x", + yval VARCHAR(100) PATH "$.y" + ) + ) AS jt1; + +connection default; +disconnect con1; +DROP USER foo@localhost; +DROP SCHEMA my_schema; + +--echo # +--echo # Bug#28538315: JSON_TABLE() COLUMN TYPES DON'T SUPPORT COLLATE CLAUSE +--echo # + +CREATE TABLE t1 SELECT * + FROM JSON_TABLE('"test"', '$' COLUMNS(x VARCHAR(10) + CHARSET utf8mb4 + PATH '$')) AS jt1; +SHOW CREATE TABLE t1; + +--disable_parsing +CREATE TABLE t2 SELECT * + FROM JSON_TABLE('"test"', '$' COLUMNS(x VARCHAR(10) + CHARSET utf8mb4 COLLATE utf8mb4_bin + PATH '$')) AS jt1; +SHOW CREATE TABLE t2; + +CREATE TABLE t3 AS SELECT * + FROM JSON_TABLE('"a"', '$' COLUMNS (a VARCHAR(10) + COLLATE ascii_bin + PATH '$')) jt; +SHOW CREATE TABLE t3; +--enable_parsing + +DROP TABLE t1; + +--echo # +--echo # Bug#28643862 JSON_TABLE'S "COLUMNS" CLAUSE USES +--echo # GLOBAL.CHARACTER_SET_RESULTS DEFAULT CHARSET +--echo # + +SET @@SESSION.character_set_connection = ascii; + +CREATE TABLE t1 SELECT a.col + FROM JSON_TABLE('"test"', '$' COLUMNS(col VARCHAR(10) PATH '$')) AS a; + +SHOW CREATE TABLE t1; + +SET @@SESSION.collation_connection = latin1_bin; + +CREATE TABLE t2 SELECT a.col + FROM JSON_TABLE('"test"', '$' COLUMNS(col VARCHAR(10) PATH '$')) AS a; + +SHOW CREATE TABLE t2; + +DROP TABLE t1, t2; + +SET @@SESSION.character_set_connection = DEFAULT; + +--echo # +--echo # Bug#28851656: JSON_TABLE RETURN "UNKNOWN DATABASE ''" FROM A FUNCTION +--echo # +CREATE FUNCTION FN_COUNT_ROWS(X JSON) +RETURNS INT DETERMINISTIC + RETURN ( + SELECT COUNT(*) FROM JSON_TABLE( X, '$[*]' COLUMNS( I INT PATH '$')) der + ); + +SELECT FN_COUNT_ROWS('[1, 2]') CNT; +SELECT FN_COUNT_ROWS('[1, 2, 3]') CNT; +SELECT FN_COUNT_ROWS('[1, 2, 3, 4]') CNT; + +DROP FUNCTION FN_COUNT_ROWS; + +--echo # +--echo # Bug#30310265: VIEWS LOSE THE CHARACTER SET OF JSON_TABLE'S +--echo # PATH ARGUMENTS +--echo # + +CREATE VIEW v1 AS + SELECT * FROM JSON_TABLE('{"å":{"å":1}}', '$.å' COLUMNS (x INT PATH '$.å')) t; +SELECT * FROM v1; +SET NAMES latin1; +# Used to return zero rows. +SELECT * FROM v1; +SET NAMES DEFAULT; +DROP VIEW v1; + +CREATE VIEW v2 AS + SELECT * FROM JSON_TABLE('{}', '$' COLUMNS ( + x VARCHAR(10) PATH '$.a' DEFAULT '"isn''t here"' ON EMPTY) + ) t; +# SHOW CREATE VIEW and SELECT from the view used to fail with a syntax error. +--disable_parsing +# psergey-done +SHOW CREATE VIEW v2; + +SELECT * FROM v2; +--enable_parsing +DROP VIEW v2; + +# The string literals in JSON_TABLE didn't accept character set +# introducers. Verify that they are accepted now. +--disable_parsing +# psergey: it's a bug! 
+SELECT * FROM + JSON_TABLE(JSON_OBJECT(), + _utf8mb4'$' COLUMNS (NESTED PATH _utf8mb4'$.x' COLUMNS + (y INT PATH _utf8mb4'$.y' + DEFAULT _utf8mb4'1' ON EMPTY + DEFAULT _utf8mb4'2' ON ERROR))) jt; +--enable_parsing + +--echo # +--echo # Bug#30382156: STORED PROCEDURE, JSON_TABLE AND "CONST" JOIN TYPE +--echo # +CREATE TABLE t (id INT PRIMARY KEY, j JSON); +INSERT INTO t VALUES (1, '{"x":1}'); +CREATE PROCEDURE p() + SELECT * FROM t, JSON_TABLE(j, '$' COLUMNS (x INT PATH '$.x')) jt + WHERE id = 1; +CALL p(); +CALL p(); +CALL p(); +DROP PROCEDURE p; +DROP TABLE t; + +--echo # +--echo # WL#13512: Deprecate ON ERROR before ON EMPTY in JSON_TABLE syntax +--echo # +SELECT * FROM + JSON_TABLE('{}', '$' COLUMNS (x VARCHAR(10) PATH '$.x' + DEFAULT '"a"' ON ERROR DEFAULT '"b"' ON EMPTY)) jt; +SELECT * FROM + JSON_TABLE('{}', '$' COLUMNS (x VARCHAR(10) PATH '$.x' + NULL ON ERROR NULL ON EMPTY)) jt; +SELECT * FROM + JSON_TABLE('{"x":"c"}', '$' COLUMNS (x VARCHAR(10) PATH '$.x' + ERROR ON ERROR ERROR ON EMPTY)) jt; +--echo # +--echo # Bug#30628330: INSERT WITH JSON_TABLE FAILS DUE TO WARNING INVOKED +--echo # WITH ON ERROR CLAUSE +--echo # +CREATE TABLE t(id INT, a TINYINT, b VARCHAR(3), c DATE, d DECIMAL); +# This statement used to fail with "data too long". +INSERT INTO t SELECT * FROM + JSON_TABLE(JSON_ARRAY(JSON_OBJECT('a', 1, 'b', 'abc'), + JSON_OBJECT('a', 2, 'b', 'abcd'), + JSON_OBJECT('a', 1000, 'b', 'xyz'), + JSON_OBJECT('c', TIME'12:00:00', 'd', 1e308)), + '$[*]' COLUMNS (id FOR ORDINALITY, + a TINYINT PATH '$.a' DEFAULT '111' ON ERROR, + b VARCHAR(3) PATH '$.b' DEFAULT '"ERR"' ON ERROR, + c DATE PATH '$.c' DEFAULT '"2001-01-01"' ON ERROR, + d DECIMAL PATH '$.c' DEFAULT '999' ON ERROR) + ) AS jt; +SELECT * FROM t ORDER BY id; +DROP TABLE t; + +--echo # +--echo # Bug#30263373: INCORRECT OUTPUT FROM TABLE_FUNCTION_JSON::PRINT() +--echo # + +CREATE VIEW v AS SELECT * FROM + JSON_TABLE('[123]', '$[*]' COLUMNS (`name with space 1` INT PATH '$', + `name with space 2` FOR ORDINALITY)) jt; +# Used to fail with a syntax error, due to unquoted column names in +# the view definition. +SELECT * FROM v; +DROP VIEW v; + +CREATE VIEW v AS SELECT HEX(x), HEX(y) FROM + JSON_TABLE('["abc"]', '$[*]' COLUMNS (x BINARY(10) PATH '$', + y VARBINARY(10) PATH '$')) jt; +# Used to say CHAR(10) and VARCHAR(10) instead of BINARY(10) and VARBINARY(10). +SELECT VIEW_DEFINITION FROM INFORMATION_SCHEMA.VIEWS + WHERE TABLE_SCHEMA = 'test' AND TABLE_NAME = 'v'; +SELECT * FROM v; +DROP VIEW v; + +CREATE VIEW v AS SELECT * FROM + JSON_TABLE('[123]', '$[*]' COLUMNS(ti TINYINT PATH '$', + si SMALLINT PATH '$', + mi MEDIUMINT PATH '$', + i INT PATH '$', + bi BIGINT PATH '$', + tiu TINYINT UNSIGNED PATH '$', + siu SMALLINT UNSIGNED PATH '$', + miu MEDIUMINT UNSIGNED PATH '$', + iu INT UNSIGNED PATH '$', + biu BIGINT UNSIGNED PATH '$')) jt; +# Used to lack the UNSIGNED attribute for the unsigned columns. +SELECT VIEW_DEFINITION FROM INFORMATION_SCHEMA.VIEWS + WHERE TABLE_SCHEMA = 'test' AND TABLE_NAME = 'v'; +SELECT * FROM v; +DROP VIEW v; + +--disable_parsing +CREATE VIEW v AS SELECT * FROM + JSON_TABLE('[]', '$[*]' COLUMNS (g GEOMETRY PATH '$', + ls LINESTRING PATH '$')) AS jt; +# Used to say GEOMETRY for both columns. 
+SELECT VIEW_DEFINITION FROM INFORMATION_SCHEMA.VIEWS + WHERE TABLE_SCHEMA = 'test' AND TABLE_NAME = 'v'; +SELECT * FROM v; +DROP VIEW v; +--enable_parsing + +--disable_parsing +CREATE VIEW v AS SELECT * FROM + JSON_TABLE('["abc"]', '$[*]' COLUMNS + (x VARCHAR(10) CHARSET latin1 PATH '$', + y VARCHAR(10) COLLATE utf8mb4_de_pb_0900_ai_ci PATH '$', + z LONGTEXT COLLATE utf8mb4_bin PATH '$')) jt; +# Character set and collation information wasn't included. +SELECT VIEW_DEFINITION FROM INFORMATION_SCHEMA.VIEWS + WHERE TABLE_SCHEMA = 'test' AND TABLE_NAME = 'v'; +# Used to return the default collation instead of the collation +# specified in the JSON_TABLE column definitions. +SELECT x, y, z, COLLATION(x) c_x, COLLATION(y) c_y, COLLATION(z) c_z FROM v; +DROP VIEW v; +--enable_parsing + +--echo # +--echo # Bug#31345503 JSON_TABLE SHOULD RETURN SQL NULL FOR JSON NULL +--echo # + +SELECT * +FROM +JSON_TABLE( +'[ + {"c1": null, + "c2": [{"c": "c_val", "l": [1,2]}], + "c3": null}, + {"c1": true, + "c2": [{"c": "c_val","l": [11]}, {"c": "c_val", "l": [false]}], + "c3": true}, + {"c1": false, + "c2": [{"c": null,"l": [true]}, {"c": "c_val", "l": [null]}], + "c3": false} + ]', + '$[*]' COLUMNS( + top_ord FOR ORDINALITY, + c1path VARCHAR(10) PATH '$.c1' ERROR ON ERROR, + NESTED PATH '$.c2[*]' COLUMNS ( + c2path VARCHAR(10) PATH '$.c' ERROR ON ERROR, + ord FOR ORDINALITY, + NESTED PATH '$.l[*]' COLUMNS (lpath_c VARCHAR(10) PATH '$' ERROR ON ERROR, + lpath_i INT PATH '$' ERROR ON ERROR) + ), + c3path INT PATH '$.c3' ERROR ON ERROR + ) +) as jt; + +--echo # +--echo # Bug #31327187 UBSAN: JSON_TABLE: NULL POINTER PASSED AS ARGUMENT 2, +--echo # WHICH IS DECLARED TO NEVER BE NULL +--echo # + +SELECT HEX(a) FROM JSON_TABLE( + '[{"E":{"e":true,"~":1,"S3":"sTa"},"r":3,"":6.7},"",6.5]', + '$' + COLUMNS(a BINARY(5) PATH '$[1]' NULL ON EMPTY) +) e; diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt index b172059373c..6fae6fcef95 100644 --- a/sql/CMakeLists.txt +++ b/sql/CMakeLists.txt @@ -164,6 +164,7 @@ SET (SQL_SOURCE rowid_filter.cc rowid_filter.h opt_trace.cc table_cache.cc encryption.cc temporary_tables.cc + json_table.cc proxy_protocol.cc backup.cc xa.cc ${CMAKE_CURRENT_BINARY_DIR}/lex_hash.h ${CMAKE_CURRENT_BINARY_DIR}/lex_token.h diff --git a/sql/field.cc b/sql/field.cc index f3deffccee5..22b56a4647a 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -1753,7 +1753,8 @@ bool Field_num::get_int(CHARSET_INFO *cs, const char *from, size_t len, if (get_thd()->count_cuted_fields > CHECK_FIELD_EXPRESSION && check_int(cs, from, len, end, error)) return 1; - return 0; + + return error && get_thd()->count_cuted_fields == CHECK_FIELD_EXPRESSION; out_of_range: set_warning(ER_WARN_DATA_OUT_OF_RANGE, 1); diff --git a/sql/field.h b/sql/field.h index 1a1b56b2c02..457b6dfaecb 100644 --- a/sql/field.h +++ b/sql/field.h @@ -1611,6 +1611,7 @@ public: virtual longlong val_time_packed(THD *thd); virtual const TYPELIB *get_typelib() const { return NULL; } virtual CHARSET_INFO *charset() const= 0; + virtual void change_charset(const DTCollation &new_cs) {} virtual const DTCollation &dtcollation() const= 0; virtual CHARSET_INFO *charset_for_protocol(void) const { return binary() ? 
&my_charset_bin : charset(); } @@ -2109,6 +2110,12 @@ public: { return m_collation; } + void change_charset(const DTCollation &new_cs) override + { + field_length= (field_length * new_cs.collation->mbmaxlen) / + m_collation.collation->mbmaxlen; + m_collation= new_cs; + } bool binary() const override { return field_charset() == &my_charset_bin; } uint32 max_display_length() const override { return field_length; } uint32 character_octet_length() const override { return field_length; } diff --git a/sql/handler.cc b/sql/handler.cc index 985d3c9cc83..482d7939c1a 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -619,10 +619,12 @@ int ha_finalize_handlerton(st_plugin_int *plugin) } +const char *hton_no_exts[]= { 0 }; + + int ha_initialize_handlerton(st_plugin_int *plugin) { handlerton *hton; - static const char *no_exts[]= { 0 }; DBUG_ENTER("ha_initialize_handlerton"); DBUG_PRINT("plugin", ("initialize plugin: '%s'", plugin->name.str)); @@ -635,7 +637,7 @@ int ha_initialize_handlerton(st_plugin_int *plugin) goto err_no_hton_memory; } - hton->tablefile_extensions= no_exts; + hton->tablefile_extensions= hton_no_exts; hton->discover_table_names= hton_ext_based_table_discovery; hton->drop_table= hton_drop_table; diff --git a/sql/handler.h b/sql/handler.h index 3f8f5f5dd23..f97dcb25274 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -1724,6 +1724,8 @@ struct handlerton }; +extern const char *hton_no_exts[]; + static inline LEX_CSTRING *hton_name(const handlerton *hton) { return &(hton2plugin[hton->slot]->name); diff --git a/sql/item_geofunc.cc b/sql/item_geofunc.cc index 9aec6a0bad5..9c776baa2fd 100644 --- a/sql/item_geofunc.cc +++ b/sql/item_geofunc.cc @@ -115,10 +115,6 @@ String *Item_func_geometry_from_wkb::val_str(String *str) } -void report_json_error_ex(String *js, json_engine_t *je, - const char *fname, int n_param, - Sql_condition::enum_warning_level lv); - String *Item_func_geometry_from_json::val_str(String *str) { DBUG_ASSERT(fixed == 1); @@ -178,7 +174,8 @@ String *Item_func_geometry_from_json::val_str(String *str) my_error(ER_GIS_INVALID_DATA, MYF(0), "ST_GeomFromGeoJSON"); break; default: - report_json_error_ex(js, &je, func_name(), 0, Sql_condition::WARN_LEVEL_WARN); + report_json_error_ex(js->ptr(), &je, func_name(), 0, + Sql_condition::WARN_LEVEL_WARN); return NULL; } diff --git a/sql/item_jsonfunc.cc b/sql/item_jsonfunc.cc index 44ce69a482f..af14734eae3 100644 --- a/sql/item_jsonfunc.cc +++ b/sql/item_jsonfunc.cc @@ -247,15 +247,15 @@ error: #define report_json_error(js, je, n_param) \ - report_json_error_ex(js, je, func_name(), n_param, \ + report_json_error_ex(js->ptr(), je, func_name(), n_param, \ Sql_condition::WARN_LEVEL_WARN) -void report_json_error_ex(String *js, json_engine_t *je, +void report_json_error_ex(const char *js, json_engine_t *je, const char *fname, int n_param, Sql_condition::enum_warning_level lv) { THD *thd= current_thd; - int position= (int)((const char *) je->s.c_str - js->ptr()); + int position= (int)((const char *) je->s.c_str - js); uint code; n_param++; @@ -285,16 +285,22 @@ void report_json_error_ex(String *js, json_engine_t *je, case JE_DEPTH: code= ER_JSON_DEPTH; - push_warning_printf(thd, lv, code, ER_THD(thd, code), JSON_DEPTH_LIMIT, - n_param, fname, position); + if (lv == Sql_condition::WARN_LEVEL_ERROR) + my_error(code, MYF(0), JSON_DEPTH_LIMIT, n_param, fname, position); + else + push_warning_printf(thd, lv, code, ER_THD(thd, code), JSON_DEPTH_LIMIT, + n_param, fname, position); return; default: return; } - push_warning_printf(thd, lv, code, 
ER_THD(thd, code), - n_param, fname, position); + if (lv == Sql_condition::WARN_LEVEL_ERROR) + my_error(code, MYF(0), n_param, fname, position); + else + push_warning_printf(thd, lv, code, ER_THD(thd, code), + n_param, fname, position); } @@ -304,15 +310,15 @@ void report_json_error_ex(String *js, json_engine_t *je, #define TRIVIAL_PATH_NOT_ALLOWED 3 #define report_path_error(js, je, n_param) \ - report_path_error_ex(js, je, func_name(), n_param,\ + report_path_error_ex(js->ptr(), je, func_name(), n_param,\ Sql_condition::WARN_LEVEL_WARN) -static void report_path_error_ex(String *ps, json_path_t *p, - const char *fname, int n_param, - Sql_condition::enum_warning_level lv) +void report_path_error_ex(const char *ps, json_path_t *p, + const char *fname, int n_param, + Sql_condition::enum_warning_level lv) { THD *thd= current_thd; - int position= (int)((const char *) p->s.c_str - ps->ptr() + 1); + int position= (int)((const char *) p->s.c_str - ps + 1); uint code; n_param++; @@ -331,8 +337,11 @@ static void report_path_error_ex(String *ps, json_path_t *p, case JE_DEPTH: code= ER_JSON_PATH_DEPTH; - push_warning_printf(thd, lv, code, ER_THD(thd, code), - JSON_DEPTH_LIMIT, n_param, fname, position); + if (lv == Sql_condition::WARN_LEVEL_ERROR) + my_error(code, MYF(0), JSON_DEPTH_LIMIT, n_param, fname, position); + else + push_warning_printf(thd, lv, code, ER_THD(thd, code), + JSON_DEPTH_LIMIT, n_param, fname, position); return; case NO_WILDCARD_ALLOWED: @@ -347,12 +356,14 @@ static void report_path_error_ex(String *ps, json_path_t *p, default: return; } - push_warning_printf(thd, lv, code, ER_THD(thd, code), - n_param, fname, position); + if (lv == Sql_condition::WARN_LEVEL_ERROR) + my_error(code, MYF(0), n_param, fname, position); + else + push_warning_printf(thd, lv, code, ER_THD(thd, code), + n_param, fname, position); } - /* Checks if the path has '.*' '[*]' or '**' constructions and sets the NO_WILDCARD_ALLOWED error if the case. diff --git a/sql/item_jsonfunc.h b/sql/item_jsonfunc.h index ec6c6696001..519bad3f6f3 100644 --- a/sql/item_jsonfunc.h +++ b/sql/item_jsonfunc.h @@ -41,6 +41,13 @@ public: }; +void report_path_error_ex(const char *ps, json_path_t *p, + const char *fname, int n_param, + Sql_condition::enum_warning_level lv); +void report_json_error_ex(const char *js, json_engine_t *je, + const char *fname, int n_param, + Sql_condition::enum_warning_level lv); + class Json_engine_scan: public json_engine_t { public: diff --git a/sql/json_table.cc b/sql/json_table.cc new file mode 100644 index 00000000000..1c44e370195 --- /dev/null +++ b/sql/json_table.cc @@ -0,0 +1,1286 @@ +/* + Copyright (c) 2020, MariaDB Corporation + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA +*/ + +#include "mariadb.h" +#include "sql_priv.h" +#include "sql_class.h" /* TMP_TABLE_PARAM */ +#include "table.h" +#include "item_jsonfunc.h" +#include "json_table.h" +#include "sql_show.h" + + +class table_function_handlerton +{ +public: + handlerton m_hton; + table_function_handlerton() + { + bzero(&m_hton, sizeof(m_hton)); + m_hton.tablefile_extensions= hton_no_exts; + m_hton.slot= HA_SLOT_UNDEF; + } +}; + + +static table_function_handlerton table_function_hton; + + +/* + A table that produces output rows for JSON_TABLE(). +*/ + +class ha_json_table: public handler +{ + Table_function_json_table *m_jt; + + String *m_js; // The JSON document we're reading + String m_tmps; // Buffer for the above + + int fill_column_values(uchar * buf, uchar *pos); + +public: + ha_json_table(TABLE_SHARE *share_arg, Table_function_json_table *jt): + handler(&table_function_hton.m_hton, share_arg), m_jt(jt) + { + /* + set the mark_trx_read_write_done to avoid the + handler::mark_trx_read_write_internal() call. + It relies on &ha_thd()->ha_data[ht->slot].ha_info[0] to be set. + But we don't set the ha_data for the ha_json_table, and + that call makes no sence for ha_json_table. + */ + mark_trx_read_write_done= 1; + + /* See ha_json_table::position for format definition */ + ref_length= m_jt->m_columns.elements * 4; + } + ~ha_json_table() {} + handler *clone(const char *name, MEM_ROOT *mem_root) override { return NULL; } + /* Rows also use a fixed-size format */ + enum row_type get_row_type() const override { return ROW_TYPE_FIXED; } + ulonglong table_flags() const override + { + return (HA_FAST_KEY_READ | /*HA_NO_BLOBS |*/ HA_NULL_IN_KEY | + HA_CAN_SQL_HANDLER | + HA_REC_NOT_IN_SEQ | HA_NO_TRANSACTIONS | + HA_HAS_RECORDS); + } + ulong index_flags(uint inx, uint part, bool all_parts) const override + { + return HA_ONLY_WHOLE_INDEX | HA_KEY_SCAN_NOT_ROR; + } + ha_rows records() override { return HA_POS_ERROR; } + + int open(const char *name, int mode, uint test_if_locked) override + { return 0; } + int close(void) override { return 0; } + int rnd_init(bool scan) override; + int rnd_next(uchar *buf) override; + int rnd_pos(uchar * buf, uchar *pos) override; + void position(const uchar *record) override; + int info(uint) override; + int extra(enum ha_extra_function operation) override { return 0; } + THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, + enum thr_lock_type lock_type) override + { return NULL; } + int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info) + override { return 1; } +}; + + +/* + Helper class that creates the temporary table that + represents the table function in the query. 
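+
+  The expected call sequence (see create_table_for_function() below) is:
+    start()                 - allocate the TABLE, TABLE_SHARE and ha_json_table
+    add_json_table_fields() - create Field objects from the COLUMNS(...) list
+    finalize()              - set up the record buffers and open the handler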
+*/ + +class Create_json_table: public Data_type_statistics +{ + // The following members are initialized only in start() + Field **m_default_field; + uchar *m_bitmaps; + // The following members are initialized in ctor + uint m_temp_pool_slot; + uint m_null_count; +public: + Create_json_table(const TMP_TABLE_PARAM *param, + bool save_sum_fields) + :m_temp_pool_slot(MY_BIT_NONE), + m_null_count(0) + { } + + void add_field(TABLE *table, Field *field, uint fieldnr); + + TABLE *start(THD *thd, + TMP_TABLE_PARAM *param, + Table_function_json_table *jt, + const LEX_CSTRING *table_alias); + + bool add_json_table_fields(THD *thd, TABLE *table, + Table_function_json_table *jt); + bool finalize(THD *thd, TABLE *table, TMP_TABLE_PARAM *param, + Table_function_json_table *jt); +}; + + +/* + @brief + Start scanning the JSON document in [str ... end] + + @detail + Note: non-root nested paths are set to scan one JSON node (that is, a + "subdocument"). +*/ + +void Json_table_nested_path::scan_start(CHARSET_INFO *i_cs, + const uchar *str, const uchar *end) +{ + json_get_path_start(&m_engine, i_cs, str, end, &m_cur_path); + m_cur_nested= NULL; + m_null= false; + m_ordinality_counter= 0; +} + + +/* + @brief + Find the next JSON element that matches the search path. +*/ + +int Json_table_nested_path::scan_next() +{ + bool no_records_found= false; + if (m_cur_nested) + { + for (;;) + { + if (m_cur_nested->scan_next() == 0) + return 0; + if (!(m_cur_nested= m_cur_nested->m_next_nested)) + break; +handle_new_nested: + m_cur_nested->scan_start(m_engine.s.cs, m_engine.value_begin, + m_engine.s.str_end); + } + if (no_records_found) + return 0; + } + + DBUG_ASSERT(!m_cur_nested); + + while (!json_get_path_next(&m_engine, &m_cur_path)) + { + if (json_path_compare(&m_path, &m_cur_path, m_engine.value_type)) + continue; + /* path found. */ + ++m_ordinality_counter; + + if (!m_nested) + return 0; + + m_cur_nested= m_nested; + no_records_found= true; + goto handle_new_nested; + } + + m_null= true; + return 1; +} + + +int ha_json_table::rnd_init(bool scan) +{ + Json_table_nested_path &p= m_jt->m_nested_path; + DBUG_ENTER("ha_json_table::rnd_init"); + + if ((m_js= m_jt->m_json->val_str(&m_tmps))) + { + p.scan_start(m_js->charset(), + (const uchar *) m_js->ptr(), (const uchar *) m_js->end()); + } + + DBUG_RETURN(0); +} + + +/* + @brief + Store JSON value in an SQL field, doing necessary special conversions + for JSON's null, true, and false. 
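+
+  A JSON true/false is stored as 1/0 when the target column has a numeric
+  result type; for other column types the literal text of the value is
+  stored.  A sketch of the expected behaviour (not a quote from the tests):
+
+    SELECT * FROM JSON_TABLE('[true]', '$[*]'
+                             COLUMNS (i INT PATH '$',
+                                      c CHAR(5) PATH '$')) AS jt;
+
+  should return i = 1 and c = 'true'.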
+*/ + +static int store_json_in_field(Field *f, const json_engine_t *je) +{ + switch (je->value_type) + { + case JSON_VALUE_NULL: + f->set_null(); + return 0; + + case JSON_VALUE_TRUE: + case JSON_VALUE_FALSE: + { + Item_result rt= f->result_type(); + if (rt == INT_RESULT || rt == DECIMAL_RESULT || rt == REAL_RESULT) + return f->store(je->value_type == JSON_VALUE_TRUE, false); + break; + } + default: + break; + }; + + return f->store((const char *) je->value, (uint32) je->value_len, je->s.cs); +} + + +bool Json_table_nested_path::check_error(const char *str) +{ + if (m_engine.s.error) + { + report_json_error_ex(str, &m_engine, "JSON_TABLE", 0, + Sql_condition::WARN_LEVEL_ERROR); + return true; // Error + } + return false; // Ok +} + + +int ha_json_table::rnd_next(uchar *buf) +{ + int res; + enum_check_fields cf_orig; + + if (!m_js) + return HA_ERR_END_OF_FILE; + + /* + Step 1: Move the root nested path to the next record (this implies moving + its child nested paths accordingly) + */ + if (m_jt->m_nested_path.scan_next()) + { + if (m_jt->m_nested_path.check_error(m_js->ptr())) + { + /* + We already reported an error, so returning an + error code that just doesn't produce extra + messages. + */ + return HA_ERR_TABLE_IN_FK_CHECK; + } + return HA_ERR_END_OF_FILE; + } + + /* + Step 2: Read values for all columns (the columns refer to nested paths + they are in). + */ + cf_orig= table->in_use->count_cuted_fields; + table->in_use->count_cuted_fields= CHECK_FIELD_EXPRESSION; + res= fill_column_values(buf, NULL); + table->in_use->count_cuted_fields= cf_orig; + return res ? HA_ERR_TABLE_IN_FK_CHECK : 0; +} + + +/* + @brief + Fill values of table columns, taking data either from Json_nested_path + objects, or from the rowid value + + @param pos NULL means the data should be read from Json_nested_path + objects. + Non-null value is a pointer to previously saved rowid (see + ha_json_table::position() for description) +*/ + +int ha_json_table::fill_column_values(uchar * buf, uchar *pos) +{ + Field **f= table->field; + Json_table_column *jc; + List_iterator_fast jc_i(m_jt->m_columns); + my_ptrdiff_t ptrdiff= buf - table->record[0]; + + while ((jc= jc_i++)) + { + bool is_null_value; + uint int_pos= 0; /* just to make compilers happy. */ + + if (!bitmap_is_set(table->read_set, (*f)->field_index)) + goto cont_loop; + + if (ptrdiff) + (*f)->move_field_offset(ptrdiff); + + /* + Read the NULL flag: + - if we are reading from a rowid value, 0 means SQL NULL. + - if scanning json document, read it from the nested path + */ + if (pos) + is_null_value= !(int_pos= uint4korr(pos)); + else + is_null_value= jc->m_nest->m_null; + + if (is_null_value) + { + (*f)->set_null(); + } + else + { + (*f)->set_notnull(); + switch (jc->m_column_type) + { + case Json_table_column::FOR_ORDINALITY: + { + /* + Read the cardinality counter: + - read it from nested path when scanning the json document + - or, read it from rowid when in rnd_pos() call + */ + longlong counter= pos? int_pos: jc->m_nest->m_ordinality_counter; + (*f)->store(counter, TRUE); + break; + } + case Json_table_column::PATH: + case Json_table_column::EXISTS_PATH: + { + json_engine_t je; + json_path_step_t *cur_step; + uint array_counters[JSON_DEPTH_LIMIT]; + int not_found; + const uchar* node_start; + const uchar* node_end; + + /* + Get the JSON context node that we will need to evaluate PATH or + EXISTS against: + - when scanning the json document, read it from nested path + - when in rnd_pos call, the rowid has the start offset. 
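+            (position() stores the offset as value - m_js->ptr() + 1, so that
+             0 can be reserved for SQL NULL; hence the (int_pos-1) below)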
+ */ + if (pos) + { + node_start= (const uchar *) (m_js->ptr() + (int_pos-1)); + node_end= (const uchar *) m_js->end(); + } + else + { + node_start= jc->m_nest->get_value(); + node_end= jc->m_nest->get_value_end(); + } + + json_scan_start(&je, m_js->charset(), node_start, node_end); + + cur_step= jc->m_path.steps; + not_found= json_find_path(&je, &jc->m_path, &cur_step, array_counters) || + json_read_value(&je); + + if (jc->m_column_type == Json_table_column::EXISTS_PATH) + { + (*f)->store(!not_found); + } + else /*PATH*/ + { + if (not_found) + { + if (jc->m_on_empty.respond(jc, *f)) + goto error_return; + } + else + { + if (!json_value_scalar(&je) || + store_json_in_field(*f, &je)) + { + if (jc->m_on_error.respond(jc, *f)) + goto error_return; + } + else + { + /* + If the path contains wildcards, check if there are + more matches for it in json and report an error if so. + */ + if (jc->m_path.types_used & + (JSON_PATH_WILD | JSON_PATH_DOUBLE_WILD) && + (json_scan_next(&je) || + !json_find_path(&je, &jc->m_path, &cur_step, + array_counters))) + { + if (jc->m_on_error.respond(jc, *f)) + goto error_return; + } + } + } + } + break; + } + }; + } + if (ptrdiff) + (*f)->move_field_offset(-ptrdiff); +cont_loop: + f++; + if (pos) + pos+= 4; + } + return 0; +error_return: + return 1; +} + + +int ha_json_table::rnd_pos(uchar * buf, uchar *pos) +{ + return fill_column_values(buf, pos); +} + + +/* + The reference has 4 bytes for every column of the JSON_TABLE. + There it keeps 0 for the NULL values, ordinality index for + the ORDINALITY columns and the offset of the field's data in + the JSON for other column types. +*/ +void ha_json_table::position(const uchar *record) +{ + uchar *c_ref= ref; + Json_table_column *jc; + List_iterator_fast jc_i(m_jt->m_columns); + + while ((jc= jc_i++)) + { + if (jc->m_nest->m_null) + { + int4store(c_ref, 0); + } + else + { + switch (jc->m_column_type) + { + case Json_table_column::FOR_ORDINALITY: + int4store(c_ref, jc->m_nest->m_ordinality_counter); + break; + case Json_table_column::PATH: + case Json_table_column::EXISTS_PATH: + { + size_t pos= jc->m_nest->get_value() - + (const uchar *) m_js->ptr() + 1; + int4store(c_ref, pos); + break; + } + }; + } + c_ref+= 4; + } +} + + +int ha_json_table::info(uint) +{ + /* + We don't want 0 or 1 in stats.records. + Though this value shouldn't matter as the optimizer + supposed to use Table_function_json_table::get_estimates + to obtain this data. + */ + stats.records= 4; + return 0; +} + + +void Create_json_table::add_field(TABLE *table, Field *field, uint fieldnr) +{ + DBUG_ASSERT(!field->field_name.str || + strlen(field->field_name.str) == field->field_name.length); + + if (!(field->flags & NOT_NULL_FLAG)) + m_null_count++; + + table->s->reclength+= field->pack_length(); + + // Assign it here, before update_data_type_statistics() changes m_blob_count + if (field->flags & BLOB_FLAG) + table->s->blob_field[m_blob_count]= fieldnr; + + table->field[fieldnr]= field; + field->field_index= fieldnr; + + field->update_data_type_statistics(this); +} + + +/** + Create a json table according to a field list. 
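+  The table is backed by the ha_json_table handler and the dummy
+  table_function_hton handlerton defined above, not by a real storage engine.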
+ + @param thd thread handle + @param param a description used as input to create the table + @param jt json_table specificaion + @param table_alias alias +*/ + +TABLE *Create_json_table::start(THD *thd, + TMP_TABLE_PARAM *param, + Table_function_json_table *jt, + const LEX_CSTRING *table_alias) +{ + MEM_ROOT *mem_root_save, own_root; + TABLE *table; + TABLE_SHARE *share; + uint copy_func_count= param->func_count; + char *tmpname,path[FN_REFLEN]; + Field **reg_field; + uint *blob_field; + DBUG_ENTER("Create_json_table::start"); + DBUG_PRINT("enter", + ("table_alias: '%s' ", table_alias->str)); + + if (use_temp_pool && !(test_flags & TEST_KEEP_TMP_TABLES)) + m_temp_pool_slot = bitmap_lock_set_next(&temp_pool); + + if (m_temp_pool_slot != MY_BIT_NONE) // we got a slot + sprintf(path, "%s-%lx-%i", tmp_file_prefix, + current_pid, m_temp_pool_slot); + else + { + /* if we run out of slots or we are not using tempool */ + sprintf(path, "%s-%lx-%lx-%x", tmp_file_prefix,current_pid, + (ulong) thd->thread_id, thd->tmp_table++); + } + + /* + No need to change table name to lower case. + */ + fn_format(path, path, mysql_tmpdir, "", + MY_REPLACE_EXT|MY_UNPACK_FILENAME); + + const uint field_count= param->field_count; + DBUG_ASSERT(field_count); + + init_sql_alloc(key_memory_TABLE, &own_root, + TABLE_ALLOC_BLOCK_SIZE, 0, MYF(MY_THREAD_SPECIFIC)); + + if (!multi_alloc_root(&own_root, + &table, sizeof(*table), + &share, sizeof(*share), + ®_field, sizeof(Field*) * (field_count+1), + &m_default_field, sizeof(Field*) * (field_count), + &blob_field, sizeof(uint)*(field_count+1), + ¶m->items_to_copy, + sizeof(param->items_to_copy[0])*(copy_func_count+1), + ¶m->keyinfo, sizeof(*param->keyinfo), + ¶m->start_recinfo, + sizeof(*param->recinfo)*(field_count*2+4), + &tmpname, (uint) strlen(path)+1, + &m_bitmaps, bitmap_buffer_size(field_count)*6, + NullS)) + { + DBUG_RETURN(NULL); /* purecov: inspected */ + } + strmov(tmpname, path); + /* make table according to fields */ + + bzero((char*) table,sizeof(*table)); + bzero((char*) reg_field, sizeof(Field*) * (field_count+1)); + bzero((char*) m_default_field, sizeof(Field*) * (field_count)); + + table->mem_root= own_root; + mem_root_save= thd->mem_root; + thd->mem_root= &table->mem_root; + + table->field=reg_field; + table->alias.set(table_alias->str, table_alias->length, table_alias_charset); + + table->reginfo.lock_type=TL_WRITE; /* Will be updated */ + table->map=1; + table->temp_pool_slot= m_temp_pool_slot; + table->copy_blobs= 1; + table->in_use= thd; + table->no_rows_with_nulls= param->force_not_null_cols; + + table->s= share; + init_tmp_table_share(thd, share, "", 0, "(temporary)", tmpname); + share->blob_field= blob_field; + share->table_charset= param->table_charset; + share->primary_key= MAX_KEY; // Indicate no primary key + share->not_usable_by_query_cache= FALSE; + if (param->schema_table) + share->db= INFORMATION_SCHEMA_NAME; + + param->using_outer_summary_function= 0; + + share->db_plugin= NULL; + if (!(table->file= new (&table->mem_root) ha_json_table(share, jt))) + DBUG_RETURN(NULL); + + table->file->init(); + + thd->mem_root= mem_root_save; + DBUG_RETURN(table); +} + + +bool Create_json_table::finalize(THD *thd, TABLE *table, + TMP_TABLE_PARAM *param, + Table_function_json_table *jt) +{ + DBUG_ENTER("Create_json_table::finalize"); + DBUG_ASSERT(table); + + uint null_pack_length; + bool use_packed_rows= false; + uchar *pos; + uchar *null_flags; + TMP_ENGINE_COLUMNDEF *recinfo; + TABLE_SHARE *share= table->s; + + MEM_ROOT *mem_root_save= thd->mem_root; + 
thd->mem_root= &table->mem_root; + + DBUG_ASSERT(param->field_count >= share->fields); + DBUG_ASSERT(param->field_count >= share->blob_fields); + + if (table->file->set_ha_share_ref(&share->ha_share)) + { + delete table->file; + goto err; + } + + if (share->blob_fields == 0) + m_null_count++; + + null_pack_length= (m_null_count + m_uneven_bit_length + 7) / 8; + share->reclength+= null_pack_length; + if (!share->reclength) + share->reclength= 1; // Dummy select + + { + uint alloc_length= ALIGN_SIZE(share->reclength + MI_UNIQUE_HASH_LENGTH+1); + share->rec_buff_length= alloc_length; + if (!(table->record[0]= (uchar*) + alloc_root(&table->mem_root, alloc_length*3))) + goto err; + table->record[1]= table->record[0]+alloc_length; + share->default_values= table->record[1]+alloc_length; + } + + setup_tmp_table_column_bitmaps(table, m_bitmaps, table->s->fields); + + recinfo=param->start_recinfo; + null_flags=(uchar*) table->record[0]; + pos=table->record[0]+ null_pack_length; + if (null_pack_length) + { + bzero((uchar*) recinfo,sizeof(*recinfo)); + recinfo->type=FIELD_NORMAL; + recinfo->length=null_pack_length; + recinfo++; + bfill(null_flags,null_pack_length,255); // Set null fields + + table->null_flags= (uchar*) table->record[0]; + share->null_fields= m_null_count; + share->null_bytes= share->null_bytes_for_compare= null_pack_length; + } + m_null_count= (share->blob_fields == 0) ? 1 : 0; + for (uint i= 0; i < share->fields; i++, recinfo++) + { + Field *field= table->field[i]; + uint length; + bzero((uchar*) recinfo,sizeof(*recinfo)); + + if (!(field->flags & NOT_NULL_FLAG)) + { + recinfo->null_bit= (uint8)1 << (m_null_count & 7); + recinfo->null_pos= m_null_count/8; + field->move_field(pos, null_flags + m_null_count/8, + (uint8)1 << (m_null_count & 7)); + m_null_count++; + } + else + field->move_field(pos,(uchar*) 0,0); + if (field->type() == MYSQL_TYPE_BIT) + { + /* We have to reserve place for extra bits among null bits */ + ((Field_bit*) field)->set_bit_ptr(null_flags + m_null_count / 8, + m_null_count & 7); + m_null_count+= (field->field_length & 7); + } + field->reset(); + + /* + Test if there is a default field value. The test for ->ptr is to skip + 'offset' fields generated by initialize_tables + */ + if (m_default_field[i] && m_default_field[i]->ptr) + { + /* + default_field[i] is set only in the cases when 'field' can + inherit the default value that is defined for the field referred + by the Item_field object from which 'field' has been created. 
+ */ + const Field *orig_field= m_default_field[i]; + /* Get the value from default_values */ + if (orig_field->is_null_in_record(orig_field->table->s->default_values)) + field->set_null(); + else + { + field->set_notnull(); + memcpy(field->ptr, + orig_field->ptr_in_record(orig_field->table->s->default_values), + field->pack_length_in_rec()); + } + } + + length=field->pack_length(); + pos+= length; + + /* Make entry for create table */ + recinfo->length=length; + recinfo->type= field->tmp_engine_column_type(use_packed_rows); + + // fix table name in field entry + field->set_table_name(&table->alias); + } + + param->recinfo= recinfo; // Pointer to after last field + store_record(table,s->default_values); // Make empty default record + + share->max_rows= ~(ha_rows) 0; + param->end_write_records= HA_POS_ERROR; + + share->db_record_offset= 1; + + if (unlikely(table->file->ha_open(table, table->s->path.str, O_RDWR, + HA_OPEN_TMP_TABLE | HA_OPEN_INTERNAL_TABLE))) + goto err; + + table->db_stat= HA_OPEN_KEYFILE; + table->set_created(); + + thd->mem_root= mem_root_save; + + DBUG_RETURN(false); + +err: + thd->mem_root= mem_root_save; + DBUG_RETURN(true); +} + + +/* + @brief + Read the JSON_TABLE's field definitions from @jt and add the fields to + table @table. +*/ + +bool Create_json_table::add_json_table_fields(THD *thd, TABLE *table, + Table_function_json_table *jt) +{ + TABLE_SHARE *share= table->s; + Json_table_column *jc; + uint fieldnr= 0; + MEM_ROOT *mem_root_save= thd->mem_root; + List_iterator_fast jc_i(jt->m_columns); + + DBUG_ENTER("add_json_table_fields"); + + thd->mem_root= &table->mem_root; + + while ((jc= jc_i++)) + { + Create_field *sql_f= jc->m_field; + List_iterator_fast it2(jt->m_columns); + Json_table_column *jc2; + /* + Initialize length from its original value (number of characters), + which was set in the parser. This is necessary if we're + executing a prepared statement for the second time. + */ + sql_f->length= sql_f->char_length; + if (!(jc->m_explicit_cs= sql_f->charset)) + sql_f->charset= thd->variables.collation_server; + + if (sql_f->prepare_stage1(thd, thd->mem_root, table->file, + table->file->ha_table_flags())) + goto err_exit; + + while ((jc2= it2++) != jc) + { + if (lex_string_cmp(system_charset_info, + &sql_f->field_name, &jc2->m_field->field_name) == 0) + { + my_error(ER_DUP_FIELDNAME, MYF(0), sql_f->field_name.str); + goto err_exit; + } + } + it2.rewind(); + } + + jc_i.rewind(); + + while ((jc= jc_i++)) + { + Create_field *sql_f= jc->m_field; + Record_addr addr(!(sql_f->flags & NOT_NULL_FLAG)); + Bit_addr bit(addr.null()); + + sql_f->prepare_stage2(table->file, table->file->ha_table_flags()); + + if (!sql_f->charset) + sql_f->charset= &my_charset_utf8mb4_bin; + + Field *f= sql_f->type_handler()->make_table_field_from_def(share, + thd->mem_root, &sql_f->field_name, addr, bit, sql_f, sql_f->flags); + if (!f) + goto err_exit; + f->init(table); + add_field(table, f, fieldnr++); + } + + share->fields= fieldnr; + share->blob_fields= m_blob_count; + table->field[fieldnr]= 0; // End marker + share->blob_field[m_blob_count]= 0; // End marker + share->column_bitmap_size= bitmap_buffer_size(share->fields); + + thd->mem_root= mem_root_save; + + DBUG_RETURN(FALSE); +err_exit: + thd->mem_root= mem_root_save; + DBUG_RETURN(TRUE); +} + + +/* + @brief + Given a TABLE_LIST representing JSON_TABLE(...) syntax, create a temporary + table for it. + + @detail + The temporary table will have: + - fields whose names/datatypes are specified in JSON_TABLE(...) 
syntax + - a ha_json_table as the storage engine. + + The uses of the temporary table are: + - name resolution: the query may have references to the columns of + JSON_TABLE(...). A TABLE object will allow to resolve them. + - query execution: ha_json_table will produce JSON_TABLE's rows. +*/ + +TABLE *create_table_for_function(THD *thd, TABLE_LIST *sql_table) +{ + TMP_TABLE_PARAM tp; + TABLE *table; + uint field_count= sql_table->table_function->m_columns.elements+1; + + DBUG_ENTER("create_table_for_function"); + + tp.init(); + tp.table_charset= system_charset_info; + tp.field_count= field_count; + { + Create_json_table maker(&tp, false); + + if (!(table= maker.start(thd, &tp, + sql_table->table_function, &sql_table->alias)) || + maker.add_json_table_fields(thd, table, sql_table->table_function) || + maker.finalize(thd, table, &tp, sql_table->table_function)) + { + if (table) + free_tmp_table(thd, table); + DBUG_RETURN(NULL); + } + } + sql_table->schema_table_name.length= 0; + + my_bitmap_map* bitmaps= + (my_bitmap_map*) thd->alloc(bitmap_buffer_size(field_count)); + my_bitmap_init(&table->def_read_set, (my_bitmap_map*) bitmaps, field_count, + FALSE); + table->read_set= &table->def_read_set; + bitmap_clear_all(table->read_set); + table->alias_name_used= true; + table->next= thd->derived_tables; + thd->derived_tables= table; + table->s->tmp_table= INTERNAL_TMP_TABLE; + table->grant.privilege= SELECT_ACL; + + sql_table->table= table; + + DBUG_RETURN(table); +} + + +int Json_table_column::set(THD *thd, enum_type ctype, const LEX_CSTRING &path) +{ + set(ctype); + if (json_path_setup(&m_path, thd->variables.collation_connection, + (const uchar *) path.str, (const uchar *)(path.str + path.length))) + { + report_path_error_ex(path.str, &m_path, "JSON_TABLE", 1, + Sql_condition::WARN_LEVEL_ERROR); + return 1; + } + + /* + This is done so the ::print function can just print the path string. + Can be removed if we redo that function to print the path using it's + anctual content. Not sure though if we should. + */ + m_path.s.c_str= (const uchar *) path.str; + return 0; +} + + +static int print_path(String *str, const json_path_t *p) +{ + return str->append('\'') || + str->append_for_single_quote((const char *) p->s.c_str, + p->s.str_end - p->s.c_str) || + str->append('\''); +} + + +/* + Print the string representation of the Json_table_column. + + @param thd - the thread + @param f - the remaining array of Field-s from the table + if the Json_table_column + @param str - the string where to print +*/ +int Json_table_column::print(THD *thd, Field **f, String *str) +{ + StringBuffer column_type(str->charset()); + + if (append_identifier(thd, str, &m_field->field_name) || + str->append(' ')) + return 1; + + switch (m_column_type) + { + case FOR_ORDINALITY: + if (str->append("FOR ORDINALITY")) + return 1; + break; + case EXISTS_PATH: + case PATH: + (*f)->sql_type(column_type); + + if (str->append(column_type) || + str->append(m_column_type == PATH ? 
" PATH " : " EXISTS ") || + print_path(str, &m_path)) + return 1; + break; + }; + + if (m_on_empty.print("EMPTY", str) || + m_on_error.print("ERROR", str)) + return 1; + + return 0; +} + + +int Json_table_nested_path::set_path(THD *thd, const LEX_CSTRING &path) +{ + if (json_path_setup(&m_path, thd->variables.collation_connection, + (const uchar *) path.str, (const uchar *)(path.str + path.length))) + { + report_path_error_ex(path.str, &m_path, "JSON_TABLE", 1, + Sql_condition::WARN_LEVEL_ERROR); + return 1; + } + + /* + This is done so the ::print function can just print the path string. + Can be removed if we redo that function to print the path using its + actual content. Not sure though if we should. + */ + m_path.s.c_str= (const uchar *) path.str; + return 0; +} + + +/* + @brief + Perform the action of this response on field @f (emit an error, or set @f + to NULL, or set it to default value). +*/ + +int Json_table_column::On_response::respond(Json_table_column *jc, Field *f) +{ + switch (m_response) + { + case Json_table_column::RESPONSE_NOT_SPECIFIED: + case Json_table_column::RESPONSE_NULL: + f->set_null(); + break; + case Json_table_column::RESPONSE_ERROR: + f->set_null(); + my_error(ER_JSON_TABLE_ERROR_ON_FIELD, MYF(0), + f->field_name.str, f->table->alias.ptr()); + return 1; + case Json_table_column::RESPONSE_DEFAULT: + f->set_notnull(); + f->store(m_default.str, + m_default.length, jc->m_defaults_cs); + break; + } + return 0; +} + + +int Json_table_column::On_response::print(const char *name, String *str) const +{ + const char *resp; + const LEX_CSTRING *ds= NULL; + if (m_response == Json_table_column::RESPONSE_NOT_SPECIFIED) + return 0; + + switch (m_response) + { + case Json_table_column::RESPONSE_NULL: + resp= "NULL"; + break; + case Json_table_column::RESPONSE_ERROR: + resp= "ERROR"; + break; + case Json_table_column::RESPONSE_DEFAULT: + { + resp= "DEFAULT"; + ds= &m_default; + break; + } + default: + resp= NULL; + DBUG_ASSERT(FALSE); /* should never happen. */ + } + + return + (str->append(' ') || str->append(resp) || + (ds && (str->append(" '") || + str->append_for_single_quote(ds->str, ds->length) || + str->append('\''))) || + str->append(" ON ") || + str->append(name)); +} + + +void Table_function_json_table::start_nested_path(Json_table_nested_path *np) +{ + np->m_parent= cur_parent; + *last_sibling_hook= np; + + // Make the newly added path the parent + cur_parent= np; + last_sibling_hook= &np->m_nested; +} + + +void Table_function_json_table::end_nested_path() +{ + last_sibling_hook= &cur_parent->m_next_nested; + cur_parent= cur_parent->m_parent; +} + + +/* + @brief + Perform name-resolution phase tasks + + @detail + - The only argument that needs resolution is the JSON text + - Then, we need to set dependencies: if JSON_TABLE refers to table's + column, e.g. + + JSON_TABLE (t1.col ... ) AS t2 + + then it can be computed only after table t1. + - The dependencies must not form a loop. 
+*/ + +int Table_function_json_table::setup(THD *thd, TABLE_LIST *sql_table, + SELECT_LEX *s_lex) +{ + TABLE *t= sql_table->table; + + thd->where= "JSON_TABLE argument"; + { + bool save_is_item_list_lookup; + bool res; + save_is_item_list_lookup= thd->lex->current_select->is_item_list_lookup; + thd->lex->current_select->is_item_list_lookup= 0; + s_lex->end_lateral_table= sql_table; + res= m_json->fix_fields_if_needed(thd, &m_json); + s_lex->end_lateral_table= NULL; + thd->lex->current_select->is_item_list_lookup= save_is_item_list_lookup; + if (res) + return TRUE; + } + + { + List_iterator_fast jc_i(m_columns); + for (uint i= 0; t->field[i]; i++) + { + Json_table_column *jc= jc_i++; + t->field[i]->change_charset( + jc->m_explicit_cs ? jc->m_explicit_cs : m_json->collation); + /* + The m_field->charset is going to be reused if it's the prepared + statement running several times. So should restored the original + value. + */ + jc->m_field->charset= jc->m_explicit_cs; + } + } + + m_dep_tables= m_json->used_tables(); + + if (m_dep_tables) + { + sql_table->dep_tables|= m_dep_tables; + t->no_cache= TRUE; + if (unlikely(sql_table->dep_tables & sql_table->get_map())) + { + /* Table itself is used in the argument. */ + my_error(ER_WRONG_USAGE, MYF(0), "JSON_TABLE", "argument"); + return TRUE; + } + } + + return FALSE; +} + +void Table_function_json_table::get_estimates(ha_rows *out_rows, + double *scan_time, + double *startup_cost) +{ + *out_rows= 40; + *scan_time= 0.0; + *startup_cost= 0.0; +} + + +/* + Print the string representation of the Json_nested_path object. + Which is the COLUMNS(...) part of the JSON_TABLE definition. + + @param thd - the thread + @param f - the remaining part of the array of Field* objects + taken from the TABLE. + It's needed as Json_table_column objects + don't have links to the related Field-s. + @param str - the string where to print + @param it - the remaining part of the Json_table_column list + @param last_column - the last column taken from the list. +*/ + +int Json_table_nested_path::print(THD *thd, Field ***f, String *str, + List_iterator_fast &it, + Json_table_column **last_column) +{ + Json_table_nested_path *c_path= this; + Json_table_nested_path *c_nested= m_nested; + Json_table_column *jc= *last_column; + bool first_column= TRUE; + + if (str->append("COLUMNS (")) + return 1; + + do + { + if (first_column) + first_column= FALSE; + else if (str->append(", ")) + return 1; + + if (jc->m_nest == c_path) + { + if (jc->print(thd, *f, str)) + return 1; + if (!(jc= it++)) + goto exit_ok; + ++(*f); + } + else if (jc->m_nest == c_nested) + { + if (str->append("NESTED PATH ") || + print_path(str, &jc->m_nest->m_path) || + c_nested->print(thd, f, str, it, &jc)) + return 1; + c_nested= c_nested->m_next_nested; + } + else + break; + } while(jc); + +exit_ok: + if (str->append(")")) + return 1; + + *last_column= jc; + return 0; +} + + +/* + Print the SQL definition of the JSON_TABLE. + Used mostly as a part of the CREATE VIEW statement. 
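+  The generated text must parse back as valid SQL, since it ends up stored in
+  view definitions (cf. the Bug#30263373 test case, where unquoted column
+  names used to produce an invalid definition).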
+ + @param thd - the thread + @param sql_table - the corresponding TABLE_LIST object + @param str - the string where to print + @param query_type - the query type +*/ +int Table_function_json_table::print(THD *thd, TABLE_LIST *sql_table, + String *str, enum_query_type query_type) +{ + List_iterator_fast jc_i(m_columns); + Json_table_column *jc= jc_i++; + Field **f_list= sql_table->table->field; + + DBUG_ENTER("Table_function_json_table::print"); + + if (str->append("JSON_TABLE(")) + DBUG_RETURN(TRUE); + + m_json->print(str, query_type); + + if (str->append(", ") || + print_path(str, &m_nested_path.m_path) || + str->append(' ') || + m_nested_path.print(thd, &f_list, str, jc_i, &jc) || + str->append(')')) + DBUG_RETURN(TRUE); + + DBUG_RETURN(0); +} + + +void Table_function_json_table::fix_after_pullout(TABLE_LIST *sql_table, + st_select_lex *new_parent, bool merge) +{ + sql_table->dep_tables&= ~m_dep_tables; + + m_json->fix_after_pullout(new_parent, &m_json, merge); + m_dep_tables= m_json->used_tables(); + + sql_table->dep_tables|= m_dep_tables; +} + + diff --git a/sql/json_table.h b/sql/json_table.h new file mode 100644 index 00000000000..bab4ef9c185 --- /dev/null +++ b/sql/json_table.h @@ -0,0 +1,256 @@ +#ifndef JSON_TABLE_INCLUDED +#define JSON_TABLE_INCLUDED + +/* Copyright (c) 2020, MariaDB Corporation. All rights reserved. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA */ + + +#include + +class Json_table_column; + +/* + The Json_table_nested_path represents the 'current nesting' level + for a set of JSON_TABLE columns. + Each column (Json_table_column instance) is linked with corresponding + 'nested path' object and gets its piece of JSON to parse during the computation + phase. + The root 'nested_path' is always present as a part of Table_function_json_table, + then other 'nested_paths' can be created and linked into a tree structure when new + 'NESTED PATH' is met. The nested 'nested_paths' are linked with 'm_nested', the same-level + 'nested_paths' are linked with 'm_next_nested'. + So for instance + JSON_TABLE( '...', '$[*]' + COLUMNS( a INT PATH '$.a' , + NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$', + NESTED PATH '$.c[*]' COLUMNS(x INT PATH '$')), + NESTED PATH '$.n[*]' COLUMNS (z INT PATH '$')) + results in 4 'nested_path' created: + root nested_b nested_c nested_n + m_path '$[*]' '$.b[*]' '$.c[*]' '$.n[*] + m_nested &nested_b &nested_c NULL NULL + n_next_nested NULL &nested_n NULL NULL + + and 4 columns created: + a b x z + m_nest &root &nested_b &nested_c &nested_n +*/ + +class Json_table_nested_path : public Sql_alloc +{ +public: + json_path_t m_path; /* The JSON Path to get the rows from */ + bool m_null; // TRUE <=> producing a NULL-complemented row. 
+ + /*** Construction interface ***/ + Json_table_nested_path(): + m_null(TRUE), m_nested(NULL), m_next_nested(NULL) + {} + + int set_path(THD *thd, const LEX_CSTRING &path); + + /*** Methods for performing a scan ***/ + void scan_start(CHARSET_INFO *i_cs, const uchar *str, const uchar *end); + int scan_next(); + bool check_error(const char *str); + + /*** Members for getting the values we've scanned to ***/ + const uchar *get_value() { return m_engine.value_begin; } + const uchar *get_value_end() { return m_engine.s.str_end; } + + /* Counts the rows produced. Used by FOR ORDINALITY columns */ + longlong m_ordinality_counter; + + int print(THD *thd, Field ***f, String *str, + List_iterator_fast &it, + Json_table_column **last_column); +private: + /* The head of the list of nested NESTED PATH statements. */ + Json_table_nested_path *m_nested; + + /* in the above list items are linked with the */ + Json_table_nested_path *m_next_nested; + + /*** Members describing NESTED PATH structure ***/ + /* Parent nested path. The "root" path has this NULL */ + Json_table_nested_path *m_parent; + + /*** Members describing current JSON Path scan state ***/ + /* The JSON Parser and JSON Path evaluator */ + json_engine_t m_engine; + + /* The path the parser is currently pointing to */ + json_path_t m_cur_path; + + /* The child NESTED PATH we're currently scanning */ + Json_table_nested_path *m_cur_nested; + + friend class Table_function_json_table; +}; + + +/* + @brief + Describes the column definition in JSON_TABLE(...) syntax. + + @detail + Has methods for printing/handling errors but otherwise it's a static + object. +*/ + +class Json_table_column : public Sql_alloc +{ +public: + enum enum_type + { + FOR_ORDINALITY, + PATH, + EXISTS_PATH + }; + + enum enum_on_type + { + ON_EMPTY, + ON_ERROR + }; + + enum enum_on_response + { + RESPONSE_NOT_SPECIFIED, + RESPONSE_ERROR, + RESPONSE_NULL, + RESPONSE_DEFAULT + }; + + struct On_response + { + public: + Json_table_column::enum_on_response m_response; + LEX_CSTRING m_default; + int respond(Json_table_column *jc, Field *f); + int print(const char *name, String *str) const; + bool specified() const { return m_response != RESPONSE_NOT_SPECIFIED; } + }; + + enum_type m_column_type; + json_path_t m_path; + On_response m_on_error; + On_response m_on_empty; + Create_field *m_field; + Json_table_nested_path *m_nest; + CHARSET_INFO *m_explicit_cs; + CHARSET_INFO *m_defaults_cs; + + void set(enum_type ctype) + { + m_column_type= ctype; + } + int set(THD *thd, enum_type ctype, const LEX_CSTRING &path); + Json_table_column(Create_field *f, Json_table_nested_path *nest) : + m_field(f), m_nest(nest) + { + m_on_error.m_response= RESPONSE_NOT_SPECIFIED; + m_on_empty.m_response= RESPONSE_NOT_SPECIFIED; + } + int print(THD *tnd, Field **f, String *str); +}; + + +/* + Class represents the table function, the function + that returns the table as a result so supposed to appear + in the FROM list of the SELECT statement. + At the moment there is only one such function JSON_TABLE, + so the class named after it, but should be refactored + into the hierarchy root if we create more of that functions. + + As the parser finds the table function in the list it + creates an instance of Table_function_json_table storing it + into the TABLE_LIST::table_function. + Then the ha_json_table instance is created based on it in + the create_table_for_function(). +*/ + +class Table_function_json_table : public Sql_alloc +{ +public: + /*** Basic properties of the original JSON_TABLE(...) 
+  Item *m_json;  /* The JSON value to be parsed. */
+
+  /* The COLUMNS(...) part representation. */
+  Json_table_nested_path m_nested_path;
+
+  /* The list of table column definitions. */
+  List<Json_table_column> m_columns;
+
+  /*** Name resolution functions ***/
+  int setup(THD *thd, TABLE_LIST *sql_table, SELECT_LEX *s_lex);
+
+  /*** Functions for interaction with the Query Optimizer ***/
+  void fix_after_pullout(TABLE_LIST *sql_table,
+                         st_select_lex *new_parent, bool merge);
+  void update_used_tables() { m_json->update_used_tables(); }
+
+  table_map used_tables() const { return m_dep_tables; }
+  bool join_cache_allowed() const { return !m_dep_tables; }
+  void get_estimates(ha_rows *out_rows,
+                     double *scan_time, double *startup_cost);
+
+  int print(THD *thd, TABLE_LIST *sql_table,
+            String *str, enum_query_type query_type);
+
+  /*** Construction interface to be used from the parser ***/
+  Table_function_json_table(Item *json):
+    m_json(json)
+  {
+    cur_parent= &m_nested_path;
+    last_sibling_hook= &m_nested_path.m_nested;
+  }
+
+  void start_nested_path(Json_table_nested_path *np);
+  void end_nested_path();
+  Json_table_nested_path *get_cur_nested_path() { return cur_parent; }
+
+  /* SQL Parser: current column in JSON_TABLE (...) syntax */
+  Json_table_column *m_cur_json_table_column;
+
+  /* SQL Parser: charset of the current text literal */
+  CHARSET_INFO *m_text_literal_cs;
+
+private:
+
+  /*
+    The JSON argument may refer to columns of other tables.
+    Those tables have to be marked as dependencies; the mask of the
+    dependent tables is calculated in ::setup().
+  */
+  table_map m_dep_tables;
+
+  /* Current NESTED PATH level being parsed */
+  Json_table_nested_path *cur_parent;
+
+  /*
+    Pointer to the list tail where we add the next NESTED PATH.
+    It points to cur_parent->m_nested for the first nested path
+    and to prev_nested->m_next_nested for the subsequent ones.
+  */
+  Json_table_nested_path **last_sibling_hook;
+};
+
+
+TABLE *create_table_for_function(THD *thd, TABLE_LIST *sql_table);
+
+#endif /* JSON_TABLE_INCLUDED */
+
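As an illustration of what the classes above model (a sketch for illustration only; all table and column names here are invented), a single JSON_TABLE call can combine FOR ORDINALITY, PATH, EXISTS PATH and NESTED PATH columns, plus an ON EMPTY response:

    SELECT jt.*
    FROM JSON_TABLE(
           '[{"name":"Laptop","price":800,"tags":["new","sale"]},{"name":"Mouse"}]',
           '$[*]' COLUMNS (
             rowid    FOR ORDINALITY,                          -- Json_table_column::FOR_ORDINALITY
             name     VARCHAR(32) PATH '$.name',               -- Json_table_column::PATH
             price    INT PATH '$.price' DEFAULT '0' ON EMPTY, -- On_response RESPONSE_DEFAULT
             has_tags INT EXISTS PATH '$.tags',                -- Json_table_column::EXISTS_PATH
             NESTED PATH '$.tags[*]' COLUMNS (
               tag VARCHAR(16) PATH '$'                        -- column owned by the nested path
             )
           )
         ) AS jt;

For the second array element, which has no 'tags' member, the nested path is expected to produce a NULL-complemented row, which is what Json_table_nested_path::m_null tracks.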
diff --git a/sql/lex.h b/sql/lex.h
index b40883c4ea3..440ac2de664 100644
--- a/sql/lex.h
+++ b/sql/lex.h
@@ -211,6 +211,7 @@ static SYMBOL symbols[] = {
   { "ELSE", SYM(ELSE)},
   { "ELSEIF", SYM(ELSEIF_MARIADB_SYM)},
   { "ELSIF", SYM(ELSIF_MARIADB_SYM)},
+  { "EMPTY", SYM(EMPTY_SYM)},
   { "ENABLE", SYM(ENABLE_SYM)},
   { "ENCLOSED", SYM(ENCLOSED)},
   { "END", SYM(END)},
@@ -421,6 +422,7 @@ static SYMBOL symbols[] = {
   { "NATIONAL", SYM(NATIONAL_SYM)},
   { "NATURAL", SYM(NATURAL)},
   { "NCHAR", SYM(NCHAR_SYM)},
+  { "NESTED", SYM(NESTED_SYM)},
   { "NEVER", SYM(NEVER_SYM)},
   { "NEW", SYM(NEW_SYM)},
   { "NEXT", SYM(NEXT_SYM)},
@@ -455,6 +457,7 @@ static SYMBOL symbols[] = {
   { "OPTIONALLY", SYM(OPTIONALLY)},
   { "OR", SYM(OR_SYM)},
   { "ORDER", SYM(ORDER_SYM)},
+  { "ORDINALITY", SYM(ORDINALITY_SYM)},
   { "OTHERS", SYM(OTHERS_MARIADB_SYM)},
   { "OUT", SYM(OUT_SYM)},
   { "OUTER", SYM(OUTER)},
@@ -468,6 +471,7 @@ static SYMBOL symbols[] = {
   { "PAGE_CHECKSUM", SYM(PAGE_CHECKSUM_SYM)},
   { "PARSER", SYM(PARSER_SYM)},
   { "PARSE_VCOL_EXPR", SYM(PARSE_VCOL_EXPR_SYM)},
+  { "PATH", SYM(PATH_SYM)},
   { "PERIOD", SYM(PERIOD_SYM)},
   { "PARTIAL", SYM(PARTIAL)},
   { "PARTITION", SYM(PARTITION_SYM)},
@@ -758,6 +762,7 @@ static SYMBOL sql_functions[] = {
   { "GROUP_CONCAT", SYM(GROUP_CONCAT_SYM)},
   { "JSON_ARRAYAGG", SYM(JSON_ARRAYAGG_SYM)},
   { "JSON_OBJECTAGG", SYM(JSON_OBJECTAGG_SYM)},
+  { "JSON_TABLE", SYM(JSON_TABLE_SYM)},
   { "LAG", SYM(LAG_SYM)},
   { "LEAD", SYM(LEAD_SYM)},
   { "MAX", SYM(MAX_SYM)},
diff --git a/sql/opt_subselect.cc b/sql/opt_subselect.cc
index 4ba48c565d5..27ce7f3b940 100644
--- a/sql/opt_subselect.cc
+++ b/sql/opt_subselect.cc
@@ -1414,6 +1414,14 @@ void get_delayed_table_estimates(TABLE *table,
                                  double *startup_cost)
 {
   Item_in_subselect *item= table->pos_in_table_list->jtbm_subselect;
+  Table_function_json_table *table_function=
+    table->pos_in_table_list->table_function;
+
+  if (table_function)
+  {
+    table_function->get_estimates(out_rows, scan_time, startup_cost);
+    return;
+  }
 
   DBUG_ASSERT(item->engine->engine_type() ==
               subselect_engine::HASH_SJ_ENGINE);
@@ -1784,6 +1792,10 @@ static bool convert_subq_to_sj(JOIN *parent_join, Item_in_subselect *subq_pred)
       tl->jtbm_subselect->fix_after_pullout(parent_lex, &dummy, true);
       DBUG_ASSERT(dummy == tl->jtbm_subselect);
     }
+    else if (tl->table_function)
+    {
+      tl->table_function->fix_after_pullout(tl, parent_lex, true);
+    }
     SELECT_LEX *old_sl= tl->select_lex;
     tl->select_lex= parent_join->select_lex;
     for (TABLE_LIST *emb= tl->embedding;
diff --git a/sql/opt_table_elimination.cc b/sql/opt_table_elimination.cc
index 3958797ec44..d5d60f8c0aa 100644
--- a/sql/opt_table_elimination.cc
+++ b/sql/opt_table_elimination.cc
@@ -637,6 +637,22 @@ void eliminate_tables(JOIN *join)
   List_iterator<Item> it(join->fields_list);
   while ((item= it++))
     used_tables |= item->used_tables();
+
+  {
+    /*
+      Table function JSON_TABLE() can have references to other tables. Do not
+      eliminate the tables that JSON_TABLE() refers to.
+      Note: the JSON_TABLE itself cannot be eliminated as it doesn't
+      have unique keys.
+    */
+    List_iterator<TABLE_LIST> it(join->select_lex->leaf_tables);
+    TABLE_LIST *tbl;
+    while ((tbl= it++))
+    {
+      if (tbl->table_function)
+        used_tables|= tbl->table_function->used_tables();
+    }
+  }
 
   /* Add tables referred to from ORDER BY and GROUP BY lists */
   ORDER *all_lists[]= { join->order, join->group_list};
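To see why eliminate_tables() must account for table functions, consider a query of the following shape (a sketch over a hypothetical schema; t1 and ext are invented names). Here ext is LEFT JOINed on a unique key and none of its columns appear outside the JSON_TABLE() argument, so without the extra loop above it could look like a candidate for elimination even though the table function still reads ext.doc:

    SELECT t1.id, jt.v
    FROM t1
    LEFT JOIN ext ON ext.id = t1.id
    LEFT JOIN JSON_TABLE(ext.doc, '$[*]' COLUMNS (v INT PATH '$')) AS jt
           ON 1=1;

Adding table_function->used_tables() to used_tables keeps ext, and any other table referenced from the JSON argument, out of the set of elimination candidates.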
diff --git a/sql/opt_trace.cc b/sql/opt_trace.cc
index e1f402a4d7c..ba9220cac44 100644
--- a/sql/opt_trace.cc
+++ b/sql/opt_trace.cc
@@ -247,9 +247,11 @@ void opt_trace_disable_if_no_tables_access(THD *thd, TABLE_LIST *tbl)
   {
     /*
       Anonymous derived tables (as in
-      "SELECT ... FROM (SELECT ...)") don't have their grant.privilege set.
+      "SELECT ... FROM (SELECT ...)") and table functions
+      don't have their grant.privilege set.
     */
-    if (!t->is_anonymous_derived_table())
+    if (!t->is_anonymous_derived_table() &&
+        !t->table_function)
     {
       const GRANT_INFO backup_grant_info= t->grant;
       Security_context *const backup_table_sctx= t->security_ctx;
diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt
index 555836406fb..7f70a0ff337 100644
--- a/sql/share/errmsg-utf8.txt
+++ b/sql/share/errmsg-utf8.txt
@@ -7975,3 +7975,7 @@ ER_PK_INDEX_CANT_BE_IGNORED
         eng "A primary key cannot be marked as IGNORE"
 ER_BINLOG_UNSAFE_SKIP_LOCKED
         eng "SKIP LOCKED makes this statement unsafe"
+ER_JSON_TABLE_ERROR_ON_FIELD
+        eng "Field '%s' can't be set for JSON_TABLE '%s'."
+ER_JSON_TABLE_ALIAS_REQUIRED
+        eng "Every table function must have an alias."
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index 5aa2a101822..eae8f22a881 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -8150,6 +8150,14 @@ bool check_grant(THD *thd, privilege_t want_access, TABLE_LIST *tables,
     if (!want_access)
       continue;                                 // ok
 
+    if (t_ref->table_function)
+    {
+      /* A table function does not require any privilege checks. */
+      t_ref->grant.privilege|= TMP_TABLE_ACLS;
+      t_ref->grant.want_privilege= NO_ACL;
+      continue;
+    }
+
     if (!(~t_ref->grant.privilege & want_access) ||
         t_ref->is_anonymous_derived_table() || t_ref->schema_table)
     {
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 9a4dea3a216..5e4bb0106ce 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -3684,6 +3684,14 @@ open_and_process_table(THD *thd, TABLE_LIST *tables, uint *counter, uint flags,
       error= TRUE;
       goto end;
     }
+
+  if (tables->table_function)
+  {
+    if (!create_table_for_function(thd, tables))
+      error= TRUE;
+    goto end;
+  }
+
   DBUG_PRINT("tcache", ("opening table: '%s'.'%s'  item: %p",
                         tables->db.str, tables->table_name.str, tables));
   (*counter)++;
@@ -6454,7 +6462,10 @@ find_field_in_tables(THD *thd, Item_ident *item,
       db= name_buff;
     }
 
-  if (last_table)
+  if (first_table && first_table->select_lex &&
+      first_table->select_lex->end_lateral_table)
+    last_table= first_table->select_lex->end_lateral_table;
+  else if (last_table)
     last_table= last_table->next_name_resolution_table;
 
   for (; cur_table != last_table ;
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 0de1d74ec69..466ab8f7c86 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -7723,5 +7723,9 @@ public:
 
 extern THD_list server_threads;
 
+void setup_tmp_table_column_bitmaps(TABLE *table, uchar *bitmaps);
+void
+setup_tmp_table_column_bitmaps(TABLE *table, uchar *bitmaps, uint field_count);
+
 #endif /* MYSQL_SERVER */
 #endif /* SQL_CLASS_INCLUDED */
diff --git a/sql/sql_explain.cc b/sql/sql_explain.cc
index 353217982e2..eadbb0d4a5e 100644
--- a/sql/sql_explain.cc
+++ b/sql/sql_explain.cc
@@ -1635,6 +1635,9 @@ void Explain_table_access::tag_to_json(Json_writer *writer, enum explain_extra_t
     case ET_DISTINCT:
       writer->add_member("distinct").add_bool(true);
       break;
+    case ET_TABLE_FUNCTION:
+      writer->add_member("table_function").add_str("json_table");
+      break;
 
     default:
       DBUG_ASSERT(0);
@@ -2027,6 +2030,9 @@ void Explain_table_access::append_tag_name(String *str, enum explain_extra_tag t
       if (loose_scan_is_scanning)
         str->append(" (scanning)");
       break;
+    case ET_TABLE_FUNCTION:
+      str->append("Table function: json_table");
+      break;
     }
     default:
       str->append(extra_tag_text[tag]);
diff --git a/sql/sql_explain.h b/sql/sql_explain.h
index 42590e0bea0..d034d7cbf35 100644
--- a/sql/sql_explain.h
+++ b/sql/sql_explain.h
@@ -550,6 +550,7 @@ enum explain_extra_tag
   ET_CONST_ROW_NOT_FOUND,
   ET_UNIQUE_ROW_NOT_FOUND,
   ET_IMPOSSIBLE_ON_CONDITION,
+  ET_TABLE_FUNCTION,
 
   ET_total
 };
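A quick way to see the new EXPLAIN tag (a sketch; exact row contents depend on the optimizer, but the Extra text comes from ET_TABLE_FUNCTION above):

    EXPLAIN
    SELECT jt.a
    FROM JSON_TABLE('[1,2,3]', '$[*]' COLUMNS (a INT PATH '$')) AS jt;

The row for jt is expected to show "Table function: json_table" in the Extra column, and EXPLAIN FORMAT=JSON is expected to carry "table_function": "json_table" in the corresponding table node.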
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index 1dbeff69cb5..cf652c3dfb4 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -3840,6 +3840,12 @@ void LEX::cleanup_lex_after_parse_error(THD *thd)
       thd->lex->sphead= NULL;
     }
   }
+
+  /*
+    json_table must be NULL before parsing the next query.
+    Rather than overloading LEX::start, it is enough to reset it here.
+  */
+  thd->lex->json_table= 0;
 }
 
 /*
@@ -3927,7 +3933,7 @@ void Query_tables_list::destroy_query_tables_list()
 LEX::LEX()
   : explain(NULL), result(0), part_info(NULL), arena_for_set_stmt(0),
     mem_root_for_set_stmt(0),
-    option_type(OPT_DEFAULT), context_analysis_only(0), sphead(0),
+    json_table(NULL), option_type(OPT_DEFAULT), context_analysis_only(0), sphead(0),
     default_used(0), is_lex_started(0),
     limit_rows_examined_cnt(ULONGLONG_MAX)
 {
@@ -5054,6 +5060,7 @@ void st_select_lex::remap_tables(TABLE_LIST *derived, table_map map,
                                  uint tablenr, SELECT_LEX *parent_lex)
 {
   bool first_table= TRUE;
+  bool has_table_function= FALSE;
   TABLE_LIST *tl;
   table_map first_map;
   uint first_tablenr;
@@ -5095,6 +5102,19 @@ void st_select_lex::remap_tables(TABLE_LIST *derived, table_map map,
          emb && emb->select_lex == old_sl;
          emb= emb->embedding)
       emb->select_lex= parent_lex;
+
+    if (tl->table_function)
+      has_table_function= TRUE;
+  }
+
+  if (has_table_function)
+  {
+    ti.rewind();
+    while ((tl= ti++))
+    {
+      if (tl->table_function)
+        tl->table_function->fix_after_pullout(tl, parent_lex, true);
+    }
   }
 }
 
@@ -5266,6 +5286,9 @@ void SELECT_LEX::update_used_tables()
           left_expr->walk(&Item::update_table_bitmaps_processor, FALSE, NULL);
       }
 
+      if (tl->table_function)
+        tl->table_function->update_used_tables();
+
       embedding= tl->embedding;
       while (embedding)
       {
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index 7e3a1152d0b..c7278f0555e 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -34,6 +34,7 @@
 #include "sql_tvc.h"
 #include "item.h"
 #include "sql_limit.h"              // Select_limit_counters
+#include "json_table.h"             // Json_table_column
 #include "sql_schema.h"
 
 /* Used for flags of nesting constructs */
@@ -454,6 +455,7 @@ enum enum_drop_mode
 #define TL_OPTION_IGNORE_LEAVES 4
 #define TL_OPTION_ALIAS         8
 #define TL_OPTION_SEQUENCE      16
+#define TL_OPTION_TABLE_FUNCTION 32
 
 typedef List<Item> List_item;
 typedef Mem_root_array<ORDER*, true> Group_list_ptrs;
@@ -1182,6 +1184,8 @@ public:
   enum leaf_list_state {UNINIT, READY, SAVED};
   enum leaf_list_state prep_leaf_list_state;
   uint insert_tables;
+  // Last table for LATERAL join, used by table functions
+  TABLE_LIST *end_lateral_table;
   st_select_lex *merged_into; /* select which this select is merged into */
                               /* (not 0 only for views/derived tables) */
 
@@ -1405,7 +1409,8 @@ public:
                               enum_mdl_type mdl_type= MDL_SHARED_READ,
                               List<Index_hint> *hints= 0,
                               List<String> *partition_names= 0,
-                              LEX_STRING *option= 0);
+                              LEX_STRING *option= 0,
+                              Table_function_json_table *tfunc= 0);
   TABLE_LIST* get_table_list();
   bool init_nested_join(THD *thd);
   TABLE_LIST *end_nested_join(THD *thd);
@@ -1444,8 +1449,8 @@ public:
   ha_rows get_limit();
 
   friend struct LEX;
-  st_select_lex() : group_list_ptrs(NULL), braces(0), automatic_brackets(0),
-    n_sum_items(0), n_child_sum_items(0)
+  st_select_lex() : group_list_ptrs(NULL), end_lateral_table(NULL), braces(0),
+    automatic_brackets(0), n_sum_items(0), n_child_sum_items(0)
   {}
   void make_empty_select()
   {
@@ -3313,6 +3318,7 @@ public:
   SQL_I_List<ORDER> proc_list;
   SQL_I_List<TABLE_LIST> auxiliary_table_list, save_list;
   Column_definition *last_field;
+  Table_function_json_table *json_table;
   Item_sum *in_sum_func;
   udf_func udf;
   HA_CHECK_OPT   check_opt;                 // check/repair options
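The end_lateral_table bookkeeping exists because the JSON argument of a table function may refer to columns of tables that precede it in the FROM list, but not to the function's own columns or to tables that follow it. A minimal sketch (t1 is an invented table with a JSON column named attrs):

    SELECT t1.id, jt.tag
    FROM t1,
         JSON_TABLE(t1.attrs, '$.tags[*]'
                    COLUMNS (tag VARCHAR(16) PATH '$')) AS jt;

Name resolution for t1.attrs is intended to stop at the table function itself, and the tables the argument uses end up in m_dep_tables via Table_function_json_table::setup().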
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index ddfc49c6d52..237944be44d 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -7104,6 +7104,9 @@ check_table_access(THD *thd, privilege_t requirements, TABLE_LIST *tables,
     if (table_ref->is_anonymous_derived_table())
       continue;
 
+    if (table_ref->table_function)
+      continue;
+
     if (table_ref->sequence)
     {
       /*
        We want to have either SELECT or INSERT rights to sequences depending
@@ -8159,7 +8162,8 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd,
                                              enum_mdl_type mdl_type,
                                              List<Index_hint> *index_hints_arg,
                                              List<String> *partition_names,
-                                             LEX_STRING *option)
+                                             LEX_STRING *option,
+                                             Table_function_json_table *tfunc)
 {
   TABLE_LIST *ptr;
   TABLE_LIST *UNINIT_VAR(previous_table_ref); /* The table preceding the current one. */
@@ -8184,6 +8188,7 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd,
   }
 
   if (unlikely(table->is_derived_table() == FALSE && table->db.str &&
+               !(table_options & TL_OPTION_TABLE_FUNCTION) &&
                check_db_name((LEX_STRING*) &table->db)))
   {
     my_error(ER_WRONG_DB_NAME, MYF(0), table->db.str);
@@ -8227,6 +8232,7 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd,
 
   ptr->table_name= table->table;
   ptr->lock_type= lock_type;
+  ptr->table_function= tfunc;
   ptr->updating= MY_TEST(table_options & TL_OPTION_UPDATING);
   /* TODO: remove TL_OPTION_FORCE_INDEX as it looks like it's not used */
   ptr->force_index= MY_TEST(table_options & TL_OPTION_FORCE_INDEX);
@@ -8271,7 +8277,9 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd,
     {
       if (unlikely(!my_strcasecmp(table_alias_charset, alias_str.str,
                                   tables->alias.str) &&
-                   !cmp(&ptr->db, &tables->db) && ! tables->sequence))
+                   (tables->table_function || ptr->table_function ||
+                    !cmp(&ptr->db, &tables->db)) &&
+                   !tables->sequence))
       {
         my_error(ER_NONUNIQ_TABLE, MYF(0), alias_str.str); /* purecov: tested */
         DBUG_RETURN(0);                                    /* purecov: tested */
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 5f974397b10..3b59a81f8be 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -1242,6 +1242,14 @@ JOIN::prepare(TABLE_LIST *tables_init, COND *conds_init, uint og_num,
   enum_parsing_place save_place=
     thd->lex->current_select->context_analysis_place;
   thd->lex->current_select->context_analysis_place= SELECT_LIST;
+
+  for (TABLE_LIST *tbl= tables_list; tbl; tbl= tbl->next_local)
+  {
+    if (tbl->table_function &&
+        tbl->table_function->setup(thd, tbl, select_lex_arg))
+      DBUG_RETURN(-1);
+  }
+
   if (setup_fields(thd, ref_ptrs, fields_list, MARK_COLUMNS_READ,
                    &all_fields, &select_lex->pre_fix, 1))
     DBUG_RETURN(-1);
@@ -12753,6 +12761,10 @@ uint check_join_cache_usage(JOIN_TAB *tab,
       !join->allowed_outer_join_with_cache)
     goto no_join_cache;
 
+  if (tab->table->pos_in_table_list->table_function &&
+      !tab->table->pos_in_table_list->table_function->join_cache_allowed())
+    goto no_join_cache;
+
   /*
     Non-linked join buffers can't guarantee one match
   */
@@ -16531,7 +16543,7 @@ simplify_joins(JOIN *join, List<TABLE_LIST> *join_list, COND *conds, bool top,
     if (table->outer_join && !table->embedding && table->table)
       table->table->maybe_null= FALSE;
     table->outer_join= 0;
-    if (!(straight_join || table->straight))
+    if (!(straight_join || table->straight || table->table_function))
     {
       table->dep_tables= 0;
       TABLE_LIST *embedding= table->embedding;
@@ -26854,6 +26866,9 @@ bool JOIN_TAB::save_explain_data(Explain_table_access *eta,
       !((QUICK_ROR_INTERSECT_SELECT*)cur_quick)->need_to_fetch_row)
     key_read=1;
 
+  if (table_list->table_function)
+    eta->push_extra(ET_TABLE_FUNCTION);
+
   if (info)
   {
     eta->push_extra(info);
@@ -27690,6 +27705,14 @@ void TABLE_LIST::print(THD *thd, table_map eliminated_tables, String *str,
       cmp_name= table_name.str;
     }
   }
+  else if (table_function)
+  {
+    /* A table function. */
+    (void) table_function->print(thd, this, str, query_type);
+    str->append(' ');
+    append_identifier(thd, str, &alias);
+    cmp_name= alias.str;
+  }
   else
   {
     // A normal table
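One consequence of the ER_NONUNIQ_TABLE change above: a table function has no database of its own, so its alias is checked against every other alias in the same FROM list. A sketch of a query that is expected to be rejected (t1 is an invented table):

    SELECT *
    FROM t1 AS x,
         JSON_TABLE('[1]', '$[*]' COLUMNS (a INT PATH '$')) AS x;
    -- expected to fail with ER_NONUNIQ_TABLE: Not unique table/alias: 'x'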
diff --git a/sql/sql_select.h b/sql/sql_select.h
index ff9264d8bf7..cf451979f8e 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -2448,7 +2448,6 @@ bool instantiate_tmp_table(TABLE *table, KEY *keyinfo,
                            TMP_ENGINE_COLUMNDEF **recinfo,
                            ulonglong options);
 bool open_tmp_table(TABLE *table);
-void setup_tmp_table_column_bitmaps(TABLE *table, uchar *bitmaps);
 double prev_record_reads(const POSITION *positions, uint idx, table_map found_ref);
 void fix_list_after_tbl_changes(SELECT_LEX *new_parent, List<TABLE_LIST> *tlist);
 double get_tmp_table_lookup_cost(THD *thd, double row_count, uint row_size);
diff --git a/sql/sql_view.cc b/sql/sql_view.cc
index 3bacc3d1499..a5b2fb749d6 100644
--- a/sql/sql_view.cc
+++ b/sql/sql_view.cc
@@ -522,7 +522,7 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views,
     {
       /* is this table temporary and is not view? */
       if (tbl->table->s->tmp_table != NO_TMP_TABLE && !tbl->view &&
-          !tbl->schema_table)
+          !tbl->schema_table && !tbl->table_function)
       {
         my_error(ER_VIEW_SELECT_TMPTABLE, MYF(0), tbl->alias.str);
         res= TRUE;
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 06b02bc2b93..9c994b326df 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -68,6 +68,7 @@
 #include "sql_sequence.h"
 #include "my_base.h"
 #include "sql_type_json.h"
+#include "json_table.h"
 
 /* this is to get the bison compilation windows warnings out */
 #ifdef _MSC_VER
@@ -195,6 +196,18 @@ void _CONCAT_UNDERSCORED(turn_parser_debug_on,yyparse)()
     MYSQL_YYABORT;                      \
   } while(0)
 
+#define set_collation(X)                                              \
+  do {                                                                \
+    if (X)                                                            \
+    {                                                                 \
+      if (unlikely(Lex->charset && !my_charset_same(Lex->charset,X))) \
+        my_yyabort_error((ER_COLLATION_CHARSET_MISMATCH, MYF(0),      \
+                          X->name,Lex->charset->csname));             \
+      Lex->charset= X;                                                \
+    }                                                                 \
+  } while(0)
+
+
 %}
 %union {
   int  num;
@@ -219,6 +232,7 @@ void _CONCAT_UNDERSCORED(turn_parser_debug_on,yyparse)()
   Lex_for_loop_st for_loop;
   Lex_for_loop_bounds_st for_loop_bounds;
   Lex_trim_st trim;
+  Json_table_column::On_response json_on_response;
   vers_history_point_t vers_history_point;
   struct
   {
@@ -506,6 +520,7 @@ End SQL_MODE_ORACLE_SPECIFIC */
 %token  ELSEIF_MARIADB_SYM
 %token  ELSE                          /* SQL-2003-R */
 %token  ELSIF_ORACLE_SYM              /* PLSQL-R    */
+%token  EMPTY_SYM                     /* SQL-2016-R */
 %token  ENCLOSED
 %token  ESCAPED
 %token  EXCEPT_SYM                    /* SQL-2003-R */
@@ -524,6 +539,7 @@ End SQL_MODE_ORACLE_SPECIFIC */
 %token  GROUP_CONCAT_SYM
 %token  JSON_ARRAYAGG_SYM
 %token  JSON_OBJECTAGG_SYM
+%token  JSON_TABLE_SYM
 %token  GROUP_SYM                     /* SQL-2003-R */
 %token  HAVING                        /* SQL-2003-R */
 %token  HOUR_MICROSECOND_SYM
@@ -582,6 +598,7 @@ End SQL_MODE_ORACLE_SPECIFIC */
 %token  MOD_SYM                       /* SQL-2003-N */
 %token  NATURAL                       /* SQL-2003-R */
 %token  NEG
+%token  NESTED_SYM                    /* SQL-2003-N */
 %token  NOT_SYM                       /* SQL-2003-R */
 %token  NO_WRITE_TO_BINLOG
 %token  NOW_SYM
@@ -593,6 +610,7 @@ End SQL_MODE_ORACLE_SPECIFIC */
 %token  OPTIMIZE
 %token  OPTIONALLY
 %token  ORDER_SYM                     /* SQL-2003-R */
+%token  ORDINALITY_SYM                /* SQL-2003-N */
 %token  OR_SYM                        /* SQL-2003-R */
 %token  OTHERS_ORACLE_SYM             /* SQL-2011-N, PLSQL-R */
 %token  OUTER
@@ -603,6 +621,7 @@ End SQL_MODE_ORACLE_SPECIFIC */
 %token  PAGE_CHECKSUM_SYM
 %token  PARSE_VCOL_EXPR_SYM
 %token  PARTITION_SYM                 /* SQL-2003-R */
+%token  PATH_SYM                      /* SQL-2003-N */
 %token  PERCENTILE_CONT_SYM
 %token  PERCENTILE_DISC_SYM
 %token  PERCENT_RANK_SYM
@@ -1300,6 +1319,7 @@ End SQL_MODE_ORACLE_SPECIFIC */
 %type
         TEXT_STRING
         NCHAR_STRING
+        json_text_literal
 
 %type
         opt_table_alias_clause
@@ -1359,6 +1379,8 @@ End SQL_MODE_ORACLE_SPECIFIC */
 %type sp_handler
 
+%type json_on_response
+
 %type
         field_type field_type_all
         qualified_field_type
        field_type_numeric
@@ -1366,6 +1388,7 @@ End SQL_MODE_ORACLE_SPECIFIC */
        field_type_lob
        field_type_temporal
        field_type_misc
+       json_table_field_type
 
 %type opt_dyncol_type dyncol_type
        numeric_dyncol_type temporal_dyncol_type string_dyncol_type
@@ -1529,7 +1552,7 @@ End SQL_MODE_ORACLE_SPECIFIC */
        table_primary_derived table_primary_derived_opt_parens
        derived_table_list table_reference_list_parens
        nested_table_reference_list join_table_parens
-       update_table_list
+       update_table_list table_function
 %type date_time_type;
 %type interval
@@ -1684,6 +1707,9 @@ End SQL_MODE_ORACLE_SPECIFIC */
        opt_delete_gtid_domain
        asrow_attribute
        opt_constraint_no_id
+       json_table_columns_clause json_table_columns_list json_table_column
+       json_table_column_type json_opt_on_empty_or_error
+       json_on_error_response json_on_empty_response
 
 %type call sp_proc_stmts sp_proc_stmts1 sp_proc_stmt
 %type sp_if_then_statements sp_case_then_statements
@@ -9833,7 +9859,7 @@ column_default_non_parenthesized_expr:
         | variable
         | sum_expr
           {
-            if (!Lex->select_stack_top)
+            if (!Lex->select_stack_top || Lex->json_table)
             {
               my_error(ER_INVALID_GROUP_FUNC_USE, MYF(0));
               MYSQL_YYABORT;
@@ -11497,10 +11523,200 @@ table_ref:
          }
        ;
 
+json_text_literal:
+          TEXT_STRING
+          {
+            Lex->json_table->m_text_literal_cs= NULL;
+          }
+        | NCHAR_STRING
+          {
+            Lex->json_table->m_text_literal_cs= national_charset_info;
+          }
+        | UNDERSCORE_CHARSET TEXT_STRING
+          {
+            Lex->json_table->m_text_literal_cs= $1;
+            $$= $2;
+          }
+        ;
+
 join_table_list:
          derived_table_list { MYSQL_YYABORT_UNLESS($$=$1); }
        ;
 
+json_table_columns_clause:
+          COLUMNS '(' json_table_columns_list ')'
+          {}
+        ;
+
+json_table_columns_list:
+          json_table_column
+        | json_table_columns_list ',' json_table_column
+          {}
+        ;
+
+json_table_column:
+          ident
+          {
+            LEX *lex=Lex;
+            Create_field *f= new (thd->mem_root) Create_field();
+
+            if (unlikely(check_string_char_length(&$1, 0, NAME_CHAR_LEN,
+                                                  system_charset_info, 1)))
+              my_yyabort_error((ER_TOO_LONG_IDENT, MYF(0), $1.str));
+
+            lex->json_table->m_cur_json_table_column=
+              new (thd->mem_root) Json_table_column(f,
+                                    lex->json_table->get_cur_nested_path());
+
+            if (unlikely(!f ||
+                         !lex->json_table->m_cur_json_table_column))
+              MYSQL_YYABORT;
+
+            lex->init_last_field(f, &$1, NULL);
+          }
+          json_table_column_type
+          {
+            LEX *lex=Lex;
+            if (unlikely(lex->json_table->
+                           m_cur_json_table_column->m_field->check(thd)))
+              MYSQL_YYABORT;
+            lex->json_table->m_columns.push_back(
+               lex->json_table->m_cur_json_table_column, thd->mem_root);
+          }
+        | NESTED_SYM PATH_SYM json_text_literal
+          {
+            LEX *lex=Lex;
+            Json_table_nested_path *np= new (thd->mem_root)
+              Json_table_nested_path();
+            np->set_path(thd, $3);
+            lex->json_table->start_nested_path(np);
+          }
+          json_table_columns_clause
+          {
+            LEX *lex=Lex;
+            lex->json_table->end_nested_path();
+          }
+        ;
+
+json_table_column_type:
+          FOR_SYM ORDINALITY_SYM
+          {
+            Lex_field_type_st type;
+            type.set_handler_length_flags(&type_handler_slong, 0, 0);
+            Lex->last_field->set_attributes(thd, type, Lex->charset,
+                                            COLUMN_DEFINITION_TABLE_FIELD);
+            Lex->json_table->m_cur_json_table_column->
+              set(Json_table_column::FOR_ORDINALITY);
+          }
+        | json_table_field_type PATH_SYM json_text_literal
+          json_opt_on_empty_or_error
+          {
+            Lex->last_field->set_attributes(thd, $1, Lex->charset,
+                                            COLUMN_DEFINITION_TABLE_FIELD);
+            if (Lex->json_table->m_cur_json_table_column->
+                  set(thd, Json_table_column::PATH, $3))
+            {
+              MYSQL_YYABORT;
+            }
+          }
+        | json_table_field_type EXISTS PATH_SYM json_text_literal
+          {
+            Lex->last_field->set_attributes(thd, $1, Lex->charset,
+                                            COLUMN_DEFINITION_TABLE_FIELD);
+            Lex->json_table->m_cur_json_table_column->
+              set(thd, Json_table_column::EXISTS_PATH, $4);
+          }
+        ;
+
+json_table_field_type:
+          field_type_numeric
+        | field_type_temporal
+        | field_type_string opt_collate
+          {
+            set_collation($2);
+          }
+        | field_type_lob opt_collate
+          {
+            set_collation($2);
+          }
+        ;
+
+json_opt_on_empty_or_error:
+          /* none */
+          {}
+        | json_on_error_response
+        | json_on_error_response json_on_empty_response
+        | json_on_empty_response
+        | json_on_empty_response json_on_error_response
+        ;
+
+json_on_response:
+          ERROR_SYM
+          {
+            $$.m_response= Json_table_column::RESPONSE_ERROR;
+          }
+        | NULL_SYM
+          {
+            $$.m_response= Json_table_column::RESPONSE_NULL;
+          }
+        | DEFAULT json_text_literal
+          {
+            $$.m_response= Json_table_column::RESPONSE_DEFAULT;
+            $$.m_default= $2;
+            Lex->json_table->m_cur_json_table_column->m_defaults_cs=
+              thd->variables.collation_connection;
+          }
+        ;
+
+json_on_error_response:
+          json_on_response ON ERROR_SYM
+          {
+            Lex->json_table->m_cur_json_table_column->m_on_error= $1;
+          }
+        ;
+
+json_on_empty_response:
+          json_on_response ON EMPTY_SYM
+          {
+            Lex->json_table->m_cur_json_table_column->m_on_empty= $1;
+          }
+        ;
+
+table_function:
+          JSON_TABLE_SYM '(' expr ','
+          {
+            Table_function_json_table *jt=
+              new (thd->mem_root) Table_function_json_table($3);
+            if (unlikely(!jt))
+              MYSQL_YYABORT;
+            Lex->json_table= jt;
+          }
+          json_text_literal json_table_columns_clause ')'
+          opt_table_alias_clause
+          {
+            SELECT_LEX *sel= Select;
+            if (unlikely($9 == NULL))
+            {
+              /* Alias is not optional. */
+              my_error(ER_JSON_TABLE_ALIAS_REQUIRED, MYF(0));
+              MYSQL_YYABORT;
+            }
+            if (unlikely(Lex->json_table->m_nested_path.set_path(thd, $6)))
+              MYSQL_YYABORT;
+            sel->table_join_options= 0;
+            if (!($$= Select->add_table_to_list(thd,
+                   new (thd->mem_root) Table_ident(thd, &empty_clex_str,
+                                                   $9, TRUE),
+                   NULL,
+                   Select->get_table_join_options() |
+                     TL_OPTION_TABLE_FUNCTION,
+                   YYPS->m_lock_type,
+                   YYPS->m_mdl_type,
+                   0,0,0, Lex->json_table)))
+              MYSQL_YYABORT;
+            Lex->json_table= 0;
+          }
+        ;
+
 /*
   The ODBC escape syntax for Outer Join is: '{' OJ join_table '}'
   The parser does not define OJ as a token, any ident is accepted
@@ -11708,6 +11924,7 @@ table_factor:
            $$= $1;
          }
        | table_reference_list_parens { $$= $1; }
+       | table_function { $$= $1; }
        ;
 
 table_primary_ident_opt_parens:
@@ -15646,6 +15863,7 @@ keyword_sp_var_and_label:
        | DYNAMIC_SYM
        | ELSEIF_ORACLE_SYM
        | ELSIF_MARIADB_SYM
+       | EMPTY_SYM
        | ENDS_SYM
        | ENGINE_SYM
        | ENGINES_SYM
@@ -15752,6 +15970,7 @@ keyword_sp_var_and_label:
        | MYSQL_SYM
        | MYSQL_ERRNO_SYM
        | NAME_SYM
+       | NESTED_SYM
        | NEVER_SYM
        | NEXT_SYM           %prec PREC_BELOW_CONTRACTION_TOKEN2
        | NEXTVAL_SYM
@@ -15771,6 +15990,7 @@ keyword_sp_var_and_label:
        | ONE_SYM
        | ONLINE_SYM
        | ONLY_SYM
+       | ORDINALITY_SYM
        | OVERLAPS_SYM
        | PACKAGE_MARIADB_SYM
        | PACK_KEYS_SYM
@@ -15778,6 +15998,7 @@ keyword_sp_var_and_label:
        | PARTIAL
        | PARTITIONING_SYM
        | PARTITIONS_SYM
+       | PATH_SYM
        | PERSISTENT_SYM
        | PHASE_SYM
        | PLUGIN_SYM
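Two behaviours defined by the grammar above, shown as sketches. First, the alias is mandatory, so the following is expected to fail with ER_JSON_TABLE_ALIAS_REQUIRED:

    SELECT * FROM JSON_TABLE('[1]', '$[*]' COLUMNS (a INT PATH '$'));

Second, ON EMPTY and ON ERROR accept ERROR, NULL or DEFAULT <literal> and may follow a PATH column in either order:

    SELECT jt.*
    FROM JSON_TABLE('[{"a":1},{}]', '$[*]'
           COLUMNS (a INT PATH '$.a'
                    DEFAULT '0' ON EMPTY
                    NULL ON ERROR)) AS jt;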
diff --git a/sql/table.cc b/sql/table.cc
index 26dadba9ebc..11d876a8a62 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -6968,7 +6968,7 @@ const char *Field_iterator_table_ref::get_table_name()
 
   DBUG_ASSERT(!strcmp(table_ref->table_name.str,
                       table_ref->table->s->table_name.str) ||
-              table_ref->schema_table);
+              table_ref->schema_table || table_ref->table_function);
   return table_ref->table_name.str;
 }
 
@@ -6987,7 +6987,8 @@ const char *Field_iterator_table_ref::get_db_name()
   */
   DBUG_ASSERT(!cmp(&table_ref->db, &table_ref->table->s->db) ||
               (table_ref->schema_table &&
-               is_infoschema_db(&table_ref->table->s->db)));
+               is_infoschema_db(&table_ref->table->s->db)) ||
+              table_ref->table_function);
 
   return table_ref->db.str;
 }
@@ -8180,7 +8181,8 @@ bool TABLE::is_filled_at_execution()
   */
   return MY_TEST(!pos_in_table_list ||
                  pos_in_table_list->jtbm_subselect ||
-                 pos_in_table_list->is_active_sjm());
+                 pos_in_table_list->is_active_sjm() ||
+                 pos_in_table_list->table_function);
 }
 
 
diff --git a/sql/table.h b/sql/table.h
index f0499ee63fc..b29199e975a 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -63,6 +63,7 @@ class Range_rowid_filter_cost_info;
 class derived_handler;
 class Pushdown_derived;
 struct Name_resolution_context;
+class Table_function_json_table;
 
 /*
   Used to identify NESTED_JOIN structures within a join (applicable only to
@@ -2209,6 +2210,7 @@ struct TABLE_LIST
   const char *option;                /* Used by cache index  */
   Item *on_expr;                     /* Used with outer join */
   Name_resolution_context *on_context;  /* For ON expressions */
+  Table_function_json_table *table_function; /* If this is a table function. */
 
   Item          *sj_on_expr;
   /*
@@ -2601,7 +2603,7 @@ struct TABLE_LIST
   void cleanup_items();
   bool placeholder()
   {
-    return derived || view || schema_table || !table;
+    return derived || view || schema_table || !table || table_function;
   }
   void print(THD *thd, table_map eliminated_tables, String *str,
              enum_query_type query_type);