MDEV-17399 JSON_TABLE.

Patch to get rid of duplicated code of the Create_tmp_table.
This commit is contained in:
Alexey Botchkov 2021-04-12 13:04:01 +04:00
parent a5b454f98a
commit f82947e48d
6 changed files with 178 additions and 368 deletions

80
sql/create_tmp_table.h Normal file
View File

@ -0,0 +1,80 @@
#ifndef CREATE_TMP_TABLE_INCLUDED
#define CREATE_TMP_TABLE_INCLUDED
/* Copyright (c) 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA */
/*
  Class for creating internal temporary tables in sql_select.cc.

  Moved to a header so it can be subclassed outside sql_select.cc
  (e.g. Create_json_table in json_table.cc overrides choose_engine()
  and wraps start()/finalize()).  Typical call sequence, as used by
  create_tmp_table(): construct, then start(), then add_fields() (or
  add_schema_fields() / a subclass-specific field adder), then
  finalize(); cleanup_on_failure() on error paths.
*/
class Create_tmp_table: public Data_type_statistics
{
protected:
// The following members are initialized only in start()
Field **m_from_field, **m_default_field;
KEY_PART_INFO *m_key_part_info;
uchar *m_group_buff, *m_bitmaps;
// The following members are initialized in ctor
uint m_alloced_field_count;
bool m_using_unique_constraint;
uint m_temp_pool_slot;
ORDER *m_group;
bool m_distinct;
bool m_save_sum_fields;
bool m_with_cycle;
ulonglong m_select_options;
ha_rows m_rows_limit;
uint m_group_null_items;
// counter for distinct/other fields
uint m_field_count[2];
// counter for distinct/other fields which can be NULL
uint m_null_count[2];
// counter for distinct/other blob fields
uint m_blobs_count[2];
// counter for "tails" of bit fields which do not fit in a byte
uint m_uneven_bit[2];
public:
// Index into the per-kind counter arrays above.
enum counter {distinct, other};
/*
shows which field we are processing: distinct/other (set in processing
cycles)
*/
counter current_counter;
Create_tmp_table(ORDER *group, bool distinct, bool save_sum_fields,
ulonglong select_options, ha_rows rows_limit);
virtual ~Create_tmp_table() {}
// Selects the storage engine (heap vs. disk tmp engine) and creates
// table->file.  Returns true on failure.  Virtual so subclasses that
// supply their own handler (e.g. ha_json_table) can make it a no-op.
virtual bool choose_engine(THD *thd, TABLE *table, TMP_TABLE_PARAM *param);
// Registers one Field in the table and updates the statistics counters.
void add_field(TABLE *table, Field *field, uint fieldnr,
bool force_not_null_cols);
// Allocates the TABLE/TABLE_SHARE and supporting buffers on their own
// mem_root and initializes the share; returns NULL on allocation failure.
// NOTE(review): on success thd->mem_root appears to be switched to the
// table's mem_root until finalize()/cleanup_on_failure() — confirm in
// the definition before relying on it.
TABLE *start(THD *thd,
TMP_TABLE_PARAM *param,
const LEX_CSTRING *table_alias);
// Creates table fields from an item list (regular tmp-table path).
bool add_fields(THD *thd, TABLE *table,
TMP_TABLE_PARAM *param, List<Item> &fields);
// Creates table fields from an INFORMATION_SCHEMA table definition.
bool add_schema_fields(THD *thd, TABLE *table,
TMP_TABLE_PARAM *param,
const ST_SCHEMA_TABLE &schema_table);
// Lays out the record (null bits, field offsets, recinfo), picks the
// engine via choose_engine() and, unless do_not_open, opens the table.
// Returns true on failure.
bool finalize(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
bool do_not_open, bool keep_row_order);
// Frees everything allocated by start()/add_*() after a failure.
void cleanup_on_failure(THD *thd, TABLE *table);
};
#endif /* CREATE_TMP_TABLE_INCLUDED */

View File

@ -23,6 +23,7 @@
#include "json_table.h"
#include "sql_show.h"
#include "sql_select.h"
#include "create_tmp_table.h"
#define HA_ERR_JSON_TABLE (HA_ERR_LAST+1)
@ -223,28 +224,21 @@ public:
represents the table function in the query.
*/
class Create_json_table: public Data_type_statistics
class Create_json_table final: public Create_tmp_table
{
// The following members are initialized only in start()
Field **m_default_field;
uchar *m_bitmaps;
// The following members are initialized in ctor
uint m_temp_pool_slot;
uint m_null_count;
public:
Create_json_table(const TMP_TABLE_PARAM *param,
bool save_sum_fields)
:m_temp_pool_slot(MY_BIT_NONE),
m_null_count(0)
{ }
void add_field(TABLE *table, Field *field, uint fieldnr);
Create_json_table() :
Create_tmp_table((ORDER*) 0, 0, 0, 0, 0)
{}
virtual ~Create_json_table() {};
TABLE *start(THD *thd,
TMP_TABLE_PARAM *param,
Table_function_json_table *jt,
const LEX_CSTRING *table_alias);
bool choose_engine(THD *thd, TABLE *table, TMP_TABLE_PARAM *param) override
{
return 0; // Engine already choosen
}
bool add_json_table_fields(THD *thd, TABLE *table,
Table_function_json_table *jt);
bool finalize(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
@ -618,27 +612,6 @@ int ha_json_table::info(uint)
}
void Create_json_table::add_field(TABLE *table, Field *field, uint fieldnr)
{
DBUG_ASSERT(!field->field_name.str ||
strlen(field->field_name.str) == field->field_name.length);
if (!(field->flags & NOT_NULL_FLAG))
m_null_count++;
table->s->reclength+= field->pack_length();
// Assign it here, before update_data_type_statistics() changes m_blob_count
if (field->flags & BLOB_FLAG)
table->s->blob_field[m_blob_count]= fieldnr;
table->field[fieldnr]= field;
field->field_index= fieldnr;
field->update_data_type_statistics(this);
}
/**
Create a json table according to a field list.
@ -653,98 +626,19 @@ TABLE *Create_json_table::start(THD *thd,
Table_function_json_table *jt,
const LEX_CSTRING *table_alias)
{
MEM_ROOT *mem_root_save, own_root;
TABLE *table;
TABLE_SHARE *share;
uint copy_func_count= param->func_count;
char *tmpname,path[FN_REFLEN];
Field **reg_field;
uint *blob_field;
DBUG_ENTER("Create_json_table::start");
DBUG_PRINT("enter",
("table_alias: '%s' ", table_alias->str));
if (use_temp_pool && !(test_flags & TEST_KEEP_TMP_TABLES))
m_temp_pool_slot = bitmap_lock_set_next(&temp_pool);
if (m_temp_pool_slot != MY_BIT_NONE) // we got a slot
sprintf(path, "%s-%lx-%i", tmp_file_prefix,
current_pid, m_temp_pool_slot);
else
{
/* if we run out of slots or we are not using tempool */
sprintf(path, "%s-%lx-%lx-%x", tmp_file_prefix,current_pid,
(ulong) thd->thread_id, thd->tmp_table++);
}
/*
No need to change table name to lower case.
*/
fn_format(path, path, mysql_tmpdir, "",
MY_REPLACE_EXT|MY_UNPACK_FILENAME);
const uint field_count= param->field_count;
DBUG_ASSERT(field_count);
init_sql_alloc(key_memory_TABLE, &own_root,
TABLE_ALLOC_BLOCK_SIZE, 0, MYF(MY_THREAD_SPECIFIC));
if (!multi_alloc_root(&own_root,
&table, sizeof(*table),
&share, sizeof(*share),
&reg_field, sizeof(Field*) * (field_count+1),
&m_default_field, sizeof(Field*) * (field_count),
&blob_field, sizeof(uint)*(field_count+1),
&param->items_to_copy,
sizeof(param->items_to_copy[0])*(copy_func_count+1),
&param->keyinfo, sizeof(*param->keyinfo),
&param->start_recinfo,
sizeof(*param->recinfo)*(field_count*2+4),
&tmpname, (uint) strlen(path)+1,
&m_bitmaps, bitmap_buffer_size(field_count)*6,
NullS))
{
DBUG_RETURN(NULL); /* purecov: inspected */
}
strmov(tmpname, path);
/* make table according to fields */
bzero((char*) table,sizeof(*table));
bzero((char*) reg_field, sizeof(Field*) * (field_count+1));
bzero((char*) m_default_field, sizeof(Field*) * (field_count));
table->mem_root= own_root;
mem_root_save= thd->mem_root;
thd->mem_root= &table->mem_root;
table->field=reg_field;
table->alias.set(table_alias->str, table_alias->length, table_alias_charset);
table->reginfo.lock_type=TL_WRITE; /* Will be updated */
table->map=1;
table->temp_pool_slot= m_temp_pool_slot;
table->copy_blobs= 1;
table->in_use= thd;
table->no_rows_with_nulls= param->force_not_null_cols;
table->s= share;
init_tmp_table_share(thd, share, "", 0, "(temporary)", tmpname);
share->blob_field= blob_field;
share->table_charset= param->table_charset;
share->primary_key= MAX_KEY; // Indicate no primary key
param->tmp_name= "json";
if (!(table= Create_tmp_table::start(thd, param, table_alias)))
DBUG_RETURN(0);
share= table->s;
share->not_usable_by_query_cache= FALSE;
if (param->schema_table)
share->db= INFORMATION_SCHEMA_NAME;
param->using_outer_summary_function= 0;
share->db_plugin= NULL;
if (!(table->file= new (&table->mem_root) ha_json_table(share, jt)))
DBUG_RETURN(NULL);
table->file->init();
thd->mem_root= mem_root_save;
DBUG_RETURN(table);
}
@ -756,143 +650,18 @@ bool Create_json_table::finalize(THD *thd, TABLE *table,
DBUG_ENTER("Create_json_table::finalize");
DBUG_ASSERT(table);
uint null_pack_length;
bool use_packed_rows= false;
uchar *pos;
uchar *null_flags;
TMP_ENGINE_COLUMNDEF *recinfo;
TABLE_SHARE *share= table->s;
MEM_ROOT *mem_root_save= thd->mem_root;
thd->mem_root= &table->mem_root;
DBUG_ASSERT(param->field_count >= share->fields);
DBUG_ASSERT(param->field_count >= share->blob_fields);
if (table->file->set_ha_share_ref(&share->ha_share))
{
delete table->file;
goto err;
}
if (share->blob_fields == 0)
m_null_count++;
null_pack_length= (m_null_count + m_uneven_bit_length + 7) / 8;
share->reclength+= null_pack_length;
if (!share->reclength)
share->reclength= 1; // Dummy select
{
uint alloc_length= ALIGN_SIZE(share->reclength + MI_UNIQUE_HASH_LENGTH+1);
share->rec_buff_length= alloc_length;
if (!(table->record[0]= (uchar*)
alloc_root(&table->mem_root, alloc_length*3)))
goto err;
table->record[1]= table->record[0]+alloc_length;
share->default_values= table->record[1]+alloc_length;
}
setup_tmp_table_column_bitmaps(table, m_bitmaps, table->s->fields);
recinfo=param->start_recinfo;
null_flags=(uchar*) table->record[0];
pos=table->record[0]+ null_pack_length;
if (null_pack_length)
{
bzero((uchar*) recinfo,sizeof(*recinfo));
recinfo->type=FIELD_NORMAL;
recinfo->length=null_pack_length;
recinfo++;
bfill(null_flags,null_pack_length,255); // Set null fields
table->null_flags= (uchar*) table->record[0];
share->null_fields= m_null_count;
share->null_bytes= share->null_bytes_for_compare= null_pack_length;
}
m_null_count= (share->blob_fields == 0) ? 1 : 0;
for (uint i= 0; i < share->fields; i++, recinfo++)
{
Field *field= table->field[i];
uint length;
bzero((uchar*) recinfo,sizeof(*recinfo));
if (!(field->flags & NOT_NULL_FLAG))
{
recinfo->null_bit= (uint8)1 << (m_null_count & 7);
recinfo->null_pos= m_null_count/8;
field->move_field(pos, null_flags + m_null_count/8,
(uint8)1 << (m_null_count & 7));
m_null_count++;
}
else
field->move_field(pos,(uchar*) 0,0);
if (field->type() == MYSQL_TYPE_BIT)
{
/* We have to reserve place for extra bits among null bits */
((Field_bit*) field)->set_bit_ptr(null_flags + m_null_count / 8,
m_null_count & 7);
m_null_count+= (field->field_length & 7);
}
field->reset();
/*
Test if there is a default field value. The test for ->ptr is to skip
'offset' fields generated by initialize_tables
*/
if (m_default_field[i] && m_default_field[i]->ptr)
{
/*
default_field[i] is set only in the cases when 'field' can
inherit the default value that is defined for the field referred
by the Item_field object from which 'field' has been created.
*/
const Field *orig_field= m_default_field[i];
/* Get the value from default_values */
if (orig_field->is_null_in_record(orig_field->table->s->default_values))
field->set_null();
else
{
field->set_notnull();
memcpy(field->ptr,
orig_field->ptr_in_record(orig_field->table->s->default_values),
field->pack_length_in_rec());
}
}
length=field->pack_length();
pos+= length;
/* Make entry for create table */
recinfo->length=length;
recinfo->type= field->tmp_engine_column_type(use_packed_rows);
// fix table name in field entry
field->set_table_name(&table->alias);
}
param->recinfo= recinfo; // Pointer to after last field
store_record(table,s->default_values); // Make empty default record
share->max_rows= ~(ha_rows) 0;
param->end_write_records= HA_POS_ERROR;
share->db_record_offset= 1;
if (unlikely(table->file->ha_open(table, table->s->path.str, O_RDWR,
HA_OPEN_TMP_TABLE | HA_OPEN_INTERNAL_TABLE)))
goto err;
if (Create_tmp_table::finalize(thd, table, param, 1, 0))
DBUG_RETURN(true);
table->db_stat= HA_OPEN_KEYFILE;
if (unlikely(table->file->ha_open(table, table->s->path.str, O_RDWR,
HA_OPEN_TMP_TABLE | HA_OPEN_INTERNAL_TABLE)))
DBUG_RETURN(true);
table->set_created();
thd->mem_root= mem_root_save;
DBUG_RETURN(false);
err:
thd->mem_root= mem_root_save;
DBUG_RETURN(true);
table->s->max_rows= ~(ha_rows) 0;
param->end_write_records= HA_POS_ERROR;
DBUG_RETURN(0);
}
@ -910,11 +679,11 @@ bool Create_json_table::add_json_table_fields(THD *thd, TABLE *table,
uint fieldnr= 0;
MEM_ROOT *mem_root_save= thd->mem_root;
List_iterator_fast<Json_table_column> jc_i(jt->m_columns);
DBUG_ENTER("add_json_table_fields");
thd->mem_root= &table->mem_root;
current_counter= other;
while ((jc= jc_i++))
{
Create_field *sql_f= jc->m_field;
@ -963,7 +732,7 @@ bool Create_json_table::add_json_table_fields(THD *thd, TABLE *table,
if (!f)
goto err_exit;
f->init(table);
add_field(table, f, fieldnr++);
add_field(table, f, fieldnr++, 0);
}
share->fields= fieldnr;
@ -1009,7 +778,7 @@ TABLE *create_table_for_function(THD *thd, TABLE_LIST *sql_table)
tp.table_charset= system_charset_info;
tp.field_count= field_count;
{
Create_json_table maker(&tp, false);
Create_json_table maker;
if (!(table= maker.start(thd, &tp,
sql_table->table_function, &sql_table->alias)) ||

View File

@ -4612,7 +4612,7 @@ SJ_TMP_TABLE::create_sj_weedout_tmp_table(THD *thd)
table->record[1]= table->record[0]+alloc_length;
share->default_values= table->record[1]+alloc_length;
}
setup_tmp_table_column_bitmaps(table, bitmaps);
setup_tmp_table_column_bitmaps(table, bitmaps, table->s->fields);
recinfo= start_recinfo;
null_flags=(uchar*) table->record[0];

View File

@ -4247,6 +4247,7 @@ void TMP_TABLE_PARAM::init()
materialized_subquery= 0;
force_not_null_cols= 0;
skip_create_table= 0;
tmp_name= "temptable"; // Name of temp table on disk
DBUG_VOID_RETURN;
}

View File

@ -6084,6 +6084,7 @@ public:
List<Item> copy_funcs;
Copy_field *copy_field, *copy_field_end;
uchar *group_buff;
const char *tmp_name;
Item **items_to_copy; /* Fields in tmp table */
TMP_ENGINE_COLUMNDEF *recinfo, *start_recinfo;
KEY *keyinfo;
@ -6157,7 +6158,9 @@ public:
schema_table(0), materialized_subquery(0), force_not_null_cols(0),
precomputed_group_by(0),
force_copy_fields(0), bit_fields_as_long(0), skip_create_table(0)
{}
{
init();
}
~TMP_TABLE_PARAM()
{
cleanup();
@ -7723,9 +7726,8 @@ public:
extern THD_list server_threads;
void setup_tmp_table_column_bitmaps(TABLE *table, uchar *bitmaps);
void
setup_tmp_table_column_bitmaps(TABLE *table, uchar *bitmaps, uint field_count);
void setup_tmp_table_column_bitmaps(TABLE *table, uchar *bitmaps,
uint field_count);
#endif /* MYSQL_SERVER */
#endif /* SQL_CLASS_INCLUDED */

View File

@ -67,6 +67,7 @@
#include "select_handler.h"
#include "my_json_writer.h"
#include "opt_trace.h"
#include "create_tmp_table.h"
/*
A key part number that means we're using a fulltext scan.
@ -18263,50 +18264,10 @@ setup_tmp_table_column_bitmaps(TABLE *table, uchar *bitmaps, uint field_count)
}
void
setup_tmp_table_column_bitmaps(TABLE *table, uchar *bitmaps)
{
setup_tmp_table_column_bitmaps(table, bitmaps, table->s->fields);
}
class Create_tmp_table: public Data_type_statistics
{
// The following members are initialized only in start()
Field **m_from_field, **m_default_field;
KEY_PART_INFO *m_key_part_info;
uchar *m_group_buff, *m_bitmaps;
// The following members are initialized in ctor
uint m_alloced_field_count;
bool m_using_unique_constraint;
uint m_temp_pool_slot;
ORDER *m_group;
bool m_distinct;
bool m_save_sum_fields;
bool m_with_cycle;
ulonglong m_select_options;
ha_rows m_rows_limit;
uint m_group_null_items;
// counter for distinct/other fields
uint m_field_count[2];
// counter for distinct/other fields which can be NULL
uint m_null_count[2];
// counter for distinct/other blob fields
uint m_blobs_count[2];
// counter for "tails" of bit fields which do not fit in a byte
uint m_uneven_bit[2];
public:
enum counter {distinct, other};
/*
shows which field we are processing: distinct/other (set in processing
cycles)
*/
counter current_counter;
Create_tmp_table(const TMP_TABLE_PARAM *param,
ORDER *group, bool distinct, bool save_sum_fields,
ulonglong select_options, ha_rows rows_limit)
Create_tmp_table::Create_tmp_table(ORDER *group, bool distinct,
bool save_sum_fields,
ulonglong select_options,
ha_rows rows_limit)
:m_alloced_field_count(0),
m_using_unique_constraint(false),
m_temp_pool_slot(MY_BIT_NONE),
@ -18318,39 +18279,23 @@ public:
m_rows_limit(rows_limit),
m_group_null_items(0),
current_counter(other)
{
m_field_count[Create_tmp_table::distinct]= 0;
m_field_count[Create_tmp_table::other]= 0;
m_null_count[Create_tmp_table::distinct]= 0;
m_null_count[Create_tmp_table::other]= 0;
m_blobs_count[Create_tmp_table::distinct]= 0;
m_blobs_count[Create_tmp_table::other]= 0;
m_uneven_bit[Create_tmp_table::distinct]= 0;
m_uneven_bit[Create_tmp_table::other]= 0;
}
void add_field(TABLE *table, Field *field, uint fieldnr, bool force_not_null_cols);
TABLE *start(THD *thd,
TMP_TABLE_PARAM *param,
const LEX_CSTRING *table_alias);
bool add_fields(THD *thd, TABLE *table,
TMP_TABLE_PARAM *param, List<Item> &fields);
bool add_schema_fields(THD *thd, TABLE *table,
TMP_TABLE_PARAM *param,
const ST_SCHEMA_TABLE &schema_table);
bool finalize(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
bool do_not_open, bool keep_row_order);
void cleanup_on_failure(THD *thd, TABLE *table);
};
void Create_tmp_table::add_field(TABLE *table, Field *field, uint fieldnr, bool force_not_null_cols)
{
DBUG_ASSERT(!field->field_name.str || strlen(field->field_name.str) == field->field_name.length);
m_field_count[Create_tmp_table::distinct]= 0;
m_field_count[Create_tmp_table::other]= 0;
m_null_count[Create_tmp_table::distinct]= 0;
m_null_count[Create_tmp_table::other]= 0;
m_blobs_count[Create_tmp_table::distinct]= 0;
m_blobs_count[Create_tmp_table::other]= 0;
m_uneven_bit[Create_tmp_table::distinct]= 0;
m_uneven_bit[Create_tmp_table::other]= 0;
}
void Create_tmp_table::add_field(TABLE *table, Field *field, uint fieldnr,
bool force_not_null_cols)
{
DBUG_ASSERT(!field->field_name.str ||
strlen(field->field_name.str) == field->field_name.length);
if (force_not_null_cols)
{
@ -18436,13 +18381,13 @@ TABLE *Create_tmp_table::start(THD *thd,
m_temp_pool_slot = bitmap_lock_set_next(&temp_pool);
if (m_temp_pool_slot != MY_BIT_NONE) // we got a slot
sprintf(path, "%s-temptable-%lx-%i", tmp_file_prefix,
sprintf(path, "%s-%s-%lx-%i", tmp_file_prefix, param->tmp_name,
current_pid, m_temp_pool_slot);
else
{
/* if we run out of slots or we are not using tempool */
sprintf(path, "%s-temptable-%lx-%llx-%x", tmp_file_prefix,current_pid,
thd->thread_id, thd->tmp_table++);
sprintf(path, "%s-%s-%lx-%llx-%x", tmp_file_prefix, param->tmp_name,
current_pid, thd->thread_id, thd->tmp_table++);
}
/*
@ -18785,6 +18730,40 @@ err:
}
bool Create_tmp_table::choose_engine(THD *thd, TABLE *table,
TMP_TABLE_PARAM *param)
{
TABLE_SHARE *share= table->s;
DBUG_ENTER("Create_tmp_table::choose_engine");
/*
If result table is small; use a heap, otherwise TMP_TABLE_HTON (Aria)
In the future we should try making storage engine selection more dynamic
*/
if (share->blob_fields || m_using_unique_constraint ||
(thd->variables.big_tables &&
!(m_select_options & SELECT_SMALL_RESULT)) ||
(m_select_options & TMP_TABLE_FORCE_MYISAM) ||
thd->variables.tmp_memory_table_size == 0)
{
share->db_plugin= ha_lock_engine(0, TMP_ENGINE_HTON);
table->file= get_new_handler(share, &table->mem_root,
share->db_type());
if (m_group &&
(param->group_parts > table->file->max_key_parts() ||
param->group_length > table->file->max_key_length()))
m_using_unique_constraint= true;
}
else
{
share->db_plugin= ha_lock_engine(0, heap_hton);
table->file= get_new_handler(share, &table->mem_root,
share->db_type());
}
DBUG_RETURN(!table->file);
}
bool Create_tmp_table::finalize(THD *thd,
TABLE *table,
TMP_TABLE_PARAM *param,
@ -18811,28 +18790,7 @@ bool Create_tmp_table::finalize(THD *thd,
DBUG_ASSERT(m_alloced_field_count >= share->fields);
DBUG_ASSERT(m_alloced_field_count >= share->blob_fields);
/* If result table is small; use a heap */
/* future: storage engine selection can be made dynamic? */
if (share->blob_fields || m_using_unique_constraint
|| (thd->variables.big_tables && !(m_select_options & SELECT_SMALL_RESULT))
|| (m_select_options & TMP_TABLE_FORCE_MYISAM)
|| thd->variables.tmp_memory_table_size == 0)
{
share->db_plugin= ha_lock_engine(0, TMP_ENGINE_HTON);
table->file= get_new_handler(share, &table->mem_root,
share->db_type());
if (m_group &&
(param->group_parts > table->file->max_key_parts() ||
param->group_length > table->file->max_key_length()))
m_using_unique_constraint= true;
}
else
{
share->db_plugin= ha_lock_engine(0, heap_hton);
table->file= get_new_handler(share, &table->mem_root,
share->db_type());
}
if (!table->file)
if (choose_engine(thd, table, param))
goto err;
if (table->file->set_ha_share_ref(&share->ha_share))
@ -18884,7 +18842,7 @@ bool Create_tmp_table::finalize(THD *thd,
share->default_values= table->record[1]+alloc_length;
}
setup_tmp_table_column_bitmaps(table, m_bitmaps);
setup_tmp_table_column_bitmaps(table, m_bitmaps, table->s->fields);
recinfo=param->start_recinfo;
null_flags=(uchar*) table->record[0];
@ -19339,8 +19297,8 @@ TABLE *create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
bool keep_row_order)
{
TABLE *table;
Create_tmp_table maker(param, group,
distinct, save_sum_fields, select_options, rows_limit);
Create_tmp_table maker(group, distinct, save_sum_fields, select_options,
rows_limit);
if (!(table= maker.start(thd, param, table_alias)) ||
maker.add_fields(thd, table, param, fields) ||
maker.finalize(thd, table, param, do_not_open, keep_row_order))
@ -19359,7 +19317,7 @@ TABLE *create_tmp_table_for_schema(THD *thd, TMP_TABLE_PARAM *param,
bool do_not_open, bool keep_row_order)
{
TABLE *table;
Create_tmp_table maker(param, (ORDER *) NULL, false, false,
Create_tmp_table maker((ORDER *) NULL, false, false,
select_options, HA_POS_ERROR);
if (!(table= maker.start(thd, param, &table_alias)) ||
maker.add_schema_fields(thd, table, param, schema_table) ||