MDEV-18553: MDEV-16327 prerequisites part 2: uniform LIMIT/OFFSET handling

Both offset and limit are now stored and do not change during execution
(before 10.5, the offset counter was decremented in place during processing).

(A big part of these changes was made by Monty.)
Author: Oleksandr Byelkin
Date:   2019-10-11 12:26:15 +02:00
Commit: 1ae02f0e0d (parent: eb0804ef5e)
14 changed files with 63 additions and 83 deletions
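
To make the contrast concrete, here is a minimal sketch of the two schemes (simplified stand-alone types, not the actual server classes): the pre-10.5 counter is consumed as rows stream past and must be restored before a unit is re-executed, while the 10.5 form is a pure comparison against the caller's row count.

// Hedged sketch of the semantic change; OldCounters/NewCounters are
// illustrative stand-ins for Select_limit_counters, not server code.
#include <cstdint>
typedef uint64_t ha_rows_t;            // stand-in for the server's ha_rows

struct OldCounters                     // pre-10.5: stateful
{
  ha_rows_t offset_start, offset;      // offset must be saved for reset()
  bool check_and_move_offset()         // mutates on every candidate row
  {
    if (offset) { offset--; return true; }   // row still inside OFFSET
    return false;
  }
  void reset() { offset= offset_start; }     // required before re-execution
};

struct NewCounters                     // 10.5: stateless
{
  ha_rows_t offset;
  bool check_offset(ha_rows_t sent) const    // pure predicate
  { return sent < offset; }                  // row still inside OFFSET
};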


@@ -97,7 +97,10 @@ int Pushdown_query::execute(JOIN *join)
     {
       int error;
       /* result < 0 if row was not accepted and should not be counted */
-      if (unlikely((error= join->result->send_data(*join->fields))))
+      if (unlikely((error=
+                    join->result->send_data_with_check(*join->fields,
+                                                       join->unit,
+                                                       join->send_records))))
       {
         handler->end_scan();
         DBUG_RETURN(error < 0 ? 0 : -1);


@@ -5714,9 +5714,6 @@ int select_value_catcher::send_data(List<Item> &items)
   DBUG_ASSERT(!assigned);
   DBUG_ASSERT(items.elements == n_elements);
 
-  if (unit->lim.check_and_move_offset())
-    DBUG_RETURN(0);                             // Using limit offset,count
-
   Item *val_item;
   List_iterator_fast<Item> li(items);
   for (uint i= 0; (val_item= li++); i++)


@@ -3015,9 +3015,6 @@ int select_send::send_data(List<Item> &items)
   Protocol *protocol= thd->protocol;
   DBUG_ENTER("select_send::send_data");
 
-  /* unit is not set when using 'delete ... returning' */
-  if (unit && unit->lim.check_and_move_offset())
-    DBUG_RETURN(FALSE);                         // using limit offset,count
   if (thd->killed == ABORT_QUERY)
     DBUG_RETURN(FALSE);
 
@@ -3282,8 +3279,6 @@ int select_export::send_data(List<Item> &items)
   String tmp(buff,sizeof(buff),&my_charset_bin),*res;
   tmp.length(0);
 
-  if (unit->lim.check_and_move_offset())
-    DBUG_RETURN(0);                             // using limit offset,count
   if (thd->killed == ABORT_QUERY)
     DBUG_RETURN(0);
   row_count++;
@@ -3539,8 +3534,6 @@ int select_dump::send_data(List<Item> &items)
   Item *item;
   DBUG_ENTER("select_dump::send_data");
 
-  if (unit->lim.check_and_move_offset())
-    DBUG_RETURN(0);                             // using limit offset,count
   if (thd->killed == ABORT_QUERY)
     DBUG_RETURN(0);
 
@@ -3579,8 +3572,6 @@ int select_singlerow_subselect::send_data(List<Item> &items)
                MYF(current_thd->lex->ignore ? ME_WARNING : 0));
     DBUG_RETURN(1);
   }
-  if (unit->lim.check_and_move_offset())
-    DBUG_RETURN(0);                             // Using limit offset,count
  if (thd->killed == ABORT_QUERY)
    DBUG_RETURN(0);
  List_iterator_fast<Item> li(items);
@@ -3717,8 +3708,6 @@ int select_exists_subselect::send_data(List<Item> &items)
 {
   DBUG_ENTER("select_exists_subselect::send_data");
   Item_exists_subselect *it= (Item_exists_subselect *)item;
-  if (unit->lim.check_and_move_offset())
-    DBUG_RETURN(0);                             // Using limit offset,count
   if (thd->killed == ABORT_QUERY)
     DBUG_RETURN(0);
   it->value= 1;
@@ -4123,8 +4112,6 @@ int select_dumpvar::send_data(List<Item> &items)
 {
   DBUG_ENTER("select_dumpvar::send_data");
 
-  if (unit->lim.check_and_move_offset())
-    DBUG_RETURN(0);                             // using limit offset,count
   if (row_count++)
   {
     my_message(ER_TOO_MANY_ROWS, ER_THD(thd, ER_TOO_MANY_ROWS), MYF(0));


@@ -5095,6 +5095,14 @@ class select_result_sink: public Sql_alloc
 public:
   THD *thd;
   select_result_sink(THD *thd_arg): thd(thd_arg) {}
+  inline int send_data_with_check(List<Item> &items,
+                                  SELECT_LEX_UNIT *u,
+                                  ha_rows sent)
+  {
+    if (u->lim.check_offset(sent))
+      return 0;
+    return send_data(items);
+  }
   /*
     send_data returns 0 on ok, 1 on error and -1 if data was ignored, for
     example for a duplicate row entry written to a temp table.
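
The wrapper above centralises the OFFSET check that each send_data() implementation used to perform itself. The subtle part is the caller contract, sketched here on top of the NewCounters model from the start of this page (deliver() is made up; only send_data_with_check() and its return convention come from the hunk above):

// 'sent' must count every candidate row, whether delivered or swallowed
// by the OFFSET window -- check_offset(sent) relies on exactly this count.
int send_with_check(NewCounters &lim, ha_rows_t sent, int row)
{
  if (lim.check_offset(sent))
    return 0;              // suppressed by OFFSET, still "success"
  return deliver(row);     // 0 ok, 1 hard error, -1 row ignored
}

// Caller side:
//   for each candidate row r:
//     int err= send_with_check(lim, sent, r);
//     if (err > 0)  abort;       // hard error
//     if (err == 0) sent++;      // delivered or OFFSET-skipped: count it
//     /* err < 0: ignored (e.g. duplicate) -- intentionally not counted */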


@@ -815,6 +815,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
         break;
       }
 
+      // no LIMIT / OFFSET
       if (with_select && result->send_data(select_lex->item_list) < 0)
       {
         error=1;


@@ -808,14 +808,14 @@ bool mysqld_show_warnings(THD *thd, ulong levels_to_show)
   Diagnostics_area::Sql_condition_iterator it=
     thd->get_stmt_da()->sql_conditions();
-  for (idx= 1; (err= it++) ; idx++)
+  for (idx= 0; (err= it++) ; idx++)
   {
     /* Skip levels that the user is not interested in */
     if (!(levels_to_show & ((ulong) 1 << err->get_level())))
       continue;
-    if (unit->lim.check_and_move_offset())
+    if (unit->lim.check_offset(idx))
       continue;                                 // using limit offset,count
-    if (idx > unit->lim.get_select_limit())
+    if (idx >= unit->lim.get_select_limit())
       break;
     protocol->prepare_for_resend();
     protocol->store(warning_level_names[err->get_level()].str,
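
With a zero-based idx, check_offset(idx) and the >= bound line up naturally: for LIMIT 3 OFFSET 2, get_select_limit() returns 5 (limit plus offset), rows idx 0 and 1 are skipped by check_offset(), rows 2 through 4 are sent, and the loop stops once idx reaches 5. The old one-based loop needed idx > limit because check_and_move_offset() had already consumed the offset itself.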


@@ -3857,8 +3857,6 @@ int select_insert::send_data(List<Item> &values)
   DBUG_ENTER("select_insert::send_data");
   bool error=0;
 
-  if (unit->lim.check_and_move_offset())
-    DBUG_RETURN(0);                             // using limit offset,count
   if (unlikely(thd->killed == ABORT_QUERY))
     DBUG_RETURN(0);


@@ -831,7 +831,6 @@
 void create_explain_query_if_not_exists(LEX *lex, MEM_ROOT *mem_root);
 bool print_explain_for_slow_log(LEX *lex, THD *thd, String *str);
 
 class st_select_lex_unit: public st_select_lex_node {
 protected:
   TABLE_LIST result_table_list;


@@ -22,51 +22,38 @@
 class Select_limit_counters
 {
-  ha_rows offset_limit_cnt_start,
-          select_limit_cnt, offset_limit_cnt;
+  ha_rows select_limit_cnt, offset_limit_cnt;
 
   public:
    Select_limit_counters():
-      offset_limit_cnt_start(0),
       select_limit_cnt(0), offset_limit_cnt(0)
       {};
 
    void set_limit(ha_rows limit, ha_rows offset)
    {
-      offset_limit_cnt_start= offset;
+      offset_limit_cnt= offset;
       select_limit_cnt= limit;
-      if (select_limit_cnt + offset_limit_cnt_start >=
+      if (select_limit_cnt + offset_limit_cnt >=
           select_limit_cnt)
-        select_limit_cnt+= offset_limit_cnt_start;
+        select_limit_cnt+= offset_limit_cnt;
      else
        select_limit_cnt= HA_POS_ERROR;
-      reset();
    }
 
    void set_single_row()
    {
-     offset_limit_cnt= offset_limit_cnt_start= 0;
+     offset_limit_cnt= 0;
      select_limit_cnt= 1;
    }
 
-   void reset()
-   {
-      offset_limit_cnt= offset_limit_cnt_start;
-   }
-
    bool is_unlimited()
    { return select_limit_cnt == HA_POS_ERROR; }
    void set_unlimited()
    { select_limit_cnt= HA_POS_ERROR; offset_limit_cnt= 0; }
 
-   bool check_and_move_offset()
+   bool check_offset(ha_rows sent)
    {
-     if (offset_limit_cnt)
-     {
-       offset_limit_cnt--;
-       return TRUE;
-     }
-     return FALSE;
+     return sent < offset_limit_cnt;
    }
    void remove_offset() { offset_limit_cnt= 0; }
@@ -76,5 +63,4 @@ class Select_limit_counters
    { return offset_limit_cnt; }
 };
 
 #endif // INCLUDES_MARIADB_SQL_LIMIT_H
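
A worked example of the new arithmetic, as a sketch built only from the members shown above (deliver_row() is made up): set_limit(3, 2) for LIMIT 3 OFFSET 2 stores offset_limit_cnt= 2 and select_limit_cnt= 5; the overflow test clamps select_limit_cnt to HA_POS_ERROR only if limit + offset would wrap around.

// LIMIT 3 OFFSET 2 over six candidate rows; deliver_row() is illustrative.
Select_limit_counters lim;
lim.set_limit(/* limit */ 3, /* offset */ 2);   // select_limit_cnt becomes 5

for (ha_rows sent= 0; sent < 6; sent++)
{
  if (sent >= lim.get_select_limit())   // 5 = offset + limit: done
    break;
  if (lim.check_offset(sent))           // rows 0 and 1 fall in the OFFSET
    continue;
  deliver_row(sent);                    // rows 2, 3 and 4 are delivered
}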


@@ -436,7 +436,7 @@ bool PROFILING::show_profiles()
     double query_time_usecs= prof->m_end_time_usecs - prof->m_start_time_usecs;
 
-    if (unit->lim.check_and_move_offset())
+    if (unit->lim.check_offset(idx))
       continue;
     if (idx > unit->lim.get_select_limit())
       break;


@@ -4106,7 +4106,7 @@ bool mysql_show_binlog_events(THD* thd)
                                          description_event,
                                          opt_master_verify_checksum)); )
     {
-      if (!unit->lim.check_and_move_offset() &&
+      if (!unit->lim.check_offset(event_count) &&
           ev->net_send(protocol, linfo.log_file_name, pos))
       {
         errmsg = "Net error";


@@ -3987,8 +3987,6 @@ JOIN::reinit()
 {
   DBUG_ENTER("JOIN::reinit");
 
-  unit->lim.reset();
-
   first_record= false;
   group_sent= false;
   cleaned= false;
@@ -4259,7 +4257,8 @@ void JOIN::exec_inner()
     {
       if (do_send_rows &&
           (procedure ? (procedure->send_row(procedure_fields_list) ||
-                        procedure->end_of_records()) : result->send_data(fields_list)> 0))
+                        procedure->end_of_records()):
+           result->send_data_with_check(fields_list, unit, 0)> 0))
        error= 1;
      else
        send_records= ((select_options & OPTION_FOUND_ROWS) ? 1 :
@@ -14210,7 +14209,7 @@ return_zero_rows(JOIN *join, select_result *result, List<TABLE_LIST> &tables,
   {
     bool send_error= FALSE;
     if (send_row)
-      send_error= result->send_data(fields) > 0;
+      send_error= result->send_data_with_check(fields, join->unit, 0) > 0;
     if (likely(!send_error))
       result->send_eof();                         // Should be safe
   }
@@ -19791,33 +19790,34 @@ do_select(JOIN *join, Procedure *procedure)
       HAVING will be checked after processing aggregate functions,
       But WHERE should checked here (we alredy have read tables).
      Notice that make_join_select() splits all conditions in this case
      into two groups exec_const_cond and outer_ref_cond.
      If join->table_count == join->const_tables then it is
      sufficient to check only the condition pseudo_bits_cond.
    */
    DBUG_ASSERT(join->outer_ref_cond == NULL);
    if (!join->pseudo_bits_cond || join->pseudo_bits_cond->val_int())
    {
      // HAVING will be checked by end_select
      error= (*end_select)(join, 0, 0);
      if (error >= NESTED_LOOP_OK)
        error= (*end_select)(join, 0, 1);
 
      /*
        If we don't go through evaluate_join_record(), do the counting
        here. join->send_records is increased on success in end_send(),
        so we don't touch it here.
      */
      join->join_examined_rows++;
      DBUG_ASSERT(join->join_examined_rows <= 1);
    }
    else if (join->send_row_on_empty_set())
    {
      if (!join->having || join->having->val_int())
      {
        List<Item> *columns_list= (procedure ? &join->procedure_fields_list :
                                   join->fields);
-        rc= join->result->send_data(*columns_list) > 0;
+        rc= join->result->send_data_with_check(*columns_list,
+                                               join->unit, 0) > 0;
      }
    }
    /*
@@ -21489,7 +21489,9 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
     {
       int error;
       /* result < 0 if row was not accepted and should not be counted */
-      if (unlikely((error= join->result->send_data(*fields))))
+      if (unlikely((error= join->result->send_data_with_check(*fields,
+                                                              join->unit,
+                                                              join->send_records))))
       {
         if (error > 0)
           DBUG_RETURN(NESTED_LOOP_ERROR);
@@ -21637,7 +21639,9 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
     {
       if (join->do_send_rows)
       {
-        error=join->result->send_data(*fields);
+        error= join->result->send_data_with_check(*fields,
+                                                  join->unit,
+                                                  join->send_records);
         if (unlikely(error < 0))
         {
           /* Duplicate row, don't count */
@@ -25847,7 +25851,8 @@ int JOIN::rollup_send_data(uint idx)
     if ((!having || having->val_int()))
     {
       if (send_records < unit->lim.get_select_limit() && do_send_rows &&
-          (res= result->send_data(rollup.fields[i])) > 0)
+          (res= result->send_data_with_check(rollup.fields[i],
+                                             unit, send_records)) > 0)
         return 1;
       if (!res)
         send_records++;
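
Note the constant 0 passed as the sent-row count in exec_inner() and return_zero_rows(): both paths emit at most one row, so its index is always zero and any non-zero OFFSET suppresses it, matching what check_and_move_offset() did for a one-row result.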


@@ -388,7 +388,8 @@ bool table_value_constr::exec(SELECT_LEX *sl)
   {
     if (send_records >= sl->master_unit()->lim.get_select_limit())
       break;
-    int rc= result->send_data(*elem);
+    int rc=
+      result->send_data_with_check(*elem, sl->master_unit(), send_records);
     if (!rc)
       send_records++;
     else if (rc > 0)


@@ -111,8 +111,6 @@ int select_unit::send_data(List<Item> &values)
 {
   int rc= 0;
   int not_reported_error= 0;
-  if (unit->lim.check_and_move_offset())
-    return 0;                                   // using limit offset,count
   if (thd->killed == ABORT_QUERY)
     return 0;
   if (table->no_rows_with_nulls)
@@ -604,8 +602,6 @@ int select_unit_ext::send_data(List<Item> &values)
   int rc= 0;
   int not_reported_error= 0;
   int find_res;
-  if (unit->lim.check_and_move_offset())
-    return 0;
   if (thd->killed == ABORT_QUERY)
     return 0;
   if (table->no_rows_with_nulls)
@@ -2200,7 +2196,6 @@ bool st_select_lex_unit::exec()
       }
       if (!sl->tvc)
         saved_error= sl->join->error;
-      lim.reset();
       if (likely(!saved_error))
       {
         examined_rows+= thd->get_examined_row_count();