Split cost calculations into fetch and total
This patch causes no changes in costs or result files.

Changes:
- Store row compare cost separately in Cost_estimate::comp_cost
- Store cost of fetching rows separately in OPT_RANGE
- Use range->fetch_cost instead of adjust_quick_cost(total_cost)

This was done to simplify cost calculation in sql_select.cc:
- We can use range->fetch_cost directly without having to call
  adjust_quick_cost(). adjust_quick_cost() is now removed.

Other things:
- Removed some not used functions in Cost_estimate
This commit is contained in:
parent
766bae2b31
commit
e6205c966d
@ -2780,6 +2780,7 @@ public:
|
||||
double cpu_cost; /* total cost of operations in CPU */
|
||||
double idx_cpu_cost; /* cost of operations in CPU for index */
|
||||
double import_cost; /* cost of remote operations */
|
||||
double comp_cost; /* Cost of comparing found rows with WHERE clause */
|
||||
double mem_cost; /* cost of used memory */
|
||||
|
||||
static constexpr double IO_COEFF= 1;
|
||||
@ -2793,6 +2794,15 @@ public:
|
||||
}
|
||||
|
||||
double total_cost() const
|
||||
{
|
||||
return IO_COEFF*io_count*avg_io_cost +
|
||||
IO_COEFF*idx_io_count*idx_avg_io_cost +
|
||||
CPU_COEFF*(cpu_cost + idx_cpu_cost + comp_cost) +
|
||||
MEM_COEFF*mem_cost + IMPORT_COEFF*import_cost;
|
||||
}
|
||||
|
||||
/* Cost of fetching a row */
|
||||
double fetch_cost() const
|
||||
{
|
||||
return IO_COEFF*io_count*avg_io_cost +
|
||||
IO_COEFF*idx_io_count*idx_avg_io_cost +
|
||||
@ -2800,6 +2810,16 @@ public:
|
||||
MEM_COEFF*mem_cost + IMPORT_COEFF*import_cost;
|
||||
}
|
||||
|
||||
/*
|
||||
Cost of comparing the row with the WHERE clause
|
||||
Note that fetch_cost() + compare_cost() == total_cost()
|
||||
*/
|
||||
double compare_cost() const
|
||||
{
|
||||
return CPU_COEFF*comp_cost;
|
||||
}
|
||||
|
||||
|
||||
double index_only_cost()
|
||||
{
|
||||
return IO_COEFF*idx_io_count*idx_avg_io_cost +
|
||||
@ -2814,14 +2834,15 @@ public:
|
||||
bool is_zero() const
|
||||
{
|
||||
return io_count == 0.0 && idx_io_count == 0.0 && cpu_cost == 0.0 &&
|
||||
import_cost == 0.0 && mem_cost == 0.0;
|
||||
import_cost == 0.0 && mem_cost == 0.0 && comp_cost == 0.0;
|
||||
}
|
||||
|
||||
void reset()
|
||||
{
|
||||
avg_io_cost= 1.0;
|
||||
idx_avg_io_cost= 1.0;
|
||||
io_count= idx_io_count= cpu_cost= idx_cpu_cost= mem_cost= import_cost= 0.0;
|
||||
io_count= idx_io_count= cpu_cost= idx_cpu_cost= mem_cost= import_cost=
|
||||
comp_cost= 0.0;
|
||||
}
|
||||
|
||||
void multiply(double m)
|
||||
@ -2831,6 +2852,7 @@ public:
|
||||
idx_io_count *= m;
|
||||
idx_cpu_cost *= m;
|
||||
import_cost *= m;
|
||||
comp_cost *= m;
|
||||
/* Don't multiply mem_cost */
|
||||
}
|
||||
|
||||
@ -2855,6 +2877,7 @@ public:
|
||||
cpu_cost += cost->cpu_cost;
|
||||
idx_cpu_cost += cost->idx_cpu_cost;
|
||||
import_cost += cost->import_cost;
|
||||
comp_cost+= cost->comp_cost;
|
||||
}
|
||||
|
||||
void add_io(double add_io_cnt, double add_avg_cost)
|
||||
@ -2869,15 +2892,6 @@ public:
|
||||
}
|
||||
}
|
||||
|
||||
/// Add to CPU cost
|
||||
void add_cpu(double add_cpu_cost) { cpu_cost+= add_cpu_cost; }
|
||||
|
||||
/// Add to import cost
|
||||
void add_import(double add_import_cost) { import_cost+= add_import_cost; }
|
||||
|
||||
/// Add to memory cost
|
||||
void add_mem(double add_mem_cost) { mem_cost+= add_mem_cost; }
|
||||
|
||||
/*
|
||||
To be used when we go from old single value-based cost calculations to
|
||||
the new Cost_estimate-based.
|
||||
|
@ -329,15 +329,18 @@ handler::multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq,
|
||||
uint limited_ranges= (uint) MY_MIN((ulonglong) n_ranges, io_blocks);
|
||||
cost->cpu_cost= read_time(keyno, limited_ranges, total_rows);
|
||||
}
|
||||
cost->cpu_cost+= (rows2double(total_rows) / TIME_FOR_COMPARE +
|
||||
cost->comp_cost= (rows2double(total_rows) / TIME_FOR_COMPARE +
|
||||
MULTI_RANGE_READ_SETUP_COST);
|
||||
}
|
||||
DBUG_PRINT("statistics",
|
||||
("key: %s rows: %llu total_cost: %.3f io_blocks: %llu "
|
||||
"idx_io_count: %.3f cpu_cost: %.3f io_count: %.3f",
|
||||
"idx_io_count: %.3f cpu_cost: %.3f io_count: %.3f "
|
||||
"compare_cost: %.3f",
|
||||
table->s->keynames.type_names[keyno],
|
||||
(ulonglong) total_rows, cost->total_cost(), (ulonglong) io_blocks,
|
||||
cost->idx_io_count, cost->cpu_cost, cost->io_count));
|
||||
(ulonglong) total_rows, cost->total_cost(),
|
||||
(ulonglong) io_blocks,
|
||||
cost->idx_io_count, cost->cpu_cost, cost->io_count,
|
||||
cost->comp_cost));
|
||||
DBUG_RETURN(total_rows);
|
||||
}
|
||||
|
||||
@ -409,7 +412,7 @@ ha_rows handler::multi_range_read_info(uint keyno, uint n_ranges, uint n_rows,
|
||||
{
|
||||
cost->cpu_cost= read_time(keyno, n_ranges, (uint)n_rows);
|
||||
}
|
||||
cost->cpu_cost+= rows2double(n_rows) / TIME_FOR_COMPARE;
|
||||
cost->comp_cost= rows2double(n_rows) / TIME_FOR_COMPARE;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -11764,7 +11764,9 @@ ha_rows check_quick_select(PARAM *param, uint idx, bool index_only,
|
||||
param->table->opt_range_condition_rows=
|
||||
MY_MIN(param->table->opt_range_condition_rows, rows);
|
||||
range->rows= rows;
|
||||
range->cost= cost->total_cost();
|
||||
range->fetch_cost= cost->fetch_cost();
|
||||
/* Same as total cost */
|
||||
range->cost= range->fetch_cost + cost->compare_cost();
|
||||
if (param->table->file->is_clustering_key(keynr))
|
||||
range->index_only_cost= 0;
|
||||
else
|
||||
|
@ -7834,26 +7834,6 @@ double cost_for_index_read(const THD *thd, const TABLE *table, uint key,
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
Adjust cost from table->quick_costs calculated by
|
||||
multi_range_read_info_const() to be comparable with cost_for_index_read()
|
||||
|
||||
This function is needed because best_access_path() doesn't add
|
||||
TIME_FOR_COMPARE to its costs until very late.
|
||||
Preferably we should fix it so that all costs are comparable.
|
||||
(All compared costs should include TIME_FOR_COMPARE for all found
|
||||
rows).
|
||||
*/
|
||||
|
||||
double adjust_quick_cost(double quick_cost, ha_rows records)
|
||||
{
|
||||
double cost= (quick_cost - MULTI_RANGE_READ_SETUP_COST -
|
||||
rows2double(records)/TIME_FOR_COMPARE);
|
||||
DBUG_ASSERT(cost > 0.0);
|
||||
return cost;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
Find the best access path for an extension of a partial execution
|
||||
plan and add this path to the plan.
|
||||
@ -8117,7 +8097,7 @@ best_access_path(JOIN *join,
|
||||
add("access_type", join_type_str[type]).
|
||||
add("index", keyinfo->name);
|
||||
if (!found_ref && table->opt_range_keys.is_set(key))
|
||||
tmp= adjust_quick_cost(table->opt_range[key].cost, 1);
|
||||
tmp= table->opt_range[key].fetch_cost;
|
||||
else
|
||||
tmp= table->file->avg_io_cost();
|
||||
/*
|
||||
@ -8158,8 +8138,7 @@ best_access_path(JOIN *join,
|
||||
{
|
||||
records= (double) table->opt_range[key].rows;
|
||||
trace_access_idx.add("used_range_estimates", true);
|
||||
tmp= adjust_quick_cost(table->opt_range[key].cost,
|
||||
table->opt_range[key].rows);
|
||||
tmp= table->opt_range[key].fetch_cost;
|
||||
goto got_cost2;
|
||||
}
|
||||
/* quick_range couldn't use key! */
|
||||
@ -8285,8 +8264,7 @@ best_access_path(JOIN *join,
|
||||
table->opt_range[key].ranges == 1 + MY_TEST(ref_or_null_part)) //(C3)
|
||||
{
|
||||
records= (double) table->opt_range[key].rows;
|
||||
tmp= adjust_quick_cost(table->opt_range[key].cost,
|
||||
table->opt_range[key].rows);
|
||||
tmp= table->opt_range[key].fetch_cost;
|
||||
trace_access_idx.add("used_range_estimates", true);
|
||||
goto got_cost2;
|
||||
}
|
||||
|
@ -1392,7 +1392,10 @@ public:
|
||||
uint key_parts;
|
||||
uint ranges;
|
||||
ha_rows rows;
|
||||
/* Cost of fetching and comparing the row against the WHERE clause */
|
||||
double cost;
|
||||
/* Cost of fetching the row, excluding the WHERE-clause comparison. Included in 'cost' */
|
||||
double fetch_cost;
|
||||
/*
|
||||
If there is a range access by i-th index then the cost of
|
||||
index only access for it is stored in index_only_costs[i]
|
||||
|
Loading…
x
Reference in New Issue
Block a user