Fix typos in C comments inside sql/
commit 717c12de0e
parent 153778437d
@@ -51,7 +51,7 @@ struct show_table_authors_st show_table_authors[]= {
 "Query Cache (4.0), Subqueries (4.1), Views (5.0)" },
 { "Timour Katchaounov", "Sofia , Bulgaria", "Optimizer"},
 { "Kristian Nielsen", "Copenhagen, Denmark",
-"Replication, Async client prototocol, General buildbot stuff" },
+"Replication, Async client protocol, General buildbot stuff" },
 { "Alexander (Bar) Barkov", "Izhevsk, Russia",
 "Unicode and character sets" },
 { "Alexey Botchkov (Holyfoot)", "Izhevsk, Russia",
@@ -876,7 +876,7 @@ static bool ddl_log_increment_phase_no_lock(uint entry_pos)
 else
 {
 /*
-Trying to deativate an execute entry or already deactive entry.
+Trying to deactivate an execute entry or already deactive entry.
 This should not happen
 */
 DBUG_ASSERT(0);
@@ -1017,7 +1017,7 @@ static void ddl_log_to_binary_log(THD *thd, String *query)
 table name to the query

 When we log, we always log all found tables and views at the same time. This
-is done to simply the exceute code as otherwise we would have to keep
+is done to simply execute the code as otherwise we would have to keep
 information of what was logged.
 */

@@ -1505,7 +1505,7 @@ static int ddl_log_execute_action(THD *thd, MEM_ROOT *mem_root,
 case DDL_RENAME_PHASE_STAT:
 /*
 Stat tables must be updated last so that we can handle a rename of
-a stat table. For now we just rememeber that we have to update it
+a stat table. For now we just remember that we have to update it
 */
 update_flags(ddl_log_entry->entry_pos, DDL_LOG_FLAG_UPDATE_STAT);
 ddl_log_entry->flags|= DDL_LOG_FLAG_UPDATE_STAT;
@@ -2543,7 +2543,7 @@ bool ddl_log_write_entry(DDL_LOG_ENTRY *ddl_log_entry,
 @brief Write or update execute entry in the ddl log.

 @details An execute entry points to the first entry that should
-be excuted during recovery. In some cases it's only written once,
+be executed during recovery. In some cases it's only written once,
 in other cases it's updated for each log entry to point to the new
 header for the list.

@@ -23,7 +23,7 @@
 functionality.
 */

-/* debug_crash_here() functionallity.
+/* debug_crash_here() functionality.
 See mysql_test/suite/atomic/create_table.test for an example of how it
 can be used
 */
@@ -73,7 +73,7 @@ public:
 */
 virtual int next_row()= 0;

-/* End prodicing rows */
+/* End producing rows */
 virtual int end_scan()=0;

 /* Report errors */
@@ -18,7 +18,7 @@
 @file

 @brief
-Read language depeneded messagefile
+Read language depended messagefile
 */

 #include "mariadb.h"
@@ -519,7 +519,7 @@ Event_queue_element::load_from_row(THD *thd, TABLE *table)
 else
 expression= 0;
 /*
-If neigher STARTS and ENDS is set, then both fields are empty.
+If neither STARTS and ENDS is set, then both fields are empty.
 Hence, if ET_FIELD_EXECUTE_AT is empty there is an error.
 */
 execute_at_null= table->field[ET_FIELD_EXECUTE_AT]->is_null();
@@ -167,7 +167,7 @@ deinit_event_thread(THD *thd)
 thd The THD of the thread. Has to be allocated by the caller.

 NOTES
-1. The host of the thead is my_localhost
+1. The host of the thread is my_localhost
 2. thd->net is initted with NULL - no communication.
 */

@@ -85,7 +85,7 @@ bool Field::marked_for_write_or_computed() const
 Rules for merging different types of fields in UNION

 NOTE: to avoid 256*256 table, gap in table types numeration is skipped
-following #defines describe that gap and how to canculate number of fields
+following #defines describe that gap and how to calculate number of fields
 and index of field in this array.
 */
 const int FIELDTYPE_TEAR_FROM= (MYSQL_TYPE_BIT + 1);
@@ -8573,7 +8573,7 @@ void Field_varstring::hash_not_null(Hasher *hasher)
 @param[in] from data to compress
 @param[in] length from length
 @param[in] max_length truncate `from' to this length
-@param[out] out_length compessed data length
+@param[out] out_length compressed data length
 @param[in] cs from character set
 @param[in] nchars copy no more than "nchars" characters

@@ -9989,8 +9989,8 @@ Field_enum::can_optimize_range_or_keypart_ref(const Item_bool_func *cond,
 3 - first (high) bit of 'c'
 2 - second bit of 'c'
 1 - third bit of 'c'
-0 - forth bit of 'c'
-2 7 - firth bit of 'c'
+0 - fourth bit of 'c'
+2 7 - fifth bit of 'c'
 6 - null bit for 'd'
 3 - 6 four bytes for 'a'
 7 - 8 two bytes for 'b'
@@ -1045,7 +1045,7 @@ public:
 /**
 Mark unused memory in the field as defined. Mainly used to ensure
 that if we write full field to disk (for example in
-Count_distinct_field::add(), we don't write unitalized data to
+Count_distinct_field::add(), we don't write uninitialized data to
 disk which would confuse valgrind or MSAN.
 */
 virtual void mark_unused_memory_as_defined() {}
@@ -1786,7 +1786,7 @@ protected:
 /*
 Make a leaf tree when an INT value was stored into a field of INT type,
 and some truncation happened. Tries to adjust the range search condition
-when possible, e.g. "tinytint < 300" -> "tinyint <= 127".
+when possible, e.g. "tinyint < 300" -> "tinyint <= 127".
 Can also return SEL_ARG_IMPOSSIBLE(), and NULL (not sargable).
 */
 SEL_ARG *stored_field_make_mm_leaf_bounded_int(RANGE_OPT_PARAM *param,
@@ -339,7 +339,7 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
 if (costs.fastest_sort == PQ_SORT_ALL_FIELDS ||
 costs.fastest_sort == PQ_SORT_ORDER_BY_FIELDS)
 {
-/* We are going to use priorty queue */
+/* We are going to use priority queue */
 thd->query_plan_flags|= QPLAN_FILESORT_PRIORITY_QUEUE;
 status_var_increment(thd->status_var.filesort_pq_sorts_);
 tracker->incr_pq_used();
@@ -359,7 +359,7 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
 param.res_length= param.ref_length;
 /*
 Add the ref (rowid which is stored last in the sort key) to the sort,
-as we want to retrive rows in id order, if possible.
+as we want to retrieve rows in id order, if possible.
 */
 param.sort_length+= param.ref_length;
 param.rec_length= param.sort_length;
@@ -414,7 +414,7 @@ public:
 };

 /* That class introduced mostly for the 'typecontrol' reason. */
-/* only difference from the point classis the get_next() function. */
+/* only difference from the point classes is the get_next() function. */
 class event_point : public point
 {
 public:
@@ -3476,7 +3476,7 @@ bool ha_partition::re_create_par_file(const char *name)
 0);
 if (m_part_info->partitions.elements == 0)
 {
-/* We did not succed in creating default partitions */
+/* We did not succeed in creating default partitions */
 tmp= 1;
 }
 }
@@ -4508,7 +4508,7 @@ int ha_partition::start_stmt(THD *thd, thr_lock_type lock_type)
 @returns Number of locks returned in call to store_lock

 @desc
-Returns the maxinum possible number of store locks needed in call to
+Returns the maximum possible number of store locks needed in call to
 store lock.
 */

@@ -5910,7 +5910,7 @@ int ha_partition::index_end()
 index_read_map can be restarted without calling index_end on the previous
 index scan and without calling index_init. In this case the index_read_map
 is on the same index as the previous index_scan. This is particularly
-used in conjuntion with multi read ranges.
+used in conjunction with multi read ranges.
 */

 int ha_partition::index_read_map(uchar *buf, const uchar *key,
@@ -7436,7 +7436,7 @@ end_dont_reset_start_part:
 SYNOPSIS
 ha_partition::partition_scan_set_up()
 buf Buffer to later return record in (this function
-needs it to calculcate partitioning function
+needs it to calculate partitioning function
 values)

 idx_read_flag TRUE <=> m_start_key has range start endpoint which
@@ -8782,7 +8782,7 @@ int ha_partition::info(uint flag)
 have been disabled.

 The most important parameters set here is records per key on
-all indexes. block_size and primar key ref_length.
+all indexes. block_size and primary key ref_length.

 For each index there is an array of rec_per_key.
 As an example if we have an index with three attributes a,b and c
@@ -9943,7 +9943,7 @@ IO_AND_CPU_COST ha_partition::scan_time()

 /**
 @brief
-Caculate time to scan the given index (index only scan)
+Calculate time to scan the given index (index only scan)

 @param inx Index number to scan

@@ -10701,7 +10701,7 @@ bool ha_partition::prepare_inplace_alter_table(TABLE *altered_table,

 /*
 Changing to similar partitioning, only update metadata.
-Non allowed changes would be caought in prep_alter_part_table().
+Non allowed changes would be caught in prep_alter_part_table().
 */
 if (ha_alter_info->alter_info->partition_flags == ALTER_PARTITION_INFO)
 {
@@ -382,7 +382,7 @@ int ha_sequence::discard_or_import_tablespace(my_bool discard)
 }

 /*
-Squence engine error deal method
+Sequence engine error deal method
 */

 void ha_sequence::print_error(int error, myf errflag)
@@ -34,7 +34,7 @@ extern handlerton *sql_sequence_hton;
 The sequence data (SEQUENCE class) is stored in TABLE_SHARE->sequence

 TABLE RULES:
-1. When table is created, one row is automaticlly inserted into
+1. When table is created, one row is automatically inserted into
 the table. The table will always have one and only one row.
 2. Any inserts or updates to the table will be validated.
 3. Inserts will overwrite the original row.
@@ -78,7 +78,7 @@ struct Listener
 virtual void begin_accept()= 0;

 /**
-Completion callback,called whenever IO posted by begin_accept is finisjed
+Completion callback,called whenever IO posted by begin_accept is finished
 Listener needs to create a new THD then (or, call scheduler so it creates one)

 @param success - whether IO completed successfull
@@ -112,7 +112,7 @@ struct Listener
 };
 };

-/* Winsock extension finctions. */
+/* Winsock extension functions. */
 static LPFN_ACCEPTEX my_AcceptEx;
 static LPFN_GETACCEPTEXSOCKADDRS my_GetAcceptExSockaddrs;

@@ -121,7 +121,7 @@ static LPFN_GETACCEPTEXSOCKADDRS my_GetAcceptExSockaddrs;
 Can be threadpool-bound (i.e the completion is executed in threadpool thread),
 or use events for waits.

-Threadpool-bound listener should be used with theradpool scheduler, for better
+Threadpool-bound listener should be used with threadpool scheduler, for better
 performance.
 */
 struct Socket_Listener: public Listener
@@ -123,7 +123,7 @@ ulong total_ha_2pc= 0;
 /*
 Number of non-mandatory 2pc handlertons whose initialization failed
 to estimate total_ha_2pc value under supposition of the failures
-have not occcured.
+have not occured.
 */
 ulong failed_ha_2pc= 0;
 #endif
@@ -613,7 +613,7 @@ int ha_finalize_handlerton(void *plugin_)
 */
 if (hton->slot != HA_SLOT_UNDEF)
 {
-/* Make sure we are not unpluging another plugin */
+/* Make sure we are not unplugging another plugin */
 DBUG_ASSERT(hton2plugin[hton->slot] == plugin);
 DBUG_ASSERT(hton->slot < MAX_HA);
 hton2plugin[hton->slot]= NULL;
@@ -2088,7 +2088,7 @@ err:
 {
 /*
 We are not really doing a rollback here, but the code in trans_commit()
-requres that m_transaction_psi is 0 when we return from this function.
+requires that m_transaction_psi is 0 when we return from this function.
 */
 MYSQL_ROLLBACK_TRANSACTION(thd->m_transaction_psi);
 thd->m_transaction_psi= NULL;
@@ -3418,7 +3418,7 @@ LEX_CSTRING *handler::engine_name()


 /*
-Calclate the number of index blocks we are going to access when
+Calculate the number of index blocks we are going to access when
 doing 'ranges' index dives reading a total of 'rows' rows.
 */

@@ -5379,7 +5379,7 @@ bool non_existing_table_error(int error)
 @retval
 HA_ADMIN_NEEDS_DATA_CONVERSION
 Table has structures requiring
-ALTER TABLE FORCE, algortithm=COPY to
+ALTER TABLE FORCE, algorithm=COPY to
 recreate data.
 @retval
 HA_ADMIN_NOT_IMPLEMENTED
@@ -5465,7 +5465,7 @@ int handler::ha_repair(THD* thd, HA_CHECK_OPT* check_opt)
 ha_table_flags() & HA_CAN_REPAIR);

 /*
-Update frm version if no errors and there are no version incompatibiltes
+Update frm version if there are no errors and no version incompatibilities
 in the data (as these are not fixed by repair).
 */
 if (result == HA_ADMIN_OK && !opt_readonly &&
@@ -7904,7 +7904,7 @@ int handler::check_duplicate_long_entries(const uchar *new_rec)
 /** @brief
 check whether updated records breaks the
 unique constraint on long columns.
-In the case of update we just need to check the specic key
+In the case of update we just need to check the specific key
 reason for that is consider case
 create table t1(a blob , b blob , x blob , y blob ,unique(a,b)
 ,unique(x,y))
@@ -861,7 +861,7 @@ typedef bool Log_func(THD*, TABLE*, Event_log *, binlog_cache_data *, bool,
 #define ALTER_PARTITION_ALL (1ULL << 8)
 // Set for REMOVE PARTITIONING
 #define ALTER_PARTITION_REMOVE (1ULL << 9)
-// Set for EXCHANGE PARITION
+// Set for EXCHANGE PARTITION
 #define ALTER_PARTITION_EXCHANGE (1ULL << 10)
 // Set by Sql_cmd_alter_table_truncate_partition::execute()
 #define ALTER_PARTITION_TRUNCATE (1ULL << 11)
@@ -1026,7 +1026,7 @@ struct xid_recovery_member
 */
 Binlog_offset binlog_coord;
 XID *full_xid; // needed by wsrep or past it recovery
-decltype(::server_id) server_id; // server id of orginal server
+decltype(::server_id) server_id; // server id of original server

 xid_recovery_member(my_xid xid_arg, uint prepare_arg, bool decided_arg,
 XID *full_xid_arg, decltype(::server_id) server_id_arg)
@@ -1438,7 +1438,7 @@ struct transaction_participant
 consistent between 2pc participants. Such engine is no longer required to
 durably flush to disk transactions in commit(), provided that the
 transaction has been successfully prepare()d and commit_ordered(); thus
-potentionally saving one fsync() call. (Engine must still durably flush
+potentially saving one fsync() call. (Engine must still durably flush
 to disk in commit() when no prepare()/commit_ordered() steps took place,
 at least if durable commits are wanted; this happens eg. if binlog is
 disabled).
@@ -2644,7 +2644,7 @@ public:
 */
 alter_table_operations handler_flags= 0;

-/* Alter operations involving parititons are strored here */
+/* Alter operations involving partitons are stored here */
 ulong partition_flags;

 /**
@@ -3463,8 +3463,8 @@ private:
 Handler_share **ha_share;
 public:

-double optimizer_where_cost; // Copy of THD->...optimzer_where_cost
-double optimizer_scan_setup_cost; // Copy of THD->...optimzer_scan_...
+double optimizer_where_cost; // Copy of THD->...optimizer_where_cost
+double optimizer_scan_setup_cost; // Copy of THD->...optimizer_scan_...

 handler(handlerton *ht_arg, TABLE_SHARE *share_arg)
 :table_share(share_arg), table(0),
@@ -3513,7 +3513,7 @@ public:
 DBUG_ASSERT(m_lock_type == F_UNLCK);
 DBUG_ASSERT(inited == NONE);
 }
-/* To check if table has been properely opened */
+/* To check if table has been properly opened */
 bool is_open()
 {
 return ref != 0;
@@ -3605,7 +3605,7 @@ public:
 }
 inline int ha_end_keyread()
 {
-if (!keyread_enabled()) /* Enably lazy usage */
+if (!keyread_enabled()) /* Enable lazy usage */
 return 0;
 keyread= MAX_KEY;
 return extra(HA_EXTRA_NO_KEYREAD);
@@ -4311,7 +4311,7 @@ public:
 This is intended to be used for EXPLAIN, via the following scenario:
 1. SQL layer calls handler->multi_range_read_info().
 1.1. Storage engine figures out whether it will use some non-default
-MRR strategy, sets appropritate bits in *mrr_mode, and returns
+MRR strategy, sets appropriate bits in *mrr_mode, and returns
 control to SQL layer
 2. SQL layer remembers the returned mrr_mode
 3. SQL layer compares various options and choses the final query plan. As
@@ -4411,7 +4411,7 @@ public:
 { return extra(operation); }
 /*
 Table version id for the the table. This should change for each
-sucessfull ALTER TABLE.
+successful ALTER TABLE.
 This is used by the handlerton->check_version() to ask the engine
 if the table definition has been updated.
 Storage engines that does not support inplace alter table does not
@@ -4650,7 +4650,7 @@ public:
 Count tables invisible from all tables list on which current one built
 (like myisammrg and partitioned tables)

-tables_type mask for the tables should be added herdde
+tables_type mask for the tables should be added here

 returns number of such tables
 */
@@ -5490,8 +5490,8 @@ public:

 @param record record to find (also will be fillded with
 actual record fields)
-@param unique_ref index or unique constraiun number (depends
-on what used in the engine
+@param unique_ref index or unique constraint number (depends
+on what was used in the engine

 @retval -1 Error
 @retval 1 Not found
sql/item.cc | 18
@@ -1480,8 +1480,8 @@ Item *Item::const_charset_converter(THD *thd, CHARSET_INFO *tocs,
 Item *Item_param::safe_charset_converter(THD *thd, CHARSET_INFO *tocs)
 {
 /*
-Return "this" if in prepare. result_type may change at execition time,
-to it's possible that the converter will not be needed at all:
+Return "this" if in prepare. result_type may change at execution time,
+though it's possible that the converter will not be needed at all:

 PREPARE stmt FROM 'SELECT * FROM t1 WHERE field = ?';
 SET @arg= 1;
@@ -2212,7 +2212,7 @@ Item::Type Item_name_const::type() const
 {
 /*

-We are guarenteed that value_item->basic_const_item(), if not
+We are guaranteed that value_item->basic_const_item(), if not
 an error is thrown that WRONG ARGUMENTS are supplied to
 NAME_CONST function.
 If type is FUNC_ITEM, then we have a fudged item_func_neg()
@@ -2370,7 +2370,7 @@ void Item::split_sum_func2(THD *thd, Ref_ptr_array ref_pointer_array,
 /*
 Skip the else part, window functions are very special functions:
 they need to have their own fields in the temp. table, but they
-need to be proceessed differently than regular aggregate functions
+need to be processed differently than regular aggregate functions

 Call split_sum_func here so that each argument gets its fields to
 point to the temporary table.
@@ -2828,7 +2828,7 @@ Item_func_or_sum

 @details
 This method first builds clones of the arguments. If it is successful with
-buiding the clones then it constructs a copy of this Item_func_or_sum object
+building the clones then it constructs a copy of this Item_func_or_sum object
 and attaches to it the built clones of the arguments.

 @return clone of the item
@@ -3085,7 +3085,7 @@ Item_sp::execute_impl(THD *thd, Item **args, uint arg_count)
 @brief Initialize the result field by creating a temporary dummy table
 and assign it to a newly created field object. Meta data used to
 create the field is fetched from the sp_head belonging to the stored
-proceedure found in the stored procedure functon cache.
+procedure found in the stored procedure functon cache.

 @note This function should be called from fix_fields to init the result
 field. It is some what related to Item_field.
@@ -3793,7 +3793,7 @@ void Item_field::set_refers_to_temp_table()
 {
 /*
 Derived temp. tables have non-zero derived_select_number.
-We don't need to distingish between other kinds of temp.tables currently.
+We don't need to distinguish between other kinds of temp.tables currently.
 */
 refers_to_temp_table= (field->table->derived_select_number != 0)?
 REFERS_TO_DERIVED_TMP : REFERS_TO_OTHER_TMP;
@@ -5914,7 +5914,7 @@ bool is_outer_table(TABLE_LIST *table, SELECT_LEX *select)
 @endcode

 @retval
-1 column succefully resolved and fix_fields() should continue.
+1 column successfully resolved and fix_fields() should continue.
 @retval
 0 column fully fixed and fix_fields() should return FALSE
 @retval
@@ -6440,7 +6440,7 @@ bool Item_field::fix_fields(THD *thd, Item **reference)
 /*
 if it is not expression from merged VIEW we will set this field.

-We can leave expression substituted from view for next PS/SP rexecution
+We can leave expression substituted from view for next PS/SP reexecution
 (i.e. do not register this substitution for reverting on cleanup()
 (register_item_tree_changing())), because this subtree will be
 fix_field'ed during setup_tables()->setup_underlying() (i.e. before
sql/item.h | 16
@@ -789,7 +789,7 @@ enum class item_with_t : item_flags_t
 WINDOW_FUNC= (1<<1), // If item contains a window func
 FIELD= (1<<2), // If any item except Item_sum contains a field.
 SUM_FUNC= (1<<3), // If item contains a sum func
-SUBQUERY= (1<<4), // If item containts a sub query
+SUBQUERY= (1<<4), // If item contains a subquery
 ROWNUM_FUNC= (1<<5), // If ROWNUM function was used
 PARAM= (1<<6) // If user parameter was used
 };
@@ -2294,7 +2294,7 @@ public:

 /*
 TRUE if the expression depends only on the table indicated by tab_map
-or can be converted to such an exression using equalities.
+or can be converted to such an expression using equalities.
 Not to be used for AND/OR formulas.
 */
 virtual bool excl_dep_on_table(table_map tab_map) { return false; }
@@ -3578,7 +3578,7 @@ public:
 void get_tmp_field_src(Tmp_field_src *src, const Tmp_field_param *param);
 /*
 This implementation of used_tables() used by Item_avg_field and
-Item_variance_field which work when only temporary table left, so theu
+Item_variance_field which work when only temporary table left, so they
 return table map of the temporary table.
 */
 table_map used_tables() const override { return 1; }
@@ -4646,7 +4646,7 @@ public:
 { return get_item_copy<Item_bool_static>(thd, this); }
 };

-/* The following variablese are stored in a read only segment */
+/* The following variables are stored in a read only segment */
 extern Item_bool_static *Item_false, *Item_true;

 class Item_uint :public Item_int
@@ -5804,7 +5804,7 @@ public:
 item to the debug log. The second use of this method is as
 a helper function of print() and error messages, where it is
 applicable. To suit both goals it should return a meaningful,
-distinguishable and sintactically correct string. This method
+distinguishable and syntactically correct string. This method
 should not be used for runtime type identification, use enum
 {Sum}Functype and Item_func::functype()/Item_sum::sum_func()
 instead.
@@ -7196,9 +7196,9 @@ public:


 /**
-This class is used as bulk parameter INGNORE representation.
+This class is used as bulk parameter IGNORE representation.

-It just do nothing when assigned to a field
+It just does nothing when assigned to a field

 This is a non-standard MariaDB extension.
 */
@@ -8205,7 +8205,7 @@ public:

 The value meaning a not-initialized ESCAPE character must not be equal to
 any valid value, so must be outside of these ranges:
-- -128..+127, not to conflict with a valid 8bit charcter
+- -128..+127, not to conflict with a valid 8bit character
 - 0..0x10FFFF, not to conflict with a valid Unicode code point
 The exact value does not matter.
 */
@@ -606,7 +606,7 @@ bool Arg_comparator::set_cmp_func_string(THD *thd)
 /*
 We must set cmp_collation here as we may be called from for an automatic
 generated item, like in natural join.
-Allow reinterpted superset as subset.
+Allow reinterpreted superset as subset.
 Use charset narrowing only for equalities, as that would allow
 to construct ref access.
 Non-equality comparisons with constants work without charset narrowing,
@@ -2715,7 +2715,7 @@ Item_func_nullif::fix_length_and_dec(THD *thd)
 If this is the first invocation of fix_length_and_dec(), create the
 third argument as a copy of the first. This cannot be done before
 fix_fields(), because fix_fields() might replace items,
-for exampe NOT x --> x==0, or (SELECT 1) --> 1.
+for example NOT x --> x==0, or (SELECT 1) --> 1.
 See also class Item_func_nullif declaration.
 */
 if (arg_count == 2)
@@ -2731,7 +2731,7 @@ Item_func_nullif::fix_length_and_dec(THD *thd)
 l_expr
 args[2]= >------------------------/

-Otherwise (during PREPARE or convensional execution),
+Otherwise (during PREPARE or conventional execution),
 args[0] and args[2] should still point to the same original l_expr.
 */
 DBUG_ASSERT(args[0] == args[2] || thd->stmt_arena->is_stmt_execute());
@@ -2814,7 +2814,7 @@ Item_func_nullif::fix_length_and_dec(THD *thd)
 l_expr (Item_field for t1.a)
 args[2] /

-d. Conversion of only args[0] happened (by equal field proparation):
+d. Conversion of only args[0] happened (by equal field propagation):

 CREATE OR REPLACE TABLE t1 (
 a CHAR(10),
@@ -3403,7 +3403,7 @@ bool Item_func_case_simple::aggregate_switch_and_when_arguments(THD *thd,
 If we'll do string comparison, we also need to aggregate
 character set and collation for first/WHEN items and
 install converters for some of them to cmp_collation when necessary.
-This is done because cmp_item compatators cannot compare
+This is done because cmp_item comparators cannot compare
 strings in two different character sets.
 Some examples when we install converters:

@@ -4709,7 +4709,7 @@ void Item_func_in::fix_in_vector()
 else
 {
 /*
-We don't put NULL values in array, to avoid erronous matches in
+We don't put NULL values in array, to avoid erroneous matches in
 bisection.
 */
 have_null= 1;
@@ -4760,7 +4760,7 @@ bool Item_func_in::value_list_convert_const_to_int(THD *thd)
 m_comparator.set_handler(&type_handler_slonglong);
 }
 }
-return thd->is_fatal_error; // Catch errrors in convert_const_to_int
+return thd->is_fatal_error; // Catch errors in convert_const_to_int
 }


@@ -5435,7 +5435,7 @@ Item *Item_cond::do_transform(THD *thd, Item_transformer transformer, uchar *arg
 callback functions.

 First the function applies the analyzer to the root node of
-the Item_func object. Then if the analyzer succeeeds (returns TRUE)
+the Item_func object. Then if the analyzer succeeds (returns TRUE)
 the function recursively applies the compile method to member
 item of the condition list.
 If the call of the method for a member item returns a new item
@@ -7897,7 +7897,7 @@ Item_equal::excl_dep_on_grouping_fields(st_select_lex *sel)

 2. After this all equalities of the form x=a (where x designates the first
 non-constant member for which checker returns true and a is some other
-such member of the multiplle equality) are created. When constructing
+such member of the multiple equality) are created. When constructing
 an equality item both its parts are taken as clones of x and a.

 Suppose in the examples above that for 'x', 'a', and 'b' the function
@@ -2070,7 +2070,7 @@ public:
 4. m_cmp_item - the pointer to a cmp_item instance to handle comparison
 for this pair. Only unique type handlers have m_cmp_item!=NULL.
 Non-unique type handlers share the same cmp_item instance.
-For all m_comparators[] elements the following assersion it true:
+For all m_comparators[] elements the following assertion is true:
 (m_handler_index==i) == (m_cmp_item!=NULL)
 */
 class Predicant_to_list_comparator
@@ -5500,7 +5500,7 @@ Create_func_rand::create_native(THD *thd, const LEX_CSTRING *name,
 between master and slave, because the order is undefined. Hence,
 the statement is unsafe to log in statement format.

-For normal INSERT's this is howevever safe
+For normal INSERT's this is however safe
 */
 if (thd->lex->sql_command != SQLCOM_INSERT)
 thd->lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_SYSTEM_FUNCTION);
@@ -84,7 +84,7 @@ static inline bool test_if_sum_overflows_ull(ulonglong arg1, ulonglong arg2)
 /**
 Allocate memory for arguments using tmp_args or thd->alloc().
 @retval false - success
-@retval true - error (arg_count is set to 0 for conveniece)
+@retval true - error (arg_count is set to 0 for convenience)
 */
 bool Item_args::alloc_arguments(THD *thd, uint count)
 {
@@ -7308,7 +7308,7 @@ Item_func_rownum::Item_func_rownum(THD *thd):
 /*
 Remember the select context.
 Add the function to the list fix_after_optimize in the select context
-so that we can easily initializef all rownum functions with the pointers
+so that we can easily initialize all rownum functions with the pointers
 to the row counters.
 */
 select= thd->lex->current_select;
@@ -3565,7 +3565,7 @@ public:

 /*
 This item represents user variable used as out parameter (e.g in LOAD DATA),
-and it is supposed to be used only for this purprose. So it is simplified
+and it is supposed to be used only for this purpose. So it is simplified
 a lot. Actually you should never obtain its value.

 The only two reasons for this thing being an Item is possibility to store it
@@ -2770,7 +2770,7 @@ mem_error:
 #ifndef DBUG_OFF
 longlong Item_func_gis_debug::val_int()
 {
-/* For now this is just a stub. TODO: implement the internal GIS debuggign */
+/* For now this is just a stub. TODO: implement the internal GIS debugging */
 return 0;
 }
 #endif
@@ -140,7 +140,7 @@ public:


 /*
-Functions returning GEOMETRY measurements of a single GEOEMETRY argument
+Functions returning GEOMETRY measurements of a single GEOMETRY argument
 */
 class Item_geometry_func_args_geometry: public Item_geometry_func
 {
@@ -1793,7 +1793,7 @@ null_return:
 Item_func_conv_charset::is_json_type() which returned args[0]->is_json_type().
 JSON functions with multiple string input with different character sets
 wrap some arguments into Item_func_conv_charset. So the former
-Item_func_conv_charset::is_json_type() took the JSON propery from args[0],
+Item_func_conv_charset::is_json_type() took the JSON properly from args[0],
 i.e. from the original argument before the conversion.
 This is probably not always correct because an *explicit*
 `CONVERT(arg USING charset)` is actually a general purpose string
@@ -4761,7 +4761,7 @@ static int json_find_overlap_with_object(json_engine_t *js, json_engine_t *value
 else return false.
 2.c) If either of json document or value is array and other is object:
 Iterate over the array, if an element of type object is found,
-then compare it with the object (which is the other arguemnt).
+then compare it with the object (which is the other argument).
 If the entire object matches i.e all they key value pairs match,
 then return true else return false.

@@ -5191,7 +5191,7 @@ static bool get_current_value(json_engine_t *js, const uchar *&value_start,
 If the outermost layer of JSON is an array,
 the intersection of arrays is independent of order.
 Create a hash containing all elements in the array,
-itterate over another array and add the common elements
+iterate over another array and add the common elements
 to the result.

 RETURN
@@ -80,7 +80,7 @@ static uint32 max_length_for_string(Item *item, bool *neg)
 }
 if (length > (ulonglong) INT_MAX32)
 {
-/* Limit string length to maxium string length in MariaDB (2G) */
+/* Limit string length to maximum string length in MariaDB (2G) */
 length= (ulonglong) INT_MAX32;
 }
 return (uint32) length;
@@ -3701,7 +3701,7 @@ bool Item_func_pad::fix_length_and_dec(THD *thd)
 /*
 PAD(expr,length,' ')
 removes argument's soft dependency on PAD_CHAR_TO_FULL_LENGTH if the result
-is longer than the argument's maximim possible length.
+is longer than the argument's maximum possible length.
 */
 Sql_mode_dependency Item_func_rpad::value_depends_on_sql_mode() const
 {
@@ -4044,7 +4044,7 @@ String *Item_func_set_collation::val_str(String *str)

 But for a non-NULL result SCS and TCS must be compatible:
 1. Either SCS==TCS
-2. Or SCS can be can be reinterpeted to TCS.
+2. Or SCS can be reinterpreted to TCS.
 This scenario is possible when args[0] is numeric and TCS->mbmaxlen==1.

 If SCS and TCS are not compatible here, then something went wrong during
@@ -4714,7 +4714,7 @@ longlong Item_func_uncompressed_length::val_int()
 5 bytes long.
 res->c_ptr() is not used because:
 - we do not need \0 terminated string to get first 4 bytes
-- c_ptr() tests simbol after string end (uninitialized memory) which
+- c_ptr() tests symbol after string end (uninitialized memory) which
 confuse valgrind
 */
 return uint4korr(res->ptr()) & 0x3FFFFFFF;
@@ -2154,7 +2154,7 @@ Item_in_subselect::single_value_transformer(JOIN *join)


 /**
-Apply transformation max/min transwormation to ALL/ANY subquery if it is
+Apply transformation max/min transformation to ALL/ANY subquery if it is
 possible.

 @param join Join object of the subquery (i.e. 'child' join).
@@ -3150,13 +3150,13 @@ bool Item_exists_subselect::exists2in_processor(void *opt_arg)
 DBUG_RETURN(FALSE);

 /*
-EXISTS-to-IN coversion and ORDER BY ... LIMIT clause:
+EXISTS-to-IN conversion and ORDER BY ... LIMIT clause:

 - "[ORDER BY ...] LIMIT n" clause with a non-zero n does not affect
 the result of the EXISTS(...) predicate, and so we can discard
 it during the conversion.
 - "[ORDER BY ...] LIMIT m, n" can turn a non-empty resultset into empty
-one, so it affects tthe EXISTS(...) result and cannot be discarded.
+one, so it affects the EXISTS(...) result and cannot be discarded.

 Disallow exists-to-in conversion if
 (1). three is a LIMIT which is not a basic constant
@@ -3270,7 +3270,7 @@ bool Item_exists_subselect::exists2in_processor(void *opt_arg)
 }
 }

-/* make EXISTS->IN permanet (see Item_subselect::init()) */
+/* make EXISTS->IN permanent (see Item_subselect::init()) */
 set_exists_transformed();

 first_select->limit_params.select_limit= NULL;
@@ -3583,7 +3583,7 @@ bool Item_in_subselect::fix_fields(THD *thd_arg, Item **ref)
 test for each Item happens later in
 Item_in_subselect::row_value_in_to_exists_transformer.
 The reason for this mess is that our JOIN::prepare phase works top-down
-instead of bottom-up, so we first do name resoluton and semantic checks
+instead of bottom-up, so we first do name resolution and semantic checks
 for the outer selects, then for the inner.
 */
 if (engine &&
@@ -3713,7 +3713,7 @@ bool Item_in_subselect::init_left_expr_cache()
 outer_join= unit->outer_select()->join;
 /*
 An IN predicate might be evaluated in a query for which all tables have
-been optimzied away.
+been optimized away.
 */
 if (!outer_join || !outer_join->table_count || !outer_join->tables_list)
 return TRUE;
@@ -4029,7 +4029,7 @@ bool subselect_single_select_engine::no_rows()


 /**
-Makes storage for the output values for the subquery and calcuates
+Makes storage for the output values for the subquery and calculates
 their data and column types and their nullability.
 */
 bool subselect_engine::set_row(List<Item> &item_list, Item_cache **row)
@@ -5338,7 +5338,7 @@ bool subselect_hash_sj_engine::init(List<Item> *tmp_columns, uint subquery_id)
 the extra key part created when s->uniques > 0.

 NOTE: item have to be Item_in_subselect, because class constructor
-accept Item_in_subselect as the parmeter.
+accepts Item_in_subselect as the parameter.
 */
 DBUG_ASSERT(tmp_table->s->keys == 1 &&
 item->get_IN_subquery()->left_expr->cols() ==
@@ -5438,7 +5438,7 @@ bool subselect_hash_sj_engine::make_semi_join_conds()


 /**
-Create a new uniquesubquery engine for the execution of an IN predicate.
+Create a new unique subquery engine for the execution of an IN predicate.

 @details
 Create and initialize a new JOIN_TAB, and Table_ref objects to perform
@ -5754,8 +5754,8 @@ double get_post_group_estimate(JOIN* join, double join_op_rows)
|
|||||||
Execute a subquery IN predicate via materialization.
|
Execute a subquery IN predicate via materialization.
|
||||||
|
|
||||||
@details
|
@details
|
||||||
If needed materialize the subquery into a temporary table, then
|
If needed, materialize the subquery into a temporary table, then
|
||||||
copmpute the predicate via a lookup into this table.
|
compute the predicate via a lookup into this table.
|
||||||
|
|
||||||
@retval TRUE if error
|
@retval TRUE if error
|
||||||
@retval FALSE otherwise
|
@retval FALSE otherwise
|
||||||
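A minimal standalone sketch of the execution strategy the comment above describes: materialize the subquery result once, then answer the IN predicate with a lookup. The class and member names are invented for illustration; this is not the server's subselect_hash_sj_engine.

#include <functional>
#include <set>
#include <vector>

class materialized_in_predicate_sketch
{
  std::set<long long> materialized_rows;                 // stand-in for the tmp table
  bool is_materialized= false;
  std::function<std::vector<long long>()> run_subquery;  // assumed row producer
public:
  explicit materialized_in_predicate_sketch(std::function<std::vector<long long>()> f)
    : run_subquery(std::move(f)) {}

  bool contains(long long left_value)
  {
    if (!is_materialized)                                // materialize only if needed
    {
      for (long long v : run_subquery())
        materialized_rows.insert(v);
      is_materialized= true;
    }
    return materialized_rows.count(left_value) != 0;     // predicate via a lookup
  }
};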
@ -6292,7 +6292,7 @@ bool Ordered_key::lookup()
|
|||||||
mid= lo + (hi - lo) / 2;
|
mid= lo + (hi - lo) / 2;
|
||||||
cmp_res= cmp_key_with_search_key(key_buff[mid]);
|
cmp_res= cmp_key_with_search_key(key_buff[mid]);
|
||||||
/*
|
/*
|
||||||
In order to find the minimum match, check if the pevious element is
|
In order to find the minimum match, check if the previous element is
|
||||||
equal or smaller than the found one. If equal, we need to search further
|
equal or smaller than the found one. If equal, we need to search further
|
||||||
to the left.
|
to the left.
|
||||||
*/
|
*/
|
||||||
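The lines above belong to a binary search that must return the leftmost of several equal keys. A rough, self-contained illustration of that idea, with plain ints standing in for the packed key buffers and cmp_key_with_search_key():

#include <cstddef>
#include <vector>

// Returns the index of the leftmost element equal to 'search', or -1.
static std::ptrdiff_t lookup_min_match(const std::vector<int> &keys, int search)
{
  std::ptrdiff_t lo= 0, hi= (std::ptrdiff_t) keys.size() - 1, found= -1;
  while (lo <= hi)
  {
    std::ptrdiff_t mid= lo + (hi - lo) / 2;
    if (keys[mid] < search)
      lo= mid + 1;
    else if (keys[mid] > search)
      hi= mid - 1;
    else
    {
      found= mid;      // a match, but the minimum match may be further left
      hi= mid - 1;     // so keep searching to the left
    }
  }
  return found;
}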
@ -6855,7 +6855,7 @@ bool subselect_rowid_merge_engine::partial_match()
|
|||||||
|
|
||||||
/* If there is a non-NULL key, it must be the first key in the keys array. */
|
/* If there is a non-NULL key, it must be the first key in the keys array. */
|
||||||
DBUG_ASSERT(!non_null_key || (non_null_key && merge_keys[0] == non_null_key));
|
DBUG_ASSERT(!non_null_key || (non_null_key && merge_keys[0] == non_null_key));
|
||||||
/* The prioryty queue for keys must be empty. */
|
/* The priority queue for keys must be empty. */
|
||||||
DBUG_ASSERT(pq.is_empty());
|
DBUG_ASSERT(pq.is_empty());
|
||||||
|
|
||||||
/* All data accesses during execution are via handler::ha_rnd_pos() */
|
/* All data accesses during execution are via handler::ha_rnd_pos() */
|
||||||
|
@ -231,7 +231,7 @@ public:
|
|||||||
|
|
||||||
/*
|
/*
|
||||||
Used by max/min subquery to initialize value presence registration
|
Used by max/min subquery to initialize value presence registration
|
||||||
mechanism. Engine call this method before rexecution query.
|
mechanism. The engine calls this method before re-executing the query.
|
||||||
*/
|
*/
|
||||||
virtual void reset_value_registration() {}
|
virtual void reset_value_registration() {}
|
||||||
enum_parsing_place place() { return parsing_place; }
|
enum_parsing_place place() { return parsing_place; }
|
||||||
@ -1273,7 +1273,7 @@ protected:
|
|||||||
/*
|
/*
|
||||||
Mapping from row numbers to row ids. The element row_num_to_rowid[i]
|
Mapping from row numbers to row ids. The element row_num_to_rowid[i]
|
||||||
contains a buffer with the rowid for the row numbered 'i'.
|
contains a buffer with the rowid for the row numbered 'i'.
|
||||||
The memory for this member is not maintanined by this class because
|
The memory for this member is not maintained by this class because
|
||||||
all Ordered_key indexes of the same table share the same mapping.
|
all Ordered_key indexes of the same table share the same mapping.
|
||||||
*/
|
*/
|
||||||
uchar *row_num_to_rowid;
|
uchar *row_num_to_rowid;
|
||||||
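A tiny illustration of the mapping described above, under the assumption that every rowid occupies the same fixed number of bytes: the buffer for row number i then starts at row_num_to_rowid + i * rowid_size. RowidMapSketch is an invented helper, not the server's data structure.

#include <cstddef>
#include <cstring>
#include <vector>

struct RowidMapSketch
{
  std::vector<unsigned char> storage;   // one contiguous allocation
  size_t rowid_size;

  RowidMapSketch(size_t row_count, size_t rowid_size_arg)
    : storage(row_count * rowid_size_arg), rowid_size(rowid_size_arg) {}

  unsigned char *slot(size_t row_num)   // buffer with the rowid for row 'row_num'
  { return storage.data() + row_num * rowid_size; }

  void set(size_t row_num, const unsigned char *rowid)
  { std::memcpy(slot(row_num), rowid, rowid_size); }
};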
|
@ -180,7 +180,7 @@ bool Item_sum::check_sum_func(THD *thd, Item **ref)
|
|||||||
|
|
||||||
/*
|
/*
|
||||||
Window functions can not be used as arguments to sum functions.
|
Window functions can not be used as arguments to sum functions.
|
||||||
Aggregation happes before window function computation, so there
|
Aggregation happens before window function computation, so there
|
||||||
are no values to aggregate over.
|
are no values to aggregate over.
|
||||||
*/
|
*/
|
||||||
if (with_window_func())
|
if (with_window_func())
|
||||||
@ -472,7 +472,7 @@ Item_sum::Item_sum(THD *thd, List<Item> &list): Item_func_or_sum(thd, list)
|
|||||||
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
Constructor used in processing select with temporary tebles.
|
Constructor used in processing select with temporary tables.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
Item_sum::Item_sum(THD *thd, Item_sum *item):
|
Item_sum::Item_sum(THD *thd, Item_sum *item):
|
||||||
@ -1246,7 +1246,7 @@ bool Item_sum_min_max::fix_length_and_dec(THD *thd)
|
|||||||
{
|
{
|
||||||
DBUG_ASSERT(args[0]->field_type() == args[0]->real_item()->field_type());
|
DBUG_ASSERT(args[0]->field_type() == args[0]->real_item()->field_type());
|
||||||
DBUG_ASSERT(args[0]->result_type() == args[0]->real_item()->result_type());
|
DBUG_ASSERT(args[0]->result_type() == args[0]->real_item()->result_type());
|
||||||
/* MIN/MAX can return NULL for empty set indepedent of the used column */
|
/* MIN/MAX can return NULL for empty set independent of the used column */
|
||||||
set_maybe_null();
|
set_maybe_null();
|
||||||
null_value= true;
|
null_value= true;
|
||||||
return args[0]->type_handler()->Item_sum_hybrid_fix_length_and_dec(this);
|
return args[0]->type_handler()->Item_sum_hybrid_fix_length_and_dec(this);
|
||||||
@ -4483,7 +4483,7 @@ String* Item_func_group_concat::val_str(String* str)
|
|||||||
|
|
||||||
/*
|
/*
|
||||||
@brief
|
@brief
|
||||||
Get the comparator function for DISTINT clause
|
Get the comparator function for DISTINCT clause
|
||||||
*/
|
*/
|
||||||
|
|
||||||
qsort_cmp2 Item_func_group_concat::get_comparator_function_for_distinct()
|
qsort_cmp2 Item_func_group_concat::get_comparator_function_for_distinct()
|
||||||
|
@ -304,7 +304,7 @@ class Window_spec;
|
|||||||
The implementation takes into account the used strategy:
|
The implementation takes into account the used strategy:
|
||||||
- Items resolved at optimization phase return 0 from Item_sum::used_tables().
|
- Items resolved at optimization phase return 0 from Item_sum::used_tables().
|
||||||
- Items that depend on the number of join output records, but not columns of
|
- Items that depend on the number of join output records, but not columns of
|
||||||
any particular table (like COUNT(*)), returm 0 from Item_sum::used_tables(),
|
any particular table (like COUNT(*)), return 0 from Item_sum::used_tables(),
|
||||||
but still return false from Item_sum::const_item().
|
but still return false from Item_sum::const_item().
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
@ -2059,7 +2059,7 @@ static uint parse_special(char cfmt, const char *ptr, const char *end,
|
|||||||
|
|
||||||
/*
|
/*
|
||||||
* '&' with text is used for variable input, but '&' with other
|
* '&' with text is used for variable input, but '&' with other
|
||||||
* special charaters like '|'. '*' is used as separator
|
* special characters like '|'. '*' is used as separator
|
||||||
*/
|
*/
|
||||||
if (cfmt == '&' && ptr + 1 < end)
|
if (cfmt == '&' && ptr + 1 < end)
|
||||||
{
|
{
|
||||||
|
@ -1188,7 +1188,7 @@ public:
|
|||||||
|
|
||||||
/*
|
/*
|
||||||
Computation functions.
|
Computation functions.
|
||||||
TODO: consoder merging these with class Group_bound_tracker.
|
TODO: consider merging these with class Group_bound_tracker.
|
||||||
*/
|
*/
|
||||||
void setup_partition_border_check(THD *thd);
|
void setup_partition_border_check(THD *thd);
|
||||||
|
|
||||||
|
@ -66,7 +66,7 @@ typedef struct my_xml_node_st
|
|||||||
typedef struct my_xpath_lex_st
|
typedef struct my_xpath_lex_st
|
||||||
{
|
{
|
||||||
int term; /* token type, see MY_XPATH_LEX_XXXXX below */
|
int term; /* token type, see MY_XPATH_LEX_XXXXX below */
|
||||||
const char *beg; /* beginnign of the token */
|
const char *beg; /* beginning of the token */
|
||||||
const char *end; /* end of the token */
|
const char *end; /* end of the token */
|
||||||
} MY_XPATH_LEX;
|
} MY_XPATH_LEX;
|
||||||
|
|
||||||
@ -769,7 +769,7 @@ bool Item_nodeset_func_ancestorbyname::val_native(THD *thd, Native *nodeset)
|
|||||||
{
|
{
|
||||||
/*
|
/*
|
||||||
Go to the root and add all nodes on the way.
|
Go to the root and add all nodes on the way.
|
||||||
Don't add the root if context is the root itelf
|
Don't add the root if context is the root itself
|
||||||
*/
|
*/
|
||||||
MY_XML_NODE *self= &nodebeg[flt->num];
|
MY_XML_NODE *self= &nodebeg[flt->num];
|
||||||
if (need_self && validname(self))
|
if (need_self && validname(self))
|
||||||
@ -1043,7 +1043,7 @@ static Item *create_comparator(MY_XPATH *xpath,
|
|||||||
/*
|
/*
|
||||||
Compare a node set to a scalar value.
|
Compare a node set to a scalar value.
|
||||||
We just create a fake Item_string_xml_non_const() argument,
|
We just create a fake Item_string_xml_non_const() argument,
|
||||||
which will be filled to the partular value
|
which will be filled to the particular value
|
||||||
in a loop through all of the nodes in the node set.
|
in a loop through all of the nodes in the node set.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
@ -2566,7 +2566,7 @@ static int my_xpath_parse_Number(MY_XPATH *xpath)
|
|||||||
|
|
||||||
SYNOPSYS
|
SYNOPSYS
|
||||||
|
|
||||||
The keywords AND, OR, MOD, DIV are valid identitiers
|
The keywords AND, OR, MOD, DIV are valid identifiers
|
||||||
when they are in identifier context:
|
when they are in identifier context:
|
||||||
|
|
||||||
SELECT
|
SELECT
|
||||||
|
@ -1847,7 +1847,7 @@ bool Json_schema_property_names::handle_keyword(THD *thd, json_engine_t *je,
|
|||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
additiona_items, additional_properties, unevaluated_items,
|
additional_items, additional_properties, unevaluated_items,
|
||||||
unevaluated_properties are all going to be schemas
|
unevaluated_properties are all going to be schemas
|
||||||
(basically of object type). So they all can be handled
|
(basically of object type). So they all can be handled
|
||||||
just like any other schema.
|
just like any other schema.
|
||||||
@ -1879,7 +1879,7 @@ Json_schema_additional_and_unevaluated::handle_keyword(THD *thd,
|
|||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Validating properties as an alternate shcema happens only when we have
|
Validating properties as an alternate schema happens only when we have
|
||||||
logic keywords. Example we have allOf, and one key is not
|
logic keywords. Example we have allOf, and one key is not
|
||||||
validated against allOf but it is present in "properties" and validates
|
validated against allOf but it is present in "properties" and validates
|
||||||
against it. Then the validation result should be true. So we would want that
|
against it. Then the validation result should be true. So we would want that
|
||||||
|
@ -699,7 +699,7 @@ int ha_json_table::info(uint)
|
|||||||
|
|
||||||
@param thd thread handle
|
@param thd thread handle
|
||||||
@param param a description used as input to create the table
|
@param param a description used as input to create the table
|
||||||
@param jt json_table specificaion
|
@param jt json_table specification
|
||||||
@param table_alias alias
|
@param table_alias alias
|
||||||
*/
|
*/
|
||||||
|
|
||||||
@ -910,7 +910,7 @@ int Json_table_column::set(THD *thd, enum_type ctype, const LEX_CSTRING &path,
|
|||||||
/*
|
/*
|
||||||
This is done so the ::print function can just print the path string.
|
This is done so the ::print function can just print the path string.
|
||||||
Can be removed if we redo that function to print the path using it's
|
Can be removed if we redo that function to print the path using it's
|
||||||
anctual content. Not sure though if we should.
|
actual content. Not sure though if we should.
|
||||||
*/
|
*/
|
||||||
m_path.s.c_str= (const uchar *) path.str;
|
m_path.s.c_str= (const uchar *) path.str;
|
||||||
|
|
||||||
|
@ -194,7 +194,7 @@ public:
|
|||||||
In the current MariaDB code, evaluation of JSON_TABLE is deterministic,
|
In the current MariaDB code, evaluation of JSON_TABLE is deterministic,
|
||||||
that is, for a given input string JSON_TABLE will always produce the same
|
that is, for a given input string JSON_TABLE will always produce the same
|
||||||
set of rows in the same order. However one can think of JSON documents
|
set of rows in the same order. However one can think of JSON documents
|
||||||
that one can consider indentical which will produce different output.
|
that one can consider identical which will produce different output.
|
||||||
In order to be feature-proof and withstand changes like:
|
In order to be feature-proof and withstand changes like:
|
||||||
- sorting JSON object members by name (like MySQL does)
|
- sorting JSON object members by name (like MySQL does)
|
||||||
- changing the way duplicate object members are handled
|
- changing the way duplicate object members are handled
|
||||||
@ -274,7 +274,7 @@ private:
|
|||||||
/*
|
/*
|
||||||
Pointer to the list tail where we add the next NESTED PATH.
|
Pointer to the list tail where we add the next NESTED PATH.
|
||||||
It points to the cur_parnt->m_nested for the first nested
|
It points to the cur_parnt->m_nested for the first nested
|
||||||
and prev_nested->m_next_nested for the coesequent ones.
|
and prev_nested->m_next_nested for the consequent ones.
|
||||||
*/
|
*/
|
||||||
Json_table_nested_path **last_sibling_hook;
|
Json_table_nested_path **last_sibling_hook;
|
||||||
};
|
};
|
||||||
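A hedged, standalone illustration of the "pointer to the list tail" idiom the comment describes (the structs below are invented for the example, not the real Json_table classes): the hook always points at the pointer slot the next sibling must be written into, so appending needs no special case for the first child.

struct nested_path_sketch
{
  nested_path_sketch *m_nested= nullptr;        // first nested path (child)
  nested_path_sketch *m_next_nested= nullptr;   // next sibling on the same level
};

struct nested_list_builder_sketch
{
  nested_path_sketch **last_sibling_hook;

  explicit nested_list_builder_sketch(nested_path_sketch *parent)
    : last_sibling_hook(&parent->m_nested) {}   // first append goes to parent->m_nested

  void add_nested(nested_path_sketch *np)
  {
    *last_sibling_hook= np;                     // append at the current tail
    last_sibling_hook= &np->m_next_nested;      // consequent ones chain via m_next_nested
  }
};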
|
@ -114,7 +114,7 @@ Lex_exact_collation::raise_if_not_equal(const Lex_exact_collation &cl) const
|
|||||||
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Merge an exact collation and a contexual collation.
|
Merge an exact collation and a contextual collation.
|
||||||
@param cl - The contextual collation to merge to "this".
|
@param cl - The contextual collation to merge to "this".
|
||||||
@param reverse_order - If the contextual collation is on the left side
|
@param reverse_order - If the contextual collation is on the left side
|
||||||
|
|
||||||
|
@ -444,7 +444,7 @@ public:
|
|||||||
CREATE TABLE t2 (a CHAR(10) BINARY) CHARACTER SET latin2; -- (3a)
|
CREATE TABLE t2 (a CHAR(10) BINARY) CHARACTER SET latin2; -- (3a)
|
||||||
CREATE TABLE t2 (a CHAR(10) BINARY); -- (3b)
|
CREATE TABLE t2 (a CHAR(10) BINARY); -- (3b)
|
||||||
CREATE TABLE t2 (a CHAR(10) COLLATE DEFAULT)
|
CREATE TABLE t2 (a CHAR(10) COLLATE DEFAULT)
|
||||||
CHARACER SET latin2 COLLATE latin2_bin; -- (3c)
|
CHARACTER SET latin2 COLLATE latin2_bin; -- (3c)
|
||||||
|
|
||||||
In case of an empty or a contextually typed collation,
|
In case of an empty or a contextually typed collation,
|
||||||
it is a subject to later resolution, when the context
|
it is a subject to later resolution, when the context
|
||||||
|
@ -53,7 +53,7 @@ struct Compare_ident_ci
|
|||||||
1. {ptr==NULL,length==0} is valid and means "NULL identifier".
|
1. {ptr==NULL,length==0} is valid and means "NULL identifier".
|
||||||
2a. {ptr<>NULL,length==0} means "empty identifier".
|
2a. {ptr<>NULL,length==0} means "empty identifier".
|
||||||
2b. {ptr<>NULL,length>0} means "not empty identifier.
|
2b. {ptr<>NULL,length>0} means "not empty identifier.
|
||||||
In case of 2a and 2b, ptr must be a '\0'-terninated string.
|
In case of 2a and 2b, ptr must be a '\0'-terminated string.
|
||||||
|
|
||||||
Comparison operands passed to streq() are not required to be 0-terminated.
|
Comparison operands passed to streq() are not required to be 0-terminated.
|
||||||
|
|
||||||
@ -61,7 +61,7 @@ struct Compare_ident_ci
|
|||||||
- inside methods of this class
|
- inside methods of this class
|
||||||
- inside st_charset_info::streq() in include/m_ctype.h
|
- inside st_charset_info::streq() in include/m_ctype.h
|
||||||
The caller must make sure to maintain the object in the valid state,
|
The caller must make sure to maintain the object in the valid state,
|
||||||
as well as provide valid LEX_CSTRING instances for comparion.
|
as well as provide valid LEX_CSTRING instances for comparison.
|
||||||
|
|
||||||
For better code stability, the Lex_cstring base should eventually be
|
For better code stability, the Lex_cstring base should eventually be
|
||||||
encapsulated, so the object debug validation is done at constructor
|
encapsulated, so the object debug validation is done at constructor
|
||||||
@ -468,7 +468,7 @@ public:
|
|||||||
Lex_ident_db::check_name().
|
Lex_ident_db::check_name().
|
||||||
|
|
||||||
Note, the database name passed to the constructor can originally
|
Note, the database name passed to the constructor can originally
|
||||||
come from the parser and can be of an atribtrary long length.
|
come from the parser and can be of arbitrary length.
|
||||||
Let's reserve additional buffer space for one extra character
|
Let's reserve additional buffer space for one extra character
|
||||||
(SYSTEM_CHARSET_MBMAXLEN bytes), so Lex_ident_db::check_name() can
|
(SYSTEM_CHARSET_MBMAXLEN bytes), so Lex_ident_db::check_name() can
|
||||||
still detect too long names even if the constructor cuts the data.
|
still detect too long names even if the constructor cuts the data.
|
||||||
|
26
sql/log.cc
@ -162,8 +162,8 @@ static SHOW_VAR binlog_status_vars_detail[]=
|
|||||||
};
|
};
|
||||||
|
|
||||||
/**
|
/**
|
||||||
This class implementes the feature to rename a binlog cache temporary file to
|
This class implements the feature to rename a binlog cache temporary file to
|
||||||
a binlog file. It is used to avoid holding LOCK_log long time when writting a
|
a binlog file. It is used to avoid holding LOCK_log long time when writing a
|
||||||
huge binlog cache to binlog file.
|
huge binlog cache to binlog file.
|
||||||
|
|
||||||
With this feature, temporary files of binlog caches will be created in
|
With this feature, temporary files of binlog caches will be created in
|
||||||
@ -2227,7 +2227,7 @@ inline bool is_prepared_xa(THD *thd)
|
|||||||
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
We flush the cache wrapped in a beging/rollback if:
|
We flush the cache wrapped in a begin/rollback if:
|
||||||
. aborting a single or multi-statement transaction and;
|
. aborting a single or multi-statement transaction and;
|
||||||
. the OPTION_BINLOG_THIS_TRX is active or;
|
. the OPTION_BINLOG_THIS_TRX is active or;
|
||||||
. the format is STMT and a non-trans table was updated or;
|
. the format is STMT and a non-trans table was updated or;
|
||||||
@ -3274,7 +3274,7 @@ void MYSQL_QUERY_LOG::reopen_file()
|
|||||||
|
|
||||||
DESCRIPTION
|
DESCRIPTION
|
||||||
|
|
||||||
Log given command to to normal (not rotable) log file
|
Log given command to normal (not rotatable) log file
|
||||||
|
|
||||||
RETURN
|
RETURN
|
||||||
FASE - OK
|
FASE - OK
|
||||||
@ -4696,7 +4696,7 @@ bool MYSQL_BIN_LOG::reset_logs(THD *thd, bool create_new_log,
|
|||||||
|
|
||||||
/*
|
/*
|
||||||
Close the active log.
|
Close the active log.
|
||||||
Close the active GTID index synchroneously. We don't want the close
|
Close the active GTID index synchronously. We don't want the close
|
||||||
running in the background while we delete the gtid index file. And we just
|
running in the background while we delete the gtid index file. And we just
|
||||||
pushed a sentinel through the binlog background thread while holding
|
pushed a sentinel through the binlog background thread while holding
|
||||||
LOCK_log, so no other GTID index operations can be pending.
|
LOCK_log, so no other GTID index operations can be pending.
|
||||||
@ -5724,7 +5724,7 @@ error:
|
|||||||
/* Remove directory (to keep things shorter and compatible */
|
/* Remove directory (to keep things shorter and compatible */
|
||||||
log_file_name_arg+= dirname_length(log_file_name_arg);
|
log_file_name_arg+= dirname_length(log_file_name_arg);
|
||||||
|
|
||||||
/* purge_warning_given is reset after next sucessful purge */
|
/* purge_warning_given is reset after next successful purge */
|
||||||
purge_warning_given= 1;
|
purge_warning_given= 1;
|
||||||
if (interactive)
|
if (interactive)
|
||||||
{
|
{
|
||||||
@ -5870,7 +5870,7 @@ bool MYSQL_BIN_LOG::is_active(const char *log_file_name_arg)
|
|||||||
* #12 next_event
|
* #12 next_event
|
||||||
* #13 exec_relay_log_event
|
* #13 exec_relay_log_event
|
||||||
*
|
*
|
||||||
* I didn't investigate if this is ligit...(i.e if my comment is wrong)
|
* I didn't investigate if this is legit...(i.e if my comment is wrong)
|
||||||
*/
|
*/
|
||||||
return !strcmp(log_file_name, log_file_name_arg);
|
return !strcmp(log_file_name, log_file_name_arg);
|
||||||
}
|
}
|
||||||
@ -8071,7 +8071,7 @@ int MYSQL_BIN_LOG::rotate_and_purge(bool force_rotate,
|
|||||||
|
|
||||||
if ((err_gtid= do_delete_gtid_domain(domain_drop_lex)))
|
if ((err_gtid= do_delete_gtid_domain(domain_drop_lex)))
|
||||||
{
|
{
|
||||||
// inffective attempt to delete merely skips rotate and purge
|
// ineffective attempt to delete merely skips rotate and purge
|
||||||
if (err_gtid < 0)
|
if (err_gtid < 0)
|
||||||
error= 1; // otherwise error is propagated the user
|
error= 1; // otherwise error is propagated the user
|
||||||
}
|
}
|
||||||
@ -8600,7 +8600,7 @@ MYSQL_BIN_LOG::queue_for_group_commit(group_commit_entry *orig_entry)
|
|||||||
|
|
||||||
If waitee->commit_started is set, it means that the transaction we need
|
If waitee->commit_started is set, it means that the transaction we need
|
||||||
to wait for has already queued up for group commit. In this case it is
|
to wait for has already queued up for group commit. In this case it is
|
||||||
safe for us to queue up immediately as well, increasing the opprtunities
|
safe for us to queue up immediately as well, increasing the opportunities
|
||||||
for group commit. Because waitee has taken the LOCK_prepare_ordered
|
for group commit. Because waitee has taken the LOCK_prepare_ordered
|
||||||
before setting the flag, so there is no risk that we can queue ahead of
|
before setting the flag, so there is no risk that we can queue ahead of
|
||||||
it.
|
it.
|
||||||
@ -11317,7 +11317,7 @@ TC_LOG_BINLOG::mark_xid_done(ulong binlog_id, bool write_checkpoint)
|
|||||||
most recent binlog.
|
most recent binlog.
|
||||||
|
|
||||||
Note also that we need to first release LOCK_xid_list, then acquire
|
Note also that we need to first release LOCK_xid_list, then acquire
|
||||||
LOCK_log, then re-aquire LOCK_xid_list. If we were to take LOCK_log while
|
LOCK_log, then re-acquire LOCK_xid_list. If we were to take LOCK_log while
|
||||||
holding LOCK_xid_list, we might deadlock with other threads that take the
|
holding LOCK_xid_list, we might deadlock with other threads that take the
|
||||||
locks in the opposite order.
|
locks in the opposite order.
|
||||||
*/
|
*/
|
||||||
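A bare-bones sketch of the ordering rule spelled out above, with plain std::mutex objects standing in for LOCK_xid_list and LOCK_log: a thread that already holds LOCK_xid_list drops it, takes LOCK_log, and only then re-acquires LOCK_xid_list, so both locks are always taken in the same global order.

#include <mutex>

// Precondition (assumed for the sketch): the caller currently holds lock_xid_list
// and now also needs lock_log.
static void relock_in_canonical_order(std::mutex &lock_xid_list, std::mutex &lock_log)
{
  lock_xid_list.unlock();   // 1) release LOCK_xid_list first
  lock_log.lock();          // 2) acquire LOCK_log
  lock_xid_list.lock();     // 3) re-acquire LOCK_xid_list
  // Both locks are now held in the canonical order; threads that always take
  // LOCK_log before LOCK_xid_list cannot deadlock against this sequence.
}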
@ -11842,7 +11842,7 @@ public:
|
|||||||
Otherwise enumeration starts with zero for the first file, increments
|
Otherwise enumeration starts with zero for the first file, increments
|
||||||
by one for any next file except for the last file in the list, which
|
by one for any next file except for the last file in the list, which
|
||||||
is also the initial binlog file for recovery,
|
is also the initial binlog file for recovery,
|
||||||
that is enumberated with UINT_MAX.
|
that is enumerated with UINT_MAX.
|
||||||
*/
|
*/
|
||||||
Binlog_file_id id_binlog;
|
Binlog_file_id id_binlog;
|
||||||
enum_binlog_checksum_alg checksum_alg;
|
enum_binlog_checksum_alg checksum_alg;
|
||||||
@ -11925,7 +11925,7 @@ public:
|
|||||||
Is invoked when a standalone or non-2pc group is detected.
|
Is invoked when a standalone or non-2pc group is detected.
|
||||||
Both are unsafe to truncate in the semisync-slave recovery so
|
Both are unsafe to truncate in the semisync-slave recovery so
|
||||||
the maximum unsafe coordinate may be updated.
|
the maximum unsafe coordinate may be updated.
|
||||||
In the non-2pc group case though, *exeptionally*,
|
In the non-2pc group case though, *exceptionally*,
|
||||||
the no-engine group is considered safe, to be invalidated
|
the no-engine group is considered safe, to be invalidated
|
||||||
to not contribute to binlog state.
|
to not contribute to binlog state.
|
||||||
*/
|
*/
|
||||||
@ -12136,7 +12136,7 @@ bool Recovery_context::decide_or_assess(xid_recovery_member *member, int round,
|
|||||||
if (!truncate_validated)
|
if (!truncate_validated)
|
||||||
{
|
{
|
||||||
if (truncate_gtid.seq_no == 0 /* was reset or never set */ ||
|
if (truncate_gtid.seq_no == 0 /* was reset or never set */ ||
|
||||||
(truncate_set_in_1st && round == 2 /* reevaluted at round turn */))
|
(truncate_set_in_1st && round == 2 /* reevaluated at round turn */))
|
||||||
{
|
{
|
||||||
if (set_truncate_coord(linfo, round, fdle->used_checksum_alg))
|
if (set_truncate_coord(linfo, round, fdle->used_checksum_alg))
|
||||||
return true;
|
return true;
|
||||||
|
@ -260,7 +260,7 @@ extern TC_LOG_DUMMY tc_log_dummy;
|
|||||||
class Relay_log_info;
|
class Relay_log_info;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Note that we destroy the lock mutex in the desctructor here.
|
Note that we destroy the lock mutex in the destructor here.
|
||||||
This means that object instances cannot be destroyed/go out of scope,
|
This means that object instances cannot be destroyed/go out of scope,
|
||||||
until we have reset thd->current_linfo to NULL;
|
until we have reset thd->current_linfo to NULL;
|
||||||
*/
|
*/
|
||||||
@ -838,7 +838,7 @@ public:
|
|||||||
FD - Format-Description event,
|
FD - Format-Description event,
|
||||||
R - Rotate event
|
R - Rotate event
|
||||||
R_f - the fake Rotate event
|
R_f - the fake Rotate event
|
||||||
E - an arbirary event
|
E - an arbitrary event
|
||||||
|
|
||||||
The underscore indexes for any event
|
The underscore indexes for any event
|
||||||
`_s' indicates the event is generated by Slave
|
`_s' indicates the event is generated by Slave
|
||||||
|
@ -350,7 +350,7 @@ int binlog_buf_compress(const uchar *src, uchar *dst, uint32 len, uint32 *comlen
|
|||||||
@Note:
|
@Note:
|
||||||
1) The caller should call my_free to release 'dst' if *is_malloc is
|
1) The caller should call my_free to release 'dst' if *is_malloc is
|
||||||
returned as true.
|
returned as true.
|
||||||
2) If *is_malloc is retuened as false, then 'dst' reuses the passed-in
|
2) If *is_malloc is returned as false, then 'dst' reuses the passed-in
|
||||||
'buf'.
|
'buf'.
|
||||||
|
|
||||||
return zero if successful, non-zero otherwise.
|
return zero if successful, non-zero otherwise.
|
||||||
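A generic sketch of the ownership contract the note above describes; demo_compress() is a stand-in, not the real binlog_buf_compress(), and plain malloc/free replace my_malloc/my_free. The point is only the *is_malloc handshake: the callee reports whether it allocated, and the caller frees only in that case.

#include <cstddef>
#include <cstdlib>
#include <cstring>

static unsigned char *demo_compress(const unsigned char *src, size_t len,
                                    unsigned char *buf, size_t buf_size,
                                    size_t *out_len, bool *is_malloc)
{
  unsigned char *dst= buf;
  *is_malloc= false;
  if (len > buf_size)                              // caller's buffer is too small
  {
    if (!(dst= (unsigned char *) std::malloc(len)))
      return nullptr;                              // allocation failure
    *is_malloc= true;
  }
  std::memcpy(dst, src, len);                      // placeholder for real compression
  *out_len= len;
  return dst;
}

static void demo_caller(const unsigned char *src, size_t len)
{
  unsigned char stack_buf[256];
  size_t out_len;
  bool is_malloc;
  unsigned char *dst= demo_compress(src, len, stack_buf, sizeof(stack_buf),
                                    &out_len, &is_malloc);
  if (!dst)
    return;
  // ... use dst[0 .. out_len) ...
  if (is_malloc)
    std::free(dst);                                // release only if the callee allocated
}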
@ -1659,9 +1659,9 @@ Query_log_event::Query_log_event(const uchar *buf, uint event_len,
|
|||||||
+--------+-----------+------+------+---------+----+-------+
|
+--------+-----------+------+------+---------+----+-------+
|
||||||
|
|
||||||
To support the query cache we append the following buffer to the above
|
To support the query cache we append the following buffer to the above
|
||||||
+-------+----------------------------------------+-------+
|
+-------+---------------------------------------+-------+
|
||||||
|db len | uninitiatlized space of size of db len | FLAGS |
|
|db len | uninitialized space of size of db len | FLAGS |
|
||||||
+-------+----------------------------------------+-------+
|
+-------+---------------------------------------+-------+
|
||||||
|
|
||||||
The area of buffer starting from Query field all the way to the end belongs
|
The area of buffer starting from Query field all the way to the end belongs
|
||||||
to the Query buffer and its structure is described in alloc_query() in
|
to the Query buffer and its structure is described in alloc_query() in
|
||||||
@ -2336,7 +2336,7 @@ Format_description_log_event::is_version_before_checksum(const master_version_sp
|
|||||||
|
|
||||||
@return the version-safe checksum alg descriptor where zero
|
@return the version-safe checksum alg descriptor where zero
|
||||||
designates no checksum, 255 - the orginator is
|
designates no checksum, 255 - the orginator is
|
||||||
checksum-unaware (effectively no checksum) and the actuall
|
checksum-unaware (effectively no checksum) and the actual
|
||||||
[1-254] range alg descriptor.
|
[1-254] range alg descriptor.
|
||||||
*/
|
*/
|
||||||
enum_binlog_checksum_alg get_checksum_alg(const uchar *buf, ulong len)
|
enum_binlog_checksum_alg get_checksum_alg(const uchar *buf, ulong len)
|
||||||
@ -2499,7 +2499,7 @@ Gtid_log_event::Gtid_log_event(const uchar *buf, uint event_len,
|
|||||||
{
|
{
|
||||||
flags_extra= *buf++;
|
flags_extra= *buf++;
|
||||||
/*
|
/*
|
||||||
extra engines flags presence is identifed by non-zero byte value
|
extra engines flags presence is identified by non-zero byte value
|
||||||
at this point
|
at this point
|
||||||
*/
|
*/
|
||||||
if (flags_extra & FL_EXTRA_MULTI_ENGINE_E1)
|
if (flags_extra & FL_EXTRA_MULTI_ENGINE_E1)
|
||||||
@ -2626,7 +2626,7 @@ Gtid_list_log_event::Gtid_list_log_event(const uchar *buf, uint event_len,
|
|||||||
|
|
||||||
/*
|
/*
|
||||||
Used to record gtid_list event while sending binlog to slave, without having to
|
Used to record gtid_list event while sending binlog to slave, without having to
|
||||||
fully contruct the event object.
|
fully construct the event object.
|
||||||
*/
|
*/
|
||||||
bool
|
bool
|
||||||
Gtid_list_log_event::peek(const char *event_start, size_t event_len,
|
Gtid_list_log_event::peek(const char *event_start, size_t event_len,
|
||||||
|
@ -517,7 +517,7 @@ class String;
|
|||||||
Flag sets by the semisync slave for accepting
|
Flag sets by the semisync slave for accepting
|
||||||
the same server_id ("own") events which the slave must not have
|
the same server_id ("own") events which the slave must not have
|
||||||
in its state. Typically such events were never committed by
|
in its state. Typically such events were never committed by
|
||||||
their originator (this server) and discared at its semisync-slave recovery.
|
their originator (this server) and discarded at its semisync-slave recovery.
|
||||||
*/
|
*/
|
||||||
#define LOG_EVENT_ACCEPT_OWN_F 0x4000
|
#define LOG_EVENT_ACCEPT_OWN_F 0x4000
|
||||||
|
|
||||||
@ -2305,7 +2305,7 @@ public: /* !!! Public in this patch to allow old usage */
|
|||||||
binlogged with comments in the front of these keywords. for examples:
|
binlogged with comments in the front of these keywords. for examples:
|
||||||
/ * bla bla * / SAVEPOINT a;
|
/ * bla bla * / SAVEPOINT a;
|
||||||
/ * bla bla * / ROLLBACK TO a;
|
/ * bla bla * / ROLLBACK TO a;
|
||||||
but we don't handle these cases and after the patch, both quiries are
|
but we don't handle these cases and after the patch, both queries are
|
||||||
binlogged in upper case with no comments.
|
binlogged in upper case with no comments.
|
||||||
*/
|
*/
|
||||||
return is_xa ? !strncasecmp(query, C_STRING_WITH_LEN("XA "))
|
return is_xa ? !strncasecmp(query, C_STRING_WITH_LEN("XA "))
|
||||||
@ -2877,7 +2877,7 @@ private:
|
|||||||
when @c one_phase member is off. The latter option is only for
|
when @c one_phase member is off. The latter option is only for
|
||||||
compatibility with the upstream.
|
compatibility with the upstream.
|
||||||
|
|
||||||
From the groupping perspective the event finalizes the current
|
From the grouping perspective the event finalizes the current
|
||||||
"prepare" group that is started with Gtid_log_event similarly to the
|
"prepare" group that is started with Gtid_log_event similarly to the
|
||||||
regular replicated transaction.
|
regular replicated transaction.
|
||||||
*/
|
*/
|
||||||
@ -4210,7 +4210,7 @@ class table_def;
|
|||||||
<td>signedness of numeric colums. This is included for all values of
|
<td>signedness of numeric colums. This is included for all values of
|
||||||
binlog_row_metadata.</td>
|
binlog_row_metadata.</td>
|
||||||
<td>For each numeric column, a bit indicates whether the numeric
|
<td>For each numeric column, a bit indicates whether the numeric
|
||||||
colunm has unsigned flag. 1 means it is unsigned. The number of
|
column has unsigned flag. 1 means it is unsigned. The number of
|
||||||
bytes needed for this is int((column_count + 7) / 8). The order is
|
bytes needed for this is int((column_count + 7) / 8). The order is
|
||||||
the same as the order of column_type field.</td>
|
the same as the order of column_type field.</td>
|
||||||
</tr>
|
</tr>
|
||||||
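A small worked example of the layout that table cell describes: one bit per numeric column, packed into (column_count + 7) / 8 bytes. The most-significant-bit-first packing order used here is an assumption made for the example.

#include <cstdint>
#include <vector>

static std::vector<uint8_t>
pack_unsigned_flags(const std::vector<bool> &column_is_unsigned)
{
  std::vector<uint8_t> out((column_is_unsigned.size() + 7) / 8, 0);
  for (size_t i= 0; i < column_is_unsigned.size(); i++)
    if (column_is_unsigned[i])                     // 1 means the column is unsigned
      out[i / 8]|= (uint8_t) (1U << (7 - i % 8));  // MSB-first within each byte (assumed)
  return out;
}

// Example: 10 numeric columns need (10 + 7) / 8 == 2 bytes of signedness flags.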
|
@ -1204,7 +1204,7 @@ void Rows_log_event::change_to_flashback_event(PRINT_EVENT_INFO *print_event_inf
|
|||||||
{
|
{
|
||||||
if (!bi_fields)
|
if (!bi_fields)
|
||||||
{
|
{
|
||||||
// Both bi and ai inclues all columns, Swap WHERE and SET Part
|
// Both bi and ai include all columns, Swap WHERE and SET Part
|
||||||
memcpy(one_row.str, start_pos + length1, length2);
|
memcpy(one_row.str, start_pos + length1, length2);
|
||||||
memcpy(one_row.str+length2, start_pos, length1);
|
memcpy(one_row.str+length2, start_pos, length1);
|
||||||
}
|
}
|
||||||
@ -2748,7 +2748,7 @@ const char fmt_binlog2[]= "BINLOG @binlog_fragment_0, @binlog_fragment_1%s\n";
|
|||||||
SQL cover.
|
SQL cover.
|
||||||
@param delimiter delimiter string
|
@param delimiter delimiter string
|
||||||
|
|
||||||
@param is_verbose MDEV-10362 workraround parameter to pass
|
@param is_verbose MDEV-10362 workaround parameter to pass
|
||||||
info on presence of verbose printout in cache encoded data
|
info on presence of verbose printout in cache encoded data
|
||||||
|
|
||||||
The function signals on any error through setting @c body->error to -1.
|
The function signals on any error through setting @c body->error to -1.
|
||||||
@ -3291,7 +3291,7 @@ Table_map_log_event::Charset_iterator::create_charset_iterator(
|
|||||||
@param[in|out] meta_ptr the meta_ptr of the column. If the type doesn't have
|
@param[in|out] meta_ptr the meta_ptr of the column. If the type doesn't have
|
||||||
metadata, it will not change meta_ptr, otherwise
|
metadata, it will not change meta_ptr, otherwise
|
||||||
meta_ptr will be moved to the end of the column's
|
meta_ptr will be moved to the end of the column's
|
||||||
metadat.
|
metadata.
|
||||||
@param[in] cs charset of the column if it is a character column.
|
@param[in] cs charset of the column if it is a character column.
|
||||||
@param[out] typestr buffer to storing the string name of the type
|
@param[out] typestr buffer to storing the string name of the type
|
||||||
@param[in] typestr_length length of typestr
|
@param[in] typestr_length length of typestr
|
||||||
|
@ -765,7 +765,7 @@ int Log_event_writer::write_header(uchar *pos, size_t len)
|
|||||||
/*
|
/*
|
||||||
recording checksum of FD event computed with dropped
|
recording checksum of FD event computed with dropped
|
||||||
possibly active LOG_EVENT_BINLOG_IN_USE_F flag.
|
possibly active LOG_EVENT_BINLOG_IN_USE_F flag.
|
||||||
Similar step at verication: the active flag is dropped before
|
Similar step at verification: the active flag is dropped before
|
||||||
checksum computing.
|
checksum computing.
|
||||||
*/
|
*/
|
||||||
if (checksum_len)
|
if (checksum_len)
|
||||||
@ -1666,7 +1666,7 @@ int Query_log_event::handle_split_alter_query_log_event(rpl_group_info *rgi,
|
|||||||
if (is_CA)
|
if (is_CA)
|
||||||
{
|
{
|
||||||
/*
|
/*
|
||||||
error handeling, direct_commit_alter is turned on, so that we dont
|
error handling, direct_commit_alter is turned on, so that we don't
|
||||||
wait for master reply in mysql_alter_table (in wait_for_master)
|
wait for master reply in mysql_alter_table (in wait_for_master)
|
||||||
*/
|
*/
|
||||||
rgi->direct_commit_alter= true;
|
rgi->direct_commit_alter= true;
|
||||||
@ -2217,7 +2217,7 @@ compare_errors:
|
|||||||
else if (actual_error == ER_XAER_NOTA && !rpl_filter->db_ok(get_db()))
|
else if (actual_error == ER_XAER_NOTA && !rpl_filter->db_ok(get_db()))
|
||||||
{
|
{
|
||||||
/*
|
/*
|
||||||
If there is an XA query whos XID cannot be found, if the replication
|
If there is an XA query whose XID cannot be found, if the replication
|
||||||
filter is active and filters the target database, assume that the XID
|
filter is active and filters the target database, assume that the XID
|
||||||
cache has been cleared (e.g. by server restart) since it was prepared,
|
cache has been cleared (e.g. by server restart) since it was prepared,
|
||||||
so we can just ignore this event.
|
so we can just ignore this event.
|
||||||
@ -2930,7 +2930,7 @@ Gtid_log_event::Gtid_log_event(THD *thd_arg, uint64 seq_no_arg,
|
|||||||
|
|
||||||
/*
|
/*
|
||||||
Used to record GTID while sending binlog to slave, without having to
|
Used to record GTID while sending binlog to slave, without having to
|
||||||
fully contruct every Gtid_log_event() needlessly.
|
fully construct every Gtid_log_event() needlessly.
|
||||||
*/
|
*/
|
||||||
bool
|
bool
|
||||||
Gtid_log_event::peek(const uchar *event_start, size_t event_len,
|
Gtid_log_event::peek(const uchar *event_start, size_t event_len,
|
||||||
@ -5468,7 +5468,7 @@ static int rows_event_stmt_cleanup(rpl_group_info *rgi, THD * thd)
|
|||||||
/**
|
/**
|
||||||
The method either increments the relay log position or
|
The method either increments the relay log position or
|
||||||
commits the current statement and increments the master group
|
commits the current statement and increments the master group
|
||||||
possition if the event is STMT_END_F flagged and
|
position if the event is STMT_END_F flagged and
|
||||||
the statement corresponds to the autocommit query (i.e replicated
|
the statement corresponds to the autocommit query (i.e replicated
|
||||||
without wrapping in BEGIN/COMMIT)
|
without wrapping in BEGIN/COMMIT)
|
||||||
|
|
||||||
@ -6620,7 +6620,7 @@ Write_rows_log_event::do_before_row_operations(const rpl_group_info *)
|
|||||||
/*
|
/*
|
||||||
* Fixed Bug#45999, In RBR, Store engine of Slave auto-generates new
|
* Fixed Bug#45999, In RBR, Store engine of Slave auto-generates new
|
||||||
* sequence numbers for auto_increment fields if the values of them are 0.
|
* sequence numbers for auto_increment fields if the values of them are 0.
|
||||||
* If generateing a sequence number is decided by the values of
|
* If generating a sequence number is decided by the values of
|
||||||
* table->auto_increment_field_not_null and SQL_MODE(if includes
|
* table->auto_increment_field_not_null and SQL_MODE(if includes
|
||||||
* MODE_NO_AUTO_VALUE_ON_ZERO) in update_auto_increment function.
|
* MODE_NO_AUTO_VALUE_ON_ZERO) in update_auto_increment function.
|
||||||
* SQL_MODE of slave sql thread is always consistency with master's.
|
* SQL_MODE of slave sql thread is always consistency with master's.
|
||||||
@ -6806,7 +6806,7 @@ is_duplicate_key_error(int errcode)
|
|||||||
|
|
||||||
The row to be inserted can contain values only for selected columns. The
|
The row to be inserted can contain values only for selected columns. The
|
||||||
missing columns are filled with default values using @c prepare_record()
|
missing columns are filled with default values using @c prepare_record()
|
||||||
function. If a matching record is found in the table and @c overwritte is
|
function. If a matching record is found in the table and @c overwrite is
|
||||||
true, the missing columns are taken from it.
|
true, the missing columns are taken from it.
|
||||||
|
|
||||||
@param rli Relay log info (needed for row unpacking).
|
@param rli Relay log info (needed for row unpacking).
|
||||||
@ -7374,7 +7374,7 @@ uint Rows_log_event::find_key_parts(const KEY *key) const
|
|||||||
Find the best key to use when locating the row in @c find_row().
|
Find the best key to use when locating the row in @c find_row().
|
||||||
|
|
||||||
A primary key is preferred if it exists; otherwise a unique index is
|
A primary key is preferred if it exists; otherwise a unique index is
|
||||||
preferred. Else we pick the index with the smalles rec_per_key value.
|
preferred. Else we pick the index with the smallest rec_per_key value.
|
||||||
|
|
||||||
If a suitable key is found, set @c m_key, @c m_key_nr, @c m_key_info,
|
If a suitable key is found, set @c m_key, @c m_key_nr, @c m_key_info,
|
||||||
and @c m_usable_key_parts member fields appropriately.
|
and @c m_usable_key_parts member fields appropriately.
|
||||||
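A simplified, standalone rendering of the preference order stated above: a primary key wins outright, otherwise the first unique key, otherwise the key with the smallest rec_per_key. The key_info_sketch struct is invented for the example; it is not the server's KEY structure.

#include <cstddef>
#include <vector>

struct key_info_sketch { bool is_primary; bool is_unique; double rec_per_key; };

static std::ptrdiff_t pick_best_key(const std::vector<key_info_sketch> &keys)
{
  std::ptrdiff_t best= -1;
  for (size_t i= 0; i < keys.size(); i++)
  {
    if (keys[i].is_primary)
      return (std::ptrdiff_t) i;                 // primary key is always preferred
    if (keys[i].is_unique &&
        (best < 0 || !keys[(size_t) best].is_unique))
      best= (std::ptrdiff_t) i;                  // first unique key found so far
    else if (best < 0 ||
             (!keys[(size_t) best].is_unique &&
              keys[i].rec_per_key < keys[(size_t) best].rec_per_key))
      best= (std::ptrdiff_t) i;                  // otherwise smallest rec_per_key wins
  }
  return best;                                   // -1 when the table has no usable key
}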
|
@ -19,7 +19,7 @@
|
|||||||
are always included first.
|
are always included first.
|
||||||
It can also be used to speed up compilation by using precompiled headers.
|
It can also be used to speed up compilation by using precompiled headers.
|
||||||
|
|
||||||
This file should include a minum set of header files used by all files
|
This file should include a minimum set of header files used by all files
|
||||||
and header files that are very seldom changed.
|
and header files that are very seldom changed.
|
||||||
It can also include some defines that all files should be aware of.
|
It can also include some defines that all files should be aware of.
|
||||||
*/
|
*/
|
||||||
|
@ -538,7 +538,7 @@ public:
|
|||||||
|
|
||||||
/*
|
/*
|
||||||
In backup namespace DML/DDL may starve because of concurrent FTWRL or
|
In backup namespace DML/DDL may starve because of concurrent FTWRL or
|
||||||
BACKUP statements. This scenario is partically useless in real world,
|
BACKUP statements. This scenario is practically useless in real world,
|
||||||
so we just return 0 here.
|
so we just return 0 here.
|
||||||
*/
|
*/
|
||||||
bitmap_t hog_lock_types_bitmap() const override
|
bitmap_t hog_lock_types_bitmap() const override
|
||||||
|
@ -28,7 +28,7 @@
|
|||||||
One can change info->pos_in_file to a higher value to skip bytes in file if
|
One can change info->pos_in_file to a higher value to skip bytes in file if
|
||||||
also info->read_pos is set to info->read_end.
|
also info->read_pos is set to info->read_end.
|
||||||
If called through open_cached_file(), then the temporary file will
|
If called through open_cached_file(), then the temporary file will
|
||||||
only be created if a write exeeds the file buffer or if one calls
|
only be created if a write exceeds the file buffer or if one calls
|
||||||
flush_io_cache().
|
flush_io_cache().
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
@ -1420,7 +1420,7 @@ int DsMrr_impl::setup_two_handlers()
|
|||||||
{
|
{
|
||||||
DBUG_ASSERT(secondary_file && secondary_file->inited==handler::INDEX);
|
DBUG_ASSERT(secondary_file && secondary_file->inited==handler::INDEX);
|
||||||
/*
|
/*
|
||||||
We get here when the access alternates betwen MRR scan(s) and non-MRR
|
We get here when the access alternates between MRR scan(s) and non-MRR
|
||||||
scans.
|
scans.
|
||||||
|
|
||||||
Calling primary_file->index_end() will invoke dsmrr_close() for this
|
Calling primary_file->index_end() will invoke dsmrr_close() for this
|
||||||
|
@ -157,7 +157,7 @@ public:
|
|||||||
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Buffer manager interface. Mrr_reader objects use it to inqure DsMrr_impl
|
Buffer manager interface. Mrr_reader objects use it to inquire DsMrr_impl
|
||||||
to manage buffer space for them.
|
to manage buffer space for them.
|
||||||
*/
|
*/
|
||||||
typedef struct st_buffer_manager
|
typedef struct st_buffer_manager
|
||||||
|
@ -23,7 +23,7 @@
|
|||||||
- This is an APC request queue
|
- This is an APC request queue
|
||||||
- We assume there is a particular owner thread which periodically calls
|
- We assume there is a particular owner thread which periodically calls
|
||||||
process_apc_requests() to serve the call requests.
|
process_apc_requests() to serve the call requests.
|
||||||
- Other threads can post call requests, and block until they are exectued.
|
- Other threads can post call requests, and block until they are executed.
|
||||||
)
|
)
|
||||||
|
|
||||||
Implementation
|
Implementation
|
||||||
@ -31,7 +31,7 @@
|
|||||||
- The target has a mutex-guarded request queue.
|
- The target has a mutex-guarded request queue.
|
||||||
|
|
||||||
- After the request has been put into queue, the requestor waits for request
|
- After the request has been put into queue, the requestor waits for request
|
||||||
to be satisfied. The worker satisifes the request and signals the
|
to be satisfied. The worker satisfies the request and signals the
|
||||||
requestor.
|
requestor.
|
||||||
*/
|
*/
|
||||||
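A hedged sketch of the scheme outlined in the two comments above, using standard mutex and condition-variable primitives rather than the real Apc_target API: requestors enqueue a call and block until it has run; the owner thread periodically drains the queue, serves each call, and signals the requestors.

#include <condition_variable>
#include <deque>
#include <functional>
#include <mutex>

class apc_queue_sketch
{
  struct request { std::function<void()> call; bool done; };
  std::mutex mtx;
  std::condition_variable cond;
  std::deque<request*> queue;

public:
  void post_and_wait(std::function<void()> call)   // requestor side
  {
    request req{std::move(call), false};
    std::unique_lock<std::mutex> lk(mtx);
    queue.push_back(&req);
    cond.wait(lk, [&req]{ return req.done; });     // block until the call has run
  }

  void process_requests()                          // owner thread side
  {
    std::unique_lock<std::mutex> lk(mtx);
    while (!queue.empty())
    {
      request *req= queue.front();
      queue.pop_front();
      req->call();                                 // serve the call request
      req->done= true;
    }
    cond.notify_all();                             // wake up the blocked requestors
  }
};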
|
|
||||||
|
@ -53,8 +53,8 @@ using JOIN_TAB= struct st_join_table;
|
|||||||
|
|
||||||
arrayName : [ "boo", 123, 456 ]
|
arrayName : [ "boo", 123, 456 ]
|
||||||
|
|
||||||
and actually print them on one line. Arrrays that occupy too much space on
|
and actually print them on one line. Arrays that occupy too much space on
|
||||||
the line, or have nested members cannot be printed on one line.
|
the line, or have nested members, cannot be printed on one line.
|
||||||
|
|
||||||
We hook into JSON printing functions and try to detect the pattern. While
|
We hook into JSON printing functions and try to detect the pattern. While
|
||||||
detecting the pattern, we will accumulate "boo", 123, 456 as strings.
|
detecting the pattern, we will accumulate "boo", 123, 456 as strings.
|
||||||
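One way to picture the decision this helper has to make; a rough sketch only, with an assumed 80-column budget and plain strings standing in for the accumulated values (the real class works on Json_writer's buffers).

#include <string>
#include <vector>

static bool fits_on_one_line(const std::string &name,
                             const std::vector<std::string> &values,
                             size_t max_width= 80)
{
  size_t width= name.size() + 7;                   // rough cost of 'name : [ ... ]' framing
  for (const std::string &v : values)
  {
    if (v.find('\n') != std::string::npos)         // nested / multi-line member
      return false;
    width+= v.size() + 2;                          // the value plus a ", " separator
  }
  return width <= max_width;
}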
@ -76,7 +76,7 @@ class Single_line_formatting_helper
|
|||||||
};
|
};
|
||||||
|
|
||||||
/*
|
/*
|
||||||
This works like a finite automaton.
|
This works like a finite automaton.
|
||||||
|
|
||||||
state=DISABLED means the helper is disabled - all on_XXX functions will
|
state=DISABLED means the helper is disabled - all on_XXX functions will
|
||||||
return false (which means "not handled") and do nothing.
|
return false (which means "not handled") and do nothing.
|
||||||
@ -738,7 +738,7 @@ public:
|
|||||||
/*
|
/*
|
||||||
RAII-based class to disable writing into the JSON document
|
RAII-based class to disable writing into the JSON document
|
||||||
The tracing is disabled as soon as the object is created.
|
The tracing is disabled as soon as the object is created.
|
||||||
The destuctor is called as soon as we exit the scope of the object
|
The destructor is called as soon as we exit the scope of the object
|
||||||
and the tracing is enabled back.
|
and the tracing is enabled back.
|
||||||
*/
|
*/
|
||||||
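A minimal sketch of the RAII pattern the comment describes, with an invented bool flag in place of the real Json_writer state: the constructor switches tracing off, and the destructor restores the previous value when the guard goes out of scope. Usage would be simply { disable_tracing_guard_sketch g(trace_enabled); ... }.

class disable_tracing_guard_sketch
{
  bool &enabled;
  bool saved;

public:
  explicit disable_tracing_guard_sketch(bool &enabled_flag)
    : enabled(enabled_flag), saved(enabled_flag)
  {
    enabled= false;        // tracing is disabled as soon as the object is created
  }
  ~disable_tracing_guard_sketch()
  {
    enabled= saved;        // tracing is enabled back when we leave the scope
  }
};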
|
|
||||||
@ -754,7 +754,7 @@ public:
|
|||||||
RAII-based helper class to detect incorrect use of Json_writer.
|
RAII-based helper class to detect incorrect use of Json_writer.
|
||||||
|
|
||||||
The idea is that a function typically must leave Json_writer at the same
|
The idea is that a function typically must leave Json_writer at the same
|
||||||
identation level as it was when it was invoked. Leaving it at a different
|
indentation level as it was when it was invoked. Leaving it at a different
|
||||||
level typically means we forgot to close an object or an array
|
level typically means we forgot to close an object or an array
|
||||||
|
|
||||||
So, here is a way to guard
|
So, here is a way to guard
|
||||||
|
@ -1909,7 +1909,7 @@ extern "C" void unireg_abort(int exit_code)
|
|||||||
{
|
{
|
||||||
/*
|
/*
|
||||||
This is an abort situation, we cannot expect to gracefully close all
|
This is an abort situation, we cannot expect to gracefully close all
|
||||||
wsrep threads here, we can only diconnect from service
|
wsrep threads here, we can only disconnect from service
|
||||||
*/
|
*/
|
||||||
wsrep_close_client_connections(FALSE);
|
wsrep_close_client_connections(FALSE);
|
||||||
Wsrep_server_state::instance().disconnect();
|
Wsrep_server_state::instance().disconnect();
|
||||||
@ -3897,7 +3897,7 @@ static const char *rpl_make_log_name(PSI_memory_key key, const char *opt,
|
|||||||
MY_REPLACE_EXT | MY_UNPACK_FILENAME | MY_SAFE_PATH;
|
MY_REPLACE_EXT | MY_UNPACK_FILENAME | MY_SAFE_PATH;
|
||||||
|
|
||||||
/* mysql_real_data_home_ptr may be null if no value of datadir has been
|
/* mysql_real_data_home_ptr may be null if no value of datadir has been
|
||||||
specified through command-line or througha cnf file. If that is the
|
specified through command-line or through a cnf file. If that is the
|
||||||
case we make mysql_real_data_home_ptr point to mysql_real_data_home
|
case we make mysql_real_data_home_ptr point to mysql_real_data_home
|
||||||
which, in that case holds the default path for data-dir.
|
which, in that case holds the default path for data-dir.
|
||||||
*/
|
*/
|
||||||
@ -4251,7 +4251,7 @@ static int init_common_variables()
|
|||||||
SYSVAR_AUTOSIZE(back_log, MY_MIN(900, (50 + max_connections / 5)));
|
SYSVAR_AUTOSIZE(back_log, MY_MIN(900, (50 + max_connections / 5)));
|
||||||
}
|
}
|
||||||
|
|
||||||
unireg_init(opt_specialflag); /* Set up extern variabels */
|
unireg_init(opt_specialflag); /* Set up extern variables */
|
||||||
if (!(my_default_lc_messages=
|
if (!(my_default_lc_messages=
|
||||||
my_locale_by_name(Lex_cstring_strlen(lc_messages))))
|
my_locale_by_name(Lex_cstring_strlen(lc_messages))))
|
||||||
{
|
{
|
||||||
@ -4341,7 +4341,7 @@ static int init_common_variables()
|
|||||||
}
|
}
|
||||||
default_charset_info= default_collation;
|
default_charset_info= default_collation;
|
||||||
}
|
}
|
||||||
/* Set collactions that depends on the default collation */
|
/* Set collations that depend on the default collation */
|
||||||
global_system_variables.collation_server= default_charset_info;
|
global_system_variables.collation_server= default_charset_info;
|
||||||
global_system_variables.collation_database= default_charset_info;
|
global_system_variables.collation_database= default_charset_info;
|
||||||
if (is_supported_parser_charset(default_charset_info))
|
if (is_supported_parser_charset(default_charset_info))
|
||||||
@ -5193,7 +5193,7 @@ static int init_server_components()
|
|||||||
|
|
||||||
if (WSREP_ON && !wsrep_recovery && !opt_abort)
|
if (WSREP_ON && !wsrep_recovery && !opt_abort)
|
||||||
{
|
{
|
||||||
if (opt_bootstrap) // bootsrap option given - disable wsrep functionality
|
if (opt_bootstrap) // bootstrap option given - disable wsrep functionality
|
||||||
{
|
{
|
||||||
wsrep_provider_init(WSREP_NONE);
|
wsrep_provider_init(WSREP_NONE);
|
||||||
if (wsrep_init())
|
if (wsrep_init())
|
||||||
@ -5749,7 +5749,7 @@ static void run_main_loop()
|
|||||||
int mysqld_main(int argc, char **argv)
|
int mysqld_main(int argc, char **argv)
|
||||||
{
|
{
|
||||||
#ifndef _WIN32
|
#ifndef _WIN32
|
||||||
/* We can't close stdin just now, because it may be booststrap mode. */
|
/* We can't close stdin just now, because it may be in bootstrap mode. */
|
||||||
bool please_close_stdin= fcntl(STDIN_FILENO, F_GETFD) >= 0;
|
bool please_close_stdin= fcntl(STDIN_FILENO, F_GETFD) >= 0;
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
@ -8791,7 +8791,7 @@ static int get_options(int *argc_ptr, char ***argv_ptr)
|
|||||||
|
|
||||||
/*
|
/*
|
||||||
Options have been parsed. Now some of them need additional special
|
Options have been parsed. Now some of them need additional special
|
||||||
handling, like custom value checking, checking of incompatibilites
|
handling, like custom value checking, checking of incompatibilities
|
||||||
between options, setting of multiple variables, etc.
|
between options, setting of multiple variables, etc.
|
||||||
Do them here.
|
Do them here.
|
||||||
*/
|
*/
|
||||||
@ -8956,7 +8956,7 @@ static int get_options(int *argc_ptr, char ***argv_ptr)
|
|||||||
/*
|
/*
|
||||||
It looks like extra_connection_count should be passed here but
|
It looks like extra_connection_count should be passed here but
|
||||||
its been using connection_count for the last 10+ years and
|
its been using connection_count for the last 10+ years and
|
||||||
no-one was requested a change so lets not suprise anyone.
|
no-one has requested a change, so let's not surprise anyone.
|
||||||
*/
|
*/
|
||||||
one_thread_scheduler(extra_thread_scheduler, &connection_count);
|
one_thread_scheduler(extra_thread_scheduler, &connection_count);
|
||||||
#else
|
#else
|
||||||
@ -9345,7 +9345,7 @@ void refresh_global_status()
|
|||||||
*/
|
*/
|
||||||
reset_status_vars();
|
reset_status_vars();
|
||||||
/*
|
/*
|
||||||
Reset accoumulated thread's status variables.
|
Reset accumulated thread's status variables.
|
||||||
These are the variables in 'status_vars[]' with the prefix _STATUS.
|
These are the variables in 'status_vars[]' with the prefix _STATUS.
|
||||||
*/
|
*/
|
||||||
bzero(&global_status_var, clear_for_flush_status);
|
bzero(&global_status_var, clear_for_flush_status);
|
||||||
@ -9393,7 +9393,7 @@ void refresh_status_legacy(THD *thd)
|
|||||||
reset_pfs_status_stats();
|
reset_pfs_status_stats();
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* Add thread's status variabes to global status */
|
/* Add thread's status variables to global status */
|
||||||
add_to_status(&global_status_var, &thd->status_var);
|
add_to_status(&global_status_var, &thd->status_var);
|
||||||
|
|
||||||
/* Reset thread's status variables */
|
/* Reset thread's status variables */
|
||||||
|
@ -98,7 +98,7 @@ extern MYSQL_PLUGIN_IMPORT CHARSET_INFO *national_charset_info;
|
|||||||
extern MYSQL_PLUGIN_IMPORT CHARSET_INFO *table_alias_charset;
|
extern MYSQL_PLUGIN_IMPORT CHARSET_INFO *table_alias_charset;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
Character set of the buildin error messages loaded from errmsg.sys.
|
Character set of the builtin error messages loaded from errmsg.sys.
|
||||||
*/
|
*/
|
||||||
extern CHARSET_INFO *error_message_charset_info;
|
extern CHARSET_INFO *error_message_charset_info;
|
||||||
|
|
||||||
|
@ -548,7 +548,7 @@ bool read_hex_bucket_endpoint(json_engine_t *je, Field *field, String *out,
|
|||||||
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@brief Parse a JSON reprsentation for one histogram bucket
|
@brief Parse a JSON representation for one histogram bucket
|
||||||
|
|
||||||
@param je The JSON parser object
|
@param je The JSON parser object
|
||||||
@param field Table field we are using histogram (used to convert
|
@param field Table field we are using histogram (used to convert
|
||||||
|
@ -58,7 +58,7 @@ bool uses_index_fields_only(Item *item, TABLE *tbl, uint keyno,
|
|||||||
/*
|
/*
|
||||||
Don't push down the triggered conditions. Nested outer joins execution
|
Don't push down the triggered conditions. Nested outer joins execution
|
||||||
code may need to evaluate a condition several times (both triggered and
|
code may need to evaluate a condition several times (both triggered and
|
||||||
untriggered), and there is no way to put thi
|
untriggered), and there is no way to put this
|
||||||
TODO: Consider cloning the triggered condition and using the copies for:
|
TODO: Consider cloning the triggered condition and using the copies for:
|
||||||
1. push the first copy down, to have most restrictive index condition
|
1. push the first copy down, to have most restrictive index condition
|
||||||
possible
|
possible
|
||||||
|
@ -312,7 +312,7 @@ public:
|
|||||||
longlong baseflag;
|
longlong baseflag;
|
||||||
uint max_key_parts, range_count;
|
uint max_key_parts, range_count;
|
||||||
|
|
||||||
bool quick; // Don't calulate possible keys
|
bool quick; // Don't calculate possible keys
|
||||||
|
|
||||||
uint fields_bitmap_size;
|
uint fields_bitmap_size;
|
||||||
MY_BITMAP needed_fields; /* bitmask of fields needed by the query */
|
MY_BITMAP needed_fields; /* bitmask of fields needed by the query */
|
||||||
@ -1029,7 +1029,7 @@ int imerge_list_or_list(RANGE_OPT_PARAM *param,
|
|||||||
tree SEL_TREE whose range part is to be ored with the imerges
|
tree SEL_TREE whose range part is to be ored with the imerges
|
||||||
|
|
||||||
DESCRIPTION
|
DESCRIPTION
|
||||||
For each imerge mi from the list 'merges' the function performes OR
|
For each imerge mi from the list 'merges' the function performs OR
|
||||||
operation with mi and the range part of 'tree' rt, producing one or
|
operation with mi and the range part of 'tree' rt, producing one or
|
||||||
two imerges.
|
two imerges.
|
||||||
|
|
||||||
@ -2690,7 +2690,7 @@ static int fill_used_fields_bitmap(PARAM *param)
|
|||||||
TODO
|
TODO
|
||||||
* Change the value returned in opt_range_condition_rows from a pessimistic
|
* Change the value returned in opt_range_condition_rows from a pessimistic
|
||||||
estimate to true E(#rows that satisfy table condition).
|
estimate to true E(#rows that satisfy table condition).
|
||||||
(we can re-use some of E(#rows) calcuation code from
|
(we can re-use some of E(#rows) calculation code from
|
||||||
index_merge/intersection for this)
|
index_merge/intersection for this)
|
||||||
|
|
||||||
* Check if this function really needs to modify keys_to_use, and change the
|
* Check if this function really needs to modify keys_to_use, and change the
|
||||||
@ -4054,7 +4054,7 @@ typedef struct st_part_prune_param
|
|||||||
partitioning index definition doesn't include partitioning fields.
|
partitioning index definition doesn't include partitioning fields.
|
||||||
*/
|
*/
|
||||||
int last_part_partno;
|
int last_part_partno;
|
||||||
int last_subpart_partno; /* Same as above for supartitioning */
|
int last_subpart_partno; /* Same as above for subpartitioning */
|
||||||
|
|
||||||
/*
|
/*
|
||||||
is_part_keypart[i] == MY_TEST(keypart #i in partitioning index is a member
|
is_part_keypart[i] == MY_TEST(keypart #i in partitioning index is a member
|
||||||
@ -4065,7 +4065,7 @@ typedef struct st_part_prune_param
|
|||||||
/* Same as above for subpartitioning */
|
/* Same as above for subpartitioning */
|
||||||
my_bool *is_subpart_keypart;
|
my_bool *is_subpart_keypart;
|
||||||
|
|
||||||
my_bool ignore_part_fields; /* Ignore rest of partioning fields */
|
my_bool ignore_part_fields; /* Ignore rest of partitioning fields */
|
||||||
|
|
||||||
/***************************************************************
|
/***************************************************************
|
||||||
Following fields form find_used_partitions() recursion context:
|
Following fields form find_used_partitions() recursion context:
|
||||||
@ -4819,7 +4819,7 @@ int find_used_partitions(PART_PRUNE_PARAM *ppar, SEL_ARG *key_tree)
|
|||||||
res= 0; /* No satisfying partitions */
|
res= 0; /* No satisfying partitions */
|
||||||
goto pop_and_go_right;
|
goto pop_and_go_right;
|
||||||
}
|
}
|
||||||
/* Rembember the limit we got - single partition #part_id */
|
/* Remember the limit we got - single partition #part_id */
|
||||||
init_single_partition_iterator(part_id, &ppar->part_iter);
|
init_single_partition_iterator(part_id, &ppar->part_iter);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -4864,7 +4864,7 @@ int find_used_partitions(PART_PRUNE_PARAM *ppar, SEL_ARG *key_tree)
|
|||||||
{
|
{
|
||||||
/*
|
/*
|
||||||
Can't handle condition on current key part. If we're that deep that
|
Can't handle condition on current key part. If we're that deep that
|
||||||
we're processing subpartititoning's key parts, this means we'll not be
|
we're processing subpartitioning's key parts, this means we'll not be
|
||||||
able to infer any suitable condition, so bail out.
|
able to infer any suitable condition, so bail out.
|
||||||
*/
|
*/
|
||||||
if (key_tree_part >= ppar->last_part_partno)
|
if (key_tree_part >= ppar->last_part_partno)
|
||||||
@ -5704,7 +5704,7 @@ skip_to_ror_scan:
|
|||||||
indexes are to be merges
|
indexes are to be merges
|
||||||
read_time The upper bound for the cost of the plan to be evaluated
|
read_time The upper bound for the cost of the plan to be evaluated
|
||||||
|
|
||||||
DESRIPTION
|
DESCRIPTION
|
||||||
For the given index merge plan imerge_trp extracted from the SEL_MERGE
|
For the given index merge plan imerge_trp extracted from the SEL_MERGE
|
||||||
imerge the function looks for range scans with the same indexes and merges
|
imerge the function looks for range scans with the same indexes and merges
|
||||||
them into SEL_ARG trees. Then for each such SEL_ARG tree r_i the function
|
them into SEL_ARG trees. Then for each such SEL_ARG tree r_i the function
|
||||||
@ -5812,7 +5812,7 @@ TABLE_READ_PLAN *merge_same_index_scans(PARAM *param, SEL_IMERGE *imerge,
|
|||||||
|
|
||||||
/*
|
/*
|
||||||
This structure contains the info common for all steps of a partial
|
This structure contains the info common for all steps of a partial
|
||||||
index intersection plan. Morever it contains also the info common
|
index intersection plan. Moreover it contains also the info common
|
||||||
for index intersect plans. This info is filled in by the function
|
for index intersect plans. This info is filled in by the function
|
||||||
prepare_search_best just before searching for the best index
|
prepare_search_best just before searching for the best index
|
||||||
intersection plan.
|
intersection plan.
|
||||||
@ -5839,7 +5839,7 @@ typedef struct st_common_index_intersect_info
|
|||||||
ha_rows best_records;
|
ha_rows best_records;
|
||||||
uint best_length; /* number of indexes in the current best intersection */
|
uint best_length; /* number of indexes in the current best intersection */
|
||||||
INDEX_SCAN_INFO **best_intersect; /* the current best index intersection */
|
INDEX_SCAN_INFO **best_intersect; /* the current best index intersection */
|
||||||
/* scans from the best intersect to be filtrered by cpk conditions */
|
/* scans from the best intersect to be filtered by cpk conditions */
|
||||||
key_map filtered_scans;
|
key_map filtered_scans;
|
||||||
|
|
||||||
uint *buff_elems; /* buffer to calculate cost of index intersection */
|
uint *buff_elems; /* buffer to calculate cost of index intersection */
|
||||||
@ -6384,7 +6384,7 @@ bool prepare_search_best_index_intersect(PARAM *param,
|
|||||||
the function returns a number bigger than #r.
|
the function returns a number bigger than #r.
|
||||||
|
|
||||||
NOTES
|
NOTES
|
||||||
See the comment before the desription of the function that explains the
|
See the comment before the description of the function that explains the
|
||||||
reasoning used by this function.
|
reasoning used by this function.
|
||||||
|
|
||||||
RETURN
|
RETURN
|
||||||
@ -6465,7 +6465,7 @@ double get_cpk_filter_cost(ha_rows filtered_records,
|
|||||||
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Check whether a patial index intersection plan can be extended
|
Check whether a partial index intersection plan can be extended
|
||||||
|
|
||||||
SYNOPSIS
|
SYNOPSIS
|
||||||
check_index_intersect_extension()
|
check_index_intersect_extension()
|
||||||
@ -6616,7 +6616,7 @@ bool check_index_intersect_extension(THD *thd,
|
|||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
The cost after sweeep can be bigger than cutoff, but that is ok as the
|
The cost after sweep can be bigger than cutoff, but that is ok as the
|
||||||
end cost can decrease when we add the next index.
|
end cost can decrease when we add the next index.
|
||||||
*/
|
*/
|
||||||
cost+= get_sweep_read_cost(common_info->param, rows2double(records), 1);
|
cost+= get_sweep_read_cost(common_info->param, rows2double(records), 1);
|
||||||
@ -8110,7 +8110,7 @@ QUICK_SELECT_I *TRP_ROR_UNION::make_quick(PARAM *param,
|
|||||||
cond_func item for the predicate
|
cond_func item for the predicate
|
||||||
field field in the predicate
|
field field in the predicate
|
||||||
lt_value constant that field should be smaller
|
lt_value constant that field should be smaller
|
||||||
gt_value constant that field should be greaterr
|
gt_value constant that field should be greater
|
||||||
|
|
||||||
RETURN
|
RETURN
|
||||||
# Pointer to tree built tree
|
# Pointer to tree built tree
|
||||||
@ -8289,7 +8289,7 @@ SEL_TREE *Item_func_in::get_func_mm_tree(RANGE_OPT_PARAM *param,
|
|||||||
|
|
||||||
/*
|
/*
|
||||||
if this is a "col1 NOT IN (...)", and there is a UNIQUE KEY(col1), do
|
if this is a "col1 NOT IN (...)", and there is a UNIQUE KEY(col1), do
|
||||||
not constuct a SEL_TREE from it. The rationale is as follows:
|
not construct a SEL_TREE from it. The rationale is as follows:
|
||||||
- if there are only a few constants, this condition is not selective
|
- if there are only a few constants, this condition is not selective
|
||||||
(unless the table is also very small in which case we won't gain
|
(unless the table is also very small in which case we won't gain
|
||||||
anything)
|
anything)
|
||||||
@ -8381,7 +8381,7 @@ SEL_TREE *Item_func_in::get_func_mm_tree(RANGE_OPT_PARAM *param,
|
|||||||
{
|
{
|
||||||
/*
|
/*
|
||||||
Get the SEL_TREE for the last "c_last < X < +inf" interval
|
Get the SEL_TREE for the last "c_last < X < +inf" interval
|
||||||
(value_item cotains c_last already)
|
(value_item contains c_last already)
|
||||||
*/
|
*/
|
||||||
tree2= get_mm_parts(param, field, Item_func::GT_FUNC, value_item);
|
tree2= get_mm_parts(param, field, Item_func::GT_FUNC, value_item);
|
||||||
tree= tree_or(param, tree, tree2);
|
tree= tree_or(param, tree, tree2);
|
||||||
@ -9353,7 +9353,7 @@ Item_func_null_predicate::get_mm_leaf(RANGE_OPT_PARAM *param,
|
|||||||
DBUG_ENTER("Item_func_null_predicate::get_mm_leaf");
|
DBUG_ENTER("Item_func_null_predicate::get_mm_leaf");
|
||||||
DBUG_ASSERT(!value);
|
DBUG_ASSERT(!value);
|
||||||
/*
|
/*
|
||||||
No check for field->table->maybe_null. It's perfecly fine to use range
|
No check for field->table->maybe_null. It's perfectly fine to use range
|
||||||
access for cases like
|
access for cases like
|
||||||
|
|
||||||
SELECT * FROM t1 LEFT JOIN t2 ON t2.key IS [NOT] NULL
|
SELECT * FROM t1 LEFT JOIN t2 ON t2.key IS [NOT] NULL
|
||||||
@ -10691,7 +10691,7 @@ key_and(RANGE_OPT_PARAM *param, SEL_ARG *key1, SEL_ARG *key2, uint clone_flag)
|
|||||||
new_tree=new_tree->insert(new_arg);
|
new_tree=new_tree->insert(new_arg);
|
||||||
}
|
}
|
||||||
if (e1->cmp_max_to_max(e2) < 0)
|
if (e1->cmp_max_to_max(e2) < 0)
|
||||||
e1=e1->next; // e1 can't overlapp next e2
|
e1=e1->next; // e1 can't overlap next e2
|
||||||
else
|
else
|
||||||
e2=e2->next;
|
e2=e2->next;
|
||||||
}
|
}
|
||||||
@ -10807,7 +10807,7 @@ SEL_ARG *key_and_with_limit(RANGE_OPT_PARAM *param, uint keyno,
|
|||||||
transformation is key_or( expr1, expr2 ) => expr1 OR expr2.
|
transformation is key_or( expr1, expr2 ) => expr1 OR expr2.
|
||||||
|
|
||||||
Both expressions are assumed to be in the SEL_ARG format. In a logic sense,
|
Both expressions are assumed to be in the SEL_ARG format. In a logic sense,
|
||||||
theformat is reminiscent of DNF, since an expression such as the following
|
the format is reminiscent of DNF, since an expression such as the following
|
||||||
|
|
||||||
( 1 < kp1 < 10 AND p1 ) OR ( 10 <= kp2 < 20 AND p2 )
|
( 1 < kp1 < 10 AND p1 ) OR ( 10 <= kp2 < 20 AND p2 )
|
||||||
|
|
||||||
@ -10833,7 +10833,7 @@ SEL_ARG *key_and_with_limit(RANGE_OPT_PARAM *param, uint keyno,
|
|||||||
If the predicates are equal for the rest of the keyparts, or if there are
|
If the predicates are equal for the rest of the keyparts, or if there are
|
||||||
no more, the range in expr2 has its endpoints copied in, and the SEL_ARG
|
no more, the range in expr2 has its endpoints copied in, and the SEL_ARG
|
||||||
node in expr2 is deallocated. If more ranges became connected in expr1, the
|
node in expr2 is deallocated. If more ranges became connected in expr1, the
|
||||||
surplus is also dealocated. If they differ, two ranges are created.
|
surplus is also deallocated. If they differ, two ranges are created.
|
||||||
|
|
||||||
- The range leading up to the overlap. Empty if endpoints are equal.
|
- The range leading up to the overlap. Empty if endpoints are equal.
|
||||||
|
|
||||||
@ -10929,7 +10929,7 @@ key_or(RANGE_OPT_PARAM *param, SEL_ARG *key1,SEL_ARG *key2)
|
|||||||
|
|
||||||
Ambiguity: ***
|
Ambiguity: ***
|
||||||
The range starts or stops somewhere in the "***" range.
|
The range starts or stops somewhere in the "***" range.
|
||||||
Example: a starts before b and may end before/the same plase/after b
|
Example: a starts before b and may end before/the same place/after b
|
||||||
a: [----***]
|
a: [----***]
|
||||||
b: [---]
|
b: [---]
|
||||||
|
|
||||||
@ -12329,7 +12329,7 @@ ha_rows check_quick_select(PARAM *param, uint idx, ha_rows limit,
|
|||||||
estimates may be slightly out of sync.
|
estimates may be slightly out of sync.
|
||||||
|
|
||||||
We cannot do this easily in the above multi_range_read_info_const()
|
We cannot do this easily in the above multi_range_read_info_const()
|
||||||
call as then we would need to have similar adjustmends done
|
call as then we would need to have similar adjustments done
|
||||||
in the partitioning engine.
|
in the partitioning engine.
|
||||||
*/
|
*/
|
||||||
rows= MY_MAX(table_records, 1);
|
rows= MY_MAX(table_records, 1);
|
||||||
@ -12378,7 +12378,7 @@ ha_rows check_quick_select(PARAM *param, uint idx, ha_rows limit,
|
|||||||
else if (param->range_count > 1)
|
else if (param->range_count > 1)
|
||||||
{
|
{
|
||||||
/*
|
/*
|
||||||
Scaning multiple key values in the index: the records are ROR
|
Scanning multiple key values in the index: the records are ROR
|
||||||
for each value, but not between values. E.g, "SELECT ... x IN
|
for each value, but not between values. E.g, "SELECT ... x IN
|
||||||
(1,3)" returns ROR order for all records with x=1, then ROR
|
(1,3)" returns ROR order for all records with x=1, then ROR
|
||||||
order for records with x=3
|
order for records with x=3
|
||||||
@ -16070,7 +16070,7 @@ bool QUICK_GROUP_MIN_MAX_SELECT::add_range(SEL_ARG *sel_range)
|
|||||||
are more keyparts to follow the ones we are using we must make the
|
are more keyparts to follow the ones we are using we must make the
|
||||||
condition on the key inclusive (because x < "ab" means
|
condition on the key inclusive (because x < "ab" means
|
||||||
x[0] < 'a' OR (x[0] == 'a' AND x[1] < 'b').
|
x[0] < 'a' OR (x[0] == 'a' AND x[1] < 'b').
|
||||||
To achive the above we must turn off the NEAR_MIN/NEAR_MAX
|
To achieve the above we must turn off the NEAR_MIN/NEAR_MAX
|
||||||
*/
|
*/
|
||||||
void QUICK_GROUP_MIN_MAX_SELECT::adjust_prefix_ranges ()
|
void QUICK_GROUP_MIN_MAX_SELECT::adjust_prefix_ranges ()
|
||||||
{
|
{
|
||||||
@ -16407,7 +16407,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_max()
|
|||||||
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
Find the next different key value by skiping all the rows with the same key
|
Find the next different key value by skipping all the rows with the same key
|
||||||
value.
|
value.
|
||||||
|
|
||||||
Implements a specialized loose index access method for queries
|
Implements a specialized loose index access method for queries
|
||||||
@ -17460,7 +17460,7 @@ static void print_key_value(String *out, const KEY_PART_INFO *key_part,
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
Print key parts involed in a range
|
Print key parts involved in a range
|
||||||
@param[out] out String the key is appended to
|
@param[out] out String the key is appended to
|
||||||
@param[in] key_part Index components description
|
@param[in] key_part Index components description
|
||||||
@param[in] n_keypart Number of keyparts in index
|
@param[in] n_keypart Number of keyparts in index
|
||||||
|
@ -110,10 +110,10 @@
|
|||||||
@detail
|
@detail
|
||||||
'foo' here can be any constant we can compute during optimization,
|
'foo' here can be any constant we can compute during optimization,
|
||||||
Only equality conditions are supported.
|
Only equality conditions are supported.
|
||||||
See SargableLeft above for detals.
|
See SargableLeft above for details.
|
||||||
|
|
||||||
@param field The first argument of LEFT or SUBSTRING if sargable,
|
@param field The first argument of LEFT or SUBSTRING if sargable,
|
||||||
otherwise deferenced to NULL
|
otherwise dereferenced to NULL
|
||||||
@param value_idx The index of argument that is the prefix string
|
@param value_idx The index of argument that is the prefix string
|
||||||
if sargable, otherwise dereferenced to -1
|
if sargable, otherwise dereferenced to -1
|
||||||
*/
|
*/
|
||||||
|
@ -83,7 +83,7 @@
|
|||||||
are evaluated then the optimizer should consider pushing t.a = t1.a,
|
are evaluated then the optimizer should consider pushing t.a = t1.a,
|
||||||
t.b = t2.b and (t.a = t1.a AND t.b = t2.b) to choose the best condition
|
t.b = t2.b and (t.a = t1.a AND t.b = t2.b) to choose the best condition
|
||||||
for splitting. Apparently here last condition is the best one because
|
for splitting. Apparently here last condition is the best one because
|
||||||
it provides the miximum possible number of partitions.
|
it provides the minimum possible number of partitions.
|
||||||
|
|
||||||
If we dropped the index on t3(a,b) and created the index on t3(a) instead
|
If we dropped the index on t3(a,b) and created the index on t3(a) instead
|
||||||
then we would have two options for splitting: to push t.a = t1.a or to
|
then we would have two options for splitting: to push t.a = t1.a or to
|
||||||
@ -160,7 +160,7 @@
|
|||||||
The set of all rows belonging to the union of several partitions is called
|
The set of all rows belonging to the union of several partitions is called
|
||||||
here superpartition. If a grouping operation is defined by the list
|
here superpartition. If a grouping operation is defined by the list
|
||||||
e_1,...,e_n then any set S = {e_i1,...,e_ik} can be used to devide all rows
|
e_1,...,e_n then any set S = {e_i1,...,e_ik} can be used to devide all rows
|
||||||
into superpartions such that for any two rows r1, r2 the following holds:
|
into superpartitions such that for any two rows r1, r2 the following holds:
|
||||||
e_ij(r1) = e_ij(r2) for each e_ij from S. We use the splitting technique
|
e_ij(r1) = e_ij(r2) for each e_ij from S. We use the splitting technique
|
||||||
only if S consists of references to colums of the joined tables.
|
only if S consists of references to colums of the joined tables.
|
||||||
For example if the GROUP BY list looks like this a, g(b), c we can consider
|
For example if the GROUP BY list looks like this a, g(b), c we can consider
|
||||||
|
@ -345,7 +345,7 @@ with the first one:
|
|||||||
|
|
||||||
When SJM nests are present, we should take care not to construct equalities
|
When SJM nests are present, we should take care not to construct equalities
|
||||||
that violate the (SJM-RULE). This is achieved by generating separate sets of
|
that violate the (SJM-RULE). This is achieved by generating separate sets of
|
||||||
equalites for top-level tables and for inner tables. That is, for the join
|
equalities for top-level tables and for inner tables. That is, for the join
|
||||||
order
|
order
|
||||||
|
|
||||||
ot1 - ot2 --\ /--- ot3 -- ot5
|
ot1 - ot2 --\ /--- ot3 -- ot5
|
||||||
@ -546,7 +546,7 @@ bool is_materialization_applicable(THD *thd, Item_in_subselect *in_subs,
|
|||||||
The disjunctive members
|
The disjunctive members
|
||||||
!((Sql_cmd_update *) cmd)->is_multitable()
|
!((Sql_cmd_update *) cmd)->is_multitable()
|
||||||
!((Sql_cmd_delete *) cmd)->is_multitable()
|
!((Sql_cmd_delete *) cmd)->is_multitable()
|
||||||
will be removed when conversions of IN predicands to semi-joins are
|
will be removed when conversions of IN predicants to semi-joins are
|
||||||
fully supported for single-table UPDATE/DELETE statements.
|
fully supported for single-table UPDATE/DELETE statements.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
@ -1984,7 +1984,7 @@ static bool convert_subq_to_sj(JOIN *parent_join, Item_in_subselect *subq_pred)
|
|||||||
else if (left_exp->type() == Item::ROW_ITEM)
|
else if (left_exp->type() == Item::ROW_ITEM)
|
||||||
{
|
{
|
||||||
/*
|
/*
|
||||||
disassemple left expression and add
|
disassemble left expression and add
|
||||||
left1 = select_list_element1 and left2 = select_list_element2 ...
|
left1 = select_list_element1 and left2 = select_list_element2 ...
|
||||||
*/
|
*/
|
||||||
for (uint i= 0; i < ncols; i++)
|
for (uint i= 0; i < ncols; i++)
|
||||||
@ -3041,7 +3041,7 @@ void optimize_semi_joins(JOIN *join, table_map remaining_tables, uint idx,
|
|||||||
1. strategy X removes fanout for semijoin X,Y
|
1. strategy X removes fanout for semijoin X,Y
|
||||||
2. using strategy Z is cheaper, but it only removes
|
2. using strategy Z is cheaper, but it only removes
|
||||||
fanout from semijoin X.
|
fanout from semijoin X.
|
||||||
3. We have no clue what to do about fanount of semi-join Y.
|
3. We have no clue what to do about fanout of semi-join Y.
|
||||||
|
|
||||||
For the first iteration read_time will always be bigger than
|
For the first iteration read_time will always be bigger than
|
||||||
*current_read_time (as the 'strategy' is an addition to the
|
*current_read_time (as the 'strategy' is an addition to the
|
||||||
@ -3057,7 +3057,7 @@ void optimize_semi_joins(JOIN *join, table_map remaining_tables, uint idx,
|
|||||||
DBUG_ASSERT(pos->sj_strategy != sj_strategy);
|
DBUG_ASSERT(pos->sj_strategy != sj_strategy);
|
||||||
/*
|
/*
|
||||||
If the strategy chosen first time or
|
If the strategy chosen first time or
|
||||||
the strategy replace strategy which was used to exectly the same
|
the strategy replace strategy which was used to exactly the same
|
||||||
tables
|
tables
|
||||||
*/
|
*/
|
||||||
if (pos->sj_strategy == SJ_OPT_NONE ||
|
if (pos->sj_strategy == SJ_OPT_NONE ||
|
||||||
@ -3093,7 +3093,7 @@ void optimize_semi_joins(JOIN *join, table_map remaining_tables, uint idx,
|
|||||||
(*prev_strategy)->set_empty();
|
(*prev_strategy)->set_empty();
|
||||||
dups_producing_tables= prev_dups_producing_tables;
|
dups_producing_tables= prev_dups_producing_tables;
|
||||||
join->sjm_lookup_tables= prev_sjm_lookup_tables;
|
join->sjm_lookup_tables= prev_sjm_lookup_tables;
|
||||||
// mark it 'none' to avpoid loops
|
// mark it 'none' to avoid loops
|
||||||
pos->sj_strategy= SJ_OPT_NONE;
|
pos->sj_strategy= SJ_OPT_NONE;
|
||||||
// next skip to last;
|
// next skip to last;
|
||||||
strategy= pickers +
|
strategy= pickers +
|
||||||
@ -3149,7 +3149,7 @@ void optimize_semi_joins(JOIN *join, table_map remaining_tables, uint idx,
|
|||||||
Update JOIN's semi-join optimization state after the join tab new_tab
|
Update JOIN's semi-join optimization state after the join tab new_tab
|
||||||
has been added into the join prefix.
|
has been added into the join prefix.
|
||||||
|
|
||||||
@seealso restore_prev_sj_state() does the reverse actoion
|
@seealso restore_prev_sj_state() does the reverse action
|
||||||
*/
|
*/
|
||||||
|
|
||||||
void update_sj_state(JOIN *join, const JOIN_TAB *new_tab,
|
void update_sj_state(JOIN *join, const JOIN_TAB *new_tab,
|
||||||
@ -4326,7 +4326,7 @@ uint get_number_of_tables_at_top_level(JOIN *join)
|
|||||||
Setup execution structures for one semi-join materialization nest:
|
Setup execution structures for one semi-join materialization nest:
|
||||||
- Create the materialization temporary table
|
- Create the materialization temporary table
|
||||||
- If we're going to do index lookups
|
- If we're going to do index lookups
|
||||||
create TABLE_REF structure to make the lookus
|
create TABLE_REF structure to make the lookups
|
||||||
- else (if we're going to do a full scan of the temptable)
|
- else (if we're going to do a full scan of the temptable)
|
||||||
create Copy_field structures to do copying.
|
create Copy_field structures to do copying.
|
||||||
|
|
||||||
@ -5360,7 +5360,7 @@ int setup_semijoin_loosescan(JOIN *join)
|
|||||||
application of FirstMatch strategy, with the exception that
|
application of FirstMatch strategy, with the exception that
|
||||||
outer IN-correlated tables are considered to be non-correlated.
|
outer IN-correlated tables are considered to be non-correlated.
|
||||||
|
|
||||||
(4) - THe suffix of outer and outer non-correlated tables.
|
(4) - The suffix of outer and outer non-correlated tables.
|
||||||
|
|
||||||
|
|
||||||
The choice between the strategies is made by the join optimizer (see
|
The choice between the strategies is made by the join optimizer (see
|
||||||
@ -5984,7 +5984,7 @@ enum_nested_loop_state join_tab_execution_startup(JOIN_TAB *tab)
|
|||||||
Create a dummy temporary table, useful only for the sake of having a
|
Create a dummy temporary table, useful only for the sake of having a
|
||||||
TABLE* object with map,tablenr and maybe_null properties.
|
TABLE* object with map,tablenr and maybe_null properties.
|
||||||
|
|
||||||
This is used by non-mergeable semi-join materilization code to handle
|
This is used by non-mergeable semi-join materialization code to handle
|
||||||
degenerate cases where materialized subquery produced "Impossible WHERE"
|
degenerate cases where materialized subquery produced "Impossible WHERE"
|
||||||
and thus wasn't materialized.
|
and thus wasn't materialized.
|
||||||
*/
|
*/
|
||||||
@ -6557,7 +6557,7 @@ bool setup_degenerate_jtbm_semi_joins(JOIN *join,
|
|||||||
The function saves the equalities between all pairs of the expressions
|
The function saves the equalities between all pairs of the expressions
|
||||||
from the left part of the IN subquery predicate and the corresponding
|
from the left part of the IN subquery predicate and the corresponding
|
||||||
columns of the subquery from the predicate in eq_list appending them
|
columns of the subquery from the predicate in eq_list appending them
|
||||||
to the list. The equalities of eq_list will be later conjucted with the
|
to the list. The equalities of eq_list will be later conjuncted with the
|
||||||
condition of the WHERE clause.
|
condition of the WHERE clause.
|
||||||
|
|
||||||
In the case when a table is nested in another table 'nested_join' the
|
In the case when a table is nested in another table 'nested_join' the
|
||||||
@ -7031,7 +7031,7 @@ bool JOIN::choose_tableless_subquery_plan()
|
|||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
For IN subqueries, use IN->EXISTS transfomation, unless the subquery
|
For IN subqueries, use IN->EXISTS transformation, unless the subquery
|
||||||
has been converted to a JTBM semi-join. In that case, just leave
|
has been converted to a JTBM semi-join. In that case, just leave
|
||||||
everything as-is, setup_jtbm_semi_joins() has special handling for cases
|
everything as-is, setup_jtbm_semi_joins() has special handling for cases
|
||||||
like this.
|
like this.
|
||||||
|
@ -144,7 +144,7 @@
|
|||||||
|
|
||||||
The algorithm starts with equality nodes that don't have any incoming edges
|
The algorithm starts with equality nodes that don't have any incoming edges
|
||||||
(their expressions are either constant or depend only on tables that are
|
(their expressions are either constant or depend only on tables that are
|
||||||
outside of the outer join in question) and performns a breadth-first
|
outside of the outer join in question) and performs a breadth-first
|
||||||
traversal. If we reach the outer join nest node, it means outer join is
|
traversal. If we reach the outer join nest node, it means outer join is
|
||||||
functionally dependent and can be eliminated. Otherwise it cannot be
|
functionally dependent and can be eliminated. Otherwise it cannot be
|
||||||
eliminated.
|
eliminated.
|
||||||
@ -332,7 +332,7 @@ private:
|
|||||||
public:
|
public:
|
||||||
/* Space for field iterator */
|
/* Space for field iterator */
|
||||||
char buf[Dep_value_field::iterator_size];
|
char buf[Dep_value_field::iterator_size];
|
||||||
/* !NULL <=> iterating over depdenent modules of this field */
|
/* !NULL <=> iterating over dependent modules of this field */
|
||||||
Dep_value_field *field_dep;
|
Dep_value_field *field_dep;
|
||||||
bool returned_goal;
|
bool returned_goal;
|
||||||
};
|
};
|
||||||
@ -383,7 +383,7 @@ protected:
|
|||||||
uint unbound_args;
|
uint unbound_args;
|
||||||
|
|
||||||
Dep_module() : unbound_args(0) {}
|
Dep_module() : unbound_args(0) {}
|
||||||
/* to bump unbound_args when constructing depedendencies */
|
/* to bump unbound_args when constructing dependencies */
|
||||||
friend class Field_dependency_recorder;
|
friend class Field_dependency_recorder;
|
||||||
friend class Dep_analysis_context;
|
friend class Dep_analysis_context;
|
||||||
};
|
};
|
||||||
@ -877,7 +877,7 @@ eliminate_tables_for_list(JOIN *join, List<TABLE_LIST> *join_list,
|
|||||||
|
|
||||||
SYNOPSIS
|
SYNOPSIS
|
||||||
check_func_dependency()
|
check_func_dependency()
|
||||||
join Join we're procesing
|
join Join we're processing
|
||||||
dep_tables Tables that we check to be functionally dependent (on
|
dep_tables Tables that we check to be functionally dependent (on
|
||||||
everything else)
|
everything else)
|
||||||
it Iterator that enumerates these tables, or NULL if we're
|
it Iterator that enumerates these tables, or NULL if we're
|
||||||
@ -1334,8 +1334,8 @@ void build_eq_mods_for_cond(THD *thd, Dep_analysis_context *ctx,
|
|||||||
multiple-equality. Do two things:
|
multiple-equality. Do two things:
|
||||||
- Collect List<Dep_value_field> of tblX.colY where tblX is one of the
|
- Collect List<Dep_value_field> of tblX.colY where tblX is one of the
|
||||||
tables we're trying to eliminate.
|
tables we're trying to eliminate.
|
||||||
- rembember if there was a bound value, either const_expr or tblY.colZ
|
- remember if there was a bound value, either const_expr or tblY.colZ
|
||||||
swher tblY is not a table that we're trying to eliminate.
|
where tblY is not a table that we're trying to eliminate.
|
||||||
Store all collected information in a Dep_module_expr object.
|
Store all collected information in a Dep_module_expr object.
|
||||||
*/
|
*/
|
||||||
Item_equal *item_equal= (Item_equal*)cond;
|
Item_equal *item_equal= (Item_equal*)cond;
|
||||||
@ -1400,7 +1400,7 @@ void build_eq_mods_for_cond(THD *thd, Dep_analysis_context *ctx,
|
|||||||
|
|
||||||
$LEFT_PART OR $RIGHT_PART
|
$LEFT_PART OR $RIGHT_PART
|
||||||
|
|
||||||
condition. This is achieved as follows: First, we apply distrubutive law:
|
condition. This is achieved as follows: First, we apply distributive law:
|
||||||
|
|
||||||
(fdep_A_1 AND fdep_A_2 AND ...) OR (fdep_B_1 AND fdep_B_2 AND ...) =
|
(fdep_A_1 AND fdep_A_2 AND ...) OR (fdep_B_1 AND fdep_B_2 AND ...) =
|
||||||
|
|
||||||
@ -1846,7 +1846,7 @@ Dep_value_field *Dep_analysis_context::get_field_value(Field *field)
|
|||||||
/*
|
/*
|
||||||
Iteration over unbound modules that are our dependencies.
|
Iteration over unbound modules that are our dependencies.
|
||||||
for those we have:
|
for those we have:
|
||||||
- dependendencies of our fields
|
- dependencies of our fields
|
||||||
- outer join we're in
|
- outer join we're in
|
||||||
*/
|
*/
|
||||||
char *Dep_value_table::init_unbound_modules_iter(char *buf)
|
char *Dep_value_table::init_unbound_modules_iter(char *buf)
|
||||||
|
@ -84,7 +84,7 @@ ST_FIELD_INFO optimizer_trace_info[]=
|
|||||||
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
TODO: one-line needs to be implemented seperately
|
TODO: one-line needs to be implemented separately
|
||||||
*/
|
*/
|
||||||
const char *Opt_trace_context::flag_names[]= {"enabled", "default",
|
const char *Opt_trace_context::flag_names[]= {"enabled", "default",
|
||||||
NullS};
|
NullS};
|
||||||
|
@ -42,7 +42,7 @@ private:
|
|||||||
0 <=> this trace should be in information_schema.
|
0 <=> this trace should be in information_schema.
|
||||||
!=0 tracing is disabled, this currently happens when we want to trace a
|
!=0 tracing is disabled, this currently happens when we want to trace a
|
||||||
sub-statement. For now traces are only collect for the top statement
|
sub-statement. For now traces are only collect for the top statement
|
||||||
not for the sub-statments.
|
not for the sub-statements.
|
||||||
*/
|
*/
|
||||||
uint I_S_disabled;
|
uint I_S_disabled;
|
||||||
};
|
};
|
||||||
|
@ -29,9 +29,9 @@
|
|||||||
@file
|
@file
|
||||||
|
|
||||||
@brief
|
@brief
|
||||||
Virtual Column Substitution feature makes the optimizer recongize usage of
|
Virtual Column Substitution feature makes the optimizer recognize usage of
|
||||||
virtual column expressions in the WHERE/ON clauses. If there is an index
|
virtual column expressions in the WHERE/ON clauses. If there is an index
|
||||||
on the virtual column, the optimizer is able construct query plans that
|
on the virtual column, the optimizer is able to construct query plans that
|
||||||
use that index.
|
use that index.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
@ -369,7 +369,7 @@ void subst_vcol_if_compatible(Vcol_subst_context *ctx,
|
|||||||
/*
|
/*
|
||||||
@brief
|
@brief
|
||||||
Do a quick and imprecise check if it makes sense to try Virtual Column
|
Do a quick and imprecise check if it makes sense to try Virtual Column
|
||||||
Substitutiion transformation for this item.
|
Substitution transformation for this item.
|
||||||
|
|
||||||
@detail
|
@detail
|
||||||
For vcol_expr='FOO' the item to be trans formed is the comparison item
|
For vcol_expr='FOO' the item to be trans formed is the comparison item
|
||||||
|
@ -102,7 +102,7 @@ extern OPTIMIZER_COSTS heap_optimizer_costs, tmp_table_optimizer_costs;
|
|||||||
A factor of 0.1 makes the cost of get_pq_sort_cost(10, 10, false) =0.52
|
A factor of 0.1 makes the cost of get_pq_sort_cost(10, 10, false) =0.52
|
||||||
(Reading 10 rows into a priority queue of 10 elements).
|
(Reading 10 rows into a priority queue of 10 elements).
|
||||||
|
|
||||||
One consenquence if this factor is too high is that priority_queue will
|
One consequence if this factor is too high is that priority_queue will
|
||||||
not use addon fields (to solve the sort without having to do an extra
|
not use addon fields (to solve the sort without having to do an extra
|
||||||
re-read of rows) even if the number of LIMIT is low.
|
re-read of rows) even if the number of LIMIT is low.
|
||||||
*/
|
*/
|
||||||
|
@ -43,7 +43,7 @@
|
|||||||
Cost of copying a row to 'table->record'.
|
Cost of copying a row to 'table->record'.
|
||||||
Used by scan_time() and rnd_pos_time() methods.
|
Used by scan_time() and rnd_pos_time() methods.
|
||||||
|
|
||||||
If this is too small, then table scans will be prefered over 'ref'
|
If this is too small, then table scans will be preferred over 'ref'
|
||||||
as with table scans there are no key read (KEY_LOOKUP_COST), fewer
|
as with table scans there are no key read (KEY_LOOKUP_COST), fewer
|
||||||
disk reads but more record copying and row comparisions. If it's
|
disk reads but more record copying and row comparisions. If it's
|
||||||
too big then MariaDB will used key lookup even when table scan is
|
too big then MariaDB will used key lookup even when table scan is
|
||||||
@ -55,7 +55,7 @@
|
|||||||
Cost of copying the key to 'table->record'
|
Cost of copying the key to 'table->record'
|
||||||
|
|
||||||
If this is too small, then, for small tables, index scans will be
|
If this is too small, then, for small tables, index scans will be
|
||||||
prefered over 'ref' as with index scans there are fewer disk reads.
|
preferred over 'ref' as with index scans there are fewer disk reads.
|
||||||
*/
|
*/
|
||||||
#define DEFAULT_KEY_COPY_COST ((double) 0.000015685)
|
#define DEFAULT_KEY_COPY_COST ((double) 0.000015685)
|
||||||
|
|
||||||
@ -103,7 +103,7 @@
|
|||||||
#define DEFAULT_DISK_READ_COST ((double) IO_SIZE / 400000000.0 * 1000)
|
#define DEFAULT_DISK_READ_COST ((double) IO_SIZE / 400000000.0 * 1000)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
The follwoing is an old comment for hard-disks, please ignore the
|
The following is an old comment for hard-disks, please ignore the
|
||||||
following, except if you like history:
|
following, except if you like history:
|
||||||
|
|
||||||
For sequential hard disk seeks the cost formula is:
|
For sequential hard disk seeks the cost formula is:
|
||||||
|
@ -177,7 +177,7 @@ write_parameter(IO_CACHE *file, const uchar* base, File_option *parameter)
|
|||||||
LEX_STRING *val_s= (LEX_STRING *)(base + parameter->offset);
|
LEX_STRING *val_s= (LEX_STRING *)(base + parameter->offset);
|
||||||
// number of microseconds since Epoch, timezone-independent
|
// number of microseconds since Epoch, timezone-independent
|
||||||
my_hrtime_t tm= my_hrtime();
|
my_hrtime_t tm= my_hrtime();
|
||||||
// Paded to 19 characters for compatibility
|
// Padded to 19 characters for compatibility
|
||||||
val_s->length= snprintf(val_s->str, MICROSECOND_TIMESTAMP_BUFFER_SIZE,
|
val_s->length= snprintf(val_s->str, MICROSECOND_TIMESTAMP_BUFFER_SIZE,
|
||||||
"%019lld", tm.val);
|
"%019lld", tm.val);
|
||||||
DBUG_ASSERT(val_s->length == MICROSECOND_TIMESTAMP_BUFFER_SIZE-1);
|
DBUG_ASSERT(val_s->length == MICROSECOND_TIMESTAMP_BUFFER_SIZE-1);
|
||||||
|
@ -558,7 +558,7 @@ bool partition_info::set_up_defaults_for_partitioning(THD *thd, handler *file,
|
|||||||
no parameters
|
no parameters
|
||||||
|
|
||||||
RETURN VALUE
|
RETURN VALUE
|
||||||
Erroneus field name Error, there are two fields with same name
|
Erroneous field name Error, there are two fields with same name
|
||||||
NULL Ok, no field defined twice
|
NULL Ok, no field defined twice
|
||||||
|
|
||||||
DESCRIPTION
|
DESCRIPTION
|
||||||
|
@ -180,7 +180,7 @@ public:
|
|||||||
* lock_partitions - partitions that must be locked (read or write).
|
* lock_partitions - partitions that must be locked (read or write).
|
||||||
Usually read_partitions is the same set as lock_partitions, but
|
Usually read_partitions is the same set as lock_partitions, but
|
||||||
in case of UPDATE the WHERE clause can limit the read_partitions set,
|
in case of UPDATE the WHERE clause can limit the read_partitions set,
|
||||||
but not neccesarily the lock_partitions set.
|
but not necessarily the lock_partitions set.
|
||||||
Usage pattern:
|
Usage pattern:
|
||||||
* Initialized in ha_partition::open().
|
* Initialized in ha_partition::open().
|
||||||
* read+lock_partitions is set according to explicit PARTITION,
|
* read+lock_partitions is set according to explicit PARTITION,
|
||||||
|
@ -213,7 +213,7 @@ static inline constexpr privilege_t operator|(privilege_t a, privilege_t b)
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
// Dyadyc bitwise assignment operators
|
// Dyadic bitwise assignment operators
|
||||||
static inline privilege_t& operator&=(privilege_t &a, privilege_t b)
|
static inline privilege_t& operator&=(privilege_t &a, privilege_t b)
|
||||||
{
|
{
|
||||||
return a= a & b;
|
return a= a & b;
|
||||||
|
@ -18,7 +18,7 @@
|
|||||||
@file
|
@file
|
||||||
|
|
||||||
Low level functions for storing data to be send to the MySQL client.
|
Low level functions for storing data to be send to the MySQL client.
|
||||||
The actual communction is handled by the net_xxx functions in net_serv.cc
|
The actual communication is handled by the net_xxx functions in net_serv.cc
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#include "mariadb.h"
|
#include "mariadb.h"
|
||||||
@ -64,7 +64,7 @@ bool Protocol_binary::net_store_data(const uchar *from, size_t length)
|
|||||||
net_store_data_cs() - extended version with character set conversion.
|
net_store_data_cs() - extended version with character set conversion.
|
||||||
|
|
||||||
It is optimized for short strings whose length after
|
It is optimized for short strings whose length after
|
||||||
conversion is garanteed to be less than 251, which accupies
|
conversion is guaranteed to be less than 251, which occupies
|
||||||
exactly one byte to store length. It allows not to use
|
exactly one byte to store length. It allows not to use
|
||||||
the "convert" member as a temporary buffer, conversion
|
the "convert" member as a temporary buffer, conversion
|
||||||
is done directly to the "packet" member.
|
is done directly to the "packet" member.
|
||||||
@ -81,7 +81,7 @@ bool Protocol_binary::net_store_data_cs(const uchar *from, size_t length,
|
|||||||
#endif
|
#endif
|
||||||
{
|
{
|
||||||
uint dummy_errors;
|
uint dummy_errors;
|
||||||
/* Calculate maxumum possible result length */
|
/* Calculate maximum possible result length */
|
||||||
size_t conv_length= to_cs->mbmaxlen * length / from_cs->mbminlen;
|
size_t conv_length= to_cs->mbmaxlen * length / from_cs->mbminlen;
|
||||||
|
|
||||||
if (conv_length > 250)
|
if (conv_length > 250)
|
||||||
@ -482,7 +482,7 @@ bool Protocol::net_send_error_packet(THD *thd, uint sql_errno, const char *err,
|
|||||||
We keep a separate version for that range because it's widely used in
|
We keep a separate version for that range because it's widely used in
|
||||||
libmysql.
|
libmysql.
|
||||||
|
|
||||||
uint is used as agrument type because of MySQL type conventions:
|
uint is used as argument type because of MySQL type conventions:
|
||||||
- uint for 0..65536
|
- uint for 0..65536
|
||||||
- ulong for 0..4294967296
|
- ulong for 0..4294967296
|
||||||
- ulonglong for bigger numbers.
|
- ulonglong for bigger numbers.
|
||||||
|
@ -525,7 +525,7 @@ bool is_proxy_protocol_allowed(const sockaddr *addr)
|
|||||||
|
|
||||||
/*
|
/*
|
||||||
Non-TCP addresses (unix domain socket, windows pipe and shared memory
|
Non-TCP addresses (unix domain socket, windows pipe and shared memory
|
||||||
gets tranlated to TCP4 localhost address.
|
gets translated to TCP4 localhost address.
|
||||||
|
|
||||||
Note, that vio remote addresses are initialized with binary zeros
|
Note, that vio remote addresses are initialized with binary zeros
|
||||||
for these protocols (which is AF_UNSPEC everywhere).
|
for these protocols (which is AF_UNSPEC everywhere).
|
||||||
|
@ -656,7 +656,7 @@ Rowid_filter::build_return_code Range_rowid_filter::build()
|
|||||||
Binary search in the sorted array of a rowid filter
|
Binary search in the sorted array of a rowid filter
|
||||||
|
|
||||||
@param ctxt context of the search
|
@param ctxt context of the search
|
||||||
@parab elem rowid / primary key to look for
|
@param elem rowid / primary key to look for
|
||||||
|
|
||||||
@details
|
@details
|
||||||
The function looks for the rowid / primary key ' elem' in this container
|
The function looks for the rowid / primary key ' elem' in this container
|
||||||
|
@ -101,7 +101,7 @@ rpl_slave_state::record_and_update_gtid(THD *thd, rpl_group_info *rgi)
|
|||||||
applied, then the event should be skipped. If not then the event should be
|
applied, then the event should be skipped. If not then the event should be
|
||||||
applied.
|
applied.
|
||||||
|
|
||||||
To avoid two master connections tring to apply the same event
|
To avoid two master connections trying to apply the same event
|
||||||
simultaneously, only one is allowed to work in any given domain at any point
|
simultaneously, only one is allowed to work in any given domain at any point
|
||||||
in time. The associated Relay_log_info object is called the owner of the
|
in time. The associated Relay_log_info object is called the owner of the
|
||||||
domain (and there can be multiple parallel worker threads working in that
|
domain (and there can be multiple parallel worker threads working in that
|
||||||
@ -1240,7 +1240,7 @@ rpl_slave_state_tostring_cb(rpl_gtid *gtid, void *data)
|
|||||||
The state consists of the most recently applied GTID for each domain_id,
|
The state consists of the most recently applied GTID for each domain_id,
|
||||||
ie. the one with the highest sub_id within each domain_id.
|
ie. the one with the highest sub_id within each domain_id.
|
||||||
|
|
||||||
Optinally, extra_gtids is a list of GTIDs from the binlog. This is used when
|
Optionally, extra_gtids is a list of GTIDs from the binlog. This is used when
|
||||||
a server was previously a master and now needs to connect to a new master as
|
a server was previously a master and now needs to connect to a new master as
|
||||||
a slave. For each domain_id, if the GTID in the binlog was logged with our
|
a slave. For each domain_id, if the GTID in the binlog was logged with our
|
||||||
own server_id _and_ has a higher seq_no than what is in the slave state,
|
own server_id _and_ has a higher seq_no than what is in the slave state,
|
||||||
@ -2309,7 +2309,7 @@ rpl_binlog_state::drop_domain(DYNAMIC_ARRAY *ids,
|
|||||||
Gtid_list_log_event *glev,
|
Gtid_list_log_event *glev,
|
||||||
char* errbuf)
|
char* errbuf)
|
||||||
{
|
{
|
||||||
DYNAMIC_ARRAY domain_unique; // sequece (unsorted) of unique element*:s
|
DYNAMIC_ARRAY domain_unique; // sequence (unsorted) of unique element*:s
|
||||||
rpl_binlog_state::element* domain_unique_buffer[16];
|
rpl_binlog_state::element* domain_unique_buffer[16];
|
||||||
ulong k, l;
|
ulong k, l;
|
||||||
const char* errmsg= NULL;
|
const char* errmsg= NULL;
|
||||||
|
@ -932,7 +932,7 @@ public:
|
|||||||
~Intersecting_gtid_event_filter();
|
~Intersecting_gtid_event_filter();
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Returns TRUE if any filers exclude the gtid, returns FALSE otherwise, i.e.
|
Returns TRUE if any filters exclude the gtid, returns FALSE otherwise, i.e.
|
||||||
all filters must allow the GTID.
|
all filters must allow the GTID.
|
||||||
*/
|
*/
|
||||||
my_bool exclude(rpl_gtid *gtid) override;
|
my_bool exclude(rpl_gtid *gtid) override;
|
||||||
|
@ -55,7 +55,7 @@ injector::transaction::~transaction()
|
|||||||
|
|
||||||
/*
|
/*
|
||||||
We set the first character to null just to give all the copies of the
|
We set the first character to null just to give all the copies of the
|
||||||
start position a (minimal) chance of seening that the memory is lost.
|
start position a (minimal) chance of seeing that the memory is lost.
|
||||||
All assuming the my_free does not step over the memory, of course.
|
All assuming the my_free does not step over the memory, of course.
|
||||||
*/
|
*/
|
||||||
*the_memory= '\0';
|
*the_memory= '\0';
|
||||||
|
@ -1508,7 +1508,7 @@ bool Master_info_index::remove_master_info(Master_info *mi, bool clear_log_files
|
|||||||
{
|
{
|
||||||
File index_file_nr;
|
File index_file_nr;
|
||||||
|
|
||||||
// Close IO_CACHE and FILE handler fisrt
|
// Close IO_CACHE and FILE handler first
|
||||||
end_io_cache(&index_file);
|
end_io_cache(&index_file);
|
||||||
my_close(index_file.file, MYF(MY_WME));
|
my_close(index_file.file, MYF(MY_WME));
|
||||||
|
|
||||||
|
@ -395,7 +395,7 @@ class Master_info : public Slave_reporting_capability
|
|||||||
Flag is raised at the parallel worker slave stop. Its purpose
|
Flag is raised at the parallel worker slave stop. Its purpose
|
||||||
is to mark the whole start_alter_list when slave stops.
|
is to mark the whole start_alter_list when slave stops.
|
||||||
The flag is read by Start Alter event to self-mark its state accordingly
|
The flag is read by Start Alter event to self-mark its state accordingly
|
||||||
at time its alter info struct is about to be appened to the list.
|
at time its alter info struct is about to be appended to the list.
|
||||||
*/
|
*/
|
||||||
bool is_shutdown= false;
|
bool is_shutdown= false;
|
||||||
|
|
||||||
|
@ -2534,7 +2534,7 @@ idx_found:
|
|||||||
if(flags_extra & (Gtid_log_event::FL_COMMIT_ALTER_E1 |
|
if(flags_extra & (Gtid_log_event::FL_COMMIT_ALTER_E1 |
|
||||||
Gtid_log_event::FL_ROLLBACK_ALTER_E1 ))
|
Gtid_log_event::FL_ROLLBACK_ALTER_E1 ))
|
||||||
{
|
{
|
||||||
//Free the corrosponding rpt current_start_alter_id
|
//Free the corresponding rpt current_start_alter_id
|
||||||
for(uint i= 0; i < e->rpl_thread_max; i++)
|
for(uint i= 0; i < e->rpl_thread_max; i++)
|
||||||
{
|
{
|
||||||
if(e->rpl_threads[i].thr &&
|
if(e->rpl_threads[i].thr &&
|
||||||
|
@ -1073,7 +1073,7 @@ void Relay_log_info::inc_group_relay_log_pos(ulonglong log_pos,
|
|||||||
value which would lead to badly broken replication.
|
value which would lead to badly broken replication.
|
||||||
Even the relay_log_pos will be corrupted in this case, because the len is
|
Even the relay_log_pos will be corrupted in this case, because the len is
|
||||||
the relay log is not "val".
|
the relay log is not "val".
|
||||||
With the end_log_pos solution, we avoid computations involving lengthes.
|
With the end_log_pos solution, we avoid computations involving length.
|
||||||
*/
|
*/
|
||||||
mysql_cond_broadcast(&data_cond);
|
mysql_cond_broadcast(&data_cond);
|
||||||
if (!skip_lock)
|
if (!skip_lock)
|
||||||
@ -1286,7 +1286,7 @@ err:
|
|||||||
compare them each time this function is called, we only need to do this
|
compare them each time this function is called, we only need to do this
|
||||||
when current log name changes. If we have UNTIL_MASTER_POS condition we
|
when current log name changes. If we have UNTIL_MASTER_POS condition we
|
||||||
need to do this only after Rotate_log_event::do_apply_event() (which is
|
need to do this only after Rotate_log_event::do_apply_event() (which is
|
||||||
rare, so caching gives real benifit), and if we have UNTIL_RELAY_POS
|
rare, so caching gives real benefit), and if we have UNTIL_RELAY_POS
|
||||||
condition then we should invalidate cached comarison value after
|
condition then we should invalidate cached comarison value after
|
||||||
inc_group_relay_log_pos() which called for each group of events (so we
|
inc_group_relay_log_pos() which called for each group of events (so we
|
||||||
have some benefit if we have something like queries that use
|
have some benefit if we have something like queries that use
|
||||||
|
@ -378,7 +378,7 @@ public:
|
|||||||
slave_connection_state ign_gtids;
|
slave_connection_state ign_gtids;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Indentifies where the SQL Thread should create temporary files for the
|
Identifies where the SQL Thread should create temporary files for the
|
||||||
LOAD DATA INFILE. This is used for security reasons.
|
LOAD DATA INFILE. This is used for security reasons.
|
||||||
*/
|
*/
|
||||||
char slave_patternload_file[FN_REFLEN];
|
char slave_patternload_file[FN_REFLEN];
|
||||||
@ -396,7 +396,7 @@ public:
|
|||||||
/*
|
/*
|
||||||
The restart_gtid_state is used when the SQL thread restarts on a relay log
|
The restart_gtid_state is used when the SQL thread restarts on a relay log
|
||||||
in GTID mode. In multi-domain parallel replication, each domain may have a
|
in GTID mode. In multi-domain parallel replication, each domain may have a
|
||||||
separat position, so some events in more progressed domains may need to be
|
separate position, so some events in more progressed domains may need to be
|
||||||
skipped. This keeps track of the domains that have not yet reached their
|
skipped. This keeps track of the domains that have not yet reached their
|
||||||
starting event.
|
starting event.
|
||||||
*/
|
*/
|
||||||
@ -917,7 +917,7 @@ struct rpl_group_info
|
|||||||
void reinit(Relay_log_info *rli);
|
void reinit(Relay_log_info *rli);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Returns true if the argument event resides in the containter;
|
Returns true if the argument event resides in the container;
|
||||||
more specifically, the checking is done against the last added event.
|
more specifically, the checking is done against the last added event.
|
||||||
*/
|
*/
|
||||||
bool is_deferred_event(Log_event * ev)
|
bool is_deferred_event(Log_event * ev)
|
||||||
|
@ -105,7 +105,7 @@ private:
|
|||||||
*/
|
*/
|
||||||
entry *m_free;
|
entry *m_free;
|
||||||
|
|
||||||
/* Correspondance between an id (a number) and a TABLE object */
|
/* Correspondence between an id (a number) and a TABLE object */
|
||||||
HASH m_table_ids;
|
HASH m_table_ids;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -291,7 +291,7 @@ table_def::~table_def()
|
|||||||
|
|
||||||
@notes
|
@notes
|
||||||
event_buf will have same values on return. However during the process of
|
event_buf will have same values on return. However during the process of
|
||||||
caluclating the checksum, it's temporary changed. Because of this the
|
calculating the checksum, it's temporary changed. Because of this the
|
||||||
event_buf argument is not a pointer to const.
|
event_buf argument is not a pointer to const.
|
||||||
|
|
||||||
*/
|
*/
|
||||||
|
@ -497,7 +497,7 @@ Field_int::rpl_conv_type_from(const Conv_source &source,
|
|||||||
max_dispay_length() when the table field and the binlog field
|
max_dispay_length() when the table field and the binlog field
|
||||||
are of the same type.
|
are of the same type.
|
||||||
This code should eventually be rewritten not to use
|
This code should eventually be rewritten not to use
|
||||||
compare_lengths(), to detect subtype/supetype relations
|
compare_lengths(), to detect subtype/supertype relations
|
||||||
just using the type codes.
|
just using the type codes.
|
||||||
*/
|
*/
|
||||||
DBUG_ASSERT(source.real_field_type() != real_type());
|
DBUG_ASSERT(source.real_field_type() != real_type());
|
||||||
@ -535,7 +535,7 @@ Field_longstr::rpl_conv_type_from(const Conv_source &source,
|
|||||||
/**
|
/**
|
||||||
@todo
|
@todo
|
||||||
Implement Field_varstring_compressed::real_type() and
|
Implement Field_varstring_compressed::real_type() and
|
||||||
Field_blob_compressed::real_type() properly. All occurencies
|
Field_blob_compressed::real_type() properly. All occurrences
|
||||||
of Field::real_type() have to be inspected and adjusted if needed.
|
of Field::real_type() have to be inspected and adjusted if needed.
|
||||||
|
|
||||||
Until it is not ready we have to compare source_type against
|
Until it is not ready we have to compare source_type against
|
||||||
|
@ -75,7 +75,7 @@ void one_thread_scheduler(scheduler_functions *func, Atomic_counter<uint> *arg_c
|
|||||||
extern void scheduler_init();
|
extern void scheduler_init();
|
||||||
extern void post_kill_notification(THD *);
|
extern void post_kill_notification(THD *);
|
||||||
/*
|
/*
|
||||||
To be used for pool-of-threads (implemeneted differently on various OSs)
|
To be used for pool-of-threads (implemented differently on various OSs)
|
||||||
*/
|
*/
|
||||||
struct thd_scheduler
|
struct thd_scheduler
|
||||||
{
|
{
|
||||||
|
@ -58,7 +58,7 @@ public:
|
|||||||
|
|
||||||
/* The layout of a semisync slave reply packet:
|
/* The layout of a semisync slave reply packet:
|
||||||
1 byte for the magic num
|
1 byte for the magic num
|
||||||
8 bytes for the binlog positon
|
8 bytes for the binlog position
|
||||||
n bytes for the binlog filename, terminated with a '\0'
|
n bytes for the binlog filename, terminated with a '\0'
|
||||||
*/
|
*/
|
||||||
#define REPLY_MAGIC_NUM_LEN 1
|
#define REPLY_MAGIC_NUM_LEN 1
|
||||||
|
@ -691,7 +691,7 @@ int Repl_semi_sync_master::report_reply_binlog(uint32 server_id,
|
|||||||
/* If the requested position is behind the sending binlog position,
|
/* If the requested position is behind the sending binlog position,
|
||||||
* would not adjust sending binlog position.
|
* would not adjust sending binlog position.
|
||||||
* We based on the assumption that there are multiple semi-sync slave,
|
* We based on the assumption that there are multiple semi-sync slave,
|
||||||
* and at least one of them shou/ld be up to date.
|
* and at least one of them should be up to date.
|
||||||
* If all semi-sync slaves are behind, at least initially, the primary
|
* If all semi-sync slaves are behind, at least initially, the primary
|
||||||
* can find the situation after the waiting timeout. After that, some
|
* can find the situation after the waiting timeout. After that, some
|
||||||
* slaves should catch up quickly.
|
* slaves should catch up quickly.
|
||||||
@ -1502,7 +1502,7 @@ void Repl_semi_sync_master::await_all_slave_replies(const char *msg)
|
|||||||
/* Get the waiting time given the wait's staring time.
|
/* Get the waiting time given the wait's staring time.
|
||||||
*
|
*
|
||||||
* Return:
|
* Return:
|
||||||
* >= 0: the waiting time in microsecons(us)
|
* >= 0: the waiting time in microseconds(us)
|
||||||
* < 0: error in get time or time back traverse
|
* < 0: error in get time or time back traverse
|
||||||
*/
|
*/
|
||||||
static int get_wait_time(const struct timespec& start_ts)
|
static int get_wait_time(const struct timespec& start_ts)
|
||||||
|
@ -526,7 +526,7 @@ class Repl_semi_sync_master
|
|||||||
|
|
||||||
If info_msg is provided, it will be output via sql_print_information when
|
If info_msg is provided, it will be output via sql_print_information when
|
||||||
there are transactions awaiting ACKs; info_msg is not output if there are
|
there are transactions awaiting ACKs; info_msg is not output if there are
|
||||||
no transasctions to await.
|
no transactions to await.
|
||||||
*/
|
*/
|
||||||
void await_all_slave_replies(const char *msg);
|
void await_all_slave_replies(const char *msg);
|
||||||
|
|
||||||
@ -599,7 +599,7 @@ class Repl_semi_sync_master
 /*Wait for ACK after writing/sync binlog to file*/
 int wait_after_sync(const char* log_file, my_off_t log_pos);

-/*Wait for ACK after commting the transaction*/
+/*Wait for ACK after committing the transaction*/
 int wait_after_commit(THD* thd, bool all);

 /*Wait after the transaction is rollback*/
@ -735,7 +735,7 @@ extern unsigned long long rpl_semi_sync_master_get_ack;
 /*
 This indicates whether we should keep waiting if no semi-sync slave
 is available.
-0 : stop waiting if detected no avaialable semi-sync slave.
+0 : stop waiting if detected no available semi-sync slave.
 1 (default) : keep waiting until timeout even no available semi-sync slave.
 */
 extern char rpl_semi_sync_master_wait_no_slave;
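Restated as code, the documented 0/1 semantics of rpl_semi_sync_master_wait_no_slave amount to one extra condition in the wait loop. A toy helper, with purely illustrative names:

// true  -> the committing thread keeps waiting for an ACK (timeouts still apply),
// false -> it stops waiting as soon as no semi-sync slave is available.
static bool keep_waiting_for_ack(bool semi_sync_slave_available, bool wait_no_slave)
{
  if (semi_sync_slave_available)
    return true;             // a connected semi-sync slave may still ACK
  return wait_no_slave;      // 1: keep waiting until timeout, 0: give up now
}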
@ -190,7 +190,7 @@ void Ack_receiver::remove_slave(THD *thd)
 mysql_cond_broadcast(&m_cond);
 /*
 Wait until Ack_receiver::run() acknowledges remove of slave
-As this is only sent under the mutex and after listners has
+As this is only sent under the mutex and after listeners has
 been collected, we know that listener has ignored the found
 slave.
 */
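What the comment above describes is a standard condition-variable handshake: the caller modifies the slave list under the mutex, wakes the receiver loop, and then sleeps until the loop confirms it has rebuilt its listener set. A self-contained sketch of that pattern using std::mutex/std::condition_variable in place of the server's mysql_mutex/mysql_cond wrappers; every name here is illustrative, not Ack_receiver itself.

#include <algorithm>
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

struct AckReceiverSketch
{
  std::mutex m;
  std::condition_variable cv;
  std::vector<int> slaves;              // stand-in for the listener list
  bool removal_pending= false;

  // Caller side: drop a slave and wait until the run() loop has acknowledged.
  void remove_slave(int id)
  {
    std::unique_lock<std::mutex> lk(m);
    slaves.erase(std::remove(slaves.begin(), slaves.end(), id), slaves.end());
    removal_pending= true;
    cv.notify_all();                                   // wake the receiver loop
    cv.wait(lk, [this] { return !removal_pending; });  // wait for its acknowledgement
  }

  // Receiver side: one loop iteration reacting to a removal request.
  void run_once()
  {
    std::unique_lock<std::mutex> lk(m);
    cv.wait(lk, [this] { return removal_pending; });
    // ... a real loop would rebuild its poll/listener set here ...
    removal_pending= false;
    cv.notify_all();                                   // acknowledge back to the caller
  }
};

int main()
{
  AckReceiverSketch a;
  a.slaves= {1, 2, 3};
  std::thread loop([&] { a.run_once(); });
  a.remove_slave(2);                    // returns only after run_once() has acked
  loop.join();
  std::printf("slaves left: %zu\n", a.slaves.size());
  return 0;
}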
@ -46,7 +46,7 @@ void Session_sysvars_tracker::vars_list::reinit()
 Copy the given list.

 @param from Source vars_list object.
-@param thd THD handle to retrive the charset in use.
+@param thd THD handle to retrieve the charset in use.

 @retval true there is something to track
 @retval false nothing to track
@ -117,7 +117,7 @@ bool Session_sysvars_tracker::vars_list::insert(const sys_var *svar)
 @param var_list [IN] System variable list.
 @param throw_error [IN] bool when set to true, returns an error
 in case of invalid/duplicate values.
-@param char_set [IN] charecter set information used for string
+@param char_set [IN] character set information used for string
 manipulations.

 @return
@ -848,7 +848,7 @@ bool Transaction_state_tracker::store(THD *thd, String *buf)
 legal and equivalent syntax in MySQL, or START TRANSACTION
 sans options) will re-use any one-shots set up so far
 (with SET before the first transaction started, and with
-all subsequent STARTs), except for WITH CONSISTANT SNAPSHOT,
+all subsequent STARTs), except for WITH CONSISTENT SNAPSHOT,
 which will never be chained and only applies when explicitly
 given.

@ -952,7 +952,7 @@ bool Transaction_state_tracker::store(THD *thd, String *buf)
 /*
 "READ ONLY" / "READ WRITE"
 We could transform this to SET TRANSACTION even when it occurs
-in START TRANSACTION, but for now, we'll resysynthesize the original
+in START TRANSACTION, but for now, we'll resynthesize the original
 command as closely as possible.
 */
 buf->append(STRING_WITH_LEN("SET TRANSACTION "));
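The idea in the hunk above, rebuilding a canonical SET TRANSACTION statement from tracked state rather than echoing the client's original syntax, can be sketched with a plain std::string in place of the server's String/STRING_WITH_LEN machinery; the enum and function below are illustrative only.

#include <cstdio>
#include <string>

enum class AccessModeSketch { READ_ONLY, READ_WRITE };

// Re-synthesize the statement from tracked state, not from the original text.
static std::string resynthesize(AccessModeSketch mode)
{
  std::string buf("SET TRANSACTION ");
  buf+= (mode == AccessModeSketch::READ_ONLY) ? "READ ONLY" : "READ WRITE";
  return buf;
}

int main()
{
  std::printf("%s\n", resynthesize(AccessModeSketch::READ_ONLY).c_str());
  return 0;
}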
@ -860,7 +860,7 @@ int set_var::light_check(THD *thd)
 @returns 0|1 ok or ERROR

 @note ERROR can be only due to abnormal operations involving
-the server's execution evironment such as
+the server's execution environment such as
 out of memory, hard disk failure or the computer blows up.
 Consider set_var::check() method if there is a need to return
 an error due to logics.
@ -273,7 +273,7 @@ protected:
 /**
 A base class for everything that can be set with SET command.
 It's similar to Items, an instance of this is created by the parser
-for every assigmnent in SET (or elsewhere, e.g. in SELECT).
+for every assignment in SET (or elsewhere, e.g. in SELECT).
 */
 class set_var_base :public Sql_alloc
 {
28 sql/slave.cc
@ -329,7 +329,7 @@ gtid_pos_table_creation(THD *thd, plugin_ref engine, LEX_CSTRING *table_name)
 thd->set_db(&MYSQL_SCHEMA_NAME);
 thd->clear_error();
 ulonglong thd_saved_option= thd->variables.option_bits;
-/* This query shuold not be binlogged. */
+/* This query should not be binlogged. */
 thd->variables.option_bits&= ~(ulonglong)OPTION_BIN_LOG;
 thd->set_query_and_id(query.c_ptr(), query.length(), thd->charset(),
 next_query_id());
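The hunk above shows the usual pattern for running an internal statement without writing it to the binary log: remember thd->variables.option_bits, clear the OPTION_BIN_LOG bit, run the statement, and restore the saved value. A standalone sketch of that save/clear/restore dance; the flag value and type names below are made up for the illustration.

#include <cstdint>
#include <cstdio>

// Illustrative bit; the real OPTION_BIN_LOG constant is defined by the server.
static const uint64_t OPTION_BIN_LOG_SKETCH= 1ULL << 30;

struct SessionVarsSketch { uint64_t option_bits; };

static void run_without_binlog(SessionVarsSketch &vars)
{
  uint64_t saved= vars.option_bits;            // remember the current flags
  vars.option_bits&= ~OPTION_BIN_LOG_SKETCH;   // this query should not be binlogged
  // ... execute the internal statement here ...
  vars.option_bits= saved;                     // restore the session's flags
}

int main()
{
  SessionVarsSketch vars{OPTION_BIN_LOG_SKETCH | 0x5};
  run_without_binlog(vars);
  std::printf("option_bits restored: %#llx\n",
              static_cast<unsigned long long>(vars.option_bits));
  return 0;
}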
@ -1049,7 +1049,7 @@ terminate_slave_thread(THD *thd,

 /*
 Is is critical to test if the slave is running. Otherwise, we might
-be referening freed memory trying to kick it
+be referencing freed memory trying to kick it
 */

 while (*slave_running) // Should always be true
@ -1323,7 +1323,7 @@ static bool io_slave_killed(Master_info* mi)
 The function analyzes a possible killed status and makes
 a decision whether to accept it or not.
 Normally upon accepting the sql thread goes to shutdown.
-In the event of deffering decision @rli->last_event_start_time waiting
+In the event of deferring decision @rli->last_event_start_time waiting
 timer is set to force the killed status be accepted upon its expiration.

 @param thd pointer to a THD instance
@ -1391,8 +1391,8 @@ static bool sql_slave_killed(rpl_group_info *rgi)
 may eventually give out to complete the current group and in
 that case there might be issues at consequent slave restart,
 see the error message. WL#2975 offers a robust solution
-requiring to store the last exectuted event's coordinates
-along with the group's coordianates instead of waiting with
+requiring to store the last executed event's coordinates
+along with the group's coordinates instead of waiting with
 @c last_event_start_time the timer.
 */

@ -2006,7 +2006,7 @@ inconsistency if replicated data deals with collation.");
 slave and master, but we can't rely on value of @@system_time_zone
 variable (it is time zone abbreviation) since it determined at start
 time and so could differ for slave and master even if they are really
-in the same system time zone. So we are omiting this check and just
+in the same system time zone. So we are omitting this check and just
 relying on documentation. Also according to Monty there are many users
 who are using replication between servers in various time zones. Hence
 such check will broke everything for them. (And now everything will
@ -3740,7 +3740,7 @@ apply_event_and_update_pos_apply(Log_event* ev, THD* thd, rpl_group_info *rgi,
 ev->update_pos(rli);
 @endcode

-It also does the following maintainance:
+It also does the following maintenance:

 - Initializes the thread's server_id and time; and the event's
 thread.
@ -4096,7 +4096,7 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli,
 The following failure injecion works in cooperation with tests
 setting @@global.debug= 'd,incomplete_group_in_relay_log'.
 Xid or Commit events are not executed to force the slave sql
-read hanging if the realy log does not have any more events.
+read hanging if the relay log does not have any more events.
 */
 DBUG_EXECUTE_IF("incomplete_group_in_relay_log",
 if ((typ == XID_EVENT) ||
@ -4436,7 +4436,7 @@ static int try_to_reconnect(THD *thd, MYSQL *mysql, Master_info *mi,
 IO_RPL_LOG_NAME, mi->master_log_pos,
 tmp.c_ptr_safe());
 /*
-Raise a warining during registering on master/requesting dump.
+Raise a warning during registering on master/requesting dump.
 Log a message reading event.
 */
 if (messages[SLAVE_RECON_MSG_COMMAND][0])
@ -6134,7 +6134,7 @@ static int queue_event(Master_info* mi, const uchar *buf, ulong event_len)
 /*
 compare local and event's versions of log_file, log_pos.

-Heartbeat is sent only after an event corresponding to the corrdinates
+Heartbeat is sent only after an event corresponding to the coordinates
 the heartbeat carries.
 Slave can not have a higher coordinate except in the only
 special case when mi->master_log_name, master_log_pos have never
@ -6143,7 +6143,7 @@ static int queue_event(Master_info* mi, const uchar *buf, ulong event_len)

 Slave can have lower coordinates, if some event from master was omitted.

-TODO: handling `when' for SHOW SLAVE STATUS' snds behind
+TODO: handling `when' for SHOW SLAVE STATUS' sends behind
 */
 if (memcmp(mi->master_log_name, hb.get_log_ident(), hb.get_ident_len()) ||
 mi->master_log_pos > hb.log_pos) {
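The rule described and enforced in the two hunks above, that a heartbeat's coordinates must not be behind the slave's own (log name, position) except when nothing has been received yet, fits in a few lines. A sketch with std::string standing in for the server's fixed-size name buffers; the names and the handling of the never-received special case are simplifications, not the server's check.

#include <cstdio>
#include <string>

struct CoordSketch
{
  std::string log_name;
  unsigned long long log_pos;
};

// True if the heartbeat is consistent with what the slave has already seen:
// same file and not behind the local position, or the special case where the
// slave has not received anything from the master yet.
static bool heartbeat_consistent(const CoordSketch &local, const CoordSketch &hb)
{
  if (local.log_name.empty())
    return true;
  return local.log_name == hb.log_name && local.log_pos <= hb.log_pos;
}

int main()
{
  CoordSketch local{"master-bin.000003", 1200};
  CoordSketch hb{"master-bin.000003", 1500};
  std::printf("consistent: %s\n", heartbeat_consistent(local, hb) ? "yes" : "no");
  return 0;
}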
@ -6871,7 +6871,7 @@ void end_relay_log_info(Relay_log_info* rli)
 mysql_mutex_unlock(log_lock);
 /*
 Delete the slave's temporary tables from memory.
-In the future there will be other actions than this, to ensure persistance
+In the future there will be other actions than this, to ensure persistence
 of slave's temp tables after shutdown.
 */
 rli->close_temporary_tables();
@ -7245,7 +7245,7 @@ static Log_event* next_event(rpl_group_info *rgi, ulonglong *event_size)
 finishes executing the new event; it will be look abnormal only if
 the events have old timestamps (then you get "many", 0, "many").

-Transient phases like this can be fixed with implemeting
+Transient phases like this can be fixed with implementing
 Heartbeat event which provides the slave the status of the
 master at time the master does not have any new update to send.
 Seconds_Behind_Master would be zero only when master has no
@ -7848,7 +7848,7 @@ void Rows_event_tracker::update(const char *file_name, my_off_t pos,
 /**
 The function is called at next event reading
 after a sequence of Rows- log-events. It checks the end-of-statement status
-of the past sequence to report on any isssue.
+of the past sequence to report on any issue.
 In the positive case the tracker gets reset.

 @return true when the Rows- event group integrity found compromised,
@ -26,7 +26,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

 /* Changes:
-* 2023-12-25 Addopted for MariaDB usage
+* 2023-12-25 Adopted for MariaDB usage
 * 2014-02-12: merge David Woodhouse, Ger Hobbelt improvements
 * git.infradead.org/users/dwmw2/openconnect.git/commitdiff/bdeefa54
 * github.com/GerHobbelt/selectable-socketpair
10 sql/sp.cc
@ -838,7 +838,7 @@ static LEX_STRING copy_definition_string(String *defstr,


 /**
-@brief The function parses input strings and returns SP stucture.
+@brief The function parses input strings and returns SP structure.

 @param[in] thd Thread handler
 @param[in] defstr CREATE... string
@ -984,7 +984,7 @@ Sp_handler::db_load_routine(THD *thd, const Database_qualified_name *name,
 defstr.set_thread_specific();

 /*
-We have to add DEFINER clause and provide proper routine characterstics in
+We have to add DEFINER clause and provide proper routine characteristics in
 routine definition statement that we build here to be able to use this
 definition for SHOW CREATE PROCEDURE later.
 */
@ -1238,7 +1238,7 @@ Sp_handler_package_spec::
 - SP_OK means that "CREATE PACKAGE pkg" had a correspoinding
 "CREATE PACKAGE BODY pkg", which was successfully dropped.
 */
-return ret; // Other codes mean an unexpecte error
+return ret; // Other codes mean an unexpected error
 }
 return Sp_handler::sp_find_and_drop_routine(thd, table, name);
 }
@ -1550,7 +1550,7 @@ log:
 my_error(ER_OUT_OF_RESOURCES, MYF(0));
 goto done;
 }
-/* restore sql_mode when binloging */
+/* restore sql_mode when binlogging */
 thd->variables.sql_mode= org_sql_mode;
 /* Such a statement can always go directly to binlog, no trans cache */
 if (thd->binlog_query(THD::STMT_QUERY_TYPE,
@ -3107,7 +3107,7 @@ Sp_handler::show_create_sp(THD *thd, String *buf,
 (used for I_S ROUTINES & PARAMETERS tables).

 @param[in] thd thread handler
-@param[in] proc_table mysql.proc table structurte
+@param[in] proc_table mysql.proc table structure
 @param[in] db database name
 @param[in] name sp name
 @param[in] sql_mode SQL mode
@ -177,7 +177,7 @@ void sp_cache_insert(sp_cache **cp, sp_head *sp)
 SYNOPSIS
 sp_cache_lookup()
 cp Cache to look into
-name Name of rutine to find
+name Name of routine to find

 NOTE
 An obsolete (but not more obsolete then since last
@ -722,7 +722,7 @@ bool sp_package::validate_public_routines(THD *thd, sp_package *spec)
 bool sp_package::validate_private_routines(THD *thd)
 {
 /*
-Check that all forwad declarations in
+Check that all forward declarations in
 CREATE PACKAGE BODY have implementations.
 */
 List_iterator<LEX> it(m_routine_declarations);
@ -1639,7 +1639,7 @@ bool sp_head::check_execute_access(THD *thd) const

 @param thd
 @param ret_value
-@retval NULL - error (access denided or EOM)
+@retval NULL - error (access denied or EOM)
 @retval !NULL - success (the invoker has rights to all %TYPE tables)
 */

@ -2027,7 +2027,7 @@ sp_head::execute_function(THD *thd, Item **argp, uint argcount,
 we have separate union for each such event and hence can't use
 query_id of real calling statement as the start of all these
 unions (this will break logic of replication of user-defined
-variables). So we use artifical value which is guaranteed to
+variables). So we use artificial value which is guaranteed to
 be greater than all query_id's of all statements belonging
 to previous events/unions.
 Possible alternative to this is logging of all function invocations
@ -2317,7 +2317,7 @@ sp_head::execute_procedure(THD *thd, List<Item> *args)

 /*
 In the case when we weren't able to employ reuse mechanism for
-OUT/INOUT paranmeters, we should reallocate memory. This
+OUT/INOUT parameters, we should reallocate memory. This
 allocation should be done on the arena which will live through
 all execution of calling routine.
 */
@ -286,7 +286,7 @@ public:
 /** Recursion level of the current SP instance. The levels are numbered from 0 */
 ulong m_recursion_level;
 /**
-A list of diferent recursion level instances for the same procedure.
+A list of different recursion level instances for the same procedure.
 For every recursion level we have a sp_head instance. This instances
 connected in the list. The list ordered by increasing recursion level
 (m_recursion_level).
@ -299,7 +299,7 @@ public:
 /**
 Pointer to the first free (non-INVOKED) routine in the list of
 cached instances for this SP. This pointer is set only for the first
-SP in the list of instences (see above m_first_cached_sp pointer).
+SP in the list of instances (see above m_first_cached_sp pointer).
 The pointer equal to 0 if we have no free instances.
 For non-first instance value of this pointer meanless (point to itself);
 */
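Read together, the two hunks above describe one sp_head instance per recursion level, chained in a list, with the head of the list also tracking the first free (not currently invoked) instance. A toy sketch of that bookkeeping; the structure and the acquire() helper are simplifications invented for the illustration, not sp_head or sp_cache code.

#include <cstdio>

// One instance per recursion level, linked in increasing level order.
struct RoutineInstanceSketch
{
  unsigned long recursion_level;
  bool invoked= false;
  RoutineInstanceSketch *next_level= nullptr;
  // Meaningful only on the first instance of the list: first non-INVOKED
  // instance, or nullptr when every cached instance is currently in use.
  RoutineInstanceSketch *first_free= nullptr;
};

// Hand out a free instance for the next (recursive) invocation.
static RoutineInstanceSketch *acquire(RoutineInstanceSketch *first)
{
  RoutineInstanceSketch *free_sp= first->first_free;
  if (!free_sp)
    return nullptr;                          // caller would have to create one
  free_sp->invoked= true;
  first->first_free= free_sp->next_level;    // next candidate, possibly nullptr
  return free_sp;
}

int main()
{
  RoutineInstanceSketch level0{0}, level1{1};
  level0.next_level= &level1;
  level0.first_free= &level0;
  acquire(&level0);                          // outer call uses level 0
  RoutineInstanceSketch *inner= acquire(&level0);
  std::printf("recursive call uses level %lu\n", inner->recursion_level);
  return 0;
}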
@ -559,7 +559,7 @@ public:
 FOR index IN cursor(1,2,3) -- cursor with parameters

 The code generated by this method does the following during SP run-time:
-- Sets all cursor parameter vartiables from "parameters"
+- Sets all cursor parameter variables from "parameters"
 - Initializes the index ROW-type variable from the cursor
 (the structure is copied from the cursor to the index variable)
 - The cursor gets opened
@ -938,7 +938,7 @@ public:

 /*
 Check EXECUTE access:
-- in case of a standalone rotuine, for the routine itself
+- in case of a standalone routine, for the routine itself
 - in case of a package routine, for the owner package body
 */
 bool check_execute_access(THD *thd) const;
Some files were not shown because too many files have changed in this diff.