From 717c12de0e2dcf9118777c28eeae3b766e0886f1 Mon Sep 17 00:00:00 2001 From: Vasilii Lakhin Date: Sun, 9 Mar 2025 18:19:33 +0200 Subject: [PATCH] Fix typos in C comments inside sql/ --- sql/authors.h | 2 +- sql/ddl_log.cc | 8 +- sql/debug.h | 2 +- sql/derived_handler.h | 2 +- sql/derror.cc | 2 +- sql/event_data_objects.cc | 2 +- sql/event_scheduler.cc | 2 +- sql/field.cc | 8 +- sql/field.h | 4 +- sql/filesort.cc | 4 +- sql/gcalc_slicescan.h | 2 +- sql/ha_partition.cc | 14 +-- sql/ha_sequence.cc | 2 +- sql/ha_sequence.h | 2 +- sql/handle_connections_win.cc | 6 +- sql/handler.cc | 14 +-- sql/handler.h | 26 +++--- sql/item.cc | 18 ++-- sql/item.h | 16 ++-- sql/item_cmpfunc.cc | 18 ++-- sql/item_cmpfunc.h | 2 +- sql/item_create.cc | 2 +- sql/item_func.cc | 4 +- sql/item_func.h | 2 +- sql/item_geofunc.cc | 2 +- sql/item_geofunc.h | 2 +- sql/item_jsonfunc.cc | 6 +- sql/item_strfunc.cc | 8 +- sql/item_subselect.cc | 26 +++--- sql/item_subselect.h | 4 +- sql/item_sum.cc | 8 +- sql/item_sum.h | 2 +- sql/item_timefunc.cc | 2 +- sql/item_windowfunc.h | 2 +- sql/item_xmlfunc.cc | 8 +- sql/json_schema.cc | 4 +- sql/json_table.cc | 4 +- sql/json_table.h | 4 +- sql/lex_charset.cc | 2 +- sql/lex_charset.h | 2 +- sql/lex_ident.h | 6 +- sql/log.cc | 26 +++--- sql/log.h | 4 +- sql/log_event.cc | 14 +-- sql/log_event.h | 8 +- sql/log_event_client.cc | 6 +- sql/log_event_server.cc | 16 ++-- sql/mariadb.h | 2 +- sql/mdl.cc | 2 +- sql/mf_iocache.cc | 2 +- sql/multi_range_read.cc | 2 +- sql/multi_range_read.h | 2 +- sql/my_apc.h | 4 +- sql/my_json_writer.h | 10 +-- sql/mysqld.cc | 20 ++--- sql/mysqld.h | 2 +- sql/opt_histogram_json.cc | 2 +- sql/opt_index_cond_pushdown.cc | 2 +- sql/opt_range.cc | 52 +++++------ sql/opt_sargable_left.cc | 4 +- sql/opt_split.cc | 4 +- sql/opt_subselect.cc | 24 ++--- sql/opt_table_elimination.cc | 16 ++-- sql/opt_trace.cc | 2 +- sql/opt_trace_context.h | 2 +- sql/opt_vcol_substitution.cc | 6 +- sql/optimizer_costs.h | 2 +- sql/optimizer_defaults.h | 6 +- 
sql/parse_file.cc | 2 +- sql/partition_info.cc | 2 +- sql/partition_info.h | 2 +- sql/privilege.h | 2 +- sql/protocol.cc | 8 +- sql/proxy_protocol.cc | 2 +- sql/rowid_filter.cc | 2 +- sql/rpl_gtid.cc | 6 +- sql/rpl_gtid.h | 2 +- sql/rpl_injector.cc | 2 +- sql/rpl_mi.cc | 2 +- sql/rpl_mi.h | 2 +- sql/rpl_parallel.cc | 2 +- sql/rpl_rli.cc | 4 +- sql/rpl_rli.h | 6 +- sql/rpl_tblmap.h | 2 +- sql/rpl_utility.cc | 2 +- sql/rpl_utility_server.cc | 4 +- sql/scheduler.h | 2 +- sql/semisync.h | 2 +- sql/semisync_master.cc | 4 +- sql/semisync_master.h | 6 +- sql/semisync_master_ack_receiver.cc | 2 +- sql/session_tracker.cc | 8 +- sql/set_var.cc | 2 +- sql/set_var.h | 2 +- sql/slave.cc | 28 +++--- sql/socketpair.c | 2 +- sql/sp.cc | 10 +-- sql/sp_cache.cc | 2 +- sql/sp_head.cc | 8 +- sql/sp_head.h | 8 +- sql/sp_instr.cc | 2 +- sql/sp_pcontext.cc | 2 +- sql/sp_pcontext.h | 4 +- sql/spatial.cc | 4 +- sql/sql_acl.cc | 24 ++--- sql/sql_admin.cc | 6 +- sql/sql_audit.cc | 2 +- sql/sql_base.cc | 24 ++--- sql/sql_base.h | 2 +- sql/sql_basic_types.h | 2 +- sql/sql_bitmap.h | 2 +- sql/sql_cache.cc | 22 ++--- sql/sql_cache.h | 4 +- sql/sql_class.cc | 22 ++--- sql/sql_class.h | 26 +++--- sql/sql_connect.cc | 10 +-- sql/sql_const.h | 2 +- sql/sql_cte.cc | 8 +- sql/sql_cte.h | 2 +- sql/sql_db.cc | 10 +-- sql/sql_delete.cc | 6 +- sql/sql_derived.cc | 2 +- sql/sql_error.cc | 2 +- sql/sql_error.h | 4 +- sql/sql_explain.cc | 10 +-- sql/sql_explain.h | 4 +- sql/sql_handler.cc | 8 +- sql/sql_help.cc | 6 +- sql/sql_insert.cc | 16 ++-- sql/sql_join_cache.cc | 12 +-- sql/sql_join_cache.h | 4 +- sql/sql_lex.cc | 46 +++++----- sql/sql_lex.h | 10 +-- sql/sql_limit.h | 2 +- sql/sql_load.cc | 10 +-- sql/sql_mode.h | 4 +- sql/sql_parse.cc | 8 +- sql/sql_partition.cc | 14 +-- sql/sql_partition_admin.cc | 4 +- sql/sql_plugin.cc | 4 +- sql/sql_prepare.cc | 10 +-- sql/sql_priv.h | 2 +- sql/sql_profile.h | 2 +- sql/sql_repl.cc | 8 +- sql/sql_select.cc | 132 ++++++++++++++-------------- sql/sql_select.h | 24 
++--- sql/sql_sequence.cc | 4 +- sql/sql_servers.cc | 4 +- sql/sql_show.cc | 16 ++-- sql/sql_sort.h | 4 +- sql/sql_statistics.cc | 18 ++-- sql/sql_statistics.h | 4 +- sql/sql_string.cc | 8 +- sql/sql_string.h | 12 +-- sql/sql_table.cc | 22 ++--- sql/sql_time.cc | 2 +- sql/sql_trigger.cc | 4 +- sql/sql_tvc.cc | 8 +- sql/sql_type.cc | 18 ++-- sql/sql_type.h | 8 +- sql/sql_type_int.h | 2 +- sql/sql_type_json.cc | 2 +- sql/sql_udf.cc | 4 +- sql/sql_union.cc | 6 +- sql/sql_update.cc | 8 +- sql/sql_view.cc | 4 +- sql/sql_window.cc | 6 +- sql/sql_window.h | 4 +- sql/sys_vars.cc | 12 +-- sql/table.cc | 34 +++---- sql/table.h | 12 +-- sql/table_cache.cc | 10 +-- sql/temporary_tables.cc | 4 +- sql/threadpool_generic.cc | 6 +- sql/threadpool_winsockets.h | 2 +- sql/transaction.cc | 2 +- sql/tztime.cc | 16 ++-- sql/tztime.h | 4 +- sql/unireg.cc | 2 +- sql/unireg.h | 2 +- sql/winmain.cc | 2 +- sql/winservice.c | 2 +- sql/wsrep_client_service.cc | 4 +- sql/wsrep_client_service.h | 2 +- sql/wsrep_mysqld.cc | 2 +- sql/wsrep_mysqld.h | 2 +- sql/wsrep_schema.h | 2 +- sql/wsrep_sst.cc | 6 +- sql/wsrep_sst.h | 2 +- sql/wsrep_storage_service.cc | 2 +- sql/wsrep_trans_observer.h | 16 ++-- sql/wsrep_var.cc | 2 +- sql/xa.cc | 2 +- 193 files changed, 746 insertions(+), 746 deletions(-) diff --git a/sql/authors.h b/sql/authors.h index cf0a4c5e51a..44837701991 100644 --- a/sql/authors.h +++ b/sql/authors.h @@ -51,7 +51,7 @@ struct show_table_authors_st show_table_authors[]= { "Query Cache (4.0), Subqueries (4.1), Views (5.0)" }, { "Timour Katchaounov", "Sofia , Bulgaria", "Optimizer"}, { "Kristian Nielsen", "Copenhagen, Denmark", - "Replication, Async client prototocol, General buildbot stuff" }, + "Replication, Async client protocol, General buildbot stuff" }, { "Alexander (Bar) Barkov", "Izhevsk, Russia", "Unicode and character sets" }, { "Alexey Botchkov (Holyfoot)", "Izhevsk, Russia", diff --git a/sql/ddl_log.cc b/sql/ddl_log.cc index 88a41b41e65..3cebe0025d8 100644 --- a/sql/ddl_log.cc 
+++ b/sql/ddl_log.cc @@ -876,7 +876,7 @@ static bool ddl_log_increment_phase_no_lock(uint entry_pos) else { /* - Trying to deativate an execute entry or already deactive entry. + Trying to deactivate an execute entry or an already deactivated entry. This should not happen */ DBUG_ASSERT(0); @@ -1017,7 +1017,7 @@ static void ddl_log_to_binary_log(THD *thd, String *query) table name to the query When we log, we always log all found tables and views at the same time. This - is done to simply the exceute code as otherwise we would have to keep + is done to simplify the execute code as otherwise we would have to keep information of what was logged. */ @@ -1505,7 +1505,7 @@ static int ddl_log_execute_action(THD *thd, MEM_ROOT *mem_root, case DDL_RENAME_PHASE_STAT: /* Stat tables must be updated last so that we can handle a rename of - a stat table. For now we just rememeber that we have to update it + a stat table. For now we just remember that we have to update it */ update_flags(ddl_log_entry->entry_pos, DDL_LOG_FLAG_UPDATE_STAT); ddl_log_entry->flags|= DDL_LOG_FLAG_UPDATE_STAT; @@ -2543,7 +2543,7 @@ bool ddl_log_write_entry(DDL_LOG_ENTRY *ddl_log_entry, @brief Write or update execute entry in the ddl log. @details An execute entry points to the first entry that should - be excuted during recovery. In some cases it's only written once, + be executed during recovery. In some cases it's only written once, in other cases it's updated for each log entry to point to the new header for the list. diff --git a/sql/debug.h b/sql/debug.h index f0eaa79e3c7..353441ac033 100644 --- a/sql/debug.h +++ b/sql/debug.h @@ -23,7 +23,7 @@ functionality. */ -/* debug_crash_here() functionallity. +/* debug_crash_here() functionality.
See mysql_test/suite/atomic/create_table.test for an example of how it can be used */ diff --git a/sql/derived_handler.h b/sql/derived_handler.h index f6feed8db32..9aac5e29934 100644 --- a/sql/derived_handler.h +++ b/sql/derived_handler.h @@ -73,7 +73,7 @@ public: */ virtual int next_row()= 0; - /* End prodicing rows */ + /* End producing rows */ virtual int end_scan()=0; /* Report errors */ diff --git a/sql/derror.cc b/sql/derror.cc index 455e57fd2d7..6b0f699faf0 100644 --- a/sql/derror.cc +++ b/sql/derror.cc @@ -18,7 +18,7 @@ @file @brief - Read language depeneded messagefile + Read language depended messagefile */ #include "mariadb.h" diff --git a/sql/event_data_objects.cc b/sql/event_data_objects.cc index 4238cd90e02..2f6584bfcde 100644 --- a/sql/event_data_objects.cc +++ b/sql/event_data_objects.cc @@ -519,7 +519,7 @@ Event_queue_element::load_from_row(THD *thd, TABLE *table) else expression= 0; /* - If neigher STARTS and ENDS is set, then both fields are empty. + If neither STARTS and ENDS is set, then both fields are empty. Hence, if ET_FIELD_EXECUTE_AT is empty there is an error. */ execute_at_null= table->field[ET_FIELD_EXECUTE_AT]->is_null(); diff --git a/sql/event_scheduler.cc b/sql/event_scheduler.cc index b75b0e2ce2a..42e7a033882 100644 --- a/sql/event_scheduler.cc +++ b/sql/event_scheduler.cc @@ -167,7 +167,7 @@ deinit_event_thread(THD *thd) thd The THD of the thread. Has to be allocated by the caller. NOTES - 1. The host of the thead is my_localhost + 1. The host of the thread is my_localhost 2. thd->net is initted with NULL - no communication. 
*/ diff --git a/sql/field.cc b/sql/field.cc index a890ba98539..9aa816d0d79 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -85,7 +85,7 @@ bool Field::marked_for_write_or_computed() const Rules for merging different types of fields in UNION NOTE: to avoid 256*256 table, gap in table types numeration is skipped - following #defines describe that gap and how to canculate number of fields + following #defines describe that gap and how to calculate number of fields and index of field in this array. */ const int FIELDTYPE_TEAR_FROM= (MYSQL_TYPE_BIT + 1); @@ -8573,7 +8573,7 @@ void Field_varstring::hash_not_null(Hasher *hasher) @param[in] from data to compress @param[in] length from length @param[in] max_length truncate `from' to this length - @param[out] out_length compessed data length + @param[out] out_length compressed data length @param[in] cs from character set @param[in] nchars copy no more than "nchars" characters @@ -9989,8 +9989,8 @@ Field_enum::can_optimize_range_or_keypart_ref(const Item_bool_func *cond, 3 - first (high) bit of 'c' 2 - second bit of 'c' 1 - third bit of 'c' - 0 - forth bit of 'c' - 2 7 - firth bit of 'c' + 0 - fourth bit of 'c' + 2 7 - fifth bit of 'c' 6 - null bit for 'd' 3 - 6 four bytes for 'a' 7 - 8 two bytes for 'b' diff --git a/sql/field.h b/sql/field.h index bf99eb2820a..cdab1e49f19 100644 --- a/sql/field.h +++ b/sql/field.h @@ -1045,7 +1045,7 @@ public: /** Mark unused memory in the field as defined. Mainly used to ensure that if we write full field to disk (for example in - Count_distinct_field::add(), we don't write unitalized data to + Count_distinct_field::add(), we don't write uninitialized data to disk which would confuse valgrind or MSAN. */ virtual void mark_unused_memory_as_defined() {} @@ -1786,7 +1786,7 @@ protected: /* Make a leaf tree when an INT value was stored into a field of INT type, and some truncation happened. Tries to adjust the range search condition - when possible, e.g. "tinytint < 300" -> "tinyint <= 127". 
+ when possible, e.g. "tinyint < 300" -> "tinyint <= 127". Can also return SEL_ARG_IMPOSSIBLE(), and NULL (not sargable). */ SEL_ARG *stored_field_make_mm_leaf_bounded_int(RANGE_OPT_PARAM *param, diff --git a/sql/filesort.cc b/sql/filesort.cc index 3e61e56c294..d6d16beaebe 100644 --- a/sql/filesort.cc +++ b/sql/filesort.cc @@ -339,7 +339,7 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort, if (costs.fastest_sort == PQ_SORT_ALL_FIELDS || costs.fastest_sort == PQ_SORT_ORDER_BY_FIELDS) { - /* We are going to use priorty queue */ + /* We are going to use priority queue */ thd->query_plan_flags|= QPLAN_FILESORT_PRIORITY_QUEUE; status_var_increment(thd->status_var.filesort_pq_sorts_); tracker->incr_pq_used(); @@ -359,7 +359,7 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort, param.res_length= param.ref_length; /* Add the ref (rowid which is stored last in the sort key) to the sort, - as we want to retrive rows in id order, if possible. + as we want to retrieve rows in id order, if possible. */ param.sort_length+= param.ref_length; param.rec_length= param.sort_length; diff --git a/sql/gcalc_slicescan.h b/sql/gcalc_slicescan.h index 37e887e87e5..950c88e8aa9 100644 --- a/sql/gcalc_slicescan.h +++ b/sql/gcalc_slicescan.h @@ -414,7 +414,7 @@ public: }; /* That class introduced mostly for the 'typecontrol' reason. */ - /* only difference from the point classis the get_next() function. */ + /* only difference from the point classes is the get_next() function. 
*/ class event_point : public point { public: diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index 078ecee4b9e..e4732c6f17f 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -3476,7 +3476,7 @@ bool ha_partition::re_create_par_file(const char *name) 0); if (m_part_info->partitions.elements == 0) { - /* We did not succed in creating default partitions */ + /* We did not succeed in creating default partitions */ tmp= 1; } } @@ -4508,7 +4508,7 @@ int ha_partition::start_stmt(THD *thd, thr_lock_type lock_type) @returns Number of locks returned in call to store_lock @desc - Returns the maxinum possible number of store locks needed in call to + Returns the maximum possible number of store locks needed in call to store lock. */ @@ -5910,7 +5910,7 @@ int ha_partition::index_end() index_read_map can be restarted without calling index_end on the previous index scan and without calling index_init. In this case the index_read_map is on the same index as the previous index_scan. This is particularly - used in conjuntion with multi read ranges. + used in conjunction with multi read ranges. */ int ha_partition::index_read_map(uchar *buf, const uchar *key, @@ -7436,7 +7436,7 @@ end_dont_reset_start_part: SYNOPSIS ha_partition::partition_scan_set_up() buf Buffer to later return record in (this function - needs it to calculcate partitioning function + needs it to calculate partitioning function values) idx_read_flag TRUE <=> m_start_key has range start endpoint which @@ -8782,7 +8782,7 @@ int ha_partition::info(uint flag) have been disabled. The most important parameters set here is records per key on - all indexes. block_size and primar key ref_length. + all indexes. block_size and primary key ref_length. For each index there is an array of rec_per_key. 
As an example if we have an index with three attributes a,b and c @@ -9943,7 +9943,7 @@ IO_AND_CPU_COST ha_partition::scan_time() /** @brief - Caculate time to scan the given index (index only scan) + Calculate time to scan the given index (index only scan) @param inx Index number to scan @@ -10701,7 +10701,7 @@ bool ha_partition::prepare_inplace_alter_table(TABLE *altered_table, /* Changing to similar partitioning, only update metadata. - Non allowed changes would be caought in prep_alter_part_table(). + Non allowed changes would be caught in prep_alter_part_table(). */ if (ha_alter_info->alter_info->partition_flags == ALTER_PARTITION_INFO) { diff --git a/sql/ha_sequence.cc b/sql/ha_sequence.cc index 4d2179d3555..c60b7f13e32 100644 --- a/sql/ha_sequence.cc +++ b/sql/ha_sequence.cc @@ -382,7 +382,7 @@ int ha_sequence::discard_or_import_tablespace(my_bool discard) } /* - Squence engine error deal method + Sequence engine error deal method */ void ha_sequence::print_error(int error, myf errflag) diff --git a/sql/ha_sequence.h b/sql/ha_sequence.h index 0a3ff86a121..bc1eb07d8b0 100644 --- a/sql/ha_sequence.h +++ b/sql/ha_sequence.h @@ -34,7 +34,7 @@ extern handlerton *sql_sequence_hton; The sequence data (SEQUENCE class) is stored in TABLE_SHARE->sequence TABLE RULES: - 1. When table is created, one row is automaticlly inserted into + 1. When table is created, one row is automatically inserted into the table. The table will always have one and only one row. 2. Any inserts or updates to the table will be validated. 3. Inserts will overwrite the original row. 
diff --git a/sql/handle_connections_win.cc b/sql/handle_connections_win.cc index ffacfcab88f..d0b7e76751c 100644 --- a/sql/handle_connections_win.cc +++ b/sql/handle_connections_win.cc @@ -78,7 +78,7 @@ struct Listener virtual void begin_accept()= 0; /** - Completion callback,called whenever IO posted by begin_accept is finisjed + Completion callback,called whenever IO posted by begin_accept is finished Listener needs to create a new THD then (or, call scheduler so it creates one) @param success - whether IO completed successfull @@ -112,7 +112,7 @@ struct Listener }; }; -/* Winsock extension finctions. */ +/* Winsock extension functions. */ static LPFN_ACCEPTEX my_AcceptEx; static LPFN_GETACCEPTEXSOCKADDRS my_GetAcceptExSockaddrs; @@ -121,7 +121,7 @@ static LPFN_GETACCEPTEXSOCKADDRS my_GetAcceptExSockaddrs; Can be threadpool-bound (i.e the completion is executed in threadpool thread), or use events for waits. - Threadpool-bound listener should be used with theradpool scheduler, for better + Threadpool-bound listener should be used with threadpool scheduler, for better performance. */ struct Socket_Listener: public Listener diff --git a/sql/handler.cc b/sql/handler.cc index 9ca2fee591c..81d5ec6da58 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -123,7 +123,7 @@ ulong total_ha_2pc= 0; /* Number of non-mandatory 2pc handlertons whose initialization failed to estimate total_ha_2pc value under supposition of the failures - have not occcured. + have not occured. 
*/ ulong failed_ha_2pc= 0; #endif @@ -613,7 +613,7 @@ int ha_finalize_handlerton(void *plugin_) */ if (hton->slot != HA_SLOT_UNDEF) { - /* Make sure we are not unpluging another plugin */ + /* Make sure we are not unplugging another plugin */ DBUG_ASSERT(hton2plugin[hton->slot] == plugin); DBUG_ASSERT(hton->slot < MAX_HA); hton2plugin[hton->slot]= NULL; @@ -2088,7 +2088,7 @@ err: { /* We are not really doing a rollback here, but the code in trans_commit() - requres that m_transaction_psi is 0 when we return from this function. + requires that m_transaction_psi is 0 when we return from this function. */ MYSQL_ROLLBACK_TRANSACTION(thd->m_transaction_psi); thd->m_transaction_psi= NULL; @@ -3418,7 +3418,7 @@ LEX_CSTRING *handler::engine_name() /* - Calclate the number of index blocks we are going to access when + Calculate the number of index blocks we are going to access when doing 'ranges' index dives reading a total of 'rows' rows. */ @@ -5379,7 +5379,7 @@ bool non_existing_table_error(int error) @retval HA_ADMIN_NEEDS_DATA_CONVERSION Table has structures requiring - ALTER TABLE FORCE, algortithm=COPY to + ALTER TABLE FORCE, algorithm=COPY to recreate data. @retval HA_ADMIN_NOT_IMPLEMENTED @@ -5465,7 +5465,7 @@ int handler::ha_repair(THD* thd, HA_CHECK_OPT* check_opt) ha_table_flags() & HA_CAN_REPAIR); /* - Update frm version if no errors and there are no version incompatibiltes + Update frm version if there are no errors and no version incompatibilities in the data (as these are not fixed by repair). */ if (result == HA_ADMIN_OK && !opt_readonly && @@ -7904,7 +7904,7 @@ int handler::check_duplicate_long_entries(const uchar *new_rec) /** @brief check whether updated records breaks the unique constraint on long columns. 
- In the case of update we just need to check the specic key + In the case of update we just need to check the specific key reason for that is consider case create table t1(a blob , b blob , x blob , y blob ,unique(a,b) ,unique(x,y)) diff --git a/sql/handler.h b/sql/handler.h index a4da2287565..c6aae1a0f1e 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -861,7 +861,7 @@ typedef bool Log_func(THD*, TABLE*, Event_log *, binlog_cache_data *, bool, #define ALTER_PARTITION_ALL (1ULL << 8) // Set for REMOVE PARTITIONING #define ALTER_PARTITION_REMOVE (1ULL << 9) -// Set for EXCHANGE PARITION +// Set for EXCHANGE PARTITION #define ALTER_PARTITION_EXCHANGE (1ULL << 10) // Set by Sql_cmd_alter_table_truncate_partition::execute() #define ALTER_PARTITION_TRUNCATE (1ULL << 11) @@ -1026,7 +1026,7 @@ struct xid_recovery_member */ Binlog_offset binlog_coord; XID *full_xid; // needed by wsrep or past it recovery - decltype(::server_id) server_id; // server id of orginal server + decltype(::server_id) server_id; // server id of original server xid_recovery_member(my_xid xid_arg, uint prepare_arg, bool decided_arg, XID *full_xid_arg, decltype(::server_id) server_id_arg) @@ -1438,7 +1438,7 @@ struct transaction_participant consistent between 2pc participants. Such engine is no longer required to durably flush to disk transactions in commit(), provided that the transaction has been successfully prepare()d and commit_ordered(); thus - potentionally saving one fsync() call. (Engine must still durably flush + potentially saving one fsync() call. (Engine must still durably flush to disk in commit() when no prepare()/commit_ordered() steps took place, at least if durable commits are wanted; this happens eg. if binlog is disabled). 
@@ -2644,7 +2644,7 @@ public: */ alter_table_operations handler_flags= 0; - /* Alter operations involving parititons are strored here */ + /* Alter operations involving partitions are stored here */ ulong partition_flags; /** @@ -3463,8 +3463,8 @@ private: Handler_share **ha_share; public: - double optimizer_where_cost; // Copy of THD->...optimzer_where_cost - double optimizer_scan_setup_cost; // Copy of THD->...optimzer_scan_... + double optimizer_where_cost; // Copy of THD->...optimizer_where_cost + double optimizer_scan_setup_cost; // Copy of THD->...optimizer_scan_... handler(handlerton *ht_arg, TABLE_SHARE *share_arg) :table_share(share_arg), table(0), @@ -3513,7 +3513,7 @@ public: DBUG_ASSERT(m_lock_type == F_UNLCK); DBUG_ASSERT(inited == NONE); } - /* To check if table has been properely opened */ + /* To check if table has been properly opened */ bool is_open() { return ref != 0; @@ -3605,7 +3605,7 @@ public: } inline int ha_end_keyread() { - if (!keyread_enabled()) /* Enably lazy usage */ + if (!keyread_enabled()) /* Enable lazy usage */ return 0; keyread= MAX_KEY; return extra(HA_EXTRA_NO_KEYREAD); @@ -4311,7 +4311,7 @@ public: This is intended to be used for EXPLAIN, via the following scenario: 1. SQL layer calls handler->multi_range_read_info(). 1.1. Storage engine figures out whether it will use some non-default - MRR strategy, sets appropritate bits in *mrr_mode, and returns + MRR strategy, sets appropriate bits in *mrr_mode, and returns control to SQL layer 2. SQL layer remembers the returned mrr_mode 3. SQL layer compares various options and choses the final query plan. As @@ -4411,7 +4411,7 @@ public: { return extra(operation); } /* Table version id for the the table. This should change for each - sucessfull ALTER TABLE. + successful ALTER TABLE. This is used by the handlerton->check_version() to ask the engine if the table definition has been updated.
Storage engines that does not support inplace alter table does not @@ -4650,7 +4650,7 @@ public: Count tables invisible from all tables list on which current one built (like myisammrg and partitioned tables) - tables_type mask for the tables should be added herdde + tables_type mask for the tables should be added here returns number of such tables */ @@ -5490,8 +5490,8 @@ public: @param record record to find (also will be fillded with actual record fields) - @param unique_ref index or unique constraiun number (depends - on what used in the engine + @param unique_ref index or unique constraint number (depends + on what was used in the engine @retval -1 Error @retval 1 Not found diff --git a/sql/item.cc b/sql/item.cc index 763e3613834..81dc36c0a2d 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -1480,8 +1480,8 @@ Item *Item::const_charset_converter(THD *thd, CHARSET_INFO *tocs, Item *Item_param::safe_charset_converter(THD *thd, CHARSET_INFO *tocs) { /* - Return "this" if in prepare. result_type may change at execition time, - to it's possible that the converter will not be needed at all: + Return "this" if in prepare. result_type may change at execution time, + though it's possible that the converter will not be needed at all: PREPARE stmt FROM 'SELECT * FROM t1 WHERE field = ?'; SET @arg= 1; @@ -2212,7 +2212,7 @@ Item::Type Item_name_const::type() const { /* - We are guarenteed that value_item->basic_const_item(), if not + We are guaranteed that value_item->basic_const_item(), if not an error is thrown that WRONG ARGUMENTS are supplied to NAME_CONST function. If type is FUNC_ITEM, then we have a fudged item_func_neg() @@ -2370,7 +2370,7 @@ void Item::split_sum_func2(THD *thd, Ref_ptr_array ref_pointer_array, /* Skip the else part, window functions are very special functions: they need to have their own fields in the temp. 
table, but they - need to be proceessed differently than regular aggregate functions + need to be processed differently than regular aggregate functions Call split_sum_func here so that each argument gets its fields to point to the temporary table. @@ -2828,7 +2828,7 @@ Item_func_or_sum @details This method first builds clones of the arguments. If it is successful with - buiding the clones then it constructs a copy of this Item_func_or_sum object + building the clones then it constructs a copy of this Item_func_or_sum object and attaches to it the built clones of the arguments. @return clone of the item @@ -3085,7 +3085,7 @@ Item_sp::execute_impl(THD *thd, Item **args, uint arg_count) @brief Initialize the result field by creating a temporary dummy table and assign it to a newly created field object. Meta data used to create the field is fetched from the sp_head belonging to the stored - proceedure found in the stored procedure functon cache. + procedure found in the stored procedure function cache. @note This function should be called from fix_fields to init the result field. It is some what related to Item_field. @@ -3793,7 +3793,7 @@ void Item_field::set_refers_to_temp_table() { /* Derived temp. tables have non-zero derived_select_number. - We don't need to distingish between other kinds of temp.tables currently. + We don't need to distinguish between other kinds of temp.tables currently. */ refers_to_temp_table= (field->table->derived_select_number != 0)? REFERS_TO_DERIVED_TMP : REFERS_TO_OTHER_TMP; @@ -5914,7 +5914,7 @@ bool is_outer_table(TABLE_LIST *table, SELECT_LEX *select) @endcode @retval - 1 column succefully resolved and fix_fields() should continue. + 1 column successfully resolved and fix_fields() should continue. @retval 0 column fully fixed and fix_fields() should return FALSE @retval @@ -6440,7 +6440,7 @@ bool Item_field::fix_fields(THD *thd, Item **reference) /* if it is not expression from merged VIEW we will set this field.
- We can leave expression substituted from view for next PS/SP rexecution + We can leave expression substituted from view for next PS/SP reexecution (i.e. do not register this substitution for reverting on cleanup() (register_item_tree_changing())), because this subtree will be fix_field'ed during setup_tables()->setup_underlying() (i.e. before diff --git a/sql/item.h b/sql/item.h index 73d0d16f873..6c4143d39d9 100644 --- a/sql/item.h +++ b/sql/item.h @@ -789,7 +789,7 @@ enum class item_with_t : item_flags_t WINDOW_FUNC= (1<<1), // If item contains a window func FIELD= (1<<2), // If any item except Item_sum contains a field. SUM_FUNC= (1<<3), // If item contains a sum func - SUBQUERY= (1<<4), // If item containts a sub query + SUBQUERY= (1<<4), // If item contains a subquery ROWNUM_FUNC= (1<<5), // If ROWNUM function was used PARAM= (1<<6) // If user parameter was used }; @@ -2294,7 +2294,7 @@ public: /* TRUE if the expression depends only on the table indicated by tab_map - or can be converted to such an exression using equalities. + or can be converted to such an expression using equalities. Not to be used for AND/OR formulas. */ virtual bool excl_dep_on_table(table_map tab_map) { return false; } @@ -3578,7 +3578,7 @@ public: void get_tmp_field_src(Tmp_field_src *src, const Tmp_field_param *param); /* This implementation of used_tables() used by Item_avg_field and - Item_variance_field which work when only temporary table left, so theu + Item_variance_field which work when only temporary table left, so they return table map of the temporary table. */ table_map used_tables() const override { return 1; } @@ -4646,7 +4646,7 @@ public: { return get_item_copy(thd, this); } }; -/* The following variablese are stored in a read only segment */ +/* The following variables are stored in a read only segment */ extern Item_bool_static *Item_false, *Item_true; class Item_uint :public Item_int @@ -5804,7 +5804,7 @@ public: item to the debug log. 
The second use of this method is as a helper function of print() and error messages, where it is applicable. To suit both goals it should return a meaningful, - distinguishable and sintactically correct string. This method + distinguishable and syntactically correct string. This method should not be used for runtime type identification, use enum {Sum}Functype and Item_func::functype()/Item_sum::sum_func() instead. @@ -7196,9 +7196,9 @@ public: /** - This class is used as bulk parameter INGNORE representation. + This class is used as bulk parameter IGNORE representation. - It just do nothing when assigned to a field + It just does nothing when assigned to a field This is a non-standard MariaDB extension. */ @@ -8205,7 +8205,7 @@ public: The value meaning a not-initialized ESCAPE character must not be equal to any valid value, so must be outside of these ranges: - - -128..+127, not to conflict with a valid 8bit charcter + - -128..+127, not to conflict with a valid 8bit character - 0..0x10FFFF, not to conflict with a valid Unicode code point The exact value does not matter. */ diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index c244780835f..460fb020d5e 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -606,7 +606,7 @@ bool Arg_comparator::set_cmp_func_string(THD *thd) /* We must set cmp_collation here as we may be called from for an automatic generated item, like in natural join. - Allow reinterpted superset as subset. + Allow reinterpreted superset as subset. Use charset narrowing only for equalities, as that would allow to construct ref access. Non-equality comparisons with constants work without charset narrowing, @@ -2715,7 +2715,7 @@ Item_func_nullif::fix_length_and_dec(THD *thd) If this is the first invocation of fix_length_and_dec(), create the third argument as a copy of the first. This cannot be done before fix_fields(), because fix_fields() might replace items, - for exampe NOT x --> x==0, or (SELECT 1) --> 1. 
+ for example NOT x --> x==0, or (SELECT 1) --> 1. See also class Item_func_nullif declaration. */ if (arg_count == 2) @@ -2731,7 +2731,7 @@ Item_func_nullif::fix_length_and_dec(THD *thd) l_expr args[2]= >------------------------/ - Otherwise (during PREPARE or convensional execution), + Otherwise (during PREPARE or conventional execution), args[0] and args[2] should still point to the same original l_expr. */ DBUG_ASSERT(args[0] == args[2] || thd->stmt_arena->is_stmt_execute()); @@ -2814,7 +2814,7 @@ Item_func_nullif::fix_length_and_dec(THD *thd) l_expr (Item_field for t1.a) args[2] / - d. Conversion of only args[0] happened (by equal field proparation): + d. Conversion of only args[0] happened (by equal field propagation): CREATE OR REPLACE TABLE t1 ( a CHAR(10), @@ -3403,7 +3403,7 @@ bool Item_func_case_simple::aggregate_switch_and_when_arguments(THD *thd, If we'll do string comparison, we also need to aggregate character set and collation for first/WHEN items and install converters for some of them to cmp_collation when necessary. - This is done because cmp_item compatators cannot compare + This is done because cmp_item comparators cannot compare strings in two different character sets. Some examples when we install converters: @@ -4709,7 +4709,7 @@ void Item_func_in::fix_in_vector() else { /* - We don't put NULL values in array, to avoid erronous matches in + We don't put NULL values in array, to avoid erroneous matches in bisection. */ have_null= 1; @@ -4760,7 +4760,7 @@ bool Item_func_in::value_list_convert_const_to_int(THD *thd) m_comparator.set_handler(&type_handler_slonglong); } } - return thd->is_fatal_error; // Catch errrors in convert_const_to_int + return thd->is_fatal_error; // Catch errors in convert_const_to_int } @@ -5435,7 +5435,7 @@ Item *Item_cond::do_transform(THD *thd, Item_transformer transformer, uchar *arg callback functions. First the function applies the analyzer to the root node of - the Item_func object. 
Then if the analyzer succeeeds (returns TRUE) + the Item_func object. Then if the analyzer succeeds (returns TRUE) the function recursively applies the compile method to member item of the condition list. If the call of the method for a member item returns a new item @@ -7897,7 +7897,7 @@ Item_equal::excl_dep_on_grouping_fields(st_select_lex *sel) 2. After this all equalities of the form x=a (where x designates the first non-constant member for which checker returns true and a is some other - such member of the multiplle equality) are created. When constructing + such member of the multiple equality) are created. When constructing an equality item both its parts are taken as clones of x and a. Suppose in the examples above that for 'x', 'a', and 'b' the function diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h index 2775176d1fb..e0233a8f085 100644 --- a/sql/item_cmpfunc.h +++ b/sql/item_cmpfunc.h @@ -2070,7 +2070,7 @@ public: 4. m_cmp_item - the pointer to a cmp_item instance to handle comparison for this pair. Only unique type handlers have m_cmp_item!=NULL. Non-unique type handlers share the same cmp_item instance. - For all m_comparators[] elements the following assersion it true: + For all m_comparators[] elements the following assertion is true: (m_handler_index==i) == (m_cmp_item!=NULL) */ class Predicant_to_list_comparator diff --git a/sql/item_create.cc b/sql/item_create.cc index 4ce151a1294..05f9c65bfb3 100644 --- a/sql/item_create.cc +++ b/sql/item_create.cc @@ -5500,7 +5500,7 @@ Create_func_rand::create_native(THD *thd, const LEX_CSTRING *name, between master and slave, because the order is undefined. Hence, the statement is unsafe to log in statement format. 
- For normal INSERT's this is howevever safe + For normal INSERT's this is however safe */ if (thd->lex->sql_command != SQLCOM_INSERT) thd->lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_SYSTEM_FUNCTION); diff --git a/sql/item_func.cc b/sql/item_func.cc index ff32db8f996..f11b78af191 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -84,7 +84,7 @@ static inline bool test_if_sum_overflows_ull(ulonglong arg1, ulonglong arg2) /** Allocate memory for arguments using tmp_args or thd->alloc(). @retval false - success - @retval true - error (arg_count is set to 0 for conveniece) + @retval true - error (arg_count is set to 0 for convenience) */ bool Item_args::alloc_arguments(THD *thd, uint count) { @@ -7308,7 +7308,7 @@ Item_func_rownum::Item_func_rownum(THD *thd): /* Remember the select context. Add the function to the list fix_after_optimize in the select context - so that we can easily initializef all rownum functions with the pointers + so that we can easily initialize all rownum functions with the pointers to the row counters. */ select= thd->lex->current_select; diff --git a/sql/item_func.h b/sql/item_func.h index d2da02fc39e..88b91b22087 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -3565,7 +3565,7 @@ public: /* This item represents user variable used as out parameter (e.g in LOAD DATA), - and it is supposed to be used only for this purprose. So it is simplified + and it is supposed to be used only for this purpose. So it is simplified a lot. Actually you should never obtain its value. The only two reasons for this thing being an Item is possibility to store it diff --git a/sql/item_geofunc.cc b/sql/item_geofunc.cc index a994e810238..60bcefdc804 100644 --- a/sql/item_geofunc.cc +++ b/sql/item_geofunc.cc @@ -2770,7 +2770,7 @@ mem_error: #ifndef DBUG_OFF longlong Item_func_gis_debug::val_int() { - /* For now this is just a stub. TODO: implement the internal GIS debuggign */ + /* For now this is just a stub. 
TODO: implement the internal GIS debugging */ return 0; } #endif diff --git a/sql/item_geofunc.h index fc758074e40..9e2c4fabe88 100644 --- a/sql/item_geofunc.h +++ b/sql/item_geofunc.h @@ -140,7 +140,7 @@ public: /* - Functions returning GEOMETRY measurements of a single GEOEMETRY argument + Functions returning GEOMETRY measurements of a single GEOMETRY argument */ class Item_geometry_func_args_geometry: public Item_geometry_func { diff --git a/sql/item_jsonfunc.cc index ff4336e7aab..23bf1fbc8e6 100644 --- a/sql/item_jsonfunc.cc +++ b/sql/item_jsonfunc.cc @@ -1793,7 +1793,7 @@ null_return: Item_func_conv_charset::is_json_type() which returned args[0]->is_json_type(). JSON functions with multiple string input with different character sets wrap some arguments into Item_func_conv_charset. So the former - Item_func_conv_charset::is_json_type() took the JSON propery from args[0], + Item_func_conv_charset::is_json_type() took the JSON property from args[0], i.e. from the original argument before the conversion. This is probably not always correct because an *explicit* `CONVERT(arg USING charset)` is actually a general purpose string @@ -4761,7 +4761,7 @@ static int json_find_overlap_with_object(json_engine_t *js, json_engine_t *value else return false. 2.c) If either of json document or value is array and other is object: Iterate over the array, if an element of type object is found, - then compare it with the object (which is the other arguemnt). + then compare it with the object (which is the other argument). If the entire object matches i.e all they key value pairs match, then return true else return false. @@ -5191,7 +5191,7 @@ static bool get_current_value(json_engine_t *js, const uchar *&value_start, If the outermost layer of JSON is an array, the intersection of arrays is independent of order. 
Create a hash containing all elements in the array, - itterate over another array and add the common elements + iterate over another array and add the common elements to the result. RETURN diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc index ef958945c11..50fdfeec074 100644 --- a/sql/item_strfunc.cc +++ b/sql/item_strfunc.cc @@ -80,7 +80,7 @@ static uint32 max_length_for_string(Item *item, bool *neg) } if (length > (ulonglong) INT_MAX32) { - /* Limit string length to maxium string length in MariaDB (2G) */ + /* Limit string length to maximum string length in MariaDB (2G) */ length= (ulonglong) INT_MAX32; } return (uint32) length; @@ -3701,7 +3701,7 @@ bool Item_func_pad::fix_length_and_dec(THD *thd) /* PAD(expr,length,' ') removes argument's soft dependency on PAD_CHAR_TO_FULL_LENGTH if the result - is longer than the argument's maximim possible length. + is longer than the argument's maximum possible length. */ Sql_mode_dependency Item_func_rpad::value_depends_on_sql_mode() const { @@ -4044,7 +4044,7 @@ String *Item_func_set_collation::val_str(String *str) But for a non-NULL result SCS and TCS must be compatible: 1. Either SCS==TCS - 2. Or SCS can be can be reinterpeted to TCS. + 2. Or SCS can be reinterpreted to TCS. This scenario is possible when args[0] is numeric and TCS->mbmaxlen==1. If SCS and TCS are not compatible here, then something went wrong during @@ -4714,7 +4714,7 @@ longlong Item_func_uncompressed_length::val_int() 5 bytes long. 
res->c_ptr() is not used because: - we do not need \0 terminated string to get first 4 bytes - - c_ptr() tests simbol after string end (uninitialized memory) which + - c_ptr() tests symbol after string end (uninitialized memory) which confuse valgrind */ return uint4korr(res->ptr()) & 0x3FFFFFFF; diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index 9d2e0b7cffd..61432ce973b 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -2154,7 +2154,7 @@ Item_in_subselect::single_value_transformer(JOIN *join) /** - Apply transformation max/min transwormation to ALL/ANY subquery if it is + Apply transformation max/min transformation to ALL/ANY subquery if it is possible. @param join Join object of the subquery (i.e. 'child' join). @@ -3150,13 +3150,13 @@ bool Item_exists_subselect::exists2in_processor(void *opt_arg) DBUG_RETURN(FALSE); /* - EXISTS-to-IN coversion and ORDER BY ... LIMIT clause: + EXISTS-to-IN conversion and ORDER BY ... LIMIT clause: - "[ORDER BY ...] LIMIT n" clause with a non-zero n does not affect the result of the EXISTS(...) predicate, and so we can discard it during the conversion. - "[ORDER BY ...] LIMIT m, n" can turn a non-empty resultset into empty - one, so it affects tthe EXISTS(...) result and cannot be discarded. + one, so it affects the EXISTS(...) result and cannot be discarded. Disallow exists-to-in conversion if (1). three is a LIMIT which is not a basic constant @@ -3270,7 +3270,7 @@ bool Item_exists_subselect::exists2in_processor(void *opt_arg) } } - /* make EXISTS->IN permanet (see Item_subselect::init()) */ + /* make EXISTS->IN permanent (see Item_subselect::init()) */ set_exists_transformed(); first_select->limit_params.select_limit= NULL; @@ -3583,7 +3583,7 @@ bool Item_in_subselect::fix_fields(THD *thd_arg, Item **ref) test for each Item happens later in Item_in_subselect::row_value_in_to_exists_transformer. 
The reason for this mess is that our JOIN::prepare phase works top-down - instead of bottom-up, so we first do name resoluton and semantic checks + instead of bottom-up, so we first do name resolution and semantic checks for the outer selects, then for the inner. */ if (engine && @@ -3713,7 +3713,7 @@ bool Item_in_subselect::init_left_expr_cache() outer_join= unit->outer_select()->join; /* An IN predicate might be evaluated in a query for which all tables have - been optimzied away. + been optimized away. */ if (!outer_join || !outer_join->table_count || !outer_join->tables_list) return TRUE; @@ -4029,7 +4029,7 @@ bool subselect_single_select_engine::no_rows() /** - Makes storage for the output values for the subquery and calcuates + Makes storage for the output values for the subquery and calculates their data and column types and their nullability. */ bool subselect_engine::set_row(List &item_list, Item_cache **row) @@ -5338,7 +5338,7 @@ bool subselect_hash_sj_engine::init(List *tmp_columns, uint subquery_id) the extra key part created when s->uniques > 0. NOTE: item have to be Item_in_subselect, because class constructor - accept Item_in_subselect as the parmeter. + accepts Item_in_subselect as the parameter. */ DBUG_ASSERT(tmp_table->s->keys == 1 && item->get_IN_subquery()->left_expr->cols() == @@ -5438,7 +5438,7 @@ bool subselect_hash_sj_engine::make_semi_join_conds() /** - Create a new uniquesubquery engine for the execution of an IN predicate. + Create a new unique subquery engine for the execution of an IN predicate. @details Create and initialize a new JOIN_TAB, and Table_ref objects to perform @@ -5754,8 +5754,8 @@ double get_post_group_estimate(JOIN* join, double join_op_rows) Execute a subquery IN predicate via materialization. @details - If needed materialize the subquery into a temporary table, then - copmpute the predicate via a lookup into this table. 
+ If needed, materialize the subquery into a temporary table, then + compute the predicate via a lookup into this table. @retval TRUE if error @retval FALSE otherwise @@ -6292,7 +6292,7 @@ bool Ordered_key::lookup() mid= lo + (hi - lo) / 2; cmp_res= cmp_key_with_search_key(key_buff[mid]); /* - In order to find the minimum match, check if the pevious element is + In order to find the minimum match, check if the previous element is equal or smaller than the found one. If equal, we need to search further to the left. */ @@ -6855,7 +6855,7 @@ bool subselect_rowid_merge_engine::partial_match() /* If there is a non-NULL key, it must be the first key in the keys array. */ DBUG_ASSERT(!non_null_key || (non_null_key && merge_keys[0] == non_null_key)); - /* The prioryty queue for keys must be empty. */ + /* The priority queue for keys must be empty. */ DBUG_ASSERT(pq.is_empty()); /* All data accesses during execution are via handler::ha_rnd_pos() */ diff --git a/sql/item_subselect.h index 1d90dc42865..b83d30ffda6 100644 --- a/sql/item_subselect.h +++ b/sql/item_subselect.h @@ -231,7 +231,7 @@ public: /* Used by max/min subquery to initialize value presence registration - mechanism. Engine call this method before rexecution query. + mechanism. The engine calls this method before re-executing the query. */ virtual void reset_value_registration() {} enum_parsing_place place() { return parsing_place; } @@ -1273,7 +1273,7 @@ protected: /* Mapping from row numbers to row ids. The element row_num_to_rowid[i] contains a buffer with the rowid for the row numbered 'i'. - The memory for this member is not maintanined by this class because + The memory for this member is not maintained by this class because all Ordered_key indexes of the same table share the same mapping. 
*/ uchar *row_num_to_rowid; diff --git a/sql/item_sum.cc b/sql/item_sum.cc index 69733544fbc..74196e5addd 100644 --- a/sql/item_sum.cc +++ b/sql/item_sum.cc @@ -180,7 +180,7 @@ bool Item_sum::check_sum_func(THD *thd, Item **ref) /* Window functions can not be used as arguments to sum functions. - Aggregation happes before window function computation, so there + Aggregation happens before window function computation, so there are no values to aggregate over. */ if (with_window_func()) @@ -472,7 +472,7 @@ Item_sum::Item_sum(THD *thd, List &list): Item_func_or_sum(thd, list) /** - Constructor used in processing select with temporary tebles. + Constructor used in processing select with temporary tables. */ Item_sum::Item_sum(THD *thd, Item_sum *item): @@ -1246,7 +1246,7 @@ bool Item_sum_min_max::fix_length_and_dec(THD *thd) { DBUG_ASSERT(args[0]->field_type() == args[0]->real_item()->field_type()); DBUG_ASSERT(args[0]->result_type() == args[0]->real_item()->result_type()); - /* MIN/MAX can return NULL for empty set indepedent of the used column */ + /* MIN/MAX can return NULL for empty set independent of the used column */ set_maybe_null(); null_value= true; return args[0]->type_handler()->Item_sum_hybrid_fix_length_and_dec(this); @@ -4483,7 +4483,7 @@ String* Item_func_group_concat::val_str(String* str) /* @brief - Get the comparator function for DISTINT clause + Get the comparator function for DISTINCT clause */ qsort_cmp2 Item_func_group_concat::get_comparator_function_for_distinct() diff --git a/sql/item_sum.h b/sql/item_sum.h index 62a5fd38fac..6a6b463220c 100644 --- a/sql/item_sum.h +++ b/sql/item_sum.h @@ -304,7 +304,7 @@ class Window_spec; The implementation takes into account the used strategy: - Items resolved at optimization phase return 0 from Item_sum::used_tables(). 
- Items that depend on the number of join output records, but not columns of - any particular table (like COUNT(*)), returm 0 from Item_sum::used_tables(), + any particular table (like COUNT(*)), return 0 from Item_sum::used_tables(), but still return false from Item_sum::const_item(). */ diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index a8134ca1eb3..16c0a56a3e0 100644 --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -2059,7 +2059,7 @@ static uint parse_special(char cfmt, const char *ptr, const char *end, /* * '&' with text is used for variable input, but '&' with other - * special charaters like '|'. '*' is used as separator + * special characters like '|'. '*' is used as separator */ if (cfmt == '&' && ptr + 1 < end) { diff --git a/sql/item_windowfunc.h b/sql/item_windowfunc.h index c832fcf367a..03beadababc 100644 --- a/sql/item_windowfunc.h +++ b/sql/item_windowfunc.h @@ -1188,7 +1188,7 @@ public: /* Computation functions. - TODO: consoder merging these with class Group_bound_tracker. + TODO: consider merging these with class Group_bound_tracker. */ void setup_partition_border_check(THD *thd); diff --git a/sql/item_xmlfunc.cc b/sql/item_xmlfunc.cc index 74e88e8dd87..e7f0464f8df 100644 --- a/sql/item_xmlfunc.cc +++ b/sql/item_xmlfunc.cc @@ -66,7 +66,7 @@ typedef struct my_xml_node_st typedef struct my_xpath_lex_st { int term; /* token type, see MY_XPATH_LEX_XXXXX below */ - const char *beg; /* beginnign of the token */ + const char *beg; /* beginning of the token */ const char *end; /* end of the token */ } MY_XPATH_LEX; @@ -769,7 +769,7 @@ bool Item_nodeset_func_ancestorbyname::val_native(THD *thd, Native *nodeset) { /* Go to the root and add all nodes on the way. 
- Don't add the root if context is the root itelf + Don't add the root if context is the root itself */ MY_XML_NODE *self= &nodebeg[flt->num]; if (need_self && validname(self)) @@ -1043,7 +1043,7 @@ static Item *create_comparator(MY_XPATH *xpath, /* Compare a node set to a scalar value. We just create a fake Item_string_xml_non_const() argument, - which will be filled to the partular value + which will be filled to the particular value in a loop through all of the nodes in the node set. */ @@ -2566,7 +2566,7 @@ static int my_xpath_parse_Number(MY_XPATH *xpath) SYNOPSYS - The keywords AND, OR, MOD, DIV are valid identitiers + The keywords AND, OR, MOD, DIV are valid identifiers when they are in identifier context: SELECT diff --git a/sql/json_schema.cc b/sql/json_schema.cc index c5dfdec409a..a562a735e9b 100644 --- a/sql/json_schema.cc +++ b/sql/json_schema.cc @@ -1847,7 +1847,7 @@ bool Json_schema_property_names::handle_keyword(THD *thd, json_engine_t *je, } /* - additiona_items, additional_properties, unevaluated_items, + additional_items, additional_properties, unevaluated_items, unevaluated_properties are all going to be schemas (basically of object type). So they all can be handled just like any other schema. @@ -1879,7 +1879,7 @@ Json_schema_additional_and_unevaluated::handle_keyword(THD *thd, } /* - Validating properties as an alternate shcema happens only when we have + Validating properties as an alternate schema happens only when we have logic keywords. Example we have allOf, and one key is not validated against allOf but it is present in "properties" and validates against it. Then the validation result should be true. 
So we would want that diff --git a/sql/json_table.cc index 905ad1ac303..9f531a30fd2 100644 --- a/sql/json_table.cc +++ b/sql/json_table.cc @@ -699,7 +699,7 @@ int ha_json_table::info(uint) @param thd thread handle @param param a description used as input to create the table - @param jt json_table specificaion + @param jt json_table specification @param table_alias alias */ @@ -910,7 +910,7 @@ int Json_table_column::set(THD *thd, enum_type ctype, const LEX_CSTRING &path, /* This is done so the ::print function can just print the path string. Can be removed if we redo that function to print the path using it's - anctual content. Not sure though if we should. + actual content. Not sure though if we should. */ m_path.s.c_str= (const uchar *) path.str; diff --git a/sql/json_table.h index 84f0a099d6e..28bc4752470 100644 --- a/sql/json_table.h +++ b/sql/json_table.h @@ -194,7 +194,7 @@ public: In the current MariaDB code, evaluation of JSON_TABLE is deterministic, that is, for a given input string JSON_TABLE will always produce the same set of rows in the same order. However one can think of JSON documents - that one can consider indentical which will produce different output. + that one can consider identical which will produce different output. In order to be feature-proof and withstand changes like: - sorting JSON object members by name (like MySQL does) - changing the way duplicate object members are handled @@ -274,7 +274,7 @@ private: /* Pointer to the list tail where we add the next NESTED PATH. It points to the cur_parnt->m_nested for the first nested - and prev_nested->m_next_nested for the coesequent ones. + and prev_nested->m_next_nested for the subsequent ones. 
*/ Json_table_nested_path **last_sibling_hook; }; diff --git a/sql/lex_charset.cc b/sql/lex_charset.cc index aef1235c99c..2a016385ed8 100644 --- a/sql/lex_charset.cc +++ b/sql/lex_charset.cc @@ -114,7 +114,7 @@ Lex_exact_collation::raise_if_not_equal(const Lex_exact_collation &cl) const /* - Merge an exact collation and a contexual collation. + Merge an exact collation and a contextual collation. @param cl - The contextual collation to merge to "this". @param reverse_order - If the contextual collation is on the left side diff --git a/sql/lex_charset.h b/sql/lex_charset.h index 0145176f8f0..f176d11878e 100644 --- a/sql/lex_charset.h +++ b/sql/lex_charset.h @@ -444,7 +444,7 @@ public: CREATE TABLE t2 (a CHAR(10) BINARY) CHARACTER SET latin2; -- (3a) CREATE TABLE t2 (a CHAR(10) BINARY); -- (3b) CREATE TABLE t2 (a CHAR(10) COLLATE DEFAULT) - CHARACER SET latin2 COLLATE latin2_bin; -- (3c) + CHARACTER SET latin2 COLLATE latin2_bin; -- (3c) In case of an empty or a contextually typed collation, it is a subject to later resolution, when the context diff --git a/sql/lex_ident.h b/sql/lex_ident.h index e5b6bf3f970..99597397c56 100644 --- a/sql/lex_ident.h +++ b/sql/lex_ident.h @@ -53,7 +53,7 @@ struct Compare_ident_ci 1. {ptr==NULL,length==0} is valid and means "NULL identifier". 2a. {ptr<>NULL,length==0} means "empty identifier". 2b. {ptr<>NULL,length>0} means "not empty identifier. - In case of 2a and 2b, ptr must be a '\0'-terninated string. + In case of 2a and 2b, ptr must be a '\0'-terminated string. Comparison operands passed to streq() are not required to be 0-terminated. @@ -61,7 +61,7 @@ struct Compare_ident_ci - inside methods of this class - inside st_charset_info::streq() in include/m_ctype.h The caller must make sure to maintain the object in the valid state, - as well as provide valid LEX_CSTRING instances for comparion. + as well as provide valid LEX_CSTRING instances for comparison. 
For better code stability, the Lex_cstring base should eventually be encapsulated, so the object debug validation is done at constructor @@ -468,7 +468,7 @@ public: Lex_ident_db::check_name(). Note, the database name passed to the constructor can originally - come from the parser and can be of an atribtrary long length. + come from the parser and can be of an arbitrarily long length. Let's reserve additional buffer space for one extra character (SYSTEM_CHARSET_MBMAXLEN bytes), so Lex_ident_db::check_name() can still detect too long names even if the constructor cuts the data. diff --git a/sql/log.cc index 2fc87ade6ae..d36f249025a 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -162,8 +162,8 @@ static SHOW_VAR binlog_status_vars_detail[]= }; /** - This class implementes the feature to rename a binlog cache temporary file to - a binlog file. It is used to avoid holding LOCK_log long time when writting a + This class implements the feature to rename a binlog cache temporary file to + a binlog file. It is used to avoid holding LOCK_log long time when writing a huge binlog cache to binlog file. With this feature, temporary files of binlog caches will be created in @@ -2227,7 +2227,7 @@ inline bool is_prepared_xa(THD *thd) /* - We flush the cache wrapped in a beging/rollback if: + We flush the cache wrapped in a begin/rollback if: . aborting a single or multi-statement transaction and; . the OPTION_BINLOG_THIS_TRX is active or; . the format is STMT and a non-trans table was updated or; @@ -3274,7 +3274,7 @@ void MYSQL_QUERY_LOG::reopen_file() DESCRIPTION - Log given command to to normal (not rotable) log file + Log given command to normal (not rotatable) log file RETURN FASE - OK @@ -4696,7 +4696,7 @@ bool MYSQL_BIN_LOG::reset_logs(THD *thd, bool create_new_log, /* Close the active log. - Close the active GTID index synchroneously. We don't want the close + Close the active GTID index synchronously. 
We don't want the close running in the background while we delete the gtid index file. And we just pushed a sentinel through the binlog background thread while holding LOCK_log, so no other GTID index operations can be pending. @@ -5724,7 +5724,7 @@ error: /* Remove directory (to keep things shorter and compatible */ log_file_name_arg+= dirname_length(log_file_name_arg); - /* purge_warning_given is reset after next sucessful purge */ + /* purge_warning_given is reset after next successful purge */ purge_warning_given= 1; if (interactive) { @@ -5870,7 +5870,7 @@ bool MYSQL_BIN_LOG::is_active(const char *log_file_name_arg) * #12 next_event * #13 exec_relay_log_event * - * I didn't investigate if this is ligit...(i.e if my comment is wrong) + * I didn't investigate if this is legit...(i.e if my comment is wrong) */ return !strcmp(log_file_name, log_file_name_arg); } @@ -8071,7 +8071,7 @@ int MYSQL_BIN_LOG::rotate_and_purge(bool force_rotate, if ((err_gtid= do_delete_gtid_domain(domain_drop_lex))) { - // inffective attempt to delete merely skips rotate and purge + // ineffective attempt to delete merely skips rotate and purge if (err_gtid < 0) error= 1; // otherwise error is propagated the user } @@ -8600,7 +8600,7 @@ MYSQL_BIN_LOG::queue_for_group_commit(group_commit_entry *orig_entry) If waitee->commit_started is set, it means that the transaction we need to wait for has already queued up for group commit. In this case it is - safe for us to queue up immediately as well, increasing the opprtunities + safe for us to queue up immediately as well, increasing the opportunities for group commit. Because waitee has taken the LOCK_prepare_ordered before setting the flag, so there is no risk that we can queue ahead of it. @@ -11317,7 +11317,7 @@ TC_LOG_BINLOG::mark_xid_done(ulong binlog_id, bool write_checkpoint) most recent binlog. Note also that we need to first release LOCK_xid_list, then acquire - LOCK_log, then re-aquire LOCK_xid_list. 
If we were to take LOCK_log while + LOCK_log, then re-acquire LOCK_xid_list. If we were to take LOCK_log while holding LOCK_xid_list, we might deadlock with other threads that take the locks in the opposite order. */ @@ -11842,7 +11842,7 @@ public: Otherwise enumeration starts with zero for the first file, increments by one for any next file except for the last file in the list, which is also the initial binlog file for recovery, - that is enumberated with UINT_MAX. + that is enumerated with UINT_MAX. */ Binlog_file_id id_binlog; enum_binlog_checksum_alg checksum_alg; @@ -11925,7 +11925,7 @@ public: Is invoked when a standalone or non-2pc group is detected. Both are unsafe to truncate in the semisync-slave recovery so the maximum unsafe coordinate may be updated. - In the non-2pc group case though, *exeptionally*, + In the non-2pc group case though, *exceptionally*, the no-engine group is considered safe, to be invalidated to not contribute to binlog state. */ @@ -12136,7 +12136,7 @@ bool Recovery_context::decide_or_assess(xid_recovery_member *member, int round, if (!truncate_validated) { if (truncate_gtid.seq_no == 0 /* was reset or never set */ || - (truncate_set_in_1st && round == 2 /* reevaluted at round turn */)) + (truncate_set_in_1st && round == 2 /* reevaluated at round turn */)) { if (set_truncate_coord(linfo, round, fdle->used_checksum_alg)) return true; diff --git a/sql/log.h b/sql/log.h index 46657c34d80..dc95f33de63 100644 --- a/sql/log.h +++ b/sql/log.h @@ -260,7 +260,7 @@ extern TC_LOG_DUMMY tc_log_dummy; class Relay_log_info; /* - Note that we destroy the lock mutex in the desctructor here. + Note that we destroy the lock mutex in the destructor here. 
This means that object instances cannot be destroyed/go out of scope, until we have reset thd->current_linfo to NULL; */ @@ -838,7 +838,7 @@ public: FD - Format-Description event, R - Rotate event R_f - the fake Rotate event - E - an arbirary event + E - an arbitrary event The underscore indexes for any event `_s' indicates the event is generated by Slave diff --git a/sql/log_event.cc b/sql/log_event.cc index 424fa80fb00..a3281558076 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -350,7 +350,7 @@ int binlog_buf_compress(const uchar *src, uchar *dst, uint32 len, uint32 *comlen @Note: 1) The caller should call my_free to release 'dst' if *is_malloc is returned as true. - 2) If *is_malloc is retuened as false, then 'dst' reuses the passed-in + 2) If *is_malloc is returned as false, then 'dst' reuses the passed-in 'buf'. return zero if successful, non-zero otherwise. @@ -1659,9 +1659,9 @@ Query_log_event::Query_log_event(const uchar *buf, uint event_len, +--------+-----------+------+------+---------+----+-------+ To support the query cache we append the following buffer to the above - +-------+----------------------------------------+-------+ - |db len | uninitiatlized space of size of db len | FLAGS | - +-------+----------------------------------------+-------+ + +-------+---------------------------------------+-------+ + |db len | uninitialized space of size of db len | FLAGS | + +-------+---------------------------------------+-------+ The area of buffer starting from Query field all the way to the end belongs to the Query buffer and its structure is described in alloc_query() in @@ -2336,7 +2336,7 @@ Format_description_log_event::is_version_before_checksum(const master_version_sp @return the version-safe checksum alg descriptor where zero designates no checksum, 255 - the orginator is - checksum-unaware (effectively no checksum) and the actuall + checksum-unaware (effectively no checksum) and the actual [1-254] range alg descriptor. 
*/ enum_binlog_checksum_alg get_checksum_alg(const uchar *buf, ulong len) @@ -2499,7 +2499,7 @@ Gtid_log_event::Gtid_log_event(const uchar *buf, uint event_len, { flags_extra= *buf++; /* - extra engines flags presence is identifed by non-zero byte value + extra engines flags presence is identified by non-zero byte value at this point */ if (flags_extra & FL_EXTRA_MULTI_ENGINE_E1) @@ -2626,7 +2626,7 @@ Gtid_list_log_event::Gtid_list_log_event(const uchar *buf, uint event_len, /* Used to record gtid_list event while sending binlog to slave, without having to - fully contruct the event object. + fully construct the event object. */ bool Gtid_list_log_event::peek(const char *event_start, size_t event_len, diff --git a/sql/log_event.h b/sql/log_event.h index 68c8f05d8c5..f024c160191 100644 --- a/sql/log_event.h +++ b/sql/log_event.h @@ -517,7 +517,7 @@ class String; Flag sets by the semisync slave for accepting the same server_id ("own") events which the slave must not have in its state. Typically such events were never committed by - their originator (this server) and discared at its semisync-slave recovery. + their originator (this server) and discarded at its semisync-slave recovery. */ #define LOG_EVENT_ACCEPT_OWN_F 0x4000 @@ -2305,7 +2305,7 @@ public: /* !!! Public in this patch to allow old usage */ binlogged with comments in the front of these keywords. for examples: / * bla bla * / SAVEPOINT a; / * bla bla * / ROLLBACK TO a; - but we don't handle these cases and after the patch, both quiries are + but we don't handle these cases and after the patch, both queries are binlogged in upper case with no comments. */ return is_xa ? !strncasecmp(query, C_STRING_WITH_LEN("XA ")) @@ -2877,7 +2877,7 @@ private: when @c one_phase member is off. The latter option is only for compatibility with the upstream. 
- From the groupping perspective the event finalizes the current + From the grouping perspective the event finalizes the current "prepare" group that is started with Gtid_log_event similarly to the regular replicated transaction. */ @@ -4210,7 +4210,7 @@ class table_def; signedness of numeric colums. This is included for all values of binlog_row_metadata. For each numeric column, a bit indicates whether the numeric - colunm has unsigned flag. 1 means it is unsigned. The number of + column has unsigned flag. 1 means it is unsigned. The number of bytes needed for this is int((column_count + 7) / 8). The order is the same as the order of column_type field. diff --git a/sql/log_event_client.cc b/sql/log_event_client.cc index 659d745fe49..119ab047b2f 100644 --- a/sql/log_event_client.cc +++ b/sql/log_event_client.cc @@ -1204,7 +1204,7 @@ void Rows_log_event::change_to_flashback_event(PRINT_EVENT_INFO *print_event_inf { if (!bi_fields) { - // Both bi and ai inclues all columns, Swap WHERE and SET Part + // Both bi and ai include all columns, Swap WHERE and SET Part memcpy(one_row.str, start_pos + length1, length2); memcpy(one_row.str+length2, start_pos, length1); } @@ -2748,7 +2748,7 @@ const char fmt_binlog2[]= "BINLOG @binlog_fragment_0, @binlog_fragment_1%s\n"; SQL cover. @param delimiter delimiter string - @param is_verbose MDEV-10362 workraround parameter to pass + @param is_verbose MDEV-10362 workaround parameter to pass info on presence of verbose printout in cache encoded data The function signals on any error through setting @c body->error to -1. @@ -3291,7 +3291,7 @@ Table_map_log_event::Charset_iterator::create_charset_iterator( @param[in|out] meta_ptr the meta_ptr of the column. If the type doesn't have metadata, it will not change meta_ptr, otherwise meta_ptr will be moved to the end of the column's - metadat. + metadata. @param[in] cs charset of the column if it is a character column. 
@param[out] typestr buffer to storing the string name of the type @param[in] typestr_length length of typestr diff --git a/sql/log_event_server.cc b/sql/log_event_server.cc index c0a58ecaf9d..8a824eff582 100644 --- a/sql/log_event_server.cc +++ b/sql/log_event_server.cc @@ -765,7 +765,7 @@ int Log_event_writer::write_header(uchar *pos, size_t len) /* recording checksum of FD event computed with dropped possibly active LOG_EVENT_BINLOG_IN_USE_F flag. - Similar step at verication: the active flag is dropped before + Similar step at verification: the active flag is dropped before checksum computing. */ if (checksum_len) @@ -1666,7 +1666,7 @@ int Query_log_event::handle_split_alter_query_log_event(rpl_group_info *rgi, if (is_CA) { /* - error handeling, direct_commit_alter is turned on, so that we dont + error handling, direct_commit_alter is turned on, so that we dont wait for master reply in mysql_alter_table (in wait_for_master) */ rgi->direct_commit_alter= true; @@ -2217,7 +2217,7 @@ compare_errors: else if (actual_error == ER_XAER_NOTA && !rpl_filter->db_ok(get_db())) { /* - If there is an XA query whos XID cannot be found, if the replication + If there is an XA query whose XID cannot be found, if the replication filter is active and filters the target database, assume that the XID cache has been cleared (e.g. by server restart) since it was prepared, so we can just ignore this event. @@ -2930,7 +2930,7 @@ Gtid_log_event::Gtid_log_event(THD *thd_arg, uint64 seq_no_arg, /* Used to record GTID while sending binlog to slave, without having to - fully contruct every Gtid_log_event() needlessly. + fully construct every Gtid_log_event() needlessly. 
*/ bool Gtid_log_event::peek(const uchar *event_start, size_t event_len, @@ -5468,7 +5468,7 @@ static int rows_event_stmt_cleanup(rpl_group_info *rgi, THD * thd) /** The method either increments the relay log position or commits the current statement and increments the master group - possition if the event is STMT_END_F flagged and + position if the event is STMT_END_F flagged and the statement corresponds to the autocommit query (i.e replicated without wrapping in BEGIN/COMMIT) @@ -6620,7 +6620,7 @@ Write_rows_log_event::do_before_row_operations(const rpl_group_info *) /* * Fixed Bug#45999, In RBR, Store engine of Slave auto-generates new * sequence numbers for auto_increment fields if the values of them are 0. - * If generateing a sequence number is decided by the values of + * If generating a sequence number is decided by the values of * table->auto_increment_field_not_null and SQL_MODE(if includes * MODE_NO_AUTO_VALUE_ON_ZERO) in update_auto_increment function. * SQL_MODE of slave sql thread is always consistency with master's. @@ -6806,7 +6806,7 @@ is_duplicate_key_error(int errcode) The row to be inserted can contain values only for selected columns. The missing columns are filled with default values using @c prepare_record() - function. If a matching record is found in the table and @c overwritte is + function. If a matching record is found in the table and @c overwrite is true, the missing columns are taken from it. @param rli Relay log info (needed for row unpacking). @@ -7374,7 +7374,7 @@ uint Rows_log_event::find_key_parts(const KEY *key) const Find the best key to use when locating the row in @c find_row(). A primary key is preferred if it exists; otherwise a unique index is - preferred. Else we pick the index with the smalles rec_per_key value. + preferred. Else we pick the index with the smallest rec_per_key value. If a suitable key is found, set @c m_key, @c m_key_nr, @c m_key_info, and @c m_usable_key_parts member fields appropriately. 
diff --git a/sql/mariadb.h b/sql/mariadb.h index 00cf2ed1d9c..10f6e8f033f 100644 --- a/sql/mariadb.h +++ b/sql/mariadb.h @@ -19,7 +19,7 @@ are always included first. It can also be used to speed up compilation by using precompiled headers. - This file should include a minum set of header files used by all files + This file should include a minimum set of header files used by all files and header files that are very seldom changed. It can also include some defines that all files should be aware of. */ diff --git a/sql/mdl.cc b/sql/mdl.cc index 5b077177d25..255271ccef9 100644 --- a/sql/mdl.cc +++ b/sql/mdl.cc @@ -538,7 +538,7 @@ public: /* In backup namespace DML/DDL may starve because of concurrent FTWRL or - BACKUP statements. This scenario is partically useless in real world, + BACKUP statements. This scenario is practically useless in real world, so we just return 0 here. */ bitmap_t hog_lock_types_bitmap() const override diff --git a/sql/mf_iocache.cc b/sql/mf_iocache.cc index a8087ed5fc5..42c1b762977 100644 --- a/sql/mf_iocache.cc +++ b/sql/mf_iocache.cc @@ -28,7 +28,7 @@ One can change info->pos_in_file to a higher value to skip bytes in file if also info->read_pos is set to info->read_end. If called through open_cached_file(), then the temporary file will - only be created if a write exeeds the file buffer or if one calls + only be created if a write exceeds the file buffer or if one calls flush_io_cache(). */ diff --git a/sql/multi_range_read.cc b/sql/multi_range_read.cc index c8d19f6b9fa..cc6c9958892 100644 --- a/sql/multi_range_read.cc +++ b/sql/multi_range_read.cc @@ -1420,7 +1420,7 @@ int DsMrr_impl::setup_two_handlers() { DBUG_ASSERT(secondary_file && secondary_file->inited==handler::INDEX); /* - We get here when the access alternates betwen MRR scan(s) and non-MRR + We get here when the access alternates between MRR scan(s) and non-MRR scans. 
Calling primary_file->index_end() will invoke dsmrr_close() for this diff --git a/sql/multi_range_read.h b/sql/multi_range_read.h index afdaf00389f..0a1f183613a 100644 --- a/sql/multi_range_read.h +++ b/sql/multi_range_read.h @@ -157,7 +157,7 @@ public: /* - Buffer manager interface. Mrr_reader objects use it to inqure DsMrr_impl + Buffer manager interface. Mrr_reader objects use it to inquire DsMrr_impl to manage buffer space for them. */ typedef struct st_buffer_manager diff --git a/sql/my_apc.h b/sql/my_apc.h index 29fa3172a12..7f611089f15 100644 --- a/sql/my_apc.h +++ b/sql/my_apc.h @@ -23,7 +23,7 @@ - This is an APC request queue - We assume there is a particular owner thread which periodically calls process_apc_requests() to serve the call requests. - - Other threads can post call requests, and block until they are exectued. + - Other threads can post call requests, and block until they are executed. ) Implementation @@ -31,7 +31,7 @@ - The target has a mutex-guarded request queue. - After the request has been put into queue, the requestor waits for request - to be satisfied. The worker satisifes the request and signals the + to be satisfied. The worker satisfies the request and signals the requestor. */ diff --git a/sql/my_json_writer.h b/sql/my_json_writer.h index a581899aab2..06c45ab83b5 100644 --- a/sql/my_json_writer.h +++ b/sql/my_json_writer.h @@ -53,8 +53,8 @@ using JOIN_TAB= struct st_join_table; arrayName : [ "boo", 123, 456 ] - and actually print them on one line. Arrrays that occupy too much space on - the line, or have nested members cannot be printed on one line. + and actually print them on one line. Arrays that occupy too much space on + the line, or have nested members, cannot be printed on one line. We hook into JSON printing functions and try to detect the pattern. While detecting the pattern, we will accumulate "boo", 123, 456 as strings. @@ -76,7 +76,7 @@ class Single_line_formatting_helper }; /* - This works like a finite automaton. 
+ This works like a finite automaton. state=DISABLED means the helper is disabled - all on_XXX functions will return false (which means "not handled") and do nothing. @@ -738,7 +738,7 @@ public: /* RAII-based class to disable writing into the JSON document The tracing is disabled as soon as the object is created. - The destuctor is called as soon as we exit the scope of the object + The destructor is called as soon as we exit the scope of the object and the tracing is enabled back. */ @@ -754,7 +754,7 @@ public: RAII-based helper class to detect incorrect use of Json_writer. The idea is that a function typically must leave Json_writer at the same - identation level as it was when it was invoked. Leaving it at a different + indentation level as it was when it was invoked. Leaving it at a different level typically means we forgot to close an object or an array So, here is a way to guard diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 6267f64c5ce..3908e416eb1 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -1909,7 +1909,7 @@ extern "C" void unireg_abort(int exit_code) { /* This is an abort situation, we cannot expect to gracefully close all - wsrep threads here, we can only diconnect from service + wsrep threads here, we can only disconnect from service */ wsrep_close_client_connections(FALSE); Wsrep_server_state::instance().disconnect(); @@ -3897,7 +3897,7 @@ static const char *rpl_make_log_name(PSI_memory_key key, const char *opt, MY_REPLACE_EXT | MY_UNPACK_FILENAME | MY_SAFE_PATH; /* mysql_real_data_home_ptr may be null if no value of datadir has been - specified through command-line or througha cnf file. If that is the + specified through command-line or through a cnf file. If that is the case we make mysql_real_data_home_ptr point to mysql_real_data_home which, in that case holds the default path for data-dir.
*/ @@ -4251,7 +4251,7 @@ static int init_common_variables() SYSVAR_AUTOSIZE(back_log, MY_MIN(900, (50 + max_connections / 5))); } - unireg_init(opt_specialflag); /* Set up extern variabels */ + unireg_init(opt_specialflag); /* Set up extern variables */ if (!(my_default_lc_messages= my_locale_by_name(Lex_cstring_strlen(lc_messages)))) { @@ -4341,7 +4341,7 @@ static int init_common_variables() } default_charset_info= default_collation; } - /* Set collactions that depends on the default collation */ + /* Set collations that depend on the default collation */ global_system_variables.collation_server= default_charset_info; global_system_variables.collation_database= default_charset_info; if (is_supported_parser_charset(default_charset_info)) @@ -5193,7 +5193,7 @@ static int init_server_components() if (WSREP_ON && !wsrep_recovery && !opt_abort) { - if (opt_bootstrap) // bootsrap option given - disable wsrep functionality + if (opt_bootstrap) // bootstrap option given - disable wsrep functionality { wsrep_provider_init(WSREP_NONE); if (wsrep_init()) @@ -5749,7 +5749,7 @@ static void run_main_loop() int mysqld_main(int argc, char **argv) { #ifndef _WIN32 - /* We can't close stdin just now, because it may be booststrap mode. */ + /* We can't close stdin just now, because it may be in bootstrap mode. */ bool please_close_stdin= fcntl(STDIN_FILENO, F_GETFD) >= 0; #endif @@ -8791,7 +8791,7 @@ static int get_options(int *argc_ptr, char ***argv_ptr) /* Options have been parsed. Now some of them need additional special - handling, like custom value checking, checking of incompatibilites + handling, like custom value checking, checking of incompatibilities between options, setting of multiple variables, etc. Do them here. 
*/ @@ -8956,7 +8956,7 @@ static int get_options(int *argc_ptr, char ***argv_ptr) /* It looks like extra_connection_count should be passed here but its been using connection_count for the last 10+ years and - no-one was requested a change so lets not suprise anyone. + no-one has requested a change so lets not surprise anyone. */ one_thread_scheduler(extra_thread_scheduler, &connection_count); #else @@ -9345,7 +9345,7 @@ void refresh_global_status() */ reset_status_vars(); /* - Reset accoumulated thread's status variables. + Reset accumulated thread's status variables. These are the variables in 'status_vars[]' with the prefix _STATUS. */ bzero(&global_status_var, clear_for_flush_status); @@ -9393,7 +9393,7 @@ void refresh_status_legacy(THD *thd) reset_pfs_status_stats(); #endif - /* Add thread's status variabes to global status */ + /* Add thread's status variables to global status */ add_to_status(&global_status_var, &thd->status_var); /* Reset thread's status variables */ diff --git a/sql/mysqld.h b/sql/mysqld.h index 3cac9a6630a..d34d3f0bd0a 100644 --- a/sql/mysqld.h +++ b/sql/mysqld.h @@ -98,7 +98,7 @@ extern MYSQL_PLUGIN_IMPORT CHARSET_INFO *national_charset_info; extern MYSQL_PLUGIN_IMPORT CHARSET_INFO *table_alias_charset; /** - Character set of the buildin error messages loaded from errmsg.sys. + Character set of the builtin error messages loaded from errmsg.sys. 
*/ extern CHARSET_INFO *error_message_charset_info; diff --git a/sql/opt_histogram_json.cc b/sql/opt_histogram_json.cc index 020e07527d4..3dee9f34bd5 100644 --- a/sql/opt_histogram_json.cc +++ b/sql/opt_histogram_json.cc @@ -548,7 +548,7 @@ bool read_hex_bucket_endpoint(json_engine_t *je, Field *field, String *out, /* - @brief Parse a JSON reprsentation for one histogram bucket + @brief Parse a JSON representation for one histogram bucket @param je The JSON parser object @param field Table field we are using histogram (used to convert diff --git a/sql/opt_index_cond_pushdown.cc b/sql/opt_index_cond_pushdown.cc index 51aa70fa02c..f98adee4f7c 100644 --- a/sql/opt_index_cond_pushdown.cc +++ b/sql/opt_index_cond_pushdown.cc @@ -58,7 +58,7 @@ bool uses_index_fields_only(Item *item, TABLE *tbl, uint keyno, /* Don't push down the triggered conditions. Nested outer joins execution code may need to evaluate a condition several times (both triggered and - untriggered), and there is no way to put thi + untriggered), and there is no way to put this TODO: Consider cloning the triggered condition and using the copies for: 1. push the first copy down, to have most restrictive index condition possible diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 2c656d3d32e..1ee4cdeb5d3 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -312,7 +312,7 @@ public: longlong baseflag; uint max_key_parts, range_count; - bool quick; // Don't calulate possible keys + bool quick; // Don't calculate possible keys uint fields_bitmap_size; MY_BITMAP needed_fields; /* bitmask of fields needed by the query */ @@ -1029,7 +1029,7 @@ int imerge_list_or_list(RANGE_OPT_PARAM *param, tree SEL_TREE whose range part is to be ored with the imerges DESCRIPTION - For each imerge mi from the list 'merges' the function performes OR + For each imerge mi from the list 'merges' the function performs OR operation with mi and the range part of 'tree' rt, producing one or two imerges. 
@@ -2690,7 +2690,7 @@ static int fill_used_fields_bitmap(PARAM *param) TODO * Change the value returned in opt_range_condition_rows from a pessimistic estimate to true E(#rows that satisfy table condition). - (we can re-use some of E(#rows) calcuation code from + (we can re-use some of E(#rows) calculation code from index_merge/intersection for this) * Check if this function really needs to modify keys_to_use, and change the @@ -4054,7 +4054,7 @@ typedef struct st_part_prune_param partitioning index definition doesn't include partitioning fields. */ int last_part_partno; - int last_subpart_partno; /* Same as above for supartitioning */ + int last_subpart_partno; /* Same as above for subpartitioning */ /* is_part_keypart[i] == MY_TEST(keypart #i in partitioning index is a member @@ -4065,7 +4065,7 @@ typedef struct st_part_prune_param /* Same as above for subpartitioning */ my_bool *is_subpart_keypart; - my_bool ignore_part_fields; /* Ignore rest of partioning fields */ + my_bool ignore_part_fields; /* Ignore rest of partitioning fields */ /*************************************************************** Following fields form find_used_partitions() recursion context: @@ -4819,7 +4819,7 @@ int find_used_partitions(PART_PRUNE_PARAM *ppar, SEL_ARG *key_tree) res= 0; /* No satisfying partitions */ goto pop_and_go_right; } - /* Rembember the limit we got - single partition #part_id */ + /* Remember the limit we got - single partition #part_id */ init_single_partition_iterator(part_id, &ppar->part_iter); /* @@ -4864,7 +4864,7 @@ int find_used_partitions(PART_PRUNE_PARAM *ppar, SEL_ARG *key_tree) { /* Can't handle condition on current key part. If we're that deep that - we're processing subpartititoning's key parts, this means we'll not be + we're processing subpartitioning's key parts, this means we'll not be able to infer any suitable condition, so bail out. 
*/ if (key_tree_part >= ppar->last_part_partno) @@ -5704,7 +5704,7 @@ skip_to_ror_scan: indexes are to be merges read_time The upper bound for the cost of the plan to be evaluated - DESRIPTION + DESCRIPTION For the given index merge plan imerge_trp extracted from the SEL_MERGE imerge the function looks for range scans with the same indexes and merges them into SEL_ARG trees. Then for each such SEL_ARG tree r_i the function @@ -5812,7 +5812,7 @@ TABLE_READ_PLAN *merge_same_index_scans(PARAM *param, SEL_IMERGE *imerge, /* This structure contains the info common for all steps of a partial - index intersection plan. Morever it contains also the info common + index intersection plan. Moreover it contains also the info common for index intersect plans. This info is filled in by the function prepare_search_best just before searching for the best index intersection plan. @@ -5839,7 +5839,7 @@ typedef struct st_common_index_intersect_info ha_rows best_records; uint best_length; /* number of indexes in the current best intersection */ INDEX_SCAN_INFO **best_intersect; /* the current best index intersection */ - /* scans from the best intersect to be filtrered by cpk conditions */ + /* scans from the best intersect to be filtered by cpk conditions */ key_map filtered_scans; uint *buff_elems; /* buffer to calculate cost of index intersection */ @@ -6384,7 +6384,7 @@ bool prepare_search_best_index_intersect(PARAM *param, the function returns a number bigger than #r. NOTES - See the comment before the desription of the function that explains the + See the comment before the description of the function that explains the reasoning used by this function. 
RETURN @@ -6465,7 +6465,7 @@ double get_cpk_filter_cost(ha_rows filtered_records, /* - Check whether a patial index intersection plan can be extended + Check whether a partial index intersection plan can be extended SYNOPSIS check_index_intersect_extension() @@ -6616,7 +6616,7 @@ bool check_index_intersect_extension(THD *thd, } /* - The cost after sweeep can be bigger than cutoff, but that is ok as the + The cost after sweep can be bigger than cutoff, but that is ok as the end cost can decrease when we add the next index. */ cost+= get_sweep_read_cost(common_info->param, rows2double(records), 1); @@ -8110,7 +8110,7 @@ QUICK_SELECT_I *TRP_ROR_UNION::make_quick(PARAM *param, cond_func item for the predicate field field in the predicate lt_value constant that field should be smaller - gt_value constant that field should be greaterr + gt_value constant that field should be greater RETURN # Pointer to tree built tree @@ -8289,7 +8289,7 @@ SEL_TREE *Item_func_in::get_func_mm_tree(RANGE_OPT_PARAM *param, /* if this is a "col1 NOT IN (...)", and there is a UNIQUE KEY(col1), do - not constuct a SEL_TREE from it. The rationale is as follows: + not construct a SEL_TREE from it. The rationale is as follows: - if there are only a few constants, this condition is not selective (unless the table is also very small in which case we won't gain anything) @@ -8381,7 +8381,7 @@ SEL_TREE *Item_func_in::get_func_mm_tree(RANGE_OPT_PARAM *param, { /* Get the SEL_TREE for the last "c_last < X < +inf" interval - (value_item cotains c_last already) + (value_item contains c_last already) */ tree2= get_mm_parts(param, field, Item_func::GT_FUNC, value_item); tree= tree_or(param, tree, tree2); @@ -9353,7 +9353,7 @@ Item_func_null_predicate::get_mm_leaf(RANGE_OPT_PARAM *param, DBUG_ENTER("Item_func_null_predicate::get_mm_leaf"); DBUG_ASSERT(!value); /* - No check for field->table->maybe_null. It's perfecly fine to use range + No check for field->table->maybe_null. 
It's perfectly fine to use range access for cases like SELECT * FROM t1 LEFT JOIN t2 ON t2.key IS [NOT] NULL @@ -10691,7 +10691,7 @@ key_and(RANGE_OPT_PARAM *param, SEL_ARG *key1, SEL_ARG *key2, uint clone_flag) new_tree=new_tree->insert(new_arg); } if (e1->cmp_max_to_max(e2) < 0) - e1=e1->next; // e1 can't overlapp next e2 + e1=e1->next; // e1 can't overlap next e2 else e2=e2->next; } @@ -10807,7 +10807,7 @@ SEL_ARG *key_and_with_limit(RANGE_OPT_PARAM *param, uint keyno, transformation is key_or( expr1, expr2 ) => expr1 OR expr2. Both expressions are assumed to be in the SEL_ARG format. In a logic sense, - theformat is reminiscent of DNF, since an expression such as the following + the format is reminiscent of DNF, since an expression such as the following ( 1 < kp1 < 10 AND p1 ) OR ( 10 <= kp2 < 20 AND p2 ) @@ -10833,7 +10833,7 @@ SEL_ARG *key_and_with_limit(RANGE_OPT_PARAM *param, uint keyno, If the predicates are equal for the rest of the keyparts, or if there are no more, the range in expr2 has its endpoints copied in, and the SEL_ARG node in expr2 is deallocated. If more ranges became connected in expr1, the - surplus is also dealocated. If they differ, two ranges are created. + surplus is also deallocated. If they differ, two ranges are created. - The range leading up to the overlap. Empty if endpoints are equal. @@ -10929,7 +10929,7 @@ key_or(RANGE_OPT_PARAM *param, SEL_ARG *key1,SEL_ARG *key2) Ambiguity: *** The range starts or stops somewhere in the "***" range. - Example: a starts before b and may end before/the same plase/after b + Example: a starts before b and may end before/the same place/after b a: [----***] b: [---] @@ -12329,7 +12329,7 @@ ha_rows check_quick_select(PARAM *param, uint idx, ha_rows limit, estimates may be slightly out of sync. 
We cannot do this easily in the above multi_range_read_info_const() - call as then we would need to have similar adjustmends done + call as then we would need to have similar adjustments done in the partitioning engine. */ rows= MY_MAX(table_records, 1); @@ -12378,7 +12378,7 @@ ha_rows check_quick_select(PARAM *param, uint idx, ha_rows limit, else if (param->range_count > 1) { /* - Scaning multiple key values in the index: the records are ROR + Scanning multiple key values in the index: the records are ROR for each value, but not between values. E.g, "SELECT ... x IN (1,3)" returns ROR order for all records with x=1, then ROR order for records with x=3 @@ -16070,7 +16070,7 @@ bool QUICK_GROUP_MIN_MAX_SELECT::add_range(SEL_ARG *sel_range) are more keyparts to follow the ones we are using we must make the condition on the key inclusive (because x < "ab" means x[0] < 'a' OR (x[0] == 'a' AND x[1] < 'b'). - To achive the above we must turn off the NEAR_MIN/NEAR_MAX + To achieve the above we must turn off the NEAR_MIN/NEAR_MAX */ void QUICK_GROUP_MIN_MAX_SELECT::adjust_prefix_ranges () { @@ -16407,7 +16407,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_max() /** - Find the next different key value by skiping all the rows with the same key + Find the next different key value by skipping all the rows with the same key value. 
Implements a specialized loose index access method for queries @@ -17460,7 +17460,7 @@ static void print_key_value(String *out, const KEY_PART_INFO *key_part, } /** - Print key parts involed in a range + Print key parts involved in a range @param[out] out String the key is appended to @param[in] key_part Index components description @param[in] n_keypart Number of keyparts in index diff --git a/sql/opt_sargable_left.cc b/sql/opt_sargable_left.cc index 6ae10e3701e..9718ceb2c79 100644 --- a/sql/opt_sargable_left.cc +++ b/sql/opt_sargable_left.cc @@ -110,10 +110,10 @@ @detail 'foo' here can be any constant we can compute during optimization, Only equality conditions are supported. - See SargableLeft above for detals. + See SargableLeft above for details. @param field The first argument of LEFT or SUBSTRING if sargable, - otherwise deferenced to NULL + otherwise dereferenced to NULL @param value_idx The index of argument that is the prefix string if sargable, otherwise dereferenced to -1 */ diff --git a/sql/opt_split.cc b/sql/opt_split.cc index c08d29e69a0..d359aa80b81 100644 --- a/sql/opt_split.cc +++ b/sql/opt_split.cc @@ -83,7 +83,7 @@ are evaluated then the optimizer should consider pushing t.a = t1.a, t.b = t2.b and (t.a = t1.a AND t.b = t2.b) to choose the best condition for splitting. Apparently here last condition is the best one because - it provides the miximum possible number of partitions. + it provides the maximum possible number of partitions. If we dropped the index on t3(a,b) and created the index on t3(a) instead then we would have two options for splitting: to push t.a = t1.a or to @@ -160,7 +160,7 @@ The set of all rows belonging to the union of several partitions is called here superpartition.
If a grouping operation is defined by the list e_1,...,e_n then any set S = {e_i1,...,e_ik} can be used to devide all rows - into superpartions such that for any two rows r1, r2 the following holds: + into superpartitions such that for any two rows r1, r2 the following holds: e_ij(r1) = e_ij(r2) for each e_ij from S. We use the splitting technique only if S consists of references to colums of the joined tables. For example if the GROUP BY list looks like this a, g(b), c we can consider diff --git a/sql/opt_subselect.cc b/sql/opt_subselect.cc index 4ad0540a3d6..5640358d363 100644 --- a/sql/opt_subselect.cc +++ b/sql/opt_subselect.cc @@ -345,7 +345,7 @@ with the first one: When SJM nests are present, we should take care not to construct equalities that violate the (SJM-RULE). This is achieved by generating separate sets of -equalites for top-level tables and for inner tables. That is, for the join +equalities for top-level tables and for inner tables. That is, for the join order ot1 - ot2 --\ /--- ot3 -- ot5 @@ -546,7 +546,7 @@ bool is_materialization_applicable(THD *thd, Item_in_subselect *in_subs, The disjunctive members !((Sql_cmd_update *) cmd)->is_multitable() !((Sql_cmd_delete *) cmd)->is_multitable() - will be removed when conversions of IN predicands to semi-joins are + will be removed when conversions of IN predicands to semi-joins are fully supported for single-table UPDATE/DELETE statements. */ @@ -1984,7 +1984,7 @@ static bool convert_subq_to_sj(JOIN *parent_join, Item_in_subselect *subq_pred) else if (left_exp->type() == Item::ROW_ITEM) { /* - disassemple left expression and add + disassemble left expression and add left1 = select_list_element1 and left2 = select_list_element2 ... */ for (uint i= 0; i < ncols; i++) @@ -3041,7 +3041,7 @@ void optimize_semi_joins(JOIN *join, table_map remaining_tables, uint idx, 1. strategy X removes fanout for semijoin X,Y 2. using strategy Z is cheaper, but it only removes fanout from semijoin X. - 3.
We have no clue what to do about fanount of semi-join Y. + 3. We have no clue what to do about fanout of semi-join Y. For the first iteration read_time will always be bigger than *current_read_time (as the 'strategy' is an addition to the @@ -3057,7 +3057,7 @@ void optimize_semi_joins(JOIN *join, table_map remaining_tables, uint idx, DBUG_ASSERT(pos->sj_strategy != sj_strategy); /* If the strategy chosen first time or - the strategy replace strategy which was used to exectly the same + the strategy replace strategy which was used to exactly the same tables */ if (pos->sj_strategy == SJ_OPT_NONE || @@ -3093,7 +3093,7 @@ void optimize_semi_joins(JOIN *join, table_map remaining_tables, uint idx, (*prev_strategy)->set_empty(); dups_producing_tables= prev_dups_producing_tables; join->sjm_lookup_tables= prev_sjm_lookup_tables; - // mark it 'none' to avpoid loops + // mark it 'none' to avoid loops pos->sj_strategy= SJ_OPT_NONE; // next skip to last; strategy= pickers + @@ -3149,7 +3149,7 @@ void optimize_semi_joins(JOIN *join, table_map remaining_tables, uint idx, Update JOIN's semi-join optimization state after the join tab new_tab has been added into the join prefix. - @seealso restore_prev_sj_state() does the reverse actoion + @seealso restore_prev_sj_state() does the reverse action */ void update_sj_state(JOIN *join, const JOIN_TAB *new_tab, @@ -4326,7 +4326,7 @@ uint get_number_of_tables_at_top_level(JOIN *join) Setup execution structures for one semi-join materialization nest: - Create the materialization temporary table - If we're going to do index lookups - create TABLE_REF structure to make the lookus + create TABLE_REF structure to make the lookups - else (if we're going to do a full scan of the temptable) create Copy_field structures to do copying. @@ -5360,7 +5360,7 @@ int setup_semijoin_loosescan(JOIN *join) application of FirstMatch strategy, with the exception that outer IN-correlated tables are considered to be non-correlated. 
- (4) - THe suffix of outer and outer non-correlated tables. + (4) - The suffix of outer and outer non-correlated tables. The choice between the strategies is made by the join optimizer (see @@ -5984,7 +5984,7 @@ enum_nested_loop_state join_tab_execution_startup(JOIN_TAB *tab) Create a dummy temporary table, useful only for the sake of having a TABLE* object with map,tablenr and maybe_null properties. - This is used by non-mergeable semi-join materilization code to handle + This is used by non-mergeable semi-join materialization code to handle degenerate cases where materialized subquery produced "Impossible WHERE" and thus wasn't materialized. */ @@ -6557,7 +6557,7 @@ bool setup_degenerate_jtbm_semi_joins(JOIN *join, The function saves the equalities between all pairs of the expressions from the left part of the IN subquery predicate and the corresponding columns of the subquery from the predicate in eq_list appending them - to the list. The equalities of eq_list will be later conjucted with the + to the list. The equalities of eq_list will be later conjuncted with the condition of the WHERE clause. In the case when a table is nested in another table 'nested_join' the @@ -7031,7 +7031,7 @@ bool JOIN::choose_tableless_subquery_plan() } /* - For IN subqueries, use IN->EXISTS transfomation, unless the subquery + For IN subqueries, use IN->EXISTS transformation, unless the subquery has been converted to a JTBM semi-join. In that case, just leave everything as-is, setup_jtbm_semi_joins() has special handling for cases like this. 
diff --git a/sql/opt_table_elimination.cc b/sql/opt_table_elimination.cc index 4cc1e264cd0..02e3dab8e9d 100644 --- a/sql/opt_table_elimination.cc +++ b/sql/opt_table_elimination.cc @@ -144,7 +144,7 @@ The algorithm starts with equality nodes that don't have any incoming edges (their expressions are either constant or depend only on tables that are - outside of the outer join in question) and performns a breadth-first + outside of the outer join in question) and performs a breadth-first traversal. If we reach the outer join nest node, it means outer join is functionally dependent and can be eliminated. Otherwise it cannot be eliminated. @@ -332,7 +332,7 @@ private: public: /* Space for field iterator */ char buf[Dep_value_field::iterator_size]; - /* !NULL <=> iterating over depdenent modules of this field */ + /* !NULL <=> iterating over dependent modules of this field */ Dep_value_field *field_dep; bool returned_goal; }; @@ -383,7 +383,7 @@ protected: uint unbound_args; Dep_module() : unbound_args(0) {} - /* to bump unbound_args when constructing depedendencies */ + /* to bump unbound_args when constructing dependencies */ friend class Field_dependency_recorder; friend class Dep_analysis_context; }; @@ -877,7 +877,7 @@ eliminate_tables_for_list(JOIN *join, List *join_list, SYNOPSIS check_func_dependency() - join Join we're procesing + join Join we're processing dep_tables Tables that we check to be functionally dependent (on everything else) it Iterator that enumerates these tables, or NULL if we're @@ -1334,8 +1334,8 @@ void build_eq_mods_for_cond(THD *thd, Dep_analysis_context *ctx, multiple-equality. Do two things: - Collect List of tblX.colY where tblX is one of the tables we're trying to eliminate. - - rembember if there was a bound value, either const_expr or tblY.colZ - swher tblY is not a table that we're trying to eliminate. + - remember if there was a bound value, either const_expr or tblY.colZ + where tblY is not a table that we're trying to eliminate. 
Store all collected information in a Dep_module_expr object. */ Item_equal *item_equal= (Item_equal*)cond; @@ -1400,7 +1400,7 @@ void build_eq_mods_for_cond(THD *thd, Dep_analysis_context *ctx, $LEFT_PART OR $RIGHT_PART - condition. This is achieved as follows: First, we apply distrubutive law: + condition. This is achieved as follows: First, we apply distributive law: (fdep_A_1 AND fdep_A_2 AND ...) OR (fdep_B_1 AND fdep_B_2 AND ...) = @@ -1846,7 +1846,7 @@ Dep_value_field *Dep_analysis_context::get_field_value(Field *field) /* Iteration over unbound modules that are our dependencies. for those we have: - - dependendencies of our fields + - dependencies of our fields - outer join we're in */ char *Dep_value_table::init_unbound_modules_iter(char *buf) diff --git a/sql/opt_trace.cc b/sql/opt_trace.cc index 22714596e55..122102ffbfb 100644 --- a/sql/opt_trace.cc +++ b/sql/opt_trace.cc @@ -84,7 +84,7 @@ ST_FIELD_INFO optimizer_trace_info[]= /* - TODO: one-line needs to be implemented seperately + TODO: one-line needs to be implemented separately */ const char *Opt_trace_context::flag_names[]= {"enabled", "default", NullS}; diff --git a/sql/opt_trace_context.h b/sql/opt_trace_context.h index f578a0c67ec..246efc86db5 100644 --- a/sql/opt_trace_context.h +++ b/sql/opt_trace_context.h @@ -42,7 +42,7 @@ private: 0 <=> this trace should be in information_schema. !=0 tracing is disabled, this currently happens when we want to trace a sub-statement. For now traces are only collect for the top statement - not for the sub-statments. + not for the sub-statements. 
*/ uint I_S_disabled; }; diff --git a/sql/opt_vcol_substitution.cc b/sql/opt_vcol_substitution.cc index 7ad63c21417..62710316f89 100644 --- a/sql/opt_vcol_substitution.cc +++ b/sql/opt_vcol_substitution.cc @@ -29,9 +29,9 @@ @file @brief - Virtual Column Substitution feature makes the optimizer recongize usage of + Virtual Column Substitution feature makes the optimizer recognize usage of virtual column expressions in the WHERE/ON clauses. If there is an index - on the virtual column, the optimizer is able construct query plans that + on the virtual column, the optimizer is able to construct query plans that use that index. */ @@ -369,7 +369,7 @@ void subst_vcol_if_compatible(Vcol_subst_context *ctx, /* @brief Do a quick and imprecise check if it makes sense to try Virtual Column - Substitutiion transformation for this item. + Substitution transformation for this item. @detail For vcol_expr='FOO' the item to be trans formed is the comparison item diff --git a/sql/optimizer_costs.h b/sql/optimizer_costs.h index cd4ac888019..d1ccc3c264e 100644 --- a/sql/optimizer_costs.h +++ b/sql/optimizer_costs.h @@ -102,7 +102,7 @@ extern OPTIMIZER_COSTS heap_optimizer_costs, tmp_table_optimizer_costs; A factor of 0.1 makes the cost of get_pq_sort_cost(10, 10, false) =0.52 (Reading 10 rows into a priority queue of 10 elements). - One consenquence if this factor is too high is that priority_queue will + One consequence if this factor is too high is that priority_queue will not use addon fields (to solve the sort without having to do an extra re-read of rows) even if the number of LIMIT is low. */ diff --git a/sql/optimizer_defaults.h b/sql/optimizer_defaults.h index 4eaa30757ce..882fbcda6f0 100644 --- a/sql/optimizer_defaults.h +++ b/sql/optimizer_defaults.h @@ -43,7 +43,7 @@ Cost of copying a row to 'table->record'. Used by scan_time() and rnd_pos_time() methods. 
- If this is too small, then table scans will be prefered over 'ref' + If this is too small, then table scans will be preferred over 'ref' as with table scans there are no key read (KEY_LOOKUP_COST), fewer disk reads but more record copying and row comparisions. If it's too big then MariaDB will used key lookup even when table scan is @@ -55,7 +55,7 @@ Cost of copying the key to 'table->record' If this is too small, then, for small tables, index scans will be - prefered over 'ref' as with index scans there are fewer disk reads. + preferred over 'ref' as with index scans there are fewer disk reads. */ #define DEFAULT_KEY_COPY_COST ((double) 0.000015685) @@ -103,7 +103,7 @@ #define DEFAULT_DISK_READ_COST ((double) IO_SIZE / 400000000.0 * 1000) /* - The follwoing is an old comment for hard-disks, please ignore the + The following is an old comment for hard-disks, please ignore the following, except if you like history: For sequential hard disk seeks the cost formula is: diff --git a/sql/parse_file.cc b/sql/parse_file.cc index f4aae1300e2..de579b72543 100644 --- a/sql/parse_file.cc +++ b/sql/parse_file.cc @@ -177,7 +177,7 @@ write_parameter(IO_CACHE *file, const uchar* base, File_option *parameter) LEX_STRING *val_s= (LEX_STRING *)(base + parameter->offset); // number of microseconds since Epoch, timezone-independent my_hrtime_t tm= my_hrtime(); - // Paded to 19 characters for compatibility + // Padded to 19 characters for compatibility val_s->length= snprintf(val_s->str, MICROSECOND_TIMESTAMP_BUFFER_SIZE, "%019lld", tm.val); DBUG_ASSERT(val_s->length == MICROSECOND_TIMESTAMP_BUFFER_SIZE-1); diff --git a/sql/partition_info.cc b/sql/partition_info.cc index e1d7c7b8152..62d8dbd7fc6 100644 --- a/sql/partition_info.cc +++ b/sql/partition_info.cc @@ -558,7 +558,7 @@ bool partition_info::set_up_defaults_for_partitioning(THD *thd, handler *file, no parameters RETURN VALUE - Erroneus field name Error, there are two fields with same name + Erroneous field name Error, there are 
two fields with same name NULL Ok, no field defined twice DESCRIPTION diff --git a/sql/partition_info.h b/sql/partition_info.h index e8ec830ea53..795f47781a9 100644 --- a/sql/partition_info.h +++ b/sql/partition_info.h @@ -180,7 +180,7 @@ public: * lock_partitions - partitions that must be locked (read or write). Usually read_partitions is the same set as lock_partitions, but in case of UPDATE the WHERE clause can limit the read_partitions set, - but not neccesarily the lock_partitions set. + but not necessarily the lock_partitions set. Usage pattern: * Initialized in ha_partition::open(). * read+lock_partitions is set according to explicit PARTITION, diff --git a/sql/privilege.h b/sql/privilege.h index eec0eb49df1..3e0a9df0d42 100644 --- a/sql/privilege.h +++ b/sql/privilege.h @@ -213,7 +213,7 @@ static inline constexpr privilege_t operator|(privilege_t a, privilege_t b) } -// Dyadyc bitwise assignment operators +// Dyadic bitwise assignment operators static inline privilege_t& operator&=(privilege_t &a, privilege_t b) { return a= a & b; diff --git a/sql/protocol.cc b/sql/protocol.cc index 0af8598cc59..1af6a5ed729 100644 --- a/sql/protocol.cc +++ b/sql/protocol.cc @@ -18,7 +18,7 @@ @file Low level functions for storing data to be send to the MySQL client. - The actual communction is handled by the net_xxx functions in net_serv.cc + The actual communication is handled by the net_xxx functions in net_serv.cc */ #include "mariadb.h" @@ -64,7 +64,7 @@ bool Protocol_binary::net_store_data(const uchar *from, size_t length) net_store_data_cs() - extended version with character set conversion. It is optimized for short strings whose length after - conversion is garanteed to be less than 251, which accupies + conversion is guaranteed to be less than 251, which occupies exactly one byte to store length. It allows not to use the "convert" member as a temporary buffer, conversion is done directly to the "packet" member. 
@@ -81,7 +81,7 @@ bool Protocol_binary::net_store_data_cs(const uchar *from, size_t length, #endif { uint dummy_errors; - /* Calculate maxumum possible result length */ + /* Calculate maximum possible result length */ size_t conv_length= to_cs->mbmaxlen * length / from_cs->mbminlen; if (conv_length > 250) @@ -482,7 +482,7 @@ bool Protocol::net_send_error_packet(THD *thd, uint sql_errno, const char *err, We keep a separate version for that range because it's widely used in libmysql. - uint is used as agrument type because of MySQL type conventions: + uint is used as argument type because of MySQL type conventions: - uint for 0..65536 - ulong for 0..4294967296 - ulonglong for bigger numbers. diff --git a/sql/proxy_protocol.cc b/sql/proxy_protocol.cc index 689d1af88f0..af56a615eab 100644 --- a/sql/proxy_protocol.cc +++ b/sql/proxy_protocol.cc @@ -525,7 +525,7 @@ bool is_proxy_protocol_allowed(const sockaddr *addr) /* Non-TCP addresses (unix domain socket, windows pipe and shared memory - gets tranlated to TCP4 localhost address. + gets translated to TCP4 localhost address. Note, that vio remote addresses are initialized with binary zeros for these protocols (which is AF_UNSPEC everywhere). diff --git a/sql/rowid_filter.cc b/sql/rowid_filter.cc index ecb2f82f114..4cfe8b9d086 100644 --- a/sql/rowid_filter.cc +++ b/sql/rowid_filter.cc @@ -656,7 +656,7 @@ Rowid_filter::build_return_code Range_rowid_filter::build() Binary search in the sorted array of a rowid filter @param ctxt context of the search - @parab elem rowid / primary key to look for + @param elem rowid / primary key to look for @details The function looks for the rowid / primary key ' elem' in this container diff --git a/sql/rpl_gtid.cc b/sql/rpl_gtid.cc index 94e8f188f77..57c27eb5106 100644 --- a/sql/rpl_gtid.cc +++ b/sql/rpl_gtid.cc @@ -101,7 +101,7 @@ rpl_slave_state::record_and_update_gtid(THD *thd, rpl_group_info *rgi) applied, then the event should be skipped. If not then the event should be applied. 
- To avoid two master connections tring to apply the same event + To avoid two master connections trying to apply the same event simultaneously, only one is allowed to work in any given domain at any point in time. The associated Relay_log_info object is called the owner of the domain (and there can be multiple parallel worker threads working in that @@ -1240,7 +1240,7 @@ rpl_slave_state_tostring_cb(rpl_gtid *gtid, void *data) The state consists of the most recently applied GTID for each domain_id, ie. the one with the highest sub_id within each domain_id. - Optinally, extra_gtids is a list of GTIDs from the binlog. This is used when + Optionally, extra_gtids is a list of GTIDs from the binlog. This is used when a server was previously a master and now needs to connect to a new master as a slave. For each domain_id, if the GTID in the binlog was logged with our own server_id _and_ has a higher seq_no than what is in the slave state, @@ -2309,7 +2309,7 @@ rpl_binlog_state::drop_domain(DYNAMIC_ARRAY *ids, Gtid_list_log_event *glev, char* errbuf) { - DYNAMIC_ARRAY domain_unique; // sequece (unsorted) of unique element*:s + DYNAMIC_ARRAY domain_unique; // sequence (unsorted) of unique element*:s rpl_binlog_state::element* domain_unique_buffer[16]; ulong k, l; const char* errmsg= NULL; diff --git a/sql/rpl_gtid.h b/sql/rpl_gtid.h index d5af90e029d..3a7f22a7d11 100644 --- a/sql/rpl_gtid.h +++ b/sql/rpl_gtid.h @@ -932,7 +932,7 @@ public: ~Intersecting_gtid_event_filter(); /* - Returns TRUE if any filers exclude the gtid, returns FALSE otherwise, i.e. + Returns TRUE if any filters exclude the gtid, returns FALSE otherwise, i.e. all filters must allow the GTID. 
*/ my_bool exclude(rpl_gtid *gtid) override; diff --git a/sql/rpl_injector.cc b/sql/rpl_injector.cc index 3080d92bf63..dd8e5175ccd 100644 --- a/sql/rpl_injector.cc +++ b/sql/rpl_injector.cc @@ -55,7 +55,7 @@ injector::transaction::~transaction() /* We set the first character to null just to give all the copies of the - start position a (minimal) chance of seening that the memory is lost. + start position a (minimal) chance of seeing that the memory is lost. All assuming the my_free does not step over the memory, of course. */ *the_memory= '\0'; diff --git a/sql/rpl_mi.cc b/sql/rpl_mi.cc index b6070c780d5..d5b56a69364 100644 --- a/sql/rpl_mi.cc +++ b/sql/rpl_mi.cc @@ -1508,7 +1508,7 @@ bool Master_info_index::remove_master_info(Master_info *mi, bool clear_log_files { File index_file_nr; - // Close IO_CACHE and FILE handler fisrt + // Close IO_CACHE and FILE handler first end_io_cache(&index_file); my_close(index_file.file, MYF(MY_WME)); diff --git a/sql/rpl_mi.h b/sql/rpl_mi.h index 6d2e5cef5f1..f8420ce8bbc 100644 --- a/sql/rpl_mi.h +++ b/sql/rpl_mi.h @@ -395,7 +395,7 @@ class Master_info : public Slave_reporting_capability Flag is raised at the parallel worker slave stop. Its purpose is to mark the whole start_alter_list when slave stops. The flag is read by Start Alter event to self-mark its state accordingly - at time its alter info struct is about to be appened to the list. + at time its alter info struct is about to be appended to the list. 
*/ bool is_shutdown= false; diff --git a/sql/rpl_parallel.cc b/sql/rpl_parallel.cc index b4746ed6d55..8ed38c93369 100644 --- a/sql/rpl_parallel.cc +++ b/sql/rpl_parallel.cc @@ -2534,7 +2534,7 @@ idx_found: if(flags_extra & (Gtid_log_event::FL_COMMIT_ALTER_E1 | Gtid_log_event::FL_ROLLBACK_ALTER_E1 )) { - //Free the corrosponding rpt current_start_alter_id + //Free the corresponding rpt current_start_alter_id for(uint i= 0; i < e->rpl_thread_max; i++) { if(e->rpl_threads[i].thr && diff --git a/sql/rpl_rli.cc b/sql/rpl_rli.cc index 09a3711ff27..02ca4272c7a 100644 --- a/sql/rpl_rli.cc +++ b/sql/rpl_rli.cc @@ -1073,7 +1073,7 @@ void Relay_log_info::inc_group_relay_log_pos(ulonglong log_pos, value which would lead to badly broken replication. Even the relay_log_pos will be corrupted in this case, because the len is the relay log is not "val". - With the end_log_pos solution, we avoid computations involving lengthes. + With the end_log_pos solution, we avoid computations involving lengths. */ mysql_cond_broadcast(&data_cond); if (!skip_lock) @@ -1286,7 +1286,7 @@ err: compare them each time this function is called, we only need to do this when current log name changes. If we have UNTIL_MASTER_POS condition we need to do this only after Rotate_log_event::do_apply_event() (which is - rare, so caching gives real benifit), and if we have UNTIL_RELAY_POS + rare, so caching gives real benefit), and if we have UNTIL_RELAY_POS condition then we should invalidate cached comarison value after inc_group_relay_log_pos() which called for each group of events (so we have some benefit if we have something like queries that use diff --git a/sql/rpl_rli.h b/sql/rpl_rli.h index 7bbb5b33daf..486cbe387ca 100644 --- a/sql/rpl_rli.h +++ b/sql/rpl_rli.h @@ -378,7 +378,7 @@ public: slave_connection_state ign_gtids; /* - Indentifies where the SQL Thread should create temporary files for the + Identifies where the SQL Thread should create temporary files for the LOAD DATA INFILE.
This is used for security reasons. */ char slave_patternload_file[FN_REFLEN]; @@ -396,7 +396,7 @@ public: /* The restart_gtid_state is used when the SQL thread restarts on a relay log in GTID mode. In multi-domain parallel replication, each domain may have a - separat position, so some events in more progressed domains may need to be + separate position, so some events in more progressed domains may need to be skipped. This keeps track of the domains that have not yet reached their starting event. */ @@ -917,7 +917,7 @@ struct rpl_group_info void reinit(Relay_log_info *rli); /* - Returns true if the argument event resides in the containter; + Returns true if the argument event resides in the container; more specifically, the checking is done against the last added event. */ bool is_deferred_event(Log_event * ev) diff --git a/sql/rpl_tblmap.h b/sql/rpl_tblmap.h index 63bac5e68a8..16c0e95e705 100644 --- a/sql/rpl_tblmap.h +++ b/sql/rpl_tblmap.h @@ -105,7 +105,7 @@ private: */ entry *m_free; - /* Correspondance between an id (a number) and a TABLE object */ + /* Correspondence between an id (a number) and a TABLE object */ HASH m_table_ids; }; diff --git a/sql/rpl_utility.cc b/sql/rpl_utility.cc index d3189394820..1d7592b00dc 100644 --- a/sql/rpl_utility.cc +++ b/sql/rpl_utility.cc @@ -291,7 +291,7 @@ table_def::~table_def() @notes event_buf will have same values on return. However during the process of - caluclating the checksum, it's temporary changed. Because of this the + calculating the checksum, it's temporary changed. Because of this the event_buf argument is not a pointer to const. */ diff --git a/sql/rpl_utility_server.cc b/sql/rpl_utility_server.cc index ccad7bd0709..132eb8b2ea7 100644 --- a/sql/rpl_utility_server.cc +++ b/sql/rpl_utility_server.cc @@ -497,7 +497,7 @@ Field_int::rpl_conv_type_from(const Conv_source &source, max_dispay_length() when the table field and the binlog field are of the same type. 
This code should eventually be rewritten not to use - compare_lengths(), to detect subtype/supetype relations + compare_lengths(), to detect subtype/supertype relations just using the type codes. */ DBUG_ASSERT(source.real_field_type() != real_type()); @@ -535,7 +535,7 @@ Field_longstr::rpl_conv_type_from(const Conv_source &source, /** @todo Implement Field_varstring_compressed::real_type() and - Field_blob_compressed::real_type() properly. All occurencies + Field_blob_compressed::real_type() properly. All occurrences of Field::real_type() have to be inspected and adjusted if needed. Until it is not ready we have to compare source_type against diff --git a/sql/scheduler.h b/sql/scheduler.h index 7bf1240973b..c0eff0b965d 100644 --- a/sql/scheduler.h +++ b/sql/scheduler.h @@ -75,7 +75,7 @@ void one_thread_scheduler(scheduler_functions *func, Atomic_counter *arg_c extern void scheduler_init(); extern void post_kill_notification(THD *); /* - To be used for pool-of-threads (implemeneted differently on various OSs) + To be used for pool-of-threads (implemented differently on various OSs) */ struct thd_scheduler { diff --git a/sql/semisync.h b/sql/semisync.h index 44f236606fd..f71d2555813 100644 --- a/sql/semisync.h +++ b/sql/semisync.h @@ -58,7 +58,7 @@ public: /* The layout of a semisync slave reply packet: 1 byte for the magic num - 8 bytes for the binlog positon + 8 bytes for the binlog position n bytes for the binlog filename, terminated with a '\0' */ #define REPLY_MAGIC_NUM_LEN 1 diff --git a/sql/semisync_master.cc b/sql/semisync_master.cc index 888085c0d7d..bb43a556199 100644 --- a/sql/semisync_master.cc +++ b/sql/semisync_master.cc @@ -691,7 +691,7 @@ int Repl_semi_sync_master::report_reply_binlog(uint32 server_id, /* If the requested position is behind the sending binlog position, * would not adjust sending binlog position. * We based on the assumption that there are multiple semi-sync slave, - * and at least one of them shou/ld be up to date. 
+ * and at least one of them should be up to date. * If all semi-sync slaves are behind, at least initially, the primary * can find the situation after the waiting timeout. After that, some * slaves should catch up quickly. @@ -1502,7 +1502,7 @@ void Repl_semi_sync_master::await_all_slave_replies(const char *msg) /* Get the waiting time given the wait's staring time. * * Return: - * >= 0: the waiting time in microsecons(us) + * >= 0: the waiting time in microseconds(us) * < 0: error in get time or time back traverse */ static int get_wait_time(const struct timespec& start_ts) diff --git a/sql/semisync_master.h b/sql/semisync_master.h index c96b0404035..c7ec983c22e 100644 --- a/sql/semisync_master.h +++ b/sql/semisync_master.h @@ -526,7 +526,7 @@ class Repl_semi_sync_master If info_msg is provided, it will be output via sql_print_information when there are transactions awaiting ACKs; info_msg is not output if there are - no transasctions to await. + no transactions to await. */ void await_all_slave_replies(const char *msg); @@ -599,7 +599,7 @@ class Repl_semi_sync_master /*Wait for ACK after writing/sync binlog to file*/ int wait_after_sync(const char* log_file, my_off_t log_pos); - /*Wait for ACK after commting the transaction*/ + /*Wait for ACK after committing the transaction*/ int wait_after_commit(THD* thd, bool all); /*Wait after the transaction is rollback*/ @@ -735,7 +735,7 @@ extern unsigned long long rpl_semi_sync_master_get_ack; /* This indicates whether we should keep waiting if no semi-sync slave is available. - 0 : stop waiting if detected no avaialable semi-sync slave. + 0 : stop waiting if detected no available semi-sync slave. 1 (default) : keep waiting until timeout even no available semi-sync slave. 
*/ extern char rpl_semi_sync_master_wait_no_slave; diff --git a/sql/semisync_master_ack_receiver.cc b/sql/semisync_master_ack_receiver.cc index 69ec99614a9..dc4c2fb401e 100644 --- a/sql/semisync_master_ack_receiver.cc +++ b/sql/semisync_master_ack_receiver.cc @@ -190,7 +190,7 @@ void Ack_receiver::remove_slave(THD *thd) mysql_cond_broadcast(&m_cond); /* Wait until Ack_receiver::run() acknowledges remove of slave - As this is only sent under the mutex and after listners has + As this is only sent under the mutex and after listeners has been collected, we know that listener has ignored the found slave. */ diff --git a/sql/session_tracker.cc b/sql/session_tracker.cc index e7e089b748c..ac1b91f22ed 100644 --- a/sql/session_tracker.cc +++ b/sql/session_tracker.cc @@ -46,7 +46,7 @@ void Session_sysvars_tracker::vars_list::reinit() Copy the given list. @param from Source vars_list object. - @param thd THD handle to retrive the charset in use. + @param thd THD handle to retrieve the charset in use. @retval true there is something to track @retval false nothing to track @@ -117,7 +117,7 @@ bool Session_sysvars_tracker::vars_list::insert(const sys_var *svar) @param var_list [IN] System variable list. @param throw_error [IN] bool when set to true, returns an error in case of invalid/duplicate values. - @param char_set [IN] charecter set information used for string + @param char_set [IN] character set information used for string manipulations. @return @@ -848,7 +848,7 @@ bool Transaction_state_tracker::store(THD *thd, String *buf) legal and equivalent syntax in MySQL, or START TRANSACTION sans options) will re-use any one-shots set up so far (with SET before the first transaction started, and with - all subsequent STARTs), except for WITH CONSISTANT SNAPSHOT, + all subsequent STARTs), except for WITH CONSISTENT SNAPSHOT, which will never be chained and only applies when explicitly given. 
@@ -952,7 +952,7 @@ bool Transaction_state_tracker::store(THD *thd, String *buf) /* "READ ONLY" / "READ WRITE" We could transform this to SET TRANSACTION even when it occurs - in START TRANSACTION, but for now, we'll resysynthesize the original + in START TRANSACTION, but for now, we'll resynthesize the original command as closely as possible. */ buf->append(STRING_WITH_LEN("SET TRANSACTION ")); diff --git a/sql/set_var.cc b/sql/set_var.cc index 6359eb9ab3f..c1d826aeeb0 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -860,7 +860,7 @@ int set_var::light_check(THD *thd) @returns 0|1 ok or ERROR @note ERROR can be only due to abnormal operations involving - the server's execution evironment such as + the server's execution environment such as out of memory, hard disk failure or the computer blows up. Consider set_var::check() method if there is a need to return an error due to logics. diff --git a/sql/set_var.h b/sql/set_var.h index 607b8127b6a..da74a135c37 100644 --- a/sql/set_var.h +++ b/sql/set_var.h @@ -273,7 +273,7 @@ protected: /** A base class for everything that can be set with SET command. It's similar to Items, an instance of this is created by the parser - for every assigmnent in SET (or elsewhere, e.g. in SELECT). + for every assignment in SET (or elsewhere, e.g. in SELECT). */ class set_var_base :public Sql_alloc { diff --git a/sql/slave.cc b/sql/slave.cc index ca9f05c5425..2d8ef61f0dd 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -329,7 +329,7 @@ gtid_pos_table_creation(THD *thd, plugin_ref engine, LEX_CSTRING *table_name) thd->set_db(&MYSQL_SCHEMA_NAME); thd->clear_error(); ulonglong thd_saved_option= thd->variables.option_bits; - /* This query shuold not be binlogged. */ + /* This query should not be binlogged. 
*/ thd->variables.option_bits&= ~(ulonglong)OPTION_BIN_LOG; thd->set_query_and_id(query.c_ptr(), query.length(), thd->charset(), next_query_id()); @@ -1049,7 +1049,7 @@ terminate_slave_thread(THD *thd, /* Is is critical to test if the slave is running. Otherwise, we might - be referening freed memory trying to kick it + be referencing freed memory trying to kick it */ while (*slave_running) // Should always be true @@ -1323,7 +1323,7 @@ static bool io_slave_killed(Master_info* mi) The function analyzes a possible killed status and makes a decision whether to accept it or not. Normally upon accepting the sql thread goes to shutdown. - In the event of deffering decision @rli->last_event_start_time waiting + In the event of deferring decision @rli->last_event_start_time waiting timer is set to force the killed status be accepted upon its expiration. @param thd pointer to a THD instance @@ -1391,8 +1391,8 @@ static bool sql_slave_killed(rpl_group_info *rgi) may eventually give out to complete the current group and in that case there might be issues at consequent slave restart, see the error message. WL#2975 offers a robust solution - requiring to store the last exectuted event's coordinates - along with the group's coordianates instead of waiting with + requiring to store the last executed event's coordinates + along with the group's coordinates instead of waiting with @c last_event_start_time the timer. */ @@ -2006,7 +2006,7 @@ inconsistency if replicated data deals with collation."); slave and master, but we can't rely on value of @@system_time_zone variable (it is time zone abbreviation) since it determined at start time and so could differ for slave and master even if they are really - in the same system time zone. So we are omiting this check and just + in the same system time zone. So we are omitting this check and just relying on documentation. Also according to Monty there are many users who are using replication between servers in various time zones.
Hence such check will broke everything for them. (And now everything will @@ -3740,7 +3740,7 @@ apply_event_and_update_pos_apply(Log_event* ev, THD* thd, rpl_group_info *rgi, ev->update_pos(rli); @endcode - It also does the following maintainance: + It also does the following maintenance: - Initializes the thread's server_id and time; and the event's thread. @@ -4096,7 +4096,7 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli, The following failure injecion works in cooperation with tests setting @@global.debug= 'd,incomplete_group_in_relay_log'. Xid or Commit events are not executed to force the slave sql - read hanging if the realy log does not have any more events. + read hanging if the relay log does not have any more events. */ DBUG_EXECUTE_IF("incomplete_group_in_relay_log", if ((typ == XID_EVENT) || @@ -4436,7 +4436,7 @@ static int try_to_reconnect(THD *thd, MYSQL *mysql, Master_info *mi, IO_RPL_LOG_NAME, mi->master_log_pos, tmp.c_ptr_safe()); /* - Raise a warining during registering on master/requesting dump. + Raise a warning during registering on master/requesting dump. Log a message reading event. */ if (messages[SLAVE_RECON_MSG_COMMAND][0]) @@ -6134,7 +6134,7 @@ static int queue_event(Master_info* mi, const uchar *buf, ulong event_len) /* compare local and event's versions of log_file, log_pos. - Heartbeat is sent only after an event corresponding to the corrdinates + Heartbeat is sent only after an event corresponding to the coordinates the heartbeat carries. Slave can not have a higher coordinate except in the only special case when mi->master_log_name, master_log_pos have never @@ -6143,7 +6143,7 @@ static int queue_event(Master_info* mi, const uchar *buf, ulong event_len) Slave can have lower coordinates, if some event from master was omitted.
- TODO: handling `when' for SHOW SLAVE STATUS' snds behind + TODO: handling `when' for SHOW SLAVE STATUS' sends behind */ if (memcmp(mi->master_log_name, hb.get_log_ident(), hb.get_ident_len()) || mi->master_log_pos > hb.log_pos) { @@ -6871,7 +6871,7 @@ void end_relay_log_info(Relay_log_info* rli) mysql_mutex_unlock(log_lock); /* Delete the slave's temporary tables from memory. - In the future there will be other actions than this, to ensure persistance + In the future there will be other actions than this, to ensure persistence of slave's temp tables after shutdown. */ rli->close_temporary_tables(); @@ -7245,7 +7245,7 @@ static Log_event* next_event(rpl_group_info *rgi, ulonglong *event_size) finishes executing the new event; it will be look abnormal only if the events have old timestamps (then you get "many", 0, "many"). - Transient phases like this can be fixed with implemeting + Transient phases like this can be fixed with implementing Heartbeat event which provides the slave the status of the master at time the master does not have any new update to send. Seconds_Behind_Master would be zero only when master has no @@ -7848,7 +7848,7 @@ void Rows_event_tracker::update(const char *file_name, my_off_t pos, /** The function is called at next event reading after a sequence of Rows- log-events. It checks the end-of-statement status - of the past sequence to report on any isssue. + of the past sequence to report on any issue. In the positive case the tracker gets reset. @return true when the Rows- event group integrity found compromised, diff --git a/sql/socketpair.c b/sql/socketpair.c index ef89fa0446b..d913475a93b 100644 --- a/sql/socketpair.c +++ b/sql/socketpair.c @@ -26,7 +26,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* Changes: - * 2023-12-25 Addopted for MariaDB usage + * 2023-12-25 Adopted for MariaDB usage * 2014-02-12: merge David Woodhouse, Ger Hobbelt improvements * git.infradead.org/users/dwmw2/openconnect.git/commitdiff/bdeefa54 * github.com/GerHobbelt/selectable-socketpair diff --git a/sql/sp.cc b/sql/sp.cc index ef6b742712e..3470c2e0ac1 100644 --- a/sql/sp.cc +++ b/sql/sp.cc @@ -838,7 +838,7 @@ static LEX_STRING copy_definition_string(String *defstr, /** - @brief The function parses input strings and returns SP stucture. + @brief The function parses input strings and returns SP structure. @param[in] thd Thread handler @param[in] defstr CREATE... string @@ -984,7 +984,7 @@ Sp_handler::db_load_routine(THD *thd, const Database_qualified_name *name, defstr.set_thread_specific(); /* - We have to add DEFINER clause and provide proper routine characterstics in + We have to add DEFINER clause and provide proper routine characteristics in routine definition statement that we build here to be able to use this definition for SHOW CREATE PROCEDURE later. */ @@ -1238,7 +1238,7 @@ Sp_handler_package_spec:: - SP_OK means that "CREATE PACKAGE pkg" had a correspoinding "CREATE PACKAGE BODY pkg", which was successfully dropped. */ - return ret; // Other codes mean an unexpecte error + return ret; // Other codes mean an unexpected error } return Sp_handler::sp_find_and_drop_routine(thd, table, name); } @@ -1550,7 +1550,7 @@ log: my_error(ER_OUT_OF_RESOURCES, MYF(0)); goto done; } - /* restore sql_mode when binloging */ + /* restore sql_mode when binlogging */ thd->variables.sql_mode= org_sql_mode; /* Such a statement can always go directly to binlog, no trans cache */ if (thd->binlog_query(THD::STMT_QUERY_TYPE, @@ -3107,7 +3107,7 @@ Sp_handler::show_create_sp(THD *thd, String *buf, (used for I_S ROUTINES & PARAMETERS tables). 
@param[in] thd thread handler - @param[in] proc_table mysql.proc table structurte + @param[in] proc_table mysql.proc table structure @param[in] db database name @param[in] name sp name @param[in] sql_mode SQL mode diff --git a/sql/sp_cache.cc b/sql/sp_cache.cc index 50305f6abf4..60e01a99d10 100644 --- a/sql/sp_cache.cc +++ b/sql/sp_cache.cc @@ -177,7 +177,7 @@ void sp_cache_insert(sp_cache **cp, sp_head *sp) SYNOPSIS sp_cache_lookup() cp Cache to look into - name Name of rutine to find + name Name of routine to find NOTE An obsolete (but not more obsolete then since last diff --git a/sql/sp_head.cc b/sql/sp_head.cc index 5f1f6c04b80..78209fa5330 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -722,7 +722,7 @@ bool sp_package::validate_public_routines(THD *thd, sp_package *spec) bool sp_package::validate_private_routines(THD *thd) { /* - Check that all forwad declarations in + Check that all forward declarations in CREATE PACKAGE BODY have implementations. */ List_iterator it(m_routine_declarations); @@ -1639,7 +1639,7 @@ bool sp_head::check_execute_access(THD *thd) const @param thd @param ret_value - @retval NULL - error (access denided or EOM) + @retval NULL - error (access denied or EOM) @retval !NULL - success (the invoker has rights to all %TYPE tables) */ @@ -2027,7 +2027,7 @@ sp_head::execute_function(THD *thd, Item **argp, uint argcount, we have separate union for each such event and hence can't use query_id of real calling statement as the start of all these unions (this will break logic of replication of user-defined - variables). So we use artifical value which is guaranteed to + variables). So we use artificial value which is guaranteed to be greater than all query_id's of all statements belonging to previous events/unions. 
Possible alternative to this is logging of all function invocations @@ -2317,7 +2317,7 @@ sp_head::execute_procedure(THD *thd, List *args) /* In the case when we weren't able to employ reuse mechanism for - OUT/INOUT paranmeters, we should reallocate memory. This + OUT/INOUT parameters, we should reallocate memory. This allocation should be done on the arena which will live through all execution of calling routine. */ diff --git a/sql/sp_head.h b/sql/sp_head.h index 11f1dae5926..38566a96b33 100644 --- a/sql/sp_head.h +++ b/sql/sp_head.h @@ -286,7 +286,7 @@ public: /** Recursion level of the current SP instance. The levels are numbered from 0 */ ulong m_recursion_level; /** - A list of diferent recursion level instances for the same procedure. + A list of different recursion level instances for the same procedure. For every recursion level we have a sp_head instance. This instances connected in the list. The list ordered by increasing recursion level (m_recursion_level). @@ -299,7 +299,7 @@ public: /** Pointer to the first free (non-INVOKED) routine in the list of cached instances for this SP. This pointer is set only for the first - SP in the list of instences (see above m_first_cached_sp pointer). + SP in the list of instances (see above m_first_cached_sp pointer). The pointer equal to 0 if we have no free instances. 
For non-first instance value of this pointer meanless (point to itself); */ @@ -559,7 +559,7 @@ public: FOR index IN cursor(1,2,3) -- cursor with parameters The code generated by this method does the following during SP run-time: - - Sets all cursor parameter vartiables from "parameters" + - Sets all cursor parameter variables from "parameters" - Initializes the index ROW-type variable from the cursor (the structure is copied from the cursor to the index variable) - The cursor gets opened @@ -938,7 +938,7 @@ public: /* Check EXECUTE access: - - in case of a standalone rotuine, for the routine itself + - in case of a standalone routine, for the routine itself - in case of a package routine, for the owner package body */ bool check_execute_access(THD *thd) const; diff --git a/sql/sp_instr.cc b/sql/sp_instr.cc index a87d1c31756..9b392f88a5b 100644 --- a/sql/sp_instr.cc +++ b/sql/sp_instr.cc @@ -104,7 +104,7 @@ static int cmp_rqp_locations(const void *a_, const void *b_) 2) We need to empty thd->user_var_events after we have wrote a function call. This is currently done by making reset_dynamic(&thd->user_var_events); - calls in several different places. (TODO cosider moving this into + calls in several different places. (TODO consider moving this into mysql_bin_log.write() function) 4.2 Auto_increment storage in binlog diff --git a/sql/sp_pcontext.cc b/sql/sp_pcontext.cc index 6565617a0e8..ac6c134514a 100644 --- a/sql/sp_pcontext.cc +++ b/sql/sp_pcontext.cc @@ -246,7 +246,7 @@ sp_variable *sp_pcontext::find_variable(const LEX_CSTRING *name, - p0 has frame offset 0 and run-time offset 1 - p1 has frame offset 1 and run-time offset 2 - Run-time offsets on a frame can have holes, but offsets monotonocally grow, + Run-time offsets on a frame can have holes, but offsets monotonically grow, so run-time offsets of all variables are not greater than the run-time offset of the very last variable in this frame. 
*/ diff --git a/sql/sp_pcontext.h b/sql/sp_pcontext.h index 4ba199040a6..64c95ffec8e 100644 --- a/sql/sp_pcontext.h +++ b/sql/sp_pcontext.h @@ -407,12 +407,12 @@ public: { public: /* - The label poiting to the body start, + The label pointing to the body start, either explicit or automatically generated. Used during generation of "ITERATE loop_label" to check if "loop_label" is a FOR loop label. - In case of a FOR loop, some additional code - (cursor fetch or iteger increment) is generated before + (cursor fetch or integer increment) is generated before the backward jump to the beginning of the loop body. - In case of other loop types (WHILE, REPEAT) only the jump is generated. diff --git a/sql/spatial.cc b/sql/spatial.cc index 0198fdb8055..6b34295566c 100644 --- a/sql/spatial.cc +++ b/sql/spatial.cc @@ -53,7 +53,7 @@ int MBR::within(const MBR *mbr) /* We have to take into account the 'dimension' of the MBR, where the dimension of a single point is 0, - the dimesion of an vertical or horizontal line is 1, + the dimension of a vertical or horizontal line is 1, and finally the dimension of the solid rectangle is 2. */ @@ -1070,7 +1070,7 @@ const Geometry::Class_info *Gis_point::get_class_info() const @param r sphere radius @param error pointer describing the error in case of the boundary conditions - @return distance in case without error, it is caclulcated distance (non-negative), + @return distance in case without error, it is calculated distance (non-negative), in case error exist, negative value. */ double Gis_point::calculate_haversine(const Geometry *g, diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index ac6f0820a06..281338dd049 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -612,7 +612,7 @@ bool ROLE_GRANT_PAIR::init(MEM_ROOT *mem, /* Create a buffer that holds all 3 NULL terminated strings in succession To save memory space, the same buffer is used as the hashkey - Add the '\0' aswell. + Add the '\0' as well. 
*/ size_t bufflen= username.length + hostname.length + rolename.length + 3; char *buff= (char *)alloc_root(mem, bufflen); @@ -3670,7 +3670,7 @@ static int acl_user_update(THD *thd, ACL_USER *acl_user, uint nauth, return 1; } } - DBUG_ASSERT(work_copy); // allocated under the same condinition + DBUG_ASSERT(work_copy); // allocated under the same condition memcpy(acl_user->auth, work_copy, nauth * sizeof(ACL_USER_PARAM::AUTH)); } @@ -3989,7 +3989,7 @@ static my_bool acl_role_reset_role_arrays(void *ptr, } /* - Add a the coresponding pointers present in the mapping to the entries in + Add the corresponding pointers present in the mapping to the entries in acl_users and acl_roles */ static bool add_role_user_mapping(ACL_USER_BASE *grantee, ACL_ROLE *role) @@ -4696,7 +4696,7 @@ static bool compare_hostname(const acl_host_and_ip *host, const char *hostname, @return a flag telling if the argument needs to be resolved or not. @retval TRUE the argument is a host name and needs to be resolved. - @retval FALSE the argument is either an IP address, or a patter and + @retval FALSE the argument is either an IP address, or a pattern and should not be resolved. */ @@ -5522,7 +5522,7 @@ public: column= (char*) memdup_root(&grant_memroot,c.ptr(), key_length=c.length()); } - /* this constructor assumes thas source->column is allocated in grant_memroot */ + /* this constructor assumes that source->column is allocated in grant_memroot */ GRANT_COLUMN(GRANT_COLUMN *source) : column(source->column), rights (source->rights), init_rights(NO_ACL), key_length(source->key_length) { } }; @@ -6283,7 +6283,7 @@ static int replace_routine_table(THD *thd, GRANT_NAME *grant_name, grant tables for the user. There is however always a small change that the user has modified the grant tables directly. - Also, there is also a second posibility that this routine entry + Also, there is also a second possibility that this routine entry is created for a role by being inherited from a granted role. 
*/ if (revoke_grant) @@ -6458,9 +6458,9 @@ static void propagate_role_grants(ACL_ROLE *role, We need to rebuild all roles' related access bits. This cannot be a simple depth-first search, instead we have to merge - privieges for all roles granted to a specific grantee, *before* + privileges for all roles granted to a specific grantee, *before* merging privileges for this grantee. In other words, we must visit all - parent nodes of a specific node, before descencing into this node. + parent nodes of a specific node, before descending into this node. For example, if role1 is granted to role2 and role3, and role3 is granted to role2, after "GRANT ... role1", we cannot merge privileges @@ -8745,7 +8745,7 @@ bool check_grant_column(THD *thd, GRANT_INFO *grant, char command[128]; get_privilege_desc(command, sizeof(command), want_access); - /* TODO perhaps error should print current rolename aswell */ + /* TODO perhaps error should print current rolename as well */ my_error(ER_COLUMNACCESS_DENIED_ERROR, MYF(0), command, sctx->priv_user, sctx->host_or_ip, column_name.str, table_name); DBUG_RETURN(1); @@ -8835,7 +8835,7 @@ bool check_column_grant_in_table_ref(THD *thd, TABLE_LIST * table_ref, @param fields an iterator over the fields of a table reference. @return Operation status @retval 0 Success - @retval 1 Falure + @retval 1 Failure @details This function walks over the columns of a table reference The columns may originate from different tables, depending on the kind of table reference, e.g. join, view. 
@@ -9055,7 +9055,7 @@ bool check_grant_db(THD *thd, const char *db) RETURN 0 ok - 1 Error: User did not have the requested privielges + 1 Error: User did not have the requested privileges ****************************************************************************/ bool check_grant_routine(THD *thd, privilege_t want_access, @@ -14052,7 +14052,7 @@ static bool parse_com_change_user_packet(MPVIO_EXT *mpvio, uint packet_length) @param thd thread handle @return true in case the option require_secure_transport is on and the client - uses euther named pipe or unix socket or ssl, else return false + uses either named pipe or unix socket or ssl, else return false */ static bool check_require_secured_transport(THD *thd) diff --git a/sql/sql_admin.cc b/sql/sql_admin.cc index 50716330b93..e55d8481650 100644 --- a/sql/sql_admin.cc +++ b/sql/sql_admin.cc @@ -236,7 +236,7 @@ static int prepare_for_repair(THD *thd, TABLE_LIST *table_list, { /* Table open failed, maybe because we run out of memory. - Close all open tables and relaese all MDL locks + Close all open tables and release all MDL locks */ tdc_release_share(share); share->tdc->flush(thd, true); @@ -755,7 +755,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, /* CHECK/REPAIR TABLE command is only command where VIEW allowed here and this command use only temporary table method for VIEWs resolving => - there can't be VIEW tree substitition of join view => if opening table + there can't be VIEW tree substitution of join view => if opening table succeed then table->table will have real TABLE pointer as value (in case of join view substitution table->table can be 0, but here it is impossible) @@ -1005,7 +1005,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, /* Note that type() always return MYSQL_TYPE_BLOB for all blob types. Another function needs to be added - if we in the future want to distingush between blob + if we in the future want to distinguish between blob types here. 
*/ enum enum_field_types type= field->type(); diff --git a/sql/sql_audit.cc b/sql/sql_audit.cc index 5e10ca17448..3c9c656c7f5 100644 --- a/sql/sql_audit.cc +++ b/sql/sql_audit.cc @@ -290,7 +290,7 @@ int initialize_audit_plugin(void *plugin_) mysql_mutex_unlock(&LOCK_audit_mask); /* - Pre-acquire the newly inslalled audit plugin for events that + Pre-acquire the newly installed audit plugin for events that may potentially occur further during INSTALL PLUGIN. When audit event is triggered, audit subsystem acquires interested diff --git a/sql/sql_base.cc b/sql/sql_base.cc index 61621960aa8..f6048cdf996 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -675,7 +675,7 @@ err: Clear 'check_table_binlog_row_based_done' flag. For tables which were used by current substatement the flag is cleared as part of 'ha_reset()' call. - For the rest of the open tables not used by current substament if this + For the rest of the open tables not used by current substatement if this flag is enabled as part of current substatement execution, (for example when THD::binlog_write_table_maps() calls prepare_for_row_logging()), clear the flag explicitly. @@ -2321,7 +2321,7 @@ retry_share: slightly increases probability of deadlock. This problem will be solved once Alik pushes his temporary table refactoring patch and we can start - pre-acquiring metadata locks at the beggining of + pre-acquiring metadata locks at the beginning of open_tables() call. */ enum enum_mdl_type mdl_type= MDL_BACKUP_DML; @@ -2836,7 +2836,7 @@ unlink_all_closed_tables(THD *thd, MYSQL_LOCK *lock, size_t reopen_count) /* We have to rollback any open transactions here. This is required in the case where the server has been killed - but some transations are still open (as part of locked tables). + but some transactions are still open (as part of locked tables). If we don't do this, we will get an assert in unlock_locked_tables(). 
*/ ha_rollback_trans(thd, FALSE); @@ -4210,7 +4210,7 @@ open_and_process_table(THD *thd, TABLE_LIST *tables, uint *counter, uint flags, We can't rely on simple check for TABLE_LIST::view to determine that this is a view since during re-execution we might reopen ordinary table in place of view and thus have TABLE_LIST::view - set from repvious execution and TABLE_LIST::table set from + set from previous execution and TABLE_LIST::table set from current. */ if (!tables->table && tables->view) @@ -4941,7 +4941,7 @@ bool DML_prelocking_strategy::handle_routine(THD *thd, /* We assume that for any "CALL proc(...)" statement sroutines_list will have 'proc' as first element (it may have several, consider e.g. - "proc(sp_func(...)))". This property is currently guaranted by the + "proc(sp_func(...)))". This property is currently guaranteed by the parser. */ @@ -6491,7 +6491,7 @@ find_field_in_table(THD *thd, TABLE *table, const Lex_ident_column &name, This procedure detects the type of the table reference 'table_list' and calls the corresponding search routine. - The routine checks column-level privieleges for the found field. + The routine checks column-level privileges for the found field. RETURN 0 field is not found @@ -7269,7 +7269,7 @@ test_if_string_in_list(const Lex_ident_column &find, List *str_list) set_new_item_local_context() thd pointer to current thread item item for which new context is created and set - table_ref table ref where an item showld be resolved + table_ref table ref where an item should be resolved DESCRIPTION Create a new name resolution context for an item, so that the item @@ -7867,7 +7867,7 @@ err: DESCRIPTION Apply the procedure 'store_top_level_join_columns' to each of the - top-level table referencs of the FROM clause. Adjust the list of tables + top-level table references of the FROM clause. Adjust the list of tables for name resolution - context->first_name_resolution_table to the top-most, lef-most NATURAL/USING join. 
@@ -8054,10 +8054,10 @@ bool setup_fields(THD *thd, Ref_ptr_array ref_pointer_array, thd->column_usage= column_usage; DBUG_PRINT("info", ("thd->column_usage: %d", thd->column_usage)); /* - Followimg 2 condition always should be true (but they was added + Following 2 conditions always should be true (but they were added due to an error present only in 10.3): 1) nest_level shoud be 0 or positive; - 2) nest level of all SELECTs on the same level shoud be equal first + 2) nest level of all SELECTs on the same level should be equal to first SELECT on this level (and each other). */ DBUG_ASSERT(lex->current_select->nest_level >= 0); @@ -8393,7 +8393,7 @@ bool setup_tables(THD *thd, Name_resolution_context *context, tables Table list (select_lex->table_list) conds Condition of current SELECT (can be changed by VIEW) leaves List of join table leaves list (select_lex->leaf_tables) - refresh It is onle refresh for subquery + refresh It is only refresh for subquery select_insert It is SELECT ... INSERT command want_access what access is needed full_table_list a parameter to pass to the make_leaves_list function @@ -9499,7 +9499,7 @@ my_bool mysql_rm_tmp_tables(void) } /* File can be already deleted by tmp_table.file->delete_table(). - So we hide error messages which happnes during deleting of these + So we hide error messages which happen during deleting of these files(MYF(0)). */ (void) mysql_file_delete(key_file_misc, path, MYF(0)); diff --git a/sql/sql_base.h b/sql/sql_base.h index 6b1dee77f54..a65a223b6e5 100644 --- a/sql/sql_base.h +++ b/sql/sql_base.h @@ -95,7 +95,7 @@ TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type update, */ #define MYSQL_OPEN_GET_NEW_TABLE 0x0040 /* 0x0080 used to be MYSQL_OPEN_SKIP_TEMPORARY */ -/** Fail instead of waiting when conficting metadata lock is discovered. */ +/** Fail instead of waiting when conflicting metadata lock is discovered. 
*/ #define MYSQL_OPEN_FAIL_ON_MDL_CONFLICT 0x0100 /** Open tables using MDL_SHARED lock instead of one specified in parser. */ #define MYSQL_OPEN_FORCE_SHARED_MDL 0x0200 diff --git a/sql/sql_basic_types.h index 256c8baa590..6cfd7ffb762 100644 --- a/sql/sql_basic_types.h +++ b/sql/sql_basic_types.h @@ -133,7 +133,7 @@ public: { /* Use FRAC_NONE when the value needs no rounding nor truncation, - because it is already known not to haveany fractional digits outside + because it is already known not to have any fractional digits outside of the requested precision. */ FRAC_NONE= 0, diff --git a/sql/sql_bitmap.h index 05b201a5d6e..b80acf2ebaf 100644 --- a/sql/sql_bitmap.h +++ b/sql/sql_bitmap.h @@ -49,7 +49,7 @@ public: template class Bitmap { /* - Workaround GCC optimizer bug (generating SSE instuctions on unaligned data) + Workaround GCC optimizer bug (generating SSE instructions on unaligned data) */ #if defined (__GNUC__) && defined(__x86_64__) && (__GNUC__ < 6) && !defined(__clang__) #define NEED_GCC_NO_SSE_WORKAROUND diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc index 45f7d4a837b..2dce6eafa82 100644 --- a/sql/sql_cache.cc +++ b/sql/sql_cache.cc @@ -296,7 +296,7 @@ functions: cache block by block. 6. Query_cache::resize - Used to change the available memory used by the query cache. This - will also invalidate the entrie query cache in one free operation. + will also invalidate the entire query cache in one free operation. 7. Query_cache::pack - Used when a FLUSH QUERY CACHE is issued. This changes the order of the used memory blocks in physical memory order and move all avail- @@ -1372,7 +1372,7 @@ void Query_cache::store_query(THD *thd, TABLE_LIST *tables_used) Testing 'query_cache_size' without a lock here is safe: the thing we may loose is that the query won't be cached, but we save on mutex locking in the case when query cache is disabled or the - query is uncachable. + query is uncacheable. 
See also a note on double-check locking usage above. */ @@ -2560,7 +2560,7 @@ void Query_cache::init() 0x0A LINE FEED 0x0B VERTICAL TAB 0x0C FORM FEED - 0x0D CARRIAGE RETUR + 0x0D CARRIAGE RETURN 0x20 SPACE Additionally, only some of the ASCII-compatible character sets @@ -3587,7 +3587,7 @@ Query_cache::insert_table(THD *thd, size_t key_len, const char *key, header->set_hashed(hash); /* - We insert this table without the assumption that it isn't refrenenced by + We insert this table without the assumption that it isn't referenced by any queries. */ header->m_cached_query_count= 0; @@ -3659,7 +3659,7 @@ Query_cache::allocate_block(size_t len, my_bool not_less, size_t min) if (len >= MY_MIN(query_cache_size, query_cache_limit)) { - DBUG_PRINT("qcache", ("Query cache hase only %zu memory and limit %zu", + DBUG_PRINT("qcache", ("Query cache has only %zu memory and limit %zu", query_cache_size, query_cache_limit)); DBUG_RETURN(0); // in any case we don't have such piece of memory } @@ -3701,7 +3701,7 @@ Query_cache::get_free_block(size_t len, my_bool not_less, size_t min) first = list; uint n = 0; while ( n < QUERY_CACHE_MEM_BIN_TRY && - first->length < len) //we don't need irst->next != list + first->length < len) //we don't need first->next != list { first=first->next; n++; @@ -4172,7 +4172,7 @@ Query_cache::is_cacheable(THD *thd, LEX *lex, if (thd->in_multi_stmt_transaction_mode() && ((*tables_type)&HA_CACHE_TBL_TRANSACT)) { - DBUG_PRINT("qcache", ("not in autocommin mode")); + DBUG_PRINT("qcache", ("not in autocommit mode")); DBUG_RETURN(0); } DBUG_PRINT("qcache", ("select is using %d tables", table_count)); @@ -4896,7 +4896,7 @@ my_bool Query_cache::check_integrity(bool locked) DBUG_PRINT("qcache", ("block %p, type %u...", block, (uint) block->type)); - // Check allignment + // Check alignment if ((((size_t)block) % ALIGN_SIZE(1)) != (((size_t)first_block) % ALIGN_SIZE(1))) { @@ -4982,7 +4982,7 @@ my_bool Query_cache::check_integrity(bool locked) break; } case 
Query_cache_block::RES_INCOMPLETE: - // This type of block can be not lincked yet (in multithread environment) + // This type of block can be not linked yet (in multithread environment) break; case Query_cache_block::RES_BEG: case Query_cache_block::RES_CONT: @@ -5150,7 +5150,7 @@ my_bool Query_cache::in_blocks(Query_cache_block * point) if (block != first_block) { DBUG_PRINT("error", - ("block %p (%p<-->%p) not owned by pysical list", + ("block %p (%p<-->%p) not owned by physical list", block, block->pprev, block->pnext)); return 1; } @@ -5163,7 +5163,7 @@ err1: if (block->pnext->pprev != block) { DBUG_PRINT("error", - ("block %p in physicel list is incorrect linked, next block %p referred as prev to %p (check from %p)", + ("block %p in physical list is incorrect linked, next block %p referred as prev to %p (check from %p)", block, block->pnext, block->pnext->pprev, point)); diff --git a/sql/sql_cache.h b/sql/sql_cache.h index 8575a11c311..822a6e5ed25 100644 --- a/sql/sql_cache.h +++ b/sql/sql_cache.h @@ -36,7 +36,7 @@ typedef struct st_changed_table_list CHANGED_TABLE_LIST; */ #define QUERY_CACHE_MIN_ALLOCATION_UNIT 512 -/* inittial size of hashes */ +/* initial size of hashes */ #define QUERY_CACHE_DEF_QUERY_HASH_SIZE 1024 #define QUERY_CACHE_DEF_TABLE_HASH_SIZE 1024 @@ -129,7 +129,7 @@ struct Query_cache_block size_t length; // length of all block size_t used; // length of data /* - Not used **pprev, **prev because really needed access to pervious block: + Not used **pprev, **prev because really needed access to previous block: *pprev to join free blocks *prev to access to opposite side of list in cyclic sorted list */ diff --git a/sql/sql_class.cc b/sql/sql_class.cc index c3e9d3b7b58..0fbacef1a95 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -1113,7 +1113,7 @@ Sql_condition* THD::raise_condition(const Sql_condition *cond) #ifdef WITH_WSREP /* Suppress warnings/errors if the wsrep THD is going to replay. 
The - deadlock/interrupted errors may be transitient and should not be + deadlock/interrupted errors may be transient and should not be reported to the client. */ if (wsrep_must_replay(this)) @@ -3389,7 +3389,7 @@ select_export::~select_export() create_file() thd Thread handle path File name - exchange Excange class + exchange Exchange class cache IO cache RETURN @@ -5195,7 +5195,7 @@ TABLE *get_purge_table(THD *thd) return thd->open_tables; } -/** Find an open table in the list of prelocked tabled +/** Find an open table in the list of prelocked tables Used for foreign key actions, for example, in UPDATE t1 SET a=1; where a child table t2 has a KB on t1.a. @@ -5237,7 +5237,7 @@ void destroy_thd(MYSQL_THD thd) } /** - Create a THD that only has auxilliary functions + Create a THD that only has auxiliary functions It will never be added to the global connection list server_threads. It does not represent any client connection. @@ -5758,17 +5758,17 @@ thd_need_ordering_with(const MYSQL_THD thd, const MYSQL_THD other_thd) /* If the storage engine detects a deadlock, and needs to choose a victim transaction to roll back, it can call this function to ask the upper - server layer for which of two possible transactions is prefered to be + server layer for which of two possible transactions is preferred to be aborted and rolled back. In parallel replication, if two transactions are running in parallel and one is fixed to commit before the other, then the one that commits later - will be prefered as the victim - chosing the early transaction as a victim + will be preferred as the victim - choosing the early transaction as a victim will not resolve the deadlock anyway, as the later transaction still needs to wait for the earlier to commit. 
- The return value is -1 if the first transaction is prefered as a deadlock - victim, 1 if the second transaction is prefered, or 0 for no preference (in + The return value is -1 if the first transaction is preferred as a deadlock + victim, 1 if the second transaction is preferred, or 0 for no preference (in which case the storage engine can make the choice as it prefers). */ extern "C" int @@ -5831,7 +5831,7 @@ extern "C" bool thd_binlog_filter_ok(const MYSQL_THD thd) } /* - This is similar to sqlcom_can_generate_row_events, with the expection + This is similar to sqlcom_can_generate_row_events, with the expectation that we only return 1 if we are going to generate row events in a transaction. CREATE OR REPLACE is always safe to do as this will run in it's own @@ -8018,7 +8018,7 @@ int THD::binlog_query(THD::enum_binlog_query_type qtype, char const *query_arg, Besides, we should not try to print these warnings if it is not possible to write statements to the binary log as it happens when - the execution is inside a function, or generaly speaking, when + the execution is inside a function, or generally speaking, when the variables.option_bits & OPTION_BIN_LOG is false. */ @@ -8720,7 +8720,7 @@ void AUTHID::copy(MEM_ROOT *mem_root, const LEX_CSTRING *user_name, /* Set from a string in 'user@host' format. - This method resebmles parse_user(), + This method resembles parse_user(), but does not need temporary buffers. */ void AUTHID::parse(const char *str, size_t length) diff --git a/sql/sql_class.h b/sql/sql_class.h index 2eff34f3ae0..bea09e62b11 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -1162,7 +1162,7 @@ public: Iterates registered threads. 
@param action called for every element - @param argument opque argument passed to action + @param argument opaque argument passed to action @return @retval 0 iteration completed successfully @@ -1226,7 +1226,7 @@ public: bool is_reprepared; #endif /* - The states relfects three diffrent life cycles for three + The state reflects three different life cycles for three different types of statements: Prepared statement: STMT_INITIALIZED -> STMT_PREPARED -> STMT_EXECUTED. Stored procedure: STMT_INITIALIZED_FOR_SP -> STMT_EXECUTED. @@ -2225,7 +2225,7 @@ public: - mask a warning/error and throw another one instead. When this method returns true, the sql condition is considered 'handled', and will not be propagated to upper layers. - It is the responsability of the code installing an internal handler + It is the responsibility of the code installing an internal handler to then check for trapped conditions, and implement logic to recover from the anticipated conditions trapped during runtime. @@ -3197,7 +3197,7 @@ public: chapter 'Miscellaneous functions', for functions GET_LOCK, RELEASE_LOCK. */ HASH ull_hash; - /* Hash of used seqeunces (for PREVIOUS value) */ + /* Hash of used sequences (for PREVIOUS value) */ HASH sequences; #ifdef DBUG_ASSERT_EXISTS uint dbug_sentry; // watch out for memory corruption @@ -3991,7 +3991,7 @@ public: /* If this is a slave, the name of the connection stored here. - This is used for taging error messages in the log files. + This is used for tagging error messages in the log files. 
*/ LEX_CSTRING connection_name; uint8 password; /* 0, 1 or 2 */ @@ -5461,7 +5461,7 @@ public: locked_tables_mode= mode_arg; } void leave_locked_tables_mode(); - /* Relesae transactional locks if there are no active transactions */ + /* Release transactional locks if there are no active transactions */ void release_transactional_locks() { if (!in_active_multi_stmt_transaction()) @@ -6262,7 +6262,7 @@ class select_result_interceptor; /* Interface for sending tabular data, together with some other stuff: - - Primary purpose seems to be seding typed tabular data: + - Primary purpose seems to be sending typed tabular data: = the DDL is sent with send_fields() = the rows are sent with send_data() Besides that, @@ -6414,7 +6414,7 @@ private: /* - Base class for select_result descendands which intercept and + Base class for select_result descendants which intercept and transform result set rows. As the rows are not sent to the client, sending of result set metadata should be suppressed as well. */ @@ -6724,7 +6724,7 @@ public: void abort_result_set() override; bool can_rollback_data() override { return 1; } - // Needed for access from local class MY_HOOKS in prepare(), since thd is proteted. + // Needed for access from local class MY_HOOKS in prepare(), since thd is protected. const THD *get_thd(void) { return thd; } const HA_CREATE_INFO *get_create_info() { return create_info; }; int prepare2(JOIN *join) override { return 0; } @@ -7114,7 +7114,7 @@ class select_union_recursive :public select_unit Function calls are forwarded to the wrapped select_result, but some functions are expected to be called only once for each query, so - they are only executed for the first SELECT in the union (execept + they are only executed for the first SELECT in the union (except for send_eof(), which is executed only for the last SELECT). 
This select_result is used when a UNION is not DISTINCT and doesn't @@ -7230,7 +7230,7 @@ public: /* This class specializes select_union to collect statistics about the - data stored in the temp table. Currently the class collects statistcs + data stored in the temp table. Currently the class collects statistics about NULLs. */ @@ -7257,9 +7257,9 @@ protected: */ uint max_nulls_in_row; /* - Count of rows writtent to the temp table. This is redundant as it is + Count of rows written to the temp table. This is redundant as it is already stored in handler::stats.records, however that one is relatively - expensive to compute (given we need that for evry row). + expensive to compute (given we need that for every row). */ ha_rows count_rows; diff --git a/sql/sql_connect.cc b/sql/sql_connect.cc index d048df2ca70..d63e00da078 100644 --- a/sql/sql_connect.cc +++ b/sql/sql_connect.cc @@ -17,7 +17,7 @@ */ /* - Functions to autenticate and handle reqests for a connection + Functions to authenticate and handle requests for a connection */ #include "mariadb.h" @@ -800,7 +800,7 @@ bool thd_init_client_charset(THD *thd, uint cs_number) b. preserve non-default collations as is Perhaps eventually we should change (b) also to resolve non-default - collations accoding to @@character_set_collations. Clients that used to + collations according to @@character_set_collations. Clients that used to send a non-default collation ID in the handshake packet will have to set @@character_set_collations instead. */ @@ -1126,7 +1126,7 @@ void setup_connection_thread_globals(THD *thd) /* - Autenticate user, with error reporting + Authenticate user, with error reporting SYNOPSIS login_connection() @@ -1465,7 +1465,7 @@ end_thread: This and close_with_error are only called if we didn't manage to create a new thd object. 
- Note: err can be 0 if unknown/not inportant + Note: err can be 0 if unknown/not important */ void CONNECT::close_and_delete(uint err) @@ -1495,7 +1495,7 @@ void CONNECT::close_and_delete(uint err) /* Close a connection with a possible error to the end user - Alse deletes the connection object, like close_and_delete() + Also deletes the connection object, like close_and_delete() */ void CONNECT::close_with_error(uint sql_errno, diff --git a/sql/sql_const.h b/sql/sql_const.h index 58e4a27dff8..50e43b12b24 100644 --- a/sql/sql_const.h +++ b/sql/sql_const.h @@ -216,7 +216,7 @@ #define MIN_ROWS_AFTER_FILTERING 1.0 /** - Number of rows in a reference table when refered through a not unique key. + Number of rows in a reference table when referred through a not unique key. This value is only used when we don't know anything about the key distribution. */ diff --git a/sql/sql_cte.cc b/sql/sql_cte.cc index b3434b42eb2..f142de631a7 100644 --- a/sql/sql_cte.cc +++ b/sql/sql_cte.cc @@ -692,7 +692,7 @@ With_element::check_dependencies_in_with_clause(With_clause *with_clause, /** @brief - Find mutually recursive with elements and check that they have ancors + Find mutually recursive with elements and check that they have anchors @details This method performs the following: @@ -821,7 +821,7 @@ bool With_clause::check_anchors() el->work_dep_map|= elem->work_dep_map; } } - /* If the transitive closure displays any cycle report an arror */ + /* If the transitive closure displays any cycle report an error */ elem= with_elem; while ((elem= elem->get_next_mutually_recursive()) != with_elem) { @@ -1336,7 +1336,7 @@ With_element *st_select_lex::find_table_def_in_with_clauses(TABLE_LIST *table, /* If sl->master_unit() is the spec of a with element then the search for a definition was already done by With_element::check_dependencies_in_spec - and it was unsuccesful. Yet for units cloned from the spec it has not + and it was unsuccessful. 
Yet for units cloned from the spec it has not been done yet. */ With_clause *attached_with_clause= sl->get_with_clause(); @@ -1439,7 +1439,7 @@ bool st_select_lex::check_unrestricted_recursive(bool only_standard_compliant) if (!with_elem ||!with_elem->is_recursive) { /* - If this select is not from the specifiocation of a with elememt or + If this select is not from the specification of a with element or if this not a recursive with element then there is nothing to check. */ return false; } diff --git a/sql/sql_cte.h b/sql/sql_cte.h index c5dacc5b495..769fcab15f6 100644 --- a/sql/sql_cte.h +++ b/sql/sql_cte.h @@ -62,7 +62,7 @@ public: @brief Definition of a CTE table It contains a reference to the name of the table introduced by this with element, - and a reference to the unit that specificies this table. Also it contains + and a reference to the unit that specifies this table. Also it contains a reference to the with clause to which this element belongs to. */ diff --git a/sql/sql_db.cc b/sql/sql_db.cc index 16ccc2bebb1..d7366b4f186 100644 --- a/sql/sql_db.cc +++ b/sql/sql_db.cc @@ -827,7 +827,7 @@ mysql_create_db_internal(THD *thd, const Lex_ident_db &db, /* We come here when we managed to create the database, but not the option file. In this case it's best to just continue as if nothing has - happened. (This is a very unlikely senario) + happened. 
(This is a very unlikely scenario) */ thd->clear_error(); } @@ -1433,7 +1433,7 @@ static bool find_db_tables_and_rm_known_files(THD *thd, MY_DIR *dirp, SYNOPSIS rm_dir_w_symlink() - org_path path of derictory + org_path path of directory send_error send errors RETURN 0 OK @@ -1989,7 +1989,7 @@ bool mysql_upgrade_db(THD *thd, const Lex_ident_db &old_db) LEX_CSTRING table_str; DBUG_PRINT("info",("Examining: %s", file->name)); - /* skiping non-FRM files */ + /* skipping non-FRM files */ if (!(extension= (char*) fn_frm_ext(file->name))) continue; @@ -2029,7 +2029,7 @@ bool mysql_upgrade_db(THD *thd, const Lex_ident_db &old_db) old database and some tables in the new database. Let's delete the option file, and then the new database directory. If some tables were left in the new directory, rmdir() will fail. - It garantees we never loose any tables. + It guarantees we never lose any tables. */ build_table_filename(path, sizeof(path)-1, new_db.str,"",MY_DB_OPT_FILE, 0); @@ -2078,7 +2078,7 @@ bool mysql_upgrade_db(THD *thd, const Lex_ident_db &old_db) char oldname[FN_REFLEN + 1], newname[FN_REFLEN + 1]; DBUG_PRINT("info",("Examining: %s", file->name)); - /* skiping MY_DB_OPT_FILE */ + /* skipping MY_DB_OPT_FILE */ if (!files_charset_info->strnncoll(Lex_cstring_strlen(file->name), Lex_cstring_strlen(MY_DB_OPT_FILE))) continue; diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc index 5764d09e9d8..12950c54347 100644 --- a/sql/sql_delete.cc +++ b/sql/sql_delete.cc @@ -834,7 +834,7 @@ bool Sql_cmd_delete::delete_from_single_table(THD *thd) /* From SQL2016, Part 2, 15.7 , - General Rules, 8), we can conclude that DELETE FOR PORTTION OF time performs + General Rules, 8), we can conclude that DELETE FOR PORTION OF time performs 0-2 INSERTS + DELETE. We can substitute INSERT+DELETE with one UPDATE, with a condition of no side effects. 
The side effect is possible if there is a BEFORE INSERT trigger, since it is the only one splitting DELETE and INSERT @@ -1214,7 +1214,7 @@ multi_delete::initialize_tables(JOIN *join) /* If the table we are going to delete from appears in join, we need to defer delete. So the delete - doesn't interfers with the scaning of results. + doesn't interfere with the scanning of results. */ delete_while_scanning= false; } @@ -1999,7 +1999,7 @@ bool Sql_cmd_delete::prepare_inner(THD *thd) } /* - Reset the exclude flag to false so it doesn't interfare + Reset the exclude flag to false so it doesn't interfere with further calls to unique_table */ lex->first_select_lex()->exclude_from_table_unique_test= FALSE; diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc index 2a8ab83bd3e..232d473378d 100644 --- a/sql/sql_derived.cc +++ b/sql/sql_derived.cc @@ -702,7 +702,7 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived) { /* Here 'derived' is either a non-recursive table reference to a recursive - with table or a recursive table reference to a recursvive table whose + with table or a recursive table reference to a recursive table whose specification has been already prepared (a secondary recursive table reference. */ diff --git a/sql/sql_error.cc b/sql/sql_error.cc index 3167a3f500e..0352532fd11 100644 --- a/sql/sql_error.cc +++ b/sql/sql_error.cc @@ -981,7 +981,7 @@ size_t err_conv(char *buff, uint to_length, const char *from, @param to buffer to convert @param to_length buffer length - @param to_cs chraset to convert + @param to_cs charset to convert @param from string from convert @param from_length string length @param from_cs charset from convert diff --git a/sql/sql_error.h b/sql/sql_error.h index 89ef08610cd..984ed227ea9 100644 --- a/sql/sql_error.h +++ b/sql/sql_error.h @@ -201,7 +201,7 @@ public: /* class Sql_user_condition_identity. - Instances of this class uniquely idetify user defined conditions (EXCEPTION). 
+ Instances of this class uniquely identify user defined conditions (EXCEPTION). SET sql_mode=ORACLE; CREATE PROCEDURE p1 @@ -403,7 +403,7 @@ private: /** Default constructor. - This constructor is usefull when allocating arrays. + This constructor is useful when allocating arrays. Note that the init() method should be called to complete the Sql_condition. */ Sql_condition() diff --git a/sql/sql_explain.cc b/sql/sql_explain.cc index 655b43099fc..923e83b4295 100644 --- a/sql/sql_explain.cc +++ b/sql/sql_explain.cc @@ -1377,7 +1377,7 @@ void Explain_table_access::push_extra(enum explain_extra_tag extra_tag) /* - Put the contents of 'key' field of EXPLAIN otuput into key_str. + Put the contents of 'key' field of EXPLAIN output into key_str. It is surprisingly complex: - hash join shows #hash#used_key @@ -1427,7 +1427,7 @@ void Explain_table_access::fill_key_str(String *key_str, bool is_json) const - for hash join, it is key_len:pseudo_key_len - [tabular form only] rowid filter length is added after "|". - In JSON, we consider this column to be legacy, it is superceded by + In JSON, we consider this column to be legacy, it is superseded by used_key_parts. 
*/ @@ -1753,7 +1753,7 @@ int Explain_table_access::print_explain(select_result_sink *output, @return NULL - out of memory error - poiner on allocated copy of the string + pointer on allocated copy of the string */ const char *String_list::append_str(MEM_ROOT *mem_root, const char *str) @@ -3029,13 +3029,13 @@ void create_explain_query_if_not_exists(LEX *lex, MEM_ROOT *mem_root) /** - Build arrays for collectiong keys statistics, sdd possible key names + Build arrays for collecting keys statistics, add possible key names to the list and name array @param alloc MEM_ROOT to put data in @param list list of possible key names to fill @param table table of the keys - @patam possible_keys possible keys map + @param possible_keys possible keys map @retval 0 - OK @retval 1 - Error diff --git a/sql/sql_explain.h b/sql/sql_explain.h index b1d8226ea01..8bd91252f4b 100644 --- a/sql/sql_explain.h +++ b/sql/sql_explain.h @@ -201,7 +201,7 @@ class Explain_aggr_node; 1. A degenerate case. In this case, message!=NULL, and it contains a description of what kind of degenerate case it is (e.g. "Impossible WHERE"). - 2. a non-degenrate join. In this case, join_tabs describes the join. + 2. a non-degenerate join. In this case, join_tabs describes the join. In the non-degenerate case, a SELECT may have a GROUP BY/ORDER BY operation. @@ -620,7 +620,7 @@ public: bool incremental; /* - NULL if no join buferring used. + NULL if no join buffering used. Other values: BNL, BNLH, BKA, BKAH. */ const char *join_alg; diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc index 8aa619d28ab..c5edeb50f51 100644 --- a/sql/sql_handler.cc +++ b/sql/sql_handler.cc @@ -27,7 +27,7 @@ The problem here is that mysql_parse calls free_item to free all the items allocated at the end of every query. The workaround would to keep two item lists per THD - normal free_list and handler_items. - The second is to be freeed only on thread end. mysql_ha_open should + The second is to be freed only on thread end. 
mysql_ha_open should then do { handler_items=concat(handler_items, free_list); free_list=0; } But !!! do_command calls free_root at the end of every query and frees up @@ -102,7 +102,7 @@ SQL_HANDLER::~SQL_HANDLER() The hash object is an TABLE_LIST struct. The hash key is the alias name. The hash key length is the alias name length plus one for the - terminateing NUL character. + terminating NULL character. RETURN Pointer to the TABLE_LIST struct. @@ -186,7 +186,7 @@ static void mysql_ha_close_childs(THD *thd, TABLE_LIST *current_table_list, @note Though this function takes a list of tables, only the first list entry will be closed. - @mote handler_object is not deleted! + @note handler_object is not deleted! @note Broadcasts refresh if it closed a table with old version. */ @@ -601,7 +601,7 @@ static SQL_HANDLER *mysql_ha_find_handler(THD *thd, const LEX_CSTRING *name) @param keyname Key to use. @param key_expr List of key column values @param cond Where clause - @param in_prepare If we are in prepare phase (we can't evalute items yet) + @param in_prepare If we are in prepare phase (we can't evaluate items yet) @return 0 ok @return 1 error diff --git a/sql/sql_help.cc b/sql/sql_help.cc index 623f08e9913..a028c219936 100644 --- a/sql/sql_help.cc +++ b/sql/sql_help.cc @@ -270,7 +270,7 @@ int search_keyword(THD *thd, TABLE *keywords, while (!read_record_info.read_record() && count<2) { - if (!select->cond->val_bool()) // Dosn't match like + if (!select->cond->val_bool()) // Doesn't match like continue; *key_id= (int)find_fields[help_keyword_help_keyword_id].field->val_int(); @@ -515,7 +515,7 @@ static bool send_answer_1_metadata(Protocol *protocol) RETURN VALUES 1 Writing of head failed -1 Writing of row failed - 0 Successeful send + 0 Successful send */ static int send_answer_1(Protocol *protocol, String *s1, String *s2, String *s3) @@ -631,7 +631,7 @@ extern "C" int string_ptr_cmp(const void* ptr1, const void* ptr2) RETURN VALUES -1 Writing fail - 0 Data was 
successefully send + 0 Data was successfully send */ int send_variant_2_list(MEM_ROOT *mem_root, Protocol *protocol, diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 4d237fca3e7..ba9d576a91e 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -656,7 +656,7 @@ bool open_and_lock_for_insert_delayed(THD *thd, TABLE_LIST *table_list) /** Create a new query string for removing DELAYED keyword for - multi INSERT DEALAYED statement. + multi INSERT DELAYED statement. @param[in] thd Thread handler @param[in] buf Query string @@ -820,7 +820,7 @@ bool mysql_insert(THD *thd, TABLE_LIST *table_list, { /* It is RETURNING which needs network buffer to write result set and - it is array binfing which need network buffer to read parameters. + it is array binding which needs network buffer to read parameters. So we allocate yet another network buffer. The old buffer will be freed at the end of operation. */ @@ -1465,7 +1465,7 @@ abort: fields - fields used in insert IMPLEMENTATION - A view is insertable if the folloings are true: + A view is insertable if the followings are true: - All columns in the view are columns from a table - All not used columns in table have a default values - All field in view are unique (not referring to the same column) @@ -1568,7 +1568,7 @@ static bool check_view_insertability(THD *thd, TABLE_LIST *view, @return 0 if no error - ER_NOT_SUPPORTED_YET if the above condidion was met + ER_NOT_SUPPORTED_YET if the above condition was met */ int check_duplic_insert_without_overlaps(THD *thd, TABLE *table, enum_duplicates duplic) @@ -3597,7 +3597,7 @@ pthread_handler_t handle_delayed_insert(void *arg) /* Protect against mdl_locks trying to access open tables We use KILL_CONNECTION_HARD here to ensure that - THD::notify_shared_lock() dosn't try to access open tables after + THD::notify_shared_lock() doesn't try to access open tables after this. 
*/ mysql_mutex_lock(&thd->LOCK_thd_data); @@ -4125,7 +4125,7 @@ select_insert::prepare(List &values, SELECT_LEX_UNIT *u) When we are not using GROUP BY and there are no ungrouped aggregate functions we can refer to other tables in the ON DUPLICATE KEY part. We use next_name_resolution_table - descructively, so check it first (views?) + destructively, so check it first (views?) */ DBUG_ASSERT (!table_list->next_name_resolution_table); if (lex->first_select_lex()->group_list.elements == 0 && @@ -4262,7 +4262,7 @@ select_insert::prepare(List &values, SELECT_LEX_UNIT *u) DESCRIPTION If the result table is the same as one of the source tables (INSERT SELECT), the result table is not finally prepared at the - join prepair phase. Do the final preparation now. + join prepare phase. Do the final preparation now. RETURN 0 OK @@ -4350,7 +4350,7 @@ int select_insert::send_data(List &values) Restore fields of the record since it is possible that they were changed by ON DUPLICATE KEY UPDATE clause. - If triggers exist then whey can modify some fields which were not + If triggers exist then they can modify some fields which were not originally touched by INSERT ... SELECT, so we have to restore their original values for the next row. */ diff --git a/sql/sql_join_cache.cc b/sql/sql_join_cache.cc index 3f012629d6b..b719c280ba3 100644 --- a/sql/sql_join_cache.cc +++ b/sql/sql_join_cache.cc @@ -53,7 +53,7 @@ static void save_or_restore_used_tabs(JOIN_TAB *join_tab, bool save); the field value is to be copied and the length of the copied fragment. Before returning the result the function increments the value of *field by 1. - The function ignores the fields 'blob_length' and 'ofset' of the + The function ignores the fields 'blob_length' and 'offset' of the descriptor. RETURN VALUE @@ -194,7 +194,7 @@ void JOIN_CACHE::calc_record_fields() We will need to store columns of SJ-inner tables (it_X_Y.*), but we're not interested in storing the columns of materialization tables - themselves. 
Beause of that, if the first non-const top-level table is a + themselves. Because of that, if the first non-const top-level table is a materialized table, we move to its bush_children: */ tab= join->join_tab + join->const_tables; @@ -643,7 +643,7 @@ void JOIN_CACHE::create_remaining_fields() used to store record lengths. The function also calculates the maximal length of the representation of record in the cache excluding blob_data. This value is used when - making a dicision whether more records should be added into the join + making a decision whether more records should be added into the join buffer or not. RETURN VALUE @@ -1266,7 +1266,7 @@ bool JOIN_CACHE::check_emb_key_usage() - null bitmaps for all tables, - null row flags for all tables (4) values of all data fields including - - full images of those fixed legth data fields that cannot have + - full images of those fixed length data fields that cannot have trailing spaces - significant part of fixed length fields that can have trailing spaces with the prepanded length @@ -2645,7 +2645,7 @@ inline bool JOIN_CACHE::check_match(uchar *rec_ptr) NOTES The same implementation of the virtual method join_null_complements - is used for BNL/BNLH/BKA/BKA join algorthm. + is used for BNL/BNLH/BKA/BKA join algorithm. RETURN VALUE return one of enum_nested_loop_state. @@ -4728,7 +4728,7 @@ DESCRIPTION matching the record loaded into the record buffer for join_tab when performing join operation by BKAH join algorithm. With BKAH algorithm, if association labels are used, then record loaded into the record buffer - for join_tab always has a direct reference to the chain of the mathing + for join_tab always has a direct reference to the chain of the matching records from the join buffer. If association labels are not used then then the chain of the matching records is obtained by the call of the get_key_chain_by_join_key function. 
diff --git a/sql/sql_join_cache.h b/sql/sql_join_cache.h index bfed7afd4fb..514e9b851ca 100644 --- a/sql/sql_join_cache.h +++ b/sql/sql_join_cache.h @@ -1038,7 +1038,7 @@ public: /* The class JOIN_TAB_SCAN is a companion class for the classes JOIN_CACHE_BNL and JOIN_CACHE_BNLH. Actually the class implements the iterator over the - table joinded by BNL/BNLH join algorithm. + table joined by BNL/BNLH join algorithm. The virtual functions open, next and close are called for any iteration over the table. The function open is called to initiate the process of the iteration. The function next shall read the next record from the joined @@ -1377,7 +1377,7 @@ private: This flag is set to TRUE if the implementation of the MRR interface cannot handle range association labels and does not return them to the caller of the multi_range_read_next handler function. E.g. the implementation of - the MRR inteface for the Falcon engine could not return association + the MRR interface for the Falcon engine could not return association labels to the caller of multi_range_read_next. The flag is set by JOIN_CACHE_BKA::init() and is not ever changed. */ diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index c473bec574d..64d0de5fc26 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -332,7 +332,7 @@ Item* handle_sql2003_note184_exception(THD *thd, Item* left, bool equal, should be re-interpreted as an Item_in_subselect, which corresponds to a when used inside an . - Our reading of Note 184 is reccursive, so that all: + Our reading of Note 184 is recursive, so that all: - IN (( )) - IN ((( ))) - IN '('^N ')'^N @@ -435,7 +435,7 @@ bool sp_create_assignment_lex(THD *thd, const char *pos) @param no_lookahead - True if the parser has no lookahead @param rhs_value_str - a string value for right hand side of assignment @param need_set_keyword - if a SET statement "SET a=10", - or a direct assignment overwise "a:=10" + or a direct assignment otherwise "a:=10" @return false if success, true otherwise. 
*/ @@ -503,7 +503,7 @@ bool sp_create_assignment_instr(THD *thd, bool no_lookahead, if (lex->check_main_unit_semantics()) { /* - "lex" can be referrenced by: + "lex" can be referenced by: - sp_instr_set SET a= expr; - sp_instr_set_row_field SET r.a= expr; - sp_instr_stmt (just generated above) SET @a= expr; @@ -3223,7 +3223,7 @@ void st_select_lex_node::substitute_in_tree(st_select_lex_node *subst) SYNOPSYS st_select_lex_node::include_standalone() - upper - reference on node underr which this node should be included + upper - reference on node under which this node should be included ref - references on reference on this node */ void st_select_lex_node::include_standalone(st_select_lex_node *upper, @@ -3362,7 +3362,7 @@ void st_select_lex_node::exclude() st_select_lex_unit::exclude_level() NOTE: units which belong to current will be brought up on level of - currernt unit + current unit */ void st_select_lex_unit::exclude_level() { @@ -3431,7 +3431,7 @@ bool st_select_lex::mark_as_dependent(THD *thd, st_select_lex *last, found table as depended (of select where was found table) We move by name resolution context, bacause during merge can some select - be excleded from SELECT tree + be excluded from SELECT tree */ Name_resolution_context *c= &this->context; do @@ -3691,7 +3691,7 @@ bool st_select_lex::setup_ref_array(THD *thd, uint order_group_num) @detail The intent is to allow to eventually print back any query. - This is useful e.g. for storage engines that take over diferrent kinds of + This is useful e.g. for storage engines that take over different kinds of queries */ @@ -3942,7 +3942,7 @@ void LEX::cleanup_lex_after_parse_error(THD *thd) Here the variable 'i' references to the instruction that could be deleted by sp_head's destructor and it would result in server abnormal termination. 
This use case can theoretically happen in case the current stored routine's - instruction causes re-compilation of a SP intruction's statement and + instruction causes re-compilation of a SP instruction's statement and internal parse error happens during this process. Rather, just restore the original LEX object used before parser has been @@ -4278,7 +4278,7 @@ bool LEX::need_correct_ident() view given view NOTE - It have not sense to set CHECK OPTION for SELECT satement or subqueries, + It have not sense to set CHECK OPTION for SELECT statement or subqueries, so we do not. RETURN @@ -4449,7 +4449,7 @@ void LEX::set_trg_event_type_for_tables() trg2bit(TRG_EVENT_DELETE); break; /* - Basic INSERT. If there is an additional ON DUPLIATE KEY UPDATE + Basic INSERT. If there is an additional ON DUPLICATE KEY UPDATE clause, it will be handled later in this method. */ case SQLCOM_INSERT: /* fall through */ @@ -4719,7 +4719,7 @@ void LEX::cleanup_after_one_table_open() { derived_tables= 0; first_select_lex()->exclude_from_table_unique_test= false; - /* cleunup underlying units (units of VIEW) */ + /* cleanup underlying units (units of VIEW) */ for (SELECT_LEX_UNIT *un= first_select_lex()->first_inner_unit(); un; un= un->next_unit()) @@ -4734,7 +4734,7 @@ void LEX::cleanup_after_one_table_open() /* Save current state of Query_tables_list for this LEX, and prepare it - for processing of new statemnt. + for processing of new statement. SYNOPSIS reset_n_backup_query_tables_list() @@ -5076,7 +5076,7 @@ bool st_select_lex::optimize_unflattened_subqueries(bool const_only) { /* If at least one subquery in a union is non-empty, the UNION result - is non-empty. If there is no UNION, the only subquery is non-empy. + is non-empty. If there is no UNION, the only subquery is non-empty. 
*/ empty_union_result= inner_join->empty_result(); } @@ -5192,7 +5192,7 @@ void st_select_lex::append_table_to_list(TABLE_LIST *TABLE_LIST::*link, Replace given table from the leaf_tables list for a list of tables @param table Table to replace - @param list List to substititute the table for + @param list List to substitute the table for @details Replace 'table' from the leaf_tables list for a list of tables 'tbl_list'. @@ -5847,7 +5847,7 @@ bool LEX::save_prep_leaf_tables() Query_arena *arena= thd->stmt_arena, backup; arena= thd->activate_stmt_arena_if_needed(&backup); - //It is used for DETETE/UPDATE so top level has only one SELECT + //It is used for DELETE/UPDATE so top level has only one SELECT DBUG_ASSERT(first_select_lex()->next_select() == NULL); bool res= first_select_lex()->save_prep_leaf_tables(thd); @@ -8979,7 +8979,7 @@ uint binlog_unsafe_map[256]; Sets the combination given by "a" and "b" and automatically combinations given by other types of access, i.e. 2^(8 - 2), as unsafe. - It may happen a colision when automatically defining a combination as unsafe. + It may make a collision when automatically defining a combination as unsafe. For that reason, a combination has its unsafe condition redefined only when the new_condition is greater then the old. 
For instance, @@ -9113,7 +9113,7 @@ void binlog_unsafe_map_init() /** @brief - Collect fiels that are used in the GROUP BY of this st_select_lex + Collect fields that are used in the GROUP BY of this st_select_lex @param thd The thread handle @@ -9175,7 +9175,7 @@ bool st_select_lex::collect_grouping_fields(THD *thd) /** @brief - For a condition check possibility of exraction a formula over grouping fields + For a condition check possibility of extraction a formula over grouping fields @param thd The thread handle @param cond The condition whose subformulas are to be analyzed @@ -9189,7 +9189,7 @@ bool st_select_lex::collect_grouping_fields(THD *thd) the call-back parameter checker to check whether a primary formula depends only on grouping fields. The subformulas that are not usable are marked with the flag MARKER_NO_EXTRACTION. - The subformulas that can be entierly extracted are marked with the flag + The subformulas that can be entirely extracted are marked with the flag MARKER_FULL_EXTRACTION. @note This method is called before any call of extract_cond_for_grouping_fields. @@ -9269,7 +9269,7 @@ st_select_lex::check_cond_extraction_for_grouping_fields(THD *thd, Item *cond) to figure out whether a subformula depends only on these fields or not. @note The built condition C is always implied by the condition cond - (cond => C). The method tries to build the least restictive such + (cond => C). The method tries to build the least restrictive such condition (i.e. for any other condition C' such that cond => C' we have C => C'). @note @@ -10713,7 +10713,7 @@ LEX::add_primary_to_query_expression_body(SELECT_LEX_UNIT *unit, /** Add query primary to a parenthesized query primary - pruducing a new query expression body + producing a new query expression body */ SELECT_LEX_UNIT * @@ -11302,7 +11302,7 @@ void st_select_lex::pushdown_cond_into_where_clause(THD *thd, Item *cond, (dt.a>2) OR (dt.a<3) condition from or1 again and push it into WHERE. 
This will cause duplicate conditions in WHERE of dt. - To avoid repeatable pushdown such OR conditions as or1 describen + To avoid repeatable pushdown such OR conditions as or1 described above are marked with MARKER_NO_EXTRACTION. @note @@ -12510,7 +12510,7 @@ bool LEX::sp_create_set_password_instr(THD *thd, @param pos - The position of the keyword `NAMES` inside the query @param cs - The character set part, or nullptr if DEFAULT @param cl - The collation (explicit or contextually typed) - @param no_lookahead - The tokinizer lookahead state + @param no_lookahead - The tokenizer lookahead state */ bool LEX::set_names(const char *pos, CHARSET_INFO *cs, diff --git a/sql/sql_lex.h b/sql/sql_lex.h index f0757395466..2ea96fd1764 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -956,7 +956,7 @@ class st_select_lex: public st_select_lex_node public: /* Currently the field first_nested is used only by parser. - It containa either a reference to the first select + It contains either a reference to the first select of the nest of selects to which 'this' belongs to, or in the case of priority jump it contains a reference to the select to which the priority nest has to be attached to. 
@@ -3340,7 +3340,7 @@ public: bool next_is_down:1; // use "main" SELECT_LEX for nrxt allocation; /* field_list was created for view and should be removed before PS/SP - rexecuton + reexecution */ bool empty_field_list_on_rset:1; /** @@ -3442,7 +3442,7 @@ public: Event_parse_data *event_parse_data; - /* Characterstics of trigger being created */ + /* Characteristics of trigger being created */ st_trg_chistics trg_chistics; /* @@ -4010,7 +4010,7 @@ public: const Lex_ident_sys_st *a, const Lex_ident_sys_st *b); /* - Create an Item corresponding to a ROW field valiable: var.field + Create an Item corresponding to a ROW field variable: var.field @param THD - THD, for mem_root @param rh [OUT] - the rcontext handler (local vs package variables) @param var - the ROW variable name @@ -4682,7 +4682,7 @@ public: @retval 0 ok @retval - 1 error ; In this case the error messege is sent to the client + 1 error ; In this case the error message is sent to the client */ bool check_simple_select(const LEX_CSTRING *option) { diff --git a/sql/sql_limit.h b/sql/sql_limit.h index 335aff9d215..dc02da3ef1f 100644 --- a/sql/sql_limit.h +++ b/sql/sql_limit.h @@ -43,7 +43,7 @@ class Select_limit_counters select_limit_cnt= limit; with_ties= with_ties_arg; /* - Guard against an overflow condition, where limit + offset exceede + Guard against an overflow condition, where limit + offset exceeds ha_rows value range. This case covers unreasonably large parameter values that do not have any practical use so assuming in this case that the query does not have a limit is fine. 
diff --git a/sql/sql_load.cc b/sql/sql_load.cc index 5cd86906c0d..a785df0347c 100644 --- a/sql/sql_load.cc +++ b/sql/sql_load.cc @@ -191,7 +191,7 @@ class READ_INFO: public Load_data_param For example, suppose we have an ujis file with bytes 0x8FA10A, where: - 0x8FA1 is an incomplete prefix of a 3-byte character (it should be [8F][A1-FE][A1-FE] to make a full 3-byte character) - - 0x0A is a line demiliter + - 0x0A is a line delimiter This file has some broken data, the trailing [A1-FE] is missing. In this example it works as follows: @@ -421,7 +421,7 @@ int mysql_load(THD *thd, const sql_exchange *ex, TABLE_LIST *table_list, INSERT_ACL | UPDATE_ACL, INSERT_ACL | UPDATE_ACL, FALSE)) DBUG_RETURN(-1); - if (!table_list->table || // do not suport join view + if (!table_list->table || // do not support join view !table_list->single_table_updatable() || // and derived tables check_key_in_view(thd, table_list)) { @@ -1008,7 +1008,7 @@ static bool write_execute_load_query_log_event(THD *thd, const sql_exchange* ex, #endif /**************************************************************************** -** Read of rows of fixed size + optional garage + optonal newline +** Read of rows of fixed size + optional garbage + optional newline ****************************************************************************/ static int @@ -1080,7 +1080,7 @@ read_fixed_length(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, uchar save_chr; if ((length=(uint) (read_info.row_end - pos)) > fixed_length) length= fixed_length; - save_chr= pos[length]; pos[length]= '\0'; // Safeguard aganst malloc + save_chr= pos[length]; pos[length]= '\0'; // Safeguard against malloc dst->load_data_set_value(thd, (const char *) pos, length, &read_info); pos[length]= save_chr; if ((pos+= length) > read_info.row_end) @@ -1420,7 +1420,7 @@ read_xml_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, char READ_INFO::unescape(char chr) { - /* keep this switch synchornous with the ESCAPE_CHARS macro */ + /* keep 
this switch synchronous with the ESCAPE_CHARS macro */ switch(chr) { case 'n': return '\n'; case 't': return '\t'; diff --git a/sql/sql_mode.h b/sql/sql_mode.h index 12eac1e83ae..9068878c477 100644 --- a/sql/sql_mode.h +++ b/sql/sql_mode.h @@ -22,13 +22,13 @@ class Sql_mode_dependency A combination of hard and soft dependency on sql_mode. - Used to watch if a GENERATED ALWAYS AS expression guarantees consitent + Used to watch if a GENERATED ALWAYS AS expression guarantees consistent data written to its virtual column. A virtual column can appear in an index if: - the generation expression does not depend on any sql_mode flags, or - the generation expression has a soft dependency on an sql_mode flag, - and the column knows how to handle this dependeny. + and the column knows how to handle this dependency. A virtual column cannot appear in an index if: - its generation expression has a hard dependency diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index b18cf0a1076..787873c4d72 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -2811,7 +2811,7 @@ bool sp_process_definer(THD *thd) case, we should assign CURRENT_USER as definer. - Our slave received an updated from the master, that does not - replicate definer for stored rountines. We should also assign + replicate definer for stored routines. We should also assign CURRENT_USER as definer here, but also we should mark this routine as NON-SUID. This is essential for the sake of backward compatibility. @@ -2963,7 +2963,7 @@ retry: { /* Deadlock occurred during upgrade of metadata lock. - Let us restart acquring and opening tables for LOCK TABLES. + Let us restart acquiring and opening tables for LOCK TABLES. */ close_tables_for_reopen(thd, &tables, mdl_savepoint, true); if (thd->open_temporary_tables(tables)) @@ -2990,7 +2990,7 @@ retry: Either definer or invoker has to have PRIV_LOCK_TABLES to be able to lock view and its tables. 
For mysqldump (that locks views before dumping their structures) compatibility we allow locking - views that select from I_S or P_S tables, but downrade the lock + views that select from I_S or P_S tables, but downgrade the lock to TL_READ */ if (table->belong_to_view && @@ -6852,7 +6852,7 @@ bool check_one_table_access(THD *thd, privilege_t privilege, if (check_single_table_access (thd,privilege,all_tables, FALSE)) return 1; - /* Check rights on tables of subselects and implictly opened tables */ + /* Check rights on tables of subselects and implicitly opened tables */ TABLE_LIST *subselects_tables, *view= all_tables->view ? all_tables : 0; if ((subselects_tables= all_tables->next_global)) { diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc index 5d1cf53afc1..366b84843e7 100644 --- a/sql/sql_partition.cc +++ b/sql/sql_partition.cc @@ -1909,7 +1909,7 @@ bool check_part_func_fields(Field **ptr, bool ok_with_charsets) NOTES This function is called as part of opening the table by opening the .frm file. It is a part of CREATE TABLE to do this so it is quite permissible - that errors due to erroneus syntax isn't found until we come here. + that errors due to erroneous syntax isn't found until we come here. If the user has used a non-existing field in the table is one such example of an error that is not discovered until here. */ @@ -2131,8 +2131,8 @@ static int add_keyword_string(String *str, const char *keyword, /** @brief Truncate the partition file name from a path it it exists. - @note A partition file name will contian one or more '#' characters. -One of the occurances of '#' will be either "#P#" or "#p#" depending + @note A partition file name will contain one or more '#' characters. +One of the occurrences of '#' will be either "#P#" or "#p#" depending on whether the storage engine has converted the filename to lower case. */ void truncate_partition_filename(char *path) @@ -2456,7 +2456,7 @@ end: /** - Add 'KEY' word, with optional 'ALGORTIHM = N'. 
+ Add 'KEY' word, with optional 'ALGORITHM = N'. @param str String to write to. @param part_info partition_info holding the used key_algorithm @@ -4769,7 +4769,7 @@ bool set_part_state(Alter_info *alter_info, partition_info *tab_part_info, @retval FALSE if they are equal, otherwise TRUE. - @note Any differens that would cause a change in the frm file is prohibited. + @note Any differences that would cause a change in the frm file is prohibited. Such options as data_file_name, index_file_name, min_rows, max_rows etc. are not allowed to differ. But comment is allowed to differ. */ @@ -5125,7 +5125,7 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info, } /* Adding history partitions to non-history partitioning or - non-history parittions to history partitioning is prohibited. + non-history partitions to history partitioning is prohibited. */ else if (thd->work_part_info->part_type == VERSIONING_PARTITION || tab_part_info->part_type == VERSIONING_PARTITION) @@ -7737,7 +7737,7 @@ uint fast_alter_partition_table(THD *thd, TABLE *table, can release all other locks on the table and since no one can open the table, there can be no new threads accessing the table. They will be hanging on this exclusive lock. - 3) Write an entry to remove the new parttions if crash occurs + 3) Write an entry to remove the new partitions if crash occurs 4) Add the new partitions. 5) Close all instances of the table and remove them from the table cache. 
6) Old place for write binlog diff --git a/sql/sql_partition_admin.cc b/sql/sql_partition_admin.cc index 1876d025801..9cc29391d76 100644 --- a/sql/sql_partition_admin.cc +++ b/sql/sql_partition_admin.cc @@ -152,7 +152,7 @@ static bool check_exchange_partition(TABLE *table, TABLE *part_table) if (unlikely(part_table->file->ht != partition_hton)) { /* - Only allowed on partitioned tables throught the generic ha_partition + Only allowed on partitioned tables through the generic ha_partition handler, i.e not yet for native partitioning. */ my_error(ER_PARTITION_MGMT_ON_NONPARTITIONED, MYF(0)); @@ -327,7 +327,7 @@ bool compare_table_with_partition(THD *thd, TABLE *table, TABLE *part_table, @param thd Thread handle @param name name of table/partition 1 (to be exchanged with 2) @param from_name name of table/partition 2 (to be exchanged with 1) - @param tmp_name temporary name to use while exchaning + @param tmp_name temporary name to use while exchanging @param ht handlerton of the table/partitions @return Operation status diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc index 0c297db34be..c71dbf88026 100644 --- a/sql/sql_plugin.cc +++ b/sql/sql_plugin.cc @@ -238,7 +238,7 @@ static struct /* A mutex LOCK_plugin must be acquired before accessing the following variables/structures. - We are always manipulating ref count, so a rwlock here is unneccessary. + We are always manipulating ref count, so a rwlock here is unnecessary. */ mysql_mutex_t LOCK_plugin; static DYNAMIC_ARRAY plugin_dl_array; @@ -3824,7 +3824,7 @@ void plugin_opt_set_limits(struct my_option *options, The set is stored in the pre-allocated static array supplied to the function. The size of the array is calculated as (number_of_plugin_varaibles*2+3). The - reason is that each option can have a prefix '--plugin-' in addtion to the + reason is that each option can have a prefix '--plugin-' in addition to the shorter form '--<plugin-name>'. There is also space allocated for terminating NULL pointers. 
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index 6c50f8b4018..4654cf857cd 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -1974,7 +1974,7 @@ static bool mysql_test_create_view(Prepared_statement *stmt) res= select_like_stmt_test(stmt, 0, 0); err: - /* put view back for PS rexecuting */ + /* put view back for PS reexecuting */ lex->link_first_table_back(view, link_to_local); DBUG_RETURN(res); } @@ -2616,7 +2616,7 @@ end: mysql_sql_stmt_prepare() and mysql_sql_stmt_execute_immediate() call get_dynamic_sql_string() and then call respectively - Prepare_statement::prepare() and Prepare_statment::execute_immediate(), + Prepare_statement::prepare() and Prepare_statement::execute_immediate(), who store the returned result into its permanent location using alloc_query(). "buffer" is still not destructed at that time. @@ -4312,7 +4312,7 @@ bool Prepared_statement::prepare(const char *packet, uint packet_len) NULL in case of SQL PS @param packet_end end of the packet. NULL in case of SQL PS - @todo Use a paremeter source class family instead of 'if's, and + @todo Use a parameter source class family instead of 'if's, and support stored procedure variables. @retval TRUE an error occurred when assigning a parameter (likely @@ -4926,7 +4926,7 @@ Prepared_statement::swap_prepared_statement(Prepared_statement *copy) @param expanded_query A query for binlogging which has all parameter markers ('?') replaced with their actual values. @param open_cursor True if an attempt to open a cursor should be made. - Currenlty used only in the binary protocol. + Currently used only in the binary protocol. @note Preconditions, postconditions. 
@@ -5126,7 +5126,7 @@ bool Prepared_statement::execute(String *expanded_query, bool open_cursor) SET STATEMENT clause is performed on return from the method Prepared_statement::execute(), by the time the function log_slow_statement() be invoked from the function dispatch_command() all variables set by - the SET STATEMEN clause would be already reset to their original values + the SET STATEMENT clause would be already reset to their original values that break semantic of the SET STATEMENT clause. E.g., lets consider the following statements diff --git a/sql/sql_priv.h b/sql/sql_priv.h index c40068751eb..42b95bccd1d 100644 --- a/sql/sql_priv.h +++ b/sql/sql_priv.h @@ -268,7 +268,7 @@ /* - Uncachable causes: + Uncacheable causes: */ /* This subquery has fields from outer query (put by user) */ #define UNCACHEABLE_DEPENDENT_GENERATED 1 diff --git a/sql/sql_profile.h b/sql/sql_profile.h index 57a675d8b39..4678702ca63 100644 --- a/sql/sql_profile.h +++ b/sql/sql_profile.h @@ -280,7 +280,7 @@ public: At a point in execution where we know the query source, save the text of it in the query profile. - This must be called exactly once per descrete statement. + This must be called exactly once per discrete statement. */ void set_query_source(char *query_source_arg, size_t query_length_arg) { diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc index 2dcefca699a..7174d8ae7e2 100644 --- a/sql/sql_repl.cc +++ b/sql/sql_repl.cc @@ -3432,7 +3432,7 @@ int start_slave(THD* thd , Master_info* mi, bool net_report) /* Below we will start all stopped threads. But if the user wants to start only one thread, do as if the other thread was running (as we - don't wan't to touch the other thread), so set the bit to 0 for the + don't want to touch the other thread), so set the bit to 0 for the other thread */ if (thd->lex->slave_thd_opt) @@ -3603,7 +3603,7 @@ int stop_slave(THD* thd, Master_info* mi, bool net_report ) /* Below we will stop all running threads. 
But if the user wants to stop only one thread, do as if the other thread - was stopped (as we don't wan't to touch the other thread), so set the + was stopped (as we don't want to touch the other thread), so set the bit to 0 for the other thread */ if (thd->lex->slave_thd_opt) @@ -4299,7 +4299,7 @@ bool change_master(THD* thd, Master_info* mi, bool *master_info_added) } } /* - Coordinates in rli were spoilt by the 'if (need_relay_log_purge)' block, + Coordinates in rli were spoiled by the 'if (need_relay_log_purge)' block, so restore them to good values. If we left them to ''/0, that would work; but that would fail in the case of 2 successive CHANGE MASTER (without a START SLAVE in between): because first one would set the coords in mi to @@ -4892,7 +4892,7 @@ err: /** Load data's io cache specific hook to be executed before a chunk of data is being read into the cache's buffer - The fuction instantianates and writes into the binlog + The function instantiates and writes into the binlog replication events along LOAD DATA processing. @param file pointer to io-cache diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 8450b166497..be3efae83e7 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -628,7 +628,7 @@ bool handle_select(THD *thd, LEX *lex, select_result *result, /* 'options' of mysql_select will be set in JOIN, as far as JOIN for every PS/SP execution new, we will not need reset this flag if - setup_tables_done_option changed for next rexecution + setup_tables_done_option changed for next reexecution */ res= mysql_select(thd, select_lex->table_list.first, @@ -2081,7 +2081,7 @@ bool JOIN::make_range_rowid_filters() DBUG_RETURN(true); /* Fatal error */ } /* - If SUBS_IN_TO_EXISTS strtrategy is chosen for the subquery then + If SUBS_IN_TO_EXISTS strategy is chosen for the subquery then additional conditions are injected into WHERE/ON/HAVING and it may happen that the call of test_quick_select() discovers impossible range. 
*/ @@ -2928,7 +2928,7 @@ int JOIN::optimize_stage2() } /* - Perform the optimization on fields evaliation mentioned above + Perform the optimization on fields evaluation mentioned above for all used ref items. */ for (tab= first_linear_tab(this, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES); tab; @@ -4949,7 +4949,7 @@ int JOIN::exec_inner() /* Evaluate expensive constant conditions that were not evaluated during optimization. Do not evaluate them for EXPLAIN statements as these - condtions may be arbitrarily costly, and because the optimize phase + conditions may be arbitrarily costly, and because the optimize phase might not have produced a complete executable plan for EXPLAINs. */ if (!zero_result_cause && @@ -5261,7 +5261,7 @@ find_partial_select_handler(THD *thd, SELECT_LEX *select_lex, WHERE clause of the top level select @param og_num total number of ORDER BY and GROUP BY clauses arguments - @param order linked list of ORDER BY agruments + @param order linked list of ORDER BY arguments @param group linked list of GROUP BY arguments @param having top level item of HAVING expression @param proc_param list of PROCEDUREs @@ -5681,7 +5681,7 @@ make_join_statistics(JOIN *join, List &tables_list, { /* Information schema is slow and we don't know how many rows we will - find. Be setting a moderate ammount of rows we are more likely + find. By setting a moderate amount of rows we are more likely to have it materialized if needed. */ table->file->stats.records= table->used_stat_records= 100; @@ -7350,7 +7350,7 @@ static bool add_key_part(DYNAMIC_ARRAY *keyuse_array, KEY_FIELD *key_field) /* If a key use is extracted from an equi-join predicate then it is added not only as a key use for every index whose component can - be evalusted utilizing this key use, but also as a key use for + be evaluated utilizing this key use, but also as a key use for hash join. Such key uses are marked with a special key number. 
*/ if (add_keyuse(keyuse_array, key_field, get_hash_join_key_no(), 0)) @@ -7616,7 +7616,7 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab, directly to fill at most 2 array elements, either of KEY_FIELD or SARGABLE_PARAM type. For a BETWEEN predicate 3 elements can be filled as this predicate is considered as - saragable with respect to each of its argument. + sargable with respect to each of its argument. An IN predicate can require at most 1 element as currently it is considered as sargable only for its first argument. Multiple equality can add elements that are filled after @@ -8137,7 +8137,7 @@ static double apply_selectivity_for_table(JOIN_TAB *s, DBUG_ASSERT(sel >= 0 && sel <= 1.0); /* table->cond_selectivity will include data from opt_range. - Here we check that this is indeeded the case. + Here we check that this is indeed the case. Note that if table_records == 0, then 'sel' is probably 1 */ DBUG_ASSERT(table_records == 0 || @@ -8559,7 +8559,7 @@ const char* dbug_print_join_prefix(const POSITION *join_positions, The function finds the best access path to table 's' from the passed partial plan where an access path is the general term for any means to - cacess the data in 's'. An access path may use either an index or a scan, + access the data in 's'. An access path may use either an index or a scan, whichever is cheaper. The input partial plan is passed via the array 'join->positions' of length 'idx'. The chosen access method for 's' and its cost are stored in 'join->positions[idx]'. @@ -8938,11 +8938,11 @@ best_access_path(JOIN *join, If range optimizer was able to construct a "range" access on this index, then its condition "quick_cond" was - eqivalent to ref_const_cond (*), and we can re-use E(#rows) + equivalent to ref_const_cond (*), and we can re-use E(#rows) from the range optimizer. Proof of (*): By properties of range and ref optimizers - quick_cond will be equal or tighther than ref_const_cond. 
+ quick_cond will be equal or tighter than ref_const_cond. ref_const_cond already covers "smallest" possible interval - a singlepoint interval over all keyparts. Therefore, quick_cond is equivalent to ref_const_cond (if it was an @@ -9282,7 +9282,7 @@ best_access_path(JOIN *join, index condition pushdown is employed for the used key access). Let this number be N. Then the total gain from using the filter is N*a_adj - b where b is the cost of building the filter and - a_adj is calcilated as follows: + a_adj is calculated as follows: a - (1-access_cost_factor)*(1-s) = (1+1_cond_eval_cost)*(1-s)-1_probe_cost - (1-access_cost_factor)*(1-s) = (1-s)*(1_cond_eval_cost+access_cost_factor) - 1_probe_cost. @@ -9420,7 +9420,7 @@ best_access_path(JOIN *join, /* If there is no key to access the table, but there is an equi-join - predicate connecting the table with the privious tables then we + predicate connecting the table with the previous tables then we consider the possibility of using hash join. We need also to check that: (1) s is inner table of semi-join -> join cache is allowed for semijoins @@ -10574,7 +10574,7 @@ optimize_straight_join(JOIN *join, table_map remaining_tables) or disk. The following calculation takes a middle ground where assume we can sort the keys in memory but have to use a disk based - temporary table to retrive the rows. + temporary table to retrieve the rows. This cost is probably much bigger than it has to be... */ double sort_cost; @@ -10612,7 +10612,7 @@ optimize_straight_join(JOIN *join, table_map remaining_tables) best complete continuation of the partial QEP. This continuation can be used directly as a result of the search. -# When (search_depth == 1) the 'best_extension_by_limited_search' - consideres the extension of the current QEP with each of the remaining + considers the extension of the current QEP with each of the remaining unoptimized tables. All other cases are in-between these two extremes. 
Thus the parameter @@ -11145,7 +11145,7 @@ double table_after_join_selectivity(JOIN *join, uint idx, JOIN_TAB *s, tbl.keypart1=expr1 AND tbl.keypart2=expr2 AND ... - and it will only return rows for which this condition is satisified. + and it will only return rows for which this condition is satisfied. Suppose, certain expr{i} is a constant. Since ref access only returns rows that satisfy @@ -11383,7 +11383,7 @@ exit: /* Check if the table is an EQ_REF or similar table and there is no cost - to gain by moveing it to a later stage. + to gain by moving it to a later stage. We call such a table a edge table (or hanging leaf) as it will read at most one row and will not add to the number of row combinations in the join. */ @@ -11438,7 +11438,7 @@ static int sort_positions(const void *a_, const void *b_) @param join JOIN object @param trace_one_table Current optimizer_trace - @param pos Pointer to remanining tables + @param pos Pointer to remaining tables @param allowed_tables bitmap of allowed tables. On return set to the collected tables. @param store_poisition Points to where to store next found SORT_POSITION. @@ -11531,7 +11531,7 @@ get_costs_for_tables(JOIN *join, table_map remaining_tables, uint idx, /* @brief - Check if it is potentally possible to short-cut the JOIN execution due to + Check if it is potentially possible to short-cut the JOIN execution due to ORDER BY ... LIMIT clause @detail @@ -11649,7 +11649,7 @@ bool test_if_skip_sort_order_early(JOIN *join, if (ref_key != MAX_KEY && usable_keys.is_set(ref_key)) { - return true; // we're using an index that produces the reqired ordering. + return true; // we're using an index that produces the required ordering. } /* @@ -11862,7 +11862,7 @@ bool join_limit_shortcut_limits_tables(const JOIN *join, uint idx, table_map *ma plan with the access plan that results in lowest cost of the expanded partial plan, and removes the corresponding relation from 'remaining_tables'. 
The algorithm continues until it either constructs a - complete optimal plan, or constructs an optimal plartial plan with size = + complete optimal plan, or constructs an optimal partial plan with size = search_depth. The final optimal plan is stored in 'join->best_positions'. The @@ -11951,7 +11951,7 @@ bool join_limit_shortcut_limits_tables(const JOIN *join, uint idx, table_map *ma @retval enum_best_search::SEARCH_OK All fine @retval - enum_best_search::SEARCH_FOUND_EDGE All remaning tables are edge tables + enum_best_search::SEARCH_FOUND_EDGE All remaining tables are edge tables @retval enum_best_search::SEARCH_ABORT Killed by user @retval @@ -12187,7 +12187,7 @@ best_extension_by_limited_search(JOIN *join, Store the current record count and cost as the best possible cost at this level if the following holds: - It's the lowest record number and cost so far - - There is no remaing table that could improve index usage + - There is no remaining table that could improve index usage or we found an EQ_REF or REF key with less than 2 matching records (good enough). */ @@ -12319,7 +12319,7 @@ best_extension_by_limited_search(JOIN *join, or disk. The following calculation takes a middle ground where assume we can sort the keys in memory but have to use a disk based - temporary table to retrive the rows. + temporary table to retrieve the rows. This cost is probably much bigger than it has to be... */ double sort_cost; @@ -12368,7 +12368,7 @@ end: /** - Find how much space the prevous read not const tables takes in cache. + Find how much space the previous read not const tables takes in cache. 
*/ void JOIN_TAB::calc_used_field_length(bool max_fl) @@ -12674,7 +12674,7 @@ cache_record_length(JOIN *join,uint idx) repeated rows: t2.records_out * t3.records_out engine_calls: record_count/repeated_rows = t1.records_out - If t4 depends on a table that uses EQ_REF access, we can multipy that + If t4 depends on a table that uses EQ_REF access, we can multiply that table's repeated_rows with current table's repeated_rows to take that into account. @@ -13832,7 +13832,7 @@ inline void add_cond_and_fix(THD *thd, Item **e1, Item *e2) We look through equalities chosen to perform ref/eq_ref access, pick equalities that have form "tbl.part_of_key = othertbl.field" (where othertbl is a non-const table and othertbl.field may be NULL) - and add them to conditions on correspoding tables (othertbl in this + and add them to conditions on corresponding tables (othertbl in this example). Exception from that is the case when referred_tab->join != join. @@ -13863,7 +13863,7 @@ inline void add_cond_and_fix(THD *thd, Item **e1, Item *e2) 1.1 add_key_part saves these to KEYUSE. 2. create_ref_for_key copies them to TABLE_REF. 3. add_not_null_conds adds "x IS NOT NULL" to join_tab->select_cond of - appropiate JOIN_TAB members. + appropriate JOIN_TAB members. */ static void add_not_null_conds(JOIN *join) @@ -15438,7 +15438,7 @@ void revise_cache_usage(JOIN_TAB *join_tab) TRUE <=> EOF (no action) DESCRIPTION - This function is used by semi-join materialization to capture suquery's + This function is used by semi-join materialization to capture subquery's resultset and write it into the temptable (that is, materialize it). NOTE @@ -15705,7 +15705,7 @@ uint check_join_cache_usage(JOIN_TAB *tab, The problem is, the temp.table is not filled (actually not even opened properly) yet, and this doesn't let us call handler->multi_range_read_info(). It is possible to come up with - estimates, etc. without acessing the table, but it seems not to worth the + estimates, etc. 
without accessing the table, but it seems not to worth the effort now. */ if (tab->table->pos_in_table_list->is_materialized_derived()) @@ -16430,7 +16430,7 @@ bool JOIN_TAB::build_range_rowid_filter() /** The same handler object (table->file) is used to build a filter - and to perfom a primary table access (by the main query). + and to perform a primary table access (by the main query). To estimate the time for filter building tracker should be changed and after building of the filter has been finished it should be @@ -16911,7 +16911,7 @@ bool TABLE_REF::is_access_triggered() For a join that is resolved using a temporary table, the first sweep is performed against actual tables and an intermediate result is inserted - into the temprorary table. + into the temporary table. The last sweep is performed against the temporary table. Therefore, the base tables and associated buffers used to fill the temporary table are no longer needed, and this function is called to free them. @@ -17172,7 +17172,7 @@ void JOIN::free_pushdown_handlers(List& join_list) Remove the following expressions from ORDER BY and GROUP BY: Constant expressions @n Expression that only uses tables that are of type EQ_REF and the reference - is in the ORDER list or if all refereed tables are of the above type. + is in the ORDER list or if all referred tables are of the above type. In the following, the X field can be removed: @code @@ -17664,7 +17664,7 @@ return_zero_rows(JOIN *join, select_result *result, List *tables, /* JOIN::join_free() must be called after the virtual method select::send_result_set_metadata() returned control since - implementation of this method could use data strutcures + implementation of this method could use data structures that are released by the method JOIN::join_free(). */ join->join_free(); @@ -17837,7 +17837,7 @@ finish: the Field::eq_def method) are placed to the same multiple equalities. 
Because of this some equality predicates are not eliminated and can be used in the constant propagation procedure. - We could weeken the equlity test as soon as at least one of the + We could weaken the equality test as soon as at least one of the equal fields is to be equal to a constant. It would require a more complicated implementation: we would have to store, in general case, its own constant for each fields from the multiple @@ -17849,8 +17849,8 @@ finish: The implementation does not follow exactly the above rules to build a new multiple equality for the equality predicate. If it processes the equality of the form field1=field2, it - looks for multiple equalities me1 containig field1 and me2 containing - field2. If only one of them is found the fuction expands it with + looks for multiple equalities me1 containing field1 and me2 containing + field2. If only one of them is found the function expands it with the lacking field. If multiple equalities for both fields are found they are merged. If both searches fail a new multiple equality containing just field1 and field2 is added to the existing @@ -18290,7 +18290,7 @@ bool Item_func_eq::check_equality(THD *thd, COND_EQUAL *cond_equal, will be replaced by (=(t1.a,t2.b,t3.c,t4.d) AND t2.b>5) The function performs the substitution in a recursive descent by - the condtion tree, passing to the next AND level a chain of multiple + the condition tree, passing to the next AND level a chain of multiple equality predicates which have been built at the upper levels. The Item_equal items built at the level are attached to other non-equality conjucts as a sublist. The pointer to the inherited @@ -18456,7 +18456,7 @@ COND *Item_func_eq::build_equal_items(THD *thd, E.g. in the following where condition WHERE a=5 AND (b=5 or a=c) (b=5) and (a=c) are standalone equalities. 
- In general we can't leave alone standalone eqalities: + In general we can't leave alone standalone equalities: for WHERE a=b AND c=d AND (b=c OR d=5) b=c is replaced by =(a,b,c,d). */ @@ -18838,7 +18838,7 @@ static TABLE_LIST* embedding_sjm(Item *item) If cond is equal to 0, then not more then one equality is generated and a pointer to it is returned as the result of the function. - Equality substutution and semi-join materialization nests: + Equality substitution and semi-join materialization nests: In case join order looks like this: @@ -18853,7 +18853,7 @@ static TABLE_LIST* embedding_sjm(Item *item) outer_tbl1.col. Item_equal::get_first() also takes similar measures for dealing with - equality substitution in presense of SJM nests. + equality substitution in presence of SJM nests. Grep for EqualityPropagationAndSjmNests for a more verbose description. @@ -19084,7 +19084,7 @@ Item *eliminate_item_equal(THD *thd, COND *cond, COND_EQUAL *upper_levels, We're doing substitution for an Item which will be evaluated in the context of a particular item. For example, if the optimizer does a ref access on "tbl1.key= expr" then - = equality substitution will be perfomed on 'expr' + = equality substitution will be performed on 'expr' = it is known in advance that 'expr' will be evaluated when table t1 is accessed. Note that in this kind of substution we never have to replace Item_equal @@ -19098,7 +19098,7 @@ Item *eliminate_item_equal(THD *thd, COND *cond, COND_EQUAL *upper_levels, 2. context_tab == NO_PARTICULAR_TAB We're doing substitution in WHERE/ON condition, which is not yet attached to any particular join_tab. We will use information about the - chosen join order to make "optimal" substitions, i.e. those that allow + chosen join order to make "optimal" substitutions, i.e. those that allow to apply filtering as soon as possible. See eliminate_item_equal() and Item_equal::get_first() for details. 
@@ -19515,7 +19515,7 @@ propagate_cond_constants(THD *thd, I_List *save_list, to check whether the query contains invalid cross-references. The forth attribute is an auxiliary one and is used to calculate dep_tables. - As the attribute dep_tables qualifies possibles orders of tables in the + As the attribute dep_tables qualifies possible orders of tables in the execution plan, the dependencies required by the straight join modifiers are reflected in this attribute as well. The function also removes all braces that can be removed from the join @@ -20604,11 +20604,11 @@ bool cond_has_datetime_is_null(Item *cond) } /* - Check if passed condtition has for of + Check if passed condition has form of not_null_date_col IS NULL - where not_null_date_col has a datte or datetime type + where not_null_date_col has a date or datetime type */ bool cond_is_datetime_is_null(Item *cond) @@ -20976,7 +20976,7 @@ Item_bool_func2::remove_eq_conds(THD *thd, Item::cond_result *cond_value, cond_value the resulting value of the condition NOTES - calls the inner_remove_eq_conds to check all the tree reqursively + calls the inner_remove_eq_conds to check all the tree recursively RETURN *COND with the simplified condition @@ -21520,7 +21520,7 @@ static bool make_json_valid_expr(TABLE *table, Field *field) @param default_field If field has a default value field, store it here @param group 1 if we are going to do a relative group by on result @param modify_item 1 if item->result_field should point to new item. - This is relevent for how fill_record() is going to + This is relevant for how fill_record() is going to work: If modify_item is 1 then fill_record() will update the record in the original table. @@ -21674,11 +21674,11 @@ void Create_tmp_table::add_field(TABLE *table, Field *field, uint fieldnr, @param fields list of items that will be used to define column types of the table (also see NOTES) @param group Create an unique key over all group by fields. 
- This is used to retrive the row during + This is used to retrieve the row during end_write_group() and update them. @param distinct should table rows be distinct @param save_sum_fields see NOTES - @param select_options Optiions for how the select is run. + @param select_options Options for how the select is run. See sql_priv.h for a list of options. @param rows_limit Maximum number of rows to insert into the temporary table @@ -22400,7 +22400,7 @@ bool Create_tmp_table::finalize(THD *thd, m_key_part_info->null_offset= (uint) (field->null_ptr - (uchar*) table->record[0]); cur_group->buff++; // Pointer to field data - m_group_buff++; // Skipp null flag + m_group_buff++; // Skip null flag } m_group_buff+= cur_group->field->pack_length(); } @@ -23826,7 +23826,7 @@ bool instantiate_tmp_table(TABLE *table, KEY *keyinfo, @param end_records TRUE <=> all records were accumulated, send them further @details - This function accumulates records of the aggreagation operation for + This function accumulates records of the aggregation operation for the node join_tab from the execution plan in a tmp table. To add a new record the function calls join_tab->aggr->put_records. When there is no more records to save, in this @@ -24084,7 +24084,7 @@ sub_select_cache(JOIN *join, JOIN_TAB *join_tab, bool end_of_records) @param join pointer to the structure providing all context info for the query @param join_tab the first next table of the execution plan to be retrieved - @param end_records true when we need to perform final steps of retrival + @param end_records true when we need to perform final steps of retrieval @return return one of enum_nested_loop_state, except NESTED_LOOP_NO_MORE_ROWS. 
@@ -25222,7 +25222,7 @@ int join_init_read_record(JOIN_TAB *tab) /* JT_NEXT means that we should use an index scan on index 'tab->index' However if filesort is set, the table was already sorted above - and now have to retrive the rows from the tmp file or by rnd_pos() + and now have to retrieve the rows from the tmp file or by rnd_pos() If !(tab->select && tab->select->quick)) it means that we are in "Range checked for each record" and we better let the normal init_read_record() handle this case @@ -25270,7 +25270,7 @@ JOIN_TAB::sort_table() JOIN::ordered_index_order_by : JOIN::ordered_index_group_by)); rc= create_sort_index(join->thd, join, this, NULL); - /* Disactivate rowid filter if it was used when creating sort index */ + /* Deactivate rowid filter if it was used when creating sort index */ if (rowid_filter) table->file->rowid_filter_is_active= false; return (rc != 0); @@ -26997,7 +26997,7 @@ void compute_part_of_sort_key_for_equals(JOIN *join, TABLE *table, else { /* - Walk through join's muliple equalities and find the one that contains + Walk through join's multiple equalities and find the one that contains item_field. */ if (!join->cond_equal) @@ -27654,7 +27654,7 @@ check_reverse_order: /* Cleanup: We may have both a 'select->quick' and 'save_quick' (original) - at this point. Delete the one that we wan't use. + at this point. Delete the one that we won't use. */ skipped_filesort: @@ -27903,7 +27903,7 @@ JOIN_TAB::remove_duplicates() { if (item->get_tmp_table_field()) { - /* Field is stored in temporary table, skipp */ + /* Field is stored in temporary table, skip */ field_count++; } else @@ -28351,7 +28351,7 @@ find_order_in_list(THD *thd, Ref_ptr_array ref_pointer_array, bool is_group_field, bool add_to_all_fields, bool from_window_spec) { - Item *order_item= *order->item; /* The item from the GROUP/ORDER caluse. */ + Item *order_item= *order->item; /* The item from the GROUP/ORDER clause. 
*/ Item::Type order_item_type; Item **select_item; /* The corresponding item from the SELECT clause. */ Field *from_field; /* The corresponding field from the FROM clause. */ @@ -29237,7 +29237,7 @@ setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param, goto err; if (pos->type() == Item::REF_ITEM) { - /* preserve the names of the ref when dereferncing */ + /* preserve the names of the ref when dereferencing */ Item_ref *ref= (Item_ref *) pos; item->db_name= ref->db_name; item->table_name= ref->table_name; @@ -29390,7 +29390,7 @@ bool JOIN::alloc_func_list() group_parts= send_group_parts; /* If distinct, reserve memory for possible - disctinct->group_by optimization + distinct->group_by optimization */ if (select_distinct) { @@ -29872,7 +29872,7 @@ copy_funcs(Item **func_ptr, const THD *thd) /** Create a condition for a const reference and add this to the - currenct select for the table. + current select for the table. */ static bool add_ref_to_table_cond(THD *thd, JOIN_TAB *join_tab) @@ -31447,7 +31447,7 @@ static void print_table_array(THD *thd, TABLE_LIST *curr= *tbl; /* - The "eliminated_tables &&" check guards againist the case of + The "eliminated_tables &&" check guards against the case of printing the query for CREATE VIEW. We do that without having run JOIN::optimize() and so will have nested_join->used_tables==0. */ @@ -33995,7 +33995,7 @@ bool JOIN::optimize_upper_rownum_func() @return 1 No or invalid rownum() compare @return 0 rownum() is compared with a constant. In this case *args contains the constant and - *inv_order constains 1 if the rownum() was the right + *inv_order contains 1 if the rownum() was the right argument, like in 'WHERE 2 >= rownum()'. */ @@ -34351,7 +34351,7 @@ err: @details The function assumes that each type of a DML statement has its own - implementation of the virtunal functions precheck(). It is also + implementation of the virtual functions precheck(). 
It is also assumed that that the virtual function execute execute_inner() is to be overridden by the implementations for specific commands. diff --git a/sql/sql_select.h b/sql/sql_select.h index 2621bd4cd0d..b20a48eaaa3 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -60,7 +60,7 @@ typedef struct keyuse_t { !NULL - This KEYUSE was created from an equality that was wrapped into an Item_func_trig_cond. This means the equality (and validity of this KEYUSE element) can be turned on and off. The on/off state - is indicted by the pointed value: + is indicated by the pointed value: *cond_guard == TRUE <=> equality condition is on *cond_guard == FALSE <=> equality condition is off @@ -841,7 +841,7 @@ public: double *record_count, double *read_time, table_map *handled_fanout, - sj_strategy_enum *stratey, + sj_strategy_enum *strategy, POSITION *loose_scan_pos) override; void mark_used() override { is_used= TRUE; } @@ -1026,7 +1026,7 @@ public: pushed down selection condition is applied) per each row combination of previous tables. - In best_access_path() it is set to the minum number of accepted rows + In best_access_path() it is set to the minimum number of accepted rows for any possible access method or filter: records_out takes into account table->cond_selectivity, the WHERE clause @@ -1134,7 +1134,7 @@ public: *very* imprecise guesses made in best_access_path(). */ bool use_join_buffer; - /* True if we can use join_buffer togethere with firstmatch */ + /* True if we can use join_buffer together with firstmatch */ bool firstmatch_with_join_buf; POSITION(); }; @@ -1295,7 +1295,7 @@ public: */ TABLE **table; /** - The table which has an index that allows to produce the requried ordering. + The table which has an index that allows to produce the required ordering. A special value of 0x1 means that the ordering will be produced by passing 1st non-const table to filesort(). NULL means no such table exists. 
*/ @@ -1413,7 +1413,7 @@ public: ha_rows send_records,found_records, accepted_rows; /* - LIMIT for the JOIN operation. When not using aggregation or DISITNCT, this + LIMIT for the JOIN operation. When not using aggregation or DISTINCT, this is the same as select's LIMIT clause specifies. Note that this doesn't take sql_calc_found_rows into account. */ @@ -1507,7 +1507,7 @@ public: double best_read; /* Estimated result rows (fanout) of the join operation. If this is a subquery - that is reexecuted multiple times, this value includes the estiamted # of + that is reexecuted multiple times, this value includes the estimated # of reexecutions. This value is equal to the multiplication of all join->positions[i].records_read of a JOIN. */ @@ -1612,7 +1612,7 @@ public: bool need_tmp; bool hidden_group_fields; - /* TRUE if there was full cleunap of the JOIN */ + /* TRUE if there was full cleanup of the JOIN */ bool cleaned; DYNAMIC_ARRAY keyuse; Item::cond_result cond_value, having_value; @@ -1654,13 +1654,13 @@ public: COND *conds; // ---"--- Item *conds_history; // store WHERE for explain COND *outer_ref_cond; /// *join_list; ///< list of joined tables in reverse order COND_EQUAL *cond_equal; COND_EQUAL *having_equal; /* - Constant codition computed during optimization, but evaluated during + Constant condition computed during optimization, but evaluated during join execution. Typically expensive conditions that should not be evaluated at optimization time. */ @@ -1797,7 +1797,7 @@ public: bool make_sum_func_list(List &all_fields, List &send_fields, bool before_group_by); - /// Initialzes a slice, see comments for ref_ptrs above. + /// Initializes a slice, see comments for ref_ptrs above. Ref_ptr_array ref_ptr_array_slice(size_t slice_num) { size_t slice_sz= select_lex->ref_pointer_array.size() / 5U; @@ -2498,7 +2498,7 @@ create_virtual_tmp_table(THD *thd, List &field_list) is enabled, we now enable "simulate_out_of_memory". 
This effectively makes table->init() fail on OOM inside multi_alloc_root(). This is done to test that ~Virtual_tmp_table() called from the "delete" - below correcly handles OOM. + below correctly handles OOM. */ DBUG_EXECUTE_IF("simulate_create_virtual_tmp_table_out_of_memory", DBUG_SET("+d,simulate_out_of_memory");); diff --git a/sql/sql_sequence.cc b/sql/sql_sequence.cc index 7348c49c70d..976c0dcbb65 100644 --- a/sql/sql_sequence.cc +++ b/sql/sql_sequence.cc @@ -1,6 +1,6 @@ /* Copyright (c) 2017, MariaDB Corporation, Alibaba Corporation - Copyrgiht (c) 2020, MariaDB Corporation. + Copyright (c) 2020, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -566,7 +566,7 @@ bool sequence_insert(THD *thd, LEX *lex, TABLE_LIST *org_table_list) } -/* Create a SQUENCE object */ +/* Create a SEQUENCE object */ SEQUENCE::SEQUENCE() :all_values_used(0), initialized(SEQ_UNINTIALIZED) { diff --git a/sql/sql_servers.cc b/sql/sql_servers.cc index 9563022632e..ac50df7f625 100644 --- a/sql/sql_servers.cc +++ b/sql/sql_servers.cc @@ -317,7 +317,7 @@ end: /* Forget current servers cache and read new servers - from the conneciton table. + from the connection table. SYNOPSIS servers_reload() @@ -941,7 +941,7 @@ end: FOREIGN_SERVER *altered NOTES - This function takes as an argument the FOREIGN_SERVER structi pointer + This function takes as an argument the FOREIGN_SERVER struct pointer for the existing server and the FOREIGN_SERVER struct populated with only the members which have been updated. 
It then "merges" the "altered" struct members to the existing server, the existing server then represents an diff --git a/sql/sql_show.cc b/sql/sql_show.cc index bc45905590f..2a046a25a2a 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -1361,7 +1361,7 @@ mysqld_show_create(THD *thd, TABLE_LIST *table_list) /* Metadata locks taken during SHOW CREATE should be released when - the statmement completes as it is an information statement. + the statement completes as it is an information statement. */ MDL_savepoint mdl_savepoint= thd->mdl_context.mdl_savepoint(); @@ -3630,7 +3630,7 @@ int add_status_vars(SHOW_VAR *list) while (list->name) res|= insert_dynamic(&all_status_vars, (uchar*)list++); res|= insert_dynamic(&all_status_vars, (uchar*)list); // appending NULL-element - all_status_vars.elements--; // but next insert_dynamic should overwite it + all_status_vars.elements--; // but next insert_dynamic should overwrite it if (status_vars_inited) sort_dynamic(&all_status_vars, show_var_cmp); status_var_array_version++; @@ -6181,7 +6181,7 @@ err: @brief Fill IS.table with temporary tables @param[in] thd thread handler @param[in] table I_S table (TABLE) - @param[in] tmp_tbl temporary table to be represetned by IS.table + @param[in] tmp_tbl temporary table to be represented by IS.table @return Operation status @retval 0 - success @retval 1 - failure @@ -6228,7 +6228,7 @@ static void store_column_type(TABLE *table, Field *field, CHARSET_INFO *cs, tmp_buff= strchr(column_type.c_ptr_safe(), '('); if (!tmp_buff) /* - if there is no dimention part then check the presence of + if there is no dimension part then check the presence of [unsigned] [zerofill] attributes and cut them of if exist. 
*/ tmp_buff= strchr(column_type.c_ptr_safe(), ' '); @@ -8824,7 +8824,7 @@ struct schema_table_ref }; /* - Find schema_tables elment by name + Find schema_tables element by name SYNOPSIS find_schema_table_in_plugin() @@ -9658,7 +9658,7 @@ bool get_schema_tables_result(JOIN *join, continue; /* - Do not fill in tables thare are marked as JT_CONST as these will never + Do not fill in tables marked as JT_CONST as these will never be read and they also don't have a tab->read_record.table set! This can happen with queries like SELECT * FROM t1 LEFT JOIN (t1 AS t1b JOIN INFORMATION_SCHEMA.ROUTINES) @@ -9731,7 +9731,7 @@ bool get_schema_tables_result(JOIN *join, /* This hack is here, because I_S code uses thd->clear_error() a lot. Which means, a Warnings_only_error_handler cannot handle the error - corectly as it does not know whether an error is real (e.g. caused + correctly as it does not know whether an error is real (e.g. caused by tab->select_cond->val_int()) or will be cleared later. Thus it ignores all errors, and the real one (that is, the error that was not cleared) is pushed now. @@ -11089,7 +11089,7 @@ static bool show_create_trigger_impl(THD *thd, Trigger *trigger) /** Read TRN and TRG files to obtain base table name for the specified - trigger name and construct TABE_LIST object for the base table. + trigger name and construct TABLE_LIST object for the base table. @param thd Thread context. @param trg_name Trigger name. diff --git a/sql/sql_sort.h b/sql/sql_sort.h index 6905d472d5e..2c789d31ad6 100644 --- a/sql/sql_sort.h +++ b/sql/sql_sort.h @@ -38,8 +38,8 @@ struct TABLE; Only fixed layout is supported now. Null bit maps for the appended values is placed before the values themselves. Offsets are from the last sorted field, that is from the - record referefence, which is still last component of sorted records. - It is preserved for backward compatiblility. + record reference, which is still last component of sorted records. 
+ It is preserved for backward compatibility. The structure is used tp store values of the additional fields in the sort buffer. It is used also when these values are read from a temporary file/buffer. As the reading procedures are beyond the diff --git a/sql/sql_statistics.cc b/sql/sql_statistics.cc index a12892a50f2..cc21a029d36 100644 --- a/sql/sql_statistics.cc +++ b/sql/sql_statistics.cc @@ -1074,7 +1074,7 @@ public: This implementation of a purely virtual method sets the value of the columns 'min_value', 'max_value', 'nulls_ratio', 'avg_length', 'avg_frequency', 'hist_size', 'hist_type' and 'histogram' of the - stistical table columns_stat according to the contents of the bitmap + statistical table columns_stat according to the contents of the bitmap write_stat.column_stat_nulls and the values of the fields min_value, max_value, nulls_ratio, avg_length, avg_frequency, hist_size, hist_type and histogram of the structure write_stat from the Field structure @@ -1569,7 +1569,7 @@ public: /* Initialize the iterator. It will return rows with n_keyparts matching the - curernt values. + current values. 
@return false - OK true - Error @@ -2928,7 +2928,7 @@ int update_statistics_for_table(THD *thd, TABLE *table) } /* - Ensure that no one is reading satistics while we are writing them + Ensure that no one is reading statistics while we are writing them This ensures that statistics is always read consistently */ mysql_mutex_lock(&table->s->LOCK_statistics); @@ -2944,7 +2944,7 @@ int update_statistics_for_table(THD *thd, TABLE *table) if (err) rc= 1; - /* Update the statistical table colum_stats */ + /* Update the statistical table column_stats */ stat_table= tables[COLUMN_STAT].table; Column_stat column_stat(stat_table, table); for (Field **field_ptr= table->field; *field_ptr; field_ptr++) @@ -3306,7 +3306,7 @@ read_statistics_for_tables(THD *thd, TABLE_LIST *tables, bool force_reload) DEBUG_SYNC(thd, "statistics_read_start"); /* - Do not read statistics for any query that explicity involves + Do not read statistics for any query that explicitly involves statistical tables, failure to to do so we may end up in a deadlock. */ @@ -4340,7 +4340,7 @@ double get_column_range_cardinality(Field *field, @param endpoint The constant @param avg_sel Average selectivity of condition "col=const" in this table. - It is calcuated as (#non_null_values / #distinct_values). + It is calculated as (#non_null_values / #distinct_values). @return Expected condition selectivity (a number between 0 and 1) @@ -4388,7 +4388,7 @@ double Histogram_binary::point_selectivity(Field *field, key_range *endpoint, /* A special case: we're looking at a single bucket, and that bucket has zero value-length. Use the multi-bucket formula (attempt to use - single-bucket formula will cause divison by zero). + single-bucket formula will cause division by zero). For more details see [re_zero_length_buckets] above. 
*/ @@ -4476,7 +4476,7 @@ bool is_stat_table(const Lex_ident_db &db, const Lex_ident_table &table) } /* - Check wheter we can use EITS statistics for a field or not + Check whether we can use EITS statistics for a field or not TRUE : Use EITS for the columns FALSE: Otherwise @@ -4494,7 +4494,7 @@ bool is_eits_usable(Field *field) (1): checks if we have EITS statistics for a particular column (2): Don't use EITS for GEOMETRY columns (3): Disabling reading EITS statistics for columns involved in the - partition list of a table. We assume the selecticivity for + partition list of a table. We assume the selectivity for such columns would be handled during partition pruning. */ diff --git a/sql/sql_statistics.h b/sql/sql_statistics.h index ad3bb9ae7b7..7b40c0afefc 100644 --- a/sql/sql_statistics.h +++ b/sql/sql_statistics.h @@ -24,7 +24,7 @@ similar to the COMPLEMENTARY and PREFERABLY respectively except that with these values we would not be collecting EITS for queries like ANALYZE TABLE t1; - To collect EITS with these values, we have to use PERSISITENT FOR + To collect EITS with these values, we have to use PERSISTENT FOR analyze table t1 persistent for columns (col1,col2...) index (idx1, idx2...) or @@ -579,7 +579,7 @@ public: @retval TRUE: Statistics are not present for a column - FALSE: Statisitics are present for a column + FALSE: Statistics are present for a column */ bool no_stat_values_provided() { diff --git a/sql/sql_string.cc b/sql/sql_string.cc index 087e03dccab..8015051ac5b 100644 --- a/sql/sql_string.cc +++ b/sql/sql_string.cc @@ -64,7 +64,7 @@ bool Binary_string::real_alloc(size_t length) null character is inserted at the appropriate position. - If the String does not keep a private buffer on the heap, such a buffer - will be allocated and the string copied accoring to its length, as found + will be allocated and the string copied according to its length, as found in String::length(). 
For C compatibility, the new string buffer is null terminated if it was @@ -340,7 +340,7 @@ bool String::needs_conversion(size_t arg_length, Checks that the source string can just be copied to the destination string without conversion. Unlike needs_conversion it will require conversion on incoming binary data - to ensure the data are verified for vailidity first. + to ensure the data are verified for validity first. @param arg_length Length of string to copy. @param from_cs Character set to copy from @@ -385,7 +385,7 @@ bool String::needs_conversion_on_storage(size_t arg_length, cs Character set for 'str' NOTES - For real multi-byte, ascii incompatible charactser sets, + For real multi-byte, ascii incompatible character sets, like UCS-2, add leading zeros if we have an incomplete character. Thus, SELECT _ucs2 0xAA @@ -953,7 +953,7 @@ String *copy_if_not_alloced(String *to,String *from,uint32 from_length) "from" typically points to a temporary buffer inside Item_xxx::val_str(), or to Item::str_value, and thus is "less permanent" than "to". - Reallocating "to" may give more benifits: + Reallocating "to" may give more benefits: - "to" can point to a "more permanent" storage and can be reused for multiple rows, e.g. str_buffer in Protocol::send_result_set_row(), which is passed to val_str() for all string type rows. diff --git a/sql/sql_string.h b/sql/sql_string.h index ec6ffd0f6c6..9f97923c194 100644 --- a/sql/sql_string.h +++ b/sql/sql_string.h @@ -278,7 +278,7 @@ public: } /* NOTE: If one intend to use the c_ptr() method, the following two - contructors need the size of memory for STR to be at least LEN+1 (to make + constructors need the size of memory for STR to be at least LEN+1 (to make room for zero termination). 
*/ Binary_string(const char *str, size_t len) @@ -686,14 +686,14 @@ public: if (unlikely(!Ptr)) return (char*) ""; /* - Here we assume that any buffer used to initalize String has + Here we assume that any buffer used to initialize String has an end \0 or have at least an accessable character at end. This is to handle the case of String("Hello",5) and String("hello",5) efficiently. We have two options here. To test for !Alloced_length or !alloced. Using "Alloced_length" is slightly safer so that we do not read - from potentially unintialized memory (normally not dangerous but + from potentially uninitialized memory (normally not dangerous but may give warnings in valgrind), but "alloced" is safer as there are less change to get memory loss from code that is using String((char*), length) or String.set((char*), length) and does @@ -713,7 +713,7 @@ public: } /* One should use c_ptr() instead for most cases. This will be deleted soon, - kept for compatiblity. + kept for compatibility. */ inline char *c_ptr_quick() { @@ -723,7 +723,7 @@ public: This is to be used only in the case when one cannot use c_ptr(). The cases are: - When one initializes String with an external buffer and length and - buffer[length] could be uninitalized when c_ptr() is called. + buffer[length] could be uninitialized when c_ptr() is called. - When valgrind gives warnings about uninitialized memory with c_ptr(). */ inline char *c_ptr_safe() @@ -857,7 +857,7 @@ public: { } /* NOTE: If one intend to use the c_ptr() method, the following two - contructors need the size of memory for STR to be at least LEN+1 (to make + constructors need the size of memory for STR to be at least LEN+1 (to make room for zero termination). 
*/ String(const char *str, size_t len, CHARSET_INFO *cs) diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 513a0e881c8..577296d092c 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -1172,7 +1172,7 @@ bool mysql_rm_table(THD *thd,TABLE_LIST *tables, bool if_exists, locks for temporary tables: they are local to the session. Later in this function we release the MDL lock only if - table->mdl_requeset.ticket is not NULL. Thus here we + table->mdl_request.ticket is not NULL. Thus here we ensure that we won't release the metadata lock on the base table locked with LOCK TABLES as a side effect of temporary table drop. @@ -1233,7 +1233,7 @@ bool mysql_rm_table(THD *thd,TABLE_LIST *tables, bool if_exists, @param comment_start returns the beginning of the comment if found. @retval 0 no comment found - @retval >0 the lenght of the comment found + @retval >0 the length of the comment found */ static uint32 get_comment(THD *thd, uint32 comment_pos, @@ -3284,7 +3284,7 @@ mysql_prepare_create_table_finalize(THD *thd, HA_CREATE_INFO *create_info, field_no < select_field_pos: both field and dup are table fields; dup_no >= select_field_pos: both field and dup are select fields or - field is implicit systrem field and dup is select field. + field is implicit system field and dup is select field. We are not allowed to put row_start/row_end into SELECT expression. */ @@ -3415,7 +3415,7 @@ mysql_prepare_create_table_finalize(THD *thd, HA_CREATE_INFO *create_info, if (init_key_info(thd, alter_info, create_info, file)) DBUG_RETURN(TRUE); - /* Calculate number of key segements */ + /* Calculate number of key segments */ *key_count= 0; while ((key=key_iterator++)) @@ -4946,7 +4946,7 @@ warn: in various version of CREATE TABLE statement. 
@result - 1 unspecifed error + 1 unspecified error 2 error; Don't log create statement 0 ok -1 Table was used with IF NOT EXISTS and table existed (warning, not error) @@ -5873,7 +5873,7 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, /* Since temporary tables are not replicated under row-based replication, CREATE TABLE ... LIKE ... needs special - treatement. We have some cases to consider, according to the + treatment. We have some cases to consider, according to the following decision table: ==== ========= ========= ============================== @@ -6965,7 +6965,7 @@ static bool fill_alter_inplace_info(THD *thd, TABLE *table, ALTER_RENAME_COLUMN is replaced by ALTER_COLUMN_NAME. ALTER_CHANGE_COLUMN_DEFAULT is replaced by ALTER_CHANGE_COLUMN ALTER_PARSE_ADD_COLUMN, ALTER_PARSE_DROP_COLUMN, ALTER_ADD_INDEX and - ALTER_DROP_INDEX are replaced with versions that have higher granuality. + ALTER_DROP_INDEX are replaced with versions that have higher granularity. */ alter_table_operations flags_to_remove= @@ -8634,7 +8634,7 @@ mysql_prepare_alter_table(THD *thd, TABLE *table, break; } /* - DROP COLULMN xxx + DROP COLUMN xxx 1. it does not see INVISIBLE_SYSTEM columns 2. otherwise, normally a column is dropped 3. unless it's a system versioning column (but see below). @@ -9699,7 +9699,7 @@ fk_check_column_changes(THD *thd, const TABLE *table, { /* Column in a FK has changed significantly and it - may break referential intergrity. + may break referential integrity. */ result= FK_COLUMN_DATA_CHANGE; goto func_exit; @@ -10343,7 +10343,7 @@ static bool wait_for_master(THD *thd) here is finished. 
@param thd Thread handle - @param start_alter_state ALTER replicaton execution context + @param start_alter_state ALTER replication execution context @param mi Master_info of the replication source */ static void alter_committed(THD *thd, start_alter_info* info, Master_info *mi) @@ -11358,7 +11358,7 @@ do_continue:; is updated without data transformations and the table would be corrupted without any way for MariaDB to notice this during check/upgrade). - This logic ensurses that ALTER TABLE ... FORCE (no other + This logic ensures that ALTER TABLE ... FORCE (no other options) will always be be able to repair a table structure and convert data from any old format. - In-place is impossible for given operation. diff --git a/sql/sql_time.cc b/sql/sql_time.cc index cf8e9047b83..099610f6f30 100644 --- a/sql/sql_time.cc +++ b/sql/sql_time.cc @@ -164,7 +164,7 @@ int calc_weekday(long daynr,bool sunday_first_day_of_week) a date at start of january) In this case one can get 53 for the first week of next year. This flag ensures that the week is relevant for the given year. Note that this flag is only - releveant if WEEK_JANUARY is not set. + relevant if WEEK_JANUARY is not set. If set Week is in range 1-53. diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc index 95ef0d5a008..edadc30d494 100644 --- a/sql/sql_trigger.cc +++ b/sql/sql_trigger.cc @@ -788,7 +788,7 @@ drop_orphan_trn: well-formed statement for creation this trigger. @param[out] trigger_def query to be stored in trigger file. As stmt_query, but without "OR REPLACE" and no FOLLOWS/PRECEDES. - @param[out] trg_definer The triggger definer. + @param[out] trg_definer The trigger definer. @param[out] trg_definer_holder Used as a buffer for definer. @note @@ -3008,7 +3008,7 @@ process_unknown_string(const char *&unknown_key, uchar* base, /** - Contruct path to TRN-file. + Construct path to TRN-file. @param thd[in] Thread context. @param trg_name[in] Trigger name. 
diff --git a/sql/sql_tvc.cc b/sql/sql_tvc.cc index a472327481f..5705802bab7 100644 --- a/sql/sql_tvc.cc +++ b/sql/sql_tvc.cc @@ -30,7 +30,7 @@ Walk through all VALUES items. @param @param processor - the processor to call for each Item - @param walk_qubquery - if should dive into subquery items + @param walk_subquery - if should dive into subquery items @param argument - the argument to pass recursively @retval true on error @@ -118,8 +118,8 @@ bool fix_fields_for_tvc(THD *thd, List_iterator_fast &li) types and aggregates them with the previous ones stored in holders. If list_a is the first one in the list of lists its elements types are put in holders. The errors can be reported when count of list_a elements is - different from the first_list_el_count. Also error can be reported whe - n aggregation can't be made. + different from the first_list_el_count. Also error can be reported when + aggregation can't be made. @retval true if an error was reported @@ -1171,7 +1171,7 @@ bool Item_func_in::to_be_transformed_into_in_subq(THD *thd) @details For each IN predicate from AND parts of the WHERE condition and/or ON expressions of the SELECT for this join the method performs - the intransformation into an equivalent IN sunquery if it's needed. + the transformation into an equivalent IN subquery if it's needed. 
@retval false always diff --git a/sql/sql_type.cc b/sql/sql_type.cc index 5c367c994bc..0cc22d2d348 100644 --- a/sql/sql_type.cc +++ b/sql/sql_type.cc @@ -445,7 +445,7 @@ bool Timestamp::to_native(Native *to, uint decimals) const uint len= my_timestamp_binary_length(decimals); if (to->reserve(len)) { - to->length(0); // Safety: set to '0000-00-00 00:00:00' on falures + to->length(0); // Safety: set to '0000-00-00 00:00:00' on failures return true; } my_timestamp_to_binary(this, (uchar *) to->ptr(), decimals); @@ -1411,7 +1411,7 @@ Type_handler::odbc_literal_type_handler(const LEX_CSTRING *type_str) TODO: type_handler_adjusted_to_max_octet_length() and string_type_handler() provide very similar functionality, to properly choose between - VARCHAR/VARBINARY vs TEXT/BLOB variations taking into accoung maximum + VARCHAR/VARBINARY vs TEXT/BLOB variations taking into account maximum possible octet length. We should probably get rid of either of them and use the same method @@ -1873,7 +1873,7 @@ Type_handler::bit_and_int_mixture_handler(uint max_char_length) Note, independently from "treat_bit_as_number": - a single BIT argument gives BIT as a result - - two BIT couterparts give BIT as a result + - two BIT counterparts give BIT as a result - (BIT + explicit NULL) or (explicit NULL + BIT) give BIT @details This function aggregates field types from the array of items. @@ -4547,7 +4547,7 @@ bool Type_handler_string_result:: ... AND a='oe' to ... AND 'oe' COLLATE utf8_german2_ci='oe' - it will be evalulated to TRUE and removed from the condition, + it will be evaluated to TRUE and removed from the condition, so the overall query will be simplified to: SELECT * FROM t1 WHERE a='oe' COLLATE utf8_german2_ci; @@ -4675,8 +4675,8 @@ Type_handler_timestamp_common::create_item_copy(THD *thd, Item *item) const /* This method handles YEAR and BIT data types. - It does not switch the data type to DECIAMAL on a - unsigned_flag mistmatch. 
This important for combinations + It does not switch the data type to DECIMAL on a + unsigned_flag mismatch. This is important for combinations like YEAR+NULL, BIT+NULL. */ bool Type_handler_int_result:: @@ -5241,7 +5241,7 @@ bool Type_handler_int_result::Item_val_bool(Item *item) const and we need to evaluate the boolean value from the integer value as a fall-back method. To avoid the assert, let's hide the IS_COND flag. Eventually we'll need to implement val_bool() in all Item descendants and - remove the trick with flags. This change would be too ricky for 10.6. + remove the trick with flags. This change would be too tricky for 10.6. Let's do it in a later version. */ item_base_t flags= item->base_flags; @@ -7350,7 +7350,7 @@ decimal_digits_t Type_handler_long_ge0::Item_decimal_precision(const Item *item) DBUG_ASSERT(item->max_length); DBUG_ASSERT(!item->decimals); /* - Unlinke in Type_handler_long, Type_handler_long_ge does + Unlike in Type_handler_long, Type_handler_long_ge does not reserve one character for the sign. All max_length characters are digits. */ @@ -9014,7 +9014,7 @@ Type_handler_temporal_result::Item_const_eq(const Item_const *a, /* @brief - Check if two costant timestamp values are identical. + Check if two constant timestamp values are identical. 
@return true <=> *a and *b are identical diff --git a/sql/sql_type.h b/sql/sql_type.h index 195921a5e27..33c479f4f0d 100644 --- a/sql/sql_type.h +++ b/sql/sql_type.h @@ -1259,7 +1259,7 @@ public: }; public: - // Contructors for Item + // Constructors for Item Temporal_hybrid(THD *thd, Item *item, date_mode_t fuzzydate); Temporal_hybrid(THD *thd, Item *item) :Temporal_hybrid(thd, item, Options(thd)) @@ -1315,7 +1315,7 @@ public: else make_from_decimal(thd, warn, nr, mode); } - // End of constuctors + // End of constructors bool copy_valid_value_to_mysql_time(MYSQL_TIME *ltime) const { @@ -3072,7 +3072,7 @@ enum Derivation - BINARY(expr) and CAST(expr AS BINARY) */ DERIVATION_IMPLICIT= 2, - DERIVATION_NONE= 1, // A mix (e.g. CONCAT) of two differrent collations + DERIVATION_NONE= 1, // A mix (e.g. CONCAT) of two different collations DERIVATION_EXPLICIT= 0 // An explicit COLLATE clause }; @@ -3470,7 +3470,7 @@ public: /* A container for very specific data type attributes. - For now it prodives space for: + For now it provides space for: - one const pointer attributes - one unt32 attribute */ diff --git a/sql/sql_type_int.h b/sql/sql_type_int.h index e015e989da3..b8ebf7d4d42 100644 --- a/sql/sql_type_int.h +++ b/sql/sql_type_int.h @@ -321,7 +321,7 @@ public: Value range: -ULONGLONG_MAX .. +ULONGLONG_MAX. Provides a wider range for negative numbers than Longlong_hybrid does. - Usefull to store intermediate results of an expression whose value + Useful to store intermediate results of an expression whose value is further needed to be negated. For example, these methods: - Item_func_mul::int_op() - Item_func_int_div::val_int() diff --git a/sql/sql_type_json.cc b/sql/sql_type_json.cc index 27072de2d55..c3dfd79668f 100644 --- a/sql/sql_type_json.cc +++ b/sql/sql_type_json.cc @@ -43,7 +43,7 @@ Named_type_handler const Type_handler * Type_handler_json_common::json_type_handler_from_generic(const Type_handler *th) { - // Test in the order of likelyhood. 
+ // Test in the order of likelihood. if (th == &type_handler_long_blob) return &type_handler_long_blob_json; if (th == &type_handler_varchar) diff --git a/sql/sql_udf.cc b/sql/sql_udf.cc index 149e82bacae..70951d456dd 100644 --- a/sql/sql_udf.cc +++ b/sql/sql_udf.cc @@ -85,7 +85,7 @@ static const char *init_syms(udf_func *tmp, char *nm) tmp->func_init= (Udf_func_init) dlsym(tmp->dlhandle, nm); /* - to prefent loading "udf" from, e.g. libc.so + to prevent loading "udf" from, e.g. libc.so let's ensure that at least one auxiliary symbol is defined */ if (!tmp->func_init && !tmp->func_deinit && tmp->type != UDFTYPE_AGGREGATE) @@ -440,7 +440,7 @@ static udf_func *add_udf(LEX_CSTRING *name, Item_result ret, const char *dl, @param table table of mysql.func @retval TRUE found - @retral FALSE not found + @retval FALSE not found */ static bool find_udf_in_table(const LEX_CSTRING &exact_name, TABLE *table) diff --git a/sql/sql_union.cc b/sql/sql_union.cc index 8996a20bd24..a2e8c58ba14 100644 --- a/sql/sql_union.cc +++ b/sql/sql_union.cc @@ -1686,7 +1686,7 @@ bool st_select_lex_unit::prepare(TABLE_LIST *derived_arg, /* setup_tables_done_option should be set only for very first SELECT, - because it protect from secont setup_tables call for select-like non + because it protects from second setup_tables call for select-like non select commands (DELETE/INSERT/...) and they use only very first SELECT (for union it can be only INSERT ... SELECT). */ @@ -1855,7 +1855,7 @@ cont: TMP_TABLE_ALL_COLUMNS); /* Force the temporary table to be a MyISAM table if we're going to use - fullext functions (MATCH ... AGAINST .. IN BOOLEAN MODE) when reading + fulltext functions (MATCH ... AGAINST .. IN BOOLEAN MODE) when reading from it (this should be removed in 5.2 when fulltext search is moved out of MyISAM). 
*/ @@ -2240,7 +2240,7 @@ bool st_select_lex_unit::optimize() { if (item->assigned()) { - item->assigned(0); // We will reinit & rexecute unit + item->assigned(0); // We will reinit & reexecute unit item->reset(); } if (table->is_created()) diff --git a/sql/sql_update.cc b/sql/sql_update.cc index f847e0d3d9e..42172a5947e 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -61,7 +61,7 @@ bool records_are_comparable(const TABLE *table) { /** - Compares the input and outbut record buffers of the table to see if a row + Compares the input and output record buffers of the table to see if a row has changed. @return true if row has changed. @@ -254,7 +254,7 @@ static void prepare_record_for_error_message(int error, TABLE *table) /* Only duplicate key errors print the key value. - If storage engine does always read all columns, we have the value alraedy. + If storage engine does always read all columns, we have the value already. */ if ((error != HA_ERR_FOUND_DUPP_KEY) || !(table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ)) @@ -1682,7 +1682,7 @@ bool Multiupdate_prelocking_strategy::handle_end(THD *thd) for (tl= table_list; tl ; tl= tl->next_local) if (tl->view) break; - // ... and pass this knowlage in check_fields call + // ... and pass this knowledge in check_fields call if (check_fields(thd, table_list, *fields, tl != NULL )) DBUG_RETURN(1); @@ -1810,7 +1810,7 @@ multi_update::multi_update(THD *thd_arg, updated_sys_ver(0), tables_to_update(get_table_map(fields)) { - // Defer error reporting to multi_update::init whne tables_to_update is zero + // Defer error reporting to multi_update::init when tables_to_update is zero // because we don't have exceptions and we can't return values from a constructor. 
} diff --git a/sql/sql_view.cc b/sql/sql_view.cc index 2e0240f6b3d..01d7a056c89 100644 --- a/sql/sql_view.cc +++ b/sql/sql_view.cc @@ -2220,10 +2220,10 @@ bool insert_view_fields(THD *thd, List *list, TABLE_LIST *view) SINOPSYS view_checksum() - thd threar handler + thd thread handler view view for check - RETUIRN + RETURN HA_ADMIN_OK OK HA_ADMIN_NOT_IMPLEMENTED it is not VIEW HA_ADMIN_WRONG_CHECKSUM check sum is wrong diff --git a/sql/sql_window.cc b/sql/sql_window.cc index 3ab5abf4658..c36fba7d719 100644 --- a/sql/sql_window.cc +++ b/sql/sql_window.cc @@ -261,7 +261,7 @@ setup_windows(THD *thd, Ref_ptr_array ref_pointer_array, TABLE_LIST *tables, } /* For "win_func() OVER (ORDER BY order_list RANGE BETWEEN ...)", - - ORDER BY order_list must not be ommitted + - ORDER BY order_list must not be omitted - the list must have a single element. But it really only matters if the frame is bounded. */ @@ -985,7 +985,7 @@ public: } private: - /* The table that is acccesed by this cursor. */ + /* The table that is accessed by this cursor. */ TABLE *table; /* Buffer where to store the table's record data. */ uchar *record; @@ -3335,7 +3335,7 @@ bool st_select_lex::add_window_func(Item_window_func *win_func) // over (partition by a, order by x) && over (order by x). // // The first function requires an ordering by a first and then by x, - // while the seond function requires an ordering by x first. + // while the second function requires an ordering by x first. // The same restriction is not required for the order by clause. if (largest_partition.elements && !spec->partition_list.elements) { diff --git a/sql/sql_window.h b/sql/sql_window.h index 79a185edb6d..7009b8895a6 100644 --- a/sql/sql_window.h +++ b/sql/sql_window.h @@ -190,7 +190,7 @@ class Frame_cursor; /* This handles computation of one window function. - Currently, we make a spearate filesort() call for each window function. + Currently, we make a separate filesort() call for each window function. 
*/ class Window_func_runner : public Sql_alloc @@ -240,7 +240,7 @@ class Explain_aggr_window_funcs; This is a "window function computation phase": a single object of this class takes care of computing all window functions in a SELECT. - - JOIN optimizer is exected to call setup() during query optimization. + - JOIN optimizer is expected to call setup() during query optimization. - JOIN::exec() should call exec() once it has collected join output in a temporary table. */ diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc index c8f6bb6b746..99c83007484 100644 --- a/sql/sys_vars.cc +++ b/sql/sys_vars.cc @@ -632,11 +632,11 @@ static bool binlog_format_check(sys_var *self, THD *thd, set_var *var) binlog_format_names[var->save_result.ulonglong_value]); /* We allow setting up binlog_format other then ROW for session scope when - wsrep/flasback is enabled.This is done because of 2 reasons + wsrep/flashback is enabled. This is done because of 2 reasons 1. User might want to run pt-table-checksum. 2. SuperUser knows what is doing :-) - For refrence:- MDEV-7322 + For reference:- MDEV-7322 */ if (var->type == OPT_GLOBAL) { @@ -1165,7 +1165,7 @@ static bool event_scheduler_update(sys_var *self, THD *thd, enum_var_type type) start/stop, there is a possibility that the server variable can become out of sync with the real event scheduler state. - This can happen with two concurrent statments if the first gets + This can happen with two concurrent statements if the first gets interrupted after start/stop but before retaking LOCK_global_system_variables. 
However, this problem should be quite rare and it's difficult to avoid it without opening up possibilities @@ -5671,7 +5671,7 @@ bool Sys_var_rpl_filter::set_filter_value(const char *value, Master_info *mi) bool status= true; Rpl_filter* rpl_filter= mi->rpl_filter; - /* Proctect against other threads */ + /* Protect against other threads */ mysql_mutex_lock(&LOCK_active_mi); switch (opt_id) { case OPT_REPLICATE_REWRITE_DB: @@ -6761,7 +6761,7 @@ static const char *default_regex_flags_names[]= "DOTALL", // (?s) . matches anything including NL "DUPNAMES", // (?J) Allow duplicate names for subpatterns "EXTENDED", // (?x) Ignore white space and # comments - "EXTENDED_MORE",//(?xx) Ignore white space and # comments inside cheracter + "EXTENDED_MORE",//(?xx) Ignore white space and # comments inside character "EXTRA", // means nothing since PCRE2 "MULTILINE", // (?m) ^ and $ match newlines within data "UNGREEDY", // (?U) Invert greediness of quantifiers @@ -6817,7 +6817,7 @@ static Sys_var_ulong Sys_log_slow_rate_limit( /* Full is not needed below anymore as one can set all bits with '= ALL', but - we need it for compatiblity with earlier versions. + we need it for compatibility with earlier versions. */ static const char *log_slow_verbosity_names[]= { "innodb", "query_plan", "explain", "engine", "warnings", "full", 0}; diff --git a/sql/table.cc b/sql/table.cc index 76f706d849c..83bfbbb111b 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -2916,7 +2916,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, /* If the key column is of NOT NULL BLOB type, then it - will definitly have key prefix. And if key part prefix size + will definitely have key prefix. And if key part prefix size is equal to the BLOB column max size, then we can promote it to primary key. 
*/ @@ -2938,8 +2938,8 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, Make sure that the primary key is not marked as IGNORE This can happen in the case 1) when IGNORE is mentioned in the Key specification - 2) When a unique NON-NULLABLE key is promted to a primary key. - The unqiue key could have been marked as IGNORE when there + 2) When a unique NON-NULLABLE key is promoted to a primary key. + The unique key could have been marked as IGNORE when there was a primary key in the table. Eg: @@ -2947,7 +2947,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, so for this table when we try to IGNORE key1 then we run: ALTER TABLE t1 ALTER INDEX key1 IGNORE - this runs successsfully and key1 is marked as IGNORE. + this runs successfully and key1 is marked as IGNORE. But lets say then we drop the primary key ALTER TABLE t1 DROP PRIMARY @@ -4984,7 +4984,7 @@ void open_table_error(TABLE_SHARE *share, enum open_frm_error error, /* ** fix a str_type to a array type - ** typeparts separated with some char. differents types are separated + ** typeparts separated with some char. 
different types are separated ** with a '\0' */ @@ -6256,10 +6256,10 @@ bool TABLE_LIST::setup_underlying(THD *thd) TABLE_LIST::prep_where() thd - thread handler conds - condition of this JOIN - no_where_clause - do not build WHERE or ON outer qwery do not need it + no_where_clause - do not build WHERE or ON outer query do not need it (it is INSERT), we do not need conds if this flag is set - NOTE: have to be called befor CHECK OPTION preparation, because it makes + NOTE: have to be called before CHECK OPTION preparation, because it makes fix_fields for view WHERE clause RETURN @@ -6904,7 +6904,7 @@ TABLE_LIST *TABLE_LIST::last_leaf_for_name_resolution() SYNOPSIS register_want_access() - want_access Acess which we require + want_access Access which we require */ void TABLE_LIST::register_want_access(privilege_t want_access) @@ -7014,7 +7014,7 @@ Security_context *TABLE_LIST::find_view_security_context(THD *thd) } if (upper_view) { - DBUG_PRINT("info", ("Securety context of view %s will be used", + DBUG_PRINT("info", ("Security context of view %s will be used", upper_view->alias.str)); sctx= upper_view->view_sctx; DBUG_ASSERT(sctx); @@ -7030,7 +7030,7 @@ Security_context *TABLE_LIST::find_view_security_context(THD *thd) /* - Prepare security context and load underlying tables priveleges for view + Prepare security context and load underlying tables privileges for view SYNOPSIS TABLE_LIST::prepare_security() @@ -7416,12 +7416,12 @@ void Field_iterator_table_ref::set_field_iterator() If the table reference we are iterating over is a natural join, or it is an operand of a natural join, and TABLE_LIST::join_columns contains all the columns of the join operand, then we pick the columns from - TABLE_LIST::join_columns, instead of the orginial container of the + TABLE_LIST::join_columns, instead of the original container of the columns of the join operator. */ if (table_ref->is_join_columns_complete) { - /* Necesary, but insufficient conditions. 
*/ + /* Necessary, but insufficient conditions. */ DBUG_ASSERT(table_ref->is_natural_join || table_ref->nested_join || (table_ref->join_columns && @@ -7549,7 +7549,7 @@ GRANT_INFO *Field_iterator_table_ref::grant() created natural join column. The former happens for base tables or views, and the latter for natural/using joins. If a new field is created, then the field is added to 'parent_table_ref' if it is - given, or to the original table referene of the field if + given, or to the original table reference of the field if parent_table_ref == NULL. NOTES @@ -7564,7 +7564,7 @@ GRANT_INFO *Field_iterator_table_ref::grant() fields. This is OK because for such table references Field_iterator_table_ref iterates over the fields of the nested table references (recursively). In this way we avoid the storage - of unnecessay copies of result columns of nested joins. + of unnecessary copies of result columns of nested joins. RETURN # Pointer to a column of a natural join (or its operand) @@ -8311,7 +8311,7 @@ bool TABLE::check_virtual_columns_marked_for_write() This is done once for the TABLE_SHARE the first time the table is opened. The marking must be done non-destructively to handle the case when - this could be run in parallely by two threads + this could be run in parallel by two threads */ void TABLE::mark_columns_used_by_virtual_fields(void) @@ -8582,7 +8582,7 @@ void TABLE::create_key_part_by_field(KEY_PART_INFO *key_part_info, imposed on the keys of any temporary table. We need to filter out BLOB columns here, because ref access optimizer creates - KEYUSE objects for equalities for non-key columns for two puproses: + KEYUSE objects for equalities for non-key columns for two purposes: 1. To discover possible keys for derived_with_keys optimization 2. To do hash joins For the purpose of #1, KEYUSE objects are not created for "blob_column=..." . @@ -9911,7 +9911,7 @@ err_killed: @detail Reset const_table flag for this table. 
If this table is a merged derived - table/view the flag is recursively reseted for all tables of the underlying + table/view the flag is recursively reset for all tables of the underlying select. */ diff --git a/sql/table.h b/sql/table.h index dfcf6c03fc7..f398fe697d1 100644 --- a/sql/table.h +++ b/sql/table.h @@ -770,7 +770,7 @@ struct TABLE_SHARE LEX_CSTRING comment; /* Comment about table */ CHARSET_INFO *table_charset; /* Default charset of string fields */ - MY_BITMAP *check_set; /* Fields used by check constrant */ + MY_BITMAP *check_set; /* Fields used by check constraint */ MY_BITMAP all_set; /* Key which is used for looking-up table in table cache and in the list @@ -1270,7 +1270,7 @@ public: truncated_value= false; } /** - Fuction creates duplicate of 'from' + Function creates duplicate of 'from' string in 'storage' MEM_ROOT. @param from string to copy @@ -1488,7 +1488,7 @@ public: select max(col1), col2 from t1. In this case, the query produces one row with all columns having NULL values. - Interpetation: If maybe_null!=0, all fields of the table are considered + Interpretation: If maybe_null!=0, all fields of the table are considered NULLable (and have NULL values when null_row=true) */ uint maybe_null; @@ -2665,7 +2665,7 @@ struct TABLE_LIST For the @c TABLE_LIST representing the derived table @c b, @c derived points to the SELECT_LEX_UNIT representing the result of the query within - parenteses. + parentheses. - Views. This is set for views with @verbatim ALGORITHM = TEMPTABLE @endverbatim by mysql_make_view(). 
@@ -2823,7 +2823,7 @@ struct TABLE_LIST bool updating; /* for replicate-do/ignore table */ bool ignore_leaves; /* preload only non-leaf nodes */ bool crashed; /* Table was found crashed */ - bool skip_locked; /* Skip locked in view defination */ + bool skip_locked; /* Skip locked in view definition */ table_map dep_tables; /* tables the table depends on */ table_map on_expr_dep_tables; /* tables on expression depends on */ struct st_nested_join *nested_join; /* if the element is a nested join */ @@ -3712,7 +3712,7 @@ public: */ enum_tx_isolation iso_level() const; /** - Stores transactioin isolation level to internal TABLE object. + Stores transaction isolation level to internal TABLE object. */ void store_iso_level(enum_tx_isolation iso_level) { diff --git a/sql/table_cache.cc b/sql/table_cache.cc index b804a3e0627..8a569fd5adb 100644 --- a/sql/table_cache.cc +++ b/sql/table_cache.cc @@ -159,7 +159,7 @@ struct Table_cache_instance /** Lock table cache mutex and check contention. - Instance is considered contested if more than 20% of mutex acquisiotions + Instance is considered contested if more than 20% of mutex acquisitions can't be served immediately. Up to 100 000 probes may be performed to avoid instance activation on short sporadic peaks. 100 000 is estimated maximum number of queries one instance can serve in one second. @@ -168,8 +168,8 @@ struct Table_cache_instance system, that is expected number of instances is activated within reasonable warmup time. It may have to be adjusted for other systems. - Only TABLE object acquistion is instrumented. We intentionally avoid this - overhead on TABLE object release. All other table cache mutex acquistions + Only TABLE object acquisition is instrumented. We intentionally avoid this + overhead on TABLE object release. All other table cache mutex acquisitions are considered out of hot path and are not instrumented either. 
*/ void lock_and_check_contention(uint32_t n_instances, uint32_t instance) @@ -300,7 +300,7 @@ static void tc_remove_all_unused_tables(TDC_element *element, - free resources related to unused objects @note This is called by 'handle_manager' when one wants to - periodicly flush all not used tables. + periodically flush all not used tables. */ static my_bool tc_purge_callback(void *_element, void *_purge_tables) @@ -539,7 +539,7 @@ static void tdc_delete_share_from_hash(TDC_element *element) /** - Prepeare table share for use with table definition cache. + Prepare table share for use with table definition cache. */ static void lf_alloc_constructor(uchar *arg) diff --git a/sql/temporary_tables.cc b/sql/temporary_tables.cc index f145e581b3f..20ce8a50f1b 100644 --- a/sql/temporary_tables.cc +++ b/sql/temporary_tables.cc @@ -610,7 +610,7 @@ bool THD::rename_temporary_table(TABLE *table, @param is_trans [OUT] Is set to the type of the table: transactional (e.g. innodb) as true or non-transactional (e.g. myisam) as false. - @paral delete_table [IN] Whether to delete the table files? + @param delete_table [IN] Whether to delete the table files? @return false Table was dropped true Error @@ -1611,7 +1611,7 @@ void THD::close_unused_temporary_table_instances(const TABLE_LIST *tl) /* Note: removing current list element doesn't invalidate iterator. */ share->all_tmp_tables.remove(table); /* - At least one instance should be left (guaratead by calling this + At least one instance should be left (guaranteed by calling this function for table which is opened and the table is under processing) */ DBUG_ASSERT(share->all_tmp_tables.front()); diff --git a/sql/threadpool_generic.cc b/sql/threadpool_generic.cc index 4dd9faddd7e..2f5bcae6ac1 100644 --- a/sql/threadpool_generic.cc +++ b/sql/threadpool_generic.cc @@ -662,7 +662,7 @@ void check_stall(thread_group_t *thread_group) Q : Will this handling lead to an unbound growth of threads, if queue stalls permanently? A : No. 
If queue stalls permanently, it is an indication for many very long - simultaneous queries. The maximum number of simultanoues queries is + simultaneous queries. The maximum number of simultaneous queries is max_connections, further we have threadpool_max_threads limit, upon which no worker threads are created. So in case there is a flood of very long queries, threadpool would slowly approach thread-per-connection behavior. @@ -750,7 +750,7 @@ static TP_connection_generic * listener(worker_thread_t *current_thread, /* We got some network events and need to make decisions : whether - listener hould handle events and whether or not any wake worker + listener should handle events and whether or not to wake any worker threads so they can handle events. Q1 : Should listener handle an event itself, or put all events into @@ -1753,7 +1753,7 @@ static void print_pool_blocked_message(bool max_threads_reached) sql_print_information("Threadpool has been blocked for %u seconds\n", (uint)((now- pool_block_start)/1000000)); - /* avoid reperated messages for the same blocking situation */ + /* avoid repeated messages for the same blocking situation */ msg_written= true; } } diff --git a/sql/threadpool_winsockets.h b/sql/threadpool_winsockets.h index ca2068b759d..ce1d041063b 100644 --- a/sql/threadpool_winsockets.h +++ b/sql/threadpool_winsockets.h @@ -46,7 +46,7 @@ struct win_aiosocket /** - Begins asynchronnous reading from socket/pipe. + Begins asynchronous reading from socket/pipe. On IO completion, pre-read some bytes into internal buffer */ DWORD begin_read(); diff --git a/sql/transaction.cc b/sql/transaction.cc index 0dd5e1bebab..432943aae19 100644 --- a/sql/transaction.cc +++ b/sql/transaction.cc @@ -440,7 +440,7 @@ bool trans_rollback_implicit(THD *thd) res= ha_rollback_trans(thd, true); /* We don't reset OPTION_BEGIN flag below to simulate implicit start - of new transacton in @@autocommit=1 mode. This is necessary to + of new transaction in @@autocommit=1 mode. 
This is necessary to preserve backward compatibility. */ thd->variables.option_bits&= ~(OPTION_BINLOG_THIS_TRX); diff --git a/sql/tztime.cc b/sql/tztime.cc index 60e69afca59..469edbb3636 100644 --- a/sql/tztime.cc +++ b/sql/tztime.cc @@ -391,7 +391,7 @@ prepare_tz_info(TIME_ZONE_INFO *sp, MEM_ROOT *storage) (next_leap_idx < sp->leapcnt) ? sp->lsis[next_leap_idx].ls_trans - 1: MY_TIME_T_MAX); /* - again assuming that end_t can be overlowed only in positive side + again assuming that end_t can be overflowed only in positive side we also assume that end_t won't be overflowed in this case. */ if (cur_off_and_corr > 0 && @@ -692,7 +692,7 @@ find_transition_type(my_time_t t, const TIME_ZONE_INFO *sp) (60th and 61st second, look how we calculate them as "hit" in this function). Under realistic assumptions about frequency of transitions the same array - can be used fot MYSQL_TIME -> my_time_t conversion. For this we need to + can be used for MYSQL_TIME -> my_time_t conversion. For this we need to implement tweaked binary search which will take into account that some MYSQL_TIME has two matching my_time_t ranges and some of them have none. */ @@ -812,7 +812,7 @@ sec_since_epoch(int year, int mon, int mday, int hour, int min ,int sec) DESCRIPTION This is mktime analog for MySQL. It is essentially different - from mktime (or hypotetical my_mktime) because: + from mktime (or hypothetical my_mktime) because: - It has no idea about tm_isdst member so if it has two answers it will give the smaller one - If we are in spring time gap then it will return @@ -849,8 +849,8 @@ sec_since_epoch(int year, int mon, int mday, int hour, int min ,int sec) my_time_t conversion. It is piecewise linear function which is defined by combination of transition times as break points and times offset as changing function parameter. 
The possible inverse function for this - converison would be ambiguos but with MySQL's restrictions we can use - some function which is the same as inverse function on unambigiuos + conversion would be ambiguous but with MySQL's restrictions we can use + some function which is the same as inverse function on unambiguous ranges and coincides with one of branches of inverse function in other ranges. Thus we just need to build table which will determine this shifted my_time_t -> my_time_t conversion similar to existing @@ -1003,7 +1003,7 @@ static const String tz_SYSTEM_name("SYSTEM", 6, &my_charset_latin1); were no explicit time zone specified. On the other hand because of this conversion methods provided by this class is significantly slower and possibly less multi-threaded-friendly than corresponding Time_zone_db - methods so the latter should be preffered there it is possible. + methods so the latter should be preferred there if it is possible. */ class Time_zone_system : public Time_zone { @@ -1489,7 +1489,7 @@ static mysql_mutex_t tz_LOCK; static bool tz_inited= 0; /* - This two static variables are inteded for holding info about leap seconds + These two static variables are intended for holding info about leap seconds shared by all time zones. */ static uint tz_leapcnt= 0; @@ -2095,7 +2095,7 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables) /* We have to allow HA_ERR_KEY_NOT_FOUND because some time zones - for example UTC have no transitons. + for example UTC have no transitions. 
*/ if (res != HA_ERR_END_OF_FILE && res != HA_ERR_KEY_NOT_FOUND) { diff --git a/sql/tztime.h b/sql/tztime.h index fc5905377c3..54d70d418cb 100644 --- a/sql/tztime.h +++ b/sql/tztime.h @@ -61,7 +61,7 @@ public: virtual void get_timezone_information(struct my_tz* curr_tz, const MYSQL_TIME *local_TIME) const = 0; /** - We need this only for surpressing warnings, objects of this type are + We need this only for suppressing warnings, objects of this type are allocated on MEM_ROOT and should not require destruction. */ virtual ~Time_zone() = default; @@ -88,7 +88,7 @@ extern my_time_t sec_since_epoch_TIME(MYSQL_TIME *t); /** Number of elements in table list produced by my_tz_get_table_list() (this table list contains tables which are needed for dynamical loading - of time zone descriptions). Actually it is imlementation detail that + of time zone descriptions). Actually it is an implementation detail that should not be used anywhere outside of tztime.h and tztime.cc. */ diff --git a/sql/unireg.cc b/sql/unireg.cc index fca005cc990..a9f1f618010 100644 --- a/sql/unireg.cc +++ b/sql/unireg.cc @@ -20,7 +20,7 @@ Functions to create a unireg form-file from a FIELD and a fieldname-fieldinfo struct. In the following functions FIELD * is an ordinary field-structure with - the following exeptions: + the following exceptions: sc_length,typepos,row,kol,dtype,regnr and field need not to be set. str is a (long) to record position where 0 is the first position. */ diff --git a/sql/unireg.h b/sql/unireg.h index 7a7e97c082c..44a9c8f5289 100644 --- a/sql/unireg.h +++ b/sql/unireg.h @@ -131,7 +131,7 @@ #define OPEN_TRIGGER_ONLY (1 << 21) /** - This flag is used in information schema to determine if handling funciton + This flag is used in information schema to determine if handling function can treat open result extensively and provide some user output even if table open fails. 
*/ diff --git a/sql/winmain.cc b/sql/winmain.cc index ff409a6d492..694e62e87d5 100644 --- a/sql/winmain.cc +++ b/sql/winmain.cc @@ -281,7 +281,7 @@ __declspec(dllexport) int mysqld_win_main(int argc, char **argv) /* Register/remove services functionality. This is kept for backward compatibility only, and is - superseeded by much more versatile mysql_install_db.exe + superseded by much more versatile mysql_install_db.exe "mysqld --remove=svc" has no advantage over OS own "sc delete svc" diff --git a/sql/winservice.c b/sql/winservice.c index c5fba81051c..c59e6750812 100644 --- a/sql/winservice.c +++ b/sql/winservice.c @@ -156,7 +156,7 @@ static int fix_and_check_datadir(mysqld_service_properties *props) Note that this function carefully avoids using mysql libraries (e.g dbug), since it is used in unusual environments (windows installer, MFC), where we do not have much control over how threads are created and destroyed, so we - cannot assume MySQL thread initilization here. + cannot assume MySQL thread initialization here. */ int get_mysql_service_properties(const wchar_t *bin_path, mysqld_service_properties *props) diff --git a/sql/wsrep_client_service.cc b/sql/wsrep_client_service.cc index 4c172d8804b..5f73133f56f 100644 --- a/sql/wsrep_client_service.cc +++ b/sql/wsrep_client_service.cc @@ -285,7 +285,7 @@ enum wsrep::provider::status Wsrep_client_service::replay() // Replace the security context of the replayer with the security context // of the original THD. Since security context class doesn't have proper // copy constructors, we need to store the original one and set it back - // before destruction so that THD desctruction doesn't cause double-free + // before destruction so that THD destruction doesn't cause double-free // on the replaced security context. 
Security_context old_ctx = replayer_thd->main_security_ctx; replayer_thd->main_security_ctx = m_thd->main_security_ctx; @@ -368,7 +368,7 @@ int Wsrep_client_service::bf_rollback() wsrep_thd_transaction_state_str(m_thd), m_thd->killed); - /* If client is quiting all below will be done in THD::cleanup() + /* If client is quitting all below will be done in THD::cleanup() TODO: why we need this any other case? */ if (m_thd->wsrep_cs().state() != wsrep::client_state::s_quitting) { diff --git a/sql/wsrep_client_service.h b/sql/wsrep_client_service.h index f53d9be083d..c8522ea1439 100644 --- a/sql/wsrep_client_service.h +++ b/sql/wsrep_client_service.h @@ -15,7 +15,7 @@ /** @file wsrep_client_service.h - This file provides declaratios for client service implementation. + This file provides declarations for client service implementation. See wsrep/client_service.hpp for interface documentation. */ diff --git a/sql/wsrep_mysqld.cc b/sql/wsrep_mysqld.cc index 3e5b9f1ea15..d8308652112 100644 --- a/sql/wsrep_mysqld.cc +++ b/sql/wsrep_mysqld.cc @@ -1022,7 +1022,7 @@ void wsrep_init_startup (bool sst_first) either SST was not necessary or SST has been delivered. With mysqldump SST (!sst_first) wait until the server reaches - joiner state and procedd to accepting connections. + joiner state and proceed to accepting connections. 
*/ int err= 0; if (sst_first) diff --git a/sql/wsrep_mysqld.h b/sql/wsrep_mysqld.h index 6cd8d6cea95..39c7a42d230 100644 --- a/sql/wsrep_mysqld.h +++ b/sql/wsrep_mysqld.h @@ -166,7 +166,7 @@ int wsrep_show_ready(THD *thd, SHOW_VAR *var, void *buff, void wsrep_free_status(THD *thd); void wsrep_update_cluster_state_uuid(const char* str); -/* Filters out --wsrep-new-cluster oprtion from argv[] +/* Filters out --wsrep-new-cluster option from argv[] * should be called in the very beginning of main() */ void wsrep_filter_new_cluster (int* argc, char* argv[]); diff --git a/sql/wsrep_schema.h b/sql/wsrep_schema.h index 0a2e37ba1b3..5013e2bdead 100644 --- a/sql/wsrep_schema.h +++ b/sql/wsrep_schema.h @@ -124,7 +124,7 @@ class Wsrep_schema /** Recover streaming transactions from SR table. - This method should be called after storage enignes are initialized. + This method should be called after storage engines are initialized. It will scan SR table and replay found streaming transactions. @param orig_thd The THD object of the calling thread. diff --git a/sql/wsrep_sst.cc b/sql/wsrep_sst.cc index 7097853e61b..1729f202cf9 100644 --- a/sql/wsrep_sst.cc +++ b/sql/wsrep_sst.cc @@ -1414,7 +1414,7 @@ std::string wsrep_sst_prepare() if (is_ipv6) { - /* wsrep_sst_*.sh scripts requite ipv6 addreses to be in square breackets */ + /* wsrep_sst_*.sh scripts require ipv6 addresses to be in square brackets */ ip_buf[0] = '['; /* the length (len) already includes the null byte: */ memcpy(ip_buf + 1, address, len - 1); @@ -1467,7 +1467,7 @@ std::string wsrep_sst_prepare() "replaced by %s", method, WSREP_SST_MARIABACKUP); method = WSREP_SST_MARIABACKUP; } - // we already did SST at initializaiton, now engines are running + // we already did SST at initialization, now engines are running // sql_print_information() is here because the message is too long // for WSREP_INFO. 
sql_print_information ("WSREP: " @@ -2338,7 +2338,7 @@ int wsrep_sst_donate(const std::string& msg, sst_auth auth; if (sst_auth_real) { - /* User supplied non-trivial wsre_sst_auth, use it */ + /* User supplied non-trivial wsrep_sst_auth, use it */ const char* col= sst_strchrnul(sst_auth_real, ':'); auth.name_ = std::string(sst_auth_real, col - sst_auth_real); auth.pswd_ = std::string(':' == *col ? col + 1 : ""); diff --git a/sql/wsrep_sst.h b/sql/wsrep_sst.h index 4ac7eba858c..929a6daa8cc 100644 --- a/sql/wsrep_sst.h +++ b/sql/wsrep_sst.h @@ -76,7 +76,7 @@ extern void wsrep_sst_auth_free(); extern void wsrep_SE_init_grab(); /*! grab init critical section */ extern void wsrep_SE_init_wait(); /*! wait for SE init to complete */ -extern void wsrep_SE_init_done(); /*! signal that SE init is complte */ +extern void wsrep_SE_init_done(); /*! signal that SE init is complete */ extern void wsrep_SE_initialized(); /*! mark SE initialization complete */ /** diff --git a/sql/wsrep_storage_service.cc b/sql/wsrep_storage_service.cc index 4885fd9f7e6..97fa4bfc096 100644 --- a/sql/wsrep_storage_service.cc +++ b/sql/wsrep_storage_service.cc @@ -80,7 +80,7 @@ int Wsrep_storage_service::start_transaction(const wsrep::ws_handle& ws_handle) { DBUG_ENTER("Wsrep_storage_service::start_transaction"); DBUG_ASSERT(m_thd == current_thd); - DBUG_PRINT("info", ("Wsrep_storage_service::start_transcation(%llu, %p)", + DBUG_PRINT("info", ("Wsrep_storage_service::start_transaction(%llu, %p)", m_thd->thread_id, m_thd)); m_thd->set_wsrep_next_trx_id(ws_handle.transaction_id().get()); DBUG_RETURN(m_thd->wsrep_cs().start_transaction( diff --git a/sql/wsrep_trans_observer.h b/sql/wsrep_trans_observer.h index 25e71638efd..6c142b79634 100644 --- a/sql/wsrep_trans_observer.h +++ b/sql/wsrep_trans_observer.h @@ -165,7 +165,7 @@ static inline int wsrep_start_trx_if_not_started(THD* thd) /* Called after each row operation. - Return zero on succes, non-zero on failure. 
+ Return zero on success, non-zero on failure. */ static inline int wsrep_after_row_internal(THD* thd) { @@ -253,7 +253,7 @@ static inline bool wsrep_run_commit_hook(THD* thd, bool all) /* Called before the transaction is prepared. - Return zero on succes, non-zero on failure. + Return zero on success, non-zero on failure. */ static inline int wsrep_before_prepare(THD* thd, bool all) { @@ -283,7 +283,7 @@ static inline int wsrep_before_prepare(THD* thd, bool all) /* Called after the transaction has been prepared. - Return zero on succes, non-zero on failure. + Return zero on success, non-zero on failure. */ static inline int wsrep_after_prepare(THD* thd, bool all) { @@ -302,7 +302,7 @@ static inline int wsrep_after_prepare(THD* thd, bool all) This function must be called from both client and applier contexts before commit. - Return zero on succes, non-zero on failure. + Return zero on success, non-zero on failure. */ static inline int wsrep_before_commit(THD* thd, bool all) { @@ -364,7 +364,7 @@ static inline int wsrep_before_commit(THD* thd, bool all) @param all @param err Error buffer in case of applying error - Return zero on succes, non-zero on failure. + Return zero on success, non-zero on failure. */ static inline int wsrep_ordered_commit(THD* thd, bool all) { @@ -378,7 +378,7 @@ static inline int wsrep_ordered_commit(THD* thd, bool all) /* Called after the transaction has been committed. - Return zero on succes, non-zero on failure. + Return zero on success, non-zero on failure. */ static inline int wsrep_after_commit(THD* thd, bool all) { @@ -404,7 +404,7 @@ static inline int wsrep_after_commit(THD* thd, bool all) /* Called before the transaction is rolled back. - Return zero on succes, non-zero on failure. + Return zero on success, non-zero on failure. */ static inline int wsrep_before_rollback(THD* thd, bool all) { @@ -450,7 +450,7 @@ static inline int wsrep_before_rollback(THD* thd, bool all) /* Called after the transaction has been rolled back. 
- Return zero on succes, non-zero on failure. + Return zero on success, non-zero on failure. */ static inline int wsrep_after_rollback(THD* thd, bool all) { diff --git a/sql/wsrep_var.cc b/sql/wsrep_var.cc index 3394ed6913c..16963f17891 100644 --- a/sql/wsrep_var.cc +++ b/sql/wsrep_var.cc @@ -782,7 +782,7 @@ bool wsrep_slave_threads_update (sys_var *self, THD* thd, enum_var_type type) res= wsrep_create_appliers(wsrep_slave_count_change, true); mysql_mutex_unlock(&LOCK_global_system_variables); mysql_mutex_unlock(&LOCK_wsrep_slave_threads); - // Thread creation and execution is asyncronous, therefore we need + // Thread creation and execution is asynchronous, therefore we need // wait them to be started or error produced while (wsrep_running_applier_threads != (ulong)wsrep_slave_threads && !wsrep_thread_create_failed.load(std::memory_order_relaxed)) diff --git a/sql/xa.cc b/sql/xa.cc index 730e312b746..12021f9a815 100644 --- a/sql/xa.cc +++ b/sql/xa.cc @@ -43,7 +43,7 @@ class XID_cache_element { /* m_state is used to prevent elements from being deleted while XA RECOVER - iterates xid cache and to prevent recovered elments from being acquired by + iterates xid cache and to prevent recovered elements from being acquired by multiple threads. bits 1..29 are reference counter