Fix various spelling errors

e.g.
- dont -> don't
- occurence -> occurrence
- succesfully -> successfully
- easyly -> easily

Also remove trailing space in selected files.

These changes span:
- server core
- Connect and Innobase storage engine code
- OQgraph, Sphinx and TokuDB storage engines

Related to MDEV-21769.
Otto Kekäläinen 2020-03-04 18:30:08 +02:00
parent 3c57693ff1
commit c8388de2fd
40 changed files with 80 additions and 80 deletions
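The fixes below are mechanical find-and-replace edits. As an illustration only, here is a minimal sketch (hypothetical; not the tooling actually used for this commit) of how the listed misspellings and trailing whitespace could be flagged in a source tree:

#!/usr/bin/env python3
"""Hypothetical helper: flag the misspellings listed above plus trailing
whitespace. Illustrative only; not the tool used to prepare this commit."""
import pathlib
import re
import sys

# Misspelling -> correction pairs taken from the commit message.
FIXES = {
    "dont": "don't",
    "occurence": "occurrence",
    "succesfully": "successfully",
    "easyly": "easily",
}

def scan(path: pathlib.Path) -> None:
    """Print file:line reports for known misspellings and trailing spaces."""
    for lineno, line in enumerate(path.read_text(errors="replace").splitlines(), 1):
        for bad, good in FIXES.items():
            if re.search(rf"\b{bad}\b", line):
                print(f"{path}:{lineno}: {bad} -> {good}")
        if line != line.rstrip():
            print(f"{path}:{lineno}: trailing whitespace")

if __name__ == "__main__":
    for name in sys.argv[1:]:
        scan(pathlib.Path(name))

Invoked as, for example, python3 check_spelling.py sql/sql_select.cc (the script name is hypothetical), it prints one report line per hit.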


@@ -1,7 +1,7 @@
 SET TIME_ZONE = "+00:00";
 --echo #
---echo # Test of errors for column data types that dont support function
+--echo # Test of errors for column data types that don't support function
 --echo # defaults.
 --echo #


@@ -6,7 +6,7 @@
 #
 SET TIME_ZONE = "+00:00";
 #
-# Test of errors for column data types that dont support function
+# Test of errors for column data types that don't support function
 # defaults.
 #
 CREATE OR REPLACE TABLE t1( a BIT DEFAULT CURRENT_TIMESTAMP );
@@ -1552,7 +1552,7 @@ DROP TABLE t1;
 #
 SET TIME_ZONE = "+00:00";
 #
-# Test of errors for column data types that dont support function
+# Test of errors for column data types that don't support function
 # defaults.
 #
 CREATE OR REPLACE TABLE t1( a BIT DEFAULT CURRENT_TIMESTAMP(6) );


@@ -7,7 +7,7 @@ set default_storage_engine=innodb;
 #
 SET TIME_ZONE = "+00:00";
 #
-# Test of errors for column data types that dont support function
+# Test of errors for column data types that don't support function
 # defaults.
 #
 CREATE OR REPLACE TABLE t1( a BIT DEFAULT CURRENT_TIMESTAMP );
@@ -1553,7 +1553,7 @@ DROP TABLE t1;
 #
 SET TIME_ZONE = "+00:00";
 #
-# Test of errors for column data types that dont support function
+# Test of errors for column data types that don't support function
 # defaults.
 #
 CREATE OR REPLACE TABLE t1( a BIT DEFAULT CURRENT_TIMESTAMP(6) );


@@ -2616,7 +2616,7 @@ DROP TABLE t1, t2;
 --echo # End of 5.3 tests.
 #
-# MDEV-4829 BEFORE INSERT triggers dont issue 1406 error
+# MDEV-4829 BEFORE INSERT triggers don't issue 1406 error
 # Also check timestamp for trigger
 #


@@ -12,9 +12,9 @@
 # iii) On master, do one short time query and one long time query, on slave
 # and check that slow query is logged to slow query log but fast query
 # is not.
-# iv) On slave, check that slow queries go into the slow log and fast dont,
+# iv) On slave, check that slow queries go into the slow log and fast don't,
 # when issued through a regular client connection
-# v) On slave, check that slow queries go into the slow log and fast dont
+# v) On slave, check that slow queries go into the slow log and fast don't
 # when we use SET TIMESTAMP= 1 on a regular client connection.
 # vi) check that when setting slow_query_log= OFF in a connection 'extra2'
 # prevents logging slow queries in a connection 'extra'


@@ -230,7 +230,7 @@ static void do_skip(Copy_field *copy __attribute__((unused)))
 note: if the record we're copying from is NULL-complemetned (i.e.
 from_field->table->null_row==1), it will also have all NULLable columns to be
-set to NULLs, so we dont need to check table->null_row here.
+set to NULLs, so we don't need to check table->null_row here.
 */
 static void do_copy_null(Copy_field *copy)


@@ -174,7 +174,7 @@ void Filesort_buffer::sort_buffer(const Sort_param *param, uint count)
 if (count <= 1 || size == 0)
 return;
-// dont reverse for PQ, it is already done
+// don't reverse for PQ, it is already done
 if (!param->using_pq)
 reverse_record_pointers();


@@ -1313,7 +1313,7 @@ struct st_sp_security_context;
 Item_sum_sp handles STORED AGGREGATE FUNCTIONS
 Each Item_sum_sp represents a custom aggregate function. Inside the
-function's body, we require at least one occurence of FETCH GROUP NEXT ROW
+function's body, we require at least one occurrence of FETCH GROUP NEXT ROW
 instruction. This cursor is what makes custom stored aggregates possible.
 During computation the function's add method is called. This in turn performs
@@ -1341,7 +1341,7 @@ struct st_sp_security_context;
 group is already set in the argument x. This behaviour is done so when
 a user writes a function, he should "logically" include FETCH GROUP NEXT ROW
 before any "add" instructions in the stored function. This means however that
-internally, the first occurence doesn't stop the function. See the
+internally, the first occurrence doesn't stop the function. See the
 implementation of FETCH GROUP NEXT ROW for details as to how it happens.
 Either way, one should assume that after calling "Item_sum_sp::add()" that


@@ -1247,7 +1247,7 @@ int DsMrr_impl::setup_two_handlers()
 scans.
 Calling primary_file->index_end() will invoke dsmrr_close() for this object,
-which will delete secondary_file. We need to keep it, so put it away and dont
+which will delete secondary_file. We need to keep it, so put it away and don't
 let it be deleted:
 */
 if (primary_file->inited == handler::INDEX)


@@ -5760,7 +5760,7 @@ bool prepare_search_best_index_intersect(PARAM *param,
 {
 idx_scan.add("chosen", true);
 if (!*scan_ptr)
-idx_scan.add("cause", "first occurence of index prefix");
+idx_scan.add("cause", "first occurrence of index prefix");
 else
 idx_scan.add("cause", "better cost for same idx prefix");
 *scan_ptr= *index_scan;


@@ -2204,7 +2204,7 @@ int pull_out_semijoin_tables(JOIN *join)
 /*
 Don't do table pull-out for nested joins (if we get nested joins here, it
 means these are outer joins. It is theoretically possible to do pull-out
-for some of the outer tables but we dont support this currently.
+for some of the outer tables but we don't support this currently.
 */
 bool have_join_nest_children= FALSE;


@@ -795,7 +795,7 @@ do_retry:
 else
 {
 /*
-A failure of a preceeding "parent" transaction may not be
+A failure of a preceding "parent" transaction may not be
 seen by the current one through its own worker_error.
 Such induced error gets set by ourselves now.
 */


@@ -2775,7 +2775,7 @@ int Sroutine_hash_entry::sp_cache_routine(THD *thd,
 @param[out] sp Pointer to sp_head object for routine, NULL if routine was
 not found.
-@retval 0 Either routine is found and was succesfully loaded into cache
+@retval 0 Either routine is found and was successfully loaded into cache
 or it does not exist.
 @retval non-0 Error while loading routine from mysql,proc table.
 */


@@ -27,7 +27,7 @@ static ulong volatile Cversion= 1;
 /*
 Cache of stored routines.
 */
 class sp_cache
@@ -149,8 +149,8 @@ void sp_cache_end()
 sp_cache_insert()
 cp The cache to put routine into
 sp Routine to insert.
 TODO: Perhaps it will be more straightforward if in case we returned an
 error from this function when we couldn't allocate sp_cache. (right
 now failure to put routine into cache will cause a 'SP not found'
 error to be reported at some later time)
@@ -173,18 +173,18 @@ void sp_cache_insert(sp_cache **cp, sp_head *sp)
 }
 /*
 Look up a routine in the cache.
 SYNOPSIS
 sp_cache_lookup()
 cp Cache to look into
 name Name of rutine to find
 NOTE
 An obsolete (but not more obsolete then since last
 sp_cache_flush_obsolete call) routine may be returned.
 RETURN
 The routine or
 NULL if the routine not found.
 */
@@ -204,7 +204,7 @@ sp_head *sp_cache_lookup(sp_cache **cp, const Database_qualified_name *name)
 SYNOPSIS
 sp_cache_invalidate()
 NOTE
 This is called when a VIEW definition is created or modified (and in some
 other contexts). We can't destroy sp_head objects here as one may modify
@@ -225,7 +225,7 @@ void sp_cache_invalidate()
 @param[in] sp SP to remove.
 @note This invalidates pointers to sp_head objects this thread
-uses. In practice that means 'dont call this function when
+uses. In practice that means 'don't call this function when
 inside SP'.
 */
@@ -264,7 +264,7 @@ sp_cache_enforce_limit(sp_cache *c, ulong upper_limit_for_elements)
 }
 /*************************************************************************
 Internal functions
 *************************************************************************/
 extern "C" uchar *hash_get_key_for_sp_head(const uchar *ptr, size_t *plen,


@@ -1472,7 +1472,7 @@ sp_head::execute(THD *thd, bool merge_da_on_success)
 /*
 Reset the return code to zero if the transaction was
-replayed succesfully.
+replayed successfully.
 */
 if (must_replay && !wsrep_current_error(thd))
 {


@@ -6808,7 +6808,7 @@ void THD::binlog_prepare_row_images(TABLE *table)
 /**
 if there is a primary key in the table (ie, user declared PK or a
-non-null unique index) and we dont want to ship the entire image,
+non-null unique index) and we don't want to ship the entire image,
 and the handler involved supports this.
 */
 if (table->s->primary_key < MAX_KEY &&


@@ -4683,7 +4683,7 @@ public:
 information to decide the logging format. So that cases we call decide_logging_format_2
 at later stages in execution.
 One example would be binlog format for IODKU but column with unique key is not inserted.
-We dont have inserted columns info when we call decide_logging_format so on later stage we call
+We don't have inserted columns info when we call decide_logging_format so on later stage we call
 decide_logging_format_low
 @returns 0 if no format is changed
@@ -5365,7 +5365,7 @@ public:
 It is aimed at capturing SHOW EXPLAIN output, so:
 - Unlike select_result class, we don't assume that the sent data is an
-output of a SELECT_LEX_UNIT (and so we dont apply "LIMIT x,y" from the
+output of a SELECT_LEX_UNIT (and so we don't apply "LIMIT x,y" from the
 unit)
 - We don't try to convert the target table to MyISAM
 */


@@ -152,7 +152,7 @@ private:
 Item *val;
 /* hit/miss counters */
 ulong hit, miss;
-/* Set on if the object has been succesfully initialized with init() */
+/* Set on if the object has been successfully initialized with init() */
 bool inited;
 };


@@ -5787,7 +5787,7 @@ int st_select_lex_unit::save_union_explain(Explain_query *output)
 eu->connection_type= Explain_node::EXPLAIN_NODE_DERIVED;
 /*
 Note: Non-merged semi-joins cannot be made out of UNIONs currently, so we
-dont ever set EXPLAIN_NODE_NON_MERGED_SJ.
+don't ever set EXPLAIN_NODE_NON_MERGED_SJ.
 */
 for (SELECT_LEX *sl= first; sl; sl= sl->next_select())
 eu->add_select(sl->select_number);


@@ -6403,18 +6403,18 @@ max_part_bit(key_part_map bits)
 /**
 Add a new keuse to the specified array of KEYUSE objects
 @param[in,out] keyuse_array array of keyuses to be extended
 @param[in] key_field info on the key use occurrence
 @param[in] key key number for the keyse to be added
 @param[in] part key part for the keyuse to be added
 @note
 The function builds a new KEYUSE object for a key use utilizing the info
 on the left and right parts of the given key use extracted from the
 structure key_field, the key number and key part for this key use.
 The built object is added to the dynamic array keyuse_array.
-@retval 0 the built object is succesfully added
+@retval 0 the built object is successfully added
 @retval 1 otherwise
 */
@@ -14908,28 +14908,28 @@ bool Item_func_eq::check_equality(THD *thd, COND_EQUAL *cond_equal,
 left_item, right_item, cond_equal);
 }
 /**
 Item_xxx::build_equal_items()
 Replace all equality predicates in a condition referenced by "this"
 by multiple equality items.
 At each 'and' level the function detects items for equality predicates
 and replaced them by a set of multiple equality items of class Item_equal,
 taking into account inherited equalities from upper levels.
 If an equality predicate is used not in a conjunction it's just
 replaced by a multiple equality predicate.
 For each 'and' level the function set a pointer to the inherited
 multiple equalities in the cond_equal field of the associated
 object of the type Item_cond_and.
 The function also traverses the cond tree and and for each field reference
 sets a pointer to the multiple equality item containing the field, if there
 is any. If this multiple equality equates fields to a constant the
 function replaces the field reference by the constant in the cases
 when the field is not of a string type or when the field reference is
 just an argument of a comparison predicate.
 The function also determines the maximum number of members in
 equality lists of each Item_cond_and object assigning it to
 thd->lex->current_select->max_equal_elems.
@@ -14943,7 +14943,7 @@ bool Item_func_eq::check_equality(THD *thd, COND_EQUAL *cond_equal,
 in a conjuction for a minimal set of multiple equality predicates.
 This set can be considered as a canonical representation of the
 sub-conjunction of the equality predicates.
 E.g. (t1.a=t2.b AND t2.b>5 AND t1.a=t3.c) is replaced by
 (=(t1.a,t2.b,t3.c) AND t2.b>5), not by
 (=(t1.a,t2.b) AND =(t1.a,t3.c) AND t2.b>5);
 while (t1.a=t2.b AND t2.b>5 AND t3.c=t4.d) is replaced by
@@ -14954,16 +14954,16 @@ bool Item_func_eq::check_equality(THD *thd, COND_EQUAL *cond_equal,
 The function performs the substitution in a recursive descent by
 the condtion tree, passing to the next AND level a chain of multiple
 equality predicates which have been built at the upper levels.
 The Item_equal items built at the level are attached to other
 non-equality conjucts as a sublist. The pointer to the inherited
 multiple equalities is saved in the and condition object (Item_cond_and).
-This chain allows us for any field reference occurence easyly to find a
-multiple equality that must be held for this occurence.
+This chain allows us for any field reference occurrence easily to find a
+multiple equality that must be held for this occurrence.
 For each AND level we do the following:
 - scan it for all equality predicate (=) items
 - join them into disjoint Item_equal() groups
 - process the included OR conditions recursively to do the same for
 lower AND levels.
 We need to do things in this order as lower AND levels need to know about
 all possible Item_equal objects in upper levels.
@@ -14999,7 +14999,7 @@ COND *Item_cond_and::build_equal_items(THD *thd,
 /*
 Retrieve all conjuncts of this level detecting the equality
 that are subject to substitution by multiple equality items and
 removing each such predicate from the conjunction after having
 found/created a multiple equality whose inference the predicate is.
 */
 while ((item= li++))
@@ -25718,7 +25718,7 @@ void free_underlaid_joins(THD *thd, SELECT_LEX *select)
 ****************************************************************************/
 /**
-Replace occurences of group by fields in an expression by ref items.
+Replace occurrences of group by fields in an expression by ref items.
 The function replaces occurrences of group by fields in expr
 by ref objects for these fields unless they are under aggregate


@@ -3455,7 +3455,7 @@ export sql_mode_t expand_sql_mode(sql_mode_t sql_mode)
 if (sql_mode & MODE_ANSI)
 {
 /*
-Note that we dont set
+Note that we don't set
 MODE_NO_KEY_OPTIONS | MODE_NO_TABLE_OPTIONS | MODE_NO_FIELD_OPTIONS
 to allow one to get full use of MySQL in this mode.


@@ -125,7 +125,7 @@ static inline bool wsrep_streaming_enabled(THD* thd)
 }
 /*
-Return number of fragments succesfully certified for the
+Return number of fragments successfully certified for the
 current statement.
 */
 static inline size_t wsrep_fragments_certified_for_stmt(THD* thd)


@@ -428,7 +428,7 @@ char *ExtractFromPath(PGLOBAL g, char *pBuff, char *FileName, OPVAL op)
 #ifdef NOT_USED
 /***********************************************************************/
-/* Check the occurence and matching of a pattern against a string. */
+/* Check the occurrence and matching of a pattern against a string. */
 /* Because this function is only used for catalog name checking, */
 /* it must be case insensitive. */
 /***********************************************************************/
@@ -572,7 +572,7 @@ bool EvalLikePattern(LPCSTR sp, LPCSTR tp)
 b = (t || !*sp); /* true if % or void strg. */
 else if (!t) {
 /*******************************************************************/
-/* No character to skip, check occurence of <subtring-specifier> */
+/* No character to skip, check occurrence of <subtring-specifier> */
 /* at the very beginning of remaining string. */
 /*******************************************************************/
 if (p) {
@@ -586,7 +586,7 @@ bool EvalLikePattern(LPCSTR sp, LPCSTR tp)
 if (p)
 /*****************************************************************/
 /* Here is the case explaining why we need a recursive routine. */
-/* The test must be done not only against the first occurence */
+/* The test must be done not only against the first occurrence */
 /* of the <substring-specifier> in the remaining string, */
 /* but also with all eventual succeeding ones. */
 /*****************************************************************/


@@ -292,7 +292,7 @@ TDBOCCUR::TDBOCCUR(POCCURDEF tdp) : TDBPRX(tdp)
 Col = NULL; // To source column blocks array
 Mult = PrepareColist(Colist); // Multiplication factor
 N = 0; // The current table index
-M = 0; // The occurence rank
+M = 0; // The occurrence rank
 RowFlag = 0; // 0: Ok, 1: Same, 2: Skip
 } // end of TDBOCCUR constructor
@@ -431,7 +431,7 @@ int TDBOCCUR::GetMaxSize(PGLOBAL g)
 /***********************************************************************/
 /* In this sample, ROWID will be the (virtual) row number, */
-/* while ROWNUM will be the occurence rank in the multiple column. */
+/* while ROWNUM will be the occurrence rank in the multiple column. */
 /***********************************************************************/
 int TDBOCCUR::RowNumber(PGLOBAL, bool b)
 {


@@ -35,7 +35,7 @@ class OCCURDEF : public PRXDEF { /* Logical table description */
 protected:
 // Members
 char *Colist; /* The source column list */
-char *Xcol; /* The multiple occurence column */
+char *Xcol; /* The multiple occurrence column */
 char *Rcol; /* The rank column */
 }; // end of OCCURDEF
@@ -76,12 +76,12 @@ class TDBOCCUR : public TDBPRX {
 PCOL *Col; // To source multiple columns
 int Mult; // Multiplication factor
 int N; // The current table index
-int M; // The occurence rank
+int M; // The occurrence rank
 BYTE RowFlag; // 0: Ok, 1: Same, 2: Skip
 }; // end of class TDBOCCUR
 /***********************************************************************/
-/* Class OCCURCOL: for the multiple occurence column. */
+/* Class OCCURCOL: for the multiple occurrence column. */
 /***********************************************************************/
 class OCCURCOL : public COLBLK {
 public:
@@ -106,7 +106,7 @@ class OCCURCOL : public COLBLK {
 }; // end of class OCCURCOL
 /***********************************************************************/
-/* Class RANKCOL: for the multiple occurence column ranking. */
+/* Class RANKCOL: for the multiple occurrence column ranking. */
 /***********************************************************************/
 class RANKCOL : public COLBLK {
 public:


@@ -404,7 +404,7 @@ TDBPIVOT::TDBPIVOT(PPIVOTDEF tdp) : TDBPRX(tdp)
 Accept = tdp->Accept;
 Mult = -1; // Estimated table size
 N = 0; // The current table index
-M = 0; // The occurence rank
+M = 0; // The occurrence rank
 FileStatus = 0; // Logical End-of-File
 RowFlag = 0; // 0: Ok, 1: Same, 2: Skip
 } // end of TDBPIVOT constructor
@@ -644,7 +644,7 @@ int TDBPIVOT::GetMaxSize(PGLOBAL g __attribute__((unused)))
 /***********************************************************************/
 /* In this sample, ROWID will be the (virtual) row number, */
-/* while ROWNUM will be the occurence rank in the multiple column. */
+/* while ROWNUM will be the occurrence rank in the multiple column. */
 /***********************************************************************/
 int TDBPIVOT::RowNumber(PGLOBAL, bool b)
 {


@@ -138,7 +138,7 @@ class TDBPIVOT : public TDBPRX {
 int Mult; // Multiplication factor
 int Ncol; // The number of generated columns
 int N; // The current table index
-int M; // The occurence rank
+int M; // The occurrence rank
 int Port; // MySQL port number
 BYTE FileStatus; // 0: First 1: Rows 2: End-of-File
 BYTE RowFlag; // 0: Ok, 1: Same, 2: Skip


@@ -536,7 +536,7 @@ int TDBPRX::GetMaxSize(PGLOBAL g)
 /***********************************************************************/
 /* In this sample, ROWID will be the (virtual) row number, */
-/* while ROWNUM will be the occurence rank in the multiple column. */
+/* while ROWNUM will be the occurrence rank in the multiple column. */
 /***********************************************************************/
 int TDBPRX::RowNumber(PGLOBAL g, bool b)
 {


@@ -103,7 +103,7 @@ TDBXCL::TDBXCL(PXCLDEF tdp) : TDBPRX(tdp)
 Xcolp = NULL; // To the XCLCOL column
 Mult = tdp->Mult; // Multiplication factor
 N = 0; // The current table index
-M = 0; // The occurence rank
+M = 0; // The occurrence rank
 RowFlag = 0; // 0: Ok, 1: Same, 2: Skip
 New = TRUE; // TRUE for new line
 Sep = tdp->Sep; // The Xcol separator
@@ -142,7 +142,7 @@ int TDBXCL::GetMaxSize(PGLOBAL g)
 /***********************************************************************/
 /* For this table type, ROWID is the (virtual) row number, */
-/* while ROWNUM is be the occurence rank in the multiple column. */
+/* while ROWNUM is be the occurrence rank in the multiple column. */
 /***********************************************************************/
 int TDBXCL::RowNumber(PGLOBAL, bool b)
 {


@@ -72,7 +72,7 @@ class TDBXCL : public TDBPRX {
 PXCLCOL Xcolp; // To the XCVCOL column
 int Mult; // Multiplication factor
 int N; // The current table index
-int M; // The occurence rank
+int M; // The occurrence rank
 BYTE RowFlag; // 0: Ok, 1: Same, 2: Skip
 bool New; // TRUE for new line
 char Sep; // The Xcol separator


@@ -1436,7 +1436,7 @@ fts_drop_table(
 dict_table_close(table, TRUE, FALSE);
-/* Pass nonatomic=false (dont allow data dict unlock),
+/* Pass nonatomic=false (don't allow data dict unlock),
 because the transaction may hold locks on SYS_* tables from
 previous calls to fts_drop_table(). */
 error = row_drop_table_for_mysql(table_name, trx,


@@ -581,7 +581,7 @@ fts_zip_read_word(
 /* Finished decompressing block. */
 if (zip->zp->avail_in == 0) {
-/* Free the block thats been decompressed. */
+/* Free the block that's been decompressed. */
 if (zip->pos > 0) {
 ulint prev = zip->pos - 1;


@@ -252,7 +252,7 @@ static double _ma_search_pos(MARIA_HA *info, MARIA_KEY *key,
 pages we are counting keys.
 If this is a node then we have to search backwards to find the
-first occurence of the key. The row position in a node tree
+first occurrence of the key. The row position in a node tree
 is keynr (starting from 0) + offset for sub tree. If there is
 no sub tree to search, then we are at start of next sub tree.


@@ -566,7 +566,7 @@ int ha_oqgraph::open(const char *name, int mode, uint test_if_locked)
 // What I think this code is doing:
 // * Our OQGRAPH table is `database_blah/name`
-// * We point p --> /name (or if table happened to be simply `name`, to `name`, dont know if this is possible)
+// * We point p --> /name (or if table happened to be simply `name`, to `name`, don't know if this is possible)
 // * plen seems to be then set to length of `database_blah/options_data_table_name`
 // * then we set share->normalized_path.str and share->path.str to `database_blah/options_data_table_name`
 // * I assume that this verbiage is needed so the memory used by share->path.str is set in the share mem root


@@ -142,7 +142,7 @@ SELECT * FROM graph WHERE latch='-1' and origid is NULL;
 latch origid destid weight seq linkid
 Warnings:
 Warning 1210 Incorrect arguments to OQGRAPH latch
-# Make sure we dont crash if someone passed in a UTF string
+# Make sure we don't crash if someone passed in a UTF string
 SELECT * FROM graph WHERE latch='Ω Ohms Tennis Ball 〄';
 latch origid destid weight seq linkid
 SELECT * FROM graph WHERE latch='Ω Ohms Tennis Ball 〄' and destid=2 and origid=1;


@@ -91,7 +91,7 @@ SELECT * FROM graph WHERE latch='-1' and destid=1;
 SELECT * FROM graph WHERE latch='-1' and origid=666;
 SELECT * FROM graph WHERE latch='-1' and origid is NULL;
---echo # Make sure we dont crash if someone passed in a UTF string
+--echo # Make sure we don't crash if someone passed in a UTF string
 #-- Note the next line couter-intuitively produces no warning
 SELECT * FROM graph WHERE latch='Ω Ohms Tennis Ball 〄';
 SELECT * FROM graph WHERE latch='Ω Ohms Tennis Ball 〄' and destid=2 and origid=1;
@@ -125,7 +125,7 @@ FLUSH TABLES;
 TRUNCATE TABLE graph_base;
 #-- Uncomment the following after fixing https://bugs.launchpad.net/oqgraph/+bug/xxxxxxx - Causes the later select to not fail!
-#-- For now dont report a separate bug as it may be a manifestation of https://bugs.launchpad.net/oqgraph/+bug/1195735
+#-- For now don't report a separate bug as it may be a manifestation of https://bugs.launchpad.net/oqgraph/+bug/1195735
 SELECT * FROM graph;
 #-- Expect error if we pull the table out from under


@@ -26,7 +26,7 @@ CREATE TABLE backing (
 # Here we enable scaffolding to let us create a deprecated table
 # so we can check that the new code will still allow queries to be performed
 # on a legacy database
-# It should still generate a warning (1287) - but I dont know how to test for that
+# It should still generate a warning (1287) - but I don't know how to test for that
 #
 # latch SMALLINT UNSIGNED NULL' is deprecated and will be removed in a future
 # release. Please use 'latch VARCHAR(32) NULL' instead


@@ -13,7 +13,7 @@ CREATE TABLE graph_base (
 # Backwards compatibility test
 # First we ensure the scaffolding is disabled (default situation)
 # and check we cant create a table with an integer latch
-# Assume this is the default, so dont explicity set false yet:
+# Assume this is the default, so don't explicity set false yet:
 # SET GLOBAL oqgraph_allow_create_integer_latch=false;
 --echo The next error 140 + 1005 is expected
 --error 140
@@ -32,7 +32,7 @@ CREATE TABLE graph (
 # Here we enable scaffolding to let us create a deprecated table
 # so we can check that the new code will still allow queries to be performed
 # on a legacy database
-# It should still generate a warning (1287) - but I dont know how to test for that
+# It should still generate a warning (1287) - but I don't know how to test for that
 #
 # latch SMALLINT UNSIGNED NULL' is deprecated and will be removed in a future
 # release. Please use 'latch VARCHAR(32) NULL' instead


@@ -199,7 +199,7 @@ enum ESphRankMode
 SPH_RANK_PROXIMITY_BM25 = 0, ///< default mode, phrase proximity major factor and BM25 minor one
 SPH_RANK_BM25 = 1, ///< statistical mode, BM25 ranking only (faster but worse quality)
 SPH_RANK_NONE = 2, ///< no ranking, all matches get a weight of 1
-SPH_RANK_WORDCOUNT = 3, ///< simple word-count weighting, rank is a weighted sum of per-field keyword occurence counts
+SPH_RANK_WORDCOUNT = 3, ///< simple word-count weighting, rank is a weighted sum of per-field keyword occurrence counts
 SPH_RANK_PROXIMITY = 4, ///< phrase proximity
 SPH_RANK_MATCHANY = 5, ///< emulate old match-any weighting
 SPH_RANK_FIELDMASK = 6, ///< sets bits where there were matches


@@ -8681,7 +8681,7 @@ fi[]dnl
 # to PKG_CHECK_MODULES(), but does not set variables or print errors.
 #
 # Please remember that m4 expands AC_REQUIRE([PKG_PROG_PKG_CONFIG])
-# only at the first occurence in configure.ac, so if the first place
+# only at the first occurrence in configure.ac, so if the first place
 # it's called might be skipped (such as if it is within an "if", you
 # have to call PKG_CHECK_EXISTS manually
 # --------------------------------------------------------------