Fixed compiler warnings
When merging this into 10.2 and later, one can simply use the 10.2-or-later version of this code.
parent: 29dbb23fb7
commit: 5a16fe0e6f
@@ -432,7 +432,7 @@ handlerton *connect_hton= NULL;
 uint GetTraceValue(void)
 {return (uint)(connect_hton ? THDVAR(current_thd, xtrace) : 0);}
 bool ExactInfo(void) {return THDVAR(current_thd, exact_info);}
-bool CondPushEnabled(void) {return THDVAR(current_thd, cond_push);}
+static bool CondPushEnabled(void) {return THDVAR(current_thd, cond_push);}
 USETEMP UseTemp(void) {return (USETEMP)THDVAR(current_thd, use_tempfile);}
 int GetConvSize(void) {return THDVAR(current_thd, conv_size);}
 TYPCONV GetTypeConv(void) {return (TYPCONV)THDVAR(current_thd, type_conv);}
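The only change in this hunk is giving CondPushEnabled() internal linkage. Marking a helper that is used in just one translation unit as static is a common way to quiet missing-prototype style warnings (GCC's -Wmissing-declarations) without touching any callers. A minimal standalone sketch of the pattern, with names that are illustrative and not from the plugin:

    // Standalone sketch (not plugin code); compile with e.g.
    //   g++ -Wmissing-declarations -c linkage_demo.cpp
    static bool IsEnabled(int flag)      // internal linkage: no prior prototype required
    { return flag != 0; }

    bool AnyEnabled(int a, int b);       // external function: declared first, so no warning
    bool AnyEnabled(int a, int b)
    { return IsEnabled(a) || IsEnabled(b); }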
@@ -17,7 +17,7 @@
 #pragma implementation // gcc: Class implementation
 #endif
 
-#if _MSC_VER>=1400
+#if defined(_MSC_VER) && _MSC_VER>=1400
 #define _CRT_SECURE_NO_DEPRECATE 1
 #define _CRT_NONSTDC_NO_DEPRECATE 1
 #endif
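Here (and in the next hunk) the bare #if _MSC_VER>=1400 is wrapped in defined(_MSC_VER). When _MSC_VER is not defined, the preprocessor silently substitutes 0 in the #if expression, which is exactly what -Wundef flags on non-MSVC compilers; the defined() test short-circuits the comparison so the macro's value is never consulted there. A sketch of the guarded form, with a made-up macro name:

    // DEMO_MSVC_2005_PLUS is a hypothetical name; only the guard pattern matters.
    #if defined(_MSC_VER) && _MSC_VER >= 1400
    #define DEMO_MSVC_2005_PLUS 1     /* evaluated only when _MSC_VER exists */
    #else
    #define DEMO_MSVC_2005_PLUS 0     /* non-MSVC compilers never test _MSC_VER's value */
    #endif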
@@ -64,7 +64,7 @@
 #define MSG_WAITALL 0
 #endif
 
-#if _MSC_VER>=1400
+#if defined(_MSC_VER) && _MSC_VER>=1400
 #pragma warning(push,4)
 #endif
 
@@ -1041,8 +1041,8 @@ static bool ParseUrl ( CSphSEShare * share, TABLE * table, bool bCreate )
 bool bOk = true;
 bool bQL = false;
 char * sScheme = NULL;
-char * sHost = SPHINXAPI_DEFAULT_HOST;
-char * sIndex = SPHINXAPI_DEFAULT_INDEX;
+char * sHost = (char*) SPHINXAPI_DEFAULT_HOST;
+char * sIndex = (char*) SPHINXAPI_DEFAULT_INDEX;
 int iPort = SPHINXAPI_DEFAULT_PORT;
 
 // parse connection string, if any
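The (char*) casts that start here and recur through the following hunks address string-literal conversions: a C++ string literal converts to const char*, so initializing a plain char* from SPHINXAPI_DEFAULT_HOST / SPHINXAPI_DEFAULT_INDEX (presumably literal macros) draws the "deprecated conversion from string constant to 'char*'" diagnostic. The commit keeps the existing non-const pointers and makes the conversion explicit rather than constifying them. A standalone sketch with a stand-in macro:

    // DEMO_DEFAULT_HOST stands in for the real SPHINXAPI_* macros.
    #define DEMO_DEFAULT_HOST "localhost"

    void demo_defaults()
    {
        char * sHost = (char*) DEMO_DEFAULT_HOST;   // explicit cast: no string-constant warning
        (void) sHost;                               // nothing is ever written through it
    }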
@@ -1068,12 +1068,12 @@ static bool ParseUrl ( CSphSEShare * share, TABLE * table, bool bCreate )
 sHost--; // reuse last slash
 iPort = 0;
 if (!( sIndex = strrchr ( sHost, ':' ) ))
-sIndex = SPHINXAPI_DEFAULT_INDEX;
+sIndex = (char*) SPHINXAPI_DEFAULT_INDEX;
 else
 {
 *sIndex++ = '\0';
 if ( !*sIndex )
-sIndex = SPHINXAPI_DEFAULT_INDEX;
+sIndex = (char*) SPHINXAPI_DEFAULT_INDEX;
 }
 bOk = true;
 break;
@@ -1095,7 +1095,7 @@ static bool ParseUrl ( CSphSEShare * share, TABLE * table, bool bCreate )
 if ( sIndex )
 *sIndex++ = '\0';
 else
-sIndex = SPHINXAPI_DEFAULT_INDEX;
+sIndex = (char*) SPHINXAPI_DEFAULT_INDEX;
 
 iPort = atoi(sPort);
 if ( !iPort )
@@ -1107,7 +1107,7 @@ static bool ParseUrl ( CSphSEShare * share, TABLE * table, bool bCreate )
 if ( sIndex )
 *sIndex++ = '\0';
 else
-sIndex = SPHINXAPI_DEFAULT_INDEX;
+sIndex = (char*) SPHINXAPI_DEFAULT_INDEX;
 }
 bOk = true;
 break;
@@ -1303,8 +1303,8 @@ CSphSEQuery::CSphSEQuery ( const char * sQuery, int iLength, const char * sIndex
 , m_sGeoLongAttr ( "" )
 , m_fGeoLatitude ( 0.0f )
 , m_fGeoLongitude ( 0.0f )
-, m_sComment ( "" )
-, m_sSelect ( "*" )
+, m_sComment ( (char*) "" )
+, m_sSelect ( (char*) "*" )
 
 , m_pBuf ( NULL )
 , m_pCur ( NULL )
@@ -1738,7 +1738,7 @@ bool CSphSEQuery::ParseField ( char * sField )
 }
 } else if ( !strcmp ( sName, "override" ) ) // name,type,id:value,id:value,...
 {
-char * sName = NULL;
+sName = NULL;
 int iType = 0;
 CSphSEQuery::Override_t * pOverride = NULL;
 
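This hunk and the next one drop the char * from the inner declarations of sName and sValue, turning them back into plain assignments. The inner declarations presumably shadowed identically named variables already in scope in ParseField(), which is what -Wshadow reports. A small standalone illustration of the pattern (not the plugin's parsing logic):

    #include <cstring>

    int classify ( char * sName )
    {
        if ( !strcmp ( sName, "override" ) )
        {
            // char * sName = NULL;   // re-declaring would shadow the parameter (-Wshadow)
            sName = NULL;             // reuse the existing variable instead
        }
        return sName ? 1 : 0;
    }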
@@ -1794,7 +1794,7 @@ bool CSphSEQuery::ParseField ( char * sField )
 *sRest++ = '\0';
 if (!( sRest - sId )) break;
 
-char * sValue = sRest;
+sValue = sRest;
 if ( ( sRest = strchr ( sRest, ',' ) )!=NULL )
 *sRest++ = '\0';
 if ( !*sValue )
@@ -2213,7 +2213,7 @@ int ha_sphinx::Connect ( const char * sHost, ushort uPort )
 }
 
 char sError[512];
-int iSocket = socket ( iDomain, SOCK_STREAM, 0 );
+int iSocket = (int) socket ( iDomain, SOCK_STREAM, 0 );
 
 if ( iSocket<0 )
 {
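The (int) cast on the socket() result matters mostly on Windows builds, where socket() returns SOCKET, an unsigned integer type that is wider than int on 64-bit targets, so the implicit assignment to int draws a conversion/truncation warning; the cast makes the narrowing deliberate while keeping the existing iSocket<0 failure check. A sketch of the pattern on the POSIX side, where the cast is a harmless no-op:

    #include <sys/socket.h>

    // POSIX sketch; on Windows socket() returns SOCKET (unsigned, 64-bit on Win64),
    // which is where the truncation warning comes from.
    int open_stream_socket ( int iDomain )
    {
        int iSocket = (int) socket ( iDomain, SOCK_STREAM, 0 );   // narrowing made explicit
        return iSocket;                                           // negative means failure
    }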
@@ -2538,12 +2538,6 @@ char * ha_sphinx::UnpackString ()
 }
 
 
-static inline const char * FixNull ( const char * s )
-{
-return s ? s : "(null)";
-}
-
-
 bool ha_sphinx::UnpackSchema ()
 {
 SPH_ENTER_METHOD();
@@ -2674,7 +2668,7 @@ bool ha_sphinx::UnpackStats ( CSphSEStats * pStats )
 assert ( pStats );
 
 char * pCurSave = m_pCur;
-for ( uint i=0; i<m_iMatchesTotal && m_pCur<m_pResponseEnd-sizeof(uint32); i++ ) // NOLINT
+for ( uint m=0; m<m_iMatchesTotal && m_pCur<m_pResponseEnd-sizeof(uint32); m++ ) // NOLINT
 {
 m_pCur += m_bId64 ? 12 : 8; // skip id+weight
 for ( uint32 i=0; i<m_iAttrs && m_pCur<m_pResponseEnd-sizeof(uint32); i++ ) // NOLINT
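Renaming the outer loop counter from i to m fixes nested-loop shadowing: the inner for ( uint32 i=0; ... ) re-declares i inside the outer loop's scope, which -Wshadow flags; renaming the outer counter changes nothing about the iteration. Standalone illustration (bounds and body are made up):

    unsigned sum_cells ( unsigned uRows, unsigned uCols )
    {
        unsigned uTotal = 0;
        for ( unsigned m = 0; m < uRows; m++ )        // outer counter, renamed from i
            for ( unsigned i = 0; i < uCols; i++ )    // inner i no longer shadows anything
                uTotal += m * uCols + i;
        return uTotal;
    }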
@@ -3159,7 +3153,7 @@ int ha_sphinx::get_rec ( byte * buf, const byte *, uint )
 }
 }
 
-af->store ( sBuf, pCur-sBuf, &my_charset_bin );
+af->store ( sBuf, uint(pCur-sBuf), &my_charset_bin );
 }
 break;
 
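pCur-sBuf is a signed, pointer-width ptrdiff_t, while the Field::store() overload called here evidently takes an unsigned length (hence the cast), so the bare subtraction drew a sign/size conversion warning; uint(pCur-sBuf) makes the narrowing explicit. A minimal sketch with a hypothetical callee standing in for Field::store():

    // store_bytes() is a hypothetical stand-in for the Field::store() overload used here.
    void store_bytes ( const char * pData, unsigned uLen );

    void flush_buffer ( char * sBuf, char * pCur )
    {
        store_bytes ( sBuf, (unsigned)(pCur - sBuf) );   // ptrdiff_t narrowed explicitly
    }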
@@ -3386,39 +3380,39 @@ ha_rows ha_sphinx::records_in_range ( uint, key_range *, key_range * )
 // currently provided for doing that.
 //
 // Called from handle.cc by ha_create_table().
-int ha_sphinx::create ( const char * name, TABLE * table, HA_CREATE_INFO * )
+int ha_sphinx::create ( const char * name, TABLE * table_arg, HA_CREATE_INFO * )
 {
 SPH_ENTER_METHOD();
 char sError[256];
 
 CSphSEShare tInfo;
-if ( !ParseUrl ( &tInfo, table, true ) )
+if ( !ParseUrl ( &tInfo, table_arg, true ) )
 SPH_RET(-1);
 
 // check SphinxAPI table
 for ( ; !tInfo.m_bSphinxQL; )
 {
 // check system fields (count and types)
-if ( table->s->fields<SPHINXSE_SYSTEM_COLUMNS )
+if ( table_arg->s->fields<SPHINXSE_SYSTEM_COLUMNS )
 {
 my_snprintf ( sError, sizeof(sError), "%s: there MUST be at least %d columns",
 name, SPHINXSE_SYSTEM_COLUMNS );
 break;
 }
 
-if ( !IsIDField ( table->field[0] ) )
+if ( !IsIDField ( table_arg->field[0] ) )
 {
 my_snprintf ( sError, sizeof(sError), "%s: 1st column (docid) MUST be unsigned integer or bigint", name );
 break;
 }
 
-if ( !IsIntegerFieldType ( table->field[1]->type() ) )
+if ( !IsIntegerFieldType ( table_arg->field[1]->type() ) )
 {
 my_snprintf ( sError, sizeof(sError), "%s: 2nd column (weight) MUST be integer or bigint", name );
 break;
 }
 
-enum_field_types f2 = table->field[2]->type();
+enum_field_types f2 = table_arg->field[2]->type();
 if ( f2!=MYSQL_TYPE_VARCHAR
 && f2!=MYSQL_TYPE_BLOB && f2!=MYSQL_TYPE_MEDIUM_BLOB && f2!=MYSQL_TYPE_LONG_BLOB && f2!=MYSQL_TYPE_TINY_BLOB )
 {
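From here to the end of ha_sphinx::create(), the parameter is renamed from table to table_arg and the body switched to the new name. The handler base class already provides a table member, so a parameter spelled table shadows it and trips -Wshadow on every use; the rename resolves that while leaving the logic untouched (note that one strcasecmp in the next hunk still reads the table member directly). A tiny standalone sketch of the member-shadowing situation, with made-up class names:

    // MiniShare / MiniHandler are illustrative, not the MariaDB classes.
    struct MiniShare { unsigned fields; };

    struct MiniHandler
    {
        MiniShare * table;                                        // member, like handler::table

        int create ( const char * name, MiniShare * table_arg )   // parameter was 'table'
        {
            (void) name;
            return ( table_arg->fields >= 3 ) ? 0 : -1;           // argument, not the member
        }
    };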
@@ -3428,28 +3422,28 @@ int ha_sphinx::create ( const char * name, TABLE * table, HA_CREATE_INFO * )
 
 // check attributes
 int i;
-for ( i=3; i<(int)table->s->fields; i++ )
+for ( i=3; i<(int)table_arg->s->fields; i++ )
 {
-enum_field_types eType = table->field[i]->type();
+enum_field_types eType = table_arg->field[i]->type();
 if ( eType!=MYSQL_TYPE_TIMESTAMP && !IsIntegerFieldType(eType) && eType!=MYSQL_TYPE_VARCHAR && eType!=MYSQL_TYPE_FLOAT )
 {
 my_snprintf ( sError, sizeof(sError), "%s: %dth column (attribute %s) MUST be integer, bigint, timestamp, varchar, or float",
-name, i+1, table->field[i]->field_name );
+name, i+1, table_arg->field[i]->field_name );
 break;
 }
 }
 
-if ( i!=(int)table->s->fields )
+if ( i!=(int)table_arg->s->fields )
 break;
 
 // check index
 if (
-table->s->keys!=1 ||
-table->key_info[0].user_defined_key_parts!=1 ||
-strcasecmp ( table->key_info[0].key_part[0].field->field_name, table->field[2]->field_name ) )
+table_arg->s->keys!=1 ||
+table_arg->key_info[0].user_defined_key_parts!=1 ||
+strcasecmp ( table_arg->key_info[0].key_part[0].field->field_name, table->field[2]->field_name ) )
 {
 my_snprintf ( sError, sizeof(sError), "%s: there must be an index on '%s' column",
-name, table->field[2]->field_name );
+name, table_arg->field[2]->field_name );
 break;
 }
 
@@ -3464,13 +3458,13 @@ int ha_sphinx::create ( const char * name, TABLE * table, HA_CREATE_INFO * )
 sError[0] = '\0';
 
 // check that 1st column is id, is of int type, and has an index
-if ( strcmp ( table->field[0]->field_name, "id" ) )
+if ( strcmp ( table_arg->field[0]->field_name, "id" ) )
 {
 my_snprintf ( sError, sizeof(sError), "%s: 1st column must be called 'id'", name );
 break;
 }
 
-if ( !IsIDField ( table->field[0] ) )
+if ( !IsIDField ( table_arg->field[0] ) )
 {
 my_snprintf ( sError, sizeof(sError), "%s: 'id' column must be INT UNSIGNED or BIGINT", name );
 break;
@@ -3478,22 +3472,22 @@ int ha_sphinx::create ( const char * name, TABLE * table, HA_CREATE_INFO * )
 
 // check index
 if (
-table->s->keys!=1 ||
-table->key_info[0].user_defined_key_parts!=1 ||
-strcasecmp ( table->key_info[0].key_part[0].field->field_name, "id" ) )
+table_arg->s->keys!=1 ||
+table_arg->key_info[0].user_defined_key_parts!=1 ||
+strcasecmp ( table_arg->key_info[0].key_part[0].field->field_name, "id" ) )
 {
 my_snprintf ( sError, sizeof(sError), "%s: 'id' column must be indexed", name );
 break;
 }
 
 // check column types
-for ( int i=1; i<(int)table->s->fields; i++ )
+for ( int i=1; i<(int)table_arg->s->fields; i++ )
 {
-enum_field_types eType = table->field[i]->type();
+enum_field_types eType = table_arg->field[i]->type();
 if ( eType!=MYSQL_TYPE_TIMESTAMP && !IsIntegerFieldType(eType) && eType!=MYSQL_TYPE_VARCHAR && eType!=MYSQL_TYPE_FLOAT )
 {
 my_snprintf ( sError, sizeof(sError), "%s: column %d(%s) is of unsupported type (use int/bigint/timestamp/varchar/float)",
-name, i+1, table->field[i]->field_name );
+name, i+1, table_arg->field[i]->field_name );
 break;
 }
 }
@@ -3507,8 +3501,11 @@ int ha_sphinx::create ( const char * name, TABLE * table, HA_CREATE_INFO * )
 // report and bail
 if ( sError[0] )
 {
-my_error ( ER_CANT_CREATE_TABLE, MYF(0),
-table->s->db.str, table->s->table_name, sError );
+my_printf_error(ER_CANT_CREATE_TABLE,
+"Can\'t create table %s.%s (Error: %s)",
+MYF(0),
+table_arg->s->db.str,
+table_arg->s->table_name.str, sError);
 SPH_RET(-1);
 }
 
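The last hunk replaces my_error(ER_CANT_CREATE_TABLE, ...) with my_printf_error(), which, as used here, takes the error number, an explicit format string, the MYF flags, and then the format arguments, so the message text no longer depends on the server's own ER_CANT_CREATE_TABLE format; that difference between versions is presumably what the commit message's remark about 10.2 and later refers to. It also switches to table_arg and to table_name.str. A hypothetical standalone sketch of the same calling convention (report_error is a stand-in, not the MariaDB function):

    #include <cstdarg>
    #include <cstdio>

    // report_error(): numeric error code, printf-style format, flags, then arguments.
    static void report_error ( unsigned uCode, const char * szFormat, unsigned uFlags, ... )
    {
        (void) uCode; (void) uFlags;
        va_list ap;
        va_start ( ap, uFlags );
        vfprintf ( stderr, szFormat, ap );
        fputc ( '\n', stderr );
        va_end ( ap );
    }

    int main ()
    {
        // 1005 is used purely as a demo code here.
        report_error ( 1005, "Can't create table %s.%s (Error: %s)", 0u, "test", "t1", "bad schema" );
        return 0;
    }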