Merge bk-internal.mysql.com:/home/bk/mysql-5.0
into mysql.com:/home/my/mysql-5.0
monty@mysql.com 2005-03-16 01:30:28 +02:00
commit 1a9e05dd3f
32 changed files with 1801 additions and 607 deletions


@ -1084,6 +1084,7 @@ static void print_xml_row(FILE *xml_file, const char *row_name,
check_io(xml_file); check_io(xml_file);
} }
/* /*
getStructure -- retrievs database structure, prints out corresponding getStructure -- retrievs database structure, prints out corresponding
CREATE statement and fills out insert_pat. CREATE statement and fills out insert_pat.


@ -168,6 +168,8 @@ int heap_create(const char *name, uint keys, HP_KEYDEF *keydef,
keyinfo->write_key= hp_write_key; keyinfo->write_key= hp_write_key;
keyinfo->hash_buckets= 0; keyinfo->hash_buckets= 0;
} }
if ((keyinfo->flag & HA_AUTO_KEY) && create_info->with_auto_increment)
share->auto_key= i + 1;
} }
share->min_records= min_records; share->min_records= min_records;
share->max_records= max_records; share->max_records= max_records;
@ -178,7 +180,6 @@ int heap_create(const char *name, uint keys, HP_KEYDEF *keydef,
share->keys= keys; share->keys= keys;
share->max_key_length= max_length; share->max_key_length= max_length;
share->changed= 0; share->changed= 0;
share->auto_key= create_info->auto_key;
share->auto_key_type= create_info->auto_key_type; share->auto_key_type= create_info->auto_key_type;
share->auto_increment= create_info->auto_increment; share->auto_increment= create_info->auto_increment;
/* Must be allocated separately for rename to work */ /* Must be allocated separately for rename to work */


@ -183,10 +183,10 @@ typedef struct st_heap_info
typedef struct st_heap_create_info typedef struct st_heap_create_info
{ {
uint auto_key;
uint auto_key_type; uint auto_key_type;
ulong max_table_size; ulong max_table_size;
ulonglong auto_increment; ulonglong auto_increment;
my_bool with_auto_increment;
} HP_CREATE_INFO; } HP_CREATE_INFO;
/* Prototypes for heap-functions */ /* Prototypes for heap-functions */
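/*
  Hypothetical caller-side sketch, not part of this commit: with auto_key
  removed from HP_CREATE_INFO, a caller only announces that the table has
  an auto_increment column and lets heap_create() find the key that
  carries HA_AUTO_KEY (see the hp_create.c hunk above).  The key type
  below is an assumption chosen for illustration.
*/
#include <my_global.h>
#include <my_base.h>			/* HA_KEYTYPE_ULONGLONG */
#include "heap.h"			/* HP_CREATE_INFO */
#include <string.h>

static void set_create_info(HP_CREATE_INFO *info, ulonglong next_value)
{
  memset(info, 0, sizeof(*info));
  info->auto_key_type=       HA_KEYTYPE_ULONGLONG;	/* assumed */
  info->auto_increment=      next_value;
  info->with_auto_increment= TRUE;			/* replaces auto_key */
}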


@ -120,6 +120,21 @@
#define __STDC_EXT__ 1 /* To get large file support on hpux */ #define __STDC_EXT__ 1 /* To get large file support on hpux */
#endif #endif
/*
Solaris include file <sys/feature_tests.h> refers to X/Open document
System Interfaces and Headers, Issue 5
saying we should define _XOPEN_SOURCE=500 to get POSIX.1c prototypes
but apparently other systems (namely FreeBSD) don't agree.
Furthermore X/Open has since 2004 "System Interfaces, Issue 6"
that dictates _XOPEN_SOURCE=600, but Solaris checks for 500.
So, let's define 500 for solaris only.
*/
#ifdef __sun
#define _XOPEN_SOURCE 500
#endif
#if defined(THREAD) && !defined(__WIN__) && !defined(OS2) #if defined(THREAD) && !defined(__WIN__) && !defined(OS2)
#ifndef _POSIX_PTHREAD_SEMANTICS #ifndef _POSIX_PTHREAD_SEMANTICS
#define _POSIX_PTHREAD_SEMANTICS /* We want posix threads */ #define _POSIX_PTHREAD_SEMANTICS /* We want posix threads */
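/*
  Illustration only, not from this patch: on Solaris the feature-test
  macros above choose between two historical prototypes of several libc
  functions; getpwnam_r() is an assumed example of an affected function.
*/
#include <pwd.h>

#if defined(_POSIX_PTHREAD_SEMANTICS) || (_XOPEN_SOURCE - 0) >= 500
/* POSIX.1c form:
   int getpwnam_r(const char *name, struct passwd *pwd,
                  char *buf, size_t buflen, struct passwd **result); */
#else
/* older draft form:
   struct passwd *getpwnam_r(const char *name, struct passwd *pwd,
                             char *buf, int buflen); */
#endif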


@ -812,7 +812,7 @@ my_bool my_gethwaddr(uchar *to);
/* qnx ? */ /* qnx ? */
#define my_getpagesize() 8192 #define my_getpagesize() 8192
#endif #endif
#define my_munmap(a,b) munmap((char*)(a),(b)) #define my_munmap(a,b) munmap((a),(b))
#else #else
/* not a complete set of mmap() flags, but only those that nesessary */ /* not a complete set of mmap() flags, but only those that nesessary */


@ -24,7 +24,11 @@ extern dtype_t* dtype_binary;
/*-------------------------------------------*/ /*-------------------------------------------*/
/* The 'MAIN TYPE' of a column */ /* The 'MAIN TYPE' of a column */
#define DATA_VARCHAR 1 /* character varying of the #define DATA_VARCHAR 1 /* character varying of the
latin1_swedish_ci charset-collation */ latin1_swedish_ci charset-collation; note
that the MySQL format for this, DATA_BINARY,
DATA_VARMYSQL, is also affected by whether the
'precise type' contains
DATA_MYSQL_TRUE_VARCHAR */
#define DATA_CHAR 2 /* fixed length character of the #define DATA_CHAR 2 /* fixed length character of the
latin1_swedish_ci charset-collation */ latin1_swedish_ci charset-collation */
#define DATA_FIXBINARY 3 /* binary string of fixed length */ #define DATA_FIXBINARY 3 /* binary string of fixed length */
@ -102,6 +106,8 @@ columns, and for them the precise type is usually not used at all.
#define DATA_MYSQL_TYPE_MASK 255 /* AND with this mask to extract the MySQL #define DATA_MYSQL_TYPE_MASK 255 /* AND with this mask to extract the MySQL
type from the precise type */ type from the precise type */
#define DATA_MYSQL_TRUE_VARCHAR 15 /* MySQL type code for the >= 5.0.3
format true VARCHAR */
/* Precise data types for system columns and the length of those columns; /* Precise data types for system columns and the length of those columns;
NOTE: the values must run from 0 up in the order given! All codes must NOTE: the values must run from 0 up in the order given! All codes must
@ -134,6 +140,10 @@ be less than 256 */
In earlier versions this was set for some In earlier versions this was set for some
BLOB columns. BLOB columns.
*/ */
#define DATA_LONG_TRUE_VARCHAR 4096 /* this is ORed to the precise data
type when the column is true VARCHAR where
MySQL uses 2 bytes to store the data len;
for shorter VARCHARs MySQL uses only 1 byte */
/*-------------------------------------------*/ /*-------------------------------------------*/
/* This many bytes we need to store the type information affecting the /* This many bytes we need to store the type information affecting the
@ -144,6 +154,15 @@ SQL null*/
store the charset-collation number; one byte is left unused, though */ store the charset-collation number; one byte is left unused, though */
#define DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE 6 #define DATA_NEW_ORDER_NULL_TYPE_BUF_SIZE 6
/*************************************************************************
Gets the MySQL type code from a dtype. */
UNIV_INLINE
ulint
dtype_get_mysql_type(
/*=================*/
/* out: MySQL type code; this is NOT an InnoDB
type code! */
dtype_t* type); /* in: type struct */
/************************************************************************* /*************************************************************************
Determine how many bytes the first n characters of the given string occupy. Determine how many bytes the first n characters of the given string occupy.
If the string is shorter than n characters, returns the number of bytes If the string is shorter than n characters, returns the number of bytes
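/*************************************************************************
Sketch only, not part of the patch: how the new flags above combine.  For a
>= 5.0.3 true VARCHAR column the low byte of the precise type holds
DATA_MYSQL_TRUE_VARCHAR, and DATA_LONG_TRUE_VARCHAR is ORed in when the
MySQL row format stores the data length in 2 bytes instead of 1 (a MySQL
key value always uses 2 bytes).  The helper below is hypothetical and only
illustrates the encoding. */
UNIV_INLINE
ulint
true_varchar_length_bytes(
/*======================*/
			/* out: 0 if not a true VARCHAR, else 1 or 2 */
	ulint	prtype)	/* in: precise type of a column */
{
	if ((prtype & DATA_MYSQL_TYPE_MASK) != DATA_MYSQL_TRUE_VARCHAR) {

		return(0);
	}

	return((prtype & DATA_LONG_TRUE_VARCHAR) ? 2 : 1);
}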


@ -32,6 +32,19 @@ dtype_get_charset_coll(
return((prtype >> 16) & 0xFFUL); return((prtype >> 16) & 0xFFUL);
} }
/*************************************************************************
Gets the MySQL type code from a dtype. */
UNIV_INLINE
ulint
dtype_get_mysql_type(
/*=================*/
/* out: MySQL type code; this is NOT an InnoDB
type code! */
dtype_t* type) /* in: type struct */
{
return(type->prtype & 0xFFUL);
}
/************************************************************************* /*************************************************************************
Sets the mbminlen and mbmaxlen members of a data type structure. */ Sets the mbminlen and mbmaxlen members of a data type structure. */
UNIV_INLINE UNIV_INLINE


@ -359,7 +359,8 @@ struct que_thr_struct{
the control came */ the control came */
ulint resource; /* resource usage of the query thread ulint resource; /* resource usage of the query thread
thus far */ thus far */
ulint lock_state; /* lock state of thread (table or row) */ ulint lock_state; /* lock state of thread (table or
row) */
}; };
#define QUE_THR_MAGIC_N 8476583 #define QUE_THR_MAGIC_N 8476583


@ -21,36 +21,6 @@ Created 9/17/2000 Heikki Tuuri
typedef struct row_prebuilt_struct row_prebuilt_t; typedef struct row_prebuilt_struct row_prebuilt_t;
/***********************************************************************
Stores a variable-length field (like VARCHAR) length to dest, in the
MySQL format. */
UNIV_INLINE
byte*
row_mysql_store_var_len(
/*====================*/
/* out: dest + 2 */
byte* dest, /* in: where to store */
ulint len); /* in: length, must fit in two bytes */
/***********************************************************************
Reads a MySQL format variable-length field (like VARCHAR) length and
returns pointer to the field data. */
UNIV_INLINE
byte*
row_mysql_read_var_ref(
/*===================*/
/* out: field + 2 */
ulint* len, /* out: variable-length field length */
byte* field); /* in: field */
/***********************************************************************
Reads a MySQL format variable-length field (like VARCHAR) length and
returns pointer to the field data. */
byte*
row_mysql_read_var_ref_noninline(
/*=============================*/
/* out: field + 2 */
ulint* len, /* out: variable-length field length */
byte* field); /* in: field */
/*********************************************************************** /***********************************************************************
Frees the blob heap in prebuilt when no longer needed. */ Frees the blob heap in prebuilt when no longer needed. */
@ -60,6 +30,30 @@ row_mysql_prebuilt_free_blob_heap(
row_prebuilt_t* prebuilt); /* in: prebuilt struct of a row_prebuilt_t* prebuilt); /* in: prebuilt struct of a
ha_innobase:: table handle */ ha_innobase:: table handle */
/*********************************************************************** /***********************************************************************
Stores a >= 5.0.3 format true VARCHAR length to dest, in the MySQL row
format. */
byte*
row_mysql_store_true_var_len(
/*=========================*/
/* out: pointer to the data, we skip the 1 or 2 bytes
at the start that are used to store the len */
byte* dest, /* in: where to store */
ulint len, /* in: length, must fit in two bytes */
ulint lenlen);/* in: storage length of len: either 1 or 2 bytes */
/***********************************************************************
Reads a >= 5.0.3 format true VARCHAR length, in the MySQL row format, and
returns a pointer to the data. */
byte*
row_mysql_read_true_varchar(
/*========================*/
/* out: pointer to the data, we skip the 1 or 2 bytes
at the start that are used to store the len */
ulint* len, /* out: variable-length field length */
byte* field, /* in: field in the MySQL format */
ulint lenlen);/* in: storage length of len: either 1 or 2 bytes */
/***********************************************************************
Stores a reference to a BLOB in the MySQL format. */ Stores a reference to a BLOB in the MySQL format. */
void void
@ -83,24 +77,40 @@ row_mysql_read_blob_ref(
ulint col_len); /* in: BLOB reference length (not BLOB ulint col_len); /* in: BLOB reference length (not BLOB
length) */ length) */
/****************************************************************** /******************************************************************
Stores a non-SQL-NULL field given in the MySQL format in the Innobase Stores a non-SQL-NULL field given in the MySQL format in the InnoDB format.
format. */ The counterpart of this function is row_sel_field_store_in_mysql_format() in
UNIV_INLINE row0sel.c. */
void
byte*
row_mysql_store_col_in_innobase_format( row_mysql_store_col_in_innobase_format(
/*===================================*/ /*===================================*/
dfield_t* dfield, /* in/out: dfield */ /* out: up to which byte we used
byte* buf, /* in/out: buffer for the converted buf in the conversion */
value */ dfield_t* dfield, /* in/out: dfield where dtype
information must be already set when
this function is called! */
byte* buf, /* in/out: buffer for a converted
integer value; this must be at least
col_len long then! */
ibool row_format_col, /* TRUE if the mysql_data is from
a MySQL row, FALSE if from a MySQL
key value;
in MySQL, a true VARCHAR storage
format differs in a row and in a
key value: in a key value the length
is always stored in 2 bytes! */
byte* mysql_data, /* in: MySQL column value, not byte* mysql_data, /* in: MySQL column value, not
SQL NULL; NOTE that dfield may also SQL NULL; NOTE that dfield may also
get a pointer to mysql_data, get a pointer to mysql_data,
therefore do not discard this as long therefore do not discard this as long
as dfield is used! */ as dfield is used! */
ulint col_len, /* in: MySQL column length */ ulint col_len, /* in: MySQL column length; NOTE that
ulint type, /* in: data type */ this is the storage length of the
bool comp, /* in: TRUE=compact format */ column in the MySQL format row, not
ulint is_unsigned); /* in: != 0 if unsigned integer type */ necessarily the length of the actual
payload data; if the column is a true
VARCHAR then this is irrelevant */
ibool comp); /* in: TRUE = compact format */
/******************************************************************** /********************************************************************
Handles user errors and lock waits detected by the database engine. */ Handles user errors and lock waits detected by the database engine. */
@ -457,6 +467,16 @@ struct mysql_row_templ_struct {
zero if column cannot be NULL */ zero if column cannot be NULL */
ulint type; /* column type in Innobase mtype ulint type; /* column type in Innobase mtype
numbers DATA_CHAR... */ numbers DATA_CHAR... */
ulint mysql_type; /* MySQL type code; this is always
< 256 */
ulint mysql_length_bytes; /* if mysql_type
== DATA_MYSQL_TRUE_VARCHAR, this tells
whether we should use 1 or 2 bytes to
store the MySQL true VARCHAR data
length at the start of row in the MySQL
format (NOTE that the MySQL key value
format always uses 2 bytes for the data
len) */
ulint charset; /* MySQL charset-collation code ulint charset; /* MySQL charset-collation code
of the column, or zero */ of the column, or zero */
ulint mbminlen; /* minimum length of a char, in bytes, ulint mbminlen; /* minimum length of a char, in bytes,


@ -5,149 +5,3 @@ MySQL interface for Innobase
Created 1/23/2001 Heikki Tuuri Created 1/23/2001 Heikki Tuuri
*******************************************************/ *******************************************************/
/***********************************************************************
Stores a variable-length field (like VARCHAR) length to dest, in the
MySQL format. No real var implemented in MySQL yet! */
UNIV_INLINE
byte*
row_mysql_store_var_len(
/*====================*/
/* out: dest + 2 */
byte* dest, /* in: where to store */
ulint len __attribute__((unused))) /* in: length, must fit in two
bytes */
{
ut_ad(len < 256 * 256);
/*
mach_write_to_2_little_endian(dest, len);
return(dest + 2);
*/
return(dest); /* No real var implemented in MySQL yet! */
}
/***********************************************************************
Reads a MySQL format variable-length field (like VARCHAR) length and
returns pointer to the field data. No real var implemented in MySQL yet! */
UNIV_INLINE
byte*
row_mysql_read_var_ref(
/*===================*/
/* out: field + 2 */
ulint* len, /* out: variable-length field length; does not work
yet! */
byte* field) /* in: field */
{
/*
*len = mach_read_from_2_little_endian(field);
return(field + 2);
*/
UT_NOT_USED(len);
return(field); /* No real var implemented in MySQL yet! */
}
/******************************************************************
Stores a non-SQL-NULL field given in the MySQL format in the Innobase
format. */
UNIV_INLINE
void
row_mysql_store_col_in_innobase_format(
/*===================================*/
dfield_t* dfield, /* in/out: dfield */
byte* buf, /* in/out: buffer for the converted
value; this must be at least col_len
long! */
byte* mysql_data, /* in: MySQL column value, not
SQL NULL; NOTE that dfield may also
get a pointer to mysql_data,
therefore do not discard this as long
as dfield is used! */
ulint col_len, /* in: MySQL column length */
ulint type, /* in: data type */
bool comp, /* in: TRUE=compact format */
ulint is_unsigned) /* in: != 0 if unsigned integer type */
{
byte* ptr = mysql_data;
if (type == DATA_INT) {
/* Store integer data in Innobase in a big-endian format,
sign bit negated */
ptr = buf + col_len;
for (;;) {
ptr--;
*ptr = *mysql_data;
if (ptr == buf) {
break;
}
mysql_data++;
}
if (!is_unsigned) {
*ptr = (byte) (*ptr ^ 128);
}
} else if (type == DATA_VARCHAR || type == DATA_VARMYSQL
|| type == DATA_BINARY) {
/* Remove trailing spaces. */
/* Handle UCS2 strings differently. */
ulint mbminlen = dtype_get_mbminlen(
dfield_get_type(dfield));
ptr = row_mysql_read_var_ref(&col_len, mysql_data);
if (mbminlen == 2) {
/* space=0x0020 */
/* Trim "half-chars", just in case. */
col_len &= ~1;
while (col_len >= 2 && ptr[col_len - 2] == 0x00
&& ptr[col_len - 1] == 0x20) {
col_len -= 2;
}
} else {
ut_a(mbminlen == 1);
/* space=0x20 */
while (col_len > 0 && ptr[col_len - 1] == 0x20) {
col_len--;
}
}
} else if (comp && type == DATA_MYSQL
&& dtype_get_mbminlen(dfield_get_type(dfield)) == 1
&& dtype_get_mbmaxlen(dfield_get_type(dfield)) > 1) {
/* We assume that this CHAR field is encoded in a
variable-length character set where spaces have
1:1 correspondence to 0x20 bytes, such as UTF-8.
Consider a CHAR(n) field, a field of n characters.
It will contain between n*mbminlen and n*mbmaxlen bytes.
We will try to truncate it to n bytes by stripping
space padding. If the field contains single-byte
characters only, it will be truncated to n characters.
Consider a CHAR(5) field containing the string ".a "
where "." denotes a 3-byte character represented by
the bytes "$%&". After our stripping, the string will
be stored as "$%&a " (5 bytes). The string ".abc "
will be stored as "$%&abc" (6 bytes).
The space padding will be restored in row0sel.c, function
row_sel_field_store_in_mysql_format(). */
ulint n_chars;
dtype_t* dtype = dfield_get_type(dfield);
ut_a(!(dtype_get_len(dtype) % dtype_get_mbmaxlen(dtype)));
n_chars = dtype_get_len(dtype) / dtype_get_mbmaxlen(dtype);
/* Strip space padding. */
while (col_len > n_chars && ptr[col_len - 1] == 0x20) {
col_len--;
}
} else if (type == DATA_BLOB) {
ptr = row_mysql_read_blob_ref(&col_len, mysql_data, col_len);
}
dfield_set_data(dfield, ptr, col_len);
}


@ -521,6 +521,10 @@ row_ins_cascade_calc_update_vec(
fixed_size = dtype_get_fixed_size(type); fixed_size = dtype_get_fixed_size(type);
/* TODO: pad in UCS-2 with 0x0020.
TODO: How does the special truncation of
UTF-8 CHAR cols affect this? */
if (fixed_size if (fixed_size
&& ufield->new_val.len != UNIV_SQL_NULL && ufield->new_val.len != UNIV_SQL_NULL
&& ufield->new_val.len < fixed_size) { && ufield->new_val.len < fixed_size) {


@ -105,20 +105,6 @@ row_mysql_delay_if_needed(void)
} }
} }
/***********************************************************************
Reads a MySQL format variable-length field (like VARCHAR) length and
returns pointer to the field data. */
byte*
row_mysql_read_var_ref_noninline(
/*=============================*/
/* out: field + 2 */
ulint* len, /* out: variable-length field length */
byte* field) /* in: field */
{
return(row_mysql_read_var_ref(len, field));
}
/*********************************************************************** /***********************************************************************
Frees the blob heap in prebuilt when no longer needed. */ Frees the blob heap in prebuilt when no longer needed. */
@ -132,6 +118,61 @@ row_mysql_prebuilt_free_blob_heap(
prebuilt->blob_heap = NULL; prebuilt->blob_heap = NULL;
} }
/***********************************************************************
Stores a >= 5.0.3 format true VARCHAR length to dest, in the MySQL row
format. */
byte*
row_mysql_store_true_var_len(
/*=========================*/
/* out: pointer to the data, we skip the 1 or 2 bytes
at the start that are used to store the len */
byte* dest, /* in: where to store */
ulint len, /* in: length, must fit in two bytes */
ulint lenlen) /* in: storage length of len: either 1 or 2 bytes */
{
if (lenlen == 2) {
ut_a(len < 256 * 256);
mach_write_to_2_little_endian(dest, len);
return(dest + 2);
}
ut_a(lenlen == 1);
ut_a(len < 256);
mach_write_to_1(dest, len);
return(dest + 1);
}
/***********************************************************************
Reads a >= 5.0.3 format true VARCHAR length, in the MySQL row format, and
returns a pointer to the data. */
byte*
row_mysql_read_true_varchar(
/*========================*/
/* out: pointer to the data, we skip the 1 or 2 bytes
at the start that are used to store the len */
ulint* len, /* out: variable-length field length */
byte* field, /* in: field in the MySQL format */
ulint lenlen) /* in: storage length of len: either 1 or 2 bytes */
{
if (lenlen == 2) {
*len = mach_read_from_2_little_endian(field);
return(field + 2);
}
ut_a(lenlen == 1);
*len = mach_read_from_1(field);
return(field + 1);
}
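/***********************************************************************
Usage sketch, not part of the patch: round-trip a 300-byte value through
the two helpers above, using a 2-byte length prefix; the buffer and the
length are made up for illustration. */
static
void
row_mysql_true_varchar_example(void)
/*================================*/
{
	byte	rec[2 + 300];
	byte*	data;
	ulint	len;

	data = row_mysql_store_true_var_len(rec, 300, 2);
	/* data == rec + 2; the 300 bytes of column data would be
	copied here */

	data = row_mysql_read_true_varchar(&len, rec, 2);
	ut_a(len == 300);
	ut_a(data == rec + 2);
}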
/*********************************************************************** /***********************************************************************
Stores a reference to a BLOB in the MySQL format. */ Stores a reference to a BLOB in the MySQL format. */
@ -191,15 +232,177 @@ row_mysql_read_blob_ref(
} }
/****************************************************************** /******************************************************************
Convert a row in the MySQL format to a row in the Innobase format. */ Stores a non-SQL-NULL field given in the MySQL format in the InnoDB format.
The counterpart of this function is row_sel_field_store_in_mysql_format() in
row0sel.c. */
byte*
row_mysql_store_col_in_innobase_format(
/*===================================*/
/* out: up to which byte we used
buf in the conversion */
dfield_t* dfield, /* in/out: dfield where dtype
information must be already set when
this function is called! */
byte* buf, /* in/out: buffer for a converted
integer value; this must be at least
col_len long then! */
ibool row_format_col, /* TRUE if the mysql_data is from
a MySQL row, FALSE if from a MySQL
key value;
in MySQL, a true VARCHAR storage
format differs in a row and in a
key value: in a key value the length
is always stored in 2 bytes! */
byte* mysql_data, /* in: MySQL column value, not
SQL NULL; NOTE that dfield may also
get a pointer to mysql_data,
therefore do not discard this as long
as dfield is used! */
ulint col_len, /* in: MySQL column length; NOTE that
this is the storage length of the
column in the MySQL format row, not
necessarily the length of the actual
payload data; if the column is a true
VARCHAR then this is irrelevant */
ibool comp) /* in: TRUE = compact format */
{
byte* ptr = mysql_data;
dtype_t* dtype;
ulint type;
ulint lenlen;
dtype = dfield_get_type(dfield);
type = dtype->mtype;
if (type == DATA_INT) {
/* Store integer data in Innobase in a big-endian format,
sign bit negated if the data is a signed integer. In MySQL,
integers are stored in a little-endian format. */
ptr = buf + col_len;
for (;;) {
ptr--;
*ptr = *mysql_data;
if (ptr == buf) {
break;
}
mysql_data++;
}
if (!(dtype->prtype & DATA_UNSIGNED)) {
*ptr = (byte) (*ptr ^ 128);
}
buf += col_len;
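/* Worked example, added for illustration (not part of the patch): a
signed 4-byte MySQL integer with value 1 arrives little-endian as
01 00 00 00; the loop above reverses it to 00 00 00 01 and the sign-bit
flip gives 80 00 00 01.  The value -1 (FF FF FF FF) becomes 7F FF FF FF,
so a plain byte-wise comparison of the stored form follows signed
integer order. */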
} else if ((type == DATA_VARCHAR
|| type == DATA_VARMYSQL
|| type == DATA_BINARY)) {
if (dtype_get_mysql_type(dtype) == DATA_MYSQL_TRUE_VARCHAR) {
/* The length of the actual data is stored to 1 or 2
bytes at the start of the field */
if (row_format_col) {
if (dtype->prtype & DATA_LONG_TRUE_VARCHAR) {
lenlen = 2;
} else {
lenlen = 1;
}
} else {
/* In a MySQL key value, lenlen is always 2 */
lenlen = 2;
}
ptr = row_mysql_read_true_varchar(&col_len, mysql_data,
lenlen);
} else {
/* Remove trailing spaces from old style VARCHAR
columns. */
/* Handle UCS2 strings differently. */
ulint mbminlen = dtype_get_mbminlen(dtype);
ptr = mysql_data;
if (mbminlen == 2) {
/* space=0x0020 */
/* Trim "half-chars", just in case. */
col_len &= ~1;
while (col_len >= 2 && ptr[col_len - 2] == 0x00
&& ptr[col_len - 1] == 0x20) {
col_len -= 2;
}
} else {
ut_a(mbminlen == 1);
/* space=0x20 */
while (col_len > 0
&& ptr[col_len - 1] == 0x20) {
col_len--;
}
}
}
} else if (comp && type == DATA_MYSQL
&& dtype_get_mbminlen(dtype) == 1
&& dtype_get_mbmaxlen(dtype) > 1) {
/* In some cases we strip trailing spaces from UTF-8 and other
multibyte charsets, from FIXED-length CHAR columns, to save
space. UTF-8 would otherwise normally use 3 * the string length
bytes to store a latin1 string! */
/* We assume that this CHAR field is encoded in a
variable-length character set where spaces have
1:1 correspondence to 0x20 bytes, such as UTF-8.
Consider a CHAR(n) field, a field of n characters.
It will contain between n * mbminlen and n * mbmaxlen bytes.
We will try to truncate it to n bytes by stripping
space padding. If the field contains single-byte
characters only, it will be truncated to n characters.
Consider a CHAR(5) field containing the string ".a "
where "." denotes a 3-byte character represented by
the bytes "$%&". After our stripping, the string will
be stored as "$%&a " (5 bytes). The string ".abc "
will be stored as "$%&abc" (6 bytes).
The space padding will be restored in row0sel.c, function
row_sel_field_store_in_mysql_format(). */
ulint n_chars;
ut_a(!(dtype_get_len(dtype) % dtype_get_mbmaxlen(dtype)));
n_chars = dtype_get_len(dtype) / dtype_get_mbmaxlen(dtype);
/* Strip space padding. */
while (col_len > n_chars && ptr[col_len - 1] == 0x20) {
col_len--;
}
} else if (type == DATA_BLOB && row_format_col) {
ptr = row_mysql_read_blob_ref(&col_len, mysql_data, col_len);
}
dfield_set_data(dfield, ptr, col_len);
return(buf);
}
/******************************************************************
Convert a row in the MySQL format to a row in the Innobase format. Note that
the function to convert a MySQL format key value to an InnoDB dtuple is
row_sel_convert_mysql_key_to_innobase() in row0sel.c. */
static static
void void
row_mysql_convert_row_to_innobase( row_mysql_convert_row_to_innobase(
/*==============================*/ /*==============================*/
dtuple_t* row, /* in/out: Innobase row where the dtuple_t* row, /* in/out: Innobase row where the
field type information is already field type information is already
copied there, or will be copied copied there! */
later */
row_prebuilt_t* prebuilt, /* in: prebuilt struct where template row_prebuilt_t* prebuilt, /* in: prebuilt struct where template
must be of type ROW_MYSQL_WHOLE_ROW */ must be of type ROW_MYSQL_WHOLE_ROW */
byte* mysql_rec) /* in: row in the MySQL format; byte* mysql_rec) /* in: row in the MySQL format;
@ -236,10 +439,10 @@ row_mysql_convert_row_to_innobase(
row_mysql_store_col_in_innobase_format(dfield, row_mysql_store_col_in_innobase_format(dfield,
prebuilt->ins_upd_rec_buff prebuilt->ins_upd_rec_buff
+ templ->mysql_col_offset, + templ->mysql_col_offset,
TRUE, /* MySQL row format data */
mysql_rec + templ->mysql_col_offset, mysql_rec + templ->mysql_col_offset,
templ->mysql_col_len, templ->mysql_col_len,
templ->type, prebuilt->table->comp, prebuilt->table->comp);
templ->is_unsigned);
next_column: next_column:
; ;
} }
@ -594,7 +797,8 @@ static
dtuple_t* dtuple_t*
row_get_prebuilt_insert_row( row_get_prebuilt_insert_row(
/*========================*/ /*========================*/
/* out: prebuilt dtuple */ /* out: prebuilt dtuple; the column
type information is also set in it */
row_prebuilt_t* prebuilt) /* in: prebuilt struct in MySQL row_prebuilt_t* prebuilt) /* in: prebuilt struct in MySQL
handle */ handle */
{ {
@ -784,6 +988,7 @@ row_unlock_tables_for_mysql(
lock_release_tables_off_kernel(trx); lock_release_tables_off_kernel(trx);
mutex_exit(&kernel_mutex); mutex_exit(&kernel_mutex);
} }
/************************************************************************* /*************************************************************************
Sets a table lock on the table mentioned in prebuilt. */ Sets a table lock on the table mentioned in prebuilt. */
@ -962,10 +1167,13 @@ run_again:
if (err != DB_SUCCESS) { if (err != DB_SUCCESS) {
que_thr_stop_for_mysql(thr); que_thr_stop_for_mysql(thr);
thr->lock_state= QUE_THR_LOCK_ROW;
/* TODO: what is this? */ thr->lock_state= QUE_THR_LOCK_ROW;
was_lock_wait = row_mysql_handle_errors(&err, trx, thr, was_lock_wait = row_mysql_handle_errors(&err, trx, thr,
&savept); &savept);
thr->lock_state= QUE_THR_LOCK_NOLOCK; thr->lock_state= QUE_THR_LOCK_NOLOCK;
if (was_lock_wait) { if (was_lock_wait) {
goto run_again; goto run_again;
} }


@ -2119,10 +2119,10 @@ row_sel_convert_mysql_key_to_innobase(
+ 256 * key_ptr[data_offset + 1]; + 256 * key_ptr[data_offset + 1];
data_field_len = data_offset + 2 + field->prefix_len; data_field_len = data_offset + 2 + field->prefix_len;
data_offset += 2; data_offset += 2;
type = DATA_CHAR; /* now that we know the length, we /* now that we know the length, we store the column
store the column value like it would value like it would be a fixed char field */
be a fixed char field */
} else if (field->prefix_len > 0) { } else if (field->prefix_len > 0) {
/* Looks like MySQL pads unused end bytes in the /* Looks like MySQL pads unused end bytes in the
prefix with space. Therefore, also in UTF-8, it is ok prefix with space. Therefore, also in UTF-8, it is ok
@ -2146,11 +2146,12 @@ row_sel_convert_mysql_key_to_innobase(
if (!is_null) { if (!is_null) {
row_mysql_store_col_in_innobase_format( row_mysql_store_col_in_innobase_format(
dfield, buf, key_ptr + data_offset, dfield,
data_len, type, buf,
index->table->comp, FALSE, /* MySQL key value format col */
dfield_get_type(dfield)->prtype key_ptr + data_offset,
& DATA_UNSIGNED); data_len,
index->table->comp);
buf += data_len; buf += data_len;
} }
@ -2225,7 +2226,7 @@ row_sel_store_row_id_to_prebuilt(
dict_index_name_print(stderr, prebuilt->trx, index); dict_index_name_print(stderr, prebuilt->trx, index);
fprintf(stderr, "\n" fprintf(stderr, "\n"
"InnoDB: Field number %lu, record:\n", "InnoDB: Field number %lu, record:\n",
(ulong) dict_index_get_sys_col_pos(index, DATA_ROW_ID)); (ulong) dict_index_get_sys_col_pos(index, DATA_ROW_ID));
rec_print_new(stderr, index_rec, offsets); rec_print_new(stderr, index_rec, offsets);
putc('\n', stderr); putc('\n', stderr);
ut_error; ut_error;
@ -2235,8 +2236,9 @@ row_sel_store_row_id_to_prebuilt(
} }
/****************************************************************** /******************************************************************
Stores a non-SQL-NULL field in the MySQL format. */ Stores a non-SQL-NULL field in the MySQL format. The counterpart of this
UNIV_INLINE function is row_mysql_store_col_in_innobase_format() in row0mysql.c. */
static
void void
row_sel_field_store_in_mysql_format( row_sel_field_store_in_mysql_format(
/*================================*/ /*================================*/
@ -2251,6 +2253,8 @@ row_sel_field_store_in_mysql_format(
ulint len) /* in: length of the data */ ulint len) /* in: length of the data */
{ {
byte* ptr; byte* ptr;
byte* field_end;
byte* pad_ptr;
ut_ad(len != UNIV_SQL_NULL); ut_ad(len != UNIV_SQL_NULL);
@ -2274,25 +2278,66 @@ row_sel_field_store_in_mysql_format(
} }
ut_ad(templ->mysql_col_len == len); ut_ad(templ->mysql_col_len == len);
} else if (templ->type == DATA_VARCHAR || templ->type == DATA_VARMYSQL } else if (templ->type == DATA_VARCHAR
|| templ->type == DATA_BINARY) { || templ->type == DATA_VARMYSQL
/* Store the length of the data to the first two bytes of || templ->type == DATA_BINARY) {
dest; does not do anything yet because MySQL has
no real vars! */ field_end = dest + templ->mysql_col_len;
if (templ->mysql_type == DATA_MYSQL_TRUE_VARCHAR) {
/* This is a >= 5.0.3 type true VARCHAR. Store the
length of the data to the first byte or the first
two bytes of dest. */
dest = row_mysql_store_var_len(dest, len); dest = row_mysql_store_true_var_len(dest, len,
templ->mysql_length_bytes);
}
/* Copy the actual data */
ut_memcpy(dest, data, len); ut_memcpy(dest, data, len);
#if 0
/* No real var implemented in MySQL yet! */
ut_ad(templ->mysql_col_len >= len + 2);
#endif
/* Pad with trailing spaces. We pad with spaces also the
unused end of a >= 5.0.3 true VARCHAR column, just in case
MySQL expects its contents to be deterministic. */
pad_ptr = dest + len;
ut_ad(templ->mbminlen <= templ->mbmaxlen);
/* We handle UCS2 charset strings differently. */
if (templ->mbminlen == 2) {
/* A space char is two bytes, 0x0020 in UCS2 */
if (len & 1) {
/* A 0x20 has been stripped from the column.
Pad it back. */
if (pad_ptr < field_end) {
*pad_ptr = 0x20;
pad_ptr++;
}
}
/* Pad the rest of the string with 0x0020 */
while (pad_ptr < field_end) {
*pad_ptr = 0x00;
pad_ptr++;
*pad_ptr = 0x20;
pad_ptr++;
}
} else {
ut_ad(templ->mbminlen == 1);
/* space=0x20 */
memset(pad_ptr, 0x20, field_end - pad_ptr);
}
} else if (templ->type == DATA_BLOB) { } else if (templ->type == DATA_BLOB) {
/* Store a pointer to the BLOB buffer to dest: the BLOB was /* Store a pointer to the BLOB buffer to dest: the BLOB was
already copied to the buffer in row_sel_store_mysql_rec */ already copied to the buffer in row_sel_store_mysql_rec */
row_mysql_store_blob_ref(dest, templ->mysql_col_len, row_mysql_store_blob_ref(dest, templ->mysql_col_len, data,
data, len); len);
} else if (templ->type == DATA_MYSQL) { } else if (templ->type == DATA_MYSQL) {
memcpy(dest, data, len); memcpy(dest, data, len);
@ -2306,9 +2351,10 @@ row_sel_field_store_in_mysql_format(
ut_a(len * templ->mbmaxlen >= templ->mysql_col_len); ut_a(len * templ->mbmaxlen >= templ->mysql_col_len);
if (templ->mbminlen != templ->mbmaxlen) { if (templ->mbminlen != templ->mbmaxlen) {
/* Pad with spaces. This undoes the stripping /* Pad with spaces. This undoes the stripping
done in row0mysql.ic, function done in row0mysql.ic, function
row_mysql_store_col_in_innobase_format(). */ row_mysql_store_col_in_innobase_format(). */
memset(dest + len, 0x20, templ->mysql_col_len - len); memset(dest + len, 0x20, templ->mysql_col_len - len);
} }
} else { } else {
@ -2320,6 +2366,7 @@ row_sel_field_store_in_mysql_format(
|| templ->type == DATA_DOUBLE || templ->type == DATA_DOUBLE
|| templ->type == DATA_DECIMAL); || templ->type == DATA_DECIMAL);
ut_ad(templ->mysql_col_len == len); ut_ad(templ->mysql_col_len == len);
memcpy(dest, data, len); memcpy(dest, data, len);
} }
} }
@ -2436,40 +2483,6 @@ row_sel_store_mysql_rec(
mysql_rec + templ->mysql_col_offset, mysql_rec + templ->mysql_col_offset,
templ, data, len); templ, data, len);
if (templ->type == DATA_VARCHAR
|| templ->type == DATA_VARMYSQL
|| templ->type == DATA_BINARY) {
/* Pad with trailing spaces */
data = mysql_rec + templ->mysql_col_offset;
ut_ad(templ->mbminlen <= templ->mbmaxlen);
/* Handle UCS2 strings differently. */
if (templ->mbminlen == 2) {
/* space=0x0020 */
ulint col_len = templ->mysql_col_len;
ut_a(!(col_len & 1));
if (len & 1) {
/* A 0x20 has been stripped
from the column.
Pad it back. */
goto pad_0x20;
}
/* Pad the rest of the string
with 0x0020 */
while (len < col_len) {
data[len++] = 0x00;
pad_0x20:
data[len++] = 0x20;
}
} else {
ut_ad(templ->mbminlen == 1);
/* space=0x20 */
memset(data + len, 0x20,
templ->mysql_col_len - len);
}
}
/* Cleanup */ /* Cleanup */
if (extern_field_heap) { if (extern_field_heap) {
mem_heap_free(extern_field_heap); mem_heap_free(extern_field_heap);


@ -1958,7 +1958,7 @@ trx_recover_for_mysql(
ut_print_timestamp(stderr); ut_print_timestamp(stderr);
fprintf(stderr, fprintf(stderr,
" InnoDB: %d transactions in prepare state after recovery\n", " InnoDB: %d transactions in prepared state after recovery\n",
count); count);
return (count); return (count);


@ -1506,12 +1506,6 @@ run_testcase ()
if [ -n "$RESULT_EXT" -a \( x$RECORD = x1 -o -f "$result_file$RESULT_EXT" \) ] ; then if [ -n "$RESULT_EXT" -a \( x$RECORD = x1 -o -f "$result_file$RESULT_EXT" \) ] ; then
result_file="$result_file$RESULT_EXT" result_file="$result_file$RESULT_EXT"
fi fi
if [ -f "$TESTDIR/$tname.disabled" ]
then
comment=`$CAT $TESTDIR/$tname.disabled`;
disable_test $tname "$comment"
return
fi
if [ "$USE_MANAGER" = 1 ] ; then if [ "$USE_MANAGER" = 1 ] ; then
many_slaves=`$EXPR \( \( $tname : rpl_failsafe \) != 0 \) \| \( \( $tname : rpl_chain_temp_table \) != 0 \)` many_slaves=`$EXPR \( \( $tname : rpl_failsafe \) != 0 \) \| \( \( $tname : rpl_chain_temp_table \) != 0 \)`
fi fi
@ -1541,6 +1535,20 @@ run_testcase ()
return return
fi fi
if [ -f "$TESTDIR/$tname.disabled" ]
then
comment=`$CAT $TESTDIR/$tname.disabled`;
disable_test $tname "$comment"
return
fi
comment=`$GREP "^$tname *: *" $TESTDIR/disabled.def`;
if [ -n "$comment" ]
then
comment=`echo $comment | sed 's/^[^:]*: *//'`
disable_test $tname "$comment"
return
fi
if [ "x$USE_EMBEDDED_SERVER" != "x1" ] ; then if [ "x$USE_EMBEDDED_SERVER" != "x1" ] ; then
# Stop all slave threads, so that we don't have useless reconnection # Stop all slave threads, so that we don't have useless reconnection
# attempts and error messages in case the slave and master servers restart. # attempts and error messages in case the slave and master servers restart.


@ -665,3 +665,45 @@ length(v)
65530 65530
drop table t1; drop table t1;
set storage_engine=MyISAM; set storage_engine=MyISAM;
create table t1 (a bigint unsigned auto_increment primary key, b int,
key (b, a)) engine=heap;
insert t1 (b) values (1);
insert t1 (b) values (1);
insert t1 (b) values (1);
insert t1 (b) values (1);
insert t1 (b) values (1);
insert t1 (b) values (1);
insert t1 (b) values (1);
insert t1 (b) values (1);
select * from t1;
a b
1 1
2 1
3 1
4 1
5 1
6 1
7 1
8 1
drop table t1;
create table t1 (a int not null, b int not null auto_increment,
primary key(a, b), key(b)) engine=heap;
insert t1 (a) values (1);
insert t1 (a) values (1);
insert t1 (a) values (1);
insert t1 (a) values (1);
insert t1 (a) values (1);
insert t1 (a) values (1);
insert t1 (a) values (1);
insert t1 (a) values (1);
select * from t1;
a b
1 1
1 2
1 3
1 4
1 5
1 6
1 7
1 8
drop table t1;


@ -1,14 +1,45 @@
drop table if exists t1; drop table if exists t1, t2, t3, t4, t5;
set GLOBAL query_cache_type=on; set GLOBAL query_cache_type=on;
set GLOBAL query_cache_size=1355776; set GLOBAL query_cache_size=1355776;
set GLOBAL ndb_cache_check_time=1; set GLOBAL ndb_cache_check_time=100;
reset query cache; reset query cache;
flush status; flush status;
CREATE TABLE t1 ( pk int not null primary key, CREATE TABLE t1 (
a int, b int not null, c varchar(20)) ENGINE=ndbcluster; pk int not null primary key,
a1 int,
b1 int not null,
c1 varchar(20)
) ENGINE=ndb;
CREATE TABLE t2 (
pk int not null primary key,
a2 int,
b2 int not null
) ENGINE=ndb;
CREATE TABLE t3 (
pk int not null primary key,
a3 int,
b3 int not null,
c3 int not null,
d3 varchar(20)
) ENGINE=ndb;
CREATE TABLE t4 (
a4 int,
b4 int not null,
c4 char(20)
) ENGINE=ndbcluster;
CREATE TABLE t5 (
pk int not null primary key,
a5 int,
b5 int not null,
c5 varchar(255)
) ENGINE=ndbcluster;
insert into t1 value (1, 2, 3, 'First row'); insert into t1 value (1, 2, 3, 'First row');
insert into t2 value (1, 2, 3);
insert into t3 value (1, 2, 3, 4, '3 - First row');
insert into t4 value (2, 3, '4 - First row');
insert into t5 value (1, 2, 3, '5 - First row');
select * from t1; select * from t1;
pk a b c pk a1 b1 c1
1 2 3 First row 1 2 3 First row
show status like "Qcache_queries_in_cache"; show status like "Qcache_queries_in_cache";
Variable_name Value Variable_name Value
@ -20,14 +51,14 @@ show status like "Qcache_hits";
Variable_name Value Variable_name Value
Qcache_hits 0 Qcache_hits 0
select * from t1; select * from t1;
pk a b c pk a1 b1 c1
1 2 3 First row 1 2 3 First row
show status like "Qcache_hits"; show status like "Qcache_hits";
Variable_name Value Variable_name Value
Qcache_hits 1 Qcache_hits 1
update t1 set a=3 where pk=1; update t1 set a1=3 where pk=1;
select * from t1; select * from t1;
pk a b c pk a1 b1 c1
1 3 3 First row 1 3 3 First row
show status like "Qcache_inserts"; show status like "Qcache_inserts";
Variable_name Value Variable_name Value
@ -38,7 +69,7 @@ Qcache_hits 1
insert into t1 value (2, 7, 8, 'Second row'); insert into t1 value (2, 7, 8, 'Second row');
insert into t1 value (4, 5, 6, 'Fourth row'); insert into t1 value (4, 5, 6, 'Fourth row');
select * from t1 order by pk desc; select * from t1 order by pk desc;
pk a b c pk a1 b1 c1
4 5 6 Fourth row 4 5 6 Fourth row
2 7 8 Second row 2 7 8 Second row
1 3 3 First row 1 3 3 First row
@ -49,15 +80,15 @@ show status like "Qcache_hits";
Variable_name Value Variable_name Value
Qcache_hits 1 Qcache_hits 1
select * from t1 order by pk desc; select * from t1 order by pk desc;
pk a b c pk a1 b1 c1
4 5 6 Fourth row 4 5 6 Fourth row
2 7 8 Second row 2 7 8 Second row
1 3 3 First row 1 3 3 First row
show status like "Qcache_hits"; show status like "Qcache_hits";
Variable_name Value Variable_name Value
Qcache_hits 2 Qcache_hits 2
select * from t1 where b=3; select * from t1 where b1=3;
pk a b c pk a1 b1 c1
1 3 3 First row 1 3 3 First row
show status like "Qcache_queries_in_cache"; show status like "Qcache_queries_in_cache";
Variable_name Value Variable_name Value
@ -65,44 +96,44 @@ Qcache_queries_in_cache 2
show status like "Qcache_hits"; show status like "Qcache_hits";
Variable_name Value Variable_name Value
Qcache_hits 2 Qcache_hits 2
select * from t1 where b=3; select * from t1 where b1=3;
pk a b c pk a1 b1 c1
1 3 3 First row 1 3 3 First row
show status like "Qcache_hits"; show status like "Qcache_hits";
Variable_name Value Variable_name Value
Qcache_hits 3 Qcache_hits 3
delete from t1 where c='Fourth row'; delete from t1 where c1='Fourth row';
show status like "Qcache_queries_in_cache"; show status like "Qcache_queries_in_cache";
Variable_name Value Variable_name Value
Qcache_queries_in_cache 0 Qcache_queries_in_cache 0
select * from t1 where b=3; select * from t1 where b1=3;
pk a b c pk a1 b1 c1
1 3 3 First row 1 3 3 First row
show status like "Qcache_hits"; show status like "Qcache_hits";
Variable_name Value Variable_name Value
Qcache_hits 3 Qcache_hits 3
use test; use test;
select * from t1 order by pk desc; select * from t1 order by pk desc;
pk a b c pk a1 b1 c1
2 7 8 Second row 2 7 8 Second row
1 3 3 First row 1 3 3 First row
select * from t1 where b=3; select * from t1 where b1=3;
pk a b c pk a1 b1 c1
1 3 3 First row 1 3 3 First row
show status like "Qcache_hits"; show status like "Qcache_hits";
Variable_name Value Variable_name Value
Qcache_hits 4 Qcache_hits 4
update t1 set a=4 where b=3; update t1 set a1=4 where b1=3;
use test; use test;
show status like "Qcache_queries_in_cache"; show status like "Qcache_queries_in_cache";
Variable_name Value Variable_name Value
Qcache_queries_in_cache 0 Qcache_queries_in_cache 0
select * from t1 order by pk desc; select * from t1 order by pk desc;
pk a b c pk a1 b1 c1
2 7 8 Second row 2 7 8 Second row
1 4 3 First row 1 4 3 First row
select * from t1 order by pk desc; select * from t1 order by pk desc;
pk a b c pk a1 b1 c1
2 7 8 Second row 2 7 8 Second row
1 4 3 First row 1 4 3 First row
show status like "Qcache_inserts"; show status like "Qcache_inserts";
@ -112,11 +143,11 @@ show status like "Qcache_hits";
Variable_name Value Variable_name Value
Qcache_hits 5 Qcache_hits 5
select * from t1 order by pk desc; select * from t1 order by pk desc;
pk a b c pk a1 b1 c1
2 7 8 Second row 2 7 8 Second row
1 4 3 First row 1 4 3 First row
select * from t1 order by pk desc; select * from t1 order by pk desc;
pk a b c pk a1 b1 c1
2 7 8 Second row 2 7 8 Second row
1 4 3 First row 1 4 3 First row
show status like "Qcache_queries_in_cache"; show status like "Qcache_queries_in_cache";
@ -128,64 +159,463 @@ Qcache_inserts 7
show status like "Qcache_hits"; show status like "Qcache_hits";
Variable_name Value Variable_name Value
Qcache_hits 7 Qcache_hits 7
select * from t2;
pk a2 b2
1 2 3
select * from t3;
pk a3 b3 c3 d3
1 2 3 4 3 - First row
select * from t4;
a4 b4 c4
2 3 4 - First row
select * from t5;
pk a5 b5 c5
1 2 3 5 - First row
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 5
flush status;
begin; begin;
update t1 set a=5 where pk=1; update t1 set a1=5 where pk=1;
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 4
show status like "Qcache_inserts";
Variable_name Value
Qcache_inserts 0
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 0
select * from t1 order by pk desc;
pk a1 b1 c1
2 7 8 Second row
1 4 3 First row
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 5
show status like "Qcache_inserts";
Variable_name Value
Qcache_inserts 1
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 0
commit;
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 5
show status like "Qcache_inserts";
Variable_name Value
Qcache_inserts 1
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 0
select * from t1 order by pk desc;
pk a1 b1 c1
2 7 8 Second row
1 5 3 First row
show status like "Qcache_inserts";
Variable_name Value
Qcache_inserts 2
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 0
select * from t1 order by pk desc;
pk a1 b1 c1
2 7 8 Second row
1 5 3 First row
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 5
show status like "Qcache_inserts";
Variable_name Value
Qcache_inserts 2
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 1
flush status;
begin;
update t1 set a1=6 where pk=1;
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 4
show status like "Qcache_inserts";
Variable_name Value
Qcache_inserts 0
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 0
select * from t1 order by pk desc;
pk a1 b1 c1
2 7 8 Second row
1 5 3 First row
select * from t1 order by pk desc;
pk a1 b1 c1
2 7 8 Second row
1 5 3 First row
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 5
show status like "Qcache_inserts";
Variable_name Value
Qcache_inserts 1
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 1
select * from t1 order by pk desc;
pk a1 b1 c1
2 7 8 Second row
1 6 3 First row
select * from t1 order by pk desc;
pk a1 b1 c1
2 7 8 Second row
1 6 3 First row
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 5
show status like "Qcache_inserts";
Variable_name Value
Qcache_inserts 1
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 1
commit;
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 5
show status like "Qcache_inserts";
Variable_name Value
Qcache_inserts 1
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 1
select * from t1 order by pk desc;
pk a1 b1 c1
2 7 8 Second row
1 6 3 First row
show status like "Qcache_inserts";
Variable_name Value
Qcache_inserts 2
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 1
select * from t1 order by pk desc;
pk a1 b1 c1
2 7 8 Second row
1 6 3 First row
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 5
show status like "Qcache_inserts";
Variable_name Value
Qcache_inserts 2
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 2
flush status;
begin;
insert into t1 set pk=5, a1=6, b1=3, c1="New row";
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 4
show status like "Qcache_inserts";
Variable_name Value
Qcache_inserts 0
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 0
select * from t1 where pk=5;
pk a1 b1 c1
select * from t1 order by pk desc;
pk a1 b1 c1
2 7 8 Second row
1 6 3 First row
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 6
show status like "Qcache_inserts";
Variable_name Value
Qcache_inserts 2
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 0
select * from t1 where pk=5;
pk a1 b1 c1
5 6 3 New row
select * from t1 where pk=5;
pk a1 b1 c1
5 6 3 New row
select * from t1 order by pk desc;
pk a1 b1 c1
5 6 3 New row
2 7 8 Second row
1 6 3 First row
select * from t1 order by pk desc;
pk a1 b1 c1
5 6 3 New row
2 7 8 Second row
1 6 3 First row
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 6
show status like "Qcache_inserts";
Variable_name Value
Qcache_inserts 2
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 0
commit;
select * from t1 order by pk desc;
pk a1 b1 c1
5 6 3 New row
2 7 8 Second row
1 6 3 First row
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 5
show status like "Qcache_inserts";
Variable_name Value
Qcache_inserts 3
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 0
flush status;
begin;
delete from t1 where pk=2;
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 4
show status like "Qcache_inserts";
Variable_name Value
Qcache_inserts 0
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 0
select * from t1 where pk=2;
pk a1 b1 c1
2 7 8 Second row
select * from t1 order by pk desc;
pk a1 b1 c1
5 6 3 New row
2 7 8 Second row
1 6 3 First row
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 6
show status like "Qcache_inserts";
Variable_name Value
Qcache_inserts 2
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 0
select * from t1 where pk=2;
pk a1 b1 c1
select * from t1 order by pk desc;
pk a1 b1 c1
5 6 3 New row
1 6 3 First row
select * from t1 order by pk desc;
pk a1 b1 c1
5 6 3 New row
1 6 3 First row
select * from t1 where pk=2;
pk a1 b1 c1
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 6
show status like "Qcache_inserts";
Variable_name Value
Qcache_inserts 2
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 0
commit;
select * from t1 order by pk desc;
pk a1 b1 c1
5 6 3 New row
1 6 3 First row
select * from t1 where pk=2;
pk a1 b1 c1
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 6
show status like "Qcache_inserts";
Variable_name Value
Qcache_inserts 4
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 0
flush status;
begin;
update t1 set a1=9 where pk=1;
update t2 set a2=9 where pk=1;
update t3 set a3=9 where pk=1;
update t4 set a4=9 where a4=2;
update t5 set a5=9 where pk=1;
show status like "Qcache_queries_in_cache"; show status like "Qcache_queries_in_cache";
Variable_name Value Variable_name Value
Qcache_queries_in_cache 0 Qcache_queries_in_cache 0
show status like "Qcache_inserts"; show status like "Qcache_inserts";
Variable_name Value Variable_name Value
Qcache_inserts 7 Qcache_inserts 0
show status like "Qcache_hits"; show status like "Qcache_hits";
Variable_name Value Variable_name Value
Qcache_hits 7 Qcache_hits 0
select * from t1 order by pk desc; select * from t1 order by pk desc;
pk a b c pk a1 b1 c1
2 7 8 Second row 5 6 3 New row
1 4 3 First row 1 6 3 First row
select * from t2;
pk a2 b2
1 2 3
select * from t3;
pk a3 b3 c3 d3
1 2 3 4 3 - First row
select * from t4;
a4 b4 c4
2 3 4 - First row
select * from t5;
pk a5 b5 c5
1 2 3 5 - First row
show status like "Qcache_queries_in_cache"; show status like "Qcache_queries_in_cache";
Variable_name Value Variable_name Value
Qcache_queries_in_cache 1 Qcache_queries_in_cache 5
show status like "Qcache_inserts"; show status like "Qcache_inserts";
Variable_name Value Variable_name Value
Qcache_inserts 8 Qcache_inserts 5
show status like "Qcache_hits"; show status like "Qcache_hits";
Variable_name Value Variable_name Value
Qcache_hits 7 Qcache_hits 0
select * from t1 order by pk desc;
pk a1 b1 c1
5 6 3 New row
1 9 3 First row
select * from t1 order by pk desc;
pk a1 b1 c1
5 6 3 New row
1 9 3 First row
select * from t2;
pk a2 b2
1 9 3
select * from t3;
pk a3 b3 c3 d3
1 9 3 4 3 - First row
select * from t4;
a4 b4 c4
9 3 4 - First row
select * from t5;
pk a5 b5 c5
1 9 3 5 - First row
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 5
show status like "Qcache_inserts";
Variable_name Value
Qcache_inserts 5
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 0
commit; commit;
select * from t1 order by pk desc;
pk a1 b1 c1
5 6 3 New row
1 9 3 First row
select * from t2;
pk a2 b2
1 9 3
select * from t3;
pk a3 b3 c3 d3
1 9 3 4 3 - First row
select * from t4;
a4 b4 c4
9 3 4 - First row
select * from t5;
pk a5 b5 c5
1 9 3 5 - First row
show status like "Qcache_queries_in_cache"; show status like "Qcache_queries_in_cache";
Variable_name Value Variable_name Value
Qcache_queries_in_cache 1 Qcache_queries_in_cache 5
show status like "Qcache_inserts"; show status like "Qcache_inserts";
Variable_name Value Variable_name Value
Qcache_inserts 8 Qcache_inserts 10
show status like "Qcache_hits"; show status like "Qcache_hits";
Variable_name Value Variable_name Value
Qcache_hits 7 Qcache_hits 0
select * from t1 order by pk desc; select * from t1 order by pk desc;
pk a b c pk a1 b1 c1
2 7 8 Second row 5 6 3 New row
1 5 3 First row 1 9 3 First row
show status like "Qcache_inserts"; select * from t2;
Variable_name Value pk a2 b2
Qcache_inserts 9 1 9 3
show status like "Qcache_hits"; select * from t3;
Variable_name Value pk a3 b3 c3 d3
Qcache_hits 7 1 9 3 4 3 - First row
select * from t1 order by pk desc; select * from t4;
pk a b c a4 b4 c4
2 7 8 Second row 9 3 4 - First row
1 5 3 First row select * from t5;
pk a5 b5 c5
1 9 3 5 - First row
show status like "Qcache_queries_in_cache"; show status like "Qcache_queries_in_cache";
Variable_name Value Variable_name Value
Qcache_queries_in_cache 1 Qcache_queries_in_cache 5
show status like "Qcache_inserts"; show status like "Qcache_inserts";
Variable_name Value Variable_name Value
Qcache_inserts 9 Qcache_inserts 10
show status like "Qcache_hits"; show status like "Qcache_hits";
Variable_name Value Variable_name Value
Qcache_hits 8 Qcache_hits 5
drop table t1; select * from t1 order by pk desc;
pk a1 b1 c1
5 6 3 New row
1 9 3 First row
select * from t2;
pk a2 b2
1 9 3
select * from t3;
pk a3 b3 c3 d3
1 9 3 4 3 - First row
select * from t4;
a4 b4 c4
9 3 4 - First row
select * from t5;
pk a5 b5 c5
1 9 3 5 - First row
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 5
show status like "Qcache_inserts";
Variable_name Value
Qcache_inserts 10
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 10
select * from t1 order by pk desc;
pk a1 b1 c1
5 6 3 New row
1 9 3 First row
select * from t2;
pk a2 b2
1 9 3
select * from t3;
pk a3 b3 c3 d3
1 9 3 4 3 - First row
select * from t4;
a4 b4 c4
9 3 4 - First row
select * from t5;
pk a5 b5 c5
1 9 3 5 - First row
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 5
show status like "Qcache_inserts";
Variable_name Value
Qcache_inserts 10
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 15
drop table t1, t2, t3, t4, t5;
show status like "Qcache_queries_in_cache"; show status like "Qcache_queries_in_cache";
Variable_name Value Variable_name Value
Qcache_queries_in_cache 0 Qcache_queries_in_cache 0

mysql-test/t/disabled.def (new file, 20 lines)

@ -0,0 +1,20 @@
##############################################################################
#
# List the test cases that are to be disabled temporarely.
#
# Separate the test case name and the comment with ':'.
#
# <testcasename> : Comment test
#
# Don't use any TAB characters for whitespace.
#
##############################################################################
ndb_alter_table : NDB team needs to fix
ndb_autodiscover : NDB team needs to fix
ndb_autodiscover2 : NDB team needs to fix
ndb_cache_multi : NDB team needs to fix
ndb_cache_multi2 : NDB team needs to fix
ndb_multi : NDB team needs to fix
ndb_restore : NDB team needs to fix


@ -406,3 +406,32 @@ drop table t1;
# Reset varchar test # Reset varchar test
# #
eval set storage_engine=$default; eval set storage_engine=$default;
#
# Bug #8489: Strange auto_increment behaviour
#
create table t1 (a bigint unsigned auto_increment primary key, b int,
key (b, a)) engine=heap;
insert t1 (b) values (1);
insert t1 (b) values (1);
insert t1 (b) values (1);
insert t1 (b) values (1);
insert t1 (b) values (1);
insert t1 (b) values (1);
insert t1 (b) values (1);
insert t1 (b) values (1);
select * from t1;
drop table t1;
create table t1 (a int not null, b int not null auto_increment,
primary key(a, b), key(b)) engine=heap;
insert t1 (a) values (1);
insert t1 (a) values (1);
insert t1 (a) values (1);
insert t1 (a) values (1);
insert t1 (a) values (1);
insert t1 (a) values (1);
insert t1 (a) values (1);
insert t1 (a) values (1);
select * from t1;
drop table t1;


@ -2,7 +2,7 @@
-- source include/have_ndb.inc -- source include/have_ndb.inc
--disable_warnings --disable_warnings
drop table if exists t1; drop table if exists t1, t2, t3, t4, t5;
--enable_warnings --enable_warnings
@ -10,19 +10,47 @@ drop table if exists t1;
set GLOBAL query_cache_type=on; set GLOBAL query_cache_type=on;
set GLOBAL query_cache_size=1355776; set GLOBAL query_cache_size=1355776;
# Turn on thread that will fetch commit count for open tables # Turn on thread that will fetch commit count for open tables
set GLOBAL ndb_cache_check_time=1; set GLOBAL ndb_cache_check_time=100;
reset query cache; reset query cache;
flush status; flush status;
# Wait for thread to wake up and start "working" # Create test tables in NDB
sleep 20; CREATE TABLE t1 (
pk int not null primary key,
# Create test table in NDB a1 int,
CREATE TABLE t1 ( pk int not null primary key, b1 int not null,
a int, b int not null, c varchar(20)) ENGINE=ndbcluster; c1 varchar(20)
) ENGINE=ndb;
CREATE TABLE t2 (
pk int not null primary key,
a2 int,
b2 int not null
) ENGINE=ndb;
CREATE TABLE t3 (
pk int not null primary key,
a3 int,
b3 int not null,
c3 int not null,
d3 varchar(20)
) ENGINE=ndb;
CREATE TABLE t4 (
a4 int,
b4 int not null,
c4 char(20)
) ENGINE=ndbcluster;
CREATE TABLE t5 (
pk int not null primary key,
a5 int,
b5 int not null,
c5 varchar(255)
) ENGINE=ndbcluster;
insert into t1 value (1, 2, 3, 'First row'); insert into t1 value (1, 2, 3, 'First row');
insert into t2 value (1, 2, 3);
insert into t3 value (1, 2, 3, 4, '3 - First row');
insert into t4 value (2, 3, '4 - First row');
insert into t5 value (1, 2, 3, '5 - First row');
# Perform one query which should be inerted in query cache # Perform one query which should be inserted in query cache
select * from t1; select * from t1;
show status like "Qcache_queries_in_cache"; show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts"; show status like "Qcache_inserts";
@ -33,7 +61,7 @@ select * from t1;
show status like "Qcache_hits"; show status like "Qcache_hits";
# Update the table and make sure the correct data is returned # Update the table and make sure the correct data is returned
update t1 set a=3 where pk=1; update t1 set a1=3 where pk=1;
select * from t1; select * from t1;
show status like "Qcache_inserts"; show status like "Qcache_inserts";
show status like "Qcache_hits"; show status like "Qcache_hits";
@ -48,18 +76,18 @@ select * from t1 order by pk desc;
show status like "Qcache_hits"; show status like "Qcache_hits";
# Perform a "new" query and make sure the query cache is not hit # Perform a "new" query and make sure the query cache is not hit
select * from t1 where b=3; select * from t1 where b1=3;
show status like "Qcache_queries_in_cache"; show status like "Qcache_queries_in_cache";
show status like "Qcache_hits"; show status like "Qcache_hits";
# Same query again... # Same query again...
select * from t1 where b=3; select * from t1 where b1=3;
show status like "Qcache_hits"; show status like "Qcache_hits";
# Delete from the table # Delete from the table
delete from t1 where c='Fourth row'; delete from t1 where c1='Fourth row';
show status like "Qcache_queries_in_cache"; show status like "Qcache_queries_in_cache";
select * from t1 where b=3; select * from t1 where b1=3;
show status like "Qcache_hits"; show status like "Qcache_hits";
# Start another connection and check that the query cache is hit # Start another connection and check that the query cache is hit
@ -67,11 +95,11 @@ connect (con1,localhost,root,,);
connection con1; connection con1;
use test; use test;
select * from t1 order by pk desc; select * from t1 order by pk desc;
select * from t1 where b=3; select * from t1 where b1=3;
show status like "Qcache_hits"; show status like "Qcache_hits";
# Update the table and switch to other connection # Update the table and switch to other connection
update t1 set a=4 where b=3; update t1 set a1=4 where b1=3;
connect (con2,localhost,root,,); connect (con2,localhost,root,,);
connection con2; connection con2;
use test; use test;
@ -87,10 +115,23 @@ show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts"; show status like "Qcache_inserts";
show status like "Qcache_hits"; show status like "Qcache_hits";
# Use transactions and make sure the query cache is not updated until # Load all tables into cache
# transaction is commited select * from t2;
select * from t3;
select * from t4;
select * from t5;
show status like "Qcache_queries_in_cache";
#####################################################################
# Start transaction and perform update
# Switch to other transaction and check that update does not show up
# Switch back and commit transaction
# Switch to other transaction and check that update shows up
#####################################################################
connection con1;
flush status;
begin; begin;
update t1 set a=5 where pk=1; update t1 set a1=5 where pk=1;
show status like "Qcache_queries_in_cache"; show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts"; show status like "Qcache_inserts";
show status like "Qcache_hits"; show status like "Qcache_hits";
@ -101,8 +142,6 @@ show status like "Qcache_inserts";
show status like "Qcache_hits"; show status like "Qcache_hits";
connection con1; connection con1;
commit; commit;
# Sleep to let the query cache thread update commit count
sleep 10;
show status like "Qcache_queries_in_cache"; show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts"; show status like "Qcache_inserts";
show status like "Qcache_hits"; show status like "Qcache_hits";
@ -116,8 +155,203 @@ show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts"; show status like "Qcache_inserts";
show status like "Qcache_hits"; show status like "Qcache_hits";
drop table t1; #####################################################################
# Start transaction and perform update
# Switch to other transaction and check that update does not show up
# Switch back, perform selects and commit transaction
# Switch to other transaction and check that update shows up
#####################################################################
connection con1;
flush status;
begin;
update t1 set a1=6 where pk=1;
show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts";
show status like "Qcache_hits";
connection con2;
select * from t1 order by pk desc;
select * from t1 order by pk desc;
show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts";
show status like "Qcache_hits";
connection con1;
# The two queries below will not hit cache since transaction is ongoing
select * from t1 order by pk desc;
select * from t1 order by pk desc;
show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts";
show status like "Qcache_hits";
commit;
show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts";
show status like "Qcache_hits";
connection con2;
select * from t1 order by pk desc;
show status like "Qcache_inserts";
show status like "Qcache_hits";
connection con1;
select * from t1 order by pk desc;
show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts";
show status like "Qcache_hits";
#####################################################################
# Start transaction and perform insert
# Switch to other transaction and check that insert does not show up
# Switch back, perform selects and commit transaction
# Switch to other transaction and check that insert shows up
#####################################################################
connection con1;
flush status;
begin;
insert into t1 set pk=5, a1=6, b1=3, c1="New row";
show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts";
show status like "Qcache_hits";
connection con2;
select * from t1 where pk=5;
select * from t1 order by pk desc;
show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts";
show status like "Qcache_hits";
connection con1;
# The below four queries will not be cached, trans is ongoing
select * from t1 where pk=5;
select * from t1 where pk=5;
select * from t1 order by pk desc;
select * from t1 order by pk desc;
show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts";
show status like "Qcache_hits";
commit;
connection con2;
select * from t1 order by pk desc;
show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts";
show status like "Qcache_hits";
connection con1;
#####################################################################
# Start transaction and perform delete
# Switch to other transaction and check that delete does not show up
# Switch back, perform selects and commit transaction
# Switch to other transaction and check that delete shows up
#####################################################################
connection con1;
flush status;
begin;
delete from t1 where pk=2;
show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts";
show status like "Qcache_hits";
connection con2;
select * from t1 where pk=2;
select * from t1 order by pk desc;
show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts";
show status like "Qcache_hits";
connection con1;
# The below four queries will not be cached, trans is ongoing
select * from t1 where pk=2;
select * from t1 order by pk desc;
select * from t1 order by pk desc;
select * from t1 where pk=2;
show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts";
show status like "Qcache_hits";
commit;
connection con2;
select * from t1 order by pk desc;
select * from t1 where pk=2;
show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts";
show status like "Qcache_hits";
connection con1;
#####################################################################
# Start a transaction which updates all tables
# Switch to other transaction and check that the updates do not show up
# Switch back, perform selects and commit transaction
# Switch to other transaction and check that update shows up
#####################################################################
flush status;
begin;
update t1 set a1=9 where pk=1;
update t2 set a2=9 where pk=1;
update t3 set a3=9 where pk=1;
update t4 set a4=9 where a4=2;
update t5 set a5=9 where pk=1;
show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts";
show status like "Qcache_hits";
connection con2;
select * from t1 order by pk desc;
select * from t2;
select * from t3;
select * from t4;
select * from t5;
show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts";
show status like "Qcache_hits";
connection con1;
# The below five queries will not be cached, trans is ongoing
select * from t1 order by pk desc;
select * from t1 order by pk desc;
select * from t2;
select * from t3;
select * from t4;
select * from t5;
show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts";
show status like "Qcache_hits";
commit;
connection con2;
select * from t1 order by pk desc;
select * from t2;
select * from t3;
select * from t4;
select * from t5;
show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts";
show status like "Qcache_hits";
connection con1;
select * from t1 order by pk desc;
select * from t2;
select * from t3;
select * from t4;
select * from t5;
show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts";
show status like "Qcache_hits";
select * from t1 order by pk desc;
select * from t2;
select * from t3;
select * from t4;
select * from t5;
show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts";
show status like "Qcache_hits";
connection con2;
select * from t1 order by pk desc;
select * from t2;
select * from t3;
select * from t4;
select * from t5;
show status like "Qcache_queries_in_cache";
show status like "Qcache_inserts";
show status like "Qcache_hits";
drop table t1, t2, t3, t4, t5;
# There should be no queries in cache, when tables have been dropped
show status like "Qcache_queries_in_cache"; show status like "Qcache_queries_in_cache";
SET GLOBAL query_cache_size=0; SET GLOBAL query_cache_size=0;
View File
@ -2449,14 +2449,14 @@ void Dbacc::execACC_COMMITREQ(Signal* signal)
operationRecPtr.p->transactionstate = IDLE; operationRecPtr.p->transactionstate = IDLE;
operationRecPtr.p->operation = ZUNDEFINED_OP; operationRecPtr.p->operation = ZUNDEFINED_OP;
if(Toperation != ZREAD){ if(Toperation != ZREAD){
rootfragrecptr.i = fragrecptr.p->myroot;
ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
rootfragrecptr.p->m_commit_count++; rootfragrecptr.p->m_commit_count++;
if (Toperation != ZINSERT) { if (Toperation != ZINSERT) {
if (Toperation != ZDELETE) { if (Toperation != ZDELETE) {
return; return;
} else { } else {
jam(); jam();
rootfragrecptr.i = fragrecptr.p->myroot;
ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
rootfragrecptr.p->noOfElements--; rootfragrecptr.p->noOfElements--;
fragrecptr.p->slack += operationRecPtr.p->insertDeleteLen; fragrecptr.p->slack += operationRecPtr.p->insertDeleteLen;
if (fragrecptr.p->slack > fragrecptr.p->slackCheck) { if (fragrecptr.p->slack > fragrecptr.p->slackCheck) {
@ -2476,8 +2476,6 @@ void Dbacc::execACC_COMMITREQ(Signal* signal)
}//if }//if
} else { } else {
jam(); /* EXPAND PROCESS HANDLING */ jam(); /* EXPAND PROCESS HANDLING */
rootfragrecptr.i = fragrecptr.p->myroot;
ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
rootfragrecptr.p->noOfElements++; rootfragrecptr.p->noOfElements++;
fragrecptr.p->slack -= operationRecPtr.p->insertDeleteLen; fragrecptr.p->slack -= operationRecPtr.p->insertDeleteLen;
if (fragrecptr.p->slack >= (1u << 31)) { if (fragrecptr.p->slack >= (1u << 31)) {
View File
@ -475,8 +475,11 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select,
if (*killed) if (*killed)
{ {
DBUG_PRINT("info",("Sort killed by user")); DBUG_PRINT("info",("Sort killed by user"));
(void) file->extra(HA_EXTRA_NO_CACHE); if (!indexfile && !quick_select)
file->ha_rnd_end(); {
(void) file->extra(HA_EXTRA_NO_CACHE);
file->ha_rnd_end();
}
DBUG_RETURN(HA_POS_ERROR); /* purecov: inspected */ DBUG_RETURN(HA_POS_ERROR); /* purecov: inspected */
} }
if (error == 0) if (error == 0)
View File
@ -457,6 +457,7 @@ int ha_heap::create(const char *name, TABLE *table_arg,
char buff[FN_REFLEN]; char buff[FN_REFLEN];
int error; int error;
TABLE_SHARE *share= table_arg->s; TABLE_SHARE *share= table_arg->s;
bool found_real_auto_increment= 0;
for (key= parts= 0; key < keys; key++) for (key= parts= 0; key < keys; key++)
parts+= table_arg->key_info[key].key_parts; parts+= table_arg->key_info[key].key_parts;
@ -520,19 +521,22 @@ int ha_heap::create(const char *name, TABLE *table_arg,
seg->null_bit= 0; seg->null_bit= 0;
seg->null_pos= 0; seg->null_pos= 0;
} }
// We have to store field->key_type() as seg->type can differ from it
if (field->flags & AUTO_INCREMENT_FLAG) if (field->flags & AUTO_INCREMENT_FLAG)
{
auto_key= key + 1;
auto_key_type= field->key_type(); auto_key_type= field->key_type();
}
} }
} }
mem_per_row+= MY_ALIGN(share->reclength + 1, sizeof(char*)); mem_per_row+= MY_ALIGN(share->reclength + 1, sizeof(char*));
max_rows = (ha_rows) (table->in_use->variables.max_heap_table_size / max_rows = (ha_rows) (table->in_use->variables.max_heap_table_size /
mem_per_row); mem_per_row);
if (table_arg->found_next_number_field)
{
keydef[share->next_number_index].flag|= HA_AUTO_KEY;
found_real_auto_increment= share->next_number_key_offset == 0;
}
HP_CREATE_INFO hp_create_info; HP_CREATE_INFO hp_create_info;
hp_create_info.auto_key= auto_key;
hp_create_info.auto_key_type= auto_key_type; hp_create_info.auto_key_type= auto_key_type;
hp_create_info.with_auto_increment= found_real_auto_increment;
hp_create_info.auto_increment= (create_info->auto_increment_value ? hp_create_info.auto_increment= (create_info->auto_increment_value ?
create_info->auto_increment_value - 1 : 0); create_info->auto_increment_value - 1 : 0);
hp_create_info.max_table_size=current_thd->variables.max_heap_table_size; hp_create_info.max_table_size=current_thd->variables.max_heap_table_size;
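The hunk above derives with_auto_increment from next_number_key_offset: the heap engine is only told to maintain the auto_increment counter itself when the auto-increment column is the leading part of its key. A minimal standalone sketch of that decision, using simplified stand-in types rather than the real TABLE_SHARE/HP_CREATE_INFO definitions:

    #include <cstdio>

    /* Simplified stand-ins -- not the real MySQL structures. */
    struct table_sketch {
      bool has_auto_increment;          /* table has an auto_increment column   */
      unsigned next_number_key_offset;  /* offset of that column inside its key */
    };

    struct hp_create_info_sketch {
      bool with_auto_increment;
      unsigned long long auto_increment;
    };

    static void fill_create_info(const table_sketch& t, hp_create_info_sketch* info) {
      /* Only a leading key part (offset 0) lets the engine generate values. */
      info->with_auto_increment = t.has_auto_increment &&
                                  t.next_number_key_offset == 0;
      info->auto_increment = 0;
    }

    int main() {
      hp_create_info_sketch info;
      table_sketch leading  = { true, 0 };  /* e.g. primary key(a, b) on auto-inc a */
      table_sketch trailing = { true, 4 };  /* e.g. key(b, a): auto-inc a is second */

      fill_create_info(leading, &info);
      std::printf("leading:  with_auto_increment=%d\n", info.with_auto_increment);
      fill_create_info(trailing, &info);
      std::printf("trailing: with_auto_increment=%d\n", info.with_auto_increment);
      return 0;
    }

With key(b, a) the auto-increment column sits at a non-zero offset, which is the Bug #8489 case exercised by the new heap test above.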
View File
@ -1074,6 +1074,8 @@ innobase_init(void)
DBUG_ENTER("innobase_init"); DBUG_ENTER("innobase_init");
ut_a(DATA_MYSQL_TRUE_VARCHAR == (ulint)MYSQL_TYPE_VARCHAR);
os_innodb_umask = (ulint)my_umask; os_innodb_umask = (ulint)my_umask;
/* First calculate the default path for innodb_data_home_dir etc., /* First calculate the default path for innodb_data_home_dir etc.,
@ -2244,7 +2246,9 @@ innobase_mysql_cmp(
} }
/****************************************************************** /******************************************************************
Converts a MySQL type to an InnoDB type. */ Converts a MySQL type to an InnoDB type. Note that this function returns
the 'mtype' of InnoDB. InnoDB differentiates between MySQL's old <= 4.1
VARCHAR and the new true VARCHAR in >= 5.0.3 by the 'prtype'. */
inline inline
ulint ulint
get_innobase_type_from_mysql_type( get_innobase_type_from_mysql_type(
@ -2259,8 +2263,9 @@ get_innobase_type_from_mysql_type(
switch (field->type()) { switch (field->type()) {
/* NOTE that we only allow string types in DATA_MYSQL /* NOTE that we only allow string types in DATA_MYSQL
and DATA_VARMYSQL */ and DATA_VARMYSQL */
case MYSQL_TYPE_VAR_STRING: case MYSQL_TYPE_VAR_STRING: /* old <= 4.1 VARCHAR */
case MYSQL_TYPE_VARCHAR: if (field->binary()) { case MYSQL_TYPE_VARCHAR: /* new >= 5.0.3 true VARCHAR */
if (field->binary()) {
return(DATA_BINARY); return(DATA_BINARY);
} else if (strcmp( } else if (strcmp(
field->charset()->name, field->charset()->name,
@ -2313,6 +2318,35 @@ get_innobase_type_from_mysql_type(
return(0); return(0);
} }
/***********************************************************************
Writes an unsigned integer value < 64k to 2 bytes, in the little-endian
storage format. */
inline
void
innobase_write_to_2_little_endian(
/*==============================*/
byte* buf, /* in: where to store */
ulint val) /* in: value to write, must be < 64k */
{
ut_a(val < 256 * 256);
buf[0] = (byte)(val & 0xFF);
buf[1] = (byte)(val / 256);
}
/***********************************************************************
Reads an unsigned integer value < 64k from 2 bytes, in the little-endian
storage format. */
inline
uint
innobase_read_from_2_little_endian(
/*===============================*/
/* out: value */
const mysql_byte* buf) /* in: from where to read */
{
return((ulint)(buf[0]) + 256 * ((ulint)(buf[1])));
}
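The two helpers above replace the old single-byte length assumption (ut_a(blob_len < 256)) with a proper 2-byte little-endian length. A self-contained re-implementation, runnable outside InnoDB (the names are illustrative, not the real symbols):

    #include <cassert>
    #include <cstdio>

    typedef unsigned char byte_t;

    /* Write an unsigned value < 64k to 2 bytes, little-endian. */
    static void write_2_little_endian(byte_t* buf, unsigned long val) {
      assert(val < 256 * 256);
      buf[0] = (byte_t)(val & 0xFF);
      buf[1] = (byte_t)(val / 256);
    }

    /* Read the value back. */
    static unsigned long read_2_little_endian(const byte_t* buf) {
      return (unsigned long)buf[0] + 256UL * (unsigned long)buf[1];
    }

    int main() {
      byte_t buf[2];
      write_2_little_endian(buf, 300);   /* 300 == 0x012C */
      std::printf("%02x %02x -> %lu\n", buf[0], buf[1], read_2_little_endian(buf));
      /* prints: 2c 01 -> 300 */
      return 0;
    }

Writing 300 stores the bytes 2c 01 and reading them back returns 300, the round trip that the key-value code below relies on.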
/*********************************************************************** /***********************************************************************
Stores a key value for a row to a buffer. */ Stores a key value for a row to a buffer. */
@ -2352,9 +2386,14 @@ ha_innobase::store_key_val_for_row(
3. In a column prefix field, prefix_len next bytes are reserved for 3. In a column prefix field, prefix_len next bytes are reserved for
data. In a normal field the max field length next bytes are reserved data. In a normal field the max field length next bytes are reserved
for data. For a VARCHAR(n) the max field length is n. If the stored for data. For a VARCHAR(n) the max field length is n. If the stored
value is the SQL NULL then these data bytes are set to 0. */ value is the SQL NULL then these data bytes are set to 0.
/* We have to zero-fill the buffer so that MySQL is able to use a 4. We always use a 2 byte length for a true >= 5.0.3 VARCHAR. Note that
in the MySQL row format, the length is stored in 1 or 2 bytes,
depending on the maximum allowed length. But in the MySQL key value
format, the length always takes 2 bytes.
We have to zero-fill the buffer so that MySQL is able to use a
simple memcmp to compare two key values to determine if they are simple memcmp to compare two key values to determine if they are
equal. MySQL does this to compare contents of two 'ref' values. */ equal. MySQL does this to compare contents of two 'ref' values. */
@ -2377,7 +2416,43 @@ ha_innobase::store_key_val_for_row(
field = key_part->field; field = key_part->field;
mysql_type = field->type(); mysql_type = field->type();
if (mysql_type == FIELD_TYPE_TINY_BLOB if (mysql_type == MYSQL_TYPE_VARCHAR) {
/* >= 5.0.3 true VARCHAR */
ulint lenlen;
ulint len;
byte* data;
if (is_null) {
buff += key_part->length + 2;
continue;
}
lenlen = (ulint)
(((Field_varstring*)field)->length_bytes);
data = row_mysql_read_true_varchar(&len,
(byte*) (record
+ (ulint)get_field_offset(table, field)),
lenlen);
/* The length in a key value is always stored in 2
bytes */
row_mysql_store_true_var_len((byte*)buff, len, 2);
buff += 2;
memcpy(buff, data, len);
/* Note that we always reserve the maximum possible
length of the true VARCHAR in the key value, though
only len first bytes after the 2 length bytes contain
actual data. The rest of the space was reset to zero
in the bzero() call above. */
buff += key_part->length;
} else if (mysql_type == FIELD_TYPE_TINY_BLOB
|| mysql_type == FIELD_TYPE_MEDIUM_BLOB || mysql_type == FIELD_TYPE_MEDIUM_BLOB
|| mysql_type == FIELD_TYPE_BLOB || mysql_type == FIELD_TYPE_BLOB
|| mysql_type == FIELD_TYPE_LONG_BLOB) { || mysql_type == FIELD_TYPE_LONG_BLOB) {
@ -2385,9 +2460,9 @@ ha_innobase::store_key_val_for_row(
ut_a(key_part->key_part_flag & HA_PART_KEY_SEG); ut_a(key_part->key_part_flag & HA_PART_KEY_SEG);
if (is_null) { if (is_null) {
buff += key_part->length + 2; buff += key_part->length + 2;
continue; continue;
} }
blob_data = row_mysql_read_blob_ref(&blob_len, blob_data = row_mysql_read_blob_ref(&blob_len,
@ -2404,12 +2479,15 @@ ha_innobase::store_key_val_for_row(
/* MySQL reserves 2 bytes for the length and the /* MySQL reserves 2 bytes for the length and the
storage of the number is little-endian */ storage of the number is little-endian */
ut_a(blob_len < 256); innobase_write_to_2_little_endian(
*((byte*)buff) = (byte)blob_len; (byte*)buff, (ulint)blob_len);
buff += 2; buff += 2;
memcpy(buff, blob_data, blob_len); memcpy(buff, blob_data, blob_len);
/* Note that we always reserve the maximum possible
length of the BLOB prefix in the key value. */
buff += key_part->length; buff += key_part->length;
} else { } else {
if (is_null) { if (is_null) {
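The new MYSQL_TYPE_VARCHAR branch serializes a key part into a fixed-size slot: 2 little-endian length bytes, then the actual data, with the rest of the reserved space left zero-filled so plain memcmp() comparisons of 'ref' values stay valid. A hedged standalone sketch of that layout (simplified signature, not the handler method itself):

    #include <cstring>
    #include <cstdio>

    typedef unsigned char byte_t;

    /* Store one true-VARCHAR key part: 2-byte little-endian length, the data,
       then zero padding up to the declared maximum key_part length. */
    static size_t store_varchar_key_part(byte_t* buff, size_t max_len,
                                         const char* data, size_t len) {
      std::memset(buff, 0, 2 + max_len);   /* zero-fill so memcmp() stays valid */
      buff[0] = (byte_t)(len & 0xFF);
      buff[1] = (byte_t)((len >> 8) & 0xFF);
      std::memcpy(buff + 2, data, len);
      return 2 + max_len;                  /* the full slot is always reserved */
    }

    int main() {
      byte_t buff[2 + 20];
      size_t used = store_varchar_key_part(buff, 20, "abc", 3);
      unsigned stored_len = buff[0] + 256u * buff[1];
      std::printf("slot=%zu bytes, stored length=%u, first byte=%c\n",
                  used, stored_len, buff[2]);
      return 0;
    }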
@ -2573,6 +2651,13 @@ build_template(
templ->mysql_col_len = (ulint) field->pack_length(); templ->mysql_col_len = (ulint) field->pack_length();
templ->type = get_innobase_type_from_mysql_type(field); templ->type = get_innobase_type_from_mysql_type(field);
templ->mysql_type = (ulint)field->type();
if (templ->mysql_type == DATA_MYSQL_TRUE_VARCHAR) {
templ->mysql_length_bytes = (ulint)
(((Field_varstring*)field)->length_bytes);
}
templ->charset = dtype_get_charset_coll_noninline( templ->charset = dtype_get_charset_coll_noninline(
index->table->cols[i].type.prtype); index->table->cols[i].type.prtype);
templ->mbminlen = index->table->cols[i].type.mbminlen; templ->mbminlen = index->table->cols[i].type.mbminlen;
@ -2810,54 +2895,6 @@ func_exit:
DBUG_RETURN(error); DBUG_RETURN(error);
} }
/******************************************************************
Converts field data for storage in an InnoDB update vector. */
inline
mysql_byte*
innobase_convert_and_store_changed_col(
/*===================================*/
/* out: pointer to the end of the converted
data in the buffer */
upd_field_t* ufield, /* in/out: field in the update vector */
mysql_byte* buf, /* in: buffer we can use in conversion */
mysql_byte* data, /* in: column data to store */
ulint len, /* in: data len */
ulint col_type,/* in: data type in InnoDB type numbers */
ulint is_unsigned)/* in: != 0 if an unsigned integer type */
{
uint i;
if (len == UNIV_SQL_NULL) {
data = NULL;
} else if (col_type == DATA_VARCHAR || col_type == DATA_BINARY
|| col_type == DATA_VARMYSQL) {
/* Remove trailing spaces */
while (len > 0 && data[len - 1] == ' ') {
len--;
}
} else if (col_type == DATA_INT) {
/* Store integer data in InnoDB in a big-endian
format, sign bit negated, if signed */
for (i = 0; i < len; i++) {
buf[len - 1 - i] = data[i];
}
if (!is_unsigned) {
buf[0] = buf[0] ^ 128;
}
data = buf;
buf += len;
}
ufield->new_val.data = data;
ufield->new_val.len = len;
return(buf);
}
/************************************************************************** /**************************************************************************
Checks which fields have changed in a row and stores information Checks which fields have changed in a row and stores information
of them to an update vector. */ of them to an update vector. */
@ -2878,9 +2915,11 @@ calc_row_difference(
{ {
mysql_byte* original_upd_buff = upd_buff; mysql_byte* original_upd_buff = upd_buff;
Field* field; Field* field;
enum_field_types field_mysql_type;
uint n_fields; uint n_fields;
ulint o_len; ulint o_len;
ulint n_len; ulint n_len;
ulint col_pack_len;
byte* o_ptr; byte* o_ptr;
byte* n_ptr; byte* n_ptr;
byte* buf; byte* buf;
@ -2888,6 +2927,7 @@ calc_row_difference(
ulint col_type; ulint col_type;
ulint is_unsigned; ulint is_unsigned;
ulint n_changed = 0; ulint n_changed = 0;
dfield_t dfield;
uint i; uint i;
n_fields = table->s->fields; n_fields = table->s->fields;
@ -2907,9 +2947,13 @@ calc_row_difference(
o_ptr = (byte*) old_row + get_field_offset(table, field); o_ptr = (byte*) old_row + get_field_offset(table, field);
n_ptr = (byte*) new_row + get_field_offset(table, field); n_ptr = (byte*) new_row + get_field_offset(table, field);
o_len = field->pack_length();
n_len = field->pack_length(); col_pack_len = field->pack_length();
o_len = col_pack_len;
n_len = col_pack_len;
field_mysql_type = field->type();
col_type = get_innobase_type_from_mysql_type(field); col_type = get_innobase_type_from_mysql_type(field);
is_unsigned = (ulint) (field->flags & UNSIGNED_FLAG); is_unsigned = (ulint) (field->flags & UNSIGNED_FLAG);
@ -2918,14 +2962,29 @@ calc_row_difference(
case DATA_BLOB: case DATA_BLOB:
o_ptr = row_mysql_read_blob_ref(&o_len, o_ptr, o_len); o_ptr = row_mysql_read_blob_ref(&o_len, o_ptr, o_len);
n_ptr = row_mysql_read_blob_ref(&n_len, n_ptr, n_len); n_ptr = row_mysql_read_blob_ref(&n_len, n_ptr, n_len);
break; break;
case DATA_VARCHAR: case DATA_VARCHAR:
case DATA_BINARY: case DATA_BINARY:
case DATA_VARMYSQL: case DATA_VARMYSQL:
o_ptr = row_mysql_read_var_ref_noninline(&o_len, if (field_mysql_type == MYSQL_TYPE_VARCHAR) {
o_ptr); /* This is a >= 5.0.3 type true VARCHAR where
n_ptr = row_mysql_read_var_ref_noninline(&n_len, the real payload data length is stored in
n_ptr); 1 or 2 bytes */
o_ptr = row_mysql_read_true_varchar(
&o_len, o_ptr,
(ulint)
(((Field_varstring*)field)->length_bytes));
n_ptr = row_mysql_read_true_varchar(
&n_len, n_ptr,
(ulint)
(((Field_varstring*)field)->length_bytes));
}
break;
default: default:
; ;
} }
@ -2947,12 +3006,29 @@ calc_row_difference(
/* The field has changed */ /* The field has changed */
ufield = uvect->fields + n_changed; ufield = uvect->fields + n_changed;
/* Let us use a dummy dfield to make the conversion
from the MySQL column format to the InnoDB format */
dfield.type = (prebuilt->table->cols + i)->type;
if (n_len != UNIV_SQL_NULL) {
buf = row_mysql_store_col_in_innobase_format(
&dfield,
(byte*)buf,
TRUE,
n_ptr,
col_pack_len,
prebuilt->table->comp);
ufield->new_val.data =
dfield_get_data(&dfield);
ufield->new_val.len =
dfield_get_len(&dfield);
} else {
ufield->new_val.data = NULL;
ufield->new_val.len = UNIV_SQL_NULL;
}
buf = (byte*)
innobase_convert_and_store_changed_col(ufield,
(mysql_byte*)buf,
(mysql_byte*)n_ptr, n_len, col_type,
is_unsigned);
ufield->exp = NULL; ufield->exp = NULL;
ufield->field_no = ufield->field_no =
(prebuilt->table->cols + i)->clust_pos; (prebuilt->table->cols + i)->clust_pos;
@ -3701,7 +3777,7 @@ ha_innobase::rnd_pos(
} }
if (error) { if (error) {
DBUG_PRINT("error",("Got error: %ld",error)); DBUG_PRINT("error", ("Got error: %ld", error));
DBUG_RETURN(error); DBUG_RETURN(error);
} }
@ -3709,10 +3785,11 @@ ha_innobase::rnd_pos(
for the table, and it is == ref_length */ for the table, and it is == ref_length */
error = index_read(buf, pos, ref_length, HA_READ_KEY_EXACT); error = index_read(buf, pos, ref_length, HA_READ_KEY_EXACT);
if (error)
{ if (error) {
DBUG_PRINT("error",("Got error: %ld",error)); DBUG_PRINT("error", ("Got error: %ld", error));
} }
change_active_index(keynr); change_active_index(keynr);
DBUG_RETURN(error); DBUG_RETURN(error);
@ -3752,12 +3829,11 @@ ha_innobase::position(
ref_length, record); ref_length, record);
} }
/* Since we do not store len to the buffer 'ref', we must assume /* We assume that the 'ref' value len is always fixed for the same
that len is always fixed for this table. The following assertion table. */
checks this. */
if (len != ref_length) { if (len != ref_length) {
fprintf(stderr, fprintf(stderr,
"InnoDB: Error: stored ref len is %lu, but table ref len is %lu\n", "InnoDB: Error: stored ref len is %lu, but table ref len is %lu\n",
(ulong)len, (ulong)ref_length); (ulong)len, (ulong)ref_length);
} }
@ -3788,9 +3864,11 @@ create_table_def(
ulint n_cols; ulint n_cols;
int error; int error;
ulint col_type; ulint col_type;
ulint col_len;
ulint nulls_allowed; ulint nulls_allowed;
ulint unsigned_type; ulint unsigned_type;
ulint binary_type; ulint binary_type;
ulint long_true_varchar;
ulint charset_no; ulint charset_no;
ulint i; ulint i;
@ -3837,17 +3915,40 @@ create_table_def(
charset_no = (ulint)field->charset()->number; charset_no = (ulint)field->charset()->number;
ut_a(charset_no < 256); /* in ut0type.h we assume that ut_a(charset_no < 256); /* in data0type.h we assume
the number fits in one byte */ that the number fits in one
byte */
} }
dict_mem_table_add_col(table, (char*) field->field_name, ut_a(field->type() < 256); /* we assume in dtype_form_prtype()
col_type, dtype_form_prtype( that this fits in one byte */
(ulint)field->type() col_len = field->pack_length();
| nulls_allowed | unsigned_type
| binary_type, /* The MySQL pack length contains 1 or 2 bytes length field
+ charset_no), for a true VARCHAR. Let us subtract that, so that the InnoDB
field->pack_length(), 0); column length in the InnoDB data dictionary is the real
maximum byte length of the actual data. */
long_true_varchar = 0;
if (field->type() == MYSQL_TYPE_VARCHAR) {
col_len -= ((Field_varstring*)field)->length_bytes;
if (((Field_varstring*)field)->length_bytes == 2) {
long_true_varchar = DATA_LONG_TRUE_VARCHAR;
}
}
dict_mem_table_add_col(table,
(char*) field->field_name,
col_type,
dtype_form_prtype(
(ulint)field->type()
| nulls_allowed | unsigned_type
| binary_type | long_true_varchar,
charset_no),
col_len,
0);
} }
error = row_create_table_for_mysql(table, trx); error = row_create_table_for_mysql(table, trx);
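In the create_table_def() hunk above, the column length recorded in the data dictionary is the MySQL pack length minus the 1 or 2 length bytes of a true VARCHAR, and a 2-byte length field additionally sets the DATA_LONG_TRUE_VARCHAR bit in the prtype. A small sketch of just that arithmetic, with stand-in constants rather than the real InnoDB headers:

    #include <cstdio>

    /* Stand-in for the DATA_LONG_TRUE_VARCHAR flag used above -- value assumed. */
    static const unsigned long LONG_TRUE_VARCHAR_FLAG = 1UL << 12;

    struct varchar_field_sketch {
      unsigned long pack_length;   /* MySQL pack length: length bytes + max data bytes */
      unsigned long length_bytes;  /* 1 if the maximum length < 256, otherwise 2 */
    };

    int main() {
      /* e.g. VARCHAR(300) in a single-byte charset: 2 length bytes + 300 data bytes */
      varchar_field_sketch f = { 302, 2 };

      unsigned long col_len = f.pack_length - f.length_bytes;  /* real max data length */
      unsigned long prtype_extra = (f.length_bytes == 2) ? LONG_TRUE_VARCHAR_FLAG : 0;

      std::printf("col_len=%lu prtype_extra=0x%lx\n", col_len, prtype_extra);
      return 0;
    }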
@ -6125,54 +6226,79 @@ ha_innobase::get_auto_increment()
return((ulonglong) nr); return((ulonglong) nr);
} }
/***********************************************************************
Compares two 'refs'. A 'ref' is the (internal) primary key value of the row.
If there is no explicitly declared non-null unique key or a primary key, then
InnoDB internally uses the row id as the primary key. */
int int
ha_innobase::cmp_ref( ha_innobase::cmp_ref(
const mysql_byte *ref1, /*=================*/
const mysql_byte *ref2) /* out: < 0 if ref1 < ref2, 0 if equal, else
> 0 */
const mysql_byte* ref1, /* in: an (internal) primary key value in the
MySQL key value format */
const mysql_byte* ref2) /* in: an (internal) primary key value in the
MySQL key value format */
{ {
row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt;
enum_field_types mysql_type; enum_field_types mysql_type;
Field* field; Field* field;
int result; KEY_PART_INFO* key_part;
KEY_PART_INFO* key_part_end;
uint len1;
uint len2;
int result;
if (prebuilt->clust_index_was_generated) if (prebuilt->clust_index_was_generated) {
return memcmp(ref1, ref2, DATA_ROW_ID_LEN); /* The 'ref' is an InnoDB row id */
return(memcmp(ref1, ref2, DATA_ROW_ID_LEN));
}
/* Do a type-aware comparison of primary key fields. PK fields
are always NOT NULL, so no checks for NULL are performed. */
key_part = table->key_info[table->s->primary_key].key_part;
key_part_end = key_part
+ table->key_info[table->s->primary_key].key_parts;
/* Do type-aware comparison of Primary Key members. PK members
are always NOT NULL, so no checks for NULL are performed */
KEY_PART_INFO *key_part=
table->key_info[table->s->primary_key].key_part;
KEY_PART_INFO *key_part_end=
key_part + table->key_info[table->s->primary_key].key_parts;
for (; key_part != key_part_end; ++key_part) { for (; key_part != key_part_end; ++key_part) {
field = key_part->field; field = key_part->field;
mysql_type = field->type(); mysql_type = field->type();
if (mysql_type == FIELD_TYPE_TINY_BLOB if (mysql_type == FIELD_TYPE_TINY_BLOB
|| mysql_type == FIELD_TYPE_MEDIUM_BLOB || mysql_type == FIELD_TYPE_MEDIUM_BLOB
|| mysql_type == FIELD_TYPE_BLOB || mysql_type == FIELD_TYPE_BLOB
|| mysql_type == FIELD_TYPE_LONG_BLOB) { || mysql_type == FIELD_TYPE_LONG_BLOB) {
ut_a(!ref1[1]); /* In the MySQL key value format, a column prefix of
ut_a(!ref2[1]); a BLOB is preceded by a 2-byte length field */
byte len1= *ref1;
byte len2= *ref2; len1 = innobase_read_from_2_little_endian(ref1);
len2 = innobase_read_from_2_little_endian(ref2);
ref1 += 2; ref1 += 2;
ref2 += 2; ref2 += 2;
result = result = ((Field_blob*)field)->cmp(
((Field_blob*)field)->cmp((const char*)ref1, len1, (const char*)ref1, len1,
(const char*)ref2, len2); (const char*)ref2, len2);
} else { } else {
result = result = field->cmp((const char*)ref1,
field->cmp((const char*)ref1, (const char*)ref2); (const char*)ref2);
}
if (result) {
return(result);
} }
if (result)
return result;
ref1 += key_part->length; ref1 += key_part->length;
ref2 += key_part->length; ref2 += key_part->length;
} }
return 0;
return(0);
} }
char* char*
View File
@ -1,4 +1,4 @@
/* Copyright (C) 2000 MySQL AB && Innobase Oy /* Copyright (C) 2000-2005 MySQL AB && Innobase Oy
This program is free software; you can redistribute it and/or modify This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by it under the terms of the GNU General Public License as published by
@ -40,9 +40,10 @@ my_bool innobase_query_caching_of_table_permitted(THD* thd, char* full_name,
/* The class defining a handle to an Innodb table */ /* The class defining a handle to an Innodb table */
class ha_innobase: public handler class ha_innobase: public handler
{ {
void* innobase_prebuilt; /* (row_prebuilt_t*) prebuilt void* innobase_prebuilt;/* (row_prebuilt_t*) prebuilt
struct in Innodb, used to save struct in InnoDB, used to save
CPU */ CPU time with prebuilt data
structures*/
THD* user_thd; /* the thread handle of the user THD* user_thd; /* the thread handle of the user
currently using the handle; this is currently using the handle; this is
set in external_lock function */ set in external_lock function */
@ -83,12 +84,12 @@ class ha_innobase: public handler
public: public:
ha_innobase(TABLE *table): handler(table), ha_innobase(TABLE *table): handler(table),
int_table_flags(HA_REC_NOT_IN_SEQ | int_table_flags(HA_REC_NOT_IN_SEQ |
HA_NULL_IN_KEY | HA_FAST_KEY_READ | HA_NULL_IN_KEY |
HA_FAST_KEY_READ |
HA_CAN_INDEX_BLOBS | HA_CAN_INDEX_BLOBS |
HA_CAN_SQL_HANDLER | HA_CAN_SQL_HANDLER |
HA_NOT_EXACT_COUNT | HA_NOT_EXACT_COUNT |
HA_PRIMARY_KEY_IN_READ_INDEX | HA_PRIMARY_KEY_IN_READ_INDEX |
HA_NO_VARCHAR |
HA_TABLE_SCAN_ON_INDEX), HA_TABLE_SCAN_ON_INDEX),
last_dup_key((uint) -1), last_dup_key((uint) -1),
start_of_scan(0), start_of_scan(0),
@ -108,7 +109,10 @@ class ha_innobase: public handler
ulong table_flags() const { return int_table_flags; } ulong table_flags() const { return int_table_flags; }
ulong index_flags(uint idx, uint part, bool all_parts) const ulong index_flags(uint idx, uint part, bool all_parts) const
{ {
return (HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER | HA_READ_RANGE | return (HA_READ_NEXT |
HA_READ_PREV |
HA_READ_ORDER |
HA_READ_RANGE |
HA_KEYREAD_ONLY); HA_KEYREAD_ONLY);
} }
uint max_supported_keys() const { return MAX_KEY; } uint max_supported_keys() const { return MAX_KEY; }
@ -163,7 +167,8 @@ class ha_innobase: public handler
int start_stmt(THD *thd); int start_stmt(THD *thd);
void position(byte *record); void position(byte *record);
ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key); ha_rows records_in_range(uint inx, key_range *min_key, key_range
*max_key);
ha_rows estimate_rows_upper_bound(); ha_rows estimate_rows_upper_bound();
int create(const char *name, register TABLE *form, int create(const char *name, register TABLE *form,
View File
@ -286,7 +286,8 @@ Thd_ndb::~Thd_ndb()
{ {
if (ndb) if (ndb)
delete ndb; delete ndb;
ndb= 0; ndb= NULL;
changed_tables.empty();
} }
inline inline
@ -1891,7 +1892,7 @@ int ha_ndbcluster::write_row(byte *record)
if (peek_res != HA_ERR_KEY_NOT_FOUND) if (peek_res != HA_ERR_KEY_NOT_FOUND)
DBUG_RETURN(peek_res); DBUG_RETURN(peek_res);
} }
statistic_increment(thd->status_var.ha_write_count, &LOCK_status); statistic_increment(thd->status_var.ha_write_count, &LOCK_status);
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
table->timestamp_field->set_time(); table->timestamp_field->set_time();
@ -1940,6 +1941,8 @@ int ha_ndbcluster::write_row(byte *record)
} }
} }
m_rows_changed++;
/* /*
Execute write operation Execute write operation
NOTE When doing inserts with many values in NOTE When doing inserts with many values in
@ -2133,6 +2136,8 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
} }
} }
m_rows_changed++;
// Set non-key attribute(s) // Set non-key attribute(s)
for (i= 0; i < table->s->fields; i++) for (i= 0; i < table->s->fields; i++)
{ {
@ -2215,7 +2220,9 @@ int ha_ndbcluster::delete_row(const byte *record)
return res; return res;
} }
} }
m_rows_changed++;
// Execute delete operation // Execute delete operation
if (execute_no_commit(this,trans) != 0) { if (execute_no_commit(this,trans) != 0) {
no_uncommitted_rows_execute_failure(); no_uncommitted_rows_execute_failure();
@ -3112,14 +3119,14 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
Check that this handler instance has a connection Check that this handler instance has a connection
set up to the Ndb object of thd set up to the Ndb object of thd
*/ */
if (check_ndb_connection()) if (check_ndb_connection(thd))
DBUG_RETURN(1); DBUG_RETURN(1);
Thd_ndb *thd_ndb= get_thd_ndb(thd); Thd_ndb *thd_ndb= get_thd_ndb(thd);
Ndb *ndb= thd_ndb->ndb; Ndb *ndb= thd_ndb->ndb;
DBUG_PRINT("enter", ("transaction.thd_ndb->lock_count: %d", DBUG_PRINT("enter", ("thd: %x, thd_ndb: %x, thd_ndb->lock_count: %d",
thd_ndb->lock_count)); thd, thd_ndb, thd_ndb->lock_count));
if (lock_type != F_UNLCK) if (lock_type != F_UNLCK)
{ {
@ -3127,7 +3134,6 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
if (!thd_ndb->lock_count++) if (!thd_ndb->lock_count++)
{ {
PRINT_OPTION_FLAGS(thd); PRINT_OPTION_FLAGS(thd);
if (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN | OPTION_TABLE_LOCK))) if (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN | OPTION_TABLE_LOCK)))
{ {
// Autocommit transaction // Autocommit transaction
@ -3195,9 +3201,10 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
m_active_trans= thd_ndb->all ? thd_ndb->all : thd_ndb->stmt; m_active_trans= thd_ndb->all ? thd_ndb->all : thd_ndb->stmt;
DBUG_ASSERT(m_active_trans); DBUG_ASSERT(m_active_trans);
// Start of transaction // Start of transaction
m_rows_changed= 0;
m_retrieve_all_fields= FALSE; m_retrieve_all_fields= FALSE;
m_retrieve_primary_key= FALSE; m_retrieve_primary_key= FALSE;
m_ops_pending= 0; m_ops_pending= 0;
{ {
NDBDICT *dict= ndb->getDictionary(); NDBDICT *dict= ndb->getDictionary();
const NDBTAB *tab; const NDBTAB *tab;
@ -3209,10 +3216,28 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
m_table_info= tab_info; m_table_info= tab_info;
} }
no_uncommitted_rows_init(thd); no_uncommitted_rows_init(thd);
} }
else else
{ {
DBUG_PRINT("info", ("lock_type == F_UNLCK")); DBUG_PRINT("info", ("lock_type == F_UNLCK"));
if (ndb_cache_check_time && m_rows_changed)
{
DBUG_PRINT("info", ("Rows has changed and util thread is running"));
if (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))
{
DBUG_PRINT("info", ("Add share to list of tables to be invalidated"));
/* NOTE push_back allocates memory using transactions mem_root! */
thd_ndb->changed_tables.push_back(m_share, &thd->transaction.mem_root);
}
pthread_mutex_lock(&m_share->mutex);
DBUG_PRINT("info", ("Invalidating commit_count"));
m_share->commit_count= 0;
m_share->commit_count_lock++;
pthread_mutex_unlock(&m_share->mutex);
}
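The block above is the write side of the new commit-count protocol: when a handler that changed rows releases its lock, it zeroes the cached commit_count and bumps commit_count_lock so any concurrent reader discards a value fetched in the meantime. A standalone sketch of that invalidation step (stand-in NDB_SHARE, illustration only):

    #include <pthread.h>
    #include <cstdio>

    /* Simplified stand-in for NDB_SHARE. */
    struct share_sketch {
      pthread_mutex_t mutex;
      unsigned long long commit_count;  /* 0 means "unknown / invalidated" */
      unsigned commit_count_lock;       /* bumped on every invalidation */
    };

    /* A handler that changed rows marks the cached commit count as stale. */
    static void invalidate_commit_count(share_sketch* share) {
      pthread_mutex_lock(&share->mutex);
      share->commit_count = 0;
      share->commit_count_lock++;
      pthread_mutex_unlock(&share->mutex);
    }

    int main() {
      share_sketch s = { PTHREAD_MUTEX_INITIALIZER, 42, 0 };
      invalidate_commit_count(&s);
      std::printf("commit_count=%llu commit_count_lock=%u\n",
                  s.commit_count, s.commit_count_lock);
      return 0;
    }

ndbcluster_commit() and ndbcluster_rollback() below apply the same invalidation to every share collected in changed_tables.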
if (!--thd_ndb->lock_count) if (!--thd_ndb->lock_count)
{ {
DBUG_PRINT("trans", ("Last external_lock")); DBUG_PRINT("trans", ("Last external_lock"));
@ -3232,6 +3257,7 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
} }
m_table= NULL; m_table= NULL;
m_table_info= NULL; m_table_info= NULL;
/* /*
This is the place to make sure this handler instance This is the place to make sure this handler instance
no longer are connected to the active transaction. no longer are connected to the active transaction.
@ -3305,7 +3331,7 @@ int ha_ndbcluster::start_stmt(THD *thd)
/* /*
Commit a transaction started in NDB Commit a transaction started in NDB
*/ */
int ndbcluster_commit(THD *thd, bool all) int ndbcluster_commit(THD *thd, bool all)
@ -3317,7 +3343,7 @@ int ndbcluster_commit(THD *thd, bool all)
DBUG_ENTER("ndbcluster_commit"); DBUG_ENTER("ndbcluster_commit");
DBUG_PRINT("transaction",("%s", DBUG_PRINT("transaction",("%s",
trans == thd_ndb->stmt ? trans == thd_ndb->stmt ?
"stmt" : "all")); "stmt" : "all"));
DBUG_ASSERT(ndb && trans); DBUG_ASSERT(ndb && trans);
@ -3325,18 +3351,31 @@ int ndbcluster_commit(THD *thd, bool all)
{ {
const NdbError err= trans->getNdbError(); const NdbError err= trans->getNdbError();
const NdbOperation *error_op= trans->getNdbErrorOperation(); const NdbOperation *error_op= trans->getNdbErrorOperation();
ERR_PRINT(err); ERR_PRINT(err);
res= ndb_to_mysql_error(&err); res= ndb_to_mysql_error(&err);
if (res != -1) if (res != -1)
ndbcluster_print_error(res, error_op); ndbcluster_print_error(res, error_op);
} }
ndb->closeTransaction(trans); ndb->closeTransaction(trans);
if(all) if(all)
thd_ndb->all= NULL; thd_ndb->all= NULL;
else else
thd_ndb->stmt= NULL; thd_ndb->stmt= NULL;
/* Clear commit_count for tables changed by transaction */
NDB_SHARE* share;
List_iterator_fast<NDB_SHARE> it(thd_ndb->changed_tables);
while ((share= it++))
{
pthread_mutex_lock(&share->mutex);
DBUG_PRINT("info", ("Invalidate commit_count for %s, share->commit_count: %d ", share->table_name, share->commit_count));
share->commit_count= 0;
share->commit_count_lock++;
pthread_mutex_unlock(&share->mutex);
}
thd_ndb->changed_tables.empty();
DBUG_RETURN(res); DBUG_RETURN(res);
} }
@ -3374,6 +3413,9 @@ int ndbcluster_rollback(THD *thd, bool all)
else else
thd_ndb->stmt= NULL; thd_ndb->stmt= NULL;
/* Clear list of tables changed by transaction */
thd_ndb->changed_tables.empty();
DBUG_RETURN(res); DBUG_RETURN(res);
} }
@ -4066,6 +4108,7 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
m_rows_to_insert(1), m_rows_to_insert(1),
m_rows_inserted(0), m_rows_inserted(0),
m_bulk_insert_rows(1024), m_bulk_insert_rows(1024),
m_rows_changed(0),
m_bulk_insert_not_flushed(FALSE), m_bulk_insert_not_flushed(FALSE),
m_ops_pending(0), m_ops_pending(0),
m_skip_auto_increment(TRUE), m_skip_auto_increment(TRUE),
@ -4079,9 +4122,9 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
m_transaction_on(TRUE), m_transaction_on(TRUE),
m_cond_stack(NULL), m_cond_stack(NULL),
m_multi_cursor(NULL) m_multi_cursor(NULL)
{ {
int i; int i;
DBUG_ENTER("ha_ndbcluster"); DBUG_ENTER("ha_ndbcluster");
m_tabname[0]= '\0'; m_tabname[0]= '\0';
@ -4245,9 +4288,8 @@ Ndb* check_ndb_in_thd(THD* thd)
int ha_ndbcluster::check_ndb_connection() int ha_ndbcluster::check_ndb_connection(THD* thd)
{ {
THD* thd= current_thd;
Ndb *ndb; Ndb *ndb;
DBUG_ENTER("check_ndb_connection"); DBUG_ENTER("check_ndb_connection");
@ -4321,33 +4363,31 @@ int ndbcluster_discover(THD* thd, const char *db, const char *name,
/* /*
Check if a table exists in NDB Check if a table exists in NDB
*/ */
int ndbcluster_table_exists(THD* thd, const char *db, const char *name) int ndbcluster_table_exists(THD* thd, const char *db, const char *name)
{ {
uint len;
const void* data;
const NDBTAB* tab; const NDBTAB* tab;
Ndb* ndb; Ndb* ndb;
DBUG_ENTER("ndbcluster_table_exists"); DBUG_ENTER("ndbcluster_table_exists");
DBUG_PRINT("enter", ("db: %s, name: %s", db, name)); DBUG_PRINT("enter", ("db: %s, name: %s", db, name));
if (!(ndb= check_ndb_in_thd(thd))) if (!(ndb= check_ndb_in_thd(thd)))
DBUG_RETURN(HA_ERR_NO_CONNECTION); DBUG_RETURN(HA_ERR_NO_CONNECTION);
ndb->setDatabaseName(db); ndb->setDatabaseName(db);
NDBDICT* dict= ndb->getDictionary(); NDBDICT* dict= ndb->getDictionary();
dict->set_local_table_data_size(sizeof(Ndb_table_local_info)); dict->set_local_table_data_size(sizeof(Ndb_table_local_info));
dict->invalidateTable(name); dict->invalidateTable(name);
if (!(tab= dict->getTable(name))) if (!(tab= dict->getTable(name)))
{ {
const NdbError err= dict->getNdbError(); const NdbError err= dict->getNdbError();
if (err.code == 709) if (err.code == 709)
DBUG_RETURN(0); DBUG_RETURN(0);
ERR_RETURN(err); ERR_RETURN(err);
} }
DBUG_PRINT("info", ("Found table %s", tab->getName())); DBUG_PRINT("info", ("Found table %s", tab->getName()));
DBUG_RETURN(1); DBUG_RETURN(1);
} }
@ -4865,38 +4905,65 @@ uint ndb_get_commitcount(THD *thd, char *dbname, char *tabname,
{ {
DBUG_ENTER("ndb_get_commitcount"); DBUG_ENTER("ndb_get_commitcount");
char name[FN_REFLEN];
NDB_SHARE *share;
(void)strxnmov(name, FN_REFLEN, "./",dbname,"/",tabname,NullS);
DBUG_PRINT("enter", ("name: %s", name));
pthread_mutex_lock(&ndbcluster_mutex);
if (!(share=(NDB_SHARE*) hash_search(&ndbcluster_open_tables,
(byte*) name,
strlen(name))))
{
pthread_mutex_unlock(&ndbcluster_mutex);
DBUG_PRINT("info", ("Table %s not found in ndbcluster_open_tables",
name));
DBUG_RETURN(1);
}
share->use_count++;
pthread_mutex_unlock(&ndbcluster_mutex);
pthread_mutex_lock(&share->mutex);
if (ndb_cache_check_time > 0) if (ndb_cache_check_time > 0)
{ {
/* Use cached commit_count from share */ if (share->commit_count != 0)
char name[FN_REFLEN];
NDB_SHARE *share;
(void)strxnmov(name, FN_REFLEN,
"./",dbname,"/",tabname,NullS);
DBUG_PRINT("info", ("name: %s", name));
pthread_mutex_lock(&ndbcluster_mutex);
if (!(share=(NDB_SHARE*) hash_search(&ndbcluster_open_tables,
(byte*) name,
strlen(name))))
{ {
pthread_mutex_unlock(&ndbcluster_mutex); *commit_count= share->commit_count;
DBUG_RETURN(1); DBUG_PRINT("info", ("Getting commit_count: %llu from share",
share->commit_count));
pthread_mutex_unlock(&share->mutex);
free_share(share);
DBUG_RETURN(0);
} }
*commit_count= share->commit_count;
DBUG_PRINT("info", ("commit_count: %d", *commit_count));
pthread_mutex_unlock(&ndbcluster_mutex);
DBUG_RETURN(0);
} }
DBUG_PRINT("info", ("Get commit_count from NDB"));
/* Get commit_count from NDB */
Ndb *ndb; Ndb *ndb;
if (!(ndb= check_ndb_in_thd(thd))) if (!(ndb= check_ndb_in_thd(thd)))
DBUG_RETURN(1); DBUG_RETURN(1);
ndb->setDatabaseName(dbname); ndb->setDatabaseName(dbname);
uint lock= share->commit_count_lock;
pthread_mutex_unlock(&share->mutex);
struct Ndb_statistics stat; struct Ndb_statistics stat;
if (ndb_get_table_statistics(ndb, tabname, &stat)) if (ndb_get_table_statistics(ndb, tabname, &stat))
{
free_share(share);
DBUG_RETURN(1); DBUG_RETURN(1);
*commit_count= stat.commit_count; }
pthread_mutex_lock(&share->mutex);
if(share->commit_count_lock == lock)
{
DBUG_PRINT("info", ("Setting commit_count to %llu", stat.commit_count));
share->commit_count= stat.commit_count;
*commit_count= stat.commit_count;
}
else
{
DBUG_PRINT("info", ("Discarding commit_count, comit_count_lock changed"));
*commit_count= 0;
}
pthread_mutex_unlock(&share->mutex);
free_share(share);
DBUG_RETURN(0); DBUG_RETURN(0);
} }
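ndb_get_commitcount() above is the read side of the same protocol: it snapshots commit_count_lock, drops the share mutex while asking the cluster for statistics, and only caches the fetched value if the counter is unchanged. A hedged standalone sketch of that optimistic re-check (hypothetical helper names, not the real API):

    #include <pthread.h>
    #include <cstdio>

    struct share_sketch {
      pthread_mutex_t mutex;
      unsigned long long commit_count;  /* 0 means "unknown / invalidated" */
      unsigned commit_count_lock;       /* bumped by writers on invalidation */
    };

    /* Hypothetical stand-in for the round trip to the cluster. */
    static unsigned long long fetch_commit_count_from_cluster() { return 17; }

    static void get_commit_count(share_sketch* share, unsigned long long* out) {
      pthread_mutex_lock(&share->mutex);
      if (share->commit_count != 0) {            /* cached value is still valid */
        *out = share->commit_count;
        pthread_mutex_unlock(&share->mutex);
        return;
      }
      unsigned lock_snapshot = share->commit_count_lock;
      pthread_mutex_unlock(&share->mutex);       /* no mutex held during the fetch */

      unsigned long long fresh = fetch_commit_count_from_cluster();

      pthread_mutex_lock(&share->mutex);
      if (share->commit_count_lock == lock_snapshot) {
        share->commit_count = fresh;             /* nothing changed while we fetched */
        *out = fresh;
      } else {
        *out = 0;                                /* stale result: force a cache miss */
      }
      pthread_mutex_unlock(&share->mutex);
    }

    int main() {
      share_sketch s = { PTHREAD_MUTEX_INITIALIZER, 0, 3 };
      unsigned long long cc = 0;
      get_commit_count(&s, &cc);
      std::printf("commit_count=%llu\n", cc);
      return 0;
    }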
@ -4943,27 +5010,37 @@ ndbcluster_cache_retrieval_allowed(THD *thd,
char *dbname= full_name; char *dbname= full_name;
char *tabname= dbname+strlen(dbname)+1; char *tabname= dbname+strlen(dbname)+1;
DBUG_PRINT("enter",("dbname=%s, tabname=%s, autocommit=%d", DBUG_PRINT("enter", ("dbname: %s, tabname: %s, is_autocommit: %d",
dbname, tabname, is_autocommit)); dbname, tabname, is_autocommit));
if (!is_autocommit) if (!is_autocommit)
{
DBUG_PRINT("exit", ("No, don't use cache in transaction"));
DBUG_RETURN(FALSE); DBUG_RETURN(FALSE);
}
if (ndb_get_commitcount(thd, dbname, tabname, &commit_count)) if (ndb_get_commitcount(thd, dbname, tabname, &commit_count))
{ {
*engine_data+= 1; /* invalidate */ *engine_data= 0; /* invalidate */
DBUG_PRINT("exit", ("No, could not retrieve commit_count"));
DBUG_RETURN(FALSE); DBUG_RETURN(FALSE);
} }
DBUG_PRINT("info", ("*engine_data=%llu, commit_count=%llu", DBUG_PRINT("info", ("*engine_data: %llu, commit_count: %llu",
*engine_data, commit_count)); *engine_data, commit_count));
if (*engine_data != commit_count) if (commit_count == 0)
{
*engine_data= 0; /* invalidate */
DBUG_PRINT("exit", ("No, local commit has been performed"));
DBUG_RETURN(FALSE);
}
else if (*engine_data != commit_count)
{ {
*engine_data= commit_count; /* invalidate */ *engine_data= commit_count; /* invalidate */
DBUG_PRINT("exit",("Do not use cache, commit_count has changed")); DBUG_PRINT("exit", ("No, commit_count has changed"));
DBUG_RETURN(FALSE); DBUG_RETURN(FALSE);
} }
DBUG_PRINT("exit",("OK to use cache, *engine_data=%llu",*engine_data)); DBUG_PRINT("exit", ("OK to use cache, engine_data: %llu", *engine_data));
DBUG_RETURN(TRUE); DBUG_RETURN(TRUE);
} }
@ -4999,22 +5076,27 @@ ha_ndbcluster::register_query_cache_table(THD *thd,
DBUG_ENTER("ha_ndbcluster::register_query_cache_table"); DBUG_ENTER("ha_ndbcluster::register_query_cache_table");
bool is_autocommit= !(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)); bool is_autocommit= !(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN));
DBUG_PRINT("enter",("dbname=%s, tabname=%s, is_autocommit=%d",
m_dbname,m_tabname,is_autocommit)); DBUG_PRINT("enter",("dbname: %s, tabname: %s, is_autocommit: %d",
m_dbname, m_tabname, is_autocommit));
if (!is_autocommit) if (!is_autocommit)
{
DBUG_PRINT("exit", ("Can't register table during transaction"))
DBUG_RETURN(FALSE); DBUG_RETURN(FALSE);
}
Uint64 commit_count; Uint64 commit_count;
if (ndb_get_commitcount(thd, m_dbname, m_tabname, &commit_count)) if (ndb_get_commitcount(thd, m_dbname, m_tabname, &commit_count))
{ {
*engine_data= 0; *engine_data= 0;
DBUG_PRINT("error", ("Could not get commitcount")) DBUG_PRINT("exit", ("Error, could not get commitcount"))
DBUG_RETURN(FALSE); DBUG_RETURN(FALSE);
} }
*engine_data= commit_count; *engine_data= commit_count;
*engine_callback= ndbcluster_cache_retrieval_allowed; *engine_callback= ndbcluster_cache_retrieval_allowed;
DBUG_PRINT("exit",("*engine_data=%llu", *engine_data)); DBUG_PRINT("exit", ("commit_count: %llu", commit_count));
DBUG_RETURN(TRUE); DBUG_RETURN(commit_count > 0);
} }
@ -5057,14 +5139,21 @@ static NDB_SHARE* get_share(const char *table_name)
thr_lock_init(&share->lock); thr_lock_init(&share->lock);
pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST); pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST);
share->commit_count= 0; share->commit_count= 0;
share->commit_count_lock= 0;
}
else
{
DBUG_PRINT("error", ("Failed to alloc share"));
pthread_mutex_unlock(&ndbcluster_mutex);
return 0;
} }
} }
DBUG_PRINT("share",
("table_name: %s, length: %d, use_count: %d, commit_count: %d",
share->table_name, share->table_name_length, share->use_count,
share->commit_count));
share->use_count++; share->use_count++;
DBUG_PRINT("share",
("table_name: %s, length: %d, use_count: %d, commit_count: %d",
share->table_name, share->table_name_length, share->use_count,
share->commit_count));
pthread_mutex_unlock(&ndbcluster_mutex); pthread_mutex_unlock(&ndbcluster_mutex);
return share; return share;
} }
@ -5075,7 +5164,7 @@ static void free_share(NDB_SHARE *share)
pthread_mutex_lock(&ndbcluster_mutex); pthread_mutex_lock(&ndbcluster_mutex);
if (!--share->use_count) if (!--share->use_count)
{ {
hash_delete(&ndbcluster_open_tables, (byte*) share); hash_delete(&ndbcluster_open_tables, (byte*) share);
thr_lock_delete(&share->lock); thr_lock_delete(&share->lock);
pthread_mutex_destroy(&share->mutex); pthread_mutex_destroy(&share->mutex);
my_free((gptr) share, MYF(0)); my_free((gptr) share, MYF(0));
@ -5219,6 +5308,7 @@ ndb_get_table_statistics(Ndb* ndb, const char * table,
if (check == -1) if (check == -1)
break; break;
Uint32 count= 0;
Uint64 sum_rows= 0; Uint64 sum_rows= 0;
Uint64 sum_commits= 0; Uint64 sum_commits= 0;
Uint64 sum_row_size= 0; Uint64 sum_row_size= 0;
@ -5230,6 +5320,7 @@ ndb_get_table_statistics(Ndb* ndb, const char * table,
if (sum_row_size < size) if (sum_row_size < size)
sum_row_size= size; sum_row_size= size;
sum_mem+= mem; sum_mem+= mem;
count++;
} }
if (check == -1) if (check == -1)
@ -5244,8 +5335,11 @@ ndb_get_table_statistics(Ndb* ndb, const char * table,
ndbstat->row_size= sum_row_size; ndbstat->row_size= sum_row_size;
ndbstat->fragment_memory= sum_mem; ndbstat->fragment_memory= sum_mem;
DBUG_PRINT("exit", ("records: %u commits: %u row_size: %d mem: %d", DBUG_PRINT("exit", ("records: %llu commits: %llu "
sum_rows, sum_commits, sum_row_size, sum_mem)); "row_size: %llu mem: %llu count: %u",
sum_rows, sum_commits, sum_row_size,
sum_mem, count));
DBUG_RETURN(0); DBUG_RETURN(0);
} while(0); } while(0);
@ -5677,6 +5771,7 @@ extern "C" pthread_handler_decl(ndb_util_thread_func,
arg __attribute__((unused))) arg __attribute__((unused)))
{ {
THD *thd; /* needs to be first for thread_stack */ THD *thd; /* needs to be first for thread_stack */
Ndb* ndb;
int error= 0; int error= 0;
struct timespec abstime; struct timespec abstime;
@ -5686,12 +5781,13 @@ extern "C" pthread_handler_decl(ndb_util_thread_func,
thd= new THD; /* note that contructor of THD uses DBUG_ */ thd= new THD; /* note that contructor of THD uses DBUG_ */
THD_CHECK_SENTRY(thd); THD_CHECK_SENTRY(thd);
ndb= new Ndb(g_ndb_cluster_connection, "");
pthread_detach_this_thread(); pthread_detach_this_thread();
ndb_util_thread= pthread_self(); ndb_util_thread= pthread_self();
thd->thread_stack= (char*)&thd; /* remember where our stack is */ thd->thread_stack= (char*)&thd; /* remember where our stack is */
if (thd->store_globals()) if (thd->store_globals() && (ndb->init() != -1))
{ {
thd->cleanup(); thd->cleanup();
delete thd; delete thd;
@ -5699,7 +5795,7 @@ extern "C" pthread_handler_decl(ndb_util_thread_func,
} }
List<NDB_SHARE> util_open_tables; List<NDB_SHARE> util_open_tables;
set_timespec(abstime, ndb_cache_check_time); set_timespec(abstime, 0);
for (;;) for (;;)
{ {
@ -5717,16 +5813,11 @@ extern "C" pthread_handler_decl(ndb_util_thread_func,
if (ndb_cache_check_time == 0) if (ndb_cache_check_time == 0)
{ {
set_timespec(abstime, 10); /* Wake up in 1 second to check if value has changed */
set_timespec(abstime, 1);
continue; continue;
} }
/* Round tim e from millisceonds to seconds */
uint wait_secs= ((ndb_cache_check_time+999)/1000);
DBUG_PRINT("ndb_util_thread", ("wait_secs: %d", wait_secs));
/* Set new time to wake up */
set_timespec(abstime, wait_secs);
/* Lock mutex and fill list with pointers to all open tables */ /* Lock mutex and fill list with pointers to all open tables */
NDB_SHARE *share; NDB_SHARE *share;
pthread_mutex_lock(&ndbcluster_mutex); pthread_mutex_lock(&ndbcluster_mutex);
@ -5756,26 +5847,37 @@ extern "C" pthread_handler_decl(ndb_util_thread_func,
buf[length-1]= 0; buf[length-1]= 0;
db= buf+dirname_length(buf); db= buf+dirname_length(buf);
DBUG_PRINT("ndb_util_thread", DBUG_PRINT("ndb_util_thread",
("Fetching commit count for: %s, db: %s, tab: %s", ("Fetching commit count for: %s",
share->table_name, db, tabname)); share->table_name));
/* Contact NDB to get commit count for table */ /* Contact NDB to get commit count for table */
g_ndb->setDatabaseName(db); ndb->setDatabaseName(db);
struct Ndb_statistics stat;; struct Ndb_statistics stat;
if(ndb_get_table_statistics(g_ndb, tabname, &stat) == 0)
uint lock;
pthread_mutex_lock(&share->mutex);
lock= share->commit_count_lock;
pthread_mutex_unlock(&share->mutex);
if(ndb_get_table_statistics(ndb, tabname, &stat) == 0)
{ {
DBUG_PRINT("ndb_util_thread", DBUG_PRINT("ndb_util_thread",
("Table: %s, rows: %llu, commit_count: %llu", ("Table: %s, commit_count: %llu, rows: %llu",
share->table_name, stat.row_count, stat.commit_count)); share->table_name, stat.commit_count, stat.row_count));
share->commit_count= stat.commit_count;
} }
else else
{ {
DBUG_PRINT("ndb_util_thread", DBUG_PRINT("ndb_util_thread",
("Error: Could not get commit count for table %s", ("Error: Could not get commit count for table %s",
share->table_name)); share->table_name));
share->commit_count++; /* Invalidate */ stat.commit_count= 0;
} }
pthread_mutex_lock(&share->mutex);
if (share->commit_count_lock == lock)
share->commit_count= stat.commit_count;
pthread_mutex_unlock(&share->mutex);
/* Decrease the use count and possibly free share */ /* Decrease the use count and possibly free share */
free_share(share); free_share(share);
} }
@@ -5783,6 +5885,26 @@ extern "C" pthread_handler_decl(ndb_util_thread_func,
/* Clear the list of open tables */ /* Clear the list of open tables */
util_open_tables.empty(); util_open_tables.empty();
/* Calculate new time to wake up */
int secs= 0;
int msecs= ndb_cache_check_time;
struct timeval tick_time;
gettimeofday(&tick_time, 0);
abstime.tv_sec= tick_time.tv_sec;
abstime.tv_nsec= tick_time.tv_usec * 1000;
if(msecs >= 1000){
secs= msecs / 1000;
msecs= msecs % 1000;
}
abstime.tv_sec+= secs;
abstime.tv_nsec+= msecs * 1000000;
if (abstime.tv_nsec >= 1000000000) {
abstime.tv_sec+= 1;
abstime.tv_nsec-= 1000000000;
}
} }
thd->cleanup(); thd->cleanup();
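For reference, the wake-up calculation added in the hunk above converts ndb_cache_check_time (a value in milliseconds) into an absolute timespec deadline for pthread_cond_timedwait(). A minimal standalone sketch of the same arithmetic, using a hypothetical helper name, could look like this:

#include <sys/time.h>
#include <time.h>

/* Illustrative sketch, not part of the patch: build an absolute deadline
   "msecs" milliseconds from now, mirroring the arithmetic added above. */
static void set_deadline_ms(struct timespec *abstime, long msecs)
{
  struct timeval now;
  gettimeofday(&now, 0);
  abstime->tv_sec=  now.tv_sec  + msecs / 1000;
  abstime->tv_nsec= now.tv_usec * 1000L + (msecs % 1000) * 1000000L;
  if (abstime->tv_nsec >= 1000000000L)
  {
    abstime->tv_sec+=  1;
    abstime->tv_nsec-= 1000000000L;
  }
}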

View File

@@ -61,6 +61,7 @@ typedef struct st_ndbcluster_share {
pthread_mutex_t mutex; pthread_mutex_t mutex;
char *table_name; char *table_name;
uint table_name_length,use_count; uint table_name_length,use_count;
uint commit_count_lock;
ulonglong commit_count; ulonglong commit_count;
} NDB_SHARE; } NDB_SHARE;
@@ -374,6 +375,7 @@ class Thd_ndb
NdbTransaction *all; NdbTransaction *all;
NdbTransaction *stmt; NdbTransaction *stmt;
int error; int error;
List<NDB_SHARE> changed_tables;
}; };
class ha_ndbcluster: public handler class ha_ndbcluster: public handler
@@ -562,7 +564,7 @@ private:
int write_ndb_file(); int write_ndb_file();
int check_ndb_connection(); int check_ndb_connection(THD* thd= current_thd);
void set_rec_per_key(); void set_rec_per_key();
void records_update(); void records_update();
@@ -611,6 +613,7 @@ private:
ha_rows m_rows_to_insert; ha_rows m_rows_to_insert;
ha_rows m_rows_inserted; ha_rows m_rows_inserted;
ha_rows m_bulk_insert_rows; ha_rows m_bulk_insert_rows;
ha_rows m_rows_changed;
bool m_bulk_insert_not_flushed; bool m_bulk_insert_not_flushed;
ha_rows m_ops_pending; ha_rows m_ops_pending;
bool m_skip_auto_increment; bool m_skip_auto_increment;
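The commit_count_lock member added above acts as a version counter: the utility thread snapshots it under the share mutex, fetches table statistics without holding the mutex, and writes the cached commit count back only if the counter is still unchanged. A simplified sketch of that optimistic guard, with stand-in types and a hypothetical fetch call in place of ndb_get_table_statistics(), might read:

#include <pthread.h>

/* Illustrative stand-in for NDB_SHARE; the fetch call below is a placeholder. */
struct share_stub
{
  pthread_mutex_t mutex;
  unsigned int commit_count_lock;        /* bumped whenever the cache is invalidated */
  unsigned long long commit_count;       /* cached commit count */
};

static unsigned long long fetch_commit_count(void) { return 0; } /* placeholder */

static void refresh_commit_count(struct share_stub *share)
{
  unsigned int lock;
  unsigned long long fetched;

  pthread_mutex_lock(&share->mutex);
  lock= share->commit_count_lock;        /* snapshot the version counter */
  pthread_mutex_unlock(&share->mutex);

  fetched= fetch_commit_count();         /* slow call, done without the mutex */

  pthread_mutex_lock(&share->mutex);
  if (share->commit_count_lock == lock)  /* nothing invalidated the cache meanwhile */
    share->commit_count= fetched;
  pthread_mutex_unlock(&share->mutex);
}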

View File

@@ -413,7 +413,7 @@ sys_engine_condition_pushdown("engine_condition_pushdown",
#ifdef HAVE_NDBCLUSTER_DB #ifdef HAVE_NDBCLUSTER_DB
/* ndb thread specific variable settings */ /* ndb thread specific variable settings */
sys_var_thd_ulong sys_var_thd_ulong
sys_ndb_autoincrement_prefetch_sz("ndb_autoincrement_prefetch_sz", sys_ndb_autoincrement_prefetch_sz("ndb_autoincrement_prefetch_sz",
&SV::ndb_autoincrement_prefetch_sz); &SV::ndb_autoincrement_prefetch_sz);
sys_var_thd_bool sys_var_thd_bool
@@ -422,7 +422,8 @@ sys_var_thd_bool
sys_ndb_use_exact_count("ndb_use_exact_count", &SV::ndb_use_exact_count); sys_ndb_use_exact_count("ndb_use_exact_count", &SV::ndb_use_exact_count);
sys_var_thd_bool sys_var_thd_bool
sys_ndb_use_transactions("ndb_use_transactions", &SV::ndb_use_transactions); sys_ndb_use_transactions("ndb_use_transactions", &SV::ndb_use_transactions);
sys_var_long_ptr sys_ndb_cache_check_time("ndb_cache_check_time", &ndb_cache_check_time); sys_var_long_ptr
sys_ndb_cache_check_time("ndb_cache_check_time", &ndb_cache_check_time);
#endif #endif
/* Time/date/datetime formats */ /* Time/date/datetime formats */
@@ -702,10 +703,10 @@ sys_var *sys_variables[]=
&sys_engine_condition_pushdown, &sys_engine_condition_pushdown,
#ifdef HAVE_NDBCLUSTER_DB #ifdef HAVE_NDBCLUSTER_DB
&sys_ndb_autoincrement_prefetch_sz, &sys_ndb_autoincrement_prefetch_sz,
&sys_ndb_cache_check_time,
&sys_ndb_force_send, &sys_ndb_force_send,
&sys_ndb_use_exact_count, &sys_ndb_use_exact_count,
&sys_ndb_use_transactions, &sys_ndb_use_transactions,
&sys_ndb_cache_check_time,
#endif #endif
&sys_unique_checks, &sys_unique_checks,
&sys_updatable_views_with_limit, &sys_updatable_views_with_limit,
@@ -1298,7 +1299,6 @@ static int check_max_delayed_threads(THD *thd, set_var *var)
return 0; return 0;
} }
static void fix_max_connections(THD *thd, enum_var_type type) static void fix_max_connections(THD *thd, enum_var_type type)
{ {
#ifndef EMBEDDED_LIBRARY #ifndef EMBEDDED_LIBRARY

View File

@@ -1151,7 +1151,6 @@ public:
table_map used_tables; table_map used_tables;
USER_CONN *user_connect; USER_CONN *user_connect;
CHARSET_INFO *db_charset; CHARSET_INFO *db_charset;
List<TABLE> temporary_tables_should_be_free; // list of temporary tables
/* /*
FIXME: this, and some other variables like 'count_cuted_fields' FIXME: this, and some other variables like 'count_cuted_fields'
maybe should be statement/cursor local, that is, moved to Statement maybe should be statement/cursor local, that is, moved to Statement

View File

@@ -703,7 +703,15 @@ static void append_directory(THD *thd, String *packet, const char *dir_type,
packet->append(' '); packet->append(' ');
packet->append(dir_type); packet->append(dir_type);
packet->append(" DIRECTORY='", 12); packet->append(" DIRECTORY='", 12);
#ifdef __WIN__
char *winfilename = thd->memdup(filename, length);
for (uint i=0; i < length; i++)
if (winfilename[i] == '\\')
winfilename[i] = '/';
packet->append(winfilename, length);
#else
packet->append(filename, length); packet->append(filename, length);
#endif
packet->append('\''); packet->append('\'');
} }
} }
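The __WIN__ branch above duplicates the directory name and rewrites backslashes to forward slashes before appending it to the SHOW CREATE TABLE output. A plain-C sketch of the same normalization, using standard allocation instead of thd->memdup(), could be:

#include <stdlib.h>
#include <string.h>

/* Illustrative only: copy a directory name and normalize Windows path
   separators, as the __WIN__ branch above does on a memdup()'ed copy. */
static char *normalize_dir_separators(const char *filename, size_t length)
{
  char *copy= malloc(length + 1);
  size_t i;
  if (!copy)
    return NULL;
  memcpy(copy, filename, length);
  copy[length]= '\0';
  for (i= 0; i < length; i++)
    if (copy[i] == '\\')
      copy[i]= '/';
  return copy;                           /* caller must free() the result */
}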

View File

@@ -53,8 +53,10 @@ then
basedir=@prefix@ basedir=@prefix@
bindir=@bindir@ bindir=@bindir@
datadir=@localstatedir@ datadir=@localstatedir@
sbindir=@sbindir@
else else
bindir="$basedir/bin" bindir="$basedir/bin"
sbindir="$basedir/sbin"
fi fi
# #
@@ -79,11 +81,18 @@ case `echo "testing\c"`,`echo -n testing` in
*) echo_n= echo_c='\c' ;; *) echo_n= echo_c='\c' ;;
esac esac
parse_arguments() { parse_server_arguments() {
for arg do for arg do
case "$arg" in case "$arg" in
--basedir=*) basedir=`echo "$arg" | sed -e 's/^[^=]*=//'` ;; --basedir=*) basedir=`echo "$arg" | sed -e 's/^[^=]*=//'` ;;
--datadir=*) datadir=`echo "$arg" | sed -e 's/^[^=]*=//'` ;; --datadir=*) datadir=`echo "$arg" | sed -e 's/^[^=]*=//'` ;;
esac
done
}
parse_manager_arguments() {
for arg do
case "$arg" in
--pid-file=*) pid_file=`echo "$arg" | sed -e 's/^[^=]*=//'` ;; --pid-file=*) pid_file=`echo "$arg" | sed -e 's/^[^=]*=//'` ;;
esac esac
done done
@@ -104,7 +113,7 @@ wait_for_pid () {
} }
# Get arguments from the my.cnf file, # Get arguments from the my.cnf file,
# groups [mysqld] [mysql_server] and [mysql.server] # the only group that is read from now on is [mysqld]
if test -x ./bin/my_print_defaults if test -x ./bin/my_print_defaults
then then
print_defaults="./bin/my_print_defaults" print_defaults="./bin/my_print_defaults"
@@ -153,14 +162,17 @@ then
extra_args="-e $datadir/my.cnf" extra_args="-e $datadir/my.cnf"
fi fi
parse_arguments `$print_defaults $extra_args mysqld server mysql_server mysql.server` parse_server_arguments `$print_defaults $extra_args mysqld`
# Look for the pidfile
parse_manager_arguments `$print_defaults $extra_args manager`
# #
# Set pid file if not given # Set pid file if not given
# #
if test -z "$pid_file" if test -z "$pid_file"
then then
pid_file=$datadir/`@HOSTNAME@`.pid pid_file=$datadir/mysqlmanager-`@HOSTNAME@`.pid
else else
case "$pid_file" in case "$pid_file" in
/* ) ;; /* ) ;;
@@ -168,6 +180,9 @@ else
esac esac
fi fi
user=@MYSQLD_USER@
USER_OPTION="--user=$user"
# Safeguard (relative paths, core dumps..) # Safeguard (relative paths, core dumps..)
cd $basedir cd $basedir
@@ -175,21 +190,21 @@ case "$mode" in
'start') 'start')
# Start daemon # Start daemon
if test -x $bindir/mysqld_safe if test -x $sbindir/mysqlmanager
then then
# Give extra arguments to mysqld with the my.cnf file. This script may # Give extra arguments to mysqld with the my.cnf file. This script may
# be overwritten at next upgrade. # be overwritten at next upgrade.
echo $echo_n "Starting MySQL" echo $echo_n "Starting MySQL"
$bindir/mysqld_safe --datadir=$datadir --pid-file=$pid_file >/dev/null 2>&1 & $sbindir/mysqlmanager $USER_OPTION --pid-file=$pid_file >/dev/null 2>&1 &
wait_for_pid wait_for_pid
# Make lock for RedHat / SuSE # Make lock for RedHat / SuSE
if test -w /var/lock/subsys if test -w /var/lock/subsys
then then
touch /var/lock/subsys/mysql touch /var/lock/subsys/mysqlmanager
fi fi
else else
log_failure_msg "Can't execute $bindir/mysqld_safe" log_failure_msg "Can't execute $sbindir/mysqlmanager"
fi fi
;; ;;
@@ -198,19 +213,19 @@ case "$mode" in
# root password. # root password.
if test -s "$pid_file" if test -s "$pid_file"
then then
mysqld_pid=`cat $pid_file` mysqlmanager_pid=`cat $pid_file`
echo $echo_n "Shutting down MySQL" echo $echo_n "Shutting down MySQL"
kill $mysqld_pid kill $mysqlmanager_pid
# mysqld should remove the pid_file when it exits, so wait for it. # mysqlmanager should remove the pid_file when it exits, so wait for it.
wait_for_pid wait_for_pid
# delete lock for RedHat / SuSE # delete lock for RedHat / SuSE
if test -f /var/lock/subsys/mysql if test -f /var/lock/subsys/mysqlmanager
then then
rm -f /var/lock/subsys/mysql rm -f /var/lock/subsys/mysqlmanager
fi fi
else else
log_failure_msg "MySQL PID file could not be found!" log_failure_msg "mysqlmanager PID file could not be found!"
fi fi
;; ;;
@@ -219,21 +234,11 @@ case "$mode" in
# running or not, start it again. # running or not, start it again.
$0 stop $0 stop
$0 start $0 start
;; ;;
'reload')
if test -s "$pid_file" ; then
mysqld_pid=`cat $pid_file`
kill -HUP $mysqld_pid && log_success_msg "Reloading service MySQL"
touch $pid_file
else
log_failure_msg "MySQL PID file could not be found!"
fi
;;
*) *)
# usage # usage
echo "Usage: $0 start|stop|restart|reload" echo "Usage: $0 start|stop|restart"
exit 1 exit 1
;; ;;
esac esac

View File

@@ -429,9 +429,11 @@ ln -s %{_sysconfdir}/init.d/mysql $RPM_BUILD_ROOT%{_sbindir}/rcmysql
# (safe_mysqld will be gone in MySQL 4.1) # (safe_mysqld will be gone in MySQL 4.1)
ln -sf ./mysqld_safe $RBR%{_bindir}/safe_mysqld ln -sf ./mysqld_safe $RBR%{_bindir}/safe_mysqld
# Touch the place where the my.cnf config file might be located # Touch the place where the my.cnf config file and mysqlmanager.passwd
# (MySQL Instance Manager password file) might be located
# Just to make sure it's in the file list and marked as a config file # Just to make sure it's in the file list and marked as a config file
touch $RBR%{_sysconfdir}/my.cnf touch $RBR%{_sysconfdir}/my.cnf
touch $RBR%{_sysconfdir}/mysqlmanager.passwd
%pre server %pre server
# Shut down a previously installed server first # Shut down a previously installed server first
@@ -551,6 +553,7 @@ fi
%doc %attr(644, root, man) %{_mandir}/man1/replace.1* %doc %attr(644, root, man) %{_mandir}/man1/replace.1*
%ghost %config(noreplace,missingok) %{_sysconfdir}/my.cnf %ghost %config(noreplace,missingok) %{_sysconfdir}/my.cnf
%ghost %config(noreplace,missingok) %{_sysconfdir}/mysqlmanager.passwd
%attr(755, root, root) %{_bindir}/my_print_defaults %attr(755, root, root) %{_bindir}/my_print_defaults
%attr(755, root, root) %{_bindir}/myisamchk %attr(755, root, root) %{_bindir}/myisamchk
@@ -579,6 +582,7 @@ fi
%attr(755, root, root) %{_bindir}/safe_mysqld %attr(755, root, root) %{_bindir}/safe_mysqld
%attr(755, root, root) %{_sbindir}/mysqld %attr(755, root, root) %{_sbindir}/mysqld
%attr(755, root, root) %{_sbindir}/mysqlmanager
%attr(755, root, root) %{_sbindir}/rcmysql %attr(755, root, root) %{_sbindir}/rcmysql
%attr(644, root, root) %{_libdir}/mysql/mysqld.sym %attr(644, root, root) %{_libdir}/mysql/mysqld.sym
@@ -690,9 +694,14 @@ fi
# itself - note that they must be ordered by date (important when # itself - note that they must be ordered by date (important when
# merging BK trees) # merging BK trees)
%changelog %changelog
* Sun Feb 20 2005 Petr Chardin <petr@mysql.com>
- Install MySQL Instance Manager together with mysqld, touch mysqlmanager
password file
* Mon Feb 14 2005 Lenz Grimmer <lenz@mysql.com> * Mon Feb 14 2005 Lenz Grimmer <lenz@mysql.com>
* Fixed the compilation comments and moved them into the separate build sections - Fixed the compilation comments and moved them into the separate build sections
for Max and Standard for Max and Standard
* Mon Feb 7 2005 Tomas Ulin <tomas@mysql.com> * Mon Feb 7 2005 Tomas Ulin <tomas@mysql.com>