Fixes for innobase
Added test for ALTER TABLE ORDER BY
parent f45764791b
commit 1c298277f9
@@ -13,6 +13,9 @@ $make -k clean || true
 /bin/rm -f */.deps/*.P config.cache
 
 aclocal; autoheader; aclocal; automake; autoconf
+cd innobase
+aclocal; autoheader; aclocal; automake; autoconf
+cd ..
 
 CFLAGS=\"$cflags\" CXX=gcc CXXFLAGS=\"$cxxflags\" $configure
 
@@ -13,6 +13,6 @@ if test -d /usr/local/BerkeleyDB-dbug/
 then
 extra_configs="$extra_configs --with-berkeley-db=/usr/local/BerkeleyDB-dbug/"
 fi
-extra_configs="$extra_configs --with-innobase"
+extra_configs="$extra_configs --with-innobase-db"
 
 . "$path/FINISH.sh"
@@ -1,15 +1 @@
-jcole@tetra.spaceapes.com
-monty@donna.mysql.com
-monty@work.mysql.com
-mwagner@evoq.mwagner.org
-mwagner@work.mysql.com
-paul@central.snake.net
-sasha@mysql.sashanet.com
-sasha@work.mysql.com
-serg@donna.mysql.com
-serg@serg.mysql.com
-tfr@coyote.emotion.ee
-tim@cane.mysql.fi
-tim@threads.polyesthetic.msg
-tim@work.mysql.com
-tim@donna.mysql.com
+monty@donna.mysql.fi
@@ -487,7 +487,7 @@ MySQL Table Types
 * ISAM:: ISAM tables
 * HEAP:: HEAP tables
 * BDB:: BDB or Berkeley_db tables
-* INNOBASE::
+* INNOBASE:: Innobase tables
 
 MyISAM Tables
 
@@ -2062,7 +2062,6 @@ report about lost data because of bugs in @strong{MySQL}.
 @cindex retrieving, data
 @cindex data, ISAM table handler
 
-
 @item The MyISAM table handler --- Gamma
 This is new in @strong{MySQL} Version 3.23. It's largely based on the ISAM
 table code but has a lot of new and very useful features.
@@ -2154,6 +2153,9 @@ The Berkeley DB code is very stable, but we are still improving the interface
 between @strong{MySQL} and BDB tables, so it will take some time before this
 is as tested as the other table types.
 
+@item Innobase Tables -- Alpha
+This is a very recent addition to @code{MySQL} and is not yet very well tested.
+
 @item Automatic recovery of MyISAM tables - Beta.
 This only affects the new code that checks if the table was closed properly
 on open and executes an automatic check/repair of the table if it wasn't.
@@ -12241,7 +12243,7 @@ connections:
 @item @code{'x.y.%'} @tab @code{'fred'} @tab @code{fred}, connecting from @code{x.y.net}, @code{x.y.com},@code{x.y.edu}, etc. (this is probably not useful)
 @item @code{'144.155.166.177'} @tab @code{'fred'} @tab @code{fred}, connecting from the host with IP address @code{144.155.166.177}
 @item @code{'144.155.166.%'} @tab @code{'fred'} @tab @code{fred}, connecting from any host in the @code{144.155.166} class C subnet
-@item @code{'144.155.166.0/24'} @tab @code{'fred'} @tab Same as previous example
+@item @code{'144.155.166.0/255.255.255.0'} @tab @code{'fred'} @tab Same as previous example
 @end multitable
 
 Because you can use IP wild-card values in the @code{Host} field (for example,
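The hunk above documents the IP/netmask form of a @code{Host} value. As a
minimal sketch of how such an entry could be created, with the account name,
password, and database invented for the illustration (they are not part of
this commit):

    -- Illustrative only: lets fred connect from any host whose address
    -- matches 144.155.166.0 under the netmask 255.255.255.0.
    GRANT SELECT ON mydb.* TO fred@'144.155.166.0/255.255.255.0'
        IDENTIFIED BY 'some_password';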
@@ -18322,12 +18324,12 @@ If you specify a @code{SELECT} after the @code{CREATE STATEMENT},
 @example
 mysql> CREATE TABLE test (a int not null auto_increment,
 primary key (a), key(b))
-TYPE=HEAP SELECT b,c from test2;
+TYPE=MyISAM SELECT b,c from test2;
 @end example
 
-This will create a @code{HEAP} table with 3 columns. Note that the table will
-automatically be deleted if any errors occur while copying data
-into the table.
+This will create a @code{MyISAM} table with 3 columns. Note that the
+table will automatically be deleted if any errors occur while copying
+data into the table.
 @item
 The @code{RAID_TYPE} option will help you to break the 2G/4G limit for
 the MyISAM data file (not the index file) on
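The @code{RAID_TYPE} item above is cut off by the hunk boundary; as a rough
sketch of the kind of statement it refers to (the table and option values are
hypothetical, and this assumes a server built with RAID support):

    -- Illustrative only: stripe the MyISAM data file over 4 chunks so it
    -- can grow past the operating system's 2G/4G file-size limit.
    CREATE TABLE big_log (
      id INT NOT NULL AUTO_INCREMENT,
      msg TEXT,
      PRIMARY KEY (id)
    ) TYPE=MyISAM RAID_TYPE=STRIPED RAID_CHUNKS=4 RAID_CHUNKSIZE=1024;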
@@ -23504,6 +23506,60 @@ Innobase cannot notice. In cases like this the timeout is useful to
 resolve the situation.
 @end multitable
 
+You can query the amount of free space in the Innobase tablespace (=
+data files you specified in my.cnf) by issuing the table status command
+of @strong{MySQL} for any table you have created with @code{TYPE =
+INNOBASE}. Then the amount of free space in the tablespace appears in
+the table comment section in the output of SHOW. An example:
+
+@example
+SHOW TABLE STATUS FROM TEST LIKE 'CUSTOMER'
+@end example
+
+if you have created a table of name CUSTOMER in a database you have named
+TEST. Note that the statistics SHOW gives about Innobase tables
+are only approximate: they are used in SQL optimization. Table and
+index reserved sizes in bytes are accurate, though.
+
+Note that in addition to your tables, the rollback segment uses space
+from the tablespace.
+
+Since Innobase is a multiversioned database, it must keep information
+of old versions of rows in the tablespace. This information is stored
+in a data structure called a rollback segment, like in Oracle. In contrast
+to Oracle, you do not need to configure the rollback segment in any way in
+Innobase. If you issue SELECTs, which by default do a consistent read in
+Innobase, remember to commit your transaction regularly. Otherwise
+the rollback segment will grow because it has to preserve the information
+needed for further consistent reads in your transaction: in Innobase
+all consistent reads within one transaction will see the same timepoint
+snapshot of the database: the reads are also 'consistent' with
+respect to each other.
+
+Some Innobase errors: If you run out of file space in the tablespace,
+you will get the MySQL 'Table is full' error. If you want to make your
+tablespace bigger, you have to shut down MySQL and add a new datafile
+specification to my.cnf, to the innobase_data_file_path parameter.
+
+A transaction deadlock or a timeout in a lock wait will give 'Table handler
+error 1000000'.
+
+Contact information of Innobase Oy, producer of the Innobase engine:
+
+Website: Being registered, probably @uref{http://www.innobase.fi}.
+This should open about March 3rd, 2001.
+
+@email{Heikki.Tuuri@@innobase.inet.fi}
+@example
+phone: 358-9-6969 3250 (office) 358-40-5617367 (mobile)
+Innobase Oy Inc.
+World Trade Center Helsinki
+Aleksanterinkatu 17
+P.O.Box 800
+00101 Helsinki
+Finland
+@end example
+
 @cindex tutorial
 @cindex terminal monitor, defined
 @cindex monitor, terminal
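A short SQL sketch of the two practical points in the manual text added above;
the database name test and table name customer are simply the example names
used in that text, and any @code{TYPE=INNOBASE} table behaves the same way:

    -- Free space in the shared tablespace is reported in the Comment column.
    SHOW TABLE STATUS FROM test LIKE 'customer';

    -- Commit read-only transactions regularly so the rollback segment does
    -- not have to keep old row versions for the open consistent-read view.
    SET AUTOCOMMIT=0;
    SELECT COUNT(*) FROM customer;
    COMMIT;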
@@ -40853,6 +40909,10 @@ This converter can't handle MEMO fields.
 Convert between FoxPro @file{.dbf} files and @strong{MySQL} tables on Windows.
 By Alexander Eltsyn, @email{ae@@nica.ru} or @email{ae@@usa.net}.
 
+@item @uref{http://www.mysql.com/Downloads/Contrib/dbf2sql.zip, dbf2sql.zip}
+A short and simple prg that can help you transport your data from a FoxPro
+table into a @strong{MySQL} table. By Danko Josic.
+
 @item @uref{http://www.mysql.com/Downloads/Contrib/dump2h-1.20.gz, dump2h-1.20.gz}
 Convert from @code{mysqldump} output to a C header file. By Harry Brueckner,
 @email{brueckner@@mail.respublica.de}.
@@ -41594,6 +41654,8 @@ not yet 100 % confident in this code.
 @appendixsubsec Changes in release 3.23.34
 @itemize @bullet
 @item
+Fixed bug in @code{ALTER TABLE ... ORDER BY}.
+@item
 Added option @code{max_user_connections} to @code{mysqld}.
 @item
 Limit query length for replication by max_allowed_packet, not the arbitrary
@@ -441,6 +441,7 @@ int safe_cond_timedwait(pthread_cond_t *cond, safe_mutex_t *mp,
 #define pthread_mutex_destroy(A) safe_mutex_destroy((A),__FILE__,__LINE__)
 #define pthread_cond_wait(A,B) safe_cond_wait((A),(B),__FILE__,__LINE__)
 #define pthread_cond_timedwait(A,B,C) safe_cond_timedwait((A),(B),(C),__FILE__,__LINE__)
+#define pthread_mutex_trylock(A) pthread_mutex_lock(A)
 #define pthread_mutex_t safe_mutex_t
 #endif /* SAFE_MUTEX */
 
@@ -24,3 +24,5 @@ SUBDIRS = os ut btr buf com data dict dyn eval fil fsp fut \
 ha ibuf lock log mach mem mtr odbc page pars que \
 read rem row srv sync thr trx usr
 
+# Don't update the files from bitkeeper
+%::SCCS/s.%
@@ -22,4 +22,3 @@ libs_LIBRARIES = libbtr.a
 libbtr_a_SOURCES = btr0btr.c btr0cur.c btr0pcur.c btr0sea.c
 
 EXTRA_PROGRAMS =
-
@@ -22,4 +22,3 @@ libs_LIBRARIES = libdata.a
 libdata_a_SOURCES = data0data.c data0type.c
 
 EXTRA_PROGRAMS =
-
@@ -22,4 +22,3 @@ libs_LIBRARIES = libeval.a
 libeval_a_SOURCES = eval0eval.c eval0proc.c
 
 EXTRA_PROGRAMS =
-
@@ -3,3 +3,6 @@
 libsdir = ../libs
 
 INCLUDES = -I../../include -I../include
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
@@ -22,3 +22,6 @@ libs_LIBRARIES = libos.a
 libos_a_SOURCES = os0proc.c os0shm.c os0sync.c os0thread.c os0file.c
 
 EXTRA_PROGRAMS =
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
@@ -46,3 +46,24 @@ insert into t1 values(9),(3),(12),(10);
 alter table t1 order by n;
 select * from t1;
 drop table t1;
+
+CREATE TABLE t1 (
+id int(11) unsigned NOT NULL default '0',
+category_id tinyint(4) unsigned NOT NULL default '0',
+type_id tinyint(4) unsigned NOT NULL default '0',
+body text NOT NULL,
+user_id int(11) unsigned NOT NULL default '0',
+status enum('new','old') NOT NULL default 'new',
+PRIMARY KEY (id)
+) TYPE=MyISAM;
+
+ALTER TABLE
+t1
+ORDER BY
+t1.id,
+t1.status,
+t1.type_id,
+t1.user_id,
+t1.body;
+
+drop table t1;
@@ -65,6 +65,8 @@ int my_error(int nr,myf MyFlags, ...)
 /* Skipp if max size is used (to be compatible with printf) */
 while (isdigit(*tpos) || *tpos == '.' || *tpos == '-')
 tpos++;
+if (*tpos == 'l') /* Skipp 'l' argument */
+*tpos++;
 if (*tpos == 's') /* String parameter */
 {
 par = va_arg(ap, char *);
@@ -44,13 +44,13 @@ int my_vsnprintf(char *to, size_t n, const char* fmt, va_list ap)
 fmt++;
 while (isdigit(*fmt) || *fmt == '.' || *fmt == '-')
 fmt++;
-if(*fmt == 'l')
+if (*fmt == 'l')
 fmt++;
 if (*fmt == 's') /* String parameter */
 {
 reg2 char *par = va_arg(ap, char *);
 uint plen;
-if(!par) par = (char*)"(null)";
+if (!par) par = (char*)"(null)";
 plen = (uint) strlen(par);
 if ((uint) (end-to) > plen) /* Replace if possible */
 {
@@ -418,7 +418,7 @@ innobase_init(void)
 int err;
 bool ret;
 ibool test_bool;
-static char *current_dir[3];
+static char current_dir[3];
 DBUG_ENTER("innobase_init");
 
 /* Use current_dir if no paths are set */
@@ -431,13 +431,14 @@ innobase_init(void)
 
 if (!innobase_data_file_path)
 {
-fprintf(stderr,"Can't initialize innobase as 'innobase_data_file_path' is not set\n");
-DBUG_RETURN(TRUE);
+fprintf(stderr,"Can't initialize Innobase as 'innobase_data_file_path' is not set\n");
+innobase_skip=1;
+DBUG_RETURN(FALSE); // Continue without innobase
 }
 
 srv_data_home = (innobase_data_home_dir ? innobase_data_home_dir :
 current_dir);
-srv_logs_home = "";
+srv_logs_home = (char*) "";
 srv_arch_dir = (innobase_log_arch_dir ? innobase_log_arch_dir :
 current_dir);
 
@@ -2167,8 +2168,9 @@ create_clustered_index_when_no_primary(
 /* The first '0' below specifies that everything in Innobase is
 currently created in file space 0 */
 
-index = dict_mem_index_create((char*) table_name, "GEN_CLUST_INDEX",
-0, DICT_CLUSTERED, 0);
+index = dict_mem_index_create((char*) table_name,
+(char*) "GEN_CLUST_INDEX",
+0, DICT_CLUSTERED, 0);
 error = row_create_index_for_mysql(index, trx);
 
 error = convert_error_code_to_mysql(error);
@@ -2208,7 +2210,7 @@ ha_innobase::create(
 
 /* Create the table definition in Innobase */
 
-if (error = create_table_def(trx, form, norm_name)) {
+if ((error = create_table_def(trx, form, norm_name))) {
 
 trx_commit_for_mysql(trx);
 
@@ -2248,8 +2250,8 @@ ha_innobase::create(
 if (primary_key_no != -1) {
 /* In Innobase the clustered index must always be created
 first */
-if (error = create_index(trx, form, norm_name,
-(uint) primary_key_no)) {
+if ((error = create_index(trx, form, norm_name,
+(uint) primary_key_no))) {
 trx_commit_for_mysql(trx);
 
 trx_free_for_mysql(trx);
|
|||||||
|
|
||||||
if (i != (uint) primary_key_no) {
|
if (i != (uint) primary_key_no) {
|
||||||
|
|
||||||
if (error = create_index(trx, form, norm_name, i)) {
|
if ((error = create_index(trx, form, norm_name, i))) {
|
||||||
|
|
||||||
trx_commit_for_mysql(trx);
|
trx_commit_for_mysql(trx);
|
||||||
|
|
||||||
@ -2564,7 +2566,8 @@ ha_innobase::update_table_comment(
|
|||||||
if (!str)
|
if (!str)
|
||||||
return (char*)comment;
|
return (char*)comment;
|
||||||
|
|
||||||
sprintf(str,"%s Innobase free: %lu kB", comment,innobase_get_free_space());
|
sprintf(str,"%s Innobase free: %lu kB", comment,
|
||||||
|
(ulong) innobase_get_free_space());
|
||||||
|
|
||||||
return((char*) str);
|
return((char*) str);
|
||||||
}
|
}
|
||||||
|
@@ -53,10 +53,10 @@ class ha_innobase: public handler
 'ref' buffer of the handle, if any */
 ulong int_option_flag;
 uint primary_key;
+uint last_dup_key;
 ulong start_of_scan; /* this is set to 1 when we are
 starting a table scan but have not
 yet fetched any row, else 0 */
-uint last_dup_key;
 
 uint last_match_mode;/* match mode of the latest search:
 ROW_SEL_EXACT, ROW_SEL_EXACT_PREFIX,
@@ -138,9 +138,8 @@ int ha_init()
 #ifdef HAVE_INNOBASE_DB
 if (!innobase_skip)
 {
-int error;
-if ((error=innobase_init()))
-return error;
+if (innobase_init())
+return -1;
 }
 #endif
 return 0;
@@ -75,9 +75,8 @@ static void free_var(user_var_entry *entry)
 ****************************************************************************/
 
 THD::THD():user_time(0),fatal_error(0),last_insert_id_used(0),
-insert_id_used(0),
-bootstrap(0),in_lock_tables(0),
-global_read_lock(0)
+insert_id_used(0),in_lock_tables(0),
+global_read_lock(0),bootstrap(0)
 {
 proc_info="login";
 host=user=priv_user=db=query=ip=0;
@@ -214,7 +214,7 @@ static int check_for_max_user_connections(const char *user, int u_length,
 (byte*) temp_user, temp_len);
 if (uc) /* user found ; check for no. of connections */
 {
-if (max_user_connections == uc->connections)
+if ((uint) max_user_connections == uc->connections)
 {
 net_printf(&(current_thd->net),ER_TOO_MANY_USER_CONNECTIONS, temp_user);
 pthread_mutex_unlock(&LOCK_user_conn);
@@ -1644,11 +1644,14 @@ copy_data_between_tables(TABLE *from,TABLE *to,
 
 found_count=delete_count=0;
 
-if(order) {
+if (order)
+{
 from->io_cache=(IO_CACHE*) my_malloc(sizeof(IO_CACHE),
 MYF(MY_FAE | MY_ZEROFILL));
 bzero((char*) &tables,sizeof(tables));
 tables.table = from;
+tables.name = tables.real_name= from->real_name;
+tables.db = from->table_cache_key;
 error=1;
 
 if (setup_order(thd, &tables, fields, all_fields, order) ||