merge (remove conflicting zip files)
BitKeeper/etc/logging_ok: auto-union BitKeeper/deleted/.del-ChangeLog: Delete: zlib/ChangeLog BitKeeper/deleted/.del-descrip.mms: Delete: zlib/descrip.mms BitKeeper/deleted/.del-zlib.def: Delete: zlib/os2/zlib.def BitKeeper/deleted/.del-example.c: Delete: zlib/example.c BitKeeper/deleted/.del-faq: Delete: zlib/faq BitKeeper/deleted/.del-gzio.c: Delete: zlib/gzio.c BitKeeper/deleted/.del-index: Delete: zlib/index BitKeeper/deleted/.del-infblock.c: Delete: zlib/infblock.c BitKeeper/deleted/.del-infblock.h: Delete: zlib/infblock.h BitKeeper/deleted/.del-infcodes.c: Delete: zlib/infcodes.c BitKeeper/deleted/.del-infcodes.h: Delete: zlib/infcodes.h BitKeeper/deleted/.del-inffast.c: Delete: zlib/inffast.c BitKeeper/deleted/.del-Make_vms.com: Delete: zlib/Make_vms.com BitKeeper/deleted/.del-inffast.h: Delete: zlib/inffast.h BitKeeper/deleted/.del-inffixed.h: Delete: zlib/inffixed.h BitKeeper/deleted/.del-inflate.c: Delete: zlib/inflate.c BitKeeper/deleted/.del-inftrees.c: Delete: zlib/inftrees.c BitKeeper/deleted/.del-inftrees.h: Delete: zlib/inftrees.h BitKeeper/deleted/.del-infutil.c: Delete: zlib/infutil.c BitKeeper/deleted/.del-infutil.h: Delete: zlib/infutil.h BitKeeper/deleted/.del-maketree.c: Delete: zlib/maketree.c BitKeeper/deleted/.del-minigzip.c: Delete: zlib/minigzip.c BitKeeper/deleted/.del-readme: Delete: zlib/readme BitKeeper/deleted/.del-Makefile.riscos: Delete: zlib/Makefile.riscos BitKeeper/deleted/.del-trees.c: Delete: zlib/trees.c BitKeeper/deleted/.del-trees.h: Delete: zlib/trees.h BitKeeper/deleted/.del-uncompr.c: Delete: zlib/uncompr.c BitKeeper/deleted/.del-zconf.h: Delete: zlib/zconf.h BitKeeper/deleted/.del-zlib.3: Delete: zlib/zlib.3 BitKeeper/deleted/.del-zlib.dsp: Delete: zlib/zlib.dsp BitKeeper/deleted/.del-zlib.h: Delete: zlib/zlib.h BitKeeper/deleted/.del-zlib.html: Delete: zlib/zlib.html BitKeeper/deleted/.del-zutil.c: Delete: zlib/zutil.c BitKeeper/deleted/.del-zutil.h: Delete: zlib/zutil.h BitKeeper/deleted/.del-adler32.c: Delete: zlib/adler32.c BitKeeper/deleted/.del-Makefile.pup: Delete: zlib/amiga/Makefile.pup BitKeeper/deleted/.del-Makefile.sas: Delete: zlib/amiga/Makefile.sas BitKeeper/deleted/.del-README.contrib: Delete: zlib/contrib/README.contrib BitKeeper/deleted/.del-visual-basic.txt: Delete: zlib/contrib/visual-basic.txt BitKeeper/deleted/.del-gvmat32.asm: Delete: zlib/contrib/asm386/gvmat32.asm BitKeeper/deleted/.del-gvmat32c.c: Delete: zlib/contrib/asm386/gvmat32c.c BitKeeper/deleted/.del-mkgvmt32.bat: Delete: zlib/contrib/asm386/mkgvmt32.bat BitKeeper/deleted/.del-zlibvc.def: Delete: zlib/contrib/asm386/zlibvc.def BitKeeper/deleted/.del-zlibvc.dsp: Delete: zlib/contrib/asm386/zlibvc.dsp BitKeeper/deleted/.del-zlibvc.dsw: Delete: zlib/contrib/asm386/zlibvc.dsw BitKeeper/deleted/.del-algorithm.txt: Delete: zlib/algorithm.txt BitKeeper/deleted/.del-match.s: Delete: zlib/contrib/asm586/match.s BitKeeper/deleted/.del-readme.586: Delete: zlib/contrib/asm586/readme.586 BitKeeper/deleted/.del-match.s~1: Delete: zlib/contrib/asm686/match.s BitKeeper/deleted/.del-readme.686: Delete: zlib/contrib/asm686/readme.686 BitKeeper/deleted/.del-zlib.mak: Delete: zlib/contrib/delphi/zlib.mak BitKeeper/deleted/.del-zlibdef.pas: Delete: zlib/contrib/delphi/zlibdef.pas BitKeeper/deleted/.del-d_zlib.bpr: Delete: zlib/contrib/delphi2/d_zlib.bpr BitKeeper/deleted/.del-d_zlib.cpp: Delete: zlib/contrib/delphi2/d_zlib.cpp BitKeeper/deleted/.del-readme.txt: Delete: zlib/contrib/delphi2/readme.txt BitKeeper/deleted/.del-zlib.bpg: Delete: zlib/contrib/delphi2/zlib.bpg 
BitKeeper/deleted/.del-compress.c: Delete: zlib/compress.c BitKeeper/deleted/.del-zlib.bpr: Delete: zlib/contrib/delphi2/zlib.bpr BitKeeper/deleted/.del-zlib.cpp: Delete: zlib/contrib/delphi2/zlib.cpp BitKeeper/deleted/.del-zlib.pas: Delete: zlib/contrib/delphi2/zlib.pas BitKeeper/deleted/.del-zlib32.bpr: Delete: zlib/contrib/delphi2/zlib32.bpr BitKeeper/deleted/.del-zlib32.cpp: Delete: zlib/contrib/delphi2/zlib32.cpp BitKeeper/deleted/.del-test.cpp: Delete: zlib/contrib/iostream/test.cpp BitKeeper/deleted/.del-zfstream.cpp: Delete: zlib/contrib/iostream/zfstream.cpp BitKeeper/deleted/.del-zfstream.h: Delete: zlib/contrib/iostream/zfstream.h BitKeeper/deleted/.del-zstream.h: Delete: zlib/contrib/iostream2/zstream.h BitKeeper/deleted/.del-zstream_test.cpp: Delete: zlib/contrib/iostream2/zstream_test.cpp BitKeeper/deleted/.del-crc32.c: Delete: zlib/crc32.c BitKeeper/deleted/.del-ChangeLogUnzip: Delete: zlib/contrib/minizip/ChangeLogUnzip BitKeeper/deleted/.del-miniunz.c: Delete: zlib/contrib/minizip/miniunz.c BitKeeper/deleted/.del-minizip.c: Delete: zlib/contrib/minizip/minizip.c BitKeeper/deleted/.del-readme.txt~1: Delete: zlib/contrib/minizip/readme.txt BitKeeper/deleted/.del-unzip.c: Delete: zlib/contrib/minizip/unzip.c BitKeeper/deleted/.del-unzip.def: Delete: zlib/contrib/minizip/unzip.def BitKeeper/deleted/.del-unzip.h: Delete: zlib/contrib/minizip/unzip.h BitKeeper/deleted/.del-zip.c: Delete: zlib/contrib/minizip/zip.c BitKeeper/deleted/.del-zip.def: Delete: zlib/contrib/minizip/zip.def BitKeeper/deleted/.del-zip.h: Delete: zlib/contrib/minizip/zip.h BitKeeper/deleted/.del-deflate.c: Delete: zlib/deflate.c BitKeeper/deleted/.del-zlibvc.def~1: Delete: zlib/contrib/minizip/zlibvc.def BitKeeper/deleted/.del-zlibvc.dsp~1: Delete: zlib/contrib/minizip/zlibvc.dsp BitKeeper/deleted/.del-zlibvc.dsw~1: Delete: zlib/contrib/minizip/zlibvc.dsw BitKeeper/deleted/.del-makefile.w32: Delete: zlib/contrib/untgz/makefile.w32 BitKeeper/deleted/.del-untgz.c: Delete: zlib/contrib/untgz/untgz.c BitKeeper/deleted/.del-Makefile.b32: Delete: zlib/msdos/Makefile.b32 BitKeeper/deleted/.del-Makefile.bor: Delete: zlib/msdos/Makefile.bor BitKeeper/deleted/.del-Makefile.dj2: Delete: zlib/msdos/Makefile.dj2 BitKeeper/deleted/.del-Makefile.emx: Delete: zlib/msdos/Makefile.emx BitKeeper/deleted/.del-Makefile.msc: Delete: zlib/msdos/Makefile.msc BitKeeper/deleted/.del-deflate.h: Delete: zlib/deflate.h BitKeeper/deleted/.del-Makefile.tc: Delete: zlib/msdos/Makefile.tc BitKeeper/deleted/.del-Makefile.w32: Delete: zlib/msdos/Makefile.w32 BitKeeper/deleted/.del-Makefile.wat: Delete: zlib/msdos/Makefile.wat BitKeeper/deleted/.del-zlib.def~1: Delete: zlib/msdos/zlib.def BitKeeper/deleted/.del-zlib.rc: Delete: zlib/msdos/zlib.rc BitKeeper/deleted/.del-Makefile.emx~1: Delete: zlib/nt/Makefile.emx BitKeeper/deleted/.del-Makefile.gcc: Delete: zlib/nt/Makefile.gcc BitKeeper/deleted/.del-Makefile.nt: Delete: zlib/nt/Makefile.nt BitKeeper/deleted/.del-zlib.dnt: Delete: zlib/nt/zlib.dnt BitKeeper/deleted/.del-Makefile.os2: Delete: zlib/os2/Makefile.os2 configure.in: Auto merged client/mysql.cc: Auto merged client/mysqldump.c: Auto merged sql/handler.cc: Auto merged sql/mysqld.cc: Auto merged Docs/manual.texi: merge sql/ha_innodb.cc: merge
commit e82a6417c4
@@ -71,11 +71,4 @@ else
make=make
fi

if gcc -v 2>&1 | grep 'version 3' > /dev/null 2>&1
then
CXX="gcc -DUSE_MYSYS_NEW"
CXXLDFLAGS="-Wl,--defsym -Wl,__cxa_pure_virtual=0"
else
CXX=gcc
CXXLDFLAGS=""
fi
CXX=gcc
@@ -5,6 +5,7 @@ Miguel@light.local
Sinisa@sinisa.nasamreza.org
ahlentz@co3064164-a.rochd1.qld.optusnet.com.au
arjen@co3064164-a.bitbike.com
arjen@fred.bitbike.com
bell@sanja.is.com.ua
davida@isil.mysql.com
heikki@donna.mysql.fi
@@ -14,10 +15,12 @@ jani@janikt.pp.saunalahti.fi
jani@rhols221.adsl.netsonic.fi
jcole@abel.spaceapes.com
jcole@main.burghcom.com
jcole@mugatu.spaceapes.com
jcole@sarvik.tfr.cafe.ee
jcole@tetra.spaceapes.com
jorge@linux.jorge.mysql.com
kaj@work.mysql.com
miguel@hegel.local
miguel@light.local
monty@bitch.mysql.fi
monty@donna.mysql.fi
@@ -45,8 +48,5 @@ tonu@volk.internalnet
tonu@x153.internalnet
tonu@x3.internalnet
venu@work.mysql.com
zak@linux.local
jcole@mugatu.spaceapes.com
arjen@fred.bitbike.com
zak@balfor.local
miguel@hegel.local
zak@linux.local
@@ -92,7 +92,7 @@ if ($opt_stage == 0)
{
system("mkdir $host") if (! -d $host);
system("touch $host/mysql-fix-for-glob");
rm_all(<$host/mysql-*>);
rm_all(<$host/mysql*>);
system("mkdir $host/bin") if (! -d "$host/bin");
}
rm_all("$host/test");

@@ -159,10 +159,10 @@ cat > $logdir/$TMP_SCRIPT_MYSQL <<END
set -x

# Check environment
export MYSQL_BUILD_PATH="/usr/cygnus/redhat-980810/H-i386-pc-linux-gnu/bin/:/usr/bin:/bin"
export MYSQL_BUILD_CFLAGS="-O6 -fno-omit-frame-pointer -mpentium"
export MYSQL_BUILD_PATH="/usr/local/bin:/my/gnu/bin:/usr/bin:/bin"
export MYSQL_BUILD_CFLAGS="-O6 -fno-omit-frame-pointer -mcpu=pentiumpro"
export MYSQL_BUILD_CXXFLAGS="-O6 -fno-omit-frame-pointer \
-felide-constructors -fno-exceptions -fno-rtti -mpentium"
-felide-constructors -fno-exceptions -fno-rtti -mcpu=pentiumpro"
gcc -v

# Make RPM
Docs/manual.texi
@ -4533,7 +4533,7 @@ MySQL Server also supports
|
||||
the following additional type attributes:
|
||||
@itemize @bullet
|
||||
@item
|
||||
@code{UNSIGNED} option for integer columns.
|
||||
@code{UNSIGNED} option for integer and floating point columns.
|
||||
@item
|
||||
@code{ZEROFILL} option for integer columns.
|
||||
@item
|
||||
@ -5535,10 +5535,12 @@ named pipes are created and you must have TCP/IP installed.
|
||||
Optimised binary with support for symbolic links,
|
||||
InnoDB and BDB tables.
|
||||
@item @code{mysqld-max-nt} @tab
|
||||
Like @code{mysqld-max}, but compiled with support for named
|
||||
pipes.
|
||||
Like @code{mysqld-max}, but compiled with support for named pipes.
|
||||
@end multitable
|
||||
|
||||
Starting from 3.23.50, named pipes are only enabled if one starts mysqld with
|
||||
@code{--enable-named-pipe}.
|
||||
|
||||
All of the above binaries are optimised for the Pentium Pro
|
||||
processor but should work on any Intel processor >= i386.
|
||||
|
||||
@ -5761,9 +5763,10 @@ Solaris 2.5 and above with native threads on SPARC and x86. @xref{Solaris}.
|
||||
@item
|
||||
SunOS 4.x with the included MIT-pthreads package. @xref{Solaris}.
|
||||
@item
|
||||
SCO OpenServer with a recent port of the FSU Pthreads package. @xref{SCO}.
|
||||
Caldera (SCO) OpenServer with a recent port of the FSU Pthreads package.
|
||||
@xref{Caldera}.
|
||||
@item
|
||||
SCO UnixWare 7.0.1. @xref{SCO Unixware}.
|
||||
Caldera (SCO) UnixWare 7.0.1. @xref{Caldera Unixware}.
|
||||
@item
|
||||
Tru64 Unix
|
||||
@item
|
||||
@ -8157,6 +8160,10 @@ version 4.0;
|
||||
|
||||
@itemize @bullet
|
||||
@item
|
||||
@code{DOUBLE} and @code{FLOAT} columns are now honoring the
|
||||
@code{UNSIGNED} flag on storage (before @code{UNSIGNED} was ignored for
|
||||
these columns).
|
||||
@item
|
||||
Use @code{ORDER BY column DESC} now always sorts @code{NULL} values
|
||||
first; In 3.23 this was not always consistent.
|
||||
@item
|
||||
@ -8186,6 +8193,9 @@ you need to rebuild them with @code{ALTER TABLE table_name TYPE=MyISAM},
|
||||
@code{LOCATE()} and @code{INSTR()} are case sensitive if one of the
|
||||
arguments is a binary string.
|
||||
@item
|
||||
@code{STRCMP()} now uses the current character set when doing comparison,
|
||||
which means that the default comparison is case insensitive.
|
||||
@item
|
||||
@code{HEX(string)} now returns the characters in string converted to
|
||||
hexadecimal. If you want to convert a number to hexadecimal, you should
|
||||
ensure that you call @code{HEX()} with a numeric argument.
|
||||
@@ -9323,6 +9333,11 @@ You can force a MySQL client to use named pipes by specifying the
@code{--pipe} option or by specifying @code{.} as the host name. Use the
@code{--socket} option to specify the name of the pipe.

Note that starting from 3.23.50, named pipes are only enabled if start
mysqld with @code{--enable-named-pipe}. This is because some users have
experienced problems shutting down the MySQL server when one uses named
pipes.
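For illustration only (not part of this commit): a minimal client-side sketch of the named-pipe behaviour described above, assuming the standard libmysqlclient calls @code{mysql_init()} and @code{mysql_real_connect()}; the account, password, and database names are placeholders.

@example
#include <stdio.h>
#include <mysql.h>

int main(void)
{
  MYSQL *conn = mysql_init(NULL);
  if (conn == NULL)
    return 1;

  /* "." as the host name asks the client to use the named pipe
     instead of TCP/IP, as the manual text above describes */
  if (!mysql_real_connect(conn, ".", "some_user", "some_password",
                          "test", 0, NULL, 0))
  {
    fprintf(stderr, "connect failed: %s\n", mysql_error(conn));
    mysql_close(conn);
    return 1;
  }
  printf("connected through the named pipe\n");
  mysql_close(conn);
  return 0;
}
@end example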

You can test whether or not MySQL is working by executing the
following commands:

@ -9600,6 +9615,9 @@ option to the new MySQL clients or create an option file
|
||||
host = localhost
|
||||
@end example
|
||||
|
||||
Starting from 3.23.50, named pipes are only enabled if start mysqld with
|
||||
@code{--enable-named-pipe}.
|
||||
|
||||
@item @code{Access denied for user} error
|
||||
If you get the error @code{Access denied for user: 'some-user@@unknown'
|
||||
to database 'mysql'} when accessing a MySQL server on the same
|
||||
@ -10287,8 +10305,8 @@ alias mysqladmin '/usr/local/mysql/bin/mysqladmin'
|
||||
* Alpha-DEC-UNIX:: Alpha-DEC-UNIX Notes (Tru64)
|
||||
* Alpha-DEC-OSF1:: Alpha-DEC-OSF1 Notes
|
||||
* SGI-Irix:: SGI Irix Notes
|
||||
* SCO:: SCO Notes
|
||||
* SCO Unixware:: SCO Unixware Version 7.0 Notes
|
||||
* Caldera:: Caldera Notes
|
||||
* Caldera Unixware:: Caldera Unixware Version 7.0 Notes
|
||||
@end menu
|
||||
|
||||
|
||||
@ -10826,7 +10844,7 @@ can just change back to the top-level directly and run @code{make}
|
||||
again.
|
||||
|
||||
|
||||
@node SGI-Irix, SCO, Alpha-DEC-OSF1, Other Unix Notes
|
||||
@node SGI-Irix, Caldera, Alpha-DEC-OSF1, Other Unix Notes
|
||||
@subsubsection SGI Irix Notes
|
||||
|
||||
If you are using Irix Version 6.5.3 or newer @code{mysqld} will only be able to
|
||||
@ -10902,8 +10920,8 @@ CC=cc CXX=CC CFLAGS='-O3 -n32 -TARG:platform=IP22 -I/usr/local/include \
|
||||
@end example
|
||||
|
||||
|
||||
@node SCO, SCO Unixware, SGI-Irix, Other Unix Notes
|
||||
@subsubsection SCO Notes
|
||||
@node Caldera, Caldera Unixware, SGI-Irix, Other Unix Notes
|
||||
@subsubsection Caldera (SCO) Notes
|
||||
|
||||
The current port is tested only on a ``sco3.2v5.0.4'' and
|
||||
``sco3.2v5.0.5'' system. There has also been a lot of progress on a
|
||||
@ -10931,7 +10949,7 @@ shell> cp -p /usr/include/pthread/stdtypes.h \
|
||||
|
||||
@item
|
||||
You need the port of GCC 2.5.x for this product and the Development
|
||||
system. They are required on this version of SCO Unix. You cannot
|
||||
system. They are required on this version of Caldera (SCO) Unix. You cannot
|
||||
just use the GCC Dev system.
|
||||
|
||||
@item
|
||||
@ -10942,8 +10960,8 @@ You can also get a precompiled package from
|
||||
@uref{http://www.mysql.com/Downloads/SCO/FSU-threads-3.5c.tar.gz}.
|
||||
|
||||
@item
|
||||
FSU Pthreads can be compiled with SCO Unix 4.2 with tcpip. Or
|
||||
OpenServer 3.0 or Open Desktop 3.0 (OS 3.0 ODT 3.0), with the SCO
|
||||
FSU Pthreads can be compiled with Caldera (SCO) Unix 4.2 with tcpip. Or
|
||||
OpenServer 3.0 or Open Desktop 3.0 (OS 3.0 ODT 3.0), with the Caldera (SCO)
|
||||
Development System installed using a good port of GCC 2.5.x ODT or OS
|
||||
3.0 you will need a good port of GCC 2.5.x There are a lot of problems
|
||||
without a good port. The port for this product requires the SCO Unix
|
||||
@ -11005,7 +11023,7 @@ You should unpack this file in the @file{include} directory of your
|
||||
MySQL source tree.
|
||||
@end enumerate
|
||||
|
||||
SCO development notes:
|
||||
Caldera (SCO) development notes:
|
||||
|
||||
@itemize @bullet
|
||||
@item
|
||||
@ -11013,10 +11031,11 @@ MySQL should automatically detect FSU Pthreads and link @code{mysqld}
|
||||
with @code{-lgthreads -lsocket -lgthreads}.
|
||||
|
||||
@item
|
||||
The SCO development libraries are re-entrant in FSU Pthreads. SCO claims
|
||||
that its libraries' functions are re-entrant, so they must be reentrant with
|
||||
FSU Pthreads. FSU Pthreads on OpenServer tries to use the SCO scheme to
|
||||
make re-entrant libraries.
|
||||
|
||||
The Caldera (SCO) development libraries are re-entrant in FSU Pthreads.
|
||||
Caldera claim sthat its libraries' functions are re-entrant, so they must
|
||||
be reentrant with FSU Pthreads. FSU Pthreads on OpenServer tries to use
|
||||
the SCO scheme to make re-entrant libraries.
|
||||
|
||||
@item
|
||||
FSU Pthreads (at least the version at @uref{http://www.mysql.com/}) comes
|
||||
@ -11036,8 +11055,8 @@ makes mysqld instable. You have to remove this one if you want to run
|
||||
mysqld on an OpenServer 5.0.6 machine.
|
||||
@end itemize
|
||||
|
||||
If you want to install DBI on SCO, you have to edit the @file{Makefile} in
|
||||
DBI-xxx and each subdirectory.
|
||||
If you want to install DBI on Caldera (SCO), you have to edit the
|
||||
@file{Makefile} in DBI-xxx and each subdirectory.
|
||||
|
||||
Note that the following assumes gcc 2.95.2 or newer:
|
||||
|
||||
@ -11067,8 +11086,8 @@ if they were compiled with @code{icc} or @code{cc}.
|
||||
Perl works best when compiled with @code{cc}.
|
||||
|
||||
|
||||
@node SCO Unixware, , SCO, Other Unix Notes
|
||||
@subsubsection SCO Unixware Version 7.0 Notes
|
||||
@node Caldera Unixware, , Caldera, Other Unix Notes
|
||||
@subsubsection Caldera (SCO) Unixware Version 7.0 Notes
|
||||
|
||||
You must use a version of MySQL at least as recent as Version 3.22.13
|
||||
because that version fixes some portability problems under Unixware.
|
||||
@ -11417,14 +11436,15 @@ $sysliblist .= " -lm -lz";
|
||||
After this, you @strong{must} run 'make realclean' and then proceed with the
|
||||
installation from the beginning.
|
||||
|
||||
If you want to use the Perl module on a system that doesn't support dynamic
|
||||
linking (like SCO) you can generate a static version of Perl that includes
|
||||
@code{DBI} and @code{DBD-mysql}. The way this works is that you generate a
|
||||
version of Perl with the @code{DBI} code linked in and install it on top of
|
||||
your current Perl. Then you use that to build a version of Perl that
|
||||
additionally has the @code{DBD} code linked in, and install that.
|
||||
If you want to use the Perl module on a system that doesn't support
|
||||
dynamic linking (like Caldera/SCO) you can generate a static version of
|
||||
Perl that includes @code{DBI} and @code{DBD-mysql}. The way this works
|
||||
is that you generate a version of Perl with the @code{DBI} code linked
|
||||
in and install it on top of your current Perl. Then you use that to
|
||||
build a version of Perl that additionally has the @code{DBD} code linked
|
||||
in, and install that.
|
||||
|
||||
On SCO, you must have the following environment variables set:
|
||||
On Caldera (SCO), you must have the following environment variables set:
|
||||
|
||||
@example
|
||||
shell> LD_LIBRARY_PATH=/lib:/usr/lib:/usr/local/lib:/usr/progressive/lib
|
||||
@ -11450,8 +11470,8 @@ shell> make perl
|
||||
|
||||
Then you must install the new Perl. The output of @code{make perl} will
|
||||
indicate the exact @code{make} command you will need to execute to perform
|
||||
the installation. On SCO, this is @code{make -f Makefile.aperl inst_perl
|
||||
MAP_TARGET=perl}.
|
||||
the installation. On Caldera (SCO), this is
|
||||
@code{make -f Makefile.aperl inst_perl MAP_TARGET=perl}.
|
||||
|
||||
Next, use the just-created Perl to create another Perl that also includes a
|
||||
statically-linked @code{DBD::mysql} by running these commands in the
|
||||
@ -14158,6 +14178,10 @@ Enable system locking. Note that if you use this option on a system
|
||||
which a not fully working lockd() (as on Linux) you will easily get
|
||||
mysqld to deadlock.
|
||||
|
||||
@item --enable-named-pipe
|
||||
Enable support for named pipes; This only works on NT and newer windows
|
||||
versions.
|
||||
|
||||
@item -T, --exit-info
|
||||
This is a bit mask of different flags one can use for debugging the
|
||||
mysqld server; One should not use this option if one doesn't know
|
||||
@ -21535,7 +21559,7 @@ binaries includes:
|
||||
@item Linux-Ia64 @tab N @tab Y
|
||||
@item Solaris-intel @tab N @tab Y
|
||||
@item Solaris-sparc @tab Y @tab Y
|
||||
@item SCO OSR5 @tab Y @tab Y
|
||||
@item Caldera (SCO) OSR5 @tab Y @tab Y
|
||||
@item UnixWare @tab Y @tab Y
|
||||
@item Windows/NT @tab Y @tab Y
|
||||
@end multitable
|
||||
@@ -33495,8 +33519,6 @@ restrictions:
@itemize @bullet
@item
Only the last @code{SELECT} command can have @code{INTO OUTFILE}.
@item
Only the last @code{SELECT} command can have @code{ORDER BY}.
@end itemize

If you don't use the keyword @code{ALL} for the @code{UNION}, all
@@ -33504,6 +33526,13 @@ returned rows will be unique, like if you had done a @code{DISTINCT} for
the total result set. If you specify @code{ALL}, then you will get all
matching rows from all the used @code{SELECT} statements.

If you want to use an @code{ORDER BY} for the total @code{UNION} result,
you should use parentheses:

@example
(SELECT a FROM table_name WHERE a=10 AND B=1 ORDER BY a LIMIT 10) UNION
(SELECT a FROM table_name WHERE a=11 AND B=2 ORDER BY a LIMIT 10) ORDER BY a;
@end example

@findex HANDLER
@node HANDLER, INSERT, SELECT, Data Manipulation
@ -39588,9 +39617,9 @@ Linux 2.x intel
|
||||
@item
|
||||
Solaris sparc
|
||||
@item
|
||||
SCO OpenServer
|
||||
Caldera (SCO) OpenServer
|
||||
@item
|
||||
SCO UnixWare 7.0.1
|
||||
Caldera (SCO) UnixWare 7.0.1
|
||||
@end itemize
|
||||
|
||||
It doesn't work with the following operating systems:
|
||||
@@ -41897,9 +41926,11 @@ set has been read.

If you acquire a result set from a successful call to
@code{mysql_store_result()}, the client receives the entire set in one
operation. In this case, a @code{NULL} return from @code{mysql_fetch_row()}
always means the end of the result set has been reached and it is
unnecessary to call @code{mysql_eof()}.
operation. In this case, a @code{NULL} return from
@code{mysql_fetch_row()} always means the end of the result set has been
reached and it is unnecessary to call @code{mysql_eof()}. When used
with @code{mysql_store_result()}, @code{mysql_eof()} will always return
true.
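Not part of this commit; to make the buffered case above concrete, a short sketch using the C API calls named in the text (@code{mysql_query()}, @code{mysql_store_result()}, @code{mysql_fetch_row()}); the query and column layout are placeholders.

@example
#include <stdio.h>
#include <mysql.h>

/* Buffered retrieval: after mysql_store_result() the whole result set is
   already on the client, so a NULL row simply means "no more rows" and
   there is no need to call mysql_eof(). */
static int dump_table(MYSQL *conn)
{
  MYSQL_RES *res;
  MYSQL_ROW row;

  if (mysql_query(conn, "SELECT id, name FROM some_table"))
    return -1;
  res = mysql_store_result(conn);   /* entire set in one operation */
  if (res == NULL)
    return -1;
  while ((row = mysql_fetch_row(res)) != NULL)  /* NULL == end of set */
    printf("%s  %s\n", row[0] ? row[0] : "NULL",
                       row[1] ? row[1] : "NULL");
  mysql_free_result(res);
  return 0;
}
@end example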

On the other hand, if you use @code{mysql_use_result()} to initiate a result
set retrieval, the rows of the set are obtained from the server one by one as
@ -45812,7 +45843,8 @@ are used if you don't specify a hostname or if you specify the special
|
||||
hostname @code{localhost}.
|
||||
|
||||
On Windows, if the @code{mysqld} server is running on 9x/Me, you can
|
||||
connect only via TCP/IP. If the server is running on NT/2000/XP, you
|
||||
connect only via TCP/IP. If the server is running on NT/2000/XP and
|
||||
mysqld is started with @code{--enable-named-pipe}, you
|
||||
can also connect with named pipes. The name of the named pipe is MySQL.
|
||||
If you don't give a hostname when connecting to @code{mysqld}, a MySQL
|
||||
client will first try to connect to the named pipe, and if this doesn't
|
||||
@ -48886,6 +48918,9 @@ Our TODO section contains what we plan to have in 4.0. @xref{TODO MySQL 4.0}.
|
||||
|
||||
@itemize @bullet
|
||||
@item
|
||||
@code{DOUBLE} and @code{FLOAT} columns are now honoring the
|
||||
@code{UNSIGNED} flag on storage.
|
||||
@item
|
||||
@code{InnoDB} now retains foreign key constraints through @code{ALTER TABLE}
|
||||
and @code{CREATE/DROP INDEX}.
|
||||
@item
|
||||
@ -48914,8 +48949,6 @@ the braces.
|
||||
Fixed that full-text works fine with @code{UNION} operations.
|
||||
@item
|
||||
Fixed bug with indexless boolean full-text search.
|
||||
@item
|
||||
Fixed bug that sometimes appeared when full-text search was used
|
||||
with ``const'' tables.
|
||||
@item
|
||||
Fixed incorrect error value when doing a @code{SELECT} with an empty
|
||||
@ -49120,6 +49153,9 @@ now handle signed and unsigned @code{BIGINT} numbers correctly.
|
||||
@item
|
||||
New character set @code{latin_de} which provides correct German sorting.
|
||||
@item
|
||||
@code{STRCMP()} now uses the current character set when doing comparison,
|
||||
which means that the default comparison is case insensitive.
|
||||
@item
|
||||
@code{TRUNCATE TABLE} and @code{DELETE FROM table_name} are now separate
|
||||
functions. One bonus is that @code{DELETE FROM table_name} now returns
|
||||
the number of deleted rows, rather than zero.
|
||||
@ -49304,6 +49340,26 @@ not yet 100% confident in this code.
|
||||
@appendixsubsec Changes in release 3.23.50
|
||||
@itemize @bullet
|
||||
@item
|
||||
Our Linux RPMS and binaries are now compiled with gcc 3.0.4, which should
|
||||
make them a bit faster.
|
||||
@item
|
||||
Fixed some buffer overflow problems when reading startup parameters.
|
||||
@item
|
||||
Because of problems on shutdown we have now disabled named pipes on
|
||||
windows by default. One can enable this with by starting mysqld with
|
||||
@code{--enable-named-pipe}.
|
||||
@item
|
||||
Fixed bug when using @code{WHERE key_column = 'J' or key_column='j'}.
|
||||
@item
|
||||
Fixed core-dump bug when using @code{--log-bin} with @code{LOAD DATA
|
||||
INFILE} without an active database.
|
||||
@item
|
||||
Fixed bug in @code{RENAME TABLE} when used with
|
||||
@code{lower_case_table_names=1} (default on Windows).
|
||||
@item
|
||||
Fixed unlikely core-dump bug when using @code{DROP TABLE} on a table
|
||||
that was in use by a thread that also used queries on only temporary tables.
|
||||
@item
|
||||
Fixed problem with @code{SHOW CREATE TABLE} and @code{PRIMARY KEY} when using
|
||||
32 indexes.
|
||||
@item
|
||||
@ -49332,7 +49388,7 @@ Don't give warning for statement that is only a comment; This is needed for
|
||||
@code{mysqldump --disable-keys} to work.
|
||||
@item
|
||||
Fixed unlikely caching bug when doing a join without keys. In this case
|
||||
the last used field for a table always returned @code{NULL}.
|
||||
the last used column for a table always returned @code{NULL}.
|
||||
@item
|
||||
Added options to make @code{LOAD DATA LOCAL INFILE} more secure.
|
||||
@item
|
||||
@ -54018,7 +54074,7 @@ Fixed bug in range calculation that could return empty
|
||||
set when searching on multiple key with only one entry (very rare).
|
||||
@item
|
||||
Most things ported to FSU Pthreads, which should allow MySQL to
|
||||
run on SCO. @xref{SCO}.
|
||||
run on Caldera (SCO). @xref{Caldera}.
|
||||
@end itemize
|
||||
|
||||
|
||||
|
@ -841,17 +841,6 @@ static int get_options(int argc, char **argv)
|
||||
return(0);
|
||||
}
|
||||
|
||||
#if defined(OS2)
|
||||
static char* readline( char* prompt)
|
||||
{
|
||||
#if defined(OS2)
|
||||
static char linebuffer[254];
|
||||
#endif
|
||||
puts( prompt);
|
||||
return gets( linebuffer);
|
||||
}
|
||||
#endif
|
||||
|
||||
static int read_lines(bool execute_commands)
|
||||
{
|
||||
#if defined( __WIN__) || defined(OS2)
|
||||
|
@ -149,7 +149,7 @@ CHANGEABLE_VAR md_changeable_vars[] = {
|
||||
{ "max_allowed_packet", (long*) &max_allowed_packet,24*1024*1024,4096,
|
||||
512*1024L*1024L,MALLOC_OVERHEAD,1024},
|
||||
{ "net_buffer_length", (long*) &net_buffer_length,1024*1024L-1025,4096,
|
||||
512*1024L*1024L,MALLOC_OVERHEAD+1024,1024},
|
||||
16*1024L*1024L,MALLOC_OVERHEAD-1024,1024},
|
||||
{ 0, 0, 0, 0, 0, 0, 0}
|
||||
};
|
||||
|
||||
@@ -651,7 +651,7 @@ static uint getTableStructure(char *table, char* db)
/* Make an sql-file, if path was given iow. option -T was given */
char buff[20+FN_REFLEN];

sprintf(buff,"show create table %s",table_name);
sprintf(buff,"show create table `%s`",table);
if (mysql_query(sock, buff))
{
fprintf(stderr, "%s: Can't get CREATE TABLE for table '%s' (%s)\n",
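The backquoting of the table name is the point of the hunk above. As a hedged illustration (not part of this commit), here is a self-contained sketch of identifier quoting; the helper below is hypothetical -- mysqldump's real helper is quote_name(), whose body is not shown in this diff.

#include <stdio.h>

/* Hypothetical helper: wrap an identifier in backquotes and double any
   backquote inside the name, so `weird``name` stays a valid identifier.
   Assumes out has room for 2 * strlen(name) + 3 bytes. */
static char *quote_identifier(char *out, const char *name)
{
  char *p = out;
  *p++ = '`';
  for (; *name; name++)
  {
    if (*name == '`')
      *p++ = '`';               /* double embedded backquotes */
    *p++ = *name;
  }
  *p++ = '`';
  *p = '\0';
  return out;
}

int main(void)
{
  char quoted[2 * 64 + 3];
  char query[256];
  sprintf(query, "show create table %s",
          quote_identifier(quoted, "some`table"));
  puts(query);                  /* show create table `some``table` */
  return 0;
}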
@ -784,7 +784,7 @@ static uint getTableStructure(char *table, char* db)
|
||||
{
|
||||
if (opt_keywords)
|
||||
fprintf(sql_file, " %s.%s %s", table_name,
|
||||
quote_name(row[SHOW_FIELDNAME],name_buff), row[SHOW_TYPE]);
|
||||
quote_name(row[SHOW_FIELDNAME],name_buff), row[SHOW_TYPE]);
|
||||
else
|
||||
fprintf(sql_file, " %s %s", quote_name(row[SHOW_FIELDNAME],
|
||||
name_buff), row[SHOW_TYPE]);
|
||||
@ -1072,6 +1072,9 @@ static void dumpTable(uint numFields, char *table)
|
||||
fputs(insert_pat,md_result_file);
|
||||
mysql_field_seek(res,0);
|
||||
|
||||
if (opt_xml)
|
||||
fprintf(md_result_file, "\t<row>\n");
|
||||
|
||||
for (i = 0; i < mysql_num_fields(res); i++)
|
||||
{
|
||||
if (!(field = mysql_fetch_field(res)))
|
||||
@ -1161,6 +1164,9 @@ static void dumpTable(uint numFields, char *table)
|
||||
}
|
||||
}
|
||||
|
||||
if (opt_xml)
|
||||
fprintf(md_result_file, "\t</row>\n");
|
||||
|
||||
if (extended_insert)
|
||||
{
|
||||
ulong row_length;
|
||||
|
configure.in
@@ -38,7 +38,7 @@ for i in $AVAILABLE_LANGUAGES
do
AVAILABLE_LANGUAGES_ERRORS="$AVAILABLE_LANGUAGES_ERRORS $i/errmsg.sys"
echo "$i/errmsg.sys: $i/errmsg.txt
\$(top_builddir)/extra/comp_err $i/errmsg.txt $i/errmsg.sys" \
\$(top_builddir)/extra/comp_err \$^ $i/errmsg.sys" \
>> $AVAILABLE_LANGUAGES_ERRORS_RULES
done
@@ -126,6 +126,20 @@ AC_PROG_CC
AC_PROG_CXX
AC_PROG_CPP

# Print version of CC and CXX compiler (if they support --version)
CC_VERSION=`$CC --version`
if test $? -eq "0"
then
AC_MSG_CHECKING("C Compiler version");
AC_MSG_RESULT("$CC $CC_VERSION")
fi
CXX_VERSION=`$CXX --version`
if test $? -eq "0"
then
AC_MSG_CHECKING("C++ compiler version");
AC_MSG_RESULT("$CXX $CXX_VERSION")
fi

# Fix for sgi gcc / sgiCC which tries to emulate gcc
if test "$CC" = "sgicc"
then
@@ -307,6 +321,19 @@ then
# Disable exceptions as they seams to create problems with gcc and threads.
# mysqld doesn't use run-time-type-checking, so we disable it.
CXXFLAGS="$CXXFLAGS -fno-implicit-templates -fno-exceptions -fno-rtti"

# If you are using 'gcc' 3.0 (not g++) to compile C++ programs,
# we will gets some problems when linking static programs.
# The following code is used to fix this problem.

if test "$CXX" = "gcc"
then
if $CXX -v 2>&1 | grep 'version 3' > /dev/null 2>&1
then
CXXFLAGS="$CXXFLAGS -DUSE_MYSYS_NEW"
CXXLDFLAGS="$CXXLDFLAGS -Wl,--defsym -Wl,__cxa_pure_virtual=0"
fi
fi
fi

# Avoid bug in fcntl on some versions of linux
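As a hedged aside (not part of this change): the "-Wl,--defsym -Wl,__cxa_pure_virtual=0" flag above resolves a C++ runtime symbol that is missing when C++ objects are linked with gcc instead of g++. An equivalent illustrative workaround in plain C is to link an empty stub with that name:

/* Illustrative only: an empty stub with this exact name satisfies the
   reference the C++ front end emits for pure virtual calls when no
   libstdc++/libsupc++ is linked in.  configure.in instead resolves the
   symbol to 0 at link time with --defsym, as the hunk above shows. */
void __cxa_pure_virtual(void)
{
}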
@ -435,16 +462,17 @@ AC_ARG_WITH(other-libc,
|
||||
enable_shared="no"
|
||||
all_is_static="yes"
|
||||
CFLAGS="$CFLAGS -I$other_libc_include"
|
||||
# There seems to be a feature in gcc that treats system and libc headers
|
||||
# silently when they violatate ANSI C++ standard, but it is strict otherwise
|
||||
# since gcc cannot now recognize that our headers are libc, we work around
|
||||
# by telling it to be permissive. Note that this option only works with
|
||||
# new versions of gcc (2.95.x and above)
|
||||
CXXFLAGS="$CXXFLAGS -fpermissive -I$other_libc_include"
|
||||
#There seems to be a feature in gcc that treats system and libc headers
|
||||
#leniently when they violatate ANSI C++ standard, but it is strict otherwise
|
||||
#since gcc cannot now recognize that our headers are libc, we work around
|
||||
#by telling it to be permissive
|
||||
static_nss=
|
||||
if test -f "$other_libc_lib/libnss_files.a"
|
||||
then
|
||||
# libc has been compiled with --enable-static-nss
|
||||
# we need special flags, but we will have to add those later
|
||||
# libc has been compiled with --enable-static-nss
|
||||
# we need special flags, but we will have to add those later
|
||||
STATIC_NSS_FLAGS="-Wl,--start-group -lc -lnss_files -lnss_dns -lresolv \
|
||||
-Wl,--end-group"
|
||||
static_nss=1
|
||||
@ -460,12 +488,12 @@ AC_ARG_WITH(other-libc,
|
||||
LDFLAGS="$LDFLAGS -static -L$other_libc_lib "
|
||||
fi
|
||||
|
||||
# When linking against custom libc installed separately, we want to force
|
||||
# all binary builds to be static, including the build done by configure
|
||||
# itself to test for system features.
|
||||
with_mysqld_ldflags="-all-static"
|
||||
with_client_ldflags="-all-static"
|
||||
NOINST_LDFLAGS="-all-static"
|
||||
#when linking against custom libc installed separately, we want to force all
|
||||
#binary builds to be static, including the build done by configure itself
|
||||
#to test for system features
|
||||
],
|
||||
[
|
||||
other_libc_include=
|
||||
@ -974,7 +1002,8 @@ Reference Manual.])
|
||||
if test -f /usr/shlib/libpthread.so -a -f /usr/lib/libmach.a -a -f /usr/ccs/lib/cmplrs/cc/libexc.a
|
||||
then
|
||||
with_named_thread="-lpthread -lmach -lexc"
|
||||
#with_named_thread="-lpthread -lmach -lexc -lc"
|
||||
CFLAGS="$CFLAGS -D_REENTRANT"
|
||||
CXXFLAGS="$CXXFLAGS -D_REENTRANT"
|
||||
AC_DEFINE(HAVE_DEC_THREADS)
|
||||
AC_MSG_RESULT("yes")
|
||||
else
|
||||
@ -1557,9 +1586,9 @@ ac_save_CXXFLAGS="$CXXFLAGS"
|
||||
AC_CACHE_CHECK([style of gethost* routines], mysql_cv_gethost_style,
|
||||
AC_LANG_SAVE
|
||||
AC_LANG_CPLUSPLUS
|
||||
#do not treat warnings as errors if we are linking agaist other libc
|
||||
#this is to work around gcc not being permissive on non-system includes
|
||||
#with respect to ANSI C++
|
||||
# Do not treat warnings as errors if we are linking agaist other libc
|
||||
# this is to work around gcc not being permissive on non-system includes
|
||||
# with respect to ANSI C++
|
||||
if test "$ac_cv_prog_gxx" = "yes" -a "$with_other_libc" = "no"
|
||||
then
|
||||
CXXFLAGS="$CXXFLAGS -Werror"
|
||||
@ -2300,7 +2329,7 @@ EOF
|
||||
fi
|
||||
fi
|
||||
|
||||
#IMPORTANT - do not modify LIBS past this line - this hack is the only way
|
||||
# IMPORTANT - do not modify LIBS past this line - this hack is the only way
|
||||
# I know to add the static NSS magic if we have static NSS libraries with
|
||||
# glibc - Sasha
|
||||
|
||||
|
@ -31,7 +31,7 @@
|
||||
#define SYSTEM_TYPE "Win95/Win98"
|
||||
#endif
|
||||
|
||||
#ifdef _WIN64
|
||||
#if defined(_WIN64) || defined(WIN64)
|
||||
#define MACHINE_TYPE "ia64" /* Define to machine type name */
|
||||
#else
|
||||
#define MACHINE_TYPE "i32" /* Define to machine type name */
|
||||
|
@@ -67,7 +67,7 @@ enum enum_server_command {COM_SLEEP,COM_QUIT,COM_INIT_DB,COM_QUERY,
#define REFRESH_TABLES 4 /* close all tables */
#define REFRESH_HOSTS 8 /* Flush host cache */
#define REFRESH_STATUS 16 /* Flush status variables */
#define REFRESH_THREADS 32 /* Flush status variables */
#define REFRESH_THREADS 32 /* Flush thread cache */
#define REFRESH_SLAVE 64 /* Reset master info and restart slave
thread */
#define REFRESH_MASTER 128 /* Remove all bin logs in the index
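Since the REFRESH_* values above are single bits, a client can OR several of them into one request. A minimal sketch (not part of this commit), assuming an already-established connection and the C API call mysql_refresh():

#include <mysql.h>

/* Flush the host cache and the thread cache in one round trip.
   8 | 32 corresponds to REFRESH_HOSTS | REFRESH_THREADS above. */
static int flush_hosts_and_threads(MYSQL *conn)
{
  return mysql_refresh(conn, REFRESH_HOSTS | REFRESH_THREADS);
}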
@@ -108,8 +108,8 @@ enum enum_server_command {COM_SLEEP,COM_QUIT,COM_INIT_DB,COM_QUERY,
struct st_vio; /* Only C */
typedef struct st_vio Vio;

#define MAX_CHAR_WIDTH 255 // Max length for a CHAR colum
#define MAX_BLOB_WIDTH 8192 // Default width for blob
#define MAX_CHAR_WIDTH 255 /* Max length for a CHAR colum */
#define MAX_BLOB_WIDTH 8192 /* Default width for blob */

typedef struct st_net {
Vio* vio;
@ -570,6 +570,19 @@ btr_page_get_father_for_rec(
|
||||
|
||||
node_ptr = btr_cur_get_rec(&cursor);
|
||||
|
||||
if (btr_node_ptr_get_child_page_no(node_ptr) !=
|
||||
buf_frame_get_page_no(page)) {
|
||||
fprintf(stderr,
|
||||
"InnoDB: Corruption of an index tree: table %s, index %s,\n"
|
||||
"InnoDB: father ptr page no %lu, child page no %lu\n",
|
||||
(UT_LIST_GET_FIRST(tree->tree_indexes))->table_name,
|
||||
(UT_LIST_GET_FIRST(tree->tree_indexes))->name,
|
||||
btr_node_ptr_get_child_page_no(node_ptr),
|
||||
buf_frame_get_page_no(page));
|
||||
page_rec_print(page_rec_get_next(page_get_infimum_rec(page)));
|
||||
page_rec_print(node_ptr);
|
||||
}
|
||||
|
||||
ut_a(btr_node_ptr_get_child_page_no(node_ptr) ==
|
||||
buf_frame_get_page_no(page));
|
||||
mem_heap_free(heap);
|
||||
|
@ -204,7 +204,7 @@ btr_cur_search_to_nth_level(
|
||||
the caller uses his search latch
|
||||
to protect the record! */
|
||||
btr_cur_t* cursor, /* in/out: tree cursor; the cursor page is
|
||||
s- or x-latched, but see also above! */
|
||||
s- or x-latched, but see also above! */
|
||||
ulint has_search_latch,/* in: info on the latch mode the
|
||||
caller currently has on btr_search_latch:
|
||||
RW_S_LATCH, or 0 */
|
||||
|
@ -743,7 +743,7 @@ btr_search_guess_on_hash(
|
||||
|
||||
#ifdef notdefined
|
||||
/* These lines of code can be used in a debug version to check
|
||||
correctness of the searched cursor position: */
|
||||
the correctness of the searched cursor position: */
|
||||
|
||||
info->last_hash_succ = FALSE;
|
||||
|
||||
|
@@ -220,6 +220,10 @@ buf_calc_page_checksum(
{
ulint checksum;

/* Since the fields FIL_PAGE_FILE_FLUSH_LSN and ..._ARCH_LOG_NO
are written outside the buffer pool to the first pages of data
files, we have to skip them in page checksum calculation */

checksum = ut_fold_binary(page, FIL_PAGE_FILE_FLUSH_LSN);
+ ut_fold_binary(page + FIL_PAGE_DATA,
UNIV_PAGE_SIZE - FIL_PAGE_DATA
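To make the comment above concrete, here is a self-contained sketch of the same idea (not part of this commit): checksum a page while skipping the header window that is rewritten outside the buffer pool, and also a trailer where the checksum itself is later stored. All constants are illustrative placeholders, and fold_bytes() merely stands in for InnoDB's ut_fold_binary().

#include <stddef.h>

enum { PG_SIZE = 16384,        /* placeholder page size              */
       SKIP_START = 26,        /* start of the volatile header part  */
       SKIP_END = 38,          /* first byte after it                */
       TRAILER = 8 };          /* trailer that will hold the checksum */

static unsigned long fold_bytes(const unsigned char *p, size_t len)
{
  unsigned long h = 0;
  while (len--)
    h = h * 131 + *p++;        /* simple multiplicative fold */
  return h;
}

unsigned long page_checksum(const unsigned char *page)
{
  /* fold bytes [0, SKIP_START) and [SKIP_END, PG_SIZE - TRAILER);
     the gap holds fields written after the checksum is computed */
  return fold_bytes(page, SKIP_START)
       + fold_bytes(page + SKIP_END, PG_SIZE - SKIP_END - TRAILER);
}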
@ -279,8 +283,9 @@ buf_page_print(
|
||||
|
||||
ut_sprintf_buf(buf, read_buf, UNIV_PAGE_SIZE);
|
||||
|
||||
ut_print_timestamp(stderr);
|
||||
fprintf(stderr,
|
||||
"InnoDB: Page dump in ascii and hex (%u bytes):\n%s",
|
||||
" InnoDB: Page dump in ascii and hex (%u bytes):\n%s",
|
||||
UNIV_PAGE_SIZE, buf);
|
||||
fprintf(stderr, "InnoDB: End of page dump\n");
|
||||
|
||||
@ -288,7 +293,8 @@ buf_page_print(
|
||||
|
||||
checksum = buf_calc_page_checksum(read_buf);
|
||||
|
||||
fprintf(stderr, "InnoDB: Page checksum %lu stored checksum %lu\n",
|
||||
ut_print_timestamp(stderr);
|
||||
fprintf(stderr, " InnoDB: Page checksum %lu stored checksum %lu\n",
|
||||
checksum, mach_read_from_4(read_buf
|
||||
+ UNIV_PAGE_SIZE
|
||||
- FIL_PAGE_END_LSN));
|
||||
@ -1358,47 +1364,87 @@ buf_page_io_complete(
|
||||
/*=================*/
|
||||
buf_block_t* block) /* in: pointer to the block in question */
|
||||
{
|
||||
dulint id;
|
||||
dict_index_t* index;
|
||||
dulint id;
|
||||
ulint io_type;
|
||||
|
||||
ulint read_page_no;
|
||||
|
||||
ut_ad(block);
|
||||
|
||||
io_type = block->io_fix;
|
||||
|
||||
if (io_type == BUF_IO_READ) {
|
||||
/* If this page is not uninitialized and not in the
|
||||
doublewrite buffer, then the page number should be the
|
||||
same as in block */
|
||||
|
||||
read_page_no = mach_read_from_4((block->frame)
|
||||
+ FIL_PAGE_OFFSET);
|
||||
if (read_page_no != 0
|
||||
&& !trx_doublewrite_page_inside(read_page_no)
|
||||
&& read_page_no != block->offset) {
|
||||
|
||||
fprintf(stderr,
|
||||
"InnoDB: Error: page n:o stored in the page read in is %lu, should be %lu!\n",
|
||||
read_page_no, block->offset);
|
||||
}
|
||||
#ifdef notdefined
|
||||
if (block->offset != 0 && read_page_no == 0) {
|
||||
/* Check that the page is really uninited */
|
||||
|
||||
for (i = 0; i < UNIV_PAGE_SIZE; i++) {
|
||||
|
||||
if (*((block->frame) + i) != '\0') {
|
||||
fprintf(stderr,
|
||||
"InnoDB: Error: page n:o in the page read in is 0, but page %lu is inited!\n",
|
||||
block->offset);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
/* From version 3.23.38 up we store the page checksum
|
||||
to the 4 upper bytes of the page end lsn field */
|
||||
to the 4 first bytes of the page end lsn field */
|
||||
|
||||
if (buf_page_is_corrupted(block->frame)) {
|
||||
fprintf(stderr,
|
||||
"InnoDB: Database page corruption or a failed\n"
|
||||
"InnoDB: file read of page %lu.\n", block->offset);
|
||||
"InnoDB: Database page corruption on disk or a failed\n"
|
||||
"InnoDB: file read of page %lu.\n", block->offset);
|
||||
|
||||
fprintf(stderr,
|
||||
"InnoDB: You may have to recover from a backup.\n");
|
||||
"InnoDB: You may have to recover from a backup.\n");
|
||||
|
||||
buf_page_print(block->frame);
|
||||
|
||||
fprintf(stderr,
|
||||
"InnoDB: Database page corruption or a failed\n"
|
||||
"InnoDB: file read of page %lu.\n", block->offset);
|
||||
"InnoDB: Database page corruption on disk or a failed\n"
|
||||
"InnoDB: file read of page %lu.\n", block->offset);
|
||||
fprintf(stderr,
|
||||
"InnoDB: You may have to recover from a backup.\n");
|
||||
"InnoDB: You may have to recover from a backup.\n");
|
||||
fprintf(stderr,
|
||||
"InnoDB: It is also possible that your operating\n"
|
||||
"InnoDB: system has corrupted its own file cache\n"
|
||||
"InnoDB: and rebooting your computer removes the\n"
|
||||
"InnoDB: error.\n");
|
||||
"InnoDB: It is also possible that your operating\n"
|
||||
"InnoDB: system has corrupted its own file cache\n"
|
||||
"InnoDB: and rebooting your computer removes the\n"
|
||||
"InnoDB: error.\n"
|
||||
"InnoDB: If the corrupt page is an index page\n"
|
||||
"InnoDB: you can also try to fix the corruption\n"
|
||||
"InnoDB: by dumping, dropping, and reimporting\n"
|
||||
"InnoDB: the corrupt table. You can use CHECK\n"
|
||||
"InnoDB: TABLE to scan your table for corruption.\n"
|
||||
"InnoDB: Look also at section 6.1 of\n"
|
||||
"InnoDB: http://www.innodb.com/ibman.html about\n"
|
||||
"InnoDB: forcing recovery.\n");
|
||||
|
||||
if (srv_force_recovery < SRV_FORCE_IGNORE_CORRUPT) {
|
||||
if (srv_force_recovery < SRV_FORCE_IGNORE_CORRUPT) {
|
||||
fprintf(stderr,
|
||||
"InnoDB: Ending processing because of a corrupt database page.\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
if (recv_recovery_is_on()) {
|
||||
recv_recover_page(TRUE, block->frame, block->space,
|
||||
block->offset);
|
||||
recv_recover_page(FALSE, TRUE, block->frame,
|
||||
block->space, block->offset);
|
||||
}
|
||||
|
||||
if (!recv_no_ibuf_operations) {
|
||||
|
@@ -327,6 +327,34 @@ try_again:
mutex_exit(&(trx_doublewrite->mutex));
}

/************************************************************************
Initializes a page for writing to the tablespace. */

void
buf_flush_init_for_writing(
/*=======================*/
byte* page, /* in: page */
dulint newest_lsn, /* in: newest modification lsn to the page */
ulint space, /* in: space id */
ulint page_no) /* in: page number */
{
/* Write the newest modification lsn to the page */
mach_write_to_8(page + FIL_PAGE_LSN, newest_lsn);

mach_write_to_8(page + UNIV_PAGE_SIZE - FIL_PAGE_END_LSN, newest_lsn);

/* Write to the page the space id and page number */

mach_write_to_4(page + FIL_PAGE_SPACE, space);
mach_write_to_4(page + FIL_PAGE_OFFSET, page_no);

/* We overwrite the first 4 bytes of the end lsn field to store
a page checksum */

mach_write_to_4(page + UNIV_PAGE_SIZE - FIL_PAGE_END_LSN,
buf_calc_page_checksum(page));
}
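A hedged, self-contained sketch of the same stamping sequence (not part of this commit): write the newest LSN into the header and the trailer, record the space id and page number, then overwrite the first 4 bytes of the trailer with a checksum. The offsets are illustrative placeholders rather than InnoDB's real field positions, and a trivial checksum stand-in is inlined so the example compiles on its own.

#include <stddef.h>
#include <stdint.h>

enum { PG_SIZE = 16384, PG_SPACE = 0, PG_PAGE_NO = 4,
       PG_LSN = 16, PG_END_LSN = PG_SIZE - 8 };

static void write_be32(unsigned char *p, uint32_t v)
{
  p[0] = (unsigned char)(v >> 24); p[1] = (unsigned char)(v >> 16);
  p[2] = (unsigned char)(v >> 8);  p[3] = (unsigned char)v;
}

static void write_be64(unsigned char *p, uint64_t v)
{
  write_be32(p, (uint32_t)(v >> 32));
  write_be32(p + 4, (uint32_t)v);
}

static uint32_t trivial_checksum(const unsigned char *page)
{
  /* stand-in for the folding sketch shown earlier; it deliberately stops
     before the trailer so it does not cover the bytes about to be replaced */
  uint32_t h = 0;
  size_t i;
  for (i = 0; i < PG_END_LSN; i++)
    h = h * 131 + page[i];
  return h;
}

void init_page_for_writing(unsigned char *page, uint64_t newest_lsn,
                           uint32_t space_id, uint32_t page_no)
{
  write_be64(page + PG_LSN, newest_lsn);      /* LSN in the header    */
  write_be64(page + PG_END_LSN, newest_lsn);  /* LSN again in trailer */
  write_be32(page + PG_SPACE, space_id);
  write_be32(page + PG_PAGE_NO, page_no);
  /* checksum replaces the first 4 bytes of the trailer LSN field */
  write_be32(page + PG_END_LSN, trivial_checksum(page));
}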
/************************************************************************
|
||||
Does an asynchronous write of a buffer page. NOTE: in simulated aio and
|
||||
also when the doublewrite buffer is used, we must call
|
||||
@ -349,23 +377,8 @@ buf_flush_write_block_low(
|
||||
/* Force the log to the disk before writing the modified block */
|
||||
log_flush_up_to(block->newest_modification, LOG_WAIT_ALL_GROUPS);
|
||||
#endif
|
||||
/* Write the newest modification lsn to the page */
|
||||
mach_write_to_8(block->frame + FIL_PAGE_LSN,
|
||||
block->newest_modification);
|
||||
mach_write_to_8(block->frame + UNIV_PAGE_SIZE - FIL_PAGE_END_LSN,
|
||||
block->newest_modification);
|
||||
|
||||
/* Write to the page the space id and page number */
|
||||
|
||||
mach_write_to_4(block->frame + FIL_PAGE_SPACE, block->space);
|
||||
mach_write_to_4(block->frame + FIL_PAGE_OFFSET, block->offset);
|
||||
|
||||
/* We overwrite the first 4 bytes of the end lsn field to store
|
||||
a page checksum */
|
||||
|
||||
mach_write_to_4(block->frame + UNIV_PAGE_SIZE - FIL_PAGE_END_LSN,
|
||||
buf_calc_page_checksum(block->frame));
|
||||
|
||||
buf_flush_init_for_writing(block->frame, block->newest_modification,
|
||||
block->space, block->offset);
|
||||
if (!trx_doublewrite) {
|
||||
fil_io(OS_FILE_WRITE | OS_AIO_SIMULATED_WAKE_LATER,
|
||||
FALSE, block->space, block->offset, 0, UNIV_PAGE_SIZE,
|
||||
|
@ -281,7 +281,8 @@ dict_table_autoinc_initialize(
|
||||
}
|
||||
|
||||
/************************************************************************
|
||||
Gets the next autoinc value, 0 if not yet initialized. */
|
||||
Gets the next autoinc value, 0 if not yet initialized. If initialized,
|
||||
increments the counter by 1. */
|
||||
|
||||
ib_longlong
|
||||
dict_table_autoinc_get(
|
||||
@ -306,6 +307,32 @@ dict_table_autoinc_get(
|
||||
return(value);
|
||||
}
|
||||
|
||||
/************************************************************************
|
||||
Reads the autoinc counter value, 0 if not yet initialized. Does not
|
||||
increment the counter. */
|
||||
|
||||
ib_longlong
|
||||
dict_table_autoinc_read(
|
||||
/*====================*/
|
||||
/* out: value of the counter */
|
||||
dict_table_t* table) /* in: table */
|
||||
{
|
||||
ib_longlong value;
|
||||
|
||||
mutex_enter(&(table->autoinc_mutex));
|
||||
|
||||
if (!table->autoinc_inited) {
|
||||
|
||||
value = 0;
|
||||
} else {
|
||||
value = table->autoinc;
|
||||
}
|
||||
|
||||
mutex_exit(&(table->autoinc_mutex));
|
||||
|
||||
return(value);
|
||||
}
|
||||
|
||||
/************************************************************************
|
||||
Updates the autoinc counter if the value supplied is bigger than the
|
||||
current value. If not inited, does nothing. */
|
||||
@ -648,7 +675,10 @@ dict_table_rename_in_cache(
|
||||
/*=======================*/
|
||||
/* out: TRUE if success */
|
||||
dict_table_t* table, /* in: table */
|
||||
char* new_name) /* in: new name */
|
||||
char* new_name, /* in: new name */
|
||||
ibool rename_also_foreigns)/* in: in ALTER TABLE we want
|
||||
to preserve the original table name
|
||||
in constraints which reference it */
|
||||
{
|
||||
dict_foreign_t* foreign;
|
||||
dict_index_t* index;
|
||||
@ -706,6 +736,41 @@ dict_table_rename_in_cache(
|
||||
index = dict_table_get_next_index(index);
|
||||
}
|
||||
|
||||
if (!rename_also_foreigns) {
|
||||
/* In ALTER TABLE we think of the rename table operation
|
||||
in the direction table -> temporary table (#sql...)
|
||||
as dropping the table with the old name and creating
|
||||
a new with the new name. Thus we kind of drop the
|
||||
constraints from the dictionary cache here. The foreign key
|
||||
constraints will be inherited to the new table from the
|
||||
system tables through a call of dict_load_foreigns. */
|
||||
|
||||
/* Remove the foreign constraints from the cache */
|
||||
foreign = UT_LIST_GET_LAST(table->foreign_list);
|
||||
|
||||
while (foreign != NULL) {
|
||||
dict_foreign_remove_from_cache(foreign);
|
||||
foreign = UT_LIST_GET_LAST(table->foreign_list);
|
||||
}
|
||||
|
||||
/* Reset table field in referencing constraints */
|
||||
|
||||
foreign = UT_LIST_GET_FIRST(table->referenced_list);
|
||||
|
||||
while (foreign != NULL) {
|
||||
foreign->referenced_table = NULL;
|
||||
foreign->referenced_index = NULL;
|
||||
|
||||
foreign = UT_LIST_GET_NEXT(referenced_list, foreign);
|
||||
}
|
||||
|
||||
/* Make the list of referencing constraints empty */
|
||||
|
||||
UT_LIST_INIT(table->referenced_list);
|
||||
|
||||
return(TRUE);
|
||||
}
|
||||
|
||||
/* Update the table name fields in foreign constraints */
|
||||
|
||||
foreign = UT_LIST_GET_FIRST(table->foreign_list);
|
||||
@ -772,8 +837,6 @@ dict_table_remove_from_cache(
|
||||
foreign = UT_LIST_GET_LAST(table->foreign_list);
|
||||
|
||||
while (foreign != NULL) {
|
||||
ut_a(0 == ut_strcmp(foreign->foreign_table_name, table->name));
|
||||
|
||||
dict_foreign_remove_from_cache(foreign);
|
||||
foreign = UT_LIST_GET_LAST(table->foreign_list);
|
||||
}
|
||||
@ -783,8 +846,6 @@ dict_table_remove_from_cache(
|
||||
foreign = UT_LIST_GET_FIRST(table->referenced_list);
|
||||
|
||||
while (foreign != NULL) {
|
||||
ut_a(0 == ut_strcmp(foreign->referenced_table_name,
|
||||
table->name));
|
||||
foreign->referenced_table = NULL;
|
||||
foreign->referenced_index = NULL;
|
||||
|
||||
@ -1632,8 +1693,9 @@ dict_foreign_add_to_cache(
|
||||
{
|
||||
dict_table_t* for_table;
|
||||
dict_table_t* ref_table;
|
||||
dict_foreign_t* for_in_cache = NULL;
|
||||
dict_foreign_t* for_in_cache = NULL;
|
||||
dict_index_t* index;
|
||||
ibool added_to_referenced_list = FALSE;
|
||||
|
||||
ut_ad(mutex_own(&(dict_sys->mutex)));
|
||||
|
||||
@ -1677,6 +1739,7 @@ dict_foreign_add_to_cache(
|
||||
UT_LIST_ADD_LAST(referenced_list,
|
||||
ref_table->referenced_list,
|
||||
for_in_cache);
|
||||
added_to_referenced_list = TRUE;
|
||||
}
|
||||
|
||||
if (for_in_cache->foreign_table == NULL && for_table) {
|
||||
@ -1687,6 +1750,12 @@ dict_foreign_add_to_cache(
|
||||
|
||||
if (index == NULL) {
|
||||
if (for_in_cache == foreign) {
|
||||
if (added_to_referenced_list) {
|
||||
UT_LIST_REMOVE(referenced_list,
|
||||
ref_table->referenced_list,
|
||||
for_in_cache);
|
||||
}
|
||||
|
||||
mem_heap_free(foreign->heap);
|
||||
}
|
||||
|
||||
@ -1806,9 +1875,14 @@ dict_scan_col(
|
||||
return(ptr);
|
||||
}
|
||||
|
||||
if (*ptr == '`') {
|
||||
ptr++;
|
||||
}
|
||||
|
||||
old_ptr = ptr;
|
||||
|
||||
while (!isspace(*ptr) && *ptr != ',' && *ptr != ')') {
|
||||
while (!isspace(*ptr) && *ptr != ',' && *ptr != ')' && *ptr != '`') {
|
||||
|
||||
ptr++;
|
||||
}
|
||||
|
||||
@ -1829,6 +1903,10 @@ dict_scan_col(
|
||||
}
|
||||
}
|
||||
|
||||
if (*ptr == '`') {
|
||||
ptr++;
|
||||
}
|
||||
|
||||
return(ptr);
|
||||
}
|
||||
|
||||
@ -1859,9 +1937,13 @@ dict_scan_table_name(
|
||||
return(ptr);
|
||||
}
|
||||
|
||||
if (*ptr == '`') {
|
||||
ptr++;
|
||||
}
|
||||
|
||||
old_ptr = ptr;
|
||||
|
||||
while (!isspace(*ptr) && *ptr != '(') {
|
||||
while (!isspace(*ptr) && *ptr != '(' && *ptr != '`') {
|
||||
if (*ptr == '.') {
|
||||
dot_ptr = ptr;
|
||||
}
|
||||
@ -1902,6 +1984,10 @@ dict_scan_table_name(
|
||||
|
||||
*table = dict_table_get_low(second_table_name);
|
||||
|
||||
if (*ptr == '`') {
|
||||
ptr++;
|
||||
}
|
||||
|
||||
return(ptr);
|
||||
}
|
||||
|
||||
@ -1944,8 +2030,8 @@ dict_create_foreign_constraints(
|
||||
/*============================*/
|
||||
/* out: error code or DB_SUCCESS */
|
||||
trx_t* trx, /* in: transaction */
|
||||
char* sql_string, /* in: table create statement where
|
||||
foreign keys are declared like:
|
||||
char* sql_string, /* in: table create or ALTER TABLE
|
||||
statement where foreign keys are declared like:
|
||||
FOREIGN KEY (a, b) REFERENCES table2(c, d),
|
||||
table2 can be written also with the database
|
||||
name before it: test.table2; the default
|
||||
@ -1971,10 +2057,11 @@ dict_create_foreign_constraints(
|
||||
if (table == NULL) {
|
||||
return(DB_ERROR);
|
||||
}
|
||||
|
||||
loop:
|
||||
ptr = dict_scan_to(ptr, (char *) "FOREIGN");
|
||||
|
||||
if (*ptr == '\0' || dict_bracket_count(sql_string, ptr) != 1) {
|
||||
if (*ptr == '\0') {
|
||||
|
||||
/* The following call adds the foreign key constraints
|
||||
to the data dictionary system tables on disk */
|
||||
@ -2889,19 +2976,21 @@ dict_field_print_low(
|
||||
}
|
||||
|
||||
/**************************************************************************
|
||||
Sprintfs to a string info on foreign keys of a table. */
|
||||
|
||||
Sprintfs to a string info on foreign keys of a table in a format suitable
|
||||
for CREATE TABLE. */
|
||||
static
|
||||
void
|
||||
dict_print_info_on_foreign_keys(
|
||||
/*============================*/
|
||||
dict_print_info_on_foreign_keys_in_create_format(
|
||||
/*=============================================*/
|
||||
char* buf, /* in: auxiliary buffer of 10000 chars */
|
||||
char* str, /* in/out: pointer to a string */
|
||||
ulint len, /* in: space in str available for info */
|
||||
dict_table_t* table) /* in: table */
|
||||
{
|
||||
|
||||
dict_foreign_t* foreign;
|
||||
ulint i;
|
||||
char* buf2;
|
||||
char buf[10000];
|
||||
|
||||
buf2 = buf;
|
||||
|
||||
@ -2916,11 +3005,93 @@ dict_print_info_on_foreign_keys(
|
||||
}
|
||||
|
||||
while (foreign != NULL) {
|
||||
buf2 += sprintf(buf2, "; (");
|
||||
|
||||
buf2 += sprintf(buf2, ",\n FOREIGN KEY (");
|
||||
|
||||
for (i = 0; i < foreign->n_fields; i++) {
|
||||
buf2 += sprintf(buf2, "`%s`",
|
||||
foreign->foreign_col_names[i]);
|
||||
|
||||
if (i + 1 < foreign->n_fields) {
|
||||
buf2 += sprintf(buf2, ", ");
|
||||
}
|
||||
}
|
||||
|
||||
buf2 += sprintf(buf2, ") REFERENCES `%s` (",
|
||||
foreign->referenced_table_name);
|
||||
/* Change the '/' in the table name to '.' */
|
||||
|
||||
for (i = ut_strlen(buf); i > 0; i--) {
|
||||
if (buf[i] == '/') {
|
||||
|
||||
buf[i] = '.';
|
||||
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
for (i = 0; i < foreign->n_fields; i++) {
|
||||
buf2 += sprintf(buf2, "`%s`",
|
||||
foreign->referenced_col_names[i]);
|
||||
if (i + 1 < foreign->n_fields) {
|
||||
buf2 += sprintf(buf2, ", ");
|
||||
}
|
||||
}
|
||||
|
||||
buf2 += sprintf(buf2, ")");
|
||||
|
||||
foreign = UT_LIST_GET_NEXT(foreign_list, foreign);
|
||||
}
|
||||
|
||||
mutex_exit(&(dict_sys->mutex));
|
||||
|
||||
buf[len - 1] = '\0';
|
||||
ut_memcpy(str, buf, len);
|
||||
}
|
||||
|
||||
/**************************************************************************
|
||||
Sprintfs to a string info on foreign keys of a table. */
|
||||
|
||||
void
|
||||
dict_print_info_on_foreign_keys(
|
||||
/*============================*/
|
||||
ibool create_table_format, /* in: if TRUE then print in
|
||||
a format suitable to be inserted into
|
||||
a CREATE TABLE, otherwise in the format
|
||||
of SHOW TABLE STATUS */
|
||||
char* str, /* in/out: pointer to a string */
|
||||
ulint len, /* in: space in str available for info */
|
||||
dict_table_t* table) /* in: table */
|
||||
{
|
||||
dict_foreign_t* foreign;
|
||||
ulint i;
|
||||
char* buf2;
|
||||
char buf[10000];
|
||||
|
||||
if (create_table_format) {
|
||||
dict_print_info_on_foreign_keys_in_create_format(
|
||||
buf, str, len, table);
|
||||
return;
|
||||
}
|
||||
|
||||
buf2 = buf;
|
||||
|
||||
mutex_enter(&(dict_sys->mutex));
|
||||
|
||||
foreign = UT_LIST_GET_FIRST(table->foreign_list);
|
||||
|
||||
if (foreign == NULL) {
|
||||
mutex_exit(&(dict_sys->mutex));
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
while (foreign != NULL) {
|
||||
buf2 += sprintf(buf2, "; (");
|
||||
|
||||
for (i = 0; i < foreign->n_fields; i++) {
|
||||
buf2 += sprintf(buf2, "%s",
|
||||
foreign->foreign_col_names[i]);
|
||||
|
||||
if (i + 1 < foreign->n_fields) {
|
||||
buf2 += sprintf(buf2, " ");
|
||||
}
|
||||
|
@ -688,7 +688,16 @@ dict_load_indexes(
|
||||
|
||||
dict_load_fields(table, index, heap);
|
||||
|
||||
dict_index_add_to_cache(table, index);
|
||||
if (index->type & DICT_CLUSTERED == 0
|
||||
&& NULL == dict_table_get_first_index(table)) {
|
||||
|
||||
fprintf(stderr,
|
||||
"InnoDB: Error: trying to load index %s for table %s\n"
|
||||
"InnoDB: but the first index was not clustered\n",
|
||||
index->name, table->name);
|
||||
} else {
|
||||
dict_index_add_to_cache(table, index);
|
||||
}
|
||||
}
|
||||
|
||||
btr_pcur_move_to_next_user_rec(&pcur, &mtr);
|
||||
|
@ -89,8 +89,8 @@ struct fil_node_struct {
|
||||
char* name; /* the file name or path */
|
||||
ibool open; /* TRUE if file open */
|
||||
os_file_t handle; /* OS handle to the file, if file open */
|
||||
ulint size; /* size of the file in database blocks
|
||||
(where the possible last incomplete block
|
||||
ulint size; /* size of the file in database pages
|
||||
(where the possible last incomplete megabyte
|
||||
is ignored) */
|
||||
ulint n_pending;
|
||||
/* count of pending i/o-ops on this file */
|
||||
@ -945,6 +945,76 @@ fil_node_complete_io(
|
||||
}
|
||||
}
|
||||
|
||||
/**************************************************************************
|
||||
Tries to extend a data file by the number of pages given. Any fractions of a
|
||||
megabyte are ignored. */
|
||||
|
||||
ibool
|
||||
fil_extend_last_data_file(
|
||||
/*======================*/
|
||||
/* out: TRUE if success, also if we run
|
||||
out of disk space we may return TRUE */
|
||||
ulint* actual_increase,/* out: number of pages we were able to
|
||||
extend, here the orginal size of the file and
|
||||
the resulting size of the file are rounded
|
||||
downwards to a full megabyte, and the
|
||||
difference expressed in pages is returned */
|
||||
ulint size_increase) /* in: try to extend this many pages */
|
||||
{
|
||||
fil_node_t* node;
|
||||
fil_space_t* space;
|
||||
fil_system_t* system = fil_system;
|
||||
byte* buf;
|
||||
ibool success;
|
||||
ulint i;
|
||||
|
||||
mutex_enter(&(system->mutex));
|
||||
|
||||
HASH_SEARCH(hash, system->spaces, 0, space, space->id == 0);
|
||||
|
||||
ut_a(space);
|
||||
|
||||
node = UT_LIST_GET_LAST(space->chain);
|
||||
|
||||
fil_node_prepare_for_io(node, system, space);
|
||||
|
||||
buf = mem_alloc(1024 * 1024);
|
||||
|
||||
memset(buf, '\0', 1024 * 1024);
|
||||
|
||||
for (i = 0; i < size_increase / ((1024 * 1024) / UNIV_PAGE_SIZE); i++) {
|
||||
|
||||
success = os_file_write(node->name, node->handle, buf,
|
||||
(node->size << UNIV_PAGE_SIZE_SHIFT) & 0xFFFFFFFF,
|
||||
node->size >> (32 - UNIV_PAGE_SIZE_SHIFT),
|
||||
1024 * 1024);
|
||||
|
||||
if (!success) {
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
node->size += ((1024 * 1024) / UNIV_PAGE_SIZE);
|
||||
space->size += ((1024 * 1024) / UNIV_PAGE_SIZE);
|
||||
|
||||
os_has_said_disk_full = FALSE;
|
||||
}
|
||||
|
||||
mem_free(buf);
|
||||
|
||||
fil_node_complete_io(node, system, OS_FILE_WRITE);
|
||||
|
||||
mutex_exit(&(system->mutex));
|
||||
|
||||
*actual_increase = i * ((1024 * 1024) / UNIV_PAGE_SIZE);
|
||||
|
||||
fil_flush(0);
|
||||
|
||||
srv_data_file_sizes[srv_n_data_files - 1] += *actual_increase;
|
||||
|
||||
return(TRUE);
|
||||
}
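
The byte offset handed to os_file_write() in the loop above is a 64-bit quantity carried as two 32-bit halves. A minimal stand-alone sketch of that arithmetic; the page-size shift and the file size below are assumptions made only for illustration, not values taken from this changeset:

#include <stdio.h>

typedef unsigned long ulint;

#define UNIV_PAGE_SIZE_SHIFT	14
#define UNIV_PAGE_SIZE		(1 << UNIV_PAGE_SIZE_SHIFT)

int
main(void)
{
	ulint	size_in_pages = 700000;	/* hypothetical current file size */
	ulint	offset_low;
	ulint	offset_high;

	/* Same split as in fil_extend_last_data_file(): the low and the
	high 32 bits of size_in_pages * UNIV_PAGE_SIZE */
	offset_low  = (size_in_pages << UNIV_PAGE_SIZE_SHIFT) & 0xFFFFFFFFUL;
	offset_high = size_in_pages >> (32 - UNIV_PAGE_SIZE_SHIFT);

	printf("byte offset: high %lu, low %lu\n", offset_high, offset_low);

	return(0);
}
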
/************************************************************************
|
||||
Reads or writes data. This operation is asynchronous (aio). */
|
||||
|
||||
@ -966,9 +1036,9 @@ fil_io(
|
||||
ulint byte_offset, /* in: remainder of offset in bytes; in
|
||||
aio this must be divisible by the OS block
|
||||
size */
|
||||
ulint len, /* in: how many bytes to read; this must
|
||||
not cross a file boundary; in aio this must
|
||||
be a block size multiple */
|
||||
ulint len, /* in: how many bytes to read or write; this
|
||||
must not cross a file boundary; in aio this
|
||||
must be a block size multiple */
|
||||
void* buf, /* in/out: buffer where to store read data
|
||||
or from where to write; in aio this must be
|
||||
appropriately aligned */
|
||||
|
@ -50,7 +50,7 @@ descriptor page, but used only in the first. */
|
||||
#define FSP_FREE_LIMIT 12 /* Minimum page number for which the
|
||||
free list has not been initialized:
|
||||
the pages >= this limit are, by
|
||||
definition free */
|
||||
definition, free */
|
||||
#define FSP_LOWEST_NO_WRITE 16 /* The lowest page offset for which
|
||||
the page has not been written to disk
|
||||
(if it has been written, we know that
|
||||
@ -898,6 +898,106 @@ fsp_header_inc_size(
|
||||
mlog_write_ulint(header + FSP_SIZE, size + size_inc, MLOG_4BYTES, mtr);
|
||||
}
|
||||
|
||||
/**************************************************************************
|
||||
Gets the current free limit of a tablespace. The free limit means the
|
||||
place of the first page which has never been put to the free list
|
||||
for allocation. The space above that address is initialized to zero.
|
||||
Sets also the global variable log_fsp_current_free_limit. */
|
||||
|
||||
ulint
|
||||
fsp_header_get_free_limit(
|
||||
/*======================*/
|
||||
/* out: free limit in megabytes */
|
||||
ulint space) /* in: space id */
|
||||
{
|
||||
fsp_header_t* header;
|
||||
ulint limit;
|
||||
mtr_t mtr;
|
||||
|
||||
ut_a(space == 0); /* We have only one log_fsp_current_... variable */
|
||||
|
||||
mtr_start(&mtr);
|
||||
|
||||
mtr_x_lock(fil_space_get_latch(space), &mtr);
|
||||
|
||||
header = fsp_get_space_header(space, &mtr);
|
||||
|
||||
limit = mtr_read_ulint(header + FSP_FREE_LIMIT, MLOG_4BYTES, &mtr);
|
||||
|
||||
limit = limit / ((1024 * 1024) / UNIV_PAGE_SIZE);
|
||||
|
||||
log_fsp_current_free_limit_set_and_checkpoint(limit);
|
||||
|
||||
mtr_commit(&mtr);
|
||||
|
||||
return(limit);
|
||||
}
|
||||
|
||||
/***************************************************************************
|
||||
Tries to extend the last data file if it is defined as auto-extending. */
|
||||
static
|
||||
ibool
|
||||
fsp_try_extend_last_file(
|
||||
/*=====================*/
|
||||
/* out: FALSE if not auto-extending */
|
||||
ulint* actual_increase,/* out: actual increase in pages */
|
||||
ulint space, /* in: space */
|
||||
fsp_header_t* header, /* in: space header */
|
||||
mtr_t* mtr) /* in: mtr */
|
||||
{
|
||||
ulint size;
|
||||
ulint size_increase;
|
||||
ibool success;
|
||||
|
||||
ut_a(space == 0);
|
||||
|
||||
*actual_increase = 0;
|
||||
|
||||
if (!srv_auto_extend_last_data_file) {
|
||||
|
||||
return(FALSE);
|
||||
}
|
||||
|
||||
size = mtr_read_ulint(header + FSP_SIZE, MLOG_4BYTES, mtr);
|
||||
|
||||
if (srv_last_file_size_max != 0) {
|
||||
if (srv_last_file_size_max
|
||||
< srv_data_file_sizes[srv_n_data_files - 1]) {
|
||||
|
||||
fprintf(stderr,
|
||||
"InnoDB: Error: Last data file size is %lu, max size allowed %lu\n",
|
||||
srv_data_file_sizes[srv_n_data_files - 1],
|
||||
srv_last_file_size_max);
|
||||
}
|
||||
|
||||
size_increase = srv_last_file_size_max
|
||||
- srv_data_file_sizes[srv_n_data_files - 1];
|
||||
if (size_increase > SRV_AUTO_EXTEND_INCREMENT) {
|
||||
size_increase = SRV_AUTO_EXTEND_INCREMENT;
|
||||
}
|
||||
} else {
|
||||
size_increase = SRV_AUTO_EXTEND_INCREMENT;
|
||||
}
|
||||
|
||||
if (size_increase == 0) {
|
||||
return(TRUE);
|
||||
}
|
||||
|
||||
/* Extend the data file. If we are not able to extend
|
||||
the full requested length, the function tells us
|
||||
the number of full megabytes (but the unit is pages!)
|
||||
we were able to extend. */
|
||||
|
||||
success = fil_extend_last_data_file(actual_increase, size_increase);
|
||||
|
||||
if (success) {
|
||||
mlog_write_ulint(header + FSP_SIZE, size + *actual_increase,
|
||||
MLOG_4BYTES, mtr);
|
||||
}
|
||||
|
||||
return(TRUE);
|
||||
}
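
The amount asked from fil_extend_last_data_file() above is clamped twice: by the configured maximum size of the last data file and by SRV_AUTO_EXTEND_INCREMENT. A small sketch of that clamping; the sizes and the 512-page increment (8 MB at a 16 kB page) are made-up values for illustration only:

#include <stdio.h>

typedef unsigned long ulint;

#define SRV_AUTO_EXTEND_INCREMENT	512	/* pages, assuming 16 kB pages */

int
main(void)
{
	ulint	last_file_size	= 6000;	/* current size of the last file, in pages */
	ulint	last_file_max	= 6200;	/* srv_last_file_size_max, in pages */
	ulint	size_increase;

	if (last_file_max != 0) {
		size_increase = last_file_max - last_file_size;

		if (size_increase > SRV_AUTO_EXTEND_INCREMENT) {
			size_increase = SRV_AUTO_EXTEND_INCREMENT;
		}
	} else {
		size_increase = SRV_AUTO_EXTEND_INCREMENT;
	}

	printf("will try to extend by %lu pages\n", size_increase);

	return(0);
}
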
/**************************************************************************
|
||||
Puts new extents to the free list if there are free extents above the free
|
||||
limit. If an extent happens to contain an extent descriptor page, the extent
|
||||
@ -917,8 +1017,9 @@ fsp_fill_free_list(
|
||||
ulint frag_n_used;
|
||||
page_t* descr_page;
|
||||
page_t* ibuf_page;
|
||||
mtr_t ibuf_mtr;
|
||||
ulint actual_increase;
|
||||
ulint i;
|
||||
mtr_t ibuf_mtr;
|
||||
|
||||
ut_ad(header && mtr);
|
||||
|
||||
@ -926,12 +1027,28 @@ fsp_fill_free_list(
|
||||
size = mtr_read_ulint(header + FSP_SIZE, MLOG_4BYTES, mtr);
|
||||
limit = mtr_read_ulint(header + FSP_FREE_LIMIT, MLOG_4BYTES, mtr);
|
||||
|
||||
if (srv_auto_extend_last_data_file
|
||||
&& size < limit + FSP_EXTENT_SIZE * FSP_FREE_ADD) {
|
||||
|
||||
/* Try to increase the last data file size */
|
||||
fsp_try_extend_last_file(&actual_increase, space, header,
|
||||
mtr);
|
||||
size = mtr_read_ulint(header + FSP_SIZE, MLOG_4BYTES, mtr);
|
||||
}
|
||||
|
||||
i = limit;
|
||||
|
||||
while ((i + FSP_EXTENT_SIZE <= size) && (count < FSP_FREE_ADD)) {
|
||||
|
||||
mlog_write_ulint(header + FSP_FREE_LIMIT, i + FSP_EXTENT_SIZE,
|
||||
MLOG_4BYTES, mtr);
|
||||
|
||||
/* Update the free limit info in the log system and make
|
||||
a checkpoint */
|
||||
log_fsp_current_free_limit_set_and_checkpoint(
|
||||
(i + FSP_EXTENT_SIZE)
|
||||
/ ((1024 * 1024) / UNIV_PAGE_SIZE));
|
||||
|
||||
if (0 == i % XDES_DESCRIBED_PER_PAGE) {
|
||||
|
||||
/* We are going to initialize a new descriptor page
|
||||
@ -1172,6 +1289,7 @@ fsp_free_page(
|
||||
xdes_t* descr;
|
||||
ulint state;
|
||||
ulint frag_n_used;
|
||||
char buf[1000];
|
||||
|
||||
ut_ad(mtr);
|
||||
|
||||
@ -1183,10 +1301,38 @@ fsp_free_page(
|
||||
|
||||
state = xdes_get_state(descr, mtr);
|
||||
|
||||
ut_a((state == XDES_FREE_FRAG) || (state == XDES_FULL_FRAG));
|
||||
if (state != XDES_FREE_FRAG && state != XDES_FULL_FRAG) {
|
||||
fprintf(stderr,
|
||||
"InnoDB: Error: File space extent descriptor of page %lu has state %lu\n",
|
||||
page, state);
|
||||
ut_sprintf_buf(buf, ((byte*)descr) - 50, 200);
|
||||
|
||||
ut_a(xdes_get_bit(descr, XDES_FREE_BIT, page % FSP_EXTENT_SIZE, mtr)
|
||||
== FALSE);
|
||||
fprintf(stderr, "InnoDB: Dump of descriptor: %s\n", buf);
|
||||
|
||||
if (state == XDES_FREE) {
|
||||
/* We put here some fault tolerance: if the page
|
||||
is already free, return without doing anything! */
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
ut_a(0);
|
||||
}
|
||||
|
||||
if (xdes_get_bit(descr, XDES_FREE_BIT, page % FSP_EXTENT_SIZE, mtr)
|
||||
== TRUE) {
|
||||
fprintf(stderr,
|
||||
"InnoDB: Error: File space extent descriptor of page %lu says it is free\n",
|
||||
page);
|
||||
ut_sprintf_buf(buf, ((byte*)descr) - 50, 200);
|
||||
|
||||
fprintf(stderr, "InnoDB: Dump of descriptor: %s\n", buf);
|
||||
|
||||
/* We put here some fault tolerance: if the page
|
||||
is already free, return without doing anything! */
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
xdes_set_bit(descr, XDES_FREE_BIT, page % FSP_EXTENT_SIZE, TRUE, mtr);
|
||||
xdes_set_bit(descr, XDES_CLEAN_BIT, page % FSP_EXTENT_SIZE, TRUE, mtr);
|
||||
@ -2243,13 +2389,15 @@ fsp_reserve_free_extents(
|
||||
mtr_t* mtr) /* in: mtr */
|
||||
{
|
||||
fsp_header_t* space_header;
|
||||
rw_lock_t* latch;
|
||||
ulint n_free_list_ext;
|
||||
ulint free_limit;
|
||||
ulint size;
|
||||
ulint n_free;
|
||||
ulint n_free_up;
|
||||
ulint reserve;
|
||||
rw_lock_t* latch;
|
||||
ibool success;
|
||||
ulint n_pages_added;
|
||||
|
||||
ut_ad(mtr);
|
||||
ut_ad(!mutex_own(&kernel_mutex)
|
||||
@ -2260,7 +2408,7 @@ fsp_reserve_free_extents(
|
||||
mtr_x_lock(latch, mtr);
|
||||
|
||||
space_header = fsp_get_space_header(space, mtr);
|
||||
|
||||
try_again:
|
||||
size = mtr_read_ulint(space_header + FSP_SIZE, MLOG_4BYTES, mtr);
|
||||
|
||||
n_free_list_ext = flst_get_len(space_header + FSP_FREE, mtr);
|
||||
@ -2291,7 +2439,7 @@ fsp_reserve_free_extents(
|
||||
|
||||
if (n_free <= reserve + n_ext) {
|
||||
|
||||
return(FALSE);
|
||||
goto try_to_extend;
|
||||
}
|
||||
} else if (alloc_type == FSP_UNDO) {
|
||||
/* We reserve 1 % of the space size to cleaning operations */
|
||||
@ -2300,13 +2448,26 @@ fsp_reserve_free_extents(
|
||||
|
||||
if (n_free <= reserve + n_ext) {
|
||||
|
||||
return(FALSE);
|
||||
goto try_to_extend;
|
||||
}
|
||||
} else {
|
||||
ut_a(alloc_type == FSP_CLEANING);
|
||||
}
|
||||
|
||||
return(fil_space_reserve_free_extents(space, n_free, n_ext));
|
||||
success = fil_space_reserve_free_extents(space, n_free, n_ext);
|
||||
|
||||
if (success) {
|
||||
return(TRUE);
|
||||
}
|
||||
try_to_extend:
|
||||
success = fsp_try_extend_last_file(&n_pages_added, space,
|
||||
space_header, mtr);
|
||||
if (success && n_pages_added > 0) {
|
||||
|
||||
goto try_again;
|
||||
}
|
||||
|
||||
return(FALSE);
|
||||
}
|
||||
|
||||
/**************************************************************************
|
||||
|
@ -28,6 +28,16 @@ a margin of replaceable pages there. */
|
||||
void
|
||||
buf_flush_free_margin(void);
|
||||
/*=======================*/
|
||||
/************************************************************************
|
||||
Initializes a page for writing to the tablespace. */
|
||||
|
||||
void
|
||||
buf_flush_init_for_writing(
|
||||
/*=======================*/
|
||||
byte* page, /* in: page */
|
||||
dulint newest_lsn, /* in: newest modification lsn to the page */
|
||||
ulint space, /* in: space id */
|
||||
ulint page_no); /* in: page number */
|
||||
/***********************************************************************
|
||||
This utility flushes dirty blocks from the end of the LRU list or flush_list.
|
||||
NOTE 1: in the case of an LRU flush the calling thread may own latches to
|
||||
|
@ -105,7 +105,8 @@ dict_table_autoinc_initialize(
|
||||
dict_table_t* table, /* in: table */
|
||||
ib_longlong value); /* in: value which was assigned to a row */
|
||||
/************************************************************************
|
||||
Gets the next autoinc value, 0 if not yet initialized. */
|
||||
Gets the next autoinc value, 0 if not yet initialized. If initialized,
|
||||
increments the counter by 1. */
|
||||
|
||||
ib_longlong
|
||||
dict_table_autoinc_get(
|
||||
@ -113,6 +114,15 @@ dict_table_autoinc_get(
|
||||
/* out: value for a new row, or 0 */
|
||||
dict_table_t* table); /* in: table */
|
||||
/************************************************************************
|
||||
Reads the autoinc counter value, 0 if not yet initialized. Does not
|
||||
increment the counter. */
|
||||
|
||||
ib_longlong
|
||||
dict_table_autoinc_read(
|
||||
/*====================*/
|
||||
/* out: value of the counter */
|
||||
dict_table_t* table); /* in: table */
|
||||
/************************************************************************
|
||||
Updates the autoinc counter if the value supplied is bigger than the
|
||||
current value. If not inited, does nothing. */
|
||||
|
||||
@ -143,7 +153,10 @@ dict_table_rename_in_cache(
|
||||
/*=======================*/
|
||||
/* out: TRUE if success */
|
||||
dict_table_t* table, /* in: table */
|
||||
char* new_name); /* in: new name */
|
||||
char* new_name, /* in: new name */
|
||||
ibool rename_also_foreigns);/* in: in ALTER TABLE we want
|
||||
to preserve the original table name
|
||||
in constraints which reference it */
|
||||
/**************************************************************************
|
||||
Adds a foreign key constraint object to the dictionary cache. May free
|
||||
the object if there already is an object with the same identifier in.
|
||||
@ -284,6 +297,10 @@ Sprintfs to a string info on foreign keys of a table. */
|
||||
void
|
||||
dict_print_info_on_foreign_keys(
|
||||
/*============================*/
|
||||
ibool create_table_format, /* in: if TRUE then print in
|
||||
a format suitable to be inserted into
|
||||
a CREATE TABLE, otherwise in the format
|
||||
of SHOW TABLE STATUS */
|
||||
char* str, /* in/out: pointer to a string */
|
||||
ulint len, /* in: space in str available for info */
|
||||
dict_table_t* table); /* in: table */
|
||||
|
@ -64,8 +64,10 @@ extern fil_addr_t fil_addr_null;
|
||||
#define FIL_PAGE_DATA 38 /* start of the data on the page */
|
||||
|
||||
/* File page trailer */
|
||||
#define FIL_PAGE_END_LSN 8 /* this should be same as
|
||||
FIL_PAGE_LSN */
|
||||
#define FIL_PAGE_END_LSN 8 /* the low 4 bytes of this are used
|
||||
to store the page checksum, the
|
||||
last 4 bytes should be identical
|
||||
to the last 4 bytes of FIL_PAGE_LSN */
|
||||
#define FIL_PAGE_DATA_END 8
|
||||
|
||||
/* File page types */
|
||||
@ -134,6 +136,21 @@ fil_space_truncate_start(
|
||||
ulint trunc_len); /* in: truncate by this much; it is an error
|
||||
if this does not equal to the combined size of
|
||||
some initial files in the space */
|
||||
/**************************************************************************
|
||||
Tries to extend a data file by the number of pages given. Any fractions of a
|
||||
megabyte are ignored. */
|
||||
|
||||
ibool
|
||||
fil_extend_last_data_file(
|
||||
/*======================*/
|
||||
/* out: TRUE if success, also if we run
|
||||
out of disk space we may return TRUE */
|
||||
ulint* actual_increase,/* out: number of pages we were able to
|
||||
extend, here the original size of the file and
|
||||
the resulting size of the file are rounded
|
||||
downwards to a full megabyte, and the
|
||||
difference expressed in pages is returned */
|
||||
ulint size_increase); /* in: try to extend this many pages */
|
||||
/***********************************************************************
|
||||
Frees a space object from a file system. Closes the files in the chain
|
||||
but does not delete them. */
|
||||
|
@ -46,6 +46,17 @@ void
|
||||
fsp_init(void);
|
||||
/*==========*/
|
||||
/**************************************************************************
|
||||
Gets the current free limit of a tablespace. The free limit means the
|
||||
place of the first page which has never been put to the free list
|
||||
for allocation. The space above that address is initialized to zero.
|
||||
Sets also the global variable log_fsp_current_free_limit. */
|
||||
|
||||
ulint
|
||||
fsp_header_get_free_limit(
|
||||
/*======================*/
|
||||
/* out: free limit in megabytes */
|
||||
ulint space); /* in: space id */
|
||||
/**************************************************************************
|
||||
Initializes the space header of a new created space. */
|
||||
|
||||
void
|
||||
|
@ -26,6 +26,32 @@ extern ibool log_debug_writes;
|
||||
#define LOG_WAIT_ALL_GROUPS 93
|
||||
#define LOG_MAX_N_GROUPS 32
|
||||
|
||||
/********************************************************************
|
||||
Sets the global variable log_fsp_current_free_limit. Also makes a checkpoint,
|
||||
so that we know that the limit has been written to a log checkpoint field
|
||||
on disk. */
|
||||
|
||||
void
|
||||
log_fsp_current_free_limit_set_and_checkpoint(
|
||||
/*==========================================*/
|
||||
ulint limit); /* in: limit to set */
|
||||
/***********************************************************************
|
||||
Calculates where in log files we find a specified lsn. */
|
||||
|
||||
ulint
|
||||
log_calc_where_lsn_is(
|
||||
/*==================*/
|
||||
/* out: log file number */
|
||||
ib_longlong* log_file_offset, /* out: offset in that file
|
||||
(including the header) */
|
||||
dulint first_header_lsn, /* in: first log file start
|
||||
lsn */
|
||||
dulint lsn, /* in: lsn whose position to
|
||||
determine */
|
||||
ulint n_log_files, /* in: total number of log
|
||||
files */
|
||||
ib_longlong log_file_size); /* in: log file size
|
||||
(including the header) */
|
||||
/****************************************************************
|
||||
Writes to the log the string given. The log must be released with
|
||||
log_release. */
|
||||
@ -225,6 +251,16 @@ Writes checkpoint info to groups. */
|
||||
void
|
||||
log_groups_write_checkpoint_info(void);
|
||||
/*==================================*/
|
||||
/**********************************************************
|
||||
Writes info to a buffer of a log group when log files are created in
|
||||
backup restoration. */
|
||||
|
||||
void
|
||||
log_reset_first_header_and_checkpoint(
|
||||
/*==================================*/
|
||||
byte* hdr_buf,/* in: buffer which will be written to the start
|
||||
of the first log file */
|
||||
dulint lsn); /* in: lsn of the start of the first log file */
|
||||
/************************************************************************
|
||||
Starts an archiving operation. */
|
||||
|
||||
@ -507,7 +543,16 @@ extern log_t* log_sys;
|
||||
+ LOG_MAX_N_GROUPS * 8)
|
||||
#define LOG_CHECKPOINT_CHECKSUM_1 LOG_CHECKPOINT_ARRAY_END
|
||||
#define LOG_CHECKPOINT_CHECKSUM_2 (4 + LOG_CHECKPOINT_ARRAY_END)
|
||||
#define LOG_CHECKPOINT_SIZE (8 + LOG_CHECKPOINT_ARRAY_END)
|
||||
#define LOG_CHECKPOINT_FSP_FREE_LIMIT (8 + LOG_CHECKPOINT_ARRAY_END)
|
||||
/* current fsp free limit in the
|
||||
tablespace, in units of one megabyte */
|
||||
#define LOG_CHECKPOINT_FSP_MAGIC_N (12 + LOG_CHECKPOINT_ARRAY_END)
|
||||
/* this magic number tells if the
|
||||
checkpoint contains the above field:
|
||||
the field was added to InnoDB-3.23.50 */
|
||||
#define LOG_CHECKPOINT_SIZE (16 + LOG_CHECKPOINT_ARRAY_END)
|
||||
|
||||
#define LOG_CHECKPOINT_FSP_MAGIC_N_VAL 1441231243
|
||||
|
||||
/* Offsets of a log file header */
|
||||
#define LOG_GROUP_ID 0 /* log group number */
|
||||
|
@ -15,6 +15,39 @@ Created 9/20/1997 Heikki Tuuri
|
||||
#include "hash0hash.h"
|
||||
#include "log0log.h"
|
||||
|
||||
/***********************************************************************
|
||||
Reads the checkpoint info needed in hot backup. */
|
||||
|
||||
ibool
|
||||
recv_read_cp_info_for_backup(
|
||||
/*=========================*/
|
||||
/* out: TRUE if success */
|
||||
byte* hdr, /* in: buffer containing the log group header */
|
||||
dulint* lsn, /* out: checkpoint lsn */
|
||||
ulint* offset, /* out: checkpoint offset in the log group */
|
||||
ulint* fsp_limit,/* out: fsp limit, 1000000000 if the database
|
||||
is running with < version 3.23.50 of InnoDB */
|
||||
dulint* cp_no, /* out: checkpoint number */
|
||||
dulint* first_header_lsn);
|
||||
/* out: lsn of the start of the first log file */
|
||||
/***********************************************************************
|
||||
Scans the log segment and n_bytes_scanned is set to the length of valid
|
||||
log scanned. */
|
||||
|
||||
void
|
||||
recv_scan_log_seg_for_backup(
|
||||
/*=========================*/
|
||||
byte* buf, /* in: buffer containing log data */
|
||||
ulint buf_len, /* in: data length in that buffer */
|
||||
dulint* scanned_lsn, /* in/out: lsn of buffer start,
|
||||
we return scanned lsn */
|
||||
ulint* scanned_checkpoint_no,
|
||||
/* in/out: 4 lowest bytes of the
|
||||
highest scanned checkpoint number so
|
||||
far */
|
||||
ulint* n_bytes_scanned);/* out: how much we were able to
|
||||
scan, smaller than buf_len if log
|
||||
data ended here */
|
||||
/***********************************************************************
|
||||
Returns TRUE if recovery is currently running. */
|
||||
UNIV_INLINE
|
||||
@ -35,6 +68,10 @@ read in, or also for a page already in the buffer pool. */
|
||||
void
|
||||
recv_recover_page(
|
||||
/*==============*/
|
||||
ibool recover_backup, /* in: TRUE if we are recovering a backup
|
||||
page: then we do not acquire any latches
|
||||
since the page was read in outside the
|
||||
buffer pool */
|
||||
ibool just_read_in, /* in: TRUE if the i/o-handler calls this for
|
||||
a freshly read page */
|
||||
page_t* page, /* in: buffer page */
|
||||
@ -69,8 +106,15 @@ recv_scan_log_recs(
|
||||
/*===============*/
|
||||
/* out: TRUE if limit_lsn has been reached, or
|
||||
not able to scan any more in this log group */
|
||||
ibool apply_automatically,/* in: TRUE if we want this function to
|
||||
apply log records automatically when the
|
||||
hash table becomes full; in the hot backup tool
|
||||
the tool does the applying, not this
|
||||
function */
|
||||
ulint available_memory,/* in: we let the hash table of recs to grow
|
||||
to this size, at the maximum */
|
||||
ibool store_to_hash, /* in: TRUE if the records should be stored
|
||||
to the hash table; this is set FALSE if just
|
||||
to the hash table; this is set to FALSE if just
|
||||
debug checking is needed */
|
||||
byte* buf, /* in: buffer containing a log segment or
|
||||
garbage */
|
||||
@ -92,6 +136,16 @@ recv_reset_logs(
|
||||
ibool new_logs_created);/* in: TRUE if resetting logs is done
|
||||
at the log creation; FALSE if it is done
|
||||
after archive recovery */
|
||||
/**********************************************************
|
||||
Creates new log files after a backup has been restored. */
|
||||
|
||||
void
|
||||
recv_reset_log_files_for_backup(
|
||||
/*============================*/
|
||||
char* log_dir, /* in: log file directory path */
|
||||
ulint n_log_files, /* in: number of log files */
|
||||
ulint log_file_size, /* in: log file size */
|
||||
dulint lsn); /* in: new start lsn */
|
||||
/************************************************************
|
||||
Creates the recovery system. */
|
||||
|
||||
@ -102,8 +156,11 @@ recv_sys_create(void);
|
||||
Inits the recovery system for a recovery operation. */
|
||||
|
||||
void
|
||||
recv_sys_init(void);
|
||||
/*===============*/
|
||||
recv_sys_init(
|
||||
/*==========*/
|
||||
ibool recover_from_backup, /* in: TRUE if this is called
|
||||
to recover from a hot backup */
|
||||
ulint available_memory); /* in: available memory in bytes */
|
||||
/***********************************************************************
|
||||
Empties the hash table of stored log records, applying them to appropriate
|
||||
pages. */
|
||||
@ -118,6 +175,17 @@ recv_apply_hashed_log_recs(
|
||||
disk and invalidated in buffer pool: this
|
||||
alternative means that no new log records
|
||||
can be generated during the application */
|
||||
/***********************************************************************
|
||||
Applies log records in the hash table to a backup. */
|
||||
|
||||
void
|
||||
recv_apply_log_recs_for_backup(
|
||||
/*===========================*/
|
||||
ulint n_data_files, /* in: number of data files */
|
||||
char** data_files, /* in: array containing the paths to the
|
||||
data files */
|
||||
ulint* file_sizes); /* in: sizes of the data files in database
|
||||
pages */
|
||||
/************************************************************
|
||||
Recovers from archived log files, and also from log files, if they exist. */
|
||||
|
||||
@ -260,6 +328,14 @@ extern ibool recv_recovery_on;
|
||||
extern ibool recv_no_ibuf_operations;
|
||||
extern ibool recv_needed_recovery;
|
||||
|
||||
/* Size of the parsing buffer; it must accommodate RECV_SCAN_SIZE many
|
||||
times! */
|
||||
#define RECV_PARSING_BUF_SIZE (2 * 1024 * 1024)
|
||||
|
||||
/* Size of block reads when the log groups are scanned forward to do a
|
||||
roll-forward */
|
||||
#define RECV_SCAN_SIZE (4 * UNIV_PAGE_SIZE)
|
||||
|
||||
/* States of recv_addr_struct */
|
||||
#define RECV_NOT_PROCESSED 71
|
||||
#define RECV_BEING_READ 72
|
||||
|
@ -41,11 +41,11 @@ page buffer pool; the latter method is used for very big heaps */
|
||||
|
||||
/* The following start size is used for the first block in the memory heap if
|
||||
the size is not specified, i.e., 0 is given as the parameter in the call of
|
||||
create. The standard size is the maximum size of the blocks used for
|
||||
create. The standard size is the maximum (payload) size of the blocks used for
|
||||
allocations of small buffers. */
|
||||
|
||||
#define MEM_BLOCK_START_SIZE 64
|
||||
#define MEM_BLOCK_STANDARD_SIZE 8192
|
||||
#define MEM_BLOCK_STANDARD_SIZE 8000
|
||||
|
||||
/* If a memory heap is allowed to grow into the buffer pool, the following
|
||||
is the maximum size for a single allocated buffer: */
|
||||
|
@ -11,6 +11,12 @@ Created 10/21/1995 Heikki Tuuri
|
||||
|
||||
#include "univ.i"
|
||||
|
||||
|
||||
/* If the following is set to TRUE, we do not call os_file_flush in every
|
||||
os_file_write */
|
||||
extern ibool os_do_not_call_flush_at_each_write;
|
||||
extern ibool os_has_said_disk_full;
|
||||
|
||||
#ifdef __WIN__
|
||||
|
||||
/* We define always WIN_ASYNC_IO, and check at run-time whether
|
||||
@ -55,6 +61,9 @@ log. */
|
||||
#define OS_FILE_CREATE 52
|
||||
#define OS_FILE_OVERWRITE 53
|
||||
|
||||
#define OS_FILE_READ_ONLY 333
|
||||
#define OS_FILE_READ_WRITE 444
|
||||
|
||||
/* Options for file_create */
|
||||
#define OS_FILE_AIO 61
|
||||
#define OS_FILE_NORMAL 62
|
||||
@ -118,6 +127,27 @@ os_get_os_version(void);
|
||||
/*===================*/
|
||||
/* out: OS_WIN95, OS_WIN31, OS_WINNT (2000 == NT) */
|
||||
/********************************************************************
|
||||
Creates the seek mutexes used in positioned reads and writes. */
|
||||
|
||||
void
|
||||
os_io_init_simple(void);
|
||||
/*===================*/
|
||||
/********************************************************************
|
||||
A simple function to open or create a file. */
|
||||
|
||||
os_file_t
|
||||
os_file_create_simple(
|
||||
/*==================*/
|
||||
/* out, own: handle to the file, not defined if error,
|
||||
error number can be retrieved with os_get_last_error */
|
||||
char* name, /* in: name of the file or path as a null-terminated
|
||||
string */
|
||||
ulint create_mode,/* in: OS_FILE_OPEN if an existing file is opened
|
||||
(if does not exist, error), or OS_FILE_CREATE if a new
|
||||
file is created (if exists, error) */
|
||||
ulint access_type,/* in: OS_FILE_READ_ONLY or OS_FILE_READ_WRITE */
|
||||
ibool* success);/* out: TRUE if succeed, FALSE if error */
|
||||
/********************************************************************
|
||||
Opens an existing file or creates a new. */
|
||||
|
||||
os_file_t
|
||||
|
@ -402,13 +402,13 @@ struct row_prebuilt_struct {
|
||||
byte* ins_upd_rec_buff;/* buffer for storing data converted
|
||||
to the Innobase format from the MySQL
|
||||
format */
|
||||
ibool in_update_remember_pos;
|
||||
/* if an update is processed, then if
|
||||
this flag is set to TRUE, it means
|
||||
that the stored cursor position in
|
||||
SELECT is the right position also
|
||||
for the update: we can just restore
|
||||
the cursor and save CPU time */
|
||||
ibool hint_no_need_to_fetch_extra_cols;
|
||||
/* normally this is TRUE, but
|
||||
MySQL will set this to FALSE
|
||||
if we might be required to fetch also
|
||||
other columns than mentioned in the
|
||||
query: the clustered index column(s),
|
||||
or an auto-increment column*/
|
||||
upd_node_t* upd_node; /* Innobase SQL update node used
|
||||
to perform updates and deletes */
|
||||
que_fork_t* ins_graph; /* Innobase SQL query graph used
|
||||
|
@ -24,10 +24,13 @@ extern char srv_fatal_errbuf[];
|
||||
thread starts running */
|
||||
extern os_event_t srv_lock_timeout_thread_event;
|
||||
|
||||
/* If the last data file is auto-extended, we add this many pages to it
|
||||
at a time */
|
||||
#define SRV_AUTO_EXTEND_INCREMENT (8 * ((1024 * 1024) / UNIV_PAGE_SIZE))
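
For a concrete feel of the increment above: with a 16 kB page size (assumed here only for the example, not stated by the patch) the macro works out to 512 pages, i.e. 8 MB per auto-extend step:

#include <stdio.h>

#define UNIV_PAGE_SIZE			(16 * 1024)	/* assumed page size */
#define SRV_AUTO_EXTEND_INCREMENT	(8 * ((1024 * 1024) / UNIV_PAGE_SIZE))

int
main(void)
{
	printf("%d pages per auto-extend step\n", SRV_AUTO_EXTEND_INCREMENT);

	return(0);
}
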
/* Server parameters which are read from the initfile */
|
||||
|
||||
extern char* srv_data_home;
|
||||
extern char* srv_logs_home;
|
||||
extern char* srv_arch_dir;
|
||||
|
||||
extern ulint srv_n_data_files;
|
||||
@ -35,6 +38,9 @@ extern char** srv_data_file_names;
|
||||
extern ulint* srv_data_file_sizes;
|
||||
extern ulint* srv_data_file_is_raw_partition;
|
||||
|
||||
extern ibool srv_auto_extend_last_data_file;
|
||||
extern ulint srv_last_file_size_max;
|
||||
|
||||
extern ibool srv_created_new_raw;
|
||||
|
||||
#define SRV_NEW_RAW 1
|
||||
@ -186,6 +192,19 @@ srv_boot(void);
|
||||
/*==========*/
|
||||
/* out: DB_SUCCESS or error code */
|
||||
/*************************************************************************
|
||||
Initializes the server. */
|
||||
|
||||
void
|
||||
srv_init(void);
|
||||
/*==========*/
|
||||
/*************************************************************************
|
||||
Initializes the synchronization primitives, memory system, and the thread
|
||||
local storage. */
|
||||
|
||||
void
|
||||
srv_general_init(void);
|
||||
/*==================*/
|
||||
/*************************************************************************
|
||||
Gets the number of threads in the system. */
|
||||
|
||||
ulint
|
||||
|
@ -12,6 +12,56 @@ Created 10/10/1995 Heikki Tuuri
|
||||
|
||||
#include "univ.i"
|
||||
|
||||
/*************************************************************************
|
||||
Normalizes a directory path for Windows: converts slashes to backslashes. */
|
||||
|
||||
void
|
||||
srv_normalize_path_for_win(
|
||||
/*=======================*/
|
||||
char* str); /* in/out: null-terminated character string */
|
||||
/*************************************************************************
|
||||
Adds a slash or a backslash to the end of a string if it is missing
|
||||
and the string is not empty. */
|
||||
|
||||
char*
|
||||
srv_add_path_separator_if_needed(
|
||||
/*=============================*/
|
||||
/* out, own: string which has the separator if the
|
||||
string is not empty */
|
||||
char* str); /* in: null-terminated character string */
|
||||
/*************************************************************************
|
||||
Reads the data files and their sizes from a character string given in
|
||||
the .cnf file. */
|
||||
|
||||
ibool
|
||||
srv_parse_data_file_paths_and_sizes(
|
||||
/*================================*/
|
||||
/* out: TRUE if ok, FALSE if parsing
|
||||
error */
|
||||
char* str, /* in: the data file path string */
|
||||
char*** data_file_names, /* out, own: array of data file
|
||||
names */
|
||||
ulint** data_file_sizes, /* out, own: array of data file sizes
|
||||
in megabytes */
|
||||
ulint** data_file_is_raw_partition,/* out, own: array of flags
|
||||
showing which data files are raw
|
||||
partitions */
|
||||
ulint* n_data_files, /* out: number of data files */
|
||||
ibool* is_auto_extending, /* out: TRUE if the last data file is
|
||||
auto-extending */
|
||||
ulint* max_auto_extend_size); /* out: max auto extend size for the
|
||||
last file if specified, 0 if not */
|
||||
/*************************************************************************
|
||||
Reads log group home directories from a character string given in
|
||||
the .cnf file. */
|
||||
|
||||
ibool
|
||||
srv_parse_log_group_home_dirs(
|
||||
/*==========================*/
|
||||
/* out: TRUE if ok, FALSE if parsing
|
||||
error */
|
||||
char* str, /* in: character string */
|
||||
char*** log_group_home_dirs); /* out, own: log group home dirs */
|
||||
/********************************************************************
|
||||
Starts Innobase and creates a new database if database files
|
||||
are not found and the user wants. Server parameters are
|
||||
|
@ -357,7 +357,7 @@ rw_lock_s_unlock_func(
|
||||
|
||||
/* Reset the shared lock by decrementing the reader count */
|
||||
|
||||
ut_ad(lock->reader_count > 0);
|
||||
ut_a(lock->reader_count > 0);
|
||||
lock->reader_count--;
|
||||
|
||||
#ifdef UNIV_SYNC_DEBUG
|
||||
|
@ -44,6 +44,15 @@ half-written pages in the data files. */
|
||||
void
|
||||
trx_sys_doublewrite_restore_corrupt_pages(void);
|
||||
/*===========================================*/
|
||||
/********************************************************************
|
||||
Determines if a page number is located inside the doublewrite buffer. */
|
||||
|
||||
ibool
|
||||
trx_doublewrite_page_inside(
|
||||
/*========================*/
|
||||
/* out: TRUE if the location is inside
|
||||
the two blocks of the doublewrite buffer */
|
||||
ulint page_no); /* in: page number */
|
||||
/*******************************************************************
|
||||
Checks if a page address is the trx sys header page. */
|
||||
UNIV_INLINE
|
||||
|
@ -55,6 +55,15 @@ ut_dulint_get_low(
|
||||
/* out: 32 bits in ulint */
|
||||
dulint d); /* in: dulint */
|
||||
/***********************************************************
|
||||
Converts a dulint (a struct of 2 ulints) to ib_longlong, which is a 64-bit
|
||||
integer type. */
|
||||
UNIV_INLINE
|
||||
ib_longlong
|
||||
ut_conv_dulint_to_longlong(
|
||||
/*=======================*/
|
||||
/* out: value in ib_longlong type */
|
||||
dulint d); /* in: dulint */
|
||||
/***********************************************************
|
||||
Tests if a dulint is zero. */
|
||||
UNIV_INLINE
|
||||
ibool
|
||||
|
@ -51,6 +51,20 @@ ut_dulint_get_low(
|
||||
return(d.low);
|
||||
}
|
||||
|
||||
/***********************************************************
|
||||
Converts a dulint (a struct of 2 ulints) to ib_longlong, which is a 64-bit
|
||||
integer type. */
|
||||
UNIV_INLINE
|
||||
ib_longlong
|
||||
ut_conv_dulint_to_longlong(
|
||||
/*=======================*/
|
||||
/* out: value in ib_longlong type */
|
||||
dulint d) /* in: dulint */
|
||||
{
|
||||
return((ib_longlong)d.low
|
||||
+ (((ib_longlong)d.high) << 32));
|
||||
}
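
A stand-alone sketch of the conversion defined above; the two-field struct mirrors the dulint layout used in the tree (high word plus low word), which is an assumption made only for this example:

#include <stdio.h>

typedef unsigned long ulint;
typedef long long ib_longlong;

typedef struct {
	ulint	high;	/* most significant 32 bits */
	ulint	low;	/* least significant 32 bits */
} dulint;

static ib_longlong
conv_dulint_to_longlong(dulint d)
{
	return((ib_longlong)d.low + (((ib_longlong)d.high) << 32));
}

int
main(void)
{
	dulint	d = {1, 5};	/* represents 1 * 2^32 + 5 */

	printf("%lld\n", conv_dulint_to_longlong(d));	/* prints 4294967301 */

	return(0);
}
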
/***********************************************************
|
||||
Tests if a dulint is zero. */
|
||||
UNIV_INLINE
|
||||
|
@ -35,7 +35,7 @@ ut_rnd_gen_next_ulint(
|
||||
/*************************************************************
|
||||
The following function generates 'random' ulint integers which
|
||||
enumerate the value space (let there be N of them) of ulint integers
|
||||
in a pseudo random fashion. Note that the same integer is repeated
|
||||
in a pseudo-random fashion. Note that the same integer is repeated
|
||||
always after N calls to the generator. */
|
||||
UNIV_INLINE
|
||||
ulint
|
||||
|
@ -17,6 +17,16 @@ Created 1/20/1994 Heikki Tuuri
|
||||
|
||||
typedef time_t ib_time_t;
|
||||
|
||||
/************************************************************
|
||||
Gets the high 32 bits in a ulint. That is, it makes a shift >> 32,
|
||||
but since there seem to be compiler bugs in both gcc and Visual C++,
|
||||
we do this by a special conversion. */
|
||||
|
||||
ulint
|
||||
ut_get_high32(
|
||||
/*==========*/
|
||||
/* out: a >> 32 */
|
||||
ulint a); /* in: ulint */
|
||||
/**********************************************************
|
||||
Calculates the minimum of two ulints. */
|
||||
UNIV_INLINE
|
||||
@ -144,6 +154,15 @@ void
|
||||
ut_print_timestamp(
|
||||
/*===============*/
|
||||
FILE* file); /* in: file where to print */
|
||||
/**************************************************************
|
||||
Returns current year, month, day. */
|
||||
|
||||
void
|
||||
ut_get_year_month_day(
|
||||
/*==================*/
|
||||
ulint* year, /* out: current year */
|
||||
ulint* month, /* out: month */
|
||||
ulint* day); /* out: day */
|
||||
/*****************************************************************
|
||||
Runs an idle loop on CPU. The argument gives the desired delay
|
||||
in microseconds on 100 MHz Pentium + Visual C++. */
|
||||
|
@ -1,7 +1,7 @@
|
||||
/******************************************************
|
||||
Database log
|
||||
|
||||
(c) 1995-1997 InnoDB Oy
|
||||
(c) 1995-1997 Innobase Oy
|
||||
|
||||
Created 12/9/1995 Heikki Tuuri
|
||||
*******************************************************/
|
||||
@ -24,6 +24,9 @@ Created 12/9/1995 Heikki Tuuri
|
||||
#include "trx0sys.h"
|
||||
#include "trx0trx.h"
|
||||
|
||||
/* Current free limit; protected by the log sys mutex; 0 means uninitialized */
|
||||
ulint log_fsp_current_free_limit = 0;
|
||||
|
||||
/* Global log system variable */
|
||||
log_t* log_sys = NULL;
|
||||
|
||||
@ -95,6 +98,32 @@ void
|
||||
log_archive_margin(void);
|
||||
/*====================*/
|
||||
|
||||
/********************************************************************
|
||||
Sets the global variable log_fsp_current_free_limit. Also makes a checkpoint,
|
||||
so that we know that the limit has been written to a log checkpoint field
|
||||
on disk. */
|
||||
|
||||
void
|
||||
log_fsp_current_free_limit_set_and_checkpoint(
|
||||
/*==========================================*/
|
||||
ulint limit) /* in: limit to set */
|
||||
{
|
||||
ibool success;
|
||||
|
||||
mutex_enter(&(log_sys->mutex));
|
||||
|
||||
log_fsp_current_free_limit = limit;
|
||||
|
||||
mutex_exit(&(log_sys->mutex));
|
||||
|
||||
/* Try to make a synchronous checkpoint */
|
||||
|
||||
success = FALSE;
|
||||
|
||||
while (!success) {
|
||||
success = log_checkpoint(TRUE, TRUE);
|
||||
}
|
||||
}
|
||||
|
||||
/********************************************************************
|
||||
Returns the oldest modified block lsn in the pool, or log_sys->lsn if none
|
||||
@ -436,6 +465,51 @@ log_group_calc_lsn_offset(
|
||||
return(log_group_calc_real_offset(offset, group));
|
||||
}
|
||||
|
||||
/***********************************************************************
|
||||
Calculates where in log files we find a specified lsn. */
|
||||
|
||||
ulint
|
||||
log_calc_where_lsn_is(
|
||||
/*==================*/
|
||||
/* out: log file number */
|
||||
ib_longlong* log_file_offset, /* out: offset in that file
|
||||
(including the header) */
|
||||
dulint first_header_lsn, /* in: first log file start
|
||||
lsn */
|
||||
dulint lsn, /* in: lsn whose position to
|
||||
determine */
|
||||
ulint n_log_files, /* in: total number of log
|
||||
files */
|
||||
ib_longlong log_file_size) /* in: log file size
|
||||
(including the header) */
|
||||
{
|
||||
ib_longlong ib_lsn;
|
||||
ib_longlong ib_first_header_lsn;
|
||||
ib_longlong capacity = log_file_size - LOG_FILE_HDR_SIZE;
|
||||
ulint file_no;
|
||||
ib_longlong add_this_many;
|
||||
|
||||
ib_lsn = ut_conv_dulint_to_longlong(lsn);
|
||||
ib_first_header_lsn = ut_conv_dulint_to_longlong(first_header_lsn);
|
||||
|
||||
if (ib_lsn < ib_first_header_lsn) {
|
||||
add_this_many = 1 + (ib_first_header_lsn - ib_lsn)
|
||||
/ (capacity * (ib_longlong)n_log_files);
|
||||
ib_lsn += add_this_many
|
||||
* capacity * (ib_longlong)n_log_files;
|
||||
}
|
||||
|
||||
ut_a(ib_lsn >= ib_first_header_lsn);
|
||||
|
||||
file_no = ((ulint)((ib_lsn - ib_first_header_lsn) / capacity))
|
||||
% n_log_files;
|
||||
*log_file_offset = (ib_lsn - ib_first_header_lsn) % capacity;
|
||||
|
||||
*log_file_offset = *log_file_offset + LOG_FILE_HDR_SIZE;
|
||||
|
||||
return(file_no);
|
||||
}
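
A usage sketch of the position arithmetic in log_calc_where_lsn_is(): with two 10 MB log files and an assumed 2 kB file header, an lsn 15 MB past the start of the first file lands in the second file. The wrap-around branch for lsns below the first header lsn is left out; all numbers are made up for the example:

#include <stdio.h>

typedef unsigned long ulint;
typedef long long ib_longlong;

int
main(void)
{
	ib_longlong	log_file_size	 = 10 * 1024 * 1024;	/* per file, incl. header */
	ib_longlong	hdr_size	 = 2048;		/* assumed header size */
	ib_longlong	capacity	 = log_file_size - hdr_size;
	ulint		n_log_files	 = 2;
	ib_longlong	first_header_lsn = 1000000;
	ib_longlong	lsn		 = first_header_lsn + 15 * 1024 * 1024;
	ulint		file_no;
	ib_longlong	offset;

	/* Same modular arithmetic as in log_calc_where_lsn_is() above */
	file_no = (ulint)((lsn - first_header_lsn) / capacity) % n_log_files;
	offset	= (lsn - first_header_lsn) % capacity + hdr_size;

	printf("file %lu, offset %lld\n", file_no, offset);

	return(0);
}
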
/************************************************************
|
||||
Sets the field values in group to correspond to a given lsn. For this function
|
||||
to work, the values must already be correctly initialized to correspond to
|
||||
@ -653,7 +727,7 @@ log_init(void)
|
||||
|
||||
#ifdef UNIV_LOG_DEBUG
|
||||
recv_sys_create();
|
||||
recv_sys_init();
|
||||
recv_sys_init(FALSE, buf_pool_get_curr_size());
|
||||
|
||||
recv_sys->parse_start_lsn = log_sys->lsn;
|
||||
recv_sys->scanned_lsn = log_sys->lsn;
|
||||
@ -961,7 +1035,7 @@ log_group_write_buf(
|
||||
ibool sync;
|
||||
ibool write_header;
|
||||
ulint next_offset;
|
||||
|
||||
|
||||
ut_ad(mutex_own(&(log_sys->mutex)));
|
||||
ut_ad(len % OS_FILE_LOG_BLOCK_SIZE == 0);
|
||||
ut_ad(ut_dulint_get_low(start_lsn) % OS_FILE_LOG_BLOCK_SIZE == 0);
|
||||
@ -1002,9 +1076,28 @@ loop:
|
||||
}
|
||||
|
||||
if (log_debug_writes) {
|
||||
ulint i;
|
||||
|
||||
printf(
|
||||
"Writing log file segment to group %lu offset %lu len %lu\n",
|
||||
group->id, next_offset, write_len);
|
||||
"Writing log file segment to group %lu offset %lu len %lu\n"
|
||||
"start lsn %lu %lu\n",
|
||||
group->id, next_offset, write_len,
|
||||
ut_dulint_get_high(start_lsn),
|
||||
ut_dulint_get_low(start_lsn));
|
||||
printf(
|
||||
"First block n:o %lu last block n:o %lu\n",
|
||||
log_block_get_hdr_no(buf),
|
||||
log_block_get_hdr_no(
|
||||
buf + write_len - OS_FILE_LOG_BLOCK_SIZE));
|
||||
ut_a(log_block_get_hdr_no(buf)
|
||||
== log_block_convert_lsn_to_no(start_lsn));
|
||||
|
||||
for (i = 0; i < write_len / OS_FILE_LOG_BLOCK_SIZE; i++) {
|
||||
|
||||
ut_a(log_block_get_hdr_no(buf) + i
|
||||
== log_block_get_hdr_no(buf
|
||||
+ i * OS_FILE_LOG_BLOCK_SIZE));
|
||||
}
|
||||
}
|
||||
|
||||
if (log_do_write) {
|
||||
@ -1346,7 +1439,7 @@ log_group_checkpoint(
|
||||
ulint i;
|
||||
|
||||
ut_ad(mutex_own(&(log_sys->mutex)));
|
||||
ut_ad(LOG_CHECKPOINT_SIZE <= OS_FILE_LOG_BLOCK_SIZE);
|
||||
ut_a(LOG_CHECKPOINT_SIZE <= OS_FILE_LOG_BLOCK_SIZE);
|
||||
|
||||
buf = group->checkpoint_buf;
|
||||
|
||||
@ -1394,6 +1487,15 @@ log_group_checkpoint(
|
||||
LOG_CHECKPOINT_CHECKSUM_2 - LOG_CHECKPOINT_LSN);
|
||||
mach_write_to_4(buf + LOG_CHECKPOINT_CHECKSUM_2, fold);
|
||||
|
||||
/* Starting from InnoDB-3.23.50, we also write info on allocated
|
||||
size in the tablespace */
|
||||
|
||||
mach_write_to_4(buf + LOG_CHECKPOINT_FSP_FREE_LIMIT,
|
||||
log_fsp_current_free_limit);
|
||||
|
||||
mach_write_to_4(buf + LOG_CHECKPOINT_FSP_MAGIC_N,
|
||||
LOG_CHECKPOINT_FSP_MAGIC_N_VAL);
|
||||
|
||||
/* We alternate the physical place of the checkpoint info in the first
|
||||
log file */
|
||||
|
||||
@ -1428,6 +1530,48 @@ log_group_checkpoint(
|
||||
}
|
||||
}
|
||||
|
||||
/**********************************************************
|
||||
Writes info to a buffer of a log group when log files are created in
|
||||
backup restoration. */
|
||||
|
||||
void
|
||||
log_reset_first_header_and_checkpoint(
|
||||
/*==================================*/
|
||||
byte* hdr_buf,/* in: buffer which will be written to the start
|
||||
of the first log file */
|
||||
dulint lsn) /* in: lsn of the start of the first log file
|
||||
+ LOG_BLOCK_HDR_SIZE */
|
||||
{
|
||||
ulint fold;
|
||||
byte* buf;
|
||||
|
||||
mach_write_to_4(hdr_buf + LOG_GROUP_ID, 0);
|
||||
mach_write_to_8(hdr_buf + LOG_FILE_START_LSN, lsn);
|
||||
|
||||
buf = hdr_buf + LOG_CHECKPOINT_1;
|
||||
|
||||
mach_write_to_8(buf + LOG_CHECKPOINT_NO, ut_dulint_zero);
|
||||
mach_write_to_8(buf + LOG_CHECKPOINT_LSN, lsn);
|
||||
|
||||
mach_write_to_4(buf + LOG_CHECKPOINT_OFFSET,
|
||||
LOG_FILE_HDR_SIZE + LOG_BLOCK_HDR_SIZE);
|
||||
|
||||
mach_write_to_4(buf + LOG_CHECKPOINT_LOG_BUF_SIZE, 2 * 1024 * 1024);
|
||||
|
||||
mach_write_to_8(buf + LOG_CHECKPOINT_ARCHIVED_LSN, ut_dulint_max);
|
||||
|
||||
fold = ut_fold_binary(buf, LOG_CHECKPOINT_CHECKSUM_1);
|
||||
mach_write_to_4(buf + LOG_CHECKPOINT_CHECKSUM_1, fold);
|
||||
|
||||
fold = ut_fold_binary(buf + LOG_CHECKPOINT_LSN,
|
||||
LOG_CHECKPOINT_CHECKSUM_2 - LOG_CHECKPOINT_LSN);
|
||||
mach_write_to_4(buf + LOG_CHECKPOINT_CHECKSUM_2, fold);
|
||||
|
||||
/* Starting from InnoDB-3.23.50, we should also write info on
|
||||
allocated size in the tablespace, but unfortunately we do not
|
||||
know it here */
|
||||
}
|
||||
|
||||
/**********************************************************
|
||||
Reads a checkpoint info from a log group header to log_sys->checkpoint_buf. */
|
||||
|
||||
@ -2800,7 +2944,10 @@ log_check_log_recs(
|
||||
|
||||
ut_memcpy(scan_buf, start, end - start);
|
||||
|
||||
recv_scan_log_recs(FALSE, scan_buf, end - start,
|
||||
recv_scan_log_recs(TRUE,
|
||||
buf_pool_get_curr_size() -
|
||||
RECV_POOL_N_FREE_BLOCKS * UNIV_PAGE_SIZE,
|
||||
FALSE, scan_buf, end - start,
|
||||
ut_dulint_align_down(buf_start_lsn,
|
||||
OS_FILE_LOG_BLOCK_SIZE),
|
||||
&contiguous_lsn, &scanned_lsn);
|
||||
|
@ -1,7 +1,7 @@
|
||||
/******************************************************
|
||||
Recovery
|
||||
|
||||
(c) 1997 InnoDB Oy
|
||||
(c) 1997 Innobase Oy
|
||||
|
||||
Created 9/20/1997 Heikki Tuuri
|
||||
*******************************************************/
|
||||
@ -33,13 +33,6 @@ Created 9/20/1997 Heikki Tuuri
|
||||
#include "dict0boot.h"
|
||||
#include "fil0fil.h"
|
||||
|
||||
/* Size of block reads when the log groups are scanned forward to do a
|
||||
roll-forward */
|
||||
#define RECV_SCAN_SIZE (4 * UNIV_PAGE_SIZE)
|
||||
|
||||
/* Size of the parsing buffer */
|
||||
#define RECV_PARSING_BUF_SIZE LOG_BUFFER_SIZE
|
||||
|
||||
/* Log records are stored in the hash table in chunks at most of this size;
|
||||
this must be less than UNIV_PAGE_SIZE as it is stored in the buffer pool */
|
||||
#define RECV_DATA_BLOCK_SIZE (MEM_MAX_ALLOC_IN_BUF - sizeof(recv_data_t))
|
||||
@ -69,6 +62,9 @@ ibool recv_no_ibuf_operations = FALSE;
|
||||
log scan */
|
||||
ulint recv_scan_print_counter = 0;
|
||||
|
||||
ibool recv_is_from_backup = FALSE;
|
||||
|
||||
|
||||
/************************************************************
|
||||
Creates the recovery system. */
|
||||
|
||||
@ -94,8 +90,11 @@ recv_sys_create(void)
|
||||
Inits the recovery system for a recovery operation. */
|
||||
|
||||
void
|
||||
recv_sys_init(void)
|
||||
/*===============*/
|
||||
recv_sys_init(
|
||||
/*==========*/
|
||||
ibool recover_from_backup, /* in: TRUE if this is called
|
||||
to recover from a hot backup */
|
||||
ulint available_memory) /* in: available memory in bytes */
|
||||
{
|
||||
if (recv_sys->heap != NULL) {
|
||||
|
||||
@ -104,13 +103,18 @@ recv_sys_init(void)
|
||||
|
||||
mutex_enter(&(recv_sys->mutex));
|
||||
|
||||
recv_sys->heap = mem_heap_create_in_buffer(256);
|
||||
if (!recover_from_backup) {
|
||||
recv_sys->heap = mem_heap_create_in_buffer(256);
|
||||
} else {
|
||||
recv_sys->heap = mem_heap_create(256);
|
||||
recv_is_from_backup = TRUE;
|
||||
}
|
||||
|
||||
recv_sys->buf = ut_malloc(RECV_PARSING_BUF_SIZE);
|
||||
recv_sys->len = 0;
|
||||
recv_sys->recovered_offset = 0;
|
||||
|
||||
recv_sys->addr_hash = hash_create(buf_pool_get_curr_size() / 64);
|
||||
recv_sys->addr_hash = hash_create(available_memory / 64);
|
||||
recv_sys->n_addrs = 0;
|
||||
|
||||
recv_sys->apply_log_recs = FALSE;
|
||||
@ -337,7 +341,7 @@ recv_synchronize_groups(
|
||||
start_lsn = ut_dulint_align_down(recovered_lsn, OS_FILE_LOG_BLOCK_SIZE);
|
||||
end_lsn = ut_dulint_align_up(recovered_lsn, OS_FILE_LOG_BLOCK_SIZE);
|
||||
|
||||
ut_ad(ut_dulint_cmp(start_lsn, end_lsn) != 0);
|
||||
ut_a(ut_dulint_cmp(start_lsn, end_lsn) != 0);
|
||||
|
||||
log_group_read_log_seg(LOG_RECOVER, recv_sys->last_block,
|
||||
up_to_date_group, start_lsn, end_lsn);
|
||||
@ -377,6 +381,35 @@ recv_synchronize_groups(
|
||||
mutex_enter(&(log_sys->mutex));
|
||||
}
|
||||
|
||||
/***************************************************************************
|
||||
Checks the consistency of the checkpoint info */
|
||||
static
|
||||
ibool
|
||||
recv_check_cp_is_consistent(
|
||||
/*========================*/
|
||||
/* out: TRUE if ok */
|
||||
byte* buf) /* in: buffer containing checkpoint info */
|
||||
{
|
||||
ulint fold;
|
||||
|
||||
fold = ut_fold_binary(buf, LOG_CHECKPOINT_CHECKSUM_1);
|
||||
|
||||
if ((fold & 0xFFFFFFFF) != mach_read_from_4(buf
|
||||
+ LOG_CHECKPOINT_CHECKSUM_1)) {
|
||||
return(FALSE);
|
||||
}
|
||||
|
||||
fold = ut_fold_binary(buf + LOG_CHECKPOINT_LSN,
|
||||
LOG_CHECKPOINT_CHECKSUM_2 - LOG_CHECKPOINT_LSN);
|
||||
|
||||
if ((fold & 0xFFFFFFFF) != mach_read_from_4(buf
|
||||
+ LOG_CHECKPOINT_CHECKSUM_2)) {
|
||||
return(FALSE);
|
||||
}
|
||||
|
||||
return(TRUE);
|
||||
}
|
||||
|
||||
/************************************************************
|
||||
Looks for the maximum consistent checkpoint from the log groups. */
|
||||
static
|
||||
@ -392,7 +425,6 @@ recv_find_max_checkpoint(
|
||||
dulint max_no;
|
||||
dulint checkpoint_no;
|
||||
ulint field;
|
||||
ulint fold;
|
||||
byte* buf;
|
||||
|
||||
group = UT_LIST_GET_FIRST(log_sys->log_groups);
|
||||
@ -410,17 +442,11 @@ recv_find_max_checkpoint(
|
||||
|
||||
log_group_read_checkpoint_info(group, field);
|
||||
|
||||
/* Check the consistency of the checkpoint info */
|
||||
fold = ut_fold_binary(buf, LOG_CHECKPOINT_CHECKSUM_1);
|
||||
|
||||
if ((fold & 0xFFFFFFFF)
|
||||
!= mach_read_from_4(buf
|
||||
+ LOG_CHECKPOINT_CHECKSUM_1)) {
|
||||
if (!recv_check_cp_is_consistent(buf)) {
|
||||
if (log_debug_writes) {
|
||||
fprintf(stderr,
|
||||
"InnoDB: Checkpoint in group %lu at %lu invalid, %lu, %lu\n",
|
||||
"InnoDB: Checkpoint in group %lu at %lu invalid, %lu\n",
|
||||
group->id, field,
|
||||
fold & 0xFFFFFFFF,
|
||||
mach_read_from_4(buf
|
||||
+ LOG_CHECKPOINT_CHECKSUM_1));
|
||||
|
||||
@ -429,23 +455,6 @@ recv_find_max_checkpoint(
|
||||
goto not_consistent;
|
||||
}
|
||||
|
||||
fold = ut_fold_binary(buf + LOG_CHECKPOINT_LSN,
|
||||
LOG_CHECKPOINT_CHECKSUM_2
|
||||
- LOG_CHECKPOINT_LSN);
|
||||
if ((fold & 0xFFFFFFFF)
|
||||
!= mach_read_from_4(buf
|
||||
+ LOG_CHECKPOINT_CHECKSUM_2)) {
|
||||
if (log_debug_writes) {
|
||||
fprintf(stderr,
|
||||
"InnoDB: Checkpoint in group %lu at %lu invalid, %lu, %lu\n",
|
||||
group->id, field,
|
||||
fold & 0xFFFFFFFF,
|
||||
mach_read_from_4(buf
|
||||
+ LOG_CHECKPOINT_CHECKSUM_2));
|
||||
}
|
||||
goto not_consistent;
|
||||
}
|
||||
|
||||
group->state = LOG_GROUP_OK;
|
||||
|
||||
group->lsn = mach_read_from_8(buf
|
||||
@ -476,7 +485,13 @@ recv_find_max_checkpoint(
|
||||
|
||||
if (*max_group == NULL) {
|
||||
|
||||
fprintf(stderr, "InnoDB: No valid checkpoint found\n");
|
||||
fprintf(stderr,
|
||||
"InnoDB: No valid checkpoint found.\n"
|
||||
"InnoDB: If this error appears when you are creating an InnoDB database,\n"
|
||||
"InnoDB: the problem may be that during an earlier attempt you managed\n"
|
||||
"InnoDB: to create the InnoDB data files, but log file creation failed.\n"
|
||||
"InnoDB: If that is the case, please refer to section 3.1 of\n"
|
||||
"InnoDB: http://www.innodb.com/ibman.html\n");
|
||||
|
||||
return(DB_ERROR);
|
||||
}
|
||||
@ -484,6 +499,162 @@ recv_find_max_checkpoint(
|
||||
return(DB_SUCCESS);
|
||||
}
|
||||
|
||||
/***********************************************************************
|
||||
Reads the checkpoint info needed in hot backup. */
|
||||
|
||||
ibool
|
||||
recv_read_cp_info_for_backup(
|
||||
/*=========================*/
|
||||
/* out: TRUE if success */
|
||||
byte* hdr, /* in: buffer containing the log group header */
|
||||
dulint* lsn, /* out: checkpoint lsn */
|
||||
ulint* offset, /* out: checkpoint offset in the log group */
|
||||
ulint* fsp_limit,/* out: fsp limit, 1000000000 if the database
|
||||
is running with < version 3.23.50 of InnoDB */
|
||||
dulint* cp_no, /* out: checkpoint number */
|
||||
dulint* first_header_lsn)
|
||||
/* out: lsn of the start of the first log file */
|
||||
{
|
||||
ulint max_cp = 0;
|
||||
dulint max_cp_no = ut_dulint_zero;
|
||||
byte* cp_buf;
|
||||
|
||||
cp_buf = hdr + LOG_CHECKPOINT_1;
|
||||
|
||||
if (recv_check_cp_is_consistent(cp_buf)) {
|
||||
max_cp_no = mach_read_from_8(cp_buf + LOG_CHECKPOINT_NO);
|
||||
max_cp = LOG_CHECKPOINT_1;
|
||||
}
|
||||
|
||||
cp_buf = hdr + LOG_CHECKPOINT_2;
|
||||
|
||||
if (recv_check_cp_is_consistent(cp_buf)) {
|
||||
if (ut_dulint_cmp(mach_read_from_8(cp_buf + LOG_CHECKPOINT_NO),
|
||||
max_cp_no) > 0) {
|
||||
max_cp = LOG_CHECKPOINT_2;
|
||||
}
|
||||
}
|
||||
|
||||
if (max_cp == 0) {
|
||||
return(FALSE);
|
||||
}
|
||||
|
||||
cp_buf = hdr + max_cp;
|
||||
|
||||
*lsn = mach_read_from_8(cp_buf + LOG_CHECKPOINT_LSN);
|
||||
*offset = mach_read_from_4(cp_buf + LOG_CHECKPOINT_OFFSET);
|
||||
|
||||
/* If the user is running a pre-3.23.50 version of InnoDB, its
|
||||
checkpoint data does not contain the fsp limit info */
|
||||
if (mach_read_from_4(cp_buf + LOG_CHECKPOINT_FSP_MAGIC_N)
|
||||
== LOG_CHECKPOINT_FSP_MAGIC_N_VAL) {
|
||||
|
||||
*fsp_limit = mach_read_from_4(
|
||||
cp_buf + LOG_CHECKPOINT_FSP_FREE_LIMIT);
|
||||
|
||||
if (*fsp_limit == 0) {
|
||||
*fsp_limit = 1000000000;
|
||||
}
|
||||
} else {
|
||||
*fsp_limit = 1000000000;
|
||||
}
|
||||
|
||||
/* printf("fsp limit %lu MB\n", *fsp_limit); */
|
||||
|
||||
*cp_no = mach_read_from_8(cp_buf + LOG_CHECKPOINT_NO);
|
||||
|
||||
*first_header_lsn = mach_read_from_8(hdr + LOG_FILE_START_LSN);
|
||||
|
||||
return(TRUE);
|
||||
}
|
||||
|
||||
/***********************************************************************
|
||||
Scans the log segment and n_bytes_scanned is set to the length of valid
|
||||
log scanned. */
|
||||
|
||||
void
|
||||
recv_scan_log_seg_for_backup(
|
||||
/*=========================*/
|
||||
byte* buf, /* in: buffer containing log data */
|
||||
ulint buf_len, /* in: data length in that buffer */
|
||||
dulint* scanned_lsn, /* in/out: lsn of buffer start,
|
||||
we return scanned lsn */
|
||||
ulint* scanned_checkpoint_no,
|
||||
/* in/out: 4 lowest bytes of the
|
||||
highest scanned checkpoint number so
|
||||
far */
|
||||
ulint* n_bytes_scanned)/* out: how much we were able to
|
||||
scan, smaller than buf_len if log
|
||||
data ended here */
|
||||
{
|
||||
ulint data_len;
|
||||
byte* log_block;
|
||||
ulint no;
|
||||
|
||||
*n_bytes_scanned = 0;
|
||||
|
||||
for (log_block = buf; log_block < buf + buf_len;
|
||||
log_block += OS_FILE_LOG_BLOCK_SIZE) {
|
||||
|
||||
no = log_block_get_hdr_no(log_block);
|
||||
|
||||
/* fprintf(stderr, "Log block header no %lu\n", no); */
|
||||
|
||||
if (no != log_block_get_trl_no(log_block)
|
||||
|| no != log_block_convert_lsn_to_no(*scanned_lsn)) {
|
||||
|
||||
/* printf(
|
||||
"Log block n:o %lu, trailer n:o %lu, scanned lsn n:o %lu\n",
|
||||
no, log_block_get_trl_no(log_block),
|
||||
log_block_convert_lsn_to_no(*scanned_lsn));
|
||||
*/
|
||||
/* Garbage or an incompletely written log block */
|
||||
|
||||
log_block += OS_FILE_LOG_BLOCK_SIZE;
|
||||
|
||||
/* printf(
|
||||
"Next log block n:o %lu, trailer n:o %lu\n",
|
||||
log_block_get_hdr_no(log_block),
|
||||
log_block_get_trl_no(log_block));
|
||||
*/
|
||||
break;
|
||||
}
|
||||
|
||||
if (*scanned_checkpoint_no > 0
|
||||
&& log_block_get_checkpoint_no(log_block)
|
||||
< *scanned_checkpoint_no
|
||||
&& *scanned_checkpoint_no
|
||||
- log_block_get_checkpoint_no(log_block)
|
||||
> 0x80000000) {
|
||||
|
||||
/* Garbage from a log buffer flush which was made
|
||||
before the most recent database recovery */
|
||||
|
||||
printf("Scanned cp n:o %lu, block cp n:o %lu\n",
|
||||
*scanned_checkpoint_no,
|
||||
log_block_get_checkpoint_no(log_block));
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
data_len = log_block_get_data_len(log_block);
|
||||
|
||||
*scanned_checkpoint_no
|
||||
= log_block_get_checkpoint_no(log_block);
|
||||
*scanned_lsn = ut_dulint_add(*scanned_lsn, data_len);
|
||||
|
||||
*n_bytes_scanned += data_len;
|
||||
|
||||
if (data_len < OS_FILE_LOG_BLOCK_SIZE) {
|
||||
/* Log data ends here */
|
||||
|
||||
/* printf("Log block data len %lu\n", data_len); */
|
||||
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/***********************************************************************
|
||||
Tries to parse a single log record body and also applies it to a page if
|
||||
specified. */
|
||||
@ -625,7 +796,6 @@ recv_get_fil_addr_struct(
|
||||
|
||||
recv_addr = HASH_GET_FIRST(recv_sys->addr_hash,
|
||||
recv_hash(space, page_no));
|
||||
|
||||
while (recv_addr) {
|
||||
if ((recv_addr->space == space)
|
||||
&& (recv_addr->page_no == page_no)) {
|
||||
@ -755,6 +925,10 @@ read in, or also for a page already in the buffer pool. */
|
||||
void
|
||||
recv_recover_page(
|
||||
/*==============*/
|
||||
ibool recover_backup, /* in: TRUE if we are recovering a backup
|
||||
page: then we do not acquire any latches
|
||||
since the page was read in outside the
|
||||
buffer pool */
|
||||
ibool just_read_in, /* in: TRUE if the i/o-handler calls this for
|
||||
a freshly read page */
|
||||
page_t* page, /* in: buffer page */
|
||||
@ -799,39 +973,48 @@ recv_recover_page(
|
||||
|
||||
mutex_exit(&(recv_sys->mutex));
|
||||
|
||||
block = buf_block_align(page);
|
||||
|
||||
if (just_read_in) {
|
||||
/* Move the ownership of the x-latch on the page to this OS
|
||||
thread, so that we can acquire a second x-latch on it. This
|
||||
is needed for the operations to the page to pass the debug
|
||||
checks. */
|
||||
|
||||
rw_lock_x_lock_move_ownership(&(block->lock));
|
||||
}
|
||||
|
||||
mtr_start(&mtr);
|
||||
|
||||
mtr_set_log_mode(&mtr, MTR_LOG_NONE);
|
||||
|
||||
success = buf_page_get_known_nowait(RW_X_LATCH, page, BUF_KEEP_OLD,
|
||||
if (!recover_backup) {
|
||||
block = buf_block_align(page);
|
||||
|
||||
if (just_read_in) {
|
||||
/* Move the ownership of the x-latch on the page to this OS
|
||||
thread, so that we can acquire a second x-latch on it. This
|
||||
is needed for the operations to the page to pass the debug
|
||||
checks. */
|
||||
|
||||
rw_lock_x_lock_move_ownership(&(block->lock));
|
||||
}
|
||||
|
||||
success = buf_page_get_known_nowait(RW_X_LATCH, page,
|
||||
BUF_KEEP_OLD,
|
||||
IB__FILE__, __LINE__,
|
||||
&mtr);
|
||||
ut_a(success);
|
||||
ut_a(success);
|
||||
|
||||
buf_page_dbg_add_level(page, SYNC_NO_ORDER_CHECK);
|
||||
buf_page_dbg_add_level(page, SYNC_NO_ORDER_CHECK);
|
||||
}
|
||||
|
||||
/* Read the newest modification lsn from the page */
|
||||
page_lsn = mach_read_from_8(page + FIL_PAGE_LSN);
|
||||
|
||||
/* It may be that the page has been modified in the buffer pool: read
|
||||
the newest modification lsn there */
|
||||
if (!recover_backup) {
|
||||
/* It may be that the page has been modified in the buffer
|
||||
pool: read the newest modification lsn there */
|
||||
|
||||
page_newest_lsn = buf_frame_get_newest_modification(page);
|
||||
page_newest_lsn = buf_frame_get_newest_modification(page);
|
||||
|
||||
if (!ut_dulint_is_zero(page_newest_lsn)) {
|
||||
if (!ut_dulint_is_zero(page_newest_lsn)) {
|
||||
|
||||
page_lsn = page_newest_lsn;
|
||||
page_lsn = page_newest_lsn;
|
||||
}
|
||||
} else {
|
||||
/* In recovery from a backup we do not use the buffer
|
||||
pool */
|
||||
|
||||
page_newest_lsn = ut_dulint_zero;
|
||||
}
|
||||
|
||||
modification_to_page = FALSE;
|
||||
@ -852,13 +1035,13 @@ recv_recover_page(
|
||||
buf = ((byte*)(recv->data)) + sizeof(recv_data_t);
|
||||
}
|
||||
|
||||
if ((recv->type == MLOG_INIT_FILE_PAGE)
|
||||
|| (recv->type == MLOG_FULL_PAGE)) {
|
||||
/* A new file page may has been taken into use,
|
||||
if (recv->type == MLOG_INIT_FILE_PAGE
|
||||
|| recv->type == MLOG_FULL_PAGE) {
|
||||
/* A new file page may have been taken into use,
|
||||
or we have stored the full contents of the page:
|
||||
in this case it may be that the original log record
|
||||
type was MLOG_INIT_FILE_PAGE, and we replaced it
|
||||
with MLOG_FULL_PAGE, thus to we have to apply
|
||||
with MLOG_FULL_PAGE, thus we have to apply
|
||||
any record of type MLOG_FULL_PAGE */
|
||||
|
||||
page_lsn = page_newest_lsn;
|
||||
@ -885,6 +1068,13 @@ recv_recover_page(
|
||||
|
||||
recv_parse_or_apply_log_rec_body(recv->type, buf,
|
||||
buf + recv->len, page, &mtr);
|
||||
mach_write_to_8(page + UNIV_PAGE_SIZE
|
||||
- FIL_PAGE_END_LSN,
|
||||
ut_dulint_add(recv->start_lsn,
|
||||
recv->len));
|
||||
mach_write_to_8(page + FIL_PAGE_LSN,
|
||||
ut_dulint_add(recv->start_lsn,
|
||||
recv->len));
|
||||
}
|
||||
|
||||
if (recv->len > RECV_DATA_BLOCK_SIZE) {
|
||||
@ -903,7 +1093,7 @@ recv_recover_page(
|
||||
|
||||
mutex_exit(&(recv_sys->mutex));
|
||||
|
||||
if (modification_to_page) {
|
||||
if (!recover_backup && modification_to_page) {
|
||||
buf_flush_recv_note_modification(block, start_lsn, end_lsn);
|
||||
}
|
||||
|
||||
@ -1038,8 +1228,8 @@ loop:
|
||||
|
||||
buf_page_dbg_add_level(page,
|
||||
SYNC_NO_ORDER_CHECK);
|
||||
recv_recover_page(FALSE, page, space,
|
||||
page_no);
|
||||
recv_recover_page(FALSE, FALSE, page,
|
||||
space, page_no);
|
||||
mtr_commit(&mtr);
|
||||
} else {
|
||||
recv_read_in_area(space, page_no);
|
||||
@ -1111,6 +1301,95 @@ loop:
|
||||
mutex_exit(&(recv_sys->mutex));
|
||||
}
|
||||
|
||||
/***********************************************************************
Applies log records in the hash table to a backup. */

void
recv_apply_log_recs_for_backup(
/*===========================*/
	ulint	n_data_files,	/* in: number of data files */
	char**	data_files,	/* in: array containing the paths to the
				data files */
	ulint*	file_sizes)	/* in: sizes of the data files in database
				pages */
{
	recv_addr_t*	recv_addr;
	os_file_t	data_file;
	ulint		n_pages_total	= 0;
	ulint		nth_file	= 0;
	ulint		nth_page_in_file= 0;
	byte*		page;
	ibool		success;
	ulint		i;

	recv_sys->apply_log_recs = TRUE;
	recv_sys->apply_batch_on = TRUE;

	page = buf_pool->frame_zero;

	for (i = 0; i < n_data_files; i++) {
		n_pages_total += file_sizes[i];
	}

	printf(
"InnoDB: Starting an apply batch of log records to the database...\n"
"InnoDB: Progress in percents: ");

	for (i = 0; i < n_pages_total; i++) {

		if (i == 0 || nth_page_in_file == file_sizes[nth_file]) {
			if (i != 0) {
				nth_file++;
				nth_page_in_file = 0;
				os_file_flush(data_file);
				os_file_close(data_file);
			}

			data_file = os_file_create_simple(data_files[nth_file],
							OS_FILE_OPEN,
							OS_FILE_READ_WRITE,
							&success);
			ut_a(success);
		}

		recv_addr = recv_get_fil_addr_struct(0, i);

		if (recv_addr != NULL) {
			os_file_read(data_file, page,
				(nth_page_in_file << UNIV_PAGE_SIZE_SHIFT)
				& 0xFFFFFFFF,
				nth_page_in_file >> (32 - UNIV_PAGE_SIZE_SHIFT),
				UNIV_PAGE_SIZE);

			recv_recover_page(TRUE, FALSE, page, 0, i);

			buf_flush_init_for_writing(page,
				mach_read_from_8(page + FIL_PAGE_LSN),
				0, i);

			os_file_write(data_files[nth_file],
				data_file, page,
				(nth_page_in_file << UNIV_PAGE_SIZE_SHIFT)
				& 0xFFFFFFFF,
				nth_page_in_file >> (32 - UNIV_PAGE_SIZE_SHIFT),
				UNIV_PAGE_SIZE);
		}

		if ((100 * i) / n_pages_total
		    != (100 * (i + 1)) / n_pages_total) {
			printf("%lu ", (100 * i) / n_pages_total);
			fflush(stdout);
		}

		nth_page_in_file++;
	}

	os_file_flush(data_file);
	os_file_close(data_file);

	recv_sys_empty_hash();
}
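
/* Editorial sketch (not part of the changeset): the os_file_read() and
os_file_write() calls above pass the byte offset of page i as two 32-bit
halves, since the file routines take the offset as a pair of ulints.  With
16 kB pages (UNIV_PAGE_SIZE_SHIFT == 14, an assumption for this example),
splitting the offset of an arbitrary page number looks like this: */

#include <stdio.h>

#define UNIV_PAGE_SIZE_SHIFT	14	/* 16 kB pages assumed */

int
main(void)
{
	unsigned long long	page_no	= 0x50000ULL;	/* page 327680 */
	unsigned long		low;
	unsigned long		high;

	/* Low 32 bits of page_no * UNIV_PAGE_SIZE */
	low  = (unsigned long)((page_no << UNIV_PAGE_SIZE_SHIFT) & 0xFFFFFFFFULL);
	/* Bits 32 and above of the same product */
	high = (unsigned long)(page_no >> (32 - UNIV_PAGE_SIZE_SHIFT));

	printf("offset low %#lx, high %#lx\n", low, high);	/* 0x40000000, 0x1 */

	return(0);
}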
|
||||
|
||||
/***********************************************************************
|
||||
In the debug version, updates the replica of a file page, based on a log
|
||||
record. */
|
||||
@ -1430,12 +1709,13 @@ recv_check_incomplete_log_recs(
|
||||
|
||||
/***********************************************************
|
||||
Parses log records from a buffer and stores them to a hash table to wait
|
||||
merging to file pages. If the hash table becomes too full, applies it
|
||||
automatically to file pages. */
|
||||
|
||||
void
|
||||
merging to file pages. */
|
||||
static
|
||||
ibool
|
||||
recv_parse_log_recs(
|
||||
/*================*/
|
||||
/* out: TRUE if the hash table of parsed log
|
||||
records became full */
|
||||
ibool store_to_hash) /* in: TRUE if the records should be stored
|
||||
to the hash table; this is set to FALSE if just
|
||||
debug checking is needed */
|
||||
@ -1462,7 +1742,7 @@ loop:
|
||||
|
||||
if (ptr == end_ptr) {
|
||||
|
||||
return;
|
||||
return(FALSE);
|
||||
}
|
||||
|
||||
single_rec = (ulint)*ptr & MLOG_SINGLE_REC_FLAG;
|
||||
@ -1476,7 +1756,7 @@ loop:
|
||||
&page_no, &body);
|
||||
if (len == 0) {
|
||||
|
||||
return;
|
||||
return(FALSE);
|
||||
}
|
||||
|
||||
new_recovered_lsn = recv_calc_lsn_on_data_add(old_lsn, len);
|
||||
@ -1487,7 +1767,7 @@ loop:
|
||||
that also the next log block should have been scanned
|
||||
in */
|
||||
|
||||
return;
|
||||
return(FALSE);
|
||||
}
|
||||
|
||||
recv_sys->recovered_offset += len;
|
||||
@ -1529,7 +1809,7 @@ loop:
|
||||
&page_no, &body);
|
||||
if (len == 0) {
|
||||
|
||||
return;
|
||||
return(FALSE);
|
||||
}
|
||||
|
||||
if ((!store_to_hash) && (type != MLOG_MULTI_REC_END)) {
|
||||
@ -1570,27 +1850,9 @@ loop:
|
||||
that also the next log block should have been scanned
|
||||
in */
|
||||
|
||||
return;
|
||||
return(FALSE);
|
||||
}
|
||||
|
||||
if (2 * n_recs * (sizeof(recv_t) + sizeof(recv_addr_t))
|
||||
+ total_len
|
||||
+ mem_heap_get_size(recv_sys->heap)
|
||||
+ RECV_POOL_N_FREE_BLOCKS * UNIV_PAGE_SIZE
|
||||
> buf_pool_get_curr_size()) {
|
||||
|
||||
/* Hash table of log records will grow too big:
|
||||
empty it */
|
||||
|
||||
recv_apply_hashed_log_recs(FALSE);
|
||||
}
|
||||
|
||||
ut_ad(2 * n_recs * (sizeof(recv_t) + sizeof(recv_addr_t))
|
||||
+ total_len
|
||||
+ mem_heap_get_size(recv_sys->heap)
|
||||
+ RECV_POOL_N_FREE_BLOCKS * UNIV_PAGE_SIZE
|
||||
< buf_pool_get_curr_size());
|
||||
|
||||
/* Add all the records to the hash table */
|
||||
|
||||
ptr = recv_sys->buf + recv_sys->recovered_offset;
|
||||
@ -1627,18 +1889,7 @@ loop:
|
||||
ptr += len;
|
||||
}
|
||||
}
|
||||
|
||||
if (store_to_hash && buf_get_free_list_len()
|
||||
< RECV_POOL_N_FREE_BLOCKS) {
|
||||
|
||||
/* Hash table of log records has grown too big: empty it;
|
||||
FALSE means no ibuf operations allowed, as we cannot add
|
||||
new records to the log yet: they would be produced by ibuf
|
||||
operations */
|
||||
|
||||
recv_apply_hashed_log_recs(FALSE);
|
||||
}
|
||||
|
||||
|
||||
goto loop;
|
||||
}
|
||||
|
||||
@ -1713,7 +1964,7 @@ recv_sys_add_to_parsing_buf(
|
||||
|
||||
recv_sys->len += end_offset - start_offset;
|
||||
|
||||
ut_ad(recv_sys->len <= RECV_PARSING_BUF_SIZE);
|
||||
ut_a(recv_sys->len <= RECV_PARSING_BUF_SIZE);
|
||||
}
|
||||
|
||||
return(TRUE);
|
||||
@ -1743,6 +1994,13 @@ recv_scan_log_recs(
|
||||
/*===============*/
|
||||
/* out: TRUE if limit_lsn has been reached, or
|
||||
not able to scan any more in this log group */
|
||||
ibool apply_automatically,/* in: TRUE if we want this function to
|
||||
apply log records automatically when the
|
||||
hash table becomes full; in the hot backup tool
|
||||
the tool does the applying, not this
|
||||
function */
|
||||
ulint available_memory,/* in: we let the hash table of recs to grow
|
||||
to this size, at the maximum */
|
||||
ibool store_to_hash, /* in: TRUE if the records should be stored
|
||||
to the hash table; this is set to FALSE if just
|
||||
debug checking is needed */
|
||||
@ -1764,7 +2022,9 @@ recv_scan_log_recs(
|
||||
ut_ad(ut_dulint_get_low(start_lsn) % OS_FILE_LOG_BLOCK_SIZE == 0);
|
||||
ut_ad(len % OS_FILE_LOG_BLOCK_SIZE == 0);
|
||||
ut_ad(len > 0);
|
||||
|
||||
ut_a(apply_automatically <= TRUE);
|
||||
ut_a(store_to_hash <= TRUE);
|
||||
|
||||
finished = FALSE;
|
||||
|
||||
log_block = buf;
|
||||
@ -1845,6 +2105,13 @@ recv_scan_log_recs(
|
||||
/* We were able to find more log data: add it to the
|
||||
parsing buffer if parse_start_lsn is already non-zero */
|
||||
|
||||
if (recv_sys->len + 4 * OS_FILE_LOG_BLOCK_SIZE
|
||||
>= RECV_PARSING_BUF_SIZE) {
|
||||
fprintf(stderr,
|
||||
"InnoDB: Error: log parsing buffer overflow. Recovery may have failed!\n");
|
||||
finished = TRUE;
|
||||
}
|
||||
|
||||
more_data = recv_sys_add_to_parsing_buf(log_block,
|
||||
scanned_lsn);
|
||||
recv_sys->scanned_lsn = scanned_lsn;
|
||||
@ -1863,25 +2130,36 @@ recv_scan_log_recs(
|
||||
|
||||
*group_scanned_lsn = scanned_lsn;
|
||||
|
||||
if (more_data) {
|
||||
if (recv_needed_recovery || recv_is_from_backup) {
|
||||
recv_scan_print_counter++;
|
||||
|
||||
if (recv_scan_print_counter < 10
|
||||
|| (recv_scan_print_counter % 10 == 0)) {
|
||||
if (finished || (recv_scan_print_counter % 80 == 0)) {
|
||||
|
||||
fprintf(stderr,
|
||||
"InnoDB: Doing recovery: scanned up to log sequence number %lu %lu\n",
|
||||
ut_dulint_get_high(*group_scanned_lsn),
|
||||
ut_dulint_get_low(*group_scanned_lsn));
|
||||
if (recv_scan_print_counter == 10) {
|
||||
fprintf(stderr,
|
||||
"InnoDB: After this prints a line for every 10th scan sweep:\n");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (more_data) {
|
||||
/* Try to parse more log records */
|
||||
|
||||
recv_parse_log_recs(store_to_hash);
|
||||
|
||||
if (store_to_hash && mem_heap_get_size(recv_sys->heap)
|
||||
> available_memory
|
||||
&& apply_automatically) {
|
||||
|
||||
/* Hash table of log records has grown too big:
|
||||
empty it; FALSE means no ibuf operations
|
||||
allowed, as we cannot add new records to the
|
||||
log yet: they would be produced by ibuf
|
||||
operations */
|
||||
|
||||
recv_apply_hashed_log_recs(FALSE);
|
||||
}
|
||||
|
||||
if (recv_sys->recovered_offset > RECV_PARSING_BUF_SIZE / 4) {
|
||||
/* Move parsing buffer data to the buffer start */
|
||||
|
||||
@ -1918,10 +2196,12 @@ recv_group_scan_log_recs(
|
||||
log_group_read_log_seg(LOG_RECOVER, log_sys->buf,
|
||||
group, start_lsn, end_lsn);
|
||||
|
||||
finished = recv_scan_log_recs(TRUE, log_sys->buf,
|
||||
RECV_SCAN_SIZE, start_lsn,
|
||||
contiguous_lsn,
|
||||
group_scanned_lsn);
|
||||
finished = recv_scan_log_recs(TRUE,
|
||||
buf_pool_get_curr_size()
|
||||
- RECV_POOL_N_FREE_BLOCKS * UNIV_PAGE_SIZE,
|
||||
TRUE, log_sys->buf,
|
||||
RECV_SCAN_SIZE, start_lsn,
|
||||
contiguous_lsn, group_scanned_lsn);
|
||||
start_lsn = end_lsn;
|
||||
}
|
||||
|
||||
@ -1969,7 +2249,7 @@ recv_recovery_from_checkpoint_start(
|
||||
if (type == LOG_CHECKPOINT) {
|
||||
|
||||
recv_sys_create();
|
||||
recv_sys_init();
|
||||
recv_sys_init(FALSE, buf_pool_get_curr_size());
|
||||
}
|
||||
|
||||
if (srv_force_recovery >= SRV_FORCE_NO_LOG_REDO) {
|
||||
@ -2280,6 +2560,84 @@ recv_reset_logs(
|
||||
mutex_enter(&(log_sys->mutex));
|
||||
}
|
||||
|
||||
/**********************************************************
Creates new log files after a backup has been restored. */

void
recv_reset_log_files_for_backup(
/*============================*/
	char*	log_dir,	/* in: log file directory path */
	ulint	n_log_files,	/* in: number of log files */
	ulint	log_file_size,	/* in: log file size */
	dulint	lsn)		/* in: new start lsn, must be divisible by
				OS_FILE_LOG_BLOCK_SIZE */
{
	os_file_t	log_file;
	ibool		success;
	byte*		buf;
	ulint		i;
	char		name[5000];

	buf = ut_malloc(LOG_FILE_HDR_SIZE + OS_FILE_LOG_BLOCK_SIZE);

	for (i = 0; i < n_log_files; i++) {

		sprintf(name, "%sib_logfile%lu", log_dir, i);

		log_file = os_file_create_simple(name, OS_FILE_CREATE,
						OS_FILE_READ_WRITE, &success);
		if (!success) {
			printf(
"InnoDB: Cannot create %s. Check that the file does not exist yet.\n", name);

			exit(1);
		}

		printf(
"Setting log file size to %lu %lu\n", ut_get_high32(log_file_size),
			log_file_size & 0xFFFFFFFF);

		success = os_file_set_size(name, log_file,
					log_file_size & 0xFFFFFFFF,
					ut_get_high32(log_file_size));

		if (!success) {
			printf(
"InnoDB: Cannot set %s size to %lu %lu\n", name, ut_get_high32(log_file_size),
				log_file_size & 0xFFFFFFFF);
			exit(1);
		}

		os_file_flush(log_file);
		os_file_close(log_file);
	}

	/* We pretend there is a checkpoint at lsn + LOG_BLOCK_HDR_SIZE */

	log_reset_first_header_and_checkpoint(buf,
				ut_dulint_add(lsn, LOG_BLOCK_HDR_SIZE));

	log_block_init(buf + LOG_FILE_HDR_SIZE, lsn);
	log_block_set_first_rec_group(buf + LOG_FILE_HDR_SIZE,
				LOG_BLOCK_HDR_SIZE);
	sprintf(name, "%sib_logfile%lu", log_dir, 0);

	log_file = os_file_create_simple(name, OS_FILE_OPEN,
					OS_FILE_READ_WRITE, &success);
	if (!success) {
		printf("InnoDB: Cannot open %s.\n", name);

		exit(1);
	}

	os_file_write(name, log_file, buf, 0, 0,
			LOG_FILE_HDR_SIZE + OS_FILE_LOG_BLOCK_SIZE);
	os_file_flush(log_file);
	os_file_close(log_file);

	ut_free(buf);
}
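
/* Editorial sketch (not part of the changeset): the function above names the
new files ib_logfile0 .. ib_logfileN-1 under log_dir and hands the file size
to os_file_set_size() as two 32-bit halves.  A stand-alone illustration of
that naming and size split (the directory and sizes here are invented): */

#include <stdio.h>

int
main(void)
{
	const char*		log_dir		= "/var/lib/mysql/";
	unsigned long		n_log_files	= 2;
	unsigned long long	log_file_size	= 5ULL * 1024 * 1024 * 1024;
	unsigned long		i;
	char			name[5000];

	for (i = 0; i < n_log_files; i++) {
		sprintf(name, "%sib_logfile%lu", log_dir, i);

		printf("%s: size low %lu high %lu\n", name,
			(unsigned long)(log_file_size & 0xFFFFFFFFULL),
			(unsigned long)(log_file_size >> 32));
	}

	return(0);
}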
|
||||
|
||||
/**********************************************************
|
||||
Reads from the archive of a log group and performs recovery. */
|
||||
static
|
||||
@ -2296,13 +2654,13 @@ log_group_recover_from_archive_file(
|
||||
dulint dummy_lsn;
|
||||
dulint scanned_lsn;
|
||||
ulint len;
|
||||
char name[10000];
|
||||
ibool ret;
|
||||
byte* buf;
|
||||
ulint read_offset;
|
||||
ulint file_size;
|
||||
ulint file_size_high;
|
||||
int input_char;
|
||||
char name[10000];
|
||||
|
||||
try_open_again:
|
||||
buf = log_sys->buf;
|
||||
@ -2438,9 +2796,11 @@ ask_again:
|
||||
group->archive_space_id, read_offset / UNIV_PAGE_SIZE,
|
||||
read_offset % UNIV_PAGE_SIZE, len, buf, NULL);
|
||||
|
||||
|
||||
ret = recv_scan_log_recs(TRUE, buf, len, start_lsn,
|
||||
&dummy_lsn, &scanned_lsn);
|
||||
ret = recv_scan_log_recs(TRUE,
|
||||
buf_pool_get_curr_size() -
|
||||
RECV_POOL_N_FREE_BLOCKS * UNIV_PAGE_SIZE,
|
||||
TRUE, buf, len, start_lsn,
|
||||
&dummy_lsn, &scanned_lsn);
|
||||
|
||||
if (ut_dulint_cmp(scanned_lsn, file_end_lsn) == 0) {
|
||||
|
||||
@ -2485,7 +2845,7 @@ recv_recovery_from_archive_start(
|
||||
ulint err;
|
||||
|
||||
recv_sys_create();
|
||||
recv_sys_init();
|
||||
recv_sys_init(FALSE, buf_pool_get_curr_size());
|
||||
|
||||
sync_order_checks_on = TRUE;
|
||||
|
||||
|
@ -234,7 +234,8 @@ mem_heap_add_block(
|
||||
new_size = 2 * mem_block_get_len(block);
|
||||
|
||||
if (heap->type != MEM_HEAP_DYNAMIC) {
|
||||
ut_ad(n <= MEM_MAX_ALLOC_IN_BUF);
|
||||
/* From the buffer pool we allocate buffer frames */
|
||||
ut_a(n <= MEM_MAX_ALLOC_IN_BUF);
|
||||
|
||||
if (new_size > MEM_MAX_ALLOC_IN_BUF) {
|
||||
new_size = MEM_MAX_ALLOC_IN_BUF;
|
||||
@ -249,7 +250,7 @@ mem_heap_add_block(
|
||||
}
|
||||
|
||||
new_block = mem_heap_create_block(heap, new_size, NULL, heap->type,
|
||||
heap->file_name, heap->line);
|
||||
heap->file_name, heap->line);
|
||||
if (new_block == NULL) {
|
||||
|
||||
return(NULL);
|
||||
|
@ -10,17 +10,22 @@ Created 10/21/1995 Heikki Tuuri
|
||||
#include "os0sync.h"
|
||||
#include "ut0mem.h"
|
||||
#include "srv0srv.h"
|
||||
#include "trx0sys.h"
|
||||
#include "fil0fil.h"
|
||||
|
||||
#undef HAVE_FDATASYNC
|
||||
|
||||
#undef UNIV_NON_BUFFERED_IO
|
||||
|
||||
#ifdef POSIX_ASYNC_IO
|
||||
/* We assume in this case that the OS has standard Posix aio (at least SunOS
|
||||
2.6, HP-UX 11i and AIX 4.3 have) */
|
||||
|
||||
#endif
|
||||
|
||||
/* If the following is set to TRUE, we do not call os_file_flush in every
|
||||
os_file_write. We can set this TRUE if the doublewrite buffer is used. */
|
||||
ibool os_do_not_call_flush_at_each_write = FALSE;
|
||||
|
||||
/* We use these mutexes to protect lseek + file i/o operation, if the
|
||||
OS does not provide an atomic pread or pwrite, or similar */
|
||||
#define OS_FILE_N_SEEK_MUTEXES 16
|
||||
@ -118,6 +123,9 @@ ulint os_n_file_writes_old = 0;
|
||||
ulint os_n_fsyncs_old = 0;
|
||||
time_t os_last_printout;
|
||||
|
||||
ibool os_has_said_disk_full = FALSE;
|
||||
|
||||
|
||||
/***************************************************************************
|
||||
Gets the operating system version. Currently works only on Windows. */
|
||||
|
||||
@ -167,27 +175,28 @@ os_file_get_last_error(void)
|
||||
|
||||
err = (ulint) GetLastError();
|
||||
|
||||
if (err != ERROR_FILE_EXISTS) {
|
||||
fprintf(stderr,
|
||||
"InnoDB: Operating system error number %li in a file operation.\n"
|
||||
if (err != ERROR_FILE_EXISTS && err != ERROR_DISK_FULL) {
|
||||
ut_print_timestamp(stderr);
|
||||
fprintf(stderr,
|
||||
" InnoDB: Operating system error number %li in a file operation.\n"
|
||||
"InnoDB: See http://www.innodb.com/ibman.html for installation help.\n",
|
||||
(long) err);
|
||||
|
||||
if (err == ERROR_PATH_NOT_FOUND) {
|
||||
if (err == ERROR_PATH_NOT_FOUND) {
|
||||
fprintf(stderr,
|
||||
"InnoDB: The error means the system cannot find the path specified.\n"
|
||||
"InnoDB: In installation you must create directories yourself, InnoDB\n"
|
||||
"InnoDB: does not create them.\n");
|
||||
} else if (err == ERROR_ACCESS_DENIED) {
|
||||
} else if (err == ERROR_ACCESS_DENIED) {
|
||||
fprintf(stderr,
|
||||
"InnoDB: The error means mysqld does not have the access rights to\n"
|
||||
"InnoDB: the directory. It may also be you have created a subdirectory\n"
|
||||
"InnoDB: of the same name as a data file.\n");
|
||||
} else {
|
||||
} else {
|
||||
fprintf(stderr,
|
||||
"InnoDB: Look from section 13.2 at http://www.innodb.com/ibman.html\n"
|
||||
"InnoDB: what the error number means.\n");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (err == ERROR_FILE_NOT_FOUND) {
|
||||
@ -202,26 +211,28 @@ os_file_get_last_error(void)
|
||||
#else
|
||||
err = (ulint) errno;
|
||||
|
||||
if (err != EEXIST) {
|
||||
fprintf(stderr,
|
||||
"InnoDB: Operating system error number %li in a file operation.\n"
|
||||
if (err != EEXIST && err != ENOSPC ) {
|
||||
ut_print_timestamp(stderr);
|
||||
|
||||
fprintf(stderr,
|
||||
" InnoDB: Operating system error number %li in a file operation.\n"
|
||||
"InnoDB: See http://www.innodb.com/ibman.html for installation help.\n",
|
||||
(long) err);
|
||||
|
||||
if (err == ENOENT) {
|
||||
if (err == ENOENT) {
|
||||
fprintf(stderr,
|
||||
"InnoDB: The error means the system cannot find the path specified.\n"
|
||||
"InnoDB: In installation you must create directories yourself, InnoDB\n"
|
||||
"InnoDB: does not create them.\n");
|
||||
} else if (err == EACCES) {
|
||||
} else if (err == EACCES) {
|
||||
fprintf(stderr,
|
||||
"InnoDB: The error means mysqld does not have the access rights to\n"
|
||||
"InnoDB: the directory.\n");
|
||||
} else {
|
||||
} else {
|
||||
fprintf(stderr,
|
||||
"InnoDB: Look from section 13.2 at http://www.innodb.com/ibman.html\n"
|
||||
"InnoDB: what the error number means or use the perror program of MySQL.\n");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (err == ENOSPC ) {
|
||||
@ -259,18 +270,26 @@ os_file_handle_error(
|
||||
err = os_file_get_last_error();
|
||||
|
||||
if (err == OS_FILE_DISK_FULL) {
|
||||
fprintf(stderr, "\n");
|
||||
if (name) {
|
||||
fprintf(stderr,
|
||||
"InnoDB: Encountered a problem with file %s.\n",
|
||||
name);
|
||||
}
|
||||
fprintf(stderr,
|
||||
"InnoDB: Cannot continue operation.\n"
|
||||
"InnoDB: Disk is full. Try to clean the disk to free space.\n"
|
||||
"InnoDB: Delete a possible created file and restart.\n");
|
||||
/* We only print a warning about disk full once */
|
||||
|
||||
exit(1);
|
||||
if (os_has_said_disk_full) {
|
||||
|
||||
return(FALSE);
|
||||
}
|
||||
|
||||
if (name) {
|
||||
ut_print_timestamp(stderr);
|
||||
fprintf(stderr,
|
||||
" InnoDB: Encountered a problem with file %s\n", name);
|
||||
}
|
||||
|
||||
ut_print_timestamp(stderr);
|
||||
fprintf(stderr,
|
||||
" InnoDB: Disk is full. Try to clean the disk to free space.\n");
|
||||
|
||||
os_has_said_disk_full = TRUE;
|
||||
|
||||
return(FALSE);
|
||||
|
||||
} else if (err == OS_FILE_AIO_RESOURCES_RESERVED) {
|
||||
return(TRUE);
|
||||
@ -290,6 +309,130 @@ os_file_handle_error(
|
||||
return(FALSE);
|
||||
}
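
/* Editorial sketch (not part of the changeset): the os_has_said_disk_full
flag introduced above turns the old exit(1) on a full disk into a
"warn once, then keep returning FALSE" policy.  The pattern in isolation: */

#include <stdio.h>

static int	has_said_disk_full = 0;	/* module-level latch, as above */

static int
handle_disk_full(const char* name)
{
	if (has_said_disk_full) {
		return(0);	/* caller still sees the failure, no more noise */
	}

	fprintf(stderr, "Encountered a problem with file %s\n", name);
	fprintf(stderr, "Disk is full. Try to clean the disk to free space.\n");

	has_said_disk_full = 1;

	return(0);
}

int
main(void)
{
	handle_disk_full("ibdata1");	/* prints the warning */
	handle_disk_full("ibdata1");	/* silent */
	return(0);
}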

/********************************************************************
Creates the seek mutexes used in positioned reads and writes. */

void
os_io_init_simple(void)
/*===================*/
{
	ulint	i;

	for (i = 0; i < OS_FILE_N_SEEK_MUTEXES; i++) {
		os_file_seek_mutexes[i] = os_mutex_create(NULL);
	}
}
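
/* Editorial sketch (not part of the changeset): where the platform lacks an
atomic pread, os0file.c serializes the lseek + read pair with one mutex out
of a small pool, chosen from the file handle, so threads working on
different files rarely contend.  A stand-alone POSIX illustration (the names
below are invented for the example): */

#include <pthread.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

#define N_SEEK_MUTEXES	16

static pthread_mutex_t	seek_mutexes[N_SEEK_MUTEXES];

/* Mirrors os_io_init_simple(): create the pool of seek mutexes. */
static void
io_init_simple(void)
{
	int	i;

	for (i = 0; i < N_SEEK_MUTEXES; i++) {
		pthread_mutex_init(&seek_mutexes[i], NULL);
	}
}

/* Positioned read emulated with lseek + read, made atomic per file by
taking the mutex whose index is derived from the file descriptor. */
static ssize_t
pread_emulated(int fd, void* buf, size_t n, off_t offset)
{
	pthread_mutex_t*	m = &seek_mutexes[fd % N_SEEK_MUTEXES];
	ssize_t			ret;

	pthread_mutex_lock(m);

	if (lseek(fd, offset, SEEK_SET) == (off_t) -1) {
		pthread_mutex_unlock(m);
		return(-1);
	}

	ret = read(fd, buf, n);

	pthread_mutex_unlock(m);

	return(ret);
}

int
main(void)
{
	char	buf[6] = { 0 };
	FILE*	f = tmpfile();

	io_init_simple();

	fputs("hello world", f);
	fflush(f);

	if (pread_emulated(fileno(f), buf, 5, 6) == 5) {
		printf("read back: %s\n", buf);	/* prints "world" */
	}

	fclose(f);
	return(0);
}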
|
||||
|
||||
/********************************************************************
|
||||
A simple function to open or create a file. */
|
||||
|
||||
os_file_t
|
||||
os_file_create_simple(
|
||||
/*==================*/
|
||||
/* out, own: handle to the file, not defined if error,
|
||||
error number can be retrieved with os_get_last_error */
|
||||
char* name, /* in: name of the file or path as a null-terminated
|
||||
string */
|
||||
ulint create_mode,/* in: OS_FILE_OPEN if an existing file is opened
|
||||
(if does not exist, error), or OS_FILE_CREATE if a new
|
||||
file is created (if exists, error) */
|
||||
ulint access_type,/* in: OS_FILE_READ_ONLY or OS_FILE_READ_WRITE */
|
||||
ibool* success)/* out: TRUE if succeed, FALSE if error */
|
||||
{
|
||||
#ifdef __WIN__
|
||||
os_file_t file;
|
||||
DWORD create_flag;
|
||||
DWORD access;
|
||||
DWORD attributes = 0;
|
||||
ibool retry;
|
||||
|
||||
try_again:
|
||||
ut_a(name);
|
||||
|
||||
if (create_mode == OS_FILE_OPEN) {
|
||||
create_flag = OPEN_EXISTING;
|
||||
} else if (create_mode == OS_FILE_CREATE) {
|
||||
create_flag = CREATE_NEW;
|
||||
} else {
|
||||
create_flag = 0;
|
||||
ut_error;
|
||||
}
|
||||
|
||||
if (access_type == OS_FILE_READ_ONLY) {
|
||||
access = GENERIC_READ;
|
||||
} else if (access_type == OS_FILE_READ_WRITE) {
|
||||
access = GENERIC_READ | GENERIC_WRITE;
|
||||
} else {
|
||||
access = 0;
|
||||
ut_error;
|
||||
}
|
||||
|
||||
file = CreateFile(name,
|
||||
access,
|
||||
FILE_SHARE_READ | FILE_SHARE_WRITE,
|
||||
/* file can be read and written
|
||||
also by other processes */
|
||||
NULL, /* default security attributes */
|
||||
create_flag,
|
||||
attributes,
|
||||
NULL); /* no template file */
|
||||
|
||||
if (file == INVALID_HANDLE_VALUE) {
|
||||
*success = FALSE;
|
||||
|
||||
retry = os_file_handle_error(file, name);
|
||||
|
||||
if (retry) {
|
||||
goto try_again;
|
||||
}
|
||||
} else {
|
||||
*success = TRUE;
|
||||
}
|
||||
|
||||
return(file);
|
||||
#else
|
||||
os_file_t file;
|
||||
int create_flag;
|
||||
ibool retry;
|
||||
|
||||
try_again:
|
||||
ut_a(name);
|
||||
|
||||
if (create_mode == OS_FILE_OPEN) {
|
||||
if (access_type == OS_FILE_READ_ONLY) {
|
||||
create_flag = O_RDONLY;
|
||||
} else {
|
||||
create_flag = O_RDWR;
|
||||
}
|
||||
} else if (create_mode == OS_FILE_CREATE) {
|
||||
create_flag = O_RDWR | O_CREAT | O_EXCL;
|
||||
} else {
|
||||
create_flag = 0;
|
||||
ut_error;
|
||||
}
|
||||
|
||||
if (create_mode == OS_FILE_CREATE) {
|
||||
file = open(name, create_flag, S_IRUSR | S_IWUSR | S_IRGRP
|
||||
| S_IWGRP | S_IROTH | S_IWOTH);
|
||||
} else {
|
||||
file = open(name, create_flag);
|
||||
}
|
||||
|
||||
if (file == -1) {
|
||||
*success = FALSE;
|
||||
|
||||
retry = os_file_handle_error(file, name);
|
||||
|
||||
if (retry) {
|
||||
goto try_again;
|
||||
}
|
||||
} else {
|
||||
*success = TRUE;
|
||||
}
|
||||
|
||||
return(file);
|
||||
#endif
|
||||
}
|
||||
/********************************************************************
|
||||
Opens an existing file or creates a new. */
|
||||
|
||||
@ -355,8 +498,9 @@ try_again:
|
||||
file = CreateFile(name,
|
||||
GENERIC_READ | GENERIC_WRITE, /* read and write
|
||||
access */
|
||||
FILE_SHARE_READ,/* file can be read by other
|
||||
processes */
|
||||
FILE_SHARE_READ | FILE_SHARE_WRITE,
|
||||
/* file can be read and written
|
||||
also by other processes */
|
||||
NULL, /* default security attributes */
|
||||
create_flag,
|
||||
attributes,
|
||||
@ -494,6 +638,11 @@ os_file_get_size(
|
||||
|
||||
offs = lseek(file, 0, SEEK_END);
|
||||
|
||||
if (offs == ((off_t)-1)) {
|
||||
|
||||
return(FALSE);
|
||||
}
|
||||
|
||||
#if SIZEOF_OFF_T > 4
|
||||
*size = (ulint)(offs & 0xFFFFFFFF);
|
||||
*size_high = (ulint)(offs >> 32);
|
||||
@ -523,13 +672,11 @@ os_file_set_size(
|
||||
ib_longlong low;
|
||||
ulint n_bytes;
|
||||
ibool ret;
|
||||
ibool retry;
|
||||
byte* buf;
|
||||
ulint i;
|
||||
|
||||
ut_a(size == (size & 0xFFFFFFFF));
|
||||
|
||||
try_again:
|
||||
/* We use a very big 8 MB buffer in writing because Linux may be
|
||||
extremely slow in fsync on 1 MB writes */
|
||||
|
||||
@ -570,14 +717,6 @@ try_again:
|
||||
}
|
||||
|
||||
error_handling:
|
||||
retry = os_file_handle_error(file, name);
|
||||
|
||||
if (retry) {
|
||||
goto try_again;
|
||||
}
|
||||
|
||||
ut_error;
|
||||
|
||||
return(FALSE);
|
||||
}
|
||||
|
||||
@ -722,8 +861,7 @@ os_file_pwrite(
|
||||
64-bit address */
|
||||
|
||||
if (sizeof(off_t) > 4) {
|
||||
offs = (off_t)offset + (((off_t)offset_high) << 32);
|
||||
|
||||
offs = (off_t)offset + (((off_t)offset_high) << 32);
|
||||
} else {
|
||||
offs = (off_t)offset;
|
||||
|
||||
@ -740,8 +878,8 @@ os_file_pwrite(
|
||||
|
||||
if (srv_unix_file_flush_method != SRV_UNIX_LITTLESYNC
|
||||
&& srv_unix_file_flush_method != SRV_UNIX_NOSYNC
|
||||
&& !trx_doublewrite) {
|
||||
|
||||
&& !os_do_not_call_flush_at_each_write) {
|
||||
|
||||
/* Always do fsync to reduce the probability that when
|
||||
the OS crashes, a database page is only partially
|
||||
physically written to disk. */
|
||||
@ -771,7 +909,7 @@ os_file_pwrite(
|
||||
|
||||
if (srv_unix_file_flush_method != SRV_UNIX_LITTLESYNC
|
||||
&& srv_unix_file_flush_method != SRV_UNIX_NOSYNC
|
||||
&& !trx_doublewrite) {
|
||||
&& !os_do_not_call_flush_at_each_write) {
|
||||
|
||||
/* Always do fsync to reduce the probability that when
|
||||
the OS crashes, a database page is only partially
|
||||
@ -896,13 +1034,12 @@ os_file_write(
|
||||
DWORD ret2;
|
||||
DWORD low;
|
||||
DWORD high;
|
||||
ibool retry;
|
||||
ulint i;
|
||||
|
||||
ut_a((offset & 0xFFFFFFFF) == offset);
|
||||
|
||||
os_n_file_writes++;
|
||||
try_again:
|
||||
|
||||
ut_ad(file);
|
||||
ut_ad(buf);
|
||||
ut_ad(n > 0);
|
||||
@ -921,7 +1058,15 @@ try_again:
|
||||
|
||||
os_mutex_exit(os_file_seek_mutexes[i]);
|
||||
|
||||
goto error_handling;
|
||||
ut_print_timestamp(stderr);
|
||||
|
||||
fprintf(stderr,
|
||||
" InnoDB: Error: File pointer positioning to file %s failed at\n"
|
||||
"InnoDB: offset %lu %lu. Operating system error number %lu.\n",
|
||||
name, offset_high, offset,
|
||||
(ulint)GetLastError());
|
||||
|
||||
return(FALSE);
|
||||
}
|
||||
|
||||
ret = WriteFile(file, buf, n, &len, NULL);
|
||||
@ -929,38 +1074,61 @@ try_again:
|
||||
/* Always do fsync to reduce the probability that when the OS crashes,
|
||||
a database page is only partially physically written to disk. */
|
||||
|
||||
if (!trx_doublewrite) {
|
||||
if (!os_do_not_call_flush_at_each_write) {
|
||||
ut_a(TRUE == os_file_flush(file));
|
||||
}
|
||||
|
||||
os_mutex_exit(os_file_seek_mutexes[i]);
|
||||
|
||||
if (ret && len == n) {
|
||||
|
||||
return(TRUE);
|
||||
}
|
||||
|
||||
if (!os_has_said_disk_full) {
|
||||
|
||||
ut_print_timestamp(stderr);
|
||||
|
||||
fprintf(stderr,
|
||||
" InnoDB: Error: Write to file %s failed at offset %lu %lu.\n"
|
||||
"InnoDB: %lu bytes should have been written, only %lu were written.\n"
|
||||
"InnoDB: Operating system error number %lu.\n"
|
||||
"InnoDB: Check that your OS and file system support files of this size.\n"
|
||||
"InnoDB: Check also the disk is not full or a disk quota exceeded.\n",
|
||||
name, offset_high, offset, n, len,
|
||||
(ulint)GetLastError());
|
||||
|
||||
os_has_said_disk_full = TRUE;
|
||||
}
|
||||
|
||||
return(FALSE);
|
||||
#else
|
||||
ibool retry;
|
||||
ssize_t ret;
|
||||
|
||||
try_again:
|
||||
ret = os_file_pwrite(file, buf, n, offset, offset_high);
|
||||
|
||||
if ((ulint)ret == n) {
|
||||
|
||||
return(TRUE);
|
||||
}
|
||||
#endif
|
||||
#ifdef __WIN__
|
||||
error_handling:
|
||||
#endif
|
||||
retry = os_file_handle_error(file, name);
|
||||
|
||||
if (retry) {
|
||||
goto try_again;
|
||||
}
|
||||
if (!os_has_said_disk_full) {
|
||||
|
||||
ut_error;
|
||||
ut_print_timestamp(stderr);
|
||||
|
||||
return(FALSE);
|
||||
fprintf(stderr,
|
||||
" InnoDB: Error: Write to file %s failed at offset %lu %lu.\n"
|
||||
"InnoDB: %lu bytes should have been written, only %lu were written.\n"
|
||||
"InnoDB: Operating system error number %lu.\n"
|
||||
"InnoDB: Check that your OS and file system support files of this size.\n"
|
||||
"InnoDB: Check also the disk is not full or a disk quota exceeded.\n",
|
||||
name, offset_high, offset, n, ret, (ulint)errno);
|
||||
|
||||
os_has_said_disk_full = TRUE;
|
||||
}
|
||||
|
||||
return(FALSE);
|
||||
#endif
|
||||
}
|
||||
|
||||
/********************************************************************
|
||||
@ -1031,7 +1199,8 @@ os_aio_array_create(
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
Initializes the asynchronous io system. Creates separate aio array for
|
||||
Initializes the asynchronous io system. Calls also os_io_init_simple.
|
||||
Creates a separate aio array for
|
||||
non-ibuf read and write, a third aio array for the ibuf i/o, with just one
|
||||
segment, two aio arrays for log reads and writes with one segment, and a
|
||||
synchronous aio array of the specified size. The combined number of segments
|
||||
@ -1058,6 +1227,8 @@ os_aio_init(
|
||||
ut_ad(n % n_segments == 0);
|
||||
ut_ad(n_segments >= 4);
|
||||
|
||||
os_io_init_simple();
|
||||
|
||||
n_per_seg = n / n_segments;
|
||||
n_write_segs = (n_segments - 2) / 2;
|
||||
n_read_segs = n_segments - 2 - n_write_segs;
|
||||
@ -1078,10 +1249,6 @@ os_aio_init(
|
||||
|
||||
os_aio_validate();
|
||||
|
||||
for (i = 0; i < OS_FILE_N_SEEK_MUTEXES; i++) {
|
||||
os_file_seek_mutexes[i] = os_mutex_create(NULL);
|
||||
}
|
||||
|
||||
os_aio_segment_wait_events = ut_malloc(n_segments * sizeof(void*));
|
||||
|
||||
for (i = 0; i < n_segments; i++) {
|
||||
@ -1739,7 +1906,8 @@ os_aio_windows_handle(
|
||||
if (ret && len == slot->len) {
|
||||
ret_val = TRUE;
|
||||
|
||||
if (slot->type == OS_FILE_WRITE && !trx_doublewrite) {
|
||||
if (slot->type == OS_FILE_WRITE
|
||||
&& !os_do_not_call_flush_at_each_write) {
|
||||
ut_a(TRUE == os_file_flush(slot->file));
|
||||
}
|
||||
} else {
|
||||
@ -1824,7 +1992,8 @@ os_aio_posix_handle(
|
||||
*message1 = slot->message1;
|
||||
*message2 = slot->message2;
|
||||
|
||||
if (slot->type == OS_FILE_WRITE && !trx_doublewrite) {
|
||||
if (slot->type == OS_FILE_WRITE
|
||||
&& !os_do_not_call_flush_at_each_write) {
|
||||
ut_a(TRUE == os_file_flush(slot->file));
|
||||
}
|
||||
|
||||
|
@ -55,7 +55,8 @@ cmp_debug_dtuple_rec_with_match(
|
||||
contains the value for current comparison */
|
||||
/*****************************************************************
|
||||
This function is used to compare two data fields for which the data type
|
||||
is such that we must use MySQL code to compare them. */
|
||||
is such that we must use MySQL code to compare them. The prototype here
|
||||
must be a copy of the one in ha_innobase.cc! */
|
||||
|
||||
int
|
||||
innobase_mysql_cmp(
|
||||
|
@ -391,7 +391,7 @@ row_ins_check_foreign_constraint(
|
||||
/* out: DB_SUCCESS, DB_LOCK_WAIT,
|
||||
DB_NO_REFERENCED_ROW,
|
||||
or DB_ROW_IS_REFERENCED */
|
||||
ibool check_ref,/* in: TRUE If we want to check that
|
||||
ibool check_ref,/* in: TRUE if we want to check that
|
||||
the referenced table is ok, FALSE if we
|
||||
want to to check the foreign key table */
|
||||
dict_foreign_t* foreign,/* in: foreign constraint; NOTE that the
|
||||
@ -411,10 +411,23 @@ row_ins_check_foreign_constraint(
|
||||
ibool moved;
|
||||
int cmp;
|
||||
ulint err;
|
||||
ulint i;
|
||||
mtr_t mtr;
|
||||
|
||||
ut_ad(rw_lock_own(&dict_foreign_key_check_lock, RW_LOCK_SHARED));
|
||||
|
||||
/* If any of the foreign key fields in entry is SQL NULL, we
|
||||
suppress the foreign key check: this is compatible with Oracle,
|
||||
for example */
|
||||
|
||||
for (i = 0; i < foreign->n_fields; i++) {
|
||||
if (UNIV_SQL_NULL == dfield_get_len(
|
||||
dtuple_get_nth_field(entry, i))) {
|
||||
|
||||
return(DB_SUCCESS);
|
||||
}
|
||||
}
|
||||
|
||||
if (check_ref) {
|
||||
check_table = foreign->referenced_table;
|
||||
check_index = foreign->referenced_index;
|
||||
@ -591,6 +604,8 @@ row_ins_scan_sec_index_for_duplicate(
|
||||
dtuple_t* entry, /* in: index entry */
|
||||
que_thr_t* thr) /* in: query thread */
|
||||
{
|
||||
ulint n_unique;
|
||||
ulint i;
|
||||
int cmp;
|
||||
ulint n_fields_cmp;
|
||||
rec_t* rec;
|
||||
@ -599,6 +614,20 @@ row_ins_scan_sec_index_for_duplicate(
|
||||
ibool moved;
|
||||
mtr_t mtr;
|
||||
|
||||
n_unique = dict_index_get_n_unique(index);
|
||||
|
||||
/* If the secondary index is unique, but one of the fields in the
|
||||
n_unique first fields is NULL, a unique key violation cannot occur,
|
||||
since we define NULL != NULL in this case */
|
||||
|
||||
for (i = 0; i < n_unique; i++) {
|
||||
if (UNIV_SQL_NULL == dfield_get_len(
|
||||
dtuple_get_nth_field(entry, i))) {
|
||||
|
||||
return(DB_SUCCESS);
|
||||
}
|
||||
}
|
||||
|
||||
mtr_start(&mtr);
|
||||
|
||||
/* Store old value on n_fields_cmp */
|
||||
|
@ -1886,6 +1886,28 @@ loop:
|
||||
return(err);
|
||||
}
|
||||
|
||||
/*************************************************************************
Checks if a table name contains the string "/#sql" which denotes temporary
tables in MySQL. */
static
ibool
row_is_mysql_tmp_table_name(
/*========================*/
			/* out: TRUE if temporary table */
	char*	name)	/* in: table name in the form 'database/tablename' */
{
	ulint	i;

	for (i = 0; i <= ut_strlen(name) - 5; i++) {
		if (ut_memcmp(name + i, "/#sql", 5) == 0) {

			return(TRUE);
		}
	}

	return(FALSE);
}
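
/* Editorial sketch (not part of the changeset): a stand-alone version of the
same check using the C library, with a guard for names shorter than five
characters (the ut_strlen(name) - 5 bound above is unsigned, so a very short
name would wrap it around): */

#include <stdio.h>
#include <string.h>

static int
is_mysql_tmp_table_name(const char* name)
{
	size_t	len = strlen(name);
	size_t	i;

	if (len < 5) {
		return(0);
	}

	for (i = 0; i <= len - 5; i++) {
		if (memcmp(name + i, "/#sql", 5) == 0) {
			return(1);
		}
	}

	return(0);
}

int
main(void)
{
	/* ALTER TABLE creates intermediate tables named like the first one: */
	printf("%d\n", is_mysql_tmp_table_name("test/#sql-3ade_2"));	/* 1 */
	printf("%d\n", is_mysql_tmp_table_name("test/t1"));		/* 0 */
	return(0);
}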
|
||||
|
||||
/*************************************************************************
|
||||
Renames a table for MySQL. */
|
||||
|
||||
@ -1949,16 +1971,27 @@ row_rename_table_for_mysql(
|
||||
str2 = (char *)
|
||||
"';\nold_table_name := '";
|
||||
|
||||
str3 = (char *)
|
||||
"';\n"
|
||||
"UPDATE SYS_TABLES SET NAME = new_table_name\n"
|
||||
"WHERE NAME = old_table_name;\n"
|
||||
"UPDATE SYS_FOREIGN SET FOR_NAME = new_table_name\n"
|
||||
"WHERE FOR_NAME = old_table_name;\n"
|
||||
"UPDATE SYS_FOREIGN SET REF_NAME = new_table_name\n"
|
||||
"WHERE REF_NAME = old_table_name;\n"
|
||||
"COMMIT WORK;\n"
|
||||
"END;\n";
|
||||
if (row_is_mysql_tmp_table_name(new_name)) {
|
||||
|
||||
/* We want to preserve the original foreign key
|
||||
constraint definitions despite the name change */
|
||||
|
||||
str3 = (char*)
|
||||
"';\n"
|
||||
"UPDATE SYS_TABLES SET NAME = new_table_name\n"
|
||||
"WHERE NAME = old_table_name;\n"
|
||||
"END;\n";
|
||||
} else {
|
||||
str3 = (char*)
|
||||
"';\n"
|
||||
"UPDATE SYS_TABLES SET NAME = new_table_name\n"
|
||||
"WHERE NAME = old_table_name;\n"
|
||||
"UPDATE SYS_FOREIGN SET FOR_NAME = new_table_name\n"
|
||||
"WHERE FOR_NAME = old_table_name;\n"
|
||||
"UPDATE SYS_FOREIGN SET REF_NAME = new_table_name\n"
|
||||
"WHERE REF_NAME = old_table_name;\n"
|
||||
"END;\n";
|
||||
}
|
||||
|
||||
len = ut_strlen(str1);
|
||||
|
||||
@ -2033,7 +2066,32 @@ row_rename_table_for_mysql(
|
||||
trx_general_rollback_for_mysql(trx, FALSE, NULL);
|
||||
trx->error_state = DB_SUCCESS;
|
||||
} else {
|
||||
ut_a(dict_table_rename_in_cache(table, new_name));
|
||||
ut_a(dict_table_rename_in_cache(table, new_name,
|
||||
!row_is_mysql_tmp_table_name(new_name)));
|
||||
|
||||
if (row_is_mysql_tmp_table_name(old_name)) {
|
||||
|
||||
err = dict_load_foreigns(new_name);
|
||||
|
||||
if (err != DB_SUCCESS) {
|
||||
|
||||
ut_print_timestamp(stderr);
|
||||
|
||||
fprintf(stderr,
|
||||
" InnoDB: Error: in ALTER TABLE table %s\n"
|
||||
"InnoDB: has or is referenced in foreign key constraints\n"
|
||||
"InnoDB: which are not compatible with the new table definition.\n",
|
||||
new_name);
|
||||
|
||||
ut_a(dict_table_rename_in_cache(table,
|
||||
old_name, FALSE));
|
||||
|
||||
trx->error_state = DB_SUCCESS;
|
||||
trx_general_rollback_for_mysql(trx, FALSE,
|
||||
NULL);
|
||||
trx->error_state = DB_SUCCESS;
|
||||
}
|
||||
}
|
||||
}
|
||||
funct_exit:
|
||||
mutex_exit(&(dict_sys->mutex));
|
||||
|
@ -2234,7 +2234,7 @@ row_sel_get_clust_rec_for_mysql(
|
||||
(or old_vers) is not rec; in that case we must ignore
|
||||
such row because in our snapshot rec would not have existed.
|
||||
Remember that from rec we cannot see directly which transaction
|
||||
id corrsponds to it: we have to go to the clustered index
|
||||
id corresponds to it: we have to go to the clustered index
|
||||
record. A query where we want to fetch all rows where
|
||||
the secondary index value is in some interval would return
|
||||
a wrong result if we would not drop rows which we come to
|
||||
@ -2245,6 +2245,12 @@ row_sel_get_clust_rec_for_mysql(
|
||||
&& !row_sel_sec_rec_is_for_clust_rec(rec, sec_index,
|
||||
clust_rec, clust_index)) {
|
||||
clust_rec = NULL;
|
||||
} else {
|
||||
#ifdef UNIV_SEARCH_DEBUG
|
||||
ut_a(clust_rec == NULL ||
|
||||
row_sel_sec_rec_is_for_clust_rec(rec, sec_index,
|
||||
clust_rec, clust_index));
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
@ -2400,7 +2406,12 @@ row_sel_try_search_shortcut_for_mysql(
|
||||
|
||||
btr_pcur_open_with_no_init(index, search_tuple, PAGE_CUR_GE,
|
||||
BTR_SEARCH_LEAF, pcur,
|
||||
RW_S_LATCH, mtr);
|
||||
#ifndef UNIV_SEARCH_DEBUG
|
||||
RW_S_LATCH,
|
||||
#else
|
||||
0,
|
||||
#endif
|
||||
mtr);
|
||||
rec = btr_pcur_get_rec(pcur);
|
||||
|
||||
if (!page_rec_is_user_rec(rec)) {
|
||||
@ -2624,15 +2635,18 @@ row_search_for_mysql(
|
||||
|
||||
goto no_shortcut;
|
||||
}
|
||||
|
||||
#ifndef UNIV_SEARCH_DEBUG
|
||||
if (!trx->has_search_latch) {
|
||||
rw_lock_s_lock(&btr_search_latch);
|
||||
trx->has_search_latch = TRUE;
|
||||
}
|
||||
|
||||
#endif
|
||||
shortcut = row_sel_try_search_shortcut_for_mysql(&rec,
|
||||
prebuilt, &mtr);
|
||||
if (shortcut == SEL_FOUND) {
|
||||
#ifdef UNIV_SEARCH_DEBUG
|
||||
ut_a(0 == cmp_dtuple_rec(search_tuple, rec));
|
||||
#endif
|
||||
row_sel_store_mysql_rec(buf, prebuilt, rec);
|
||||
|
||||
mtr_commit(&mtr);
|
||||
@ -2794,7 +2808,9 @@ rec_loop:
|
||||
/* The record matches enough */
|
||||
|
||||
ut_ad(mode == PAGE_CUR_GE);
|
||||
|
||||
#ifdef UNIV_SEARCH_DEBUG
|
||||
ut_a(0 == cmp_dtuple_rec(search_tuple, rec));
|
||||
#endif
|
||||
} else if (match_mode == ROW_SEL_EXACT) {
|
||||
/* Test if the index record matches completely to search_tuple
|
||||
in prebuilt: if not, then we return with DB_RECORD_NOT_FOUND */
|
||||
|
@ -142,7 +142,7 @@ try_again:
|
||||
|
||||
/*************************************************************************
|
||||
Checks if possible foreign key constraints hold after a delete of the record
|
||||
under pcur. NOTE that this function will temporarily commit mtr and lose
|
||||
under pcur. NOTE that this function will temporarily commit mtr and lose the
|
||||
pcur position! */
|
||||
static
|
||||
ulint
|
||||
|
@ -69,13 +69,19 @@ char* srv_main_thread_op_info = (char *) "";
|
||||
names, where the file name itself may also contain a path */
|
||||
|
||||
char* srv_data_home = NULL;
|
||||
char* srv_logs_home = NULL;
|
||||
char* srv_arch_dir = NULL;
|
||||
|
||||
ulint srv_n_data_files = 0;
|
||||
char** srv_data_file_names = NULL;
|
||||
ulint* srv_data_file_sizes = NULL; /* size in database pages */
|
||||
|
||||
ibool srv_auto_extend_last_data_file = FALSE; /* if TRUE, then we
|
||||
auto-extend the last data
|
||||
file */
|
||||
ulint srv_last_file_size_max = 0; /* if != 0, this tells
|
||||
the max size auto-extending
|
||||
may increase the last data
|
||||
file size */
|
||||
ulint* srv_data_file_is_raw_partition = NULL;
|
||||
|
||||
/* If the following is TRUE we do not allow inserts etc. This protects
|
||||
@ -1605,7 +1611,7 @@ srv_read_initfile(
|
||||
|
||||
/*************************************************************************
|
||||
Initializes the server. */
|
||||
static
|
||||
|
||||
void
|
||||
srv_init(void)
|
||||
/*==========*/
|
||||
@ -1673,7 +1679,7 @@ srv_init(void)
|
||||
/*************************************************************************
|
||||
Initializes the synchronization primitives, memory system, and the thread
|
||||
local storage. */
|
||||
static
|
||||
|
||||
void
|
||||
srv_general_init(void)
|
||||
/*==================*/
|
||||
@ -1695,6 +1701,7 @@ srv_conc_enter_innodb(
|
||||
trx_t* trx) /* in: transaction object associated with the
|
||||
thread */
|
||||
{
|
||||
ibool has_slept = FALSE;
|
||||
srv_conc_slot_t* slot;
|
||||
ulint i;
|
||||
|
||||
@ -1712,7 +1719,7 @@ srv_conc_enter_innodb(
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
retry:
|
||||
os_fast_mutex_lock(&srv_conc_mutex);
|
||||
|
||||
if (srv_conc_n_threads < (lint)srv_thread_concurrency) {
|
||||
@ -1725,7 +1732,23 @@ srv_conc_enter_innodb(
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
/* If the transaction is not holding resources, let it sleep
|
||||
for 100 milliseconds, and try again then */
|
||||
|
||||
if (!has_slept && !trx->has_search_latch
|
||||
&& NULL == UT_LIST_GET_FIRST(trx->trx_locks)) {
|
||||
|
||||
has_slept = TRUE; /* We let it sleep only once to avoid
|
||||
starvation */
|
||||
|
||||
os_fast_mutex_unlock(&srv_conc_mutex);
|
||||
|
||||
os_thread_sleep(100000);
|
||||
|
||||
goto retry;
|
||||
}
|
||||
|
||||
/* Too many threads inside: put the current thread to a queue */
|
||||
|
||||
for (i = 0; i < OS_THREAD_MAX_N; i++) {
|
||||
@ -1917,6 +1940,9 @@ srv_normalize_init_values(void)
|
||||
* ((1024 * 1024) / UNIV_PAGE_SIZE);
|
||||
}
|
||||
|
||||
srv_last_file_size_max = srv_last_file_size_max
|
||||
* ((1024 * 1024) / UNIV_PAGE_SIZE);
|
||||
|
||||
srv_log_file_size = srv_log_file_size / UNIV_PAGE_SIZE;
|
||||
|
||||
srv_log_buffer_size = srv_log_buffer_size / UNIV_PAGE_SIZE;
|
||||
|
@ -84,6 +84,308 @@ we may get an assertion failure in os0file.c */
|
||||
|
||||
#define SRV_LOG_SPACE_FIRST_ID 1000000000
|
||||
|
||||
/*************************************************************************
|
||||
Reads the data files and their sizes from a character string given in
|
||||
the .cnf file. */
|
||||
|
||||
ibool
|
||||
srv_parse_data_file_paths_and_sizes(
|
||||
/*================================*/
|
||||
/* out: TRUE if ok, FALSE if parsing
|
||||
error */
|
||||
char* str, /* in: the data file path string */
|
||||
char*** data_file_names, /* out, own: array of data file
|
||||
names */
|
||||
ulint** data_file_sizes, /* out, own: array of data file sizes
|
||||
in megabytes */
|
||||
ulint** data_file_is_raw_partition,/* out, own: array of flags
|
||||
showing which data files are raw
|
||||
partitions */
|
||||
ulint* n_data_files, /* out: number of data files */
|
||||
ibool* is_auto_extending, /* out: TRUE if the last data file is
|
||||
auto-extending */
|
||||
ulint* max_auto_extend_size) /* out: max auto extend size for the
|
||||
last file if specified, 0 if not */
|
||||
{
|
||||
char* input_str;
|
||||
char* endp;
|
||||
char* path;
|
||||
ulint size;
|
||||
ulint i = 0;
|
||||
|
||||
*is_auto_extending = FALSE;
|
||||
*max_auto_extend_size = 0;
|
||||
|
||||
input_str = str;
|
||||
|
||||
/* First calculate the number of data files and check syntax:
|
||||
path:size[M | G];path:size[M | G]... . Note that a Windows path may
|
||||
contain a drive name and a ':'. */
|
||||
|
||||
while (*str != '\0') {
|
||||
path = str;
|
||||
|
||||
while ((*str != ':' && *str != '\0')
|
||||
|| (*str == ':'
|
||||
&& (*(str + 1) == '\\' || *(str + 1) == '/'))) {
|
||||
str++;
|
||||
}
|
||||
|
||||
if (*str == '\0') {
|
||||
return(FALSE);
|
||||
}
|
||||
|
||||
str++;
|
||||
|
||||
size = strtoul(str, &endp, 10);
|
||||
|
||||
str = endp;
|
||||
|
||||
if (*str != 'M' && *str != 'G') {
|
||||
size = size / (1024 * 1024);
|
||||
} else if (*str == 'G') {
|
||||
size = size * 1024;
|
||||
str++;
|
||||
} else {
|
||||
str++;
|
||||
}
|
||||
|
||||
if (strlen(str) >= ut_strlen(":autoextend")
|
||||
&& 0 == ut_memcmp(str, ":autoextend",
|
||||
ut_strlen(":autoextend"))) {
|
||||
|
||||
str += ut_strlen(":autoextend");
|
||||
|
||||
if (strlen(str) >= ut_strlen(":max:")
|
||||
&& 0 == ut_memcmp(str, ":max:",
|
||||
ut_strlen(":max:"))) {
|
||||
|
||||
str += ut_strlen(":max:");
|
||||
|
||||
size = strtoul(str, &endp, 10);
|
||||
|
||||
str = endp;
|
||||
|
||||
if (*str != 'M' && *str != 'G') {
|
||||
size = size / (1024 * 1024);
|
||||
} else if (*str == 'G') {
|
||||
size = size * 1024;
|
||||
str++;
|
||||
} else {
|
||||
str++;
|
||||
}
|
||||
}
|
||||
|
||||
if (*str != '\0') {
|
||||
|
||||
return(FALSE);
|
||||
}
|
||||
}
|
||||
|
||||
if (strlen(str) >= 6
|
||||
&& *str == 'n'
|
||||
&& *(str + 1) == 'e'
|
||||
&& *(str + 2) == 'w') {
|
||||
str += 3;
|
||||
}
|
||||
|
||||
if (strlen(str) >= 3
|
||||
&& *str == 'r'
|
||||
&& *(str + 1) == 'a'
|
||||
&& *(str + 2) == 'w') {
|
||||
str += 3;
|
||||
}
|
||||
|
||||
if (size == 0) {
|
||||
return(FALSE);
|
||||
}
|
||||
|
||||
i++;
|
||||
|
||||
if (*str == ';') {
|
||||
str++;
|
||||
} else if (*str != '\0') {
|
||||
|
||||
return(FALSE);
|
||||
}
|
||||
}
|
||||
|
||||
*data_file_names = (char**)ut_malloc(i * sizeof(void*));
|
||||
*data_file_sizes = (ulint*)ut_malloc(i * sizeof(ulint));
|
||||
*data_file_is_raw_partition = (ulint*)ut_malloc(i * sizeof(ulint));
|
||||
|
||||
*n_data_files = i;
|
||||
|
||||
/* Then store the actual values to our arrays */
|
||||
|
||||
str = input_str;
|
||||
i = 0;
|
||||
|
||||
while (*str != '\0') {
|
||||
path = str;
|
||||
|
||||
/* Note that we must ignore the ':' in a Windows path */
|
||||
|
||||
while ((*str != ':' && *str != '\0')
|
||||
|| (*str == ':'
|
||||
&& (*(str + 1) == '\\' || *(str + 1) == '/'))) {
|
||||
str++;
|
||||
}
|
||||
|
||||
if (*str == ':') {
|
||||
/* Make path a null-terminated string */
|
||||
*str = '\0';
|
||||
str++;
|
||||
}
|
||||
|
||||
size = strtoul(str, &endp, 10);
|
||||
|
||||
str = endp;
|
||||
|
||||
if ((*str != 'M') && (*str != 'G')) {
|
||||
size = size / (1024 * 1024);
|
||||
} else if (*str == 'G') {
|
||||
size = size * 1024;
|
||||
str++;
|
||||
} else {
|
||||
str++;
|
||||
}
|
||||
|
||||
(*data_file_names)[i] = path;
|
||||
(*data_file_sizes)[i] = size;
|
||||
|
||||
if (strlen(str) >= ut_strlen(":autoextend")
|
||||
&& 0 == ut_memcmp(str, ":autoextend",
|
||||
ut_strlen(":autoextend"))) {
|
||||
|
||||
*is_auto_extending = TRUE;
|
||||
|
||||
str += ut_strlen(":autoextend");
|
||||
|
||||
if (strlen(str) >= ut_strlen(":max:")
|
||||
&& 0 == ut_memcmp(str, ":max:",
|
||||
ut_strlen(":max:"))) {
|
||||
|
||||
str += ut_strlen(":max:");
|
||||
|
||||
size = strtoul(str, &endp, 10);
|
||||
|
||||
str = endp;
|
||||
|
||||
if (*str != 'M' && *str != 'G') {
|
||||
size = size / (1024 * 1024);
|
||||
} else if (*str == 'G') {
|
||||
size = size * 1024;
|
||||
str++;
|
||||
} else {
|
||||
str++;
|
||||
}
|
||||
|
||||
*max_auto_extend_size = size;
|
||||
}
|
||||
|
||||
if (*str != '\0') {
|
||||
|
||||
return(FALSE);
|
||||
}
|
||||
}
|
||||
|
||||
(*data_file_is_raw_partition)[i] = 0;
|
||||
|
||||
if (strlen(str) >= 6
|
||||
&& *str == 'n'
|
||||
&& *(str + 1) == 'e'
|
||||
&& *(str + 2) == 'w') {
|
||||
str += 3;
|
||||
(*data_file_is_raw_partition)[i] = SRV_NEW_RAW;
|
||||
}
|
||||
|
||||
if (strlen(str) >= 3
|
||||
&& *str == 'r'
|
||||
&& *(str + 1) == 'a'
|
||||
&& *(str + 2) == 'w') {
|
||||
str += 3;
|
||||
|
||||
if ((*data_file_is_raw_partition)[i] == 0) {
|
||||
(*data_file_is_raw_partition)[i] = SRV_OLD_RAW;
|
||||
}
|
||||
}
|
||||
|
||||
i++;
|
||||
|
||||
if (*str == ';') {
|
||||
str++;
|
||||
}
|
||||
}
|
||||
|
||||
return(TRUE);
|
||||
}
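
/* Editorial sketch (not part of the changeset): strings accepted by the
parser above, in the usual innodb_data_file_path notation.  A size with no
suffix is taken as bytes and converted down to megabytes, 'M' means megabytes
and 'G' gigabytes; ":autoextend" (optionally followed by ":max:SIZE") may
only appear on the last file, while "newraw" and "raw" mark raw partitions: */

	static const char*	data_file_path_examples[] = {
		"ibdata1:128M",
		"ibdata1:2000M;/disk2/ibdata2:2000M:autoextend",
		"ibdata1:2000M;ibdata2:2000M:autoextend:max:4000M",
		"/dev/hdd1:3Gnewraw;/dev/hdd2:2000Mraw",
	};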

/*************************************************************************
Reads log group home directories from a character string given in
the .cnf file. */

ibool
srv_parse_log_group_home_dirs(
/*==========================*/
				/* out: TRUE if ok, FALSE if parsing
				error */
	char*	str,		/* in: character string */
	char***	log_group_home_dirs)	/* out, own: log group home dirs */
{
	char*	input_str;
	char*	path;
	ulint	i = 0;

	input_str = str;

	/* First calculate the number of directories and check syntax:
	path;path;... */

	while (*str != '\0') {
		path = str;

		while (*str != ';' && *str != '\0') {
			str++;
		}

		i++;

		if (*str == ';') {
			str++;
		} else if (*str != '\0') {

			return(FALSE);
		}
	}

	*log_group_home_dirs = (char**) ut_malloc(i * sizeof(void*));

	/* Then store the actual values to our array */

	str = input_str;
	i = 0;

	while (*str != '\0') {
		path = str;

		while (*str != ';' && *str != '\0') {
			str++;
		}

		if (*str == ';') {
			*str = '\0';
			str++;
		}

		(*log_group_home_dirs)[i] = path;

		i++;
	}

	return(TRUE);
}
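
/* Editorial sketch (not part of the changeset): the function above tokenizes
the directory list in place, in two passes: count the entries first, allocate
an exact-sized pointer array, then cut the string at each ';'.  The same idea
with the standard C library only (simplified: it does not reject malformed
input): */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char**
parse_dirs(char* str, size_t* n_dirs)
{
	char**	dirs;
	char*	s;
	size_t	i = 0;

	/* Pass 1: count the ';'-separated components */
	for (s = str; *s != '\0'; s++) {
		if (*s == ';') {
			i++;
		}
	}
	*n_dirs = i + 1;

	dirs = malloc(*n_dirs * sizeof(char*));

	/* Pass 2: terminate each component in place and store its start */
	for (i = 0, s = str; i < *n_dirs; i++) {
		dirs[i] = s;
		s = strchr(s, ';');
		if (s != NULL) {
			*s = '\0';
			s++;
		}
	}

	return(dirs);
}

int
main(void)
{
	char	input[] = "/log1/;/log2/;/log3/";
	size_t	n;
	size_t	i;
	char**	dirs = parse_dirs(input, &n);

	for (i = 0; i < n; i++) {
		printf("%s\n", dirs[i]);
	}

	free(dirs);
	return(0);
}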
|
||||
|
||||
/************************************************************************
|
||||
I/o-handler thread function. */
|
||||
static
|
||||
@ -127,7 +429,7 @@ io_handler_thread(
|
||||
|
||||
/*************************************************************************
|
||||
Normalizes a directory path for Windows: converts slashes to backslashes. */
|
||||
static
|
||||
|
||||
void
|
||||
srv_normalize_path_for_win(
|
||||
/*=======================*/
|
||||
@ -149,7 +451,7 @@ srv_normalize_path_for_win(
|
||||
/*************************************************************************
|
||||
Adds a slash or a backslash to the end of a string if it is missing
|
||||
and the string is not empty. */
|
||||
static
|
||||
|
||||
char*
|
||||
srv_add_path_separator_if_needed(
|
||||
/*=============================*/
|
||||
@ -356,6 +658,7 @@ open_or_create_data_files(
|
||||
ibool one_created = FALSE;
|
||||
ulint size;
|
||||
ulint size_high;
|
||||
ulint rounded_size_pages;
|
||||
char name[10000];
|
||||
|
||||
if (srv_n_data_files >= 1000) {
|
||||
@ -435,17 +738,35 @@ open_or_create_data_files(
|
||||
ret = os_file_get_size(files[i], &size,
|
||||
&size_high);
|
||||
ut_a(ret);
|
||||
/* Round size downward to megabytes */
|
||||
|
||||
/* File sizes in srv_... are given in
|
||||
database pages */
|
||||
rounded_size_pages = (size / (1024 * 1024)
|
||||
+ 4096 * size_high)
|
||||
<< (20 - UNIV_PAGE_SIZE_SHIFT);
|
||||
|
||||
if (size != srv_calc_low32(
|
||||
srv_data_file_sizes[i])
|
||||
|| size_high != srv_calc_high32(
|
||||
srv_data_file_sizes[i])) {
|
||||
if (i == srv_n_data_files - 1
|
||||
&& srv_auto_extend_last_data_file) {
|
||||
|
||||
if (srv_data_file_sizes[i] >
|
||||
rounded_size_pages
|
||||
|| (srv_last_file_size_max > 0
|
||||
&& srv_last_file_size_max <
|
||||
rounded_size_pages)) {
|
||||
|
||||
fprintf(stderr,
|
||||
"InnoDB: Error: data file %s is of a different size\n"
|
||||
"InnoDB: than specified in the .cnf file!\n", name);
|
||||
}
|
||||
|
||||
srv_data_file_sizes[i] =
|
||||
rounded_size_pages;
|
||||
}
|
||||
|
||||
if (rounded_size_pages
|
||||
!= srv_data_file_sizes[i]) {
|
||||
|
||||
fprintf(stderr,
|
||||
"InnoDB: Error: data file %s is of different size\n"
|
||||
"InnoDB: Error: data file %s is of a different size\n"
|
||||
"InnoDB: than specified in the .cnf file!\n", name);
|
||||
|
||||
return(DB_ERROR);
|
||||
@ -479,7 +800,7 @@ open_or_create_data_files(
|
||||
>> (20 - UNIV_PAGE_SIZE_SHIFT)));
|
||||
|
||||
fprintf(stderr,
|
||||
"InnoDB: Database physically writes the file full: wait...\n");
|
||||
"InnoDB: Database physically writes the file full: wait...\n");
|
||||
|
||||
ret = os_file_set_size(name, files[i],
|
||||
srv_calc_low32(srv_data_file_sizes[i]),
|
||||
@ -681,6 +1002,8 @@ innobase_start_or_create_for_mysql(void)
|
||||
os_aio_use_native_aio = TRUE;
|
||||
}
|
||||
#endif
|
||||
os_aio_use_native_aio = FALSE;
|
||||
|
||||
if (!os_aio_use_native_aio) {
|
||||
os_aio_init(4 * SRV_N_PENDING_IOS_PER_THREAD
|
||||
* srv_n_file_io_threads,
|
||||
@ -727,12 +1050,10 @@ innobase_start_or_create_for_mysql(void)
|
||||
return(DB_ERROR);
|
||||
}
|
||||
|
||||
if (sizeof(ulint) == 4
|
||||
&& srv_n_log_files * srv_log_file_size >= 262144) {
|
||||
if (srv_n_log_files * srv_log_file_size >= 262144) {
|
||||
|
||||
fprintf(stderr,
|
||||
"InnoDB: Error: combined size of log files must be < 4 GB\n"
|
||||
"InnoDB: on 32-bit computers\n");
|
||||
"InnoDB: Error: combined size of log files must be < 4 GB\n");
|
||||
|
||||
return(DB_ERROR);
|
||||
}
|
||||
@ -764,7 +1085,6 @@ innobase_start_or_create_for_mysql(void)
|
||||
&max_flushed_lsn, &max_arch_log_no,
|
||||
&sum_of_new_sizes);
|
||||
if (err != DB_SUCCESS) {
|
||||
|
||||
fprintf(stderr, "InnoDB: Could not open data files\n");
|
||||
|
||||
return((int) err);
|
||||
@ -803,9 +1123,9 @@ innobase_start_or_create_for_mysql(void)
|
||||
|| (log_opened && log_created)) {
|
||||
fprintf(stderr,
|
||||
"InnoDB: Error: all log files must be created at the same time.\n"
|
||||
"InnoDB: If you want bigger or smaller log files,\n"
|
||||
"InnoDB: shut down the database and make sure there\n"
|
||||
"InnoDB: were no errors in shutdown.\n"
|
||||
"InnoDB: All log files must be created also in database creation.\n"
|
||||
"InnoDB: If you want bigger or smaller log files, shut down the\n"
|
||||
"InnoDB: database and make sure there were no errors in shutdown.\n"
|
||||
"InnoDB: Then delete the existing log files. Edit the .cnf file\n"
|
||||
"InnoDB: and start the database again.\n");
|
||||
|
||||
@ -841,9 +1161,7 @@ innobase_start_or_create_for_mysql(void)
|
||||
|
||||
mutex_enter(&(log_sys->mutex));
|
||||
|
||||
recv_reset_logs(ut_dulint_align_down(max_flushed_lsn,
|
||||
OS_FILE_LOG_BLOCK_SIZE),
|
||||
max_arch_log_no + 1, TRUE);
|
||||
recv_reset_logs(max_flushed_lsn, max_arch_log_no + 1, TRUE);
|
||||
|
||||
mutex_exit(&(log_sys->mutex));
|
||||
}
|
||||
@ -883,6 +1201,10 @@ innobase_start_or_create_for_mysql(void)
|
||||
|
||||
srv_startup_is_before_trx_rollback_phase = FALSE;
|
||||
|
||||
/* Initialize the fsp free limit global variable in the log
|
||||
system */
|
||||
fsp_header_get_free_limit(0);
|
||||
|
||||
recv_recovery_from_archive_finish();
|
||||
} else {
|
||||
/* We always try to do a recovery, even if the database had
|
||||
@ -899,6 +1221,7 @@ innobase_start_or_create_for_mysql(void)
|
||||
|
||||
/* Since ibuf init is in dict_boot, and ibuf is needed
|
||||
in any disk i/o, first call dict_boot */
|
||||
|
||||
dict_boot();
|
||||
trx_sys_init_at_db_start();
|
||||
|
||||
@ -906,6 +1229,11 @@ innobase_start_or_create_for_mysql(void)
|
||||
trx_sys_init_at_db_start */
|
||||
|
||||
srv_startup_is_before_trx_rollback_phase = FALSE;
|
||||
|
||||
/* Initialize the fsp free limit global variable in the log
|
||||
system */
|
||||
fsp_header_get_free_limit(0);
|
||||
|
||||
recv_recovery_from_checkpoint_finish();
|
||||
}
|
||||
|
||||
@ -975,7 +1303,7 @@ innobase_start_or_create_for_mysql(void)
|
||||
if (err != DB_SUCCESS) {
|
||||
return((int)DB_ERROR);
|
||||
}
|
||||
|
||||
|
||||
/* Create the master thread which monitors the database
|
||||
server, and does purge and other utility operations */
|
||||
|
||||
|
@ -20,11 +20,42 @@ Created 3/26/1996 Heikki Tuuri
#include "srv0srv.h"
#include "trx0purge.h"
#include "log0log.h"
#include "os0file.h"

/* The transaction system */
trx_sys_t* trx_sys = NULL;
trx_doublewrite_t* trx_doublewrite = NULL;

/********************************************************************
Determines if a page number is located inside the doublewrite buffer. */

ibool
trx_doublewrite_page_inside(
/*========================*/
/* out: TRUE if the location is inside
the two blocks of the doublewrite buffer */
ulint page_no) /* in: page number */
{
if (trx_doublewrite == NULL) {

return(FALSE);
}

if (page_no >= trx_doublewrite->block1
&& page_no < trx_doublewrite->block1
+ TRX_SYS_DOUBLEWRITE_BLOCK_SIZE) {
return(TRUE);
}

if (page_no >= trx_doublewrite->block2
&& page_no < trx_doublewrite->block2
+ TRX_SYS_DOUBLEWRITE_BLOCK_SIZE) {
return(TRUE);
}

return(FALSE);
}
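
As a quick illustration (standalone code, not from the patch; the block start and size values are made up), the function above is just two half-open range tests: a page is inside the doublewrite area if it falls in [block1, block1 + size) or [block2, block2 + size).

#include <stdio.h>

typedef unsigned long ulint;

static const ulint block1 = 64;       /* hypothetical start of block 1 */
static const ulint block2 = 192;      /* hypothetical start of block 2 */
static const ulint block_size = 64;   /* pages per block, illustrative */

static int page_inside(ulint page_no)
{
    return (page_no >= block1 && page_no < block1 + block_size)
        || (page_no >= block2 && page_no < block2 + block_size);
}

int main(void)
{
    printf("%d %d %d\n",
           page_inside(63),    /* 0: just before block 1 */
           page_inside(100),   /* 1: inside block 1 */
           page_inside(256));  /* 0: just past the end of block 2 */
    return 0;
}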

/********************************************************************
Creates or initialializes the doublewrite buffer at a database start. */
static
@ -36,6 +67,11 @@ trx_doublewrite_init(
{
trx_doublewrite = mem_alloc(sizeof(trx_doublewrite_t));

/* When we have the doublewrite buffer in use, we do not need to
call os_file_flush (Unix fsync) after every write. */

os_do_not_call_flush_at_each_write = TRUE;

mutex_create(&(trx_doublewrite->mutex));
mutex_set_level(&(trx_doublewrite->mutex), SYNC_DOUBLEWRITE);

@ -121,6 +121,7 @@ ut_malloc(
|
||||
{
|
||||
return(ut_malloc_low(n, TRUE));
|
||||
}
|
||||
|
||||
/**************************************************************************
|
||||
Frees a memory block allocated with ut_malloc. */
|
||||
|
||||
|
@ -16,6 +16,24 @@ Created 5/11/1994 Heikki Tuuri

ibool ut_always_false = FALSE;

/************************************************************
Gets the high 32 bits in a ulint. That is makes a shift >> 32,
but since there seem to be compiler bugs in both gcc and Visual C++,
we do this by a special conversion. */

ulint
ut_get_high32(
/*==========*/
/* out: a >> 32 */
ulint a) /* in: ulint */
{
if (sizeof(ulint) == 4) {
return(0);
}

return(a >> 32);
}
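
A hedged aside on why the sizeof() guard above matters: shifting a 32-bit integer right by 32 is undefined, so on platforms where ulint is 32 bits wide the high half is simply reported as 0. The standalone sketch below (not from the patch) splits a value into high and low 32-bit halves and recombines them, using two smaller shifts so the expression stays well-formed on every width.

#include <stdio.h>

typedef unsigned long ulint;

static ulint get_high32(ulint a)
{
    if (sizeof(ulint) == 4) {
        return 0;                 /* a 32-bit ulint has no high half */
    }

    return a >> 16 >> 16;         /* equals a >> 32 without a literal 32 */
}

static ulint get_low32(ulint a)
{
    return a & 0xFFFFFFFFUL;
}

int main(void)
{
    ulint a = (ulint) 123456789 * 97;   /* arbitrary test value */

    /* Recombine the halves; on a 32-bit ulint the high half is 0,
    so this is just the low half again. */
    ulint back = (get_high32(a) << 16 << 16) | get_low32(a);

    printf("%lu == %lu\n", (unsigned long) a, (unsigned long) back);

    return 0;
}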

/************************************************************
The following function returns a clock time in milliseconds. */

@ -58,11 +76,11 @@ ut_print_timestamp(
|
||||
FILE* file) /* in: file where to print */
|
||||
{
|
||||
#ifdef __WIN__
|
||||
SYSTEMTIME cal_tm;
|
||||
SYSTEMTIME cal_tm;
|
||||
|
||||
GetLocalTime(&cal_tm);
|
||||
GetLocalTime(&cal_tm);
|
||||
|
||||
fprintf(file,"%02d%02d%02d %2d:%02d:%02d",
|
||||
fprintf(file,"%02d%02d%02d %2d:%02d:%02d",
|
||||
(int)cal_tm.wYear % 100,
|
||||
(int)cal_tm.wMonth,
|
||||
(int)cal_tm.wDay,
|
||||
@ -70,23 +88,21 @@ ut_print_timestamp(
|
||||
(int)cal_tm.wMinute,
|
||||
(int)cal_tm.wSecond);
|
||||
#else
|
||||
struct tm cal_tm;
|
||||
struct tm* cal_tm_ptr;
|
||||
time_t tm;
|
||||
|
||||
struct tm cal_tm;
|
||||
struct tm* cal_tm_ptr;
|
||||
time_t tm;
|
||||
|
||||
time(&tm);
|
||||
time(&tm);
|
||||
|
||||
#ifdef HAVE_LOCALTIME_R
|
||||
localtime_r(&tm, &cal_tm);
|
||||
cal_tm_ptr = &cal_tm;
|
||||
localtime_r(&tm, &cal_tm);
|
||||
cal_tm_ptr = &cal_tm;
|
||||
#else
|
||||
cal_tm_ptr = localtime(&tm);
|
||||
cal_tm_ptr = localtime(&tm);
|
||||
#endif
|
||||
|
||||
fprintf(file,"%02d%02d%02d %2d:%02d:%02d",
|
||||
fprintf(file,"%02d%02d%02d %2d:%02d:%02d",
|
||||
cal_tm_ptr->tm_year % 100,
|
||||
cal_tm_ptr->tm_mon+1,
|
||||
cal_tm_ptr->tm_mon + 1,
|
||||
cal_tm_ptr->tm_mday,
|
||||
cal_tm_ptr->tm_hour,
|
||||
cal_tm_ptr->tm_min,
|
||||
@ -94,6 +110,39 @@ ut_print_timestamp(
|
||||
#endif
|
||||
}
|

/**************************************************************
Returns current year, month, day. */

void
ut_get_year_month_day(
/*==================*/
ulint* year, /* out: current year */
ulint* month, /* out: month */
ulint* day) /* out: day */
{
#ifdef __WIN__
SYSTEMTIME cal_tm;

GetLocalTime(&cal_tm);

*year = (ulint)cal_tm.wYear;
*month = (ulint)cal_tm.wMonth;
*day = (ulint)cal_tm.wDay;
#else
struct tm cal_tm;
struct tm* cal_tm_ptr;
time_t tm;

time(&tm);

cal_tm_ptr = localtime(&tm);

*year = (ulint)cal_tm_ptr->tm_year;
*month = (ulint)cal_tm_ptr->tm_mon + 1;
*day = (ulint)cal_tm_ptr->tm_mday;
#endif
}
|
||||
|
||||
/*****************************************************************
|
||||
Runs an idle loop on CPU. The argument gives the desired delay
|
||||
in microseconds on 100 MHz Pentium + Visual C++. */
|
||||
|
@ -701,7 +701,7 @@ static const char *default_options[]=
|
||||
"character-set-dir", "default-character-set", "interactive-timeout",
|
||||
"connect-timeout", "local-infile", "disable-local-infile",
|
||||
"replication-probe", "enable-reads-from-master", "repl-parse-query",
|
||||
"ssl-chiper",
|
||||
"ssl-cipher",
|
||||
NullS
|
||||
};
|
||||
|
||||
|
@ -412,7 +412,6 @@ float ft_boolean_find_relevance(FT_INFO *ftb, byte *record, uint length)
|
||||
FTB_EXPR *ftbe;
|
||||
FT_SEG_ITERATOR ftsi;
|
||||
const byte *end;
|
||||
uint i;
|
||||
my_off_t docid=ftb->info->lastpos;
|
||||
|
||||
if (docid == HA_POS_ERROR)
|
||||
@ -420,7 +419,7 @@ float ft_boolean_find_relevance(FT_INFO *ftb, byte *record, uint length)
|
||||
if (!ftb->queue.elements)
|
||||
return 0;
|
||||
|
||||
#if 0
|
||||
#if NOT_USED
|
||||
if (ftb->state == READY || ftb->state == INDEX_DONE)
|
||||
ftb->state=SCAN;
|
||||
else if (ftb->state != SCAN)
|
||||
|
@ -252,7 +252,7 @@ static struct option long_options[] =
|
||||
|
||||
static void print_version(void)
|
||||
{
|
||||
printf("%s Ver 1.11 for %s on %s\n",my_progname,SYSTEM_TYPE,MACHINE_TYPE);
|
||||
printf("%s Ver 1.12 for %s on %s\n",my_progname,SYSTEM_TYPE,MACHINE_TYPE);
|
||||
}
|
||||
|
||||
static void usage(void)
|
||||
@ -595,10 +595,7 @@ static int compress(PACK_MRG_INFO *mrg,char *result_table)
|
||||
else
|
||||
{
|
||||
if (tmp_dir[0])
|
||||
{
|
||||
if (!(error=my_copy(new_name,org_name,MYF(MY_WME))))
|
||||
VOID(my_delete(new_name,MYF(MY_WME)));
|
||||
}
|
||||
error=my_copy(new_name,org_name,MYF(MY_WME));
|
||||
else
|
||||
error=my_rename(new_name,org_name,MYF(MY_WME));
|
||||
if (!error)
|
||||
@ -608,13 +605,8 @@ static int compress(PACK_MRG_INFO *mrg,char *result_table)
|
||||
else
|
||||
{
|
||||
if (tmp_dir[0])
|
||||
{
|
||||
|
||||
if (!(error=my_copy(new_name,org_name,
|
||||
MYF(MY_WME | MY_HOLD_ORIGINAL_MODES
|
||||
| MY_COPYTIME))))
|
||||
VOID(my_delete(new_name,MYF(MY_WME)));
|
||||
}
|
||||
error=my_copy(new_name,org_name,
|
||||
MYF(MY_WME | MY_HOLD_ORIGINAL_MODES | MY_COPYTIME));
|
||||
else
|
||||
error=my_redel(org_name,new_name,MYF(MY_WME | MY_COPYTIME));
|
||||
}
|
||||
@ -628,6 +620,7 @@ static int compress(PACK_MRG_INFO *mrg,char *result_table)
|
||||
if (error)
|
||||
{
|
||||
VOID(fprintf(stderr,"Aborting: %s is not compressed\n",org_name));
|
||||
VOID(my_delete(new_name,MYF(MY_WME)));
|
||||
DBUG_RETURN(-1);
|
||||
}
|
||||
if (write_loop || verbose)
|
||||
|
@ -140,3 +140,70 @@ select * from t1 where aString > "believe in love" order by aString;
|
||||
aString
|
||||
believe in myself
|
||||
drop table t1;
|
||||
CREATE TABLE t1 (
|
||||
t1ID int(10) unsigned NOT NULL auto_increment,
|
||||
art char(1) binary NOT NULL default '',
|
||||
KNR char(5) NOT NULL default '',
|
||||
RECHNR char(6) NOT NULL default '',
|
||||
POSNR char(2) NOT NULL default '',
|
||||
ARTNR char(10) NOT NULL default '',
|
||||
TEX char(70) NOT NULL default '',
|
||||
PRIMARY KEY (t1ID),
|
||||
KEY IdxArt (art),
|
||||
KEY IdxKnr (KNR),
|
||||
KEY IdxArtnr (ARTNR)
|
||||
) TYPE=MyISAM;
|
||||
INSERT INTO t1 (art) VALUES ('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),
|
||||
('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),
|
||||
('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),
|
||||
('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),
|
||||
('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),
|
||||
('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),
|
||||
('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),
|
||||
('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),
|
||||
('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),
|
||||
('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),
|
||||
('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j');
|
||||
select count(*) from t1 where upper(art) = 'J';
|
||||
count(*)
|
||||
602
|
||||
select count(*) from t1 where art = 'J' or art = 'j';
|
||||
count(*)
|
||||
602
|
||||
select count(*) from t1 where art = 'j' or art = 'J';
|
||||
count(*)
|
||||
602
|
||||
select count(*) from t1 where art = 'j';
|
||||
count(*)
|
||||
389
|
||||
select count(*) from t1 where art = 'J';
|
||||
count(*)
|
||||
213
|
||||
drop table t1;
|
||||
|
@ -100,3 +100,66 @@ alter table t1 drop key aString;
|
||||
select * from t1 where aString < "believe in myself" order by aString;
|
||||
select * from t1 where aString > "believe in love" order by aString;
|
||||
drop table t1;
|
||||
|
||||
#
|
||||
# Problem with binary strings
|
||||
#
|
||||
|
||||
CREATE TABLE t1 (
|
||||
t1ID int(10) unsigned NOT NULL auto_increment,
|
||||
art char(1) binary NOT NULL default '',
|
||||
KNR char(5) NOT NULL default '',
|
||||
RECHNR char(6) NOT NULL default '',
|
||||
POSNR char(2) NOT NULL default '',
|
||||
ARTNR char(10) NOT NULL default '',
|
||||
TEX char(70) NOT NULL default '',
|
||||
PRIMARY KEY (t1ID),
|
||||
KEY IdxArt (art),
|
||||
KEY IdxKnr (KNR),
|
||||
KEY IdxArtnr (ARTNR)
|
||||
) TYPE=MyISAM;
|
||||
|
||||
INSERT INTO t1 (art) VALUES ('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),('j'),('J'),
|
||||
('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),
|
||||
('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),
|
||||
('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),
|
||||
('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),
|
||||
('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),
|
||||
('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),
|
||||
('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),
|
||||
('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),
|
||||
('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),
|
||||
('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),
|
||||
('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j'),('j');
|
||||
select count(*) from t1 where upper(art) = 'J';
|
||||
select count(*) from t1 where art = 'J' or art = 'j';
|
||||
select count(*) from t1 where art = 'j' or art = 'J';
|
||||
select count(*) from t1 where art = 'j';
|
||||
select count(*) from t1 where art = 'J';
|
||||
drop table t1;
|
||||
|
@ -29,7 +29,8 @@
|
||||
*/
|
||||
|
||||
my_bool _init_dynamic_array(DYNAMIC_ARRAY *array, uint element_size,
|
||||
uint init_alloc, uint alloc_increment CALLER_INFO_PROTO)
|
||||
uint init_alloc,
|
||||
uint alloc_increment CALLER_INFO_PROTO)
|
||||
{
|
||||
DBUG_ENTER("init_dynamic_array");
|
||||
if (!alloc_increment)
|
||||
|
@ -15,6 +15,7 @@
|
||||
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
|
||||
|
||||
/* To avoid problems with alarms in debug code, we disable DBUG here */
|
||||
#undef DBUG_OFF
|
||||
#define DBUG_OFF
|
||||
#include <my_global.h>
|
||||
|
||||
|
sql/field.cc
@ -1713,6 +1713,11 @@ void Field_float::store(double nr)
|
||||
float j;
|
||||
if (dec < NOT_FIXED_DEC)
|
||||
nr=floor(nr*log_10[dec]+0.5)/log_10[dec]; // To fixed point
|
||||
if (unsigned_flag && nr < 0)
|
||||
{
|
||||
current_thd->cuted_fields++;
|
||||
nr=0;
|
||||
}
|
||||
if (nr < -FLT_MAX)
|
||||
{
|
||||
j= -FLT_MAX;
|
||||
@ -1739,6 +1744,11 @@ void Field_float::store(double nr)
|
||||
void Field_float::store(longlong nr)
|
||||
{
|
||||
float j= (float) nr;
|
||||
if (unsigned_flag && j < 0)
|
||||
{
|
||||
current_thd->cuted_fields++;
|
||||
j=0;
|
||||
}
|
||||
#ifdef WORDS_BIGENDIAN
|
||||
if (table->db_low_byte_first)
|
||||
{
|
||||
@ -1945,6 +1955,11 @@ void Field_double::store(const char *from,uint len)
|
||||
double j= atof(tmp_str.c_ptr());
|
||||
if (errno || current_thd->count_cuted_fields && !test_if_real(from,len))
|
||||
current_thd->cuted_fields++;
|
||||
if (unsigned_flag && j < 0)
|
||||
{
|
||||
current_thd->cuted_fields++;
|
||||
j=0;
|
||||
}
|
||||
#ifdef WORDS_BIGENDIAN
|
||||
if (table->db_low_byte_first)
|
||||
{
|
||||
@ -1960,6 +1975,11 @@ void Field_double::store(double nr)
|
||||
{
|
||||
if (dec < NOT_FIXED_DEC)
|
||||
nr=floor(nr*log_10[dec]+0.5)/log_10[dec]; // To fixed point
|
||||
if (unsigned_flag && nr < 0)
|
||||
{
|
||||
current_thd->cuted_fields++;
|
||||
nr=0;
|
||||
}
|
||||
#ifdef WORDS_BIGENDIAN
|
||||
if (table->db_low_byte_first)
|
||||
{
|
||||
@ -1974,6 +1994,11 @@ void Field_double::store(double nr)
|
||||
void Field_double::store(longlong nr)
|
||||
{
|
||||
double j= (double) nr;
|
||||
if (unsigned_flag && j < 0)
|
||||
{
|
||||
current_thd->cuted_fields++;
|
||||
j=0;
|
||||
}
|
||||
#ifdef WORDS_BIGENDIAN
|
||||
if (table->db_low_byte_first)
|
||||
{
|
||||
|
sql/ha_innodb.cc
@ -76,23 +76,37 @@ bool innodb_skip = 0;
|
||||
uint innobase_init_flags = 0;
|
||||
ulong innobase_cache_size = 0;
|
||||
|
||||
/* The default values for the following, type long, start-up parameters
|
||||
are declared in mysqld.cc: */
|
||||
|
||||
long innobase_mirrored_log_groups, innobase_log_files_in_group,
|
||||
innobase_log_file_size, innobase_log_buffer_size,
|
||||
innobase_buffer_pool_size, innobase_additional_mem_pool_size,
|
||||
innobase_file_io_threads, innobase_lock_wait_timeout,
|
||||
innobase_thread_concurrency, innobase_force_recovery;
|
||||
/* The default values for the following char* start-up parameters
|
||||
are determined in innobase_init below: */
|
||||
|
||||
/* innobase_data_file_path=ibdata:15,idata2:1,... */
|
||||
|
||||
char* innobase_data_home_dir = NULL;
|
||||
char* innobase_log_group_home_dir = NULL;
|
||||
char* innobase_log_arch_dir = NULL;
|
||||
char* innobase_unix_file_flush_method = NULL;
|
||||
|
||||
char *innobase_data_home_dir;
|
||||
char *innobase_log_group_home_dir, *innobase_log_arch_dir;
|
||||
char *innobase_unix_file_flush_method;
|
||||
my_bool innobase_flush_log_at_trx_commit, innobase_log_archive,
|
||||
innobase_use_native_aio, innobase_fast_shutdown;
|
||||
/* Below we have boolean-valued start-up parameters, and their default
|
||||
values */
|
||||
|
||||
/* Set the default InnoDB tablespace size to 16M, and let it be
|
||||
auto-extending. Thus users can use InnoDB without having to specify
|
||||
any startup options. */
|
||||
my_bool innobase_flush_log_at_trx_commit = FALSE;
|
||||
my_bool innobase_log_archive = FALSE;
|
||||
my_bool innobase_use_native_aio = FALSE;
|
||||
my_bool innobase_fast_shutdown = TRUE;
|
||||
|
||||
char *innobase_data_file_path= (char*) "ibdata1:16M:autoextend";
|
||||
/*
|
||||
Set default InnoDB size to 64M and let it be auto-extending. Thus users
|
||||
can use InnoDB without having to specify any startup options.
|
||||
*/
|
||||
|
||||
char *innobase_data_file_path= (char*) "ibdata1:64M:autoextend";
|
||||
char *internal_innobase_data_file_path=0;
|
||||
|
||||
/* The following counter is used to convey information to InnoDB
|
||||
@ -336,227 +350,6 @@ ha_innobase::update_thd(
|
||||
return(0);
|
||||
}
|
||||
|
||||
/*************************************************************************
|
||||
Reads the data files and their sizes from a character string given in
|
||||
the .cnf file. */
|
||||
static
|
||||
bool
|
||||
innobase_parse_data_file_paths_and_sizes(void)
|
||||
/*==========================================*/
|
||||
/* out: TRUE if ok, FALSE if parsing
|
||||
error */
|
||||
{
|
||||
char* str;
|
||||
char* endp;
|
||||
char* path;
|
||||
ulint size;
|
||||
ulint i = 0;
|
||||
|
||||
str = internal_innobase_data_file_path;
|
||||
|
||||
/* First calculate the number of data files and check syntax:
|
||||
path:size[M];path:size[M]... . Note that a Windows path may
|
||||
contain a drive name and a ':'. */
|
||||
|
||||
while (*str != '\0') {
|
||||
path = str;
|
||||
|
||||
while ((*str != ':' && *str != '\0')
|
||||
|| (*str == ':'
|
||||
&& (*(str + 1) == '\\' || *(str + 1) == '/'))) {
|
||||
str++;
|
||||
}
|
||||
|
||||
if (*str == '\0') {
|
||||
return(FALSE);
|
||||
}
|
||||
|
||||
str++;
|
||||
|
||||
size = strtoul(str, &endp, 10);
|
||||
|
||||
str = endp;
|
||||
|
||||
if ((*str != 'M') && (*str != 'G')) {
|
||||
size = size / (1024 * 1024);
|
||||
} else if (*str == 'G') {
|
||||
size = size * 1024;
|
||||
str++;
|
||||
} else {
|
||||
str++;
|
||||
}
|
||||
|
||||
if (strlen(str) >= 6
|
||||
&& *str == 'n'
|
||||
&& *(str + 1) == 'e'
|
||||
&& *(str + 2) == 'w') {
|
||||
str += 3;
|
||||
}
|
||||
|
||||
if (strlen(str) >= 3
|
||||
&& *str == 'r'
|
||||
&& *(str + 1) == 'a'
|
||||
&& *(str + 2) == 'w') {
|
||||
str += 3;
|
||||
}
|
||||
|
||||
if (size == 0) {
|
||||
return(FALSE);
|
||||
}
|
||||
|
||||
i++;
|
||||
|
||||
if (*str == ';') {
|
||||
str++;
|
||||
} else if (*str != '\0') {
|
||||
|
||||
return(FALSE);
|
||||
}
|
||||
}
|
||||
|
||||
srv_data_file_names = (char**)ut_malloc(i * sizeof(void*));
|
||||
srv_data_file_sizes = (ulint*)ut_malloc(i * sizeof(ulint));
|
||||
srv_data_file_is_raw_partition = (ulint*)ut_malloc(i * sizeof(ulint));
|
||||
|
||||
srv_n_data_files = i;
|
||||
|
||||
/* Then store the actual values to our arrays */
|
||||
|
||||
str = internal_innobase_data_file_path;
|
||||
i = 0;
|
||||
|
||||
while (*str != '\0') {
|
||||
path = str;
|
||||
|
||||
/* Note that we must ignore the ':' in a Windows path */
|
||||
|
||||
while ((*str != ':' && *str != '\0')
|
||||
|| (*str == ':'
|
||||
&& (*(str + 1) == '\\' || *(str + 1) == '/'))) {
|
||||
str++;
|
||||
}
|
||||
|
||||
if (*str == ':') {
|
||||
/* Make path a null-terminated string */
|
||||
*str = '\0';
|
||||
str++;
|
||||
}
|
||||
|
||||
size = strtoul(str, &endp, 10);
|
||||
|
||||
str = endp;
|
||||
|
||||
if ((*str != 'M') && (*str != 'G')) {
|
||||
size = size / (1024 * 1024);
|
||||
} else if (*str == 'G') {
|
||||
size = size * 1024;
|
||||
str++;
|
||||
} else {
|
||||
str++;
|
||||
}
|
||||
|
||||
srv_data_file_is_raw_partition[i] = 0;
|
||||
|
||||
if (strlen(str) >= 6
|
||||
&& *str == 'n'
|
||||
&& *(str + 1) == 'e'
|
||||
&& *(str + 2) == 'w') {
|
||||
str += 3;
|
||||
srv_data_file_is_raw_partition[i] = SRV_NEW_RAW;
|
||||
}
|
||||
|
||||
if (strlen(str) >= 3
|
||||
&& *str == 'r'
|
||||
&& *(str + 1) == 'a'
|
||||
&& *(str + 2) == 'w') {
|
||||
str += 3;
|
||||
|
||||
if (srv_data_file_is_raw_partition[i] == 0) {
|
||||
srv_data_file_is_raw_partition[i] = SRV_OLD_RAW;
|
||||
}
|
||||
}
|
||||
|
||||
srv_data_file_names[i] = path;
|
||||
srv_data_file_sizes[i] = size;
|
||||
|
||||
i++;
|
||||
|
||||
if (*str == ';') {
|
||||
str++;
|
||||
}
|
||||
}
|
||||
|
||||
return(TRUE);
|
||||
}
|
||||
|
||||
/*************************************************************************
|
||||
Reads log group home directories from a character string given in
|
||||
the .cnf file. */
|
||||
static
|
||||
bool
|
||||
innobase_parse_log_group_home_dirs(void)
|
||||
/*====================================*/
|
||||
/* out: TRUE if ok, FALSE if parsing
|
||||
error */
|
||||
{
|
||||
char* str;
|
||||
char* path;
|
||||
ulint i = 0;
|
||||
|
||||
str = innobase_log_group_home_dir;
|
||||
|
||||
/* First calculate the number of directories and check syntax:
|
||||
path;path;... */
|
||||
|
||||
while (*str != '\0') {
|
||||
path = str;
|
||||
|
||||
while (*str != ';' && *str != '\0') {
|
||||
str++;
|
||||
}
|
||||
|
||||
i++;
|
||||
|
||||
if (*str == ';') {
|
||||
str++;
|
||||
} else if (*str != '\0') {
|
||||
|
||||
return(FALSE);
|
||||
}
|
||||
}
|
||||
|
||||
if (i != (ulint) innobase_mirrored_log_groups) {
|
||||
|
||||
return(FALSE);
|
||||
}
|
||||
|
||||
srv_log_group_home_dirs = (char**) ut_malloc(i * sizeof(void*));
|
||||
|
||||
/* Then store the actual values to our array */
|
||||
|
||||
str = innobase_log_group_home_dir;
|
||||
i = 0;
|
||||
|
||||
while (*str != '\0') {
|
||||
path = str;
|
||||
|
||||
while (*str != ';' && *str != '\0') {
|
||||
str++;
|
||||
}
|
||||
|
||||
if (*str == ';') {
|
||||
*str = '\0';
|
||||
str++;
|
||||
}
|
||||
|
||||
srv_log_group_home_dirs[i] = path;
|
||||
|
||||
i++;
|
||||
}
|
||||
|
||||
return(TRUE);
|
||||
}
|
||||
|
||||
/*************************************************************************
|
||||
Opens an InnoDB database. */
|
||||
|
||||
@ -580,9 +373,9 @@ innobase_init(void)
|
||||
else
|
||||
{
|
||||
/* It's better to use current lib, to keep path's short */
|
||||
current_lib[0]=FN_CURLIB;
|
||||
current_lib[1]=FN_LIBCHAR;
|
||||
current_lib[2]=0;
|
||||
current_lib[0] = FN_CURLIB;
|
||||
current_lib[1] = FN_LIBCHAR;
|
||||
current_lib[2] = 0;
|
||||
default_path=current_lib;
|
||||
}
|
||||
|
||||
@ -604,12 +397,17 @@ innobase_init(void)
|
||||
|
||||
srv_data_home = (innobase_data_home_dir ? innobase_data_home_dir :
|
||||
default_path);
|
||||
srv_logs_home = (char*) "";
|
||||
srv_arch_dir = (innobase_log_arch_dir ? innobase_log_arch_dir :
|
||||
default_path);
|
||||
|
||||
ret = innobase_parse_data_file_paths_and_sizes();
|
||||
|
||||
ret = (bool)
|
||||
srv_parse_data_file_paths_and_sizes(innobase_data_file_path,
|
||||
&srv_data_file_names,
|
||||
&srv_data_file_sizes,
|
||||
&srv_data_file_is_raw_partition,
|
||||
&srv_n_data_files,
|
||||
&srv_auto_extend_last_data_file,
|
||||
&srv_last_file_size_max);
|
||||
if (ret == FALSE) {
|
||||
sql_print_error("InnoDB: syntax error in innodb_data_file_path");
|
||||
DBUG_RETURN(TRUE);
|
||||
@ -617,12 +415,18 @@ innobase_init(void)
|
||||
|
||||
if (!innobase_log_group_home_dir)
|
||||
innobase_log_group_home_dir= default_path;
|
||||
ret = innobase_parse_log_group_home_dirs();
|
||||
|
||||
if (ret == FALSE) {
|
||||
DBUG_RETURN(TRUE);
|
||||
ret = (bool)
|
||||
srv_parse_log_group_home_dirs(innobase_log_group_home_dir,
|
||||
&srv_log_group_home_dirs);
|
||||
|
||||
if (ret == FALSE || innobase_mirrored_log_groups != 1) {
|
||||
fprintf(stderr,
|
||||
"InnoDB: syntax error in innodb_log_group_home_dir\n"
|
||||
"InnoDB: or a wrong number of mirrored log groups\n");
|
||||
|
||||
DBUG_RETURN(TRUE);
|
||||
}
|
||||
|
||||
srv_unix_file_flush_method_str = (innobase_unix_file_flush_method ?
|
||||
innobase_unix_file_flush_method :
|
||||
(char*)"fdatasync");
|
||||
@ -663,10 +467,11 @@ innobase_init(void)
|
||||
|
||||
if (err != DB_SUCCESS) {
|
||||
|
||||
DBUG_RETURN(1);
|
||||
DBUG_RETURN(1);
|
||||
}
|
||||
|
||||
(void) hash_init(&innobase_open_tables,32,0,0,
|
||||
(hash_get_key) innobase_get_key,0,0);
|
||||
(hash_get_key) innobase_get_key,0,0);
|
||||
pthread_mutex_init(&innobase_mutex,MY_MUTEX_INIT_FAST);
|
||||
DBUG_RETURN(0);
|
||||
}
|
||||
@ -1352,33 +1157,43 @@ build_template(
|
||||
|
||||
clust_index = dict_table_get_first_index_noninline(prebuilt->table);
|
||||
|
||||
if (!prebuilt->in_update_remember_pos) {
|
||||
if (!prebuilt->hint_no_need_to_fetch_extra_cols) {
|
||||
/* We have a hint that we should at least fetch all
|
||||
columns in the key, or all columns in the table */
|
||||
|
||||
if (prebuilt->read_just_key) {
|
||||
/* MySQL has instructed us that it is enough to
|
||||
fetch the columns in the key */
|
||||
|
||||
fetch_all_in_key = TRUE;
|
||||
} else {
|
||||
/* We are building a temporary table: fetch all
|
||||
columns */
|
||||
columns; the reason is that MySQL may use the
|
||||
clustered index key to store rows, but the mechanism
|
||||
we use below to detect required columns does not
|
||||
reveal that. Actually, it might be enough to
|
||||
fetch only all in the key also in this case! */
|
||||
|
||||
templ_type = ROW_MYSQL_WHOLE_ROW;
|
||||
}
|
||||
}
|
||||
|
||||
if (prebuilt->select_lock_type == LOCK_X) {
|
||||
/* TODO: should fix the code in sql_update so that we could do
|
||||
with fetching only the needed columns */
|
||||
/* We always retrieve the whole clustered index record if we
|
||||
use exclusive row level locks, for example, if the read is
|
||||
done in an UPDATE statement. */
|
||||
|
||||
templ_type = ROW_MYSQL_WHOLE_ROW;
|
||||
}
|
||||
|
||||
if (templ_type == ROW_MYSQL_REC_FIELDS) {
|
||||
/* In versions < 3.23.50 we always retrieved the clustered
|
||||
index record if prebuilt->select_lock_type == LOCK_S,
|
||||
but there is really not need for that, and in some cases
|
||||
performance could be seriously degraded because the MySQL
|
||||
optimizer did not know about our convention! */
|
||||
|
||||
if (prebuilt->select_lock_type != LOCK_NONE) {
|
||||
/* Let index be the clustered index */
|
||||
|
||||
index = clust_index;
|
||||
} else {
|
||||
index = prebuilt->index;
|
||||
}
|
||||
index = prebuilt->index;
|
||||
} else {
|
||||
index = clust_index;
|
||||
}
|
||||
@ -1474,12 +1289,6 @@ skip_field:
|
||||
(index->table->cols + templ->col_no)->clust_pos;
|
||||
}
|
||||
}
|
||||
|
||||
if (templ_type == ROW_MYSQL_REC_FIELDS
|
||||
&& prebuilt->select_lock_type != LOCK_NONE) {
|
||||
|
||||
prebuilt->need_to_access_clustered = TRUE;
|
||||
}
|
||||
}
|
||||
|
||||
/************************************************************************
|
||||
@ -1512,7 +1321,9 @@ ha_innobase::write_row(
|
||||
}
|
||||
|
||||
if (table->next_number_field && record == table->record[0]) {
|
||||
|
||||
/* This is the case where the table has an
|
||||
auto-increment column */
|
||||
|
||||
/* Fetch the value the user possibly has set in the
|
||||
autoincrement field */
|
||||
|
||||
@ -1596,12 +1407,6 @@ ha_innobase::write_row(
|
||||
}
|
||||
}
|
||||
|
||||
/* Set the 'in_update_remember_pos' flag to FALSE to
|
||||
make sure all columns are fetched in the select done by
|
||||
update_auto_increment */
|
||||
|
||||
prebuilt->in_update_remember_pos = FALSE;
|
||||
|
||||
update_auto_increment();
|
||||
|
||||
if (auto_inc == 0) {
|
||||
@ -1625,7 +1430,7 @@ ha_innobase::write_row(
|
||||
}
|
||||
|
||||
/* We have to set sql_stat_start to TRUE because
|
||||
update_auto_increment has called a select, and
|
||||
update_auto_increment may have called a select, and
|
||||
has reset that flag; row_insert_for_mysql has to
|
||||
know to set the IX intention lock on the table, something
|
||||
it only does at the start of each statement */
|
||||
@ -1865,9 +1670,7 @@ ha_innobase::update_row(
|
||||
/* This is not a delete */
|
||||
prebuilt->upd_node->is_delete = FALSE;
|
||||
|
||||
if (!prebuilt->in_update_remember_pos) {
|
||||
assert(prebuilt->template_type == ROW_MYSQL_WHOLE_ROW);
|
||||
}
|
||||
assert(prebuilt->template_type == ROW_MYSQL_WHOLE_ROW);
|
||||
|
||||
srv_conc_enter_innodb(prebuilt->trx);
|
||||
|
||||
@ -1913,7 +1716,6 @@ ha_innobase::delete_row(
|
||||
/* This is a delete */
|
||||
|
||||
prebuilt->upd_node->is_delete = TRUE;
|
||||
prebuilt->in_update_remember_pos = TRUE;
|
||||
|
||||
srv_conc_enter_innodb(prebuilt->trx);
|
||||
|
||||
@ -2123,19 +1925,20 @@ ha_innobase::change_active_index(
|
||||
|
||||
active_index = keynr;
|
||||
|
||||
if (keynr != MAX_KEY && table->keys > 0)
|
||||
{
|
||||
if (keynr != MAX_KEY && table->keys > 0) {
|
||||
key = table->key_info + active_index;
|
||||
|
||||
prebuilt->index=dict_table_get_index_noninline(prebuilt->table, key->name);
|
||||
if (!prebuilt->index)
|
||||
{
|
||||
sql_print_error("Innodb could not find key n:o %u with name %s from dict cache for table %s", keynr, key->name, prebuilt->table->name);
|
||||
return(1);
|
||||
}
|
||||
prebuilt->index = dict_table_get_index_noninline(
|
||||
prebuilt->table, key->name);
|
||||
} else {
|
||||
prebuilt->index = dict_table_get_first_index_noninline(
|
||||
prebuilt->table);
|
||||
}
|
||||
|
||||
if (!prebuilt->index) {
|
||||
sql_print_error("Innodb could not find key n:o %u with name %s from dict cache for table %s", keynr, key->name, prebuilt->table->name);
|
||||
return(1);
|
||||
}
|
||||
else
|
||||
prebuilt->index = dict_table_get_first_index_noninline(prebuilt->table);
|
||||
|
||||
assert(prebuilt->search_tuple != 0);
|
||||
|
||||
@ -2407,7 +2210,7 @@ ha_innobase::rnd_pos(
|
||||
int error;
|
||||
uint keynr = active_index;
|
||||
DBUG_ENTER("rnd_pos");
|
||||
DBUG_DUMP("key", pos, ref_stored_len);
|
||||
DBUG_DUMP("key", (char*) pos, ref_stored_len);
|
||||
|
||||
statistic_increment(ha_read_rnd_count, &LOCK_status);
|
||||
|
||||
@ -2632,7 +2435,6 @@ ha_innobase::create(
|
||||
dict_table_t* innobase_table;
|
||||
trx_t* trx;
|
||||
int primary_key_no;
|
||||
KEY* key;
|
||||
uint i;
|
||||
char name2[FN_REFLEN];
|
||||
char norm_name[FN_REFLEN];
|
||||
@ -2647,7 +2449,9 @@ ha_innobase::create(
|
||||
|
||||
/* Create the table definition in InnoDB */
|
||||
|
||||
if ((error = create_table_def(trx, form, norm_name))) {
|
||||
error = create_table_def(trx, form, norm_name);
|
||||
|
||||
if (error) {
|
||||
|
||||
trx_commit_for_mysql(trx);
|
||||
|
||||
@ -3222,13 +3026,59 @@ ha_innobase::update_table_comment(
|
||||
pos += sprintf(pos, "InnoDB free: %lu kB",
|
||||
(ulong) innobase_get_free_space());
|
||||
|
||||
/* We assume 150 bytes of space to print info */
|
||||
|
||||
dict_print_info_on_foreign_keys(pos, 500, prebuilt->table);
|
||||
/* We assume 450 - length bytes of space to print info */
|
||||
|
||||
if (length < 450) {
|
||||
dict_print_info_on_foreign_keys(FALSE, pos, 450 - length,
|
||||
prebuilt->table);
|
||||
}
|
||||
|
||||
return(str);
|
||||
}
|
||||
|
||||
/***********************************************************************
|
||||
Gets the foreign key create info for a table stored in InnoDB. */
|
||||
|
||||
char*
|
||||
ha_innobase::get_foreign_key_create_info(void)
|
||||
/*==========================================*/
|
||||
/* out, own: character string in the form which
|
||||
can be inserted to the CREATE TABLE statement,
|
||||
MUST be freed with ::free_foreign_key_create_info */
|
||||
{
|
||||
row_prebuilt_t* prebuilt = (row_prebuilt_t*)innobase_prebuilt;
|
||||
char* str;
|
||||
|
||||
if (prebuilt == NULL) {
|
||||
fprintf(stderr,
|
||||
"InnoDB: Error: cannot get create info for foreign keys\n");
|
||||
|
||||
return(NULL);
|
||||
}
|
||||
|
||||
str = (char*)ut_malloc(10000);
|
||||
|
||||
str[0] = '\0';
|
||||
|
||||
dict_print_info_on_foreign_keys(TRUE, str, 9000, prebuilt->table);
|
||||
|
||||
return(str);
|
||||
}
|
||||
|
||||
/***********************************************************************
|
||||
Frees the foreign key create info for a table stored in InnoDB, if it is
|
||||
non-NULL. */
|
||||
|
||||
void
|
||||
ha_innobase::free_foreign_key_create_info(
|
||||
/*======================================*/
|
||||
char* str) /* in, own: create info string to free */
|
||||
{
|
||||
if (str) {
|
||||
ut_free(str);
|
||||
}
|
||||
}
|
||||
|
||||
/***********************************************************************
|
||||
Tells something additional to the handler about how to do things. */
|
||||
|
||||
@ -3254,7 +3104,7 @@ ha_innobase::extra(
|
||||
prebuilt->read_just_key = 0;
|
||||
break;
|
||||
case HA_EXTRA_DONT_USE_CURSOR_TO_UPDATE:
|
||||
prebuilt->in_update_remember_pos = FALSE;
|
||||
prebuilt->hint_no_need_to_fetch_extra_cols = FALSE;
|
||||
break;
|
||||
case HA_EXTRA_KEYREAD:
|
||||
prebuilt->read_just_key = 1;
|
||||
@ -3301,7 +3151,7 @@ ha_innobase::external_lock(
|
||||
trx = prebuilt->trx;
|
||||
|
||||
prebuilt->sql_stat_start = TRUE;
|
||||
prebuilt->in_update_remember_pos = TRUE;
|
||||
prebuilt->hint_no_need_to_fetch_extra_cols = TRUE;
|
||||
|
||||
prebuilt->read_just_key = 0;
|
||||
|
||||
@ -3320,6 +3170,16 @@ ha_innobase::external_lock(
|
||||
thd->transaction.all.innodb_active_trans = 1;
|
||||
trx->n_mysql_tables_in_use++;
|
||||
|
||||
if (thd->tx_isolation == ISO_SERIALIZABLE
|
||||
&& prebuilt->select_lock_type == LOCK_NONE) {
|
||||
|
||||
/* To get serializable execution we let InnoDB
|
||||
conceptually add 'LOCK IN SHARE MODE' to all SELECTs
|
||||
which otherwise would have been consistent reads */
|
||||
|
||||
prebuilt->select_lock_type = LOCK_S;
|
||||
}
|
||||
|
||||
if (prebuilt->select_lock_type != LOCK_NONE) {
|
||||
|
||||
trx->mysql_n_tables_locked++;
|
||||
@ -3427,8 +3287,8 @@ ha_innobase::store_lock(
|
||||
lock_type == TL_READ_NO_INSERT) {
|
||||
/* This is a SELECT ... IN SHARE MODE, or
|
||||
we are doing a complex SQL statement like
|
||||
INSERT INTO ... SELECT ... and the logical logging
|
||||
requires the use of a locking read */
|
||||
INSERT INTO ... SELECT ... and the logical logging (MySQL
|
||||
binlog) requires the use of a locking read */
|
||||
|
||||
prebuilt->select_lock_type = LOCK_S;
|
||||
} else {
|
||||
@ -3468,37 +3328,59 @@ ha_innobase::get_auto_increment()
|
||||
/*=============================*/
|
||||
/* out: the next auto-increment column value */
|
||||
{
|
||||
row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt;
|
||||
longlong nr;
|
||||
int error;
|
||||
row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt;
|
||||
longlong nr;
|
||||
int error;
|
||||
|
||||
(void) extra(HA_EXTRA_KEYREAD);
|
||||
index_init(table->next_number_index);
|
||||
/* Also SHOW TABLE STATUS calls this function. Previously, when we did
|
||||
always read the max autoinc key value, setting x-locks, users were
|
||||
surprised that SHOW TABLE STATUS could end up in a deadlock with
|
||||
ordinary SQL queries. We avoid these deadlocks if the auto-inc
|
||||
counter for the table has been initialized by fetching the value
|
||||
from the table struct in dictionary cache. */
|
||||
|
||||
/* We use an exclusive lock when we read the max key value from the
|
||||
auto-increment column index. This is because then build_template will
|
||||
advise InnoDB to fetch all columns. In SHOW TABLE STATUS the query
|
||||
id of the auto-increment column is not changed, and previously InnoDB
|
||||
did not fetch it, causing SHOW TABLE STATUS to show wrong values
|
||||
for the autoinc column. */
|
||||
assert(prebuilt->table);
|
||||
|
||||
nr = dict_table_autoinc_read(prebuilt->table);
|
||||
|
||||
prebuilt->select_lock_type = LOCK_X;
|
||||
prebuilt->trx->mysql_n_tables_locked += 1;
|
||||
if (nr != 0) {
|
||||
|
||||
error=index_last(table->record[1]);
|
||||
return(nr + 1);
|
||||
}
|
||||
|
||||
if (error) {
|
||||
nr = 1;
|
||||
} else {
|
||||
nr = (longlong) table->next_number_field->
|
||||
val_int_offset(table->rec_buff_length) + 1;
|
||||
}
|
||||
(void) extra(HA_EXTRA_KEYREAD);
|
||||
index_init(table->next_number_index);
|
||||
|
||||
(void) extra(HA_EXTRA_NO_KEYREAD);
|
||||
/* We use an exclusive lock when we read the max key value from the
|
||||
auto-increment column index. This is because then build_template will
|
||||
advise InnoDB to fetch all columns. In SHOW TABLE STATUS the query
|
||||
id of the auto-increment column is not changed, and previously InnoDB
|
||||
did not fetch it, causing SHOW TABLE STATUS to show wrong values
|
||||
for the autoinc column. */
|
||||
|
||||
index_end();
|
||||
prebuilt->select_lock_type = LOCK_X;
|
||||
|
||||
return(nr);
|
||||
/* Play safe and also give in another way the hint to fetch
|
||||
all columns in the key: */
|
||||
|
||||
prebuilt->hint_no_need_to_fetch_extra_cols = FALSE;
|
||||
|
||||
prebuilt->trx->mysql_n_tables_locked += 1;
|
||||
|
||||
error = index_last(table->record[1]);

if (error) {
nr = 1;
} else {
nr = (longlong) table->next_number_field->
val_int_offset(table->rec_buff_length) + 1;
}

(void) extra(HA_EXTRA_NO_KEYREAD);

index_end();

return(nr);
}
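
A rough standalone sketch of the pattern the rewritten get_auto_increment() follows (hypothetical names, not the handler code): prefer the counter cached in the data dictionary and fall back to the expensive index scan only when that counter has not been initialized yet, which is what avoids the SHOW TABLE STATUS deadlocks described in the comment above.

#include <stdio.h>

typedef long long longlong;

static longlong cached_autoinc = 0;    /* 0 means "not initialized yet" */

/* stand-in for reading MAX(auto-inc column) via index_last() */
static longlong scan_index_for_max(void)
{
    printf("expensive index scan performed\n");
    return 41;
}

static longlong next_autoinc(void)
{
    if (cached_autoinc != 0) {
        return cached_autoinc + 1;     /* cheap path: no locks, no scan */
    }

    cached_autoinc = scan_index_for_max();

    return cached_autoinc + 1;
}

int main(void)
{
    /* The counter itself is advanced elsewhere (on insert), as in the
    patch, so both calls report the same next value here. */
    printf("%lld\n", next_autoinc());  /* scans once, prints 42 */
    printf("%lld\n", next_autoinc());  /* served from the cache, prints 42 */
    return 0;
}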

#endif /* HAVE_INNOBASE_DB */

@ -158,7 +158,8 @@ class ha_innobase: public handler
|
||||
int rename_table(const char* from, const char* to);
|
||||
int check(THD* thd, HA_CHECK_OPT* check_opt);
|
||||
char* update_table_comment(const char* comment);
|
||||
|
||||
char* get_foreign_key_create_info();
|
||||
void free_foreign_key_create_info(char* str);
|
||||
THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
|
||||
enum thr_lock_type lock_type);
|
||||
longlong get_auto_increment();
|
||||
|
@ -225,13 +225,13 @@ int ha_autocommit_or_rollback(THD *thd, int error)
|
||||
/*
|
||||
This function is called when MySQL writes the log segment of a
|
||||
transaction to the binlog. It is called when the LOCK_log mutex is
|
||||
reserved. Here we communicate to transactional table handlers whta
|
||||
reserved. Here we communicate to transactional table handlers what
|
||||
binlog position corresponds to the current transaction. The handler
|
||||
can store it and in recovery print to the user, so that the user
|
||||
knows from what position in the binlog to start possible
|
||||
roll-forward, for example, if the crashed server was a slave in
|
||||
replication. This function also calls the commit of the table
|
||||
handler, because the order of trasnactions in the log of the table
|
||||
handler, because the order of transactions in the log of the table
|
||||
handler must be the same as in the binlog.
|
||||
|
||||
arguments:
|
||||
@ -263,7 +263,6 @@ int ha_report_binlog_offset_and_commit(THD *thd,
|
||||
return error;
|
||||
}
|
||||
|
||||
|
||||
int ha_commit_trans(THD *thd, THD_TRANS* trans)
|
||||
{
|
||||
int error=0;
|
||||
|
@ -308,7 +308,9 @@ public:
|
||||
virtual char *update_table_comment(const char * comment)
|
||||
{ return (char*) comment;}
|
||||
virtual void append_create_info(String *packet) {}
|
||||
|
||||
virtual char* get_foreign_key_create_info()
|
||||
{ return(NULL);} /* gets foreign key create string from InnoDB */
|
||||
virtual void free_foreign_key_create_info(char* str) {}
|
||||
/* The following can be called without an open handler */
|
||||
virtual const char *table_type() const =0;
|
||||
virtual const char **bas_ext() const =0;
|
||||
|
sql/item.cc
@ -59,12 +59,28 @@ void Item::set_name(char *str,uint length)
}
}

bool Item::eq(const Item *item) const // Only doing this on conds
/*
This function is only called when comparing items in the WHERE clause
*/

bool Item::eq(const Item *item, bool binary_cmp) const
{
return type() == item->type() && name && item->name &&
!my_strcasecmp(name,item->name);
}

bool Item_string::eq(const Item *item, bool binary_cmp) const
{
if (type() == item->type())
{
if (binary_cmp)
return !stringcmp(&str_value, &item->str_value);
return !sortcmp(&str_value, &item->str_value);
}
return 0;
}
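
For intuition, a self-contained illustration (not MySQL code) of the distinction the new binary_cmp argument draws: a byte-for-byte comparison versus a case-folding one. Plain strcmp and POSIX strcasecmp stand in here for the internal stringcmp and sortcmp helpers used above.

#include <stdio.h>
#include <string.h>
#include <strings.h>

static bool strings_equal(const char* a, const char* b, bool binary_cmp)
{
    if (binary_cmp) {
        return strcmp(a, b) == 0;      /* exact bytes must match */
    }

    return strcasecmp(a, b) == 0;      /* case-insensitive match */
}

int main()
{
    printf("%d\n", strings_equal("abc", "ABC", false));   /* prints 1 */
    printf("%d\n", strings_equal("abc", "ABC", true));    /* prints 0 */
    return 0;
}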

/*
Get the value of the function as a TIME structure.
As a extra convenience the time structure is reset on error!
@ -203,7 +219,7 @@ longlong Item_field::val_int_result()
|
||||
return result_field->val_int();
|
||||
}
|
||||
|
||||
bool Item_field::eq(const Item *item) const
|
||||
bool Item_field::eq(const Item *item, bool binary_cmp) const
|
||||
{
|
||||
return item->type() == FIELD_ITEM && ((Item_field*) item)->field == field;
|
||||
}
|
||||
@ -262,7 +278,8 @@ void Item_string::print(String *str)
|
||||
str->append('\'');
|
||||
}
|
||||
|
||||
bool Item_null::eq(const Item *item) const { return item->type() == type(); }
|
||||
bool Item_null::eq(const Item *item, bool binary_cmp) const
|
||||
{ return item->type() == type(); }
|
||||
double Item_null::val() { null_value=1; return 0.0; }
|
||||
longlong Item_null::val_int() { null_value=1; return 0; }
|
||||
/* ARGSUSED */
|
||||
|
sql/item.h
@ -57,7 +57,7 @@ public:
|
||||
virtual void save_org_in_field(Field *field)
|
||||
{ (void) save_in_field(field); }
|
||||
virtual bool send(THD *thd, String *str);
|
||||
virtual bool eq(const Item *) const;
|
||||
virtual bool eq(const Item *, bool binary_cmp) const;
|
||||
virtual Item_result result_type () const { return REAL_RESULT; }
|
||||
virtual enum Type type() const =0;
|
||||
virtual double val()=0;
|
||||
@ -111,7 +111,7 @@ public:
|
||||
{}
|
||||
Item_field(Field *field);
|
||||
enum Type type() const { return FIELD_ITEM; }
|
||||
bool eq(const Item *item) const;
|
||||
bool eq(const Item *item, bool binary_cmp) const;
|
||||
double val();
|
||||
longlong val_int();
|
||||
String *val_str(String*);
|
||||
@ -141,7 +141,7 @@ public:
|
||||
Item_null(char *name_par=0)
|
||||
{ maybe_null=null_value=TRUE; name= name_par ? name_par : (char*) "NULL";}
|
||||
enum Type type() const { return NULL_ITEM; }
|
||||
bool eq(const Item *item) const;
|
||||
bool eq(const Item *item, bool binary_cmp) const;
|
||||
double val();
|
||||
longlong val_int();
|
||||
String *val_str(String *str);
|
||||
@ -264,6 +264,7 @@ public:
|
||||
void make_field(Send_field *field);
|
||||
enum Item_result result_type () const { return STRING_RESULT; }
|
||||
bool basic_const_item() const { return 1; }
|
||||
bool eq(const Item *item, bool binary_cmp) const;
|
||||
Item *new_item() { return new Item_string(name,str_value.ptr(),max_length); }
|
||||
String *const_string() { return &str_value; }
|
||||
inline void append(char *str,uint length) { str_value.append(str,length); }
|
||||
@ -323,7 +324,8 @@ public:
|
||||
Item_ref(Item **item, char *table_name_par,char *field_name_par)
|
||||
:Item_ident(NullS,table_name_par,field_name_par),ref(item) {}
|
||||
enum Type type() const { return REF_ITEM; }
|
||||
bool eq(const Item *item) const { return (*ref)->eq(item); }
|
||||
bool eq(const Item *item, bool binary_cmp) const
|
||||
{ return (*ref)->eq(item, binary_cmp); }
|
||||
~Item_ref() { if (ref) delete *ref; }
|
||||
double val()
|
||||
{
|
||||
|
@ -148,7 +148,7 @@ void Item_func::print_op(String *str)
|
||||
str->append(')');
|
||||
}
|
||||
|
||||
bool Item_func::eq(const Item *item) const
|
||||
bool Item_func::eq(const Item *item, bool binary_cmp) const
|
||||
{
|
||||
/* Assume we don't have rtti */
|
||||
if (this == item)
|
||||
@ -160,7 +160,7 @@ bool Item_func::eq(const Item *item) const
|
||||
func_name() != item_func->func_name())
|
||||
return 0;
|
||||
for (uint i=0; i < arg_count ; i++)
|
||||
if (!args[i]->eq(item_func->args[i]))
|
||||
if (!args[i]->eq(item_func->args[i], binary_cmp))
|
||||
return 0;
|
||||
return 1;
|
||||
}
|
||||
@ -1975,7 +1975,7 @@ void Item_func_get_user_var::print(String *str)
|
||||
str->append(')');
|
||||
}
|
||||
|
||||
bool Item_func_get_user_var::eq(const Item *item) const
|
||||
bool Item_func_get_user_var::eq(const Item *item, bool binary_cmp) const
|
||||
{
|
||||
/* Assume we don't have rtti */
|
||||
if (this == item)
|
||||
@ -2198,7 +2198,7 @@ err:
|
||||
return 1;
|
||||
}
|
||||
|
||||
bool Item_func_match::eq(const Item *item) const
|
||||
bool Item_func_match::eq(const Item *item, bool binary_cmp) const
|
||||
{
|
||||
if (item->type() != FUNC_ITEM)
|
||||
return 0;
|
||||
@ -2209,7 +2209,7 @@ bool Item_func_match::eq(const Item *item) const
|
||||
Item_func_match *ifm=(Item_func_match*) item;
|
||||
|
||||
if (key == ifm->key && table == ifm->table &&
|
||||
key_item()->eq(ifm->key_item()))
|
||||
key_item()->eq(ifm->key_item(), binary_cmp))
|
||||
return 1;
|
||||
|
||||
return 0;
|
||||
|
@ -98,7 +98,7 @@ public:
|
||||
void make_field(Send_field *field);
|
||||
table_map used_tables() const;
|
||||
void update_used_tables();
|
||||
bool eq(const Item *item) const;
|
||||
bool eq(const Item *item, bool binary_cmp) const;
|
||||
virtual optimize_type select_optimize() const { return OPTIMIZE_NONE; }
|
||||
virtual bool have_rev_func() const { return 0; }
|
||||
virtual Item *key_item() const { return args[0]; }
|
||||
@ -889,7 +889,7 @@ public:
|
||||
bool const_item() const { return const_var_flag; }
|
||||
table_map used_tables() const
|
||||
{ return const_var_flag ? 0 : RAND_TABLE_BIT; }
|
||||
bool eq(const Item *item) const;
|
||||
bool eq(const Item *item, bool binary_cmp) const;
|
||||
};
|
||||
|
||||
|
||||
@ -937,7 +937,7 @@ public:
|
||||
enum Functype functype() const { return FT_FUNC; }
|
||||
void update_used_tables() {}
|
||||
bool fix_fields(THD *thd,struct st_table_list *tlist);
|
||||
bool eq(const Item *) const;
|
||||
bool eq(const Item *, bool binary_cmp) const;
|
||||
longlong val_int() { return val()!=0.0; }
|
||||
double val();
|
||||
|
||||
|
@ -88,6 +88,13 @@ MYSQL_LOCK *mysql_lock_tables(THD *thd,TABLE **tables,uint count)
|
||||
thd->locked=0;
|
||||
break;
|
||||
}
|
||||
else if (!thd->open_tables)
|
||||
{
|
||||
// Only using temporary tables, no need to unlock
|
||||
thd->some_tables_deleted=0;
|
||||
thd->locked=0;
|
||||
break;
|
||||
}
|
||||
|
||||
/* some table was altered or deleted. reopen tables marked deleted */
|
||||
mysql_unlock_tables(thd,sql_lock);
|
||||
|
@ -162,6 +162,7 @@ static SECURITY_DESCRIPTOR sdPipeDescriptor;
|
||||
static HANDLE hPipe = INVALID_HANDLE_VALUE;
|
||||
static pthread_cond_t COND_handler_count;
|
||||
static uint handler_count;
|
||||
static bool opt_enable_named_pipe = 0;
|
||||
#endif
|
||||
#ifdef __WIN__
|
||||
static bool opt_console=0,start_mode=0;
|
||||
@ -488,7 +489,7 @@ static void close_connections(void)
|
||||
}
|
||||
}
|
||||
#ifdef __NT__
|
||||
if ( hPipe != INVALID_HANDLE_VALUE )
|
||||
if (hPipe != INVALID_HANDLE_VALUE && opt_enable_named_pipe)
|
||||
{
|
||||
HANDLE temp;
|
||||
DBUG_PRINT( "quit", ("Closing named pipes") );
|
||||
@ -983,17 +984,14 @@ static void server_init(void)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
We have to first call set_user(), then set_root(), to get things to work
|
||||
with glibc
|
||||
*/
|
||||
set_user(mysqld_user); // Works also with mysqld_user==NULL
|
||||
if (mysqld_chroot)
|
||||
set_root(mysqld_chroot);
|
||||
set_user(mysqld_user); // Works also with mysqld_user==NULL
|
||||
|
||||
#ifdef __NT__
|
||||
/* create named pipe */
|
||||
if (Service.IsNT() && mysql_unix_port[0] && !opt_bootstrap)
|
||||
if (Service.IsNT() && mysql_unix_port[0] && !opt_bootstrap &&
|
||||
opt_enable_named_pipe)
|
||||
{
|
||||
sprintf( szPipeName, "\\\\.\\pipe\\%s", mysql_unix_port );
|
||||
ZeroMemory( &saPipeSecurity, sizeof(saPipeSecurity) );
|
||||
@ -1732,7 +1730,7 @@ int main(int argc, char **argv)
|
||||
|
||||
if (gethostname(glob_hostname,sizeof(glob_hostname)-4) < 0)
|
||||
strmov(glob_hostname,"mysql");
|
||||
strmov(pidfile_name,glob_hostname);
|
||||
strmake(pidfile_name, glob_hostname, sizeof(pidfile_name)-5);
|
||||
strmov(strcend(pidfile_name,'.'),".pid"); // Add extension
|
||||
#ifndef DBUG_OFF
|
||||
strxmov(strend(server_version),MYSQL_SERVER_SUFFIX,"-debug",NullS);
|
||||
@@ -2053,9 +2051,11 @@ The server will not act as a slave.");
fflush(stdout);

#ifdef __NT__
-if (hPipe == INVALID_HANDLE_VALUE && !have_tcpip)
+if (hPipe == INVALID_HANDLE_VALUE &&
+(!have_tcpip || opt_disable_networking))
{
-sql_print_error("TCP/IP or Named Pipes should be installed on NT OS");
+sql_print_error("TCP/IP or --enable-named-pipe should be configured on NT OS");
unireg_abort(1);
}
else
{
@@ -2064,7 +2064,7 @@ The server will not act as a slave.");
{
pthread_t hThread;
handler_count=0;
-if ( hPipe != INVALID_HANDLE_VALUE )
+if (hPipe != INVALID_HANDLE_VALUE && opt_enable_named_pipe)
{
handler_count++;
if (pthread_create(&hThread,&connection_attrib,
@@ -2581,9 +2581,9 @@ pthread_handler_decl(handle_connections_namedpipes,arg)
fConnected = ConnectNamedPipe( hPipe, NULL );
if (abort_loop)
break;
-if ( !fConnected )
+if (!fConnected)
fConnected = GetLastError() == ERROR_PIPE_CONNECTED;
-if ( !fConnected )
+if (!fConnected)
{
CloseHandle( hPipe );
if ((hPipe = CreateNamedPipe(szPipeName,
@@ -2621,7 +2621,7 @@ pthread_handler_decl(handle_connections_namedpipes,arg)
continue;					// We have to try again
}

-if ( !(thd = new THD))
+if (!(thd = new THD))
{
DisconnectNamedPipe( hConnectedPipe );
CloseHandle( hConnectedPipe );
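Illustrative sketch (Win32, not the server's code): a minimal named-pipe accept loop showing the ERROR_PIPE_CONNECTED idiom used in handle_connections_namedpipes above. The pipe name and handle_client() are assumptions for the example.

#include <windows.h>
#include <stdio.h>

static void handle_client(HANDLE pipe)          // placeholder for real work
{
  DisconnectNamedPipe(pipe);
  CloseHandle(pipe);
}

int main()
{
  const char *name = "\\\\.\\pipe\\example_pipe";
  for (;;)
  {
    HANDLE pipe = CreateNamedPipeA(name,
                                   PIPE_ACCESS_DUPLEX,
                                   PIPE_TYPE_BYTE | PIPE_READMODE_BYTE | PIPE_WAIT,
                                   PIPE_UNLIMITED_INSTANCES,
                                   4096, 4096, 0, NULL);
    if (pipe == INVALID_HANDLE_VALUE)
    {
      fprintf(stderr, "CreateNamedPipe failed: %lu\n", GetLastError());
      return 1;
    }
    // ConnectNamedPipe() returns FALSE with ERROR_PIPE_CONNECTED when a
    // client managed to connect between CreateNamedPipe() and this call;
    // that still counts as a successful connection.
    BOOL connected = ConnectNamedPipe(pipe, NULL);
    if (!connected)
      connected = (GetLastError() == ERROR_PIPE_CONNECTED);
    if (!connected)
    {
      CloseHandle(pipe);
      continue;                                 // try again with a new instance
    }
    handle_client(pipe);                        // hand the connection off
  }
}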
@@ -2705,6 +2705,7 @@ enum options {
OPT_SKIP_STACK_TRACE, OPT_SKIP_SYMLINKS,
OPT_MAX_BINLOG_DUMP_EVENTS, OPT_SPORADIC_BINLOG_DUMP_FAIL,
OPT_SAFE_USER_CREATE, OPT_SQL_MODE,
+OPT_HAVE_NAMED_PIPE,
OPT_DO_PSTACK, OPT_REPORT_HOST,
OPT_REPORT_USER, OPT_REPORT_PASSWORD, OPT_REPORT_PORT,
OPT_SHOW_SLAVE_AUTH_INFO, OPT_OLD_RPL_COMPAT,
@@ -2745,6 +2746,7 @@ static struct option long_options[] = {
{"delay-key-write-for-all-tables",
no_argument, 0, (int) OPT_DELAY_KEY_WRITE},
{"enable-locking", no_argument, 0, (int) OPT_ENABLE_LOCK},
+{"enable-named-pipe", no_argument, 0, (int) OPT_HAVE_NAMED_PIPE},
{"enable-pstack", no_argument, 0, (int) OPT_DO_PSTACK},
{"exit-info", optional_argument, 0, 'T'},
{"flush", no_argument, 0, (int) OPT_FLUSH},
@@ -3132,6 +3134,9 @@ struct show_var_st init_vars[]= {
{"myisam_max_sort_file_size",(char*) &myisam_max_sort_file_size, SHOW_LONG},
{"myisam_recover_options", (char*) &myisam_recover_options_str, SHOW_CHAR_PTR},
{"myisam_sort_buffer_size", (char*) &myisam_sort_buffer_size, SHOW_LONG},
+#ifdef __NT__
+{"named_pipe", (char*) &opt_enable_named_pipe, SHOW_BOOL},
+#endif
{"net_buffer_length", (char*) &net_buffer_length, SHOW_LONG},
{"net_read_timeout", (char*) &net_read_timeout, SHOW_LONG},
{"net_retry_count", (char*) &mysqld_net_retry_count, SHOW_LONG},
@@ -3470,10 +3475,12 @@ Starts the MySQL server\n");
-W, --warnings        Log some not critical warnings to the log file\n");
#ifdef __WIN__
puts("NT and Win32 specific options:\n\
---console             Don't remove the console window\n\
---install             Install mysqld as a service (NT)\n\
---remove              Remove mysqld from the service list (NT)\n\
---standalone          Dummy option to start as a standalone program (NT)\
+--console             Don't remove the console window\n\
+--install             Install the default service (NT)\n\
+--install-manual      Install the default service started manually (NT)\n\
+--remove              Remove the default service from the service list (NT)\n\
+--enable-named-pipe   Enable the named pipe (NT)\n\
+--standalone          Dummy option to start as a standalone program (NT)\
");
#ifdef USE_SYMDIR
puts("--use-symbolic-links	Enable symbolic link support");
@@ -3560,9 +3567,10 @@ static void set_options(void)
opt_specialflag |= SPECIAL_NO_PRIOR;
#endif

-(void) strmov( default_charset, MYSQL_CHARSET);
-(void) strmov( language, LANGUAGE);
-(void) strmov( mysql_real_data_home, get_relative_path(DATADIR));
+(void) strmake(default_charset, MYSQL_CHARSET, sizeof(default_charset)-1);
+(void) strmake(language, LANGUAGE, sizeof(language)-1);
+(void) strmake(mysql_real_data_home, get_relative_path(DATADIR),
+sizeof(mysql_real_data_home)-1);
#ifdef __WIN__
/* Allow Win32 users to move MySQL anywhere */
{
@@ -3573,9 +3581,9 @@ static void set_options(void)
}
#else
const char *tmpenv;
-if ( !(tmpenv = getenv("MY_BASEDIR_VERSION")))
+if (!(tmpenv = getenv("MY_BASEDIR_VERSION")))
tmpenv = DEFAULT_MYSQL_HOME;
-(void) strmov( mysql_home, tmpenv );
+(void) strmake(mysql_home, tmpenv, sizeof(mysql_home)-1);
#endif

#if defined( HAVE_mit_thread ) || defined( __WIN__ ) || defined( HAVE_LINUXTHREADS )
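Illustrative sketch only: simplified re-implementations of the two mysys string helpers this changeset swaps, written here just to show why the strmake() form is safer for fixed-size buffers. The real functions live in the strings/ directory of the tree; these bodies are approximations, and my_strmov/my_strmake are invented names.

#include <cstddef>

// strmov(): copy src into dst with no length limit, return a pointer to the
// terminating NUL (handy for chaining copies, but it can overrun dst).
char *my_strmov(char *dst, const char *src)
{
  while ((*dst = *src++))
    dst++;
  return dst;
}

// strmake(): copy at most `length` characters and always NUL-terminate, so a
// too-long source (for example a user-supplied --basedir or --datadir value)
// gets truncated instead of overflowing the destination array.
char *my_strmake(char *dst, const char *src, std::size_t length)
{
  while (length-- && (*dst = *src++))
    dst++;
  *dst = '\0';
  return dst;
}

int main()
{
  char home[8];
  // sizeof(buf)-1 leaves room for the terminating NUL, matching the
  // pattern used throughout the hunks above.
  my_strmake(home, "/a/very/long/path/that/will/not/fit", sizeof(home) - 1);
  return 0;
}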
@@ -3621,17 +3629,17 @@ static void get_options(int argc,char **argv)
default_tx_isolation= ISO_SERIALIZABLE;
break;
case 'b':
-strmov(mysql_home,optarg);
+strmake(mysql_home,optarg,sizeof(mysql_home)-1);
break;
case 'l':
opt_log=1;
opt_logname=optarg;			// Use hostname.log if null
break;
case 'h':
-strmov(mysql_real_data_home,optarg);
+strmake(mysql_real_data_home,optarg, sizeof(mysql_real_data_home)-1);
break;
case 'L':
-strmov(language,optarg);
+strmake(language, optarg, sizeof(language)-1);
break;
case 'n':
opt_specialflag|= SPECIAL_NEW_FUNC;
@@ -3991,11 +3999,16 @@ static void get_options(int argc,char **argv)
}
break;
case (int) OPT_PID_FILE:
-strmov(pidfile_name,optarg);
+strmake(pidfile_name, optarg, sizeof(pidfile_name)-1);
break;
case (int) OPT_INIT_FILE:
opt_init_file=optarg;
break;
+case (int) OPT_HAVE_NAMED_PIPE:
+#if __NT__
+opt_enable_named_pipe=1;
+#endif
+break;
#ifdef __WIN__
case (int) OPT_STANDALONE:		/* Dummy option for NT */
break;
@@ -4041,10 +4054,10 @@ static void get_options(int argc,char **argv)
myisam_delay_key_write=0;
break;
case 'C':
-strmov(default_charset,optarg);
+strmake(default_charset, optarg, sizeof(default_charset)-1);
break;
case OPT_CHARSETS_DIR:
-strmov(mysql_charsets_dir, optarg);
+strmake(mysql_charsets_dir, optarg, sizeof(mysql_charsets_dir)-1);
charsets_dir = mysql_charsets_dir;
break;
#include "sslopt-case.h"
@@ -4314,16 +4327,17 @@ static void fix_paths(void)

char buff[FN_REFLEN],*sharedir=get_relative_path(SHAREDIR);
if (test_if_hard_path(sharedir))
-strmov(buff,sharedir);			/* purecov: tested */
+strmake(buff,sharedir,sizeof(buff)-1);		/* purecov: tested */
else
-strxmov(buff,mysql_home,sharedir,NullS);
+strxnmov(buff,sizeof(buff)-1,mysql_home,sharedir,NullS);
convert_dirname(buff,buff,NullS);
(void) my_load_path(language,language,buff);

/* If --character-sets-dir isn't given, use shared library dir */
if (charsets_dir != mysql_charsets_dir)
{
-strmov(strmov(mysql_charsets_dir,buff),CHARSET_DIR);
+strxnmov(mysql_charsets_dir, sizeof(mysql_charsets_dir)-1, buff,
+CHARSET_DIR, NullS);
charsets_dir=mysql_charsets_dir;
}
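Illustrative sketch only: an approximation of the mysys strxnmov() used in the fix_paths() hunk above. Like strxmov() it concatenates a NullS-terminated list of strings into dst, but it never writes more than `length` characters before the terminating NUL, so a long basedir cannot overflow buff[FN_REFLEN]. The name my_strxnmov and the body are assumptions for illustration.

#include <cstdarg>
#include <cstddef>

#define NullS ((char *) 0)

char *my_strxnmov(char *dst, std::size_t length, const char *src, ...)
{
  char *end = dst + length;          // last position a character may occupy
  va_list ap;
  va_start(ap, src);
  while (src)
  {
    while (*src && dst < end)
      *dst++ = *src++;               // copy until source or room runs out
    src = va_arg(ap, const char *);  // next piece, NullS stops the loop
  }
  va_end(ap);
  *dst = '\0';                       // always NUL-terminate within length+1
  return dst;
}

int main()
{
  char path[16];
  // e.g. "<home>" + "share/" truncated to fit the buffer, as in fix_paths()
  my_strxnmov(path, sizeof(path) - 1, "/usr/local/mysql/", "share/", NullS);
  return 0;
}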
@@ -5,6 +5,7 @@ dist-hook:
test -d $(distdir)/$$dir || mkdir $(distdir)/$$dir; \
$(INSTALL_DATA) $(srcdir)/$$dir/*.* $(distdir)/$$dir; \
done; \
+sleep 1 ; touch $(srcdir)/*/errmsg.sys
$(INSTALL_DATA) $(srcdir)/charsets/README $(distdir)/charsets
$(INSTALL_DATA) $(srcdir)/charsets/Index $(distdir)/charsets

@@ -430,6 +430,7 @@ void close_thread_tables(THD *thd, bool locked)

while (thd->open_tables)
found_old_table|=close_thread_table(thd, &thd->open_tables);
+thd->some_tables_deleted=0;

/* Free tables to hold down open files */
while (open_cache.records > table_cache_size && unused_tables)
@@ -1692,7 +1693,7 @@ find_item_in_list(Item *find,List<Item> &items)
{
if (found)
{
-if ((*found)->eq(item))
+if ((*found)->eq(item,0))
continue;				// Same field twice (Access?)
if (current_thd->where)
my_printf_error(ER_NON_UNIQ_ERROR,ER(ER_NON_UNIQ_ERROR),MYF(0),
@@ -1708,7 +1709,7 @@ find_item_in_list(Item *find,List<Item> &items)
}
}
}
-else if (!table_name && (item->eq(find) ||
+else if (!table_name && (item->eq(find,0) ||
find->name &&
!my_strcasecmp(item->name,find->name)))
{
@@ -2186,7 +2187,7 @@ int setup_ftfuncs(THD *thd)
lj.rewind();
while ((ftf2=lj++) != ftf)
{
-if (ftf->eq(ftf2) && !ftf2->master)
+if (ftf->eq(ftf2,1) && !ftf2->master)
ftf2->master=ftf;
}
}

@@ -126,7 +126,7 @@ rename_tables(THD *thd, TABLE_LIST *table_list, bool skip_error)
new_table=ren_table->next;

sprintf(name,"%s/%s/%s%s",mysql_data_home,
-new_table->db,new_table->name,
+new_table->db,new_table->real_name,
reg_ext);
if (!access(name,F_OK))
{
@@ -134,7 +134,7 @@ rename_tables(THD *thd, TABLE_LIST *table_list, bool skip_error)
DBUG_RETURN(ren_table);		// This can't be skipped
}
sprintf(name,"%s/%s/%s%s",mysql_data_home,
-ren_table->db,ren_table->name,
+ren_table->db,ren_table->real_name,
reg_ext);
if ((table_type=get_table_type(name)) == DB_TYPE_UNKNOWN)
{
@@ -143,11 +143,11 @@ rename_tables(THD *thd, TABLE_LIST *table_list, bool skip_error)
DBUG_RETURN(ren_table);
}
else if (mysql_rename_table(table_type,
-ren_table->db, ren_table->name,
-new_table->db, new_table->name))
+ren_table->db, ren_table->real_name,
+new_table->db, new_table->real_name))
{
if (!skip_error)
-return ren_table;
+DBUG_RETURN(ren_table);
}
}
DBUG_RETURN(0);
@@ -1247,14 +1247,14 @@ merge_key_fields(KEY_FIELD *start,KEY_FIELD *new_fields,KEY_FIELD *end,
{
if (new_fields->val->used_tables())
{
-if (old->val->eq(new_fields->val))
+if (old->val->eq(new_fields->val, old->field->binary()))
{
old->level=old->const_level=and_level;
old->exists_optimize&=new_fields->exists_optimize;
}
}
-else if (old->val->eq(new_fields->val) && old->eq_func &&
-new_fields->eq_func)
+else if (old->val->eq(new_fields->val, old->field->binary()) &&
+old->eq_func && new_fields->eq_func)
{
old->level=old->const_level=and_level;
old->exists_optimize&=new_fields->exists_optimize;
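Illustrative sketch only, not optimizer code: why merge_key_fields() now passes old->field->binary() into eq(). Whether the two constants in "col = 'abc' AND col = 'ABC'" describe the same key value depends on the column: for a case-insensitive (non-binary) column they are one value and the predicates can be merged, for a binary column they conflict. The helper below is an invented stand-in for that decision.

#include <cstring>
#include <strings.h>   // strcasecmp (POSIX)
#include <cstdio>

static bool values_equal(const char *a, const char *b, bool binary_column)
{
  return binary_column ? strcmp(a, b) == 0 : strcasecmp(a, b) == 0;
}

int main()
{
  printf("non-binary column: mergeable=%d\n", values_equal("abc", "ABC", false));
  printf("binary column:     mergeable=%d\n", values_equal("abc", "ABC", true));
  return 0;
}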
@@ -2769,7 +2769,7 @@ eq_ref_table(JOIN *join, ORDER *start_order, JOIN_TAB *tab)
ORDER *order;
for (order=start_order ; order ; order=order->next)
{
-if ((*ref_item)->eq(order->item[0]))
+if ((*ref_item)->eq(order->item[0],0))
break;
}
if (order)
@@ -3026,7 +3026,7 @@ change_cond_ref_to_const(I_List<COND_CMP> *save_list,Item *and_father,
Item *right_item= func->arguments()[1];
Item_func::Functype functype=  func->functype();

-if (right_item->eq(field) && left_item != value)
+if (right_item->eq(field,0) && left_item != value)
{
Item *tmp=value->new_item();
if (tmp)
@@ -3045,7 +3045,7 @@ change_cond_ref_to_const(I_List<COND_CMP> *save_list,Item *and_father,
func->arguments()[1]->result_type()));
}
}
-else if (left_item->eq(field) && right_item != value)
+else if (left_item->eq(field,0) && right_item != value)
{
Item *tmp=value->new_item();
if (tmp)
@@ -3286,7 +3286,7 @@ remove_eq_conds(COND *cond,Item::cond_result *cond_value)
{					// boolean compare function
Item *left_item= ((Item_func*) cond)->arguments()[0];
Item *right_item= ((Item_func*) cond)->arguments()[1];
-if (left_item->eq(right_item))
+if (left_item->eq(right_item,1))
{
if (!left_item->maybe_null ||
((Item_func*) cond)->functype() == Item_func::EQUAL_FUNC)
@@ -3331,22 +3331,22 @@ const_expression_in_where(COND *cond, Item *comp_item, Item **const_item)
return 0;
Item *left_item= ((Item_func*) cond)->arguments()[0];
Item *right_item= ((Item_func*) cond)->arguments()[1];
-if (left_item->eq(comp_item))
+if (left_item->eq(comp_item,1))
{
if (right_item->const_item())
{
if (*const_item)
-return right_item->eq(*const_item);
+return right_item->eq(*const_item, 1);
*const_item=right_item;
return 1;
}
}
-else if (right_item->eq(comp_item))
+else if (right_item->eq(comp_item,1))
{
if (left_item->const_item())
{
if (*const_item)
-return left_item->eq(*const_item);
+return left_item->eq(*const_item, 1);
*const_item=left_item;
return 1;
}
@@ -5267,7 +5267,7 @@ static bool test_if_ref(Item_field *left_item,Item *right_item)
if (!field->table->const_table && !field->table->maybe_null)
{
Item *ref_item=part_of_refkey(field->table,field);
-if (ref_item && ref_item->eq(right_item))
+if (ref_item && ref_item->eq(right_item,1))
{
if (right_item->type() == Item::FIELD_ITEM)
return (field->eq_def(((Item_field *) right_item)->field));
@@ -6513,7 +6513,7 @@ test_if_subpart(ORDER *a,ORDER *b)
{
for (; a && b; a=a->next,b=b->next)
{
-if ((*a->item)->eq(*b->item))
+if ((*a->item)->eq(*b->item,1))
a->asc=b->asc;
else
return 0;
@@ -6540,7 +6540,7 @@ get_sort_by_table(ORDER *a,ORDER *b,TABLE_LIST *tables)

for (; a && b; a=a->next,b=b->next)
{
-if (!(*a->item)->eq(*b->item))
+if (!(*a->item)->eq(*b->item,1))
DBUG_RETURN(0);
map|=a->item[0]->used_tables();
}
@@ -908,9 +908,21 @@ store_create_info(THD *thd, TABLE *table, String *packet)
}
packet->append(')');
}
-packet->append("\n)", 2);

handler *file = table->file;

+/* Get possible foreign key definitions stored in InnoDB and append them
+to the CREATE TABLE statement */

+char* for_str = file->get_foreign_key_create_info();

+if (for_str) {
+packet->append(for_str, strlen(for_str));

+file->free_foreign_key_create_info(for_str);
+}

+packet->append("\n)", 2);
packet->append(" TYPE=", 6);
packet->append(file->table_type());
char buff[128];
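Illustrative sketch only: get_foreign_key_create_info() / free_foreign_key_create_info() in the hunk above follow a "handler allocates, caller appends and hands the buffer back" pattern. The toy classes below are invented to show that shape; they are not the real handler API.

#include <cstring>
#include <cstdlib>
#include <string>
#include <cstdio>

struct ToyHandler
{
  // Returns a heap-allocated string with engine-specific clauses (e.g. the
  // InnoDB FOREIGN KEY definitions), or NULL when there is nothing to add.
  char *get_foreign_key_create_info() const
  {
    return strdup(",\n  FOREIGN KEY (parent_id) REFERENCES parent(id)");
  }
  // The buffer must go back to the same component that allocated it.
  void free_foreign_key_create_info(char *str) const { free(str); }
};

int main()
{
  ToyHandler file;
  std::string packet = "CREATE TABLE t (\n  ...";

  if (char *for_str = file.get_foreign_key_create_info())
  {
    packet.append(for_str, strlen(for_str));   // splice in before the closing ")"
    file.free_foreign_key_create_info(for_str);
  }
  packet.append("\n)");
  std::printf("%s\n", packet.c_str());
  return 0;
}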
@@ -204,9 +204,17 @@ sh -c  "PATH=\"${MYSQL_BUILD_PATH:-/bin:/usr/bin}\" \
make benchdir_root=$RPM_BUILD_ROOT/usr/share/
}

-# Use the build root for temporary storage of the shared libraries.
+# Use our own copy of glibc

+OTHER_LIBC_DIR=/usr/local/mysql-glibc
+USE_OTHER_LIBC_DIR=""
+if test -d "$OTHER_LIBC_DIR"
+then
+USE_OTHER_LIBC_DIR="--with-other-libc=$OTHER_LIBC_DIR"
+fi

+# Use the build root for temporary storage of the shared libraries.

RBR=$RPM_BUILD_ROOT
MBD=$RPM_BUILD_DIR/mysql-%{mysql_version}
if test -z "$RBR" -o "$RBR" = "/"
@@ -217,10 +225,16 @@ fi
rm -rf $RBR
mkdir -p $RBR

+#
+# Use MYSQL_BUILD_PATH so that we can use a dedicated version of gcc
+#
+PATH=${MYSQL_BUILD_PATH:-/bin:/usr/bin}
+export PATH

# We need to build shared libraries separate from mysqld-max because we
# are using --with-other-libc

-BuildMySQL "--disable-shared --with-other-libc=$OTHER_LIBC_DIR --with-berkeley-db --with-innodb --with-mysqld-ldflags='-all-static' --with-server-suffix='-Max'"
+BuildMySQL "--disable-shared $USE_OTHER_LIBC_DIR --with-berkeley-db --with-innodb --with-mysqld-ldflags='-all-static' --with-server-suffix='-Max'"

# Save everything for debug
# tar cf $RBR/all.tar .
@@ -250,7 +264,7 @@ automake
BuildMySQL "--disable-shared" \
"--with-mysqld-ldflags='-all-static'" \
"--with-client-ldflags='-all-static'" \
-"--with-other-libc=$OTHER_LIBC_DIR" \
+"$USE_OTHER_LIBC_DIR" \
"--without-berkeley-db --without-innodb"
nm --numeric-sort sql/mysqld > sql/mysqld.sym
