5.1 -> 5.2 merge

This commit is contained in:
commit 5dec570d7c
@@ -42,7 +42,7 @@ AC_DEFUN([MYSQL_USE_BUNDLED_YASSL], [
  yassl_thread_cxxflags=""
  yassl_thread_safe=""
  if test "$with_server" != "no" -o "$THREAD_SAFE_CLIENT" != "no"; then
-   yassl_thread_cxxflags="-DYASSL_THREAD_SAFE"
+   yassl_thread_cxxflags="-DMULTI_THREADED"
    yassl_thread_safe="(thread-safe)"
  fi
  AC_SUBST([yassl_thread_cxxflags])
configure.in
@@ -13,7 +13,7 @@ dnl When changing the major version number please also check the switch
dnl statement in mysqlbinlog::check_master_version(). You may also need
dnl to update version.c in ndb.

-AC_INIT([MariaDB Server], [5.2.14-MariaDB], [], [mysql])
+AC_INIT([MariaDB Server], [5.2.15-MariaDB], [], [mysql])

AC_CONFIG_SRCDIR([sql/mysqld.cc])
AC_CANONICAL_SYSTEM

@@ -23,7 +23,7 @@ AC_CANONICAL_SYSTEM
AM_INIT_AUTOMAKE([1.9 tar-ustar])
AC_PROG_LIBTOOL

-AM_CONFIG_HEADER([include/config.h])
+AC_CONFIG_HEADERS([include/config.h])

# Request support for automake silent-rules if available.
# Default to verbose output. One can use the configure-time

@@ -275,6 +275,31 @@ AC_SUBST(LIBTOOL)dnl

AC_SUBST(NM)dnl

##############################################################################
# In automake 1.12, the extension on generated yacc/bison header files changed
##############################################################################

YACC_HEXT="h"
MAKEFILE_1ST=`head -1 "$srcdir/Makefile.in"`
AMAKE_MAJOR=`expr "$MAKEFILE_1ST" : '.*generated by automake \([[0-9]]*\).*'`
if test $? -eq "0" ; then
  if test "$AMAKE_MAJOR" -gt "1" ; then
    YACC_HEXT="hh"
    CXXFLAGS="$CXXFLAGS -DYACC_HEXT_HH"
  elif test "$AMAKE_MAJOR" -eq "1" ; then
    AMAKE_MINOR=`expr "$MAKEFILE_1ST" : '.*generated by automake 1.\([[0-9]]*\).*'`
    if test $? -eq "0" ; then
      if test "$AMAKE_MINOR" -ge "12" ; then
        YACC_HEXT="hh"
        CXXFLAGS="$CXXFLAGS -DYACC_HEXT_HH"
      fi
    fi
  fi
fi
AC_SUBST(YACC_HEXT)

##############################################################################

# NM= "$NM -X64"
#archive_expsym_cmds= `echo "$archive_expsym_cmds" | sed -e '/"$(CC)"//'`
#archive_expsym_cmds= "$CC -q64 $archive_expsym_cmds"
@@ -12,7 +12,16 @@ before calling SSL_new();

*** end Note ***

yaSSL Release notes, version 2.1.2 (9/2/2011)
yaSSL Release notes, version 2.2.2 (7/5/2012)

This release of yaSSL contains bug fixes and more security checks around
malicious certificates.

See normal build instructions below under 1.0.6.
See libcurl build instructions below under 1.3.0 and note in 1.5.8.


*****************yaSSL Release notes, version 2.1.2 (9/2/2011)

This release of yaSSL contains bug fixes, better non-blocking support with
SSL_write, and OpenSSL RSA public key format support.
@@ -27,7 +27,7 @@
  Visual Studio Source Annotations header (sourceannotations.h) fails
  to compile if outside of the global namespace.
*/
-#ifdef YASSL_THREAD_SAFE
+#ifdef MULTI_THREADED
#ifdef _WIN32
#include <windows.h>
#endif

@@ -36,8 +36,9 @@
namespace yaSSL {


-#ifdef YASSL_THREAD_SAFE
+#ifdef MULTI_THREADED
#ifdef _WIN32
#include <windows.h>

class Mutex {
    CRITICAL_SECTION cs_;

@@ -77,7 +78,7 @@ namespace yaSSL {
};

#endif // _WIN32
-#else // YASSL_THREAD_SAFE (WE'RE SINGLE)
+#else // MULTI_THREADED (WE'RE SINGLE)

class Mutex {
public:

@@ -87,7 +88,7 @@ namespace yaSSL {
    };
};

-#endif // YASSL_THREAD_SAFE
+#endif // MULTI_THREADED

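The hunks above switch the thread-safety guard in this header from YASSL_THREAD_SAFE back to MULTI_THREADED. As a rough standalone sketch (simplified, POSIX-only; not the yaSSL header itself, which also carries the _WIN32 branch shown above), the pattern such a guard selects between is:

    // MULTI_THREADED picks a real pthread mutex; otherwise locking is a no-op.
    #ifdef MULTI_THREADED
    #include <pthread.h>

    class Mutex {
        pthread_mutex_t mutex_;
    public:
        Mutex()  { pthread_mutex_init(&mutex_, 0); }
        ~Mutex() { pthread_mutex_destroy(&mutex_); }
        void lock()   { pthread_mutex_lock(&mutex_); }
        void unlock() { pthread_mutex_unlock(&mutex_); }
    };
    #else
    class Mutex {          // single-threaded build: same interface, no-ops
    public:
        void lock()   {}
        void unlock() {}
    };
    #endif

    // Scoped lock that works under either build.
    class Lock {
        Mutex& m_;
    public:
        explicit Lock(Mutex& m) : m_(m) { m_.lock(); }
        ~Lock()                         { m_.unlock(); }
    };

Callers take Lock guards unconditionally; the build flag alone decides whether they cost anything.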
@@ -35,7 +35,7 @@
#include "rsa.h"


-#define YASSL_VERSION "2.2.0"
+#define YASSL_VERSION "2.2.2"


#if defined(__cplusplus)
@@ -65,7 +65,7 @@ enum YasslError {
enum Library { yaSSL_Lib = 0, CryptoLib, SocketLib };
enum { MAX_ERROR_SZ = 80 };

-void SetErrorString(unsigned long, char*);
+void SetErrorString(YasslError, char*);

/* remove for now, if go back to exceptions use this wrapper
// Base class for all yaSSL exceptions
@@ -250,8 +250,7 @@ int CertManager::Validate()
    TaoCrypt::Source source((*last)->get_buffer(), (*last)->get_length());
    TaoCrypt::CertDecoder cert(source, true, &signers_, verifyNone_);

-   int err = cert.GetError().What();
-   if ( err )
+   if (int err = cert.GetError().What())
        return err;

    const TaoCrypt::PublicKey& key = cert.GetPublicKey();
@@ -767,8 +767,14 @@ int DoProcessReply(SSL& ssl)

    while (buffer.get_current() < hdr.length_ + RECORD_HEADER + offset) {
        // each message in record, can be more than 1 if not encrypted
-       if (ssl.getSecurity().get_parms().pending_ == false) // cipher on
+       if (ssl.getSecurity().get_parms().pending_ == false) { // cipher on
+           // sanity check for malicious/corrupted/illegal input
+           if (buffer.get_remaining() < hdr.length_) {
+               ssl.SetError(bad_input);
+               return 0;
+           }
            decrypt_message(ssl, buffer, hdr.length_);
+       }

        mySTL::auto_ptr<Message> msg(mf.CreateObject(hdr.type_));
        if (!msg.get()) {
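The new branch rejects a record whose header advertises more payload than is actually left in the receive buffer before decrypt_message() runs. A minimal sketch of that check in isolation (hypothetical names, not the yaSSL buffer types):

    #include <cstddef>

    // Return false (caller reports bad_input) when the claimed record length
    // exceeds the bytes remaining, so decryption never reads past the buffer.
    bool record_length_sane(std::size_t bytes_remaining, std::size_t claimed_length)
    {
        return claimed_length <= bytes_remaining;
    }

The check sits inside the cipher-on branch because a peer controls hdr.length_, so it must be validated against get_remaining() before being trusted as a decryption size.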
@@ -26,7 +26,7 @@
namespace yaSSL {


-#ifdef YASSL_THREAD_SAFE
+#ifdef MULTI_THREADED
#ifdef _WIN32

Mutex::Mutex()

@@ -79,7 +79,7 @@ namespace yaSSL {


#endif // _WIN32
-#endif // YASSL_THREAD_SAFE
+#endif // MULTI_THREADED

@@ -27,7 +27,6 @@




/* see man pages for function descriptions */

#include "runtime.hpp"

@@ -1014,7 +1013,7 @@ char* ERR_error_string(unsigned long errNumber, char* buffer)
    static char* msg = (char*)"Please supply a buffer for error string";

    if (buffer) {
-       SetErrorString(errNumber, buffer);
+       SetErrorString(YasslError(errNumber), buffer);
        return buffer;
    }
@@ -31,11 +31,6 @@
#pragma warning(disable: 4996)
#endif

-#ifdef _MSC_VER
-// 4996 warning to use MS extensions e.g., strcpy_s instead of strncpy
-#pragma warning(disable: 4996)
-#endif
-
namespace yaSSL {


@@ -60,7 +55,7 @@ Library Error::get_lib() const
*/


-void SetErrorString(unsigned long error, char* buffer)
+void SetErrorString(YasslError error, char* buffer)
{
    using namespace TaoCrypt;
    const int max = MAX_ERROR_SZ; // shorthand
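Tightening the parameter from unsigned long to the YasslError enum means arbitrary integers no longer convert silently; callers such as ERR_error_string() above now cast explicitly. A compact sketch of the idea with a made-up error set (not yaSSL's real error table):

    #include <cstdio>

    enum YasslError { no_error = 0, bad_input = 1 };
    enum { MAX_ERROR_SZ = 80 };

    // Typed overload: only values deliberately converted to YasslError arrive here.
    void SetErrorString(YasslError error, char* buffer)
    {
        const char* msg = (error == bad_input) ? "bad input data" : "unknown error number";
        std::snprintf(buffer, MAX_ERROR_SZ, "%s", msg);
    }

    char* ERR_error_string(unsigned long errNumber, char* buffer)
    {
        if (buffer)
            SetErrorString(YasslError(errNumber), buffer);   // explicit conversion
        return buffer;
    }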
@@ -92,7 +92,6 @@ typedef BlockCipher<ENCRYPTION, AES, CBC> AES_CBC_Encryption;
typedef BlockCipher<DECRYPTION, AES, CBC> AES_CBC_Decryption;




} // naemspace

#endif // TAO_CRYPT_AES_HPP
@@ -48,9 +48,11 @@ word32 PBKDF2_HMAC<T>::DeriveKey(byte* derived, word32 dLen, const byte* pwd,
                                 word32 pLen, const byte* salt, word32 sLen,
                                 word32 iterations) const
{
-   if (dLen > MaxDerivedKeyLength())
+   if (dLen > MaxDerivedKeyLength())
        return 0;

+   if (iterations < 0)
+       return 0;
+
    ByteBlock buffer(T::DIGEST_SIZE);
    HMAC<T> hmac;
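The added lines make DeriveKey() fail fast instead of deriving with impossible parameters. A hedged sketch of the two guards with placeholder types (not TaoCrypt's template):

    #include <cstdint>

    // max_len stands in for MaxDerivedKeyLength() of the underlying digest.
    bool pbkdf2_request_ok(uint32_t derived_len, uint32_t max_len, long iterations)
    {
        if (derived_len > max_len)
            return false;   // more key material requested than the scheme can produce
        if (iterations < 0)
            return false;   // iteration count must be non-negative
        return true;
    }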
@@ -154,6 +154,8 @@ word32 GetLength(Source& source)
    else
        length = b;

+   if (source.IsLeft(length) == false) return 0;
+
    return length;
}

@@ -832,7 +834,7 @@ void CertDecoder::GetName(NameType nt)
    if (email) {
        if (!(ptr = AddTag(ptr, buf_end, "/emailAddress=", 14, length))) {
            source_.SetError(CONTENT_E);
-           return;
+           return;
        }
    }

@@ -103,6 +103,16 @@ void HexDecoder::Decode()
        byte b = coded_.next() - 0x30; // 0 starts at 0x30
        byte b2 = coded_.next() - 0x30;

+       // sanity checks
+       if (b >= sizeof(hexDecode)/sizeof(hexDecode[0])) {
+           coded_.SetError(PEM_E);
+           return;
+       }
+       if (b2 >= sizeof(hexDecode)/sizeof(hexDecode[0])) {
+           coded_.SetError(PEM_E);
+           return;
+       }
+
        b = hexDecode[b];
        b2 = hexDecode[b2];

@@ -178,6 +188,7 @@ void Base64Decoder::Decode()
{
    word32 bytes = coded_.size();
    word32 plainSz = bytes - ((bytes + (pemLineSz - 1)) / pemLineSz);
+   const byte maxIdx = (byte)sizeof(base64Decode) + 0x2B - 1;
    plainSz = ((plainSz * 3) / 4) + 3;
    decoded_.New(plainSz);

@@ -200,6 +211,16 @@
        if (e4 == pad)
            pad4 = true;

+       if (e1 < 0x2B || e2 < 0x2B || e3 < 0x2B || e4 < 0x2B) {
+           coded_.SetError(PEM_E);
+           return;
+       }
+
+       if (e1 > maxIdx || e2 > maxIdx || e3 > maxIdx || e4 > maxIdx) {
+           coded_.SetError(PEM_E);
+           return;
+       }
+
        e1 = base64Decode[e1 - 0x2B];
        e2 = base64Decode[e2 - 0x2B];
        e3 = (e3 == pad) ? 0 : base64Decode[e3 - 0x2B];
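Both decoder hunks follow the same rule: prove an input byte falls inside the decode table's range before using it as an index. A table-free sketch of the equivalent validation for the base64 alphabet (the patch instead range-checks against its base64Decode table, hence the 0x2B base and maxIdx above):

    #include <cstdint>

    // Decode one base64 character; returns false for bytes outside the alphabet
    // instead of ever indexing a lookup table out of bounds.
    bool base64_decode_byte(uint8_t in, uint8_t& out)
    {
        if (in >= 'A' && in <= 'Z') { out = static_cast<uint8_t>(in - 'A');      return true; }
        if (in >= 'a' && in <= 'z') { out = static_cast<uint8_t>(in - 'a' + 26); return true; }
        if (in >= '0' && in <= '9') { out = static_cast<uint8_t>(in - '0' + 52); return true; }
        if (in == '+')              { out = 62; return true; }
        if (in == '/')              { out = 63; return true; }
        return false;   // '=' padding and garbage are left to the caller
    }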
@ -3,6 +3,21 @@ Microsoft Developer Studio Workspace File, Format Version 6.00
|
||||
|
||||
###############################################################################
|
||||
|
||||
Project: "benchmark"=.\benchmark\benchmark.dsp - Package Owner=<4>
|
||||
|
||||
Package=<5>
|
||||
{{{
|
||||
}}}
|
||||
|
||||
Package=<4>
|
||||
{{{
|
||||
Begin Project Dependency
|
||||
Project_Dep_Name taocrypt
|
||||
End Project Dependency
|
||||
}}}
|
||||
|
||||
###############################################################################
|
||||
|
||||
Project: "taocrypt"=.\taocrypt.dsp - Package Owner=<4>
|
||||
|
||||
Package=<5>
|
||||
@ -15,7 +30,7 @@ Package=<4>
|
||||
|
||||
###############################################################################
|
||||
|
||||
Project: "test"=.\test.dsp - Package Owner=<4>
|
||||
Project: "test"=.\test\test.dsp - Package Owner=<4>
|
||||
|
||||
Package=<5>
|
||||
{{{
|
||||
|
@ -13,7 +13,7 @@
|
||||
To use MemoryTracker merely add this file to your project
|
||||
No need to instantiate anything
|
||||
|
||||
If your app is multi threaded define YASSL_THREAD_SAFE
|
||||
If your app is multi threaded define MULTI_THREADED
|
||||
|
||||
*********************************************************************/
|
||||
|
||||
|
@ -37,12 +37,12 @@ RSC=rc.exe
|
||||
# PROP BASE Target_Dir ""
|
||||
# PROP Use_MFC 0
|
||||
# PROP Use_Debug_Libraries 0
|
||||
# PROP Output_Dir "test\Release"
|
||||
# PROP Intermediate_Dir "test\Release"
|
||||
# PROP Output_Dir "Release"
|
||||
# PROP Intermediate_Dir "Release"
|
||||
# PROP Ignore_Export_Lib 0
|
||||
# PROP Target_Dir ""
|
||||
# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
|
||||
# ADD CPP /nologo /MT /W3 /O2 /I "include" /I "mySTL" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /FR /YX /FD /c
|
||||
# ADD CPP /nologo /MD /W3 /O2 /I "../include" /I "../mySTL" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /FR /YX /FD /c
|
||||
# ADD BASE RSC /l 0x409 /d "NDEBUG"
|
||||
# ADD RSC /l 0x409 /d "NDEBUG"
|
||||
BSC32=bscmake.exe
|
||||
@ -61,12 +61,12 @@ LINK32=link.exe
|
||||
# PROP BASE Target_Dir ""
|
||||
# PROP Use_MFC 0
|
||||
# PROP Use_Debug_Libraries 1
|
||||
# PROP Output_Dir "test\Debug"
|
||||
# PROP Intermediate_Dir "test\Debug"
|
||||
# PROP Output_Dir "Debug"
|
||||
# PROP Intermediate_Dir "Debug"
|
||||
# PROP Ignore_Export_Lib 0
|
||||
# PROP Target_Dir ""
|
||||
# ADD BASE CPP /nologo /W3 /Gm /GX /ZI /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /GZ /c
|
||||
# ADD CPP /nologo /MTd /W3 /Gm /ZI /Od /I "include" /I "mySTL" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /FR /YX /FD /GZ /c
|
||||
# ADD CPP /nologo /MDd /W3 /Gm /ZI /Od /I "../include" /I "../mySTL" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /FR /YX /FD /GZ /c
|
||||
# ADD BASE RSC /l 0x409 /d "_DEBUG"
|
||||
# ADD RSC /l 0x409 /d "_DEBUG"
|
||||
BSC32=bscmake.exe
|
||||
@ -87,7 +87,7 @@ LINK32=link.exe
|
||||
# PROP Default_Filter "cpp;c;cxx;rc;def;r;odl;idl;hpj;bat"
|
||||
# Begin Source File
|
||||
|
||||
SOURCE=.\test\test.cpp
|
||||
SOURCE=.\test.cpp
|
||||
# End Source File
|
||||
# End Group
|
||||
# Begin Group "Header Files"
|
@ -90,7 +90,7 @@ Package=<4>
|
||||
|
||||
###############################################################################
|
||||
|
||||
Project: "test"=.\taocrypt\test.dsp - Package Owner=<4>
|
||||
Project: "test"=.\taocrypt\test\test.dsp - Package Owner=<4>
|
||||
|
||||
Package=<5>
|
||||
{{{
|
||||
@ -113,9 +113,6 @@ Package=<5>
|
||||
|
||||
Package=<4>
|
||||
{{{
|
||||
Begin Project Dependency
|
||||
Project_Dep_Name taocrypt
|
||||
End Project Dependency
|
||||
Begin Project Dependency
|
||||
Project_Dep_Name yassl
|
||||
End Project Dependency
|
||||
|
@ -1,4 +1,4 @@
|
||||
/* Copyright (c) 2011, 2012, Oracle and/or its affiliates.
|
||||
/* Copyright (c) 2011, 2013, Oracle and/or its affiliates.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
@ -16,7 +16,7 @@
|
||||
#ifndef _welcome_copyright_notice_h_
|
||||
#define _welcome_copyright_notice_h_
|
||||
|
||||
#define COPYRIGHT_NOTICE_CURRENT_YEAR "2012"
|
||||
#define COPYRIGHT_NOTICE_CURRENT_YEAR "2013"
|
||||
|
||||
/*
|
||||
This define specifies copyright notice which is displayed by every MySQL
|
||||
|
@ -81,7 +81,7 @@ sqlsources = derror.cc field.cc field_conv.cc strfunc.cc filesort.cc \
|
||||
sql_servers.cc event_parse_data.cc opt_table_elimination.cc
|
||||
|
||||
# automake misses these
|
||||
sql_yacc.cc sql_yacc.h: $(top_srcdir)/sql/sql_yacc.yy
|
||||
sql_yacc.cc sql_yacc.$(YACC_HEXT): $(top_srcdir)/sql/sql_yacc.yy
|
||||
|
||||
# The following libraries should be included in libmysqld.a
|
||||
INC_LIB= $(top_builddir)/regex/libregex.la \
|
||||
|
@ -65,8 +65,7 @@ nobase_test_DATA = \
|
||||
lib/My/SysInfo.pm \
|
||||
lib/My/Suite.pm \
|
||||
lib/My/CoreDump.pm \
|
||||
lib/My/SafeProcess/Base.pm \
|
||||
lib/My/SafeProcess/safe_process.pl
|
||||
lib/My/SafeProcess/Base.pm
|
||||
|
||||
SUBDIRS = lib/My/SafeProcess
|
||||
|
||||
|
@ -1,22 +0,0 @@
|
||||
--exec $MYSQL test -e "show processlist" > $MYSQLTEST_VARDIR/tmp/bl_dump_thread_id
|
||||
--disable_warnings
|
||||
drop table if exists t999;
|
||||
--enable_warnings
|
||||
# Create a table to hold the process list
|
||||
create temporary table t999(
|
||||
id int,
|
||||
user char(255),
|
||||
host char(255),
|
||||
db char(255),
|
||||
Command char(255),
|
||||
time int,
|
||||
State char(255),
|
||||
info char(255)
|
||||
);
|
||||
# Load processlist into table, headers will create seom warnings
|
||||
--disable_warnings
|
||||
--replace_result $MYSQLTEST_VARDIR "."
|
||||
eval LOAD DATA INFILE "$MYSQLTEST_VARDIR/tmp/bl_dump_thread_id" into table t999;
|
||||
--enable_warnings
|
||||
let $id = `select Id from t999 where Command="Binlog Dump"`;
|
||||
drop table t999;
|
@@ -154,12 +154,19 @@ int main(int argc, char* const argv[] )
    pid_t own_pid= getpid();
    pid_t parent_pid= getppid();
    bool nocore = false;
+   struct sigaction sa,sa_abort;
+
+   sa.sa_handler= handle_signal;
+   sa.sa_flags= SA_NOCLDSTOP;
+   sigemptyset(&sa.sa_mask);
+
+   sa_abort.sa_handler= handle_abort;
+   sigemptyset(&sa_abort.sa_mask);
    /* Install signal handlers */
-   signal(SIGTERM, handle_signal);
-   signal(SIGINT, handle_signal);
-   signal(SIGCHLD, handle_signal);
-   signal(SIGABRT, handle_abort);
+   sigaction(SIGTERM, &sa,NULL);
+   sigaction(SIGINT, &sa,NULL);
+   sigaction(SIGCHLD, &sa,NULL);
+   sigaction(SIGABRT, &sa_abort,NULL);

    sprintf(safe_process_name, "safe_process[%ld]", (long) own_pid);

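Replacing signal() with sigaction() gives the handlers a defined signal mask and lets SIGCHLD delivery be restricted with SA_NOCLDSTOP (no wake-ups when a child merely stops). A self-contained sketch of the same installation pattern (not safe_process.cc itself):

    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    static volatile sig_atomic_t terminated = 0;

    static void handle_signal(int)
    {
        terminated = 1;              // only async-signal-safe work here
    }

    int main()
    {
        struct sigaction sa = {};
        sa.sa_handler = handle_signal;
        sa.sa_flags = SA_NOCLDSTOP;  // no SIGCHLD when a child is merely stopped
        sigemptyset(&sa.sa_mask);

        sigaction(SIGTERM, &sa, NULL);
        sigaction(SIGINT, &sa, NULL);
        sigaction(SIGCHLD, &sa, NULL);

        while (!terminated)
            pause();                 // wait for a signal
        printf("got a termination signal\n");
        return 0;
    }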
@ -1,166 +0,0 @@
|
||||
#!/usr/bin/perl
|
||||
# -*- cperl -*-
|
||||
|
||||
# Copyright (c) 2007, 2011, Oracle and/or its affiliates
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; version 2 of the License.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
use lib 'lib';
|
||||
use My::SafeProcess::Base;
|
||||
use POSIX qw(WNOHANG);
|
||||
|
||||
###########################################################################
|
||||
# Util functions
|
||||
###########################################################################
|
||||
|
||||
#
|
||||
#Print message to stderr
|
||||
#
|
||||
my $verbose= 0;
|
||||
sub message {
|
||||
if ($verbose > 0){
|
||||
use Time::localtime;
|
||||
my $tm= localtime();
|
||||
my $timestamp= sprintf("%02d%02d%02d %2d:%02d:%02d",
|
||||
$tm->year % 100, $tm->mon+1, $tm->mday,
|
||||
$tm->hour, $tm->min, $tm->sec);
|
||||
print STDERR $timestamp, " monitor[$$]: ", @_, "\n";
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
###########################################################################
|
||||
# Main program
|
||||
###########################################################################
|
||||
|
||||
my $terminated= 0;
|
||||
|
||||
# Protect against being killed in the middle
|
||||
# of child creation, just set the terminated flag
|
||||
# to make sure the child will be killed off
|
||||
# when program is ready to do that
|
||||
$SIG{TERM}= sub { message("!Got signal @_"); $terminated= 1; };
|
||||
$SIG{INT}= sub { message("!Got signal @_"); $terminated= 1; };
|
||||
|
||||
my $parent_pid= getppid();
|
||||
|
||||
my $found_double_dash= 0;
|
||||
while (my $arg= shift(@ARGV)){
|
||||
|
||||
if ($arg =~ /^--$/){
|
||||
$found_double_dash= 1;
|
||||
last;
|
||||
}
|
||||
elsif ($arg =~ /^--verbose$/){
|
||||
$verbose= 1;
|
||||
}
|
||||
else {
|
||||
die "Unknown option: $arg";
|
||||
}
|
||||
}
|
||||
|
||||
my $path= shift(@ARGV); # Executable
|
||||
|
||||
die "usage:\n" .
|
||||
" safe_process.pl [opts] -- <path> [<args> [...<args_n>]]"
|
||||
unless defined $path || $found_double_dash;
|
||||
|
||||
|
||||
message("started");
|
||||
#message("path: '$path'");
|
||||
message("parent: $parent_pid");
|
||||
|
||||
# Start process to monitor
|
||||
my $child_pid=
|
||||
create_process(
|
||||
path => $path,
|
||||
args => \@ARGV,
|
||||
setpgrp => 1,
|
||||
);
|
||||
message("Started child $child_pid");
|
||||
|
||||
eval {
|
||||
sub handle_signal {
|
||||
$terminated= 1;
|
||||
message("Got signal @_");
|
||||
|
||||
# Ignore all signals
|
||||
foreach my $name (keys %SIG){
|
||||
$SIG{$name}= 'IGNORE';
|
||||
}
|
||||
|
||||
die "signaled\n";
|
||||
};
|
||||
local $SIG{TERM}= \&handle_signal;
|
||||
local $SIG{INT}= \&handle_signal;
|
||||
local $SIG{CHLD}= sub {
|
||||
message("Got signal @_");
|
||||
kill('KILL', -$child_pid);
|
||||
my $ret= waitpid($child_pid, 0);
|
||||
if ($? & 127){
|
||||
exit(65); # Killed by signal
|
||||
}
|
||||
exit($? >> 8);
|
||||
};
|
||||
|
||||
# Monitoring loop
|
||||
while(!$terminated) {
|
||||
|
||||
# Check if parent is still alive
|
||||
if (kill(0, $parent_pid) < 1){
|
||||
message("Parent is not alive anymore");
|
||||
last;
|
||||
}
|
||||
|
||||
# Wait for child to terminate but wakeup every
|
||||
# second to also check that parent is still alive
|
||||
my $ret_pid;
|
||||
$ret_pid= waitpid($child_pid, &WNOHANG);
|
||||
if ($ret_pid == $child_pid) {
|
||||
# Process has exited, collect return status
|
||||
my $ret_code= $? >> 8;
|
||||
message("Child exit: $ret_code");
|
||||
# Exit with exit status of the child
|
||||
exit ($ret_code);
|
||||
}
|
||||
sleep(1);
|
||||
}
|
||||
};
|
||||
if ( $@ ) {
|
||||
# The monitoring loop should have been
|
||||
# broken by handle_signal
|
||||
warn "Unexpected: $@" unless ( $@ =~ /signaled/ );
|
||||
}
|
||||
|
||||
# Use negative pid in order to kill the whole
|
||||
# process group
|
||||
#
|
||||
my $ret= kill('KILL', -$child_pid);
|
||||
message("Killed child: $child_pid, ret: $ret");
|
||||
if ($ret > 0) {
|
||||
message("Killed child: $child_pid");
|
||||
# Wait blocking for the child to return
|
||||
my $ret_pid= waitpid($child_pid, 0);
|
||||
if ($ret_pid != $child_pid){
|
||||
message("unexpected pid $ret_pid returned from waitpid($child_pid)");
|
||||
}
|
||||
}
|
||||
|
||||
message("DONE!");
|
||||
exit (1);
|
||||
|
||||
|
@ -664,6 +664,10 @@ sub run_test_server ($$$) {
|
||||
else {
|
||||
mtr_report("\nRetrying test $tname, ".
|
||||
"attempt($retries/$opt_retry)...\n");
|
||||
#saving the log file as filename.failed in case of retry
|
||||
my $worker_logdir= $result->{savedir};
|
||||
my $log_file_name=dirname($worker_logdir)."/".$result->{shortname}.".log";
|
||||
rename $log_file_name,$log_file_name.".failed";
|
||||
delete($result->{result});
|
||||
$result->{retries}= $retries+1;
|
||||
$result->write_test($sock, 'TESTCASE');
|
||||
@ -4465,6 +4469,7 @@ sub extract_warning_lines ($) {
|
||||
qr|Access denied for user|,
|
||||
qr|Aborted connection|,
|
||||
qr|table.*is full|,
|
||||
qr|setrlimit could not change the size of core files to 'infinity';|,
|
||||
);
|
||||
|
||||
my $matched_lines= [];
|
||||
|
@ -1940,6 +1940,250 @@ Warning 1292 Truncated incorrect INTEGER value: 'K'
|
||||
Warning 1292 Truncated incorrect INTEGER value: 'jxW<'
|
||||
DROP TABLE t1;
|
||||
SET SQL_BIG_TABLES=0;
|
||||
#
|
||||
# MDEV-641 LP:1002108 - Wrong result (or crash) from a query with duplicated field in the group list and a limit clause
|
||||
# Bug#11761078: 53534: INCORRECT 'SELECT SQL_BIG_RESULT...'
|
||||
# WITH GROUP BY ON DUPLICATED FIELDS
|
||||
#
|
||||
CREATE TABLE t1(
|
||||
col1 int,
|
||||
UNIQUE INDEX idx (col1));
|
||||
INSERT INTO t1 VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10),
|
||||
(11),(12),(13),(14),(15),(16),(17),(18),(19),(20);
|
||||
EXPLAIN SELECT col1 AS field1, col1 AS field2
|
||||
FROM t1 GROUP BY field1, field2;;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 index NULL idx 5 NULL 20 Using index; Using temporary; Using filesort
|
||||
FLUSH STATUS;
|
||||
SELECT col1 AS field1, col1 AS field2
|
||||
FROM t1 GROUP BY field1, field2;;
|
||||
field1 field2
|
||||
1 1
|
||||
2 2
|
||||
3 3
|
||||
4 4
|
||||
5 5
|
||||
6 6
|
||||
7 7
|
||||
8 8
|
||||
9 9
|
||||
10 10
|
||||
11 11
|
||||
12 12
|
||||
13 13
|
||||
14 14
|
||||
15 15
|
||||
16 16
|
||||
17 17
|
||||
18 18
|
||||
19 19
|
||||
20 20
|
||||
SHOW SESSION STATUS LIKE 'Sort_scan%';
|
||||
Variable_name Value
|
||||
Sort_scan 1
|
||||
EXPLAIN SELECT SQL_BIG_RESULT col1 AS field1, col1 AS field2
|
||||
FROM t1 GROUP BY field1, field2;;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 index NULL idx 5 NULL 20 Using index; Using filesort
|
||||
FLUSH STATUS;
|
||||
SELECT SQL_BIG_RESULT col1 AS field1, col1 AS field2
|
||||
FROM t1 GROUP BY field1, field2;;
|
||||
field1 field2
|
||||
1 1
|
||||
2 2
|
||||
3 3
|
||||
4 4
|
||||
5 5
|
||||
6 6
|
||||
7 7
|
||||
8 8
|
||||
9 9
|
||||
10 10
|
||||
11 11
|
||||
12 12
|
||||
13 13
|
||||
14 14
|
||||
15 15
|
||||
16 16
|
||||
17 17
|
||||
18 18
|
||||
19 19
|
||||
20 20
|
||||
SHOW SESSION STATUS LIKE 'Sort_scan%';
|
||||
Variable_name Value
|
||||
Sort_scan 1
|
||||
CREATE VIEW v1 AS SELECT * FROM t1;
|
||||
SELECT SQL_BIG_RESULT col1 AS field1, col1 AS field2
|
||||
FROM v1
|
||||
GROUP BY field1, field2;
|
||||
field1 field2
|
||||
1 1
|
||||
2 2
|
||||
3 3
|
||||
4 4
|
||||
5 5
|
||||
6 6
|
||||
7 7
|
||||
8 8
|
||||
9 9
|
||||
10 10
|
||||
11 11
|
||||
12 12
|
||||
13 13
|
||||
14 14
|
||||
15 15
|
||||
16 16
|
||||
17 17
|
||||
18 18
|
||||
19 19
|
||||
20 20
|
||||
SELECT SQL_BIG_RESULT tbl1.col1 AS field1, tbl2.col1 AS field2
|
||||
FROM t1 as tbl1, t1 as tbl2
|
||||
GROUP BY field1, field2
|
||||
LIMIT 3;
|
||||
field1 field2
|
||||
1 1
|
||||
1 2
|
||||
1 3
|
||||
explain
|
||||
select col1 f1, col1 f2 from t1 order by f2, f1;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 index NULL idx 5 NULL 20 Using index; Using filesort
|
||||
select col1 f1, col1 f2 from t1 order by f2, f1;
|
||||
f1 f2
|
||||
1 1
|
||||
2 2
|
||||
3 3
|
||||
4 4
|
||||
5 5
|
||||
6 6
|
||||
7 7
|
||||
8 8
|
||||
9 9
|
||||
10 10
|
||||
11 11
|
||||
12 12
|
||||
13 13
|
||||
14 14
|
||||
15 15
|
||||
16 16
|
||||
17 17
|
||||
18 18
|
||||
19 19
|
||||
20 20
|
||||
explain
|
||||
select col1 f1, col1 f2 from t1 group by f2 order by f2, f1;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 range NULL idx 5 NULL 7 Using index for group-by; Using temporary; Using filesort
|
||||
select col1 f1, col1 f2 from t1 group by f2 order by f2, f1;
|
||||
f1 f2
|
||||
1 1
|
||||
2 2
|
||||
3 3
|
||||
4 4
|
||||
5 5
|
||||
6 6
|
||||
7 7
|
||||
8 8
|
||||
9 9
|
||||
10 10
|
||||
11 11
|
||||
12 12
|
||||
13 13
|
||||
14 14
|
||||
15 15
|
||||
16 16
|
||||
17 17
|
||||
18 18
|
||||
19 19
|
||||
20 20
|
||||
explain
|
||||
select col1 f1, col1 f2 from t1 group by f1, f2 order by f2, f1;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 index NULL idx 5 NULL 20 Using index; Using temporary; Using filesort
|
||||
select col1 f1, col1 f2 from t1 group by f1, f2 order by f2, f1;
|
||||
f1 f2
|
||||
1 1
|
||||
2 2
|
||||
3 3
|
||||
4 4
|
||||
5 5
|
||||
6 6
|
||||
7 7
|
||||
8 8
|
||||
9 9
|
||||
10 10
|
||||
11 11
|
||||
12 12
|
||||
13 13
|
||||
14 14
|
||||
15 15
|
||||
16 16
|
||||
17 17
|
||||
18 18
|
||||
19 19
|
||||
20 20
|
||||
CREATE TABLE t2(
|
||||
col1 int,
|
||||
col2 int,
|
||||
UNIQUE INDEX idx (col1, col2));
|
||||
INSERT INTO t2(col1, col2) VALUES
|
||||
(1,20),(2,19),(3,18),(4,17),(5,16),(6,15),(7,14),(8,13),(9,12),(10,11),
|
||||
(11,10),(12,9),(13,8),(14,7),(15,6),(16,5),(17,4),(18,3),(19,2),(20,1);
|
||||
explain
|
||||
select col1 f1, col2 f2, col1 f3 from t2 group by f1, f2, f3;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t2 index NULL idx 10 NULL 20 Using index; Using temporary; Using filesort
|
||||
select col1 f1, col2 f2, col1 f3 from t2 group by f1, f2, f3;
|
||||
f1 f2 f3
|
||||
1 20 1
|
||||
2 19 2
|
||||
3 18 3
|
||||
4 17 4
|
||||
5 16 5
|
||||
6 15 6
|
||||
7 14 7
|
||||
8 13 8
|
||||
9 12 9
|
||||
10 11 10
|
||||
11 10 11
|
||||
12 9 12
|
||||
13 8 13
|
||||
14 7 14
|
||||
15 6 15
|
||||
16 5 16
|
||||
17 4 17
|
||||
18 3 18
|
||||
19 2 19
|
||||
20 1 20
|
||||
explain
|
||||
select col1 f1, col2 f2, col1 f3 from t2 order by f1, f2, f3;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t2 index NULL idx 10 NULL 20 Using index; Using filesort
|
||||
select col1 f1, col2 f2, col1 f3 from t2 order by f1, f2, f3;
|
||||
f1 f2 f3
|
||||
1 20 1
|
||||
2 19 2
|
||||
3 18 3
|
||||
4 17 4
|
||||
5 16 5
|
||||
6 15 6
|
||||
7 14 7
|
||||
8 13 8
|
||||
9 12 9
|
||||
10 11 10
|
||||
11 10 11
|
||||
12 9 12
|
||||
13 8 13
|
||||
14 7 14
|
||||
15 6 15
|
||||
16 5 16
|
||||
17 4 17
|
||||
18 3 18
|
||||
19 2 19
|
||||
20 1 20
|
||||
DROP VIEW v1;
|
||||
DROP TABLE t1, t2;
|
||||
# End of 5.1 tests
|
||||
#
|
||||
# LP bug#694450 Wrong result with non-standard GROUP BY + ORDER BY
|
||||
|
@ -738,3 +738,23 @@ SELECT c2 FROM t1;
|
||||
c2
|
||||
0
|
||||
DROP TABLE t1;
|
||||
CREATE TABLE t1 (
|
||||
id int(11) NOT NULL AUTO_INCREMENT,
|
||||
color enum('GREEN', 'WHITE') DEFAULT NULL,
|
||||
ts int,
|
||||
PRIMARY KEY (id),
|
||||
KEY color (color) USING HASH
|
||||
) ENGINE=MEMORY DEFAULT CHARSET=utf8;
|
||||
INSERT INTO t1 VALUES("1","GREEN",1);
|
||||
INSERT INTO t1 VALUES("2","GREEN",1);
|
||||
INSERT INTO t1 VALUES("3","GREEN",1);
|
||||
INSERT INTO t1 VALUES("4","GREEN",1);
|
||||
INSERT INTO t1 VALUES("5","GREEN",1);
|
||||
INSERT INTO t1 VALUES("6","GREEN",1);
|
||||
DELETE FROM t1 WHERE id = 1;
|
||||
INSERT INTO t1 VALUES("7","GREEN", 2);
|
||||
DELETE FROM t1 WHERE ts = 1 AND color = 'GREEN';
|
||||
SELECT * from t1;
|
||||
id color ts
|
||||
7 GREEN 2
|
||||
DROP TABLE t1;
|
||||
|
@ -463,6 +463,8 @@ GROUP BY @b:=(SELECT COUNT(*) > t2.a);
|
||||
@a:=MIN(t1.a)
|
||||
1
|
||||
DROP TABLE t1;
|
||||
SET @bug12408412=1;
|
||||
SELECT GROUP_CONCAT(@bug12408412 ORDER BY 1) INTO @bug12408412;
|
||||
End of 5.1 tests
|
||||
CREATE TABLE t1(a INT);
|
||||
INSERT INTO t1 VALUES (0);
|
||||
@ -496,3 +498,4 @@ SELECT @a;
|
||||
@a
|
||||
1
|
||||
DROP TABLE t1;
|
||||
End of 5.2 tests
|
||||
|
@ -1,204 +1,60 @@
|
||||
stop slave;
|
||||
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
|
||||
reset master;
|
||||
reset slave;
|
||||
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
|
||||
start slave;
|
||||
stop slave;
|
||||
create table t1(n int not null auto_increment primary key);
|
||||
insert into t1 values (1),(2),(3),(4);
|
||||
drop table t1;
|
||||
create table t2(n int not null auto_increment primary key);
|
||||
insert into t2 values (1),(2);
|
||||
insert into t2 values (3),(4);
|
||||
drop table t2;
|
||||
start slave until master_log_file='master-bin.000001', master_log_pos=311;
|
||||
select * from t1;
|
||||
include/master-slave.inc
|
||||
[connection master]
|
||||
CREATE TABLE t1(n INT NOT NULL AUTO_INCREMENT PRIMARY KEY);
|
||||
INSERT INTO t1 VALUES (1),(2),(3),(4);
|
||||
DROP TABLE t1;
|
||||
CREATE TABLE t2(n INT NOT NULL AUTO_INCREMENT PRIMARY KEY);
|
||||
INSERT INTO t2 VALUES (1),(2);
|
||||
INSERT INTO t2 VALUES (3),(4);
|
||||
DROP TABLE t2;
|
||||
include/stop_slave.inc
|
||||
RESET SLAVE;
|
||||
START SLAVE UNTIL MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=master_pos_drop_t1
|
||||
include/wait_for_slave_sql_to_stop.inc
|
||||
SELECT * FROM t1;
|
||||
n
|
||||
1
|
||||
2
|
||||
3
|
||||
4
|
||||
show slave status;
|
||||
Slave_IO_State #
|
||||
Master_Host 127.0.0.1
|
||||
Master_User root
|
||||
Master_Port MASTER_MYPORT
|
||||
Connect_Retry 1
|
||||
Master_Log_File master-bin.000001
|
||||
Read_Master_Log_Pos #
|
||||
Relay_Log_File slave-relay-bin.000004
|
||||
Relay_Log_Pos #
|
||||
Relay_Master_Log_File master-bin.000001
|
||||
Slave_IO_Running #
|
||||
Slave_SQL_Running No
|
||||
Replicate_Do_DB
|
||||
Replicate_Ignore_DB
|
||||
Replicate_Do_Table
|
||||
Replicate_Ignore_Table
|
||||
Replicate_Wild_Do_Table
|
||||
Replicate_Wild_Ignore_Table
|
||||
Last_Errno 0
|
||||
Last_Error
|
||||
Skip_Counter 0
|
||||
Exec_Master_Log_Pos #
|
||||
Relay_Log_Space #
|
||||
Until_Condition Master
|
||||
Until_Log_File master-bin.000001
|
||||
Until_Log_Pos 311
|
||||
Master_SSL_Allowed No
|
||||
Master_SSL_CA_File
|
||||
Master_SSL_CA_Path
|
||||
Master_SSL_Cert
|
||||
Master_SSL_Cipher
|
||||
Master_SSL_Key
|
||||
Seconds_Behind_Master #
|
||||
Master_SSL_Verify_Server_Cert No
|
||||
Last_IO_Errno 0
|
||||
Last_IO_Error
|
||||
Last_SQL_Errno 0
|
||||
Last_SQL_Error
|
||||
start slave until master_log_file='master-no-such-bin.000001', master_log_pos=291;
|
||||
select * from t1;
|
||||
n 1
|
||||
n 2
|
||||
n 3
|
||||
n 4
|
||||
show slave status;
|
||||
Slave_IO_State #
|
||||
Master_Host 127.0.0.1
|
||||
Master_User root
|
||||
Master_Port MASTER_MYPORT
|
||||
Connect_Retry 1
|
||||
Master_Log_File master-bin.000001
|
||||
Read_Master_Log_Pos #
|
||||
Relay_Log_File slave-relay-bin.000004
|
||||
Relay_Log_Pos #
|
||||
Relay_Master_Log_File master-bin.000001
|
||||
Slave_IO_Running #
|
||||
Slave_SQL_Running No
|
||||
Replicate_Do_DB
|
||||
Replicate_Ignore_DB
|
||||
Replicate_Do_Table
|
||||
Replicate_Ignore_Table
|
||||
Replicate_Wild_Do_Table
|
||||
Replicate_Wild_Ignore_Table
|
||||
Last_Errno 0
|
||||
Last_Error
|
||||
Skip_Counter 0
|
||||
Exec_Master_Log_Pos #
|
||||
Relay_Log_Space #
|
||||
Until_Condition Master
|
||||
Until_Log_File master-no-such-bin.000001
|
||||
Until_Log_Pos 291
|
||||
Master_SSL_Allowed No
|
||||
Master_SSL_CA_File
|
||||
Master_SSL_CA_Path
|
||||
Master_SSL_Cert
|
||||
Master_SSL_Cipher
|
||||
Master_SSL_Key
|
||||
Seconds_Behind_Master #
|
||||
Master_SSL_Verify_Server_Cert No
|
||||
Last_IO_Errno 0
|
||||
Last_IO_Error
|
||||
Last_SQL_Errno 0
|
||||
Last_SQL_Error
|
||||
start slave until relay_log_file='slave-relay-bin.000004', relay_log_pos=728;
|
||||
select * from t2;
|
||||
show slave status;
|
||||
Slave_IO_State #
|
||||
Master_Host 127.0.0.1
|
||||
Master_User root
|
||||
Master_Port MASTER_MYPORT
|
||||
Connect_Retry 1
|
||||
Master_Log_File master-bin.000001
|
||||
Read_Master_Log_Pos #
|
||||
Relay_Log_File slave-relay-bin.000004
|
||||
Relay_Log_Pos #
|
||||
Relay_Master_Log_File master-bin.000001
|
||||
Slave_IO_Running #
|
||||
Slave_SQL_Running No
|
||||
Replicate_Do_DB
|
||||
Replicate_Ignore_DB
|
||||
Replicate_Do_Table
|
||||
Replicate_Ignore_Table
|
||||
Replicate_Wild_Do_Table
|
||||
Replicate_Wild_Ignore_Table
|
||||
Last_Errno 0
|
||||
Last_Error
|
||||
Skip_Counter 0
|
||||
Exec_Master_Log_Pos #
|
||||
Relay_Log_Space #
|
||||
Until_Condition Relay
|
||||
Until_Log_File slave-relay-bin.000004
|
||||
Until_Log_Pos 728
|
||||
Master_SSL_Allowed No
|
||||
Master_SSL_CA_File
|
||||
Master_SSL_CA_Path
|
||||
Master_SSL_Cert
|
||||
Master_SSL_Cipher
|
||||
Master_SSL_Key
|
||||
Seconds_Behind_Master #
|
||||
Master_SSL_Verify_Server_Cert No
|
||||
Last_IO_Errno 0
|
||||
Last_IO_Error
|
||||
Last_SQL_Errno 0
|
||||
Last_SQL_Error
|
||||
start slave;
|
||||
stop slave;
|
||||
start slave until master_log_file='master-bin.000001', master_log_pos=740;
|
||||
show slave status;
|
||||
Slave_IO_State #
|
||||
Master_Host 127.0.0.1
|
||||
Master_User root
|
||||
Master_Port MASTER_MYPORT
|
||||
Connect_Retry 1
|
||||
Master_Log_File master-bin.000001
|
||||
Read_Master_Log_Pos #
|
||||
Relay_Log_File slave-relay-bin.000004
|
||||
Relay_Log_Pos #
|
||||
Relay_Master_Log_File master-bin.000001
|
||||
Slave_IO_Running Yes
|
||||
Slave_SQL_Running No
|
||||
Replicate_Do_DB
|
||||
Replicate_Ignore_DB
|
||||
Replicate_Do_Table
|
||||
Replicate_Ignore_Table
|
||||
Replicate_Wild_Do_Table
|
||||
Replicate_Wild_Ignore_Table
|
||||
Last_Errno 0
|
||||
Last_Error
|
||||
Skip_Counter 0
|
||||
Exec_Master_Log_Pos #
|
||||
Relay_Log_Space #
|
||||
Until_Condition Master
|
||||
Until_Log_File master-bin.000001
|
||||
Until_Log_Pos 740
|
||||
Master_SSL_Allowed No
|
||||
Master_SSL_CA_File
|
||||
Master_SSL_CA_Path
|
||||
Master_SSL_Cert
|
||||
Master_SSL_Cipher
|
||||
Master_SSL_Key
|
||||
Seconds_Behind_Master #
|
||||
Master_SSL_Verify_Server_Cert No
|
||||
Last_IO_Errno 0
|
||||
Last_IO_Error
|
||||
Last_SQL_Errno 0
|
||||
Last_SQL_Error
|
||||
start slave until master_log_file='master-bin', master_log_pos=561;
|
||||
include/check_slave_param.inc [Exec_Master_Log_Pos]
|
||||
START SLAVE UNTIL MASTER_LOG_FILE='master-no-such-bin.000001', MASTER_LOG_POS=MASTER_LOG_POS;
|
||||
include/wait_for_slave_sql_to_stop.inc
|
||||
SELECT * FROM t1;
|
||||
n
|
||||
1
|
||||
2
|
||||
3
|
||||
4
|
||||
include/check_slave_param.inc [Exec_Master_Log_Pos]
|
||||
START SLAVE UNTIL RELAY_LOG_FILE='slave-relay-bin.000002', RELAY_LOG_POS=relay_pos_insert1_t2
|
||||
include/wait_for_slave_sql_to_stop.inc
|
||||
SELECT * FROM t2;
|
||||
n
|
||||
1
|
||||
2
|
||||
include/check_slave_param.inc [Exec_Master_Log_Pos]
|
||||
START SLAVE;
|
||||
include/wait_for_slave_to_start.inc
|
||||
include/stop_slave.inc
|
||||
START SLAVE SQL_THREAD UNTIL MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=master_pos_create_t2
|
||||
include/wait_for_slave_param.inc [Until_Log_Pos]
|
||||
include/wait_for_slave_sql_to_stop.inc
|
||||
include/check_slave_param.inc [Exec_Master_Log_Pos]
|
||||
START SLAVE UNTIL MASTER_LOG_FILE='master-bin', MASTER_LOG_POS=MASTER_LOG_POS;
|
||||
ERROR HY000: Incorrect parameter or combination of parameters for START SLAVE UNTIL
|
||||
start slave until master_log_file='master-bin.000001', master_log_pos=561, relay_log_pos=12;
|
||||
START SLAVE UNTIL MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=MASTER_LOG_POS, RELAY_LOG_POS=RELAY_LOG_POS;
|
||||
ERROR HY000: Incorrect parameter or combination of parameters for START SLAVE UNTIL
|
||||
start slave until master_log_file='master-bin.000001';
|
||||
START SLAVE UNTIL MASTER_LOG_FILE='master-bin.000001';
|
||||
ERROR HY000: Incorrect parameter or combination of parameters for START SLAVE UNTIL
|
||||
start slave until relay_log_file='slave-relay-bin.000002';
|
||||
START SLAVE UNTIL RELAY_LOG_FILE='slave-relay-bin.000009';
|
||||
ERROR HY000: Incorrect parameter or combination of parameters for START SLAVE UNTIL
|
||||
start slave until relay_log_file='slave-relay-bin.000002', master_log_pos=561;
|
||||
START SLAVE UNTIL RELAY_LOG_FILE='slave-relay-bin.000002', MASTER_LOG_POS=MASTER_LOG_POS;
|
||||
ERROR HY000: Incorrect parameter or combination of parameters for START SLAVE UNTIL
|
||||
start slave sql_thread;
|
||||
start slave until master_log_file='master-bin.000001', master_log_pos=740;
|
||||
START SLAVE;
|
||||
START SLAVE UNTIL MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=MASTER_LOG_POS;
|
||||
Warnings:
|
||||
Level Note
|
||||
Code 1254
|
||||
Message Slave is already running
|
||||
Note 1254 Slave is already running
|
||||
include/stop_slave.inc
|
||||
RESET SLAVE;
|
||||
include/rpl_end.inc
|
||||
|
@ -72,7 +72,6 @@ rpl000017 : Result Difference Due to Change in .inc file
|
||||
rpl_skip_error : Result Difference Due to Change in .inc file
|
||||
rpl_sp : Result Difference Due to Change in .inc file
|
||||
|
||||
rpl_row_until : Test Present in rpl suite as well . Test Fails with table t2 not found.
|
||||
rpl_loaddata_s : Test Present in rpl suite as well . Test Fails due to bin log truncation.
|
||||
rpl_log_pos : Test Present in rpl suite as well . Test Fails due to bin log truncation.
|
||||
rpl_row_NOW : Result Difference Due to Change in .inc file
|
||||
|
@ -2,90 +2,126 @@
|
||||
-- source include/have_binlog_format_row.inc
|
||||
-- source include/master-slave.inc
|
||||
|
||||
# Test is dependent on binlog positions
|
||||
# Note: The test is dependent on binlog positions
|
||||
|
||||
# prepare version for substitutions
|
||||
let $VERSION=`select version()`;
|
||||
|
||||
# stop slave before he will start replication also sync with master
|
||||
# for avoiding undetermenistic behaviour
|
||||
# Create some events on master
|
||||
connection master;
|
||||
CREATE TABLE t1(n INT NOT NULL AUTO_INCREMENT PRIMARY KEY);
|
||||
INSERT INTO t1 VALUES (1),(2),(3),(4);
|
||||
DROP TABLE t1;
|
||||
# Save master log position for query DROP TABLE t1
|
||||
save_master_pos;
|
||||
connection slave;
|
||||
sync_with_master;
|
||||
stop slave;
|
||||
let $master_pos_drop_t1= query_get_value(SHOW BINLOG EVENTS, Pos, 7);
|
||||
let $master_log_file= query_get_value(SHOW BINLOG EVENTS, Log_name, 7);
|
||||
|
||||
CREATE TABLE t2(n INT NOT NULL AUTO_INCREMENT PRIMARY KEY);
|
||||
# Save master log position for query CREATE TABLE t2
|
||||
save_master_pos;
|
||||
let $master_pos_create_t2= query_get_value(SHOW BINLOG EVENTS, Pos, 8);
|
||||
|
||||
INSERT INTO t2 VALUES (1),(2);
|
||||
save_master_pos;
|
||||
# Save master log position for query INSERT INTO t2 VALUES (1),(2);
|
||||
let $master_pos_insert1_t2= query_get_value(SHOW BINLOG EVENTS, End_log_pos, 12);
|
||||
sync_slave_with_master;
|
||||
|
||||
# Save relay log position for query INSERT INTO t2 VALUES (1),(2);
|
||||
let $relay_pos_insert1_t2= query_get_value(show slave status, Relay_Log_Pos, 1);
|
||||
|
||||
connection master;
|
||||
# create some events on master
|
||||
create table t1(n int not null auto_increment primary key);
|
||||
insert into t1 values (1),(2),(3),(4);
|
||||
drop table t1;
|
||||
create table t2(n int not null auto_increment primary key);
|
||||
insert into t2 values (1),(2);
|
||||
insert into t2 values (3),(4);
|
||||
drop table t2;
|
||||
INSERT INTO t2 VALUES (3),(4);
|
||||
DROP TABLE t2;
|
||||
# Save master log position for query INSERT INTO t2 VALUES (1),(2);
|
||||
let $master_pos_drop_t2= query_get_value(SHOW BINLOG EVENTS, End_log_pos, 17);
|
||||
sync_slave_with_master;
|
||||
|
||||
# try to replicate all queries until drop of t1
|
||||
--source include/stop_slave.inc
|
||||
# Reset slave.
|
||||
RESET SLAVE;
|
||||
--disable_query_log
|
||||
eval CHANGE MASTER TO MASTER_USER='root', MASTER_CONNECT_RETRY=1, MASTER_HOST='127.0.0.1', MASTER_PORT=$MASTER_MYPORT;
|
||||
--enable_query_log
|
||||
|
||||
# Try to replicate all queries until drop of t1
|
||||
connection slave;
|
||||
start slave until master_log_file='master-bin.000001', master_log_pos=311;
|
||||
sleep 2;
|
||||
wait_for_slave_to_stop;
|
||||
# here table should be still not deleted
|
||||
select * from t1;
|
||||
--vertical_results
|
||||
--replace_result $MASTER_MYPORT MASTER_MYPORT
|
||||
--replace_column 1 # 7 # 9 # 11 # 22 # 23 # 33 #
|
||||
show slave status;
|
||||
echo START SLAVE UNTIL MASTER_LOG_FILE='$master_log_file', MASTER_LOG_POS=master_pos_drop_t1;
|
||||
--disable_query_log
|
||||
eval START SLAVE UNTIL MASTER_LOG_FILE='$master_log_file', MASTER_LOG_POS=$master_pos_drop_t1;
|
||||
--enable_query_log
|
||||
--source include/wait_for_slave_sql_to_stop.inc
|
||||
|
||||
# this should fail right after start
|
||||
start slave until master_log_file='master-no-such-bin.000001', master_log_pos=291;
|
||||
# Here table should be still not deleted
|
||||
SELECT * FROM t1;
|
||||
--let $slave_param= Exec_Master_Log_Pos
|
||||
--let $slave_param_value= $master_pos_drop_t1
|
||||
--source include/check_slave_param.inc
|
||||
|
||||
# This should fail right after start
|
||||
--replace_result 291 MASTER_LOG_POS
|
||||
START SLAVE UNTIL MASTER_LOG_FILE='master-no-such-bin.000001', MASTER_LOG_POS=291;
|
||||
--source include/wait_for_slave_sql_to_stop.inc
|
||||
# again this table should be still not deleted
|
||||
select * from t1;
|
||||
sleep 2;
|
||||
wait_for_slave_to_stop;
|
||||
--vertical_results
|
||||
--replace_result $MASTER_MYPORT MASTER_MYPORT
|
||||
--replace_column 1 # 7 # 9 # 11 # 22 # 23 # 33 #
|
||||
show slave status;
|
||||
SELECT * FROM t1;
|
||||
|
||||
# try replicate all up to and not including the second insert to t2;
|
||||
start slave until relay_log_file='slave-relay-bin.000004', relay_log_pos=728;
|
||||
sleep 2;
|
||||
wait_for_slave_to_stop;
|
||||
select * from t2;
|
||||
--vertical_results
|
||||
--replace_result $MASTER_MYPORT MASTER_MYPORT
|
||||
--replace_column 1 # 7 # 9 # 11 # 22 # 23 # 33 #
|
||||
show slave status;
|
||||
--let $slave_param= Exec_Master_Log_Pos
|
||||
--let $slave_param_value= $master_pos_drop_t1
|
||||
--source include/check_slave_param.inc
|
||||
|
||||
# Try replicate all up to and not including the second insert to t2;
|
||||
echo START SLAVE UNTIL RELAY_LOG_FILE='slave-relay-bin.000002', RELAY_LOG_POS=relay_pos_insert1_t2;
|
||||
--disable_query_log
|
||||
eval START SLAVE UNTIL RELAY_LOG_FILE='slave-relay-bin.000002', RELAY_LOG_POS=$relay_pos_insert1_t2;
|
||||
--enable_query_log
|
||||
--source include/wait_for_slave_sql_to_stop.inc
|
||||
SELECT * FROM t2;
|
||||
|
||||
--let $slave_param= Exec_Master_Log_Pos
|
||||
--let $slave_param_value= $master_pos_insert1_t2
|
||||
--source include/check_slave_param.inc
|
||||
|
||||
# clean up
|
||||
start slave;
|
||||
START SLAVE;
|
||||
--source include/wait_for_slave_to_start.inc
|
||||
connection master;
|
||||
save_master_pos;
|
||||
connection slave;
|
||||
sync_with_master;
|
||||
stop slave;
|
||||
sync_slave_with_master;
|
||||
--source include/stop_slave.inc
|
||||
|
||||
# this should stop immediately as we are already there
|
||||
start slave until master_log_file='master-bin.000001', master_log_pos=740;
|
||||
sleep 2;
|
||||
wait_for_slave_to_stop;
|
||||
# This should stop immediately as we are already there
|
||||
echo START SLAVE SQL_THREAD UNTIL MASTER_LOG_FILE='$master_log_file', MASTER_LOG_POS=master_pos_create_t2;
|
||||
--disable_query_log
|
||||
eval START SLAVE SQL_THREAD UNTIL MASTER_LOG_FILE='$master_log_file', MASTER_LOG_POS=$master_pos_create_t2;
|
||||
--enable_query_log
|
||||
let $slave_param= Until_Log_Pos;
|
||||
let $slave_param_value= $master_pos_create_t2;
|
||||
--source include/wait_for_slave_param.inc
|
||||
--source include/wait_for_slave_sql_to_stop.inc
|
||||
# here the sql slave thread should be stopped
|
||||
--vertical_results
|
||||
--replace_result $MASTER_MYPORT MASTER_MYPORT bin.000005 bin.000004 bin.000006 bin.000004 bin.000007 bin.000004
|
||||
--replace_column 1 # 7 # 9 # 22 # 23 # 33 #
|
||||
show slave status;
|
||||
--let $slave_param= Exec_Master_Log_Pos
|
||||
--let $slave_param_value= $master_pos_drop_t2
|
||||
--source include/check_slave_param.inc
|
||||
|
||||
#testing various error conditions
|
||||
--replace_result 561 MASTER_LOG_POS
|
||||
--error 1277
|
||||
start slave until master_log_file='master-bin', master_log_pos=561;
|
||||
START SLAVE UNTIL MASTER_LOG_FILE='master-bin', MASTER_LOG_POS=561;
|
||||
--replace_result 561 MASTER_LOG_POS 12 RELAY_LOG_POS
|
||||
--error 1277
|
||||
start slave until master_log_file='master-bin.000001', master_log_pos=561, relay_log_pos=12;
|
||||
START SLAVE UNTIL MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=561, RELAY_LOG_POS=12;
|
||||
--error 1277
|
||||
start slave until master_log_file='master-bin.000001';
|
||||
START SLAVE UNTIL MASTER_LOG_FILE='master-bin.000001';
|
||||
--error 1277
|
||||
start slave until relay_log_file='slave-relay-bin.000002';
|
||||
START SLAVE UNTIL RELAY_LOG_FILE='slave-relay-bin.000009';
|
||||
--replace_result 561 MASTER_LOG_POS
|
||||
--error 1277
|
||||
start slave until relay_log_file='slave-relay-bin.000002', master_log_pos=561;
|
||||
START SLAVE UNTIL RELAY_LOG_FILE='slave-relay-bin.000002', MASTER_LOG_POS=561;
|
||||
# Warning should be given for second command
|
||||
start slave sql_thread;
|
||||
start slave until master_log_file='master-bin.000001', master_log_pos=740;
|
||||
START SLAVE;
|
||||
--replace_result 740 MASTER_LOG_POS
|
||||
START SLAVE UNTIL MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=740;
|
||||
|
||||
--source include/stop_slave.inc
|
||||
# Clear slave IO error.
|
||||
RESET SLAVE;
|
||||
|
||||
--let $rpl_only_running_threads= 1
|
||||
--source include/rpl_end.inc
|
||||
|
mysql-test/suite/innodb/r/innodb_bug14676111.result (new file, 53 lines)
@ -0,0 +1,53 @@
|
||||
drop table if exists t1;
|
||||
CREATE TABLE t1 (a int not null primary key) engine=InnoDB;
|
||||
set global innodb_limit_optimistic_insert_debug = 2;
|
||||
insert into t1 values (1);
|
||||
insert into t1 values (5);
|
||||
insert into t1 values (4);
|
||||
insert into t1 values (3);
|
||||
insert into t1 values (2);
|
||||
analyze table t1;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 analyze status OK
|
||||
select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';
|
||||
DATA_LENGTH / 16384
|
||||
10.0000
|
||||
delete from t1 where a=4;
|
||||
analyze table t1;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 analyze status OK
|
||||
select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';
|
||||
DATA_LENGTH / 16384
|
||||
8.0000
|
||||
delete from t1 where a=5;
|
||||
analyze table t1;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 analyze status OK
|
||||
select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';
|
||||
DATA_LENGTH / 16384
|
||||
5.0000
|
||||
set global innodb_limit_optimistic_insert_debug = 10000;
|
||||
delete from t1 where a=2;
|
||||
analyze table t1;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 analyze status OK
|
||||
select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';
|
||||
DATA_LENGTH / 16384
|
||||
3.0000
|
||||
insert into t1 values (2);
|
||||
delete from t1 where a=2;
|
||||
analyze table t1;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 analyze status OK
|
||||
select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';
|
||||
DATA_LENGTH / 16384
|
||||
2.0000
|
||||
insert into t1 values (2);
|
||||
delete from t1 where a=2;
|
||||
analyze table t1;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 analyze status OK
|
||||
select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';
|
||||
DATA_LENGTH / 16384
|
||||
1.0000
|
||||
drop table t1;
|
@ -1,11 +1,6 @@
|
||||
--source include/have_innodb.inc
|
||||
--source include/have_debug.inc
|
||||
|
||||
if (`select plugin_auth_version <= "1.0.17-13.01" from information_schema.plugins where plugin_name='innodb'`)
|
||||
{
|
||||
--skip Not fixed in XtraDB 1.0.17-13.01 or earlier
|
||||
}
|
||||
|
||||
create table t1 (
|
||||
rowid int,
|
||||
f1 int,
|
||||
|
mysql-test/suite/innodb/t/innodb_bug14676111.test (new file, 128 lines)
@ -0,0 +1,128 @@
|
||||
# Test for bug #14676111: WRONG PAGE_LEVEL WRITTEN FOR UPPER THAN FATHER PAGE AT BTR_LIFT_PAGE_UP()
|
||||
|
||||
-- source include/have_innodb.inc
|
||||
-- source include/have_debug.inc
|
||||
|
||||
if (`select count(*)=0 from information_schema.global_variables where variable_name = 'INNODB_LIMIT_OPTIMISTIC_INSERT_DEBUG'`)
|
||||
{
|
||||
--skip Test requires InnoDB built with UNIV_DEBUG definition.
|
||||
}
|
||||
|
||||
--disable_query_log
|
||||
set @old_innodb_limit_optimistic_insert_debug = @@innodb_limit_optimistic_insert_debug;
|
||||
--enable_query_log
|
||||
--disable_warnings
|
||||
drop table if exists t1;
|
||||
--enable_warnings
|
||||
|
||||
CREATE TABLE t1 (a int not null primary key) engine=InnoDB;
|
||||
|
||||
let $wait_condition=
|
||||
SELECT VARIABLE_VALUE < 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS
|
||||
WHERE VARIABLE_NAME = 'INNODB_PURGE_TRX_ID_AGE';
|
||||
|
||||
#
|
||||
# make 4 leveled straight tree
|
||||
#
|
||||
set global innodb_limit_optimistic_insert_debug = 2;
|
||||
insert into t1 values (1);
|
||||
insert into t1 values (5);
|
||||
#current tree form
|
||||
# (1, 5)
|
||||
|
||||
insert into t1 values (4);
|
||||
#records in a page is limited to 2 artificially. root rise occurs
|
||||
#current tree form
|
||||
# (1, 5)
|
||||
#(1, 4) (5)
|
||||
|
||||
insert into t1 values (3);
|
||||
#current tree form
|
||||
# (1, 5)
|
||||
# (1, 4) (5)
|
||||
#(1, 3) (4) (5)
|
||||
|
||||
insert into t1 values (2);
|
||||
#current tree form
|
||||
# (1, 5)
|
||||
# (1, 4) (5)
|
||||
# (1, 3) (4) (5)
|
||||
#(1, 2) (3) (4) (5)
|
||||
|
||||
analyze table t1;
|
||||
select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';
|
||||
|
||||
delete from t1 where a=4;
|
||||
--source include/wait_condition.inc
|
||||
#deleting 1 record of 2 records don't cause merge artificially.
|
||||
#current tree form
|
||||
# (1, 5)
|
||||
# (1) (5)
|
||||
# (1, 3) (5)
|
||||
#(1, 2) (3) (5)
|
||||
|
||||
analyze table t1;
|
||||
select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';
|
||||
|
||||
delete from t1 where a=5;
|
||||
--source include/wait_condition.inc
|
||||
#deleting 1 record of 2 records don't cause merge artificially.
|
||||
#current tree form
|
||||
# (1)
|
||||
# (1)
|
||||
# (1, 3) <- lift up this level next, when deleting node ptr
|
||||
#(1, 2) (3) <- merged next
|
||||
|
||||
analyze table t1;
|
||||
select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';
|
||||
|
||||
#
|
||||
# cause merge at level 0
|
||||
#
|
||||
|
||||
#disable the artificial limitation of records in a page
|
||||
set global innodb_limit_optimistic_insert_debug = 10000;
|
||||
delete from t1 where a=2;
|
||||
--source include/wait_condition.inc
|
||||
#merge page occurs. and lift up occurs.
|
||||
#current tree form
|
||||
# (1)
|
||||
# (1)
|
||||
# (1, 3)
|
||||
|
||||
analyze table t1;
|
||||
select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';
|
||||
|
||||
insert into t1 values (2);
|
||||
#current tree form
|
||||
# (1)
|
||||
# (1) <- lift up this level next, because it is not root
|
||||
# (1, 2, 3)
|
||||
|
||||
delete from t1 where a=2;
|
||||
--source include/wait_condition.inc
|
||||
#current tree form
|
||||
# (1)
|
||||
# (1, 3)
|
||||
|
||||
analyze table t1;
|
||||
select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';
|
||||
|
||||
insert into t1 values (2);
|
||||
#current tree form
|
||||
# (1)
|
||||
# (1, 2, 3) <- lift up this level next, because the father is root
|
||||
|
||||
delete from t1 where a=2;
|
||||
--source include/wait_condition.inc
|
||||
#current tree form
|
||||
# (1, 3)
|
||||
|
||||
analyze table t1;
|
||||
select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';
|
||||
|
||||
drop table t1;
|
||||
|
||||
--disable_query_log
|
||||
set global innodb_limit_optimistic_insert_debug = @old_innodb_limit_optimistic_insert_debug;
|
||||
--enable_query_log
|
@ -963,7 +963,7 @@ Table Op Msg_type Msg_text
|
||||
test.t1 check status OK
|
||||
explain select * from t1 where b like 'adfd%';
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 ALL b NULL NULL NULL 15 Using where
|
||||
1 SIMPLE t1 range b b 769 NULL # Using where
|
||||
create table t2(a int, b varchar(255), primary key(a,b)) engine=innodb;
|
||||
insert into t2 select a,left(b,255) from t1;
|
||||
drop table t1;
|
||||
|
mysql-test/suite/innodb_plugin/r/innodb_bug14676111.result (new file, 53 lines)
@ -0,0 +1,53 @@
drop table if exists t1;
CREATE TABLE t1 (a int not null primary key) engine=InnoDB;
set global innodb_limit_optimistic_insert_debug = 2;
insert into t1 values (1);
insert into t1 values (5);
insert into t1 values (4);
insert into t1 values (3);
insert into t1 values (2);
analyze table t1;
Table Op Msg_type Msg_text
test.t1 analyze status OK
select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';
DATA_LENGTH / 16384
10.0000
delete from t1 where a=4;
analyze table t1;
Table Op Msg_type Msg_text
test.t1 analyze status OK
select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';
DATA_LENGTH / 16384
8.0000
delete from t1 where a=5;
analyze table t1;
Table Op Msg_type Msg_text
test.t1 analyze status OK
select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';
DATA_LENGTH / 16384
5.0000
set global innodb_limit_optimistic_insert_debug = 10000;
delete from t1 where a=2;
analyze table t1;
Table Op Msg_type Msg_text
test.t1 analyze status OK
select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';
DATA_LENGTH / 16384
3.0000
insert into t1 values (2);
delete from t1 where a=2;
analyze table t1;
Table Op Msg_type Msg_text
test.t1 analyze status OK
select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';
DATA_LENGTH / 16384
2.0000
insert into t1 values (2);
delete from t1 where a=2;
analyze table t1;
Table Op Msg_type Msg_text
test.t1 analyze status OK
select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';
DATA_LENGTH / 16384
1.0000
drop table t1;
@ -343,7 +343,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index NULL PRIMARY 5 NULL 4 Using index; Using temporary
explain select distinct f1, f2 from t1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range NULL PRIMARY 5 NULL 3 Using index for group-by; Using temporary
1 SIMPLE t1 index NULL PRIMARY 5 NULL 4 Using index
drop table t1;
CREATE TABLE t1 (id int(11) NOT NULL PRIMARY KEY, name varchar(20),
INDEX (name));

@ -1,4 +1,8 @@
-- source include/have_innodb_plugin.inc
if (`select plugin_auth_version <= "1.0.17-14.1" from information_schema.plugins where plugin_name='innodb'`)
{
--skip Not fixed in XtraDB 1.0.17-14.1 or earlier
}

let $MYSQLD_DATADIR= `select @@datadir`;

@ -420,6 +424,10 @@ select a,
length(b),b=left(repeat(d,100*a),65535),length(c),c=repeat(d,20*a),d from t1;
show create table t1;
check table t1;

# On my local machine and on the pb2 machine, only the key_len field differs.
# So we mask this problematic output.
--replace_column 9 #
explain select * from t1 where b like 'adfd%';

#

128
mysql-test/suite/innodb_plugin/t/innodb_bug14676111.test
Normal file
@ -0,0 +1,128 @@
# Test for bug #14676111: WRONG PAGE_LEVEL WRITTEN FOR UPPER THAN FATHER PAGE AT BTR_LIFT_PAGE_UP()

-- source include/have_innodb_plugin.inc
-- source include/have_debug.inc

if (`select count(*)=0 from information_schema.global_variables where variable_name = 'INNODB_LIMIT_OPTIMISTIC_INSERT_DEBUG'`)
{
--skip Test requires InnoDB built with UNIV_DEBUG definition.
}

--disable_query_log
set @old_innodb_limit_optimistic_insert_debug = @@innodb_limit_optimistic_insert_debug;
--enable_query_log
--disable_warnings
drop table if exists t1;
--enable_warnings

CREATE TABLE t1 (a int not null primary key) engine=InnoDB;

let $wait_condition=
SELECT VARIABLE_VALUE < 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS
WHERE VARIABLE_NAME = 'INNODB_PURGE_TRX_ID_AGE';

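# include/wait_condition.inc polls $wait_condition until it is true, i.e.
# until INNODB_PURGE_TRX_ID_AGE drops below 1 and the purge thread has
# caught up, so the tree shape inspected below is stable before each check.
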
#
# make a 4-level straight tree
#
set global innodb_limit_optimistic_insert_debug = 2;
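# innodb_limit_optimistic_insert_debug (available only in UNIV_DEBUG builds)
# caps the number of records per page, so setting it to 2 forces frequent
# page splits and quickly grows a tall tree.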
insert into t1 values (1);
insert into t1 values (5);
#current tree form
# (1, 5)

insert into t1 values (4);
#records per page are artificially limited to 2, so a root rise occurs
#current tree form
# (1, 5)
#(1, 4) (5)

insert into t1 values (3);
#current tree form
# (1, 5)
# (1, 4) (5)
#(1, 3) (4) (5)

insert into t1 values (2);
#current tree form
# (1, 5)
# (1, 4) (5)
# (1, 3) (4) (5)
#(1, 2) (3) (4) (5)

analyze table t1;
select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';

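# Throughout this test, DATA_LENGTH / 16384 is the clustered index size
# expressed in 16KB pages, so it tracks how the tree grows and shrinks.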
delete from t1 where a=4;
--source include/wait_condition.inc
#deleting 1 of the 2 records in a page doesn't cause a merge under the artificial limit.
#current tree form
# (1, 5)
# (1) (5)
# (1, 3) (5)
#(1, 2) (3) (5)

analyze table t1;
select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';

delete from t1 where a=5;
--source include/wait_condition.inc
#deleting 1 of the 2 records in a page doesn't cause a merge under the artificial limit.
#current tree form
# (1)
# (1)
# (1, 3) <- lift up this level next, when deleting node ptr
#(1, 2) (3) <- merged next

analyze table t1;
select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';

#
# cause merge at level 0
#

#disable the artificial limitation of records in a page
set global innodb_limit_optimistic_insert_debug = 10000;
delete from t1 where a=2;
--source include/wait_condition.inc
#a page merge occurs, and a lift up occurs.
#current tree form
# (1)
# (1)
# (1, 3)

analyze table t1;
select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';

insert into t1 values (2);
#current tree form
# (1)
# (1) <- lift up this level next, because it is not root
# (1, 2, 3)

delete from t1 where a=2;
--source include/wait_condition.inc
#current tree form
# (1)
# (1, 3)

analyze table t1;
select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';

insert into t1 values (2);
#current tree form
# (1)
# (1, 2, 3) <- lift up this level next, because the father is root

delete from t1 where a=2;
--source include/wait_condition.inc
#current tree form
# (1, 3)

analyze table t1;
select DATA_LENGTH / 16384 from information_schema.TABLES where TABLE_SCHEMA = 'test' and TABLE_NAME = 't1';

drop table t1;

--disable_query_log
set global innodb_limit_optimistic_insert_debug = @old_innodb_limit_optimistic_insert_debug;
--enable_query_log
@ -4,6 +4,10 @@
|
||||
# 2006-07-26 ML test refactored (MySQL 5.1)
|
||||
# main testing code t/innodb_mysql.test -> include/mix1.inc
|
||||
#
|
||||
if (`select plugin_auth_version <= "1.0.17-14.1" from information_schema.plugins where plugin_name='innodb'`)
|
||||
{
|
||||
--skip Not fixed in XtraDB 1.0.17-14.1 or earlier
|
||||
}
|
||||
|
||||
-- source include/have_query_cache.inc
|
||||
|
||||
|
@ -37566,7 +37566,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
|
||||
ALTER TABLE t1 OPTIMIZE PARTITION part_1;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
|
||||
test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
|
||||
test.t1 optimize status OK
|
||||
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
|
||||
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
@ -38026,7 +38026,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
|
||||
ALTER TABLE t1 OPTIMIZE PARTITION part_1;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
|
||||
test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
|
||||
test.t1 optimize status OK
|
||||
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
|
||||
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
@ -38497,7 +38497,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
|
||||
ALTER TABLE t1 OPTIMIZE PARTITION part_1;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
|
||||
test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
|
||||
test.t1 optimize status OK
|
||||
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
|
||||
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
@ -38969,7 +38969,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
|
||||
ALTER TABLE t1 OPTIMIZE PARTITION part_1;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
|
||||
test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
|
||||
test.t1 optimize status OK
|
||||
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
|
||||
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
@ -39435,7 +39435,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
|
||||
ALTER TABLE t1 OPTIMIZE PARTITION part_1;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
|
||||
test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
|
||||
test.t1 optimize status OK
|
||||
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
|
||||
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
@ -39907,7 +39907,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
|
||||
ALTER TABLE t1 OPTIMIZE PARTITION part_1;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
|
||||
test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
|
||||
test.t1 optimize status OK
|
||||
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
|
||||
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
@ -40384,7 +40384,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
|
||||
ALTER TABLE t1 OPTIMIZE PARTITION part_1;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
|
||||
test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
|
||||
test.t1 optimize status OK
|
||||
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
|
||||
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
@ -40859,7 +40859,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
|
||||
ALTER TABLE t1 OPTIMIZE PARTITION part_1;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
|
||||
test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
|
||||
test.t1 optimize status OK
|
||||
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
|
||||
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
@ -41324,7 +41324,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
|
||||
ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_2;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
|
||||
test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
|
||||
test.t1 optimize status OK
|
||||
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
|
||||
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
@ -41784,7 +41784,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
|
||||
ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_2;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
|
||||
test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
|
||||
test.t1 optimize status OK
|
||||
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
|
||||
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
@ -42255,7 +42255,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
|
||||
ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_2;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
|
||||
test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
|
||||
test.t1 optimize status OK
|
||||
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
|
||||
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
@ -42727,7 +42727,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
|
||||
ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_2;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
|
||||
test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
|
||||
test.t1 optimize status OK
|
||||
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
|
||||
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
@ -43193,7 +43193,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
|
||||
ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_2;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
|
||||
test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
|
||||
test.t1 optimize status OK
|
||||
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
|
||||
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
@ -43665,7 +43665,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
|
||||
ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_2;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
|
||||
test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
|
||||
test.t1 optimize status OK
|
||||
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
|
||||
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
@ -44142,7 +44142,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
|
||||
ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_2;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
|
||||
test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
|
||||
test.t1 optimize status OK
|
||||
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
|
||||
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
@ -44617,7 +44617,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
|
||||
ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_2;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
|
||||
test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
|
||||
test.t1 optimize status OK
|
||||
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
|
||||
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
@ -52582,7 +52582,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
|
||||
ALTER TABLE t1 OPTIMIZE PARTITION ALL;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
|
||||
test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
|
||||
test.t1 optimize status OK
|
||||
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
|
||||
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
@ -53042,7 +53042,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
|
||||
ALTER TABLE t1 OPTIMIZE PARTITION ALL;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
|
||||
test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
|
||||
test.t1 optimize status OK
|
||||
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
|
||||
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
@ -53513,7 +53513,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
|
||||
ALTER TABLE t1 OPTIMIZE PARTITION ALL;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
|
||||
test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
|
||||
test.t1 optimize status OK
|
||||
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
|
||||
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
@ -53985,7 +53985,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
|
||||
ALTER TABLE t1 OPTIMIZE PARTITION ALL;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
|
||||
test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
|
||||
test.t1 optimize status OK
|
||||
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
|
||||
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
@ -54451,7 +54451,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
|
||||
ALTER TABLE t1 OPTIMIZE PARTITION ALL;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
|
||||
test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
|
||||
test.t1 optimize status OK
|
||||
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
|
||||
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
@ -54923,7 +54923,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
|
||||
ALTER TABLE t1 OPTIMIZE PARTITION ALL;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
|
||||
test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
|
||||
test.t1 optimize status OK
|
||||
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
|
||||
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
@ -55400,7 +55400,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
|
||||
ALTER TABLE t1 OPTIMIZE PARTITION ALL;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
|
||||
test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
|
||||
test.t1 optimize status OK
|
||||
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
|
||||
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
@ -55875,7 +55875,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
|
||||
ALTER TABLE t1 OPTIMIZE PARTITION ALL;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
|
||||
test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed.
|
||||
test.t1 optimize status OK
|
||||
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
|
||||
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
|
||||
|
@ -27,7 +27,6 @@ n
|
||||
3
|
||||
4
|
||||
include/check_slave_param.inc [Exec_Master_Log_Pos]
|
||||
START SLAVE UNTIL RELAY_LOG_FILE='slave-relay-bin.000002', RELAY_LOG_POS=relay_pos_insert1_t2
|
||||
include/wait_for_slave_sql_to_stop.inc
|
||||
SELECT * FROM t2;
|
||||
n
|
||||
|
@ -12,5 +12,5 @@
|
||||
|
||||
rpl_row_create_table : Bug#11759274 Feb 27 2010 andrei failed different way than earlier with bug#45576
|
||||
rpl_get_master_version_and_clock : Bug#11766137 Jan 05 2011 joro Valgrind warnings rpl_get_master_version_and_clock
|
||||
rpl_row_until : BUG#59543 Jan 26 2011 alfranio Replication test from eits suite rpl_row_until times out
|
||||
rpl_stm_until : BUG#59543 Jan 26 2011 alfranio Replication test from eits suite rpl_row_until times out
|
||||
rpl_row_until @macosx : BUG#15965353 RPL.RPL_ROW_UNTIL FAILS ON PB2 , PLATFORM= MACOSX10.6 X86_64 MAX
|
||||
|
@ -26,6 +26,7 @@ let $master_pos_insert1_t2= query_get_value(SHOW BINLOG EVENTS, End_log_pos, 12)
|
||||
sync_slave_with_master;
|
||||
|
||||
# Save relay log position for query INSERT INTO t2 VALUES (1),(2);
|
||||
let $relay_log_file= query_get_value(show slave status, Relay_Log_File,1);
|
||||
let $relay_pos_insert1_t2= query_get_value(show slave status, Relay_Log_Pos, 1);
|
||||
|
||||
connection master;
|
||||
@ -68,9 +69,8 @@ SELECT * FROM t1;
|
||||
--source include/check_slave_param.inc
|
||||
|
||||
# Try replicate all up to and not including the second insert to t2;
|
||||
echo START SLAVE UNTIL RELAY_LOG_FILE='slave-relay-bin.000002', RELAY_LOG_POS=relay_pos_insert1_t2;
|
||||
--disable_query_log
|
||||
eval START SLAVE UNTIL RELAY_LOG_FILE='slave-relay-bin.000002', RELAY_LOG_POS=$relay_pos_insert1_t2;
|
||||
eval START SLAVE UNTIL RELAY_LOG_FILE='$relay_log_file', RELAY_LOG_POS=$relay_pos_insert1_t2;
|
||||
--enable_query_log
|
||||
--source include/wait_for_slave_sql_to_stop.inc
|
||||
SELECT * FROM t2;
|
||||
|
@ -1315,6 +1315,80 @@ SELECT 1 FROM t1 GROUP BY SUBSTRING(SYSDATE() FROM 'K' FOR 'jxW<');
|
||||
DROP TABLE t1;
|
||||
SET SQL_BIG_TABLES=0;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-641 LP:1002108 - Wrong result (or crash) from a query with duplicated field in the group list and a limit clause
|
||||
--echo # Bug#11761078: 53534: INCORRECT 'SELECT SQL_BIG_RESULT...'
|
||||
--echo # WITH GROUP BY ON DUPLICATED FIELDS
|
||||
--echo #
|
||||
|
||||
CREATE TABLE t1(
|
||||
col1 int,
|
||||
UNIQUE INDEX idx (col1));
|
||||
|
||||
INSERT INTO t1 VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10),
|
||||
(11),(12),(13),(14),(15),(16),(17),(18),(19),(20);
|
||||
|
||||
let $query0=SELECT col1 AS field1, col1 AS field2
|
||||
FROM t1 GROUP BY field1, field2;
|
||||
|
||||
# Needs to be range to exercise bug
|
||||
--eval EXPLAIN $query0;
|
||||
FLUSH STATUS;
|
||||
--eval $query0;
|
||||
SHOW SESSION STATUS LIKE 'Sort_scan%';
|
||||
|
||||
let $query=SELECT SQL_BIG_RESULT col1 AS field1, col1 AS field2
|
||||
FROM t1 GROUP BY field1, field2;
|
||||
|
||||
# Needs to be range to exercise bug
|
||||
--eval EXPLAIN $query;
|
||||
FLUSH STATUS;
|
||||
--eval $query;
|
||||
SHOW SESSION STATUS LIKE 'Sort_scan%';
|
||||
|
||||
CREATE VIEW v1 AS SELECT * FROM t1;
|
||||
|
||||
SELECT SQL_BIG_RESULT col1 AS field1, col1 AS field2
|
||||
FROM v1
|
||||
GROUP BY field1, field2;
|
||||
|
||||
SELECT SQL_BIG_RESULT tbl1.col1 AS field1, tbl2.col1 AS field2
|
||||
FROM t1 as tbl1, t1 as tbl2
|
||||
GROUP BY field1, field2
|
||||
LIMIT 3;
|
||||
|
||||
explain
|
||||
select col1 f1, col1 f2 from t1 order by f2, f1;
|
||||
select col1 f1, col1 f2 from t1 order by f2, f1;
|
||||
|
||||
explain
|
||||
select col1 f1, col1 f2 from t1 group by f2 order by f2, f1;
|
||||
select col1 f1, col1 f2 from t1 group by f2 order by f2, f1;
|
||||
|
||||
explain
|
||||
select col1 f1, col1 f2 from t1 group by f1, f2 order by f2, f1;
|
||||
select col1 f1, col1 f2 from t1 group by f1, f2 order by f2, f1;
|
||||
|
||||
CREATE TABLE t2(
|
||||
col1 int,
|
||||
col2 int,
|
||||
UNIQUE INDEX idx (col1, col2));
|
||||
|
||||
INSERT INTO t2(col1, col2) VALUES
|
||||
(1,20),(2,19),(3,18),(4,17),(5,16),(6,15),(7,14),(8,13),(9,12),(10,11),
|
||||
(11,10),(12,9),(13,8),(14,7),(15,6),(16,5),(17,4),(18,3),(19,2),(20,1);
|
||||
|
||||
explain
|
||||
select col1 f1, col2 f2, col1 f3 from t2 group by f1, f2, f3;
|
||||
select col1 f1, col2 f2, col1 f3 from t2 group by f1, f2, f3;
|
||||
|
||||
explain
|
||||
select col1 f1, col2 f2, col1 f3 from t2 order by f1, f2, f3;
|
||||
select col1 f1, col2 f2, col1 f3 from t2 order by f1, f2, f3;
|
||||
|
||||
DROP VIEW v1;
|
||||
DROP TABLE t1, t2;
|
||||
|
||||
--echo # End of 5.1 tests
|
||||
|
||||
--echo #
|
||||
|
@ -485,3 +485,30 @@ INSERT INTO t1 VALUES('', 0);
|
||||
ALTER TABLE t1 MODIFY c1 VARCHAR(101);
|
||||
SELECT c2 FROM t1;
|
||||
DROP TABLE t1;
|
||||
|
||||
#
|
||||
# BUG#51763 Can't delete rows from MEMORY table with HASH key
|
||||
#
|
||||
|
||||
CREATE TABLE t1 (
|
||||
id int(11) NOT NULL AUTO_INCREMENT,
|
||||
color enum('GREEN', 'WHITE') DEFAULT NULL,
|
||||
ts int,
|
||||
PRIMARY KEY (id),
|
||||
KEY color (color) USING HASH
|
||||
) ENGINE=MEMORY DEFAULT CHARSET=utf8;
|
||||
|
||||
INSERT INTO t1 VALUES("1","GREEN",1);
|
||||
INSERT INTO t1 VALUES("2","GREEN",1);
|
||||
INSERT INTO t1 VALUES("3","GREEN",1);
|
||||
INSERT INTO t1 VALUES("4","GREEN",1);
|
||||
INSERT INTO t1 VALUES("5","GREEN",1);
|
||||
INSERT INTO t1 VALUES("6","GREEN",1);
|
||||
DELETE FROM t1 WHERE id = 1;
|
||||
INSERT INTO t1 VALUES("7","GREEN", 2);
|
||||
DELETE FROM t1 WHERE ts = 1 AND color = 'GREEN';
|
||||
SELECT * from t1;
|
||||
DROP TABLE t1;
|
||||
|
||||
# End of 5.1 tests
|
||||
|
||||
|
@ -367,7 +367,7 @@ SELECT (@v:=a) <> (@v:=1) FROM t1;
|
||||
DROP TABLE t1;
|
||||
|
||||
#
|
||||
# LP BUG#1001506 Crash on a query with GROUP BY and user variables
|
||||
# lp:1001506 Crash on a query with GROUP BY and user variables
|
||||
# MySQL Bug #11764372 57197: EVEN MORE USER VARIABLE CRASHING FUN
|
||||
#
|
||||
|
||||
@ -377,10 +377,17 @@ SELECT DISTINCT @a:=MIN(t1.a) FROM t1, t1 AS t2
|
||||
GROUP BY @b:=(SELECT COUNT(*) > t2.a);
|
||||
DROP TABLE t1;
|
||||
|
||||
#
|
||||
# Bug #12408412: GROUP_CONCAT + ORDER BY + INPUT/OUTPUT
|
||||
# SAME USER VARIABLE = CRASH
|
||||
#
|
||||
SET @bug12408412=1;
|
||||
SELECT GROUP_CONCAT(@bug12408412 ORDER BY 1) INTO @bug12408412;
|
||||
|
||||
--echo End of 5.1 tests
|
||||
|
||||
#
|
||||
# MDEV-616 LP BUG#1002126
|
||||
# MDEV-616 lp:1002126
|
||||
# Bug #11764371 57196: MORE FUN WITH ASSERTION: !TABLE->FILE ||
|
||||
# TABLE->FILE->INITED == HANDLER::
|
||||
#
|
||||
@ -406,3 +413,5 @@ INSERT INTO t1 VALUES (0),(1),(3);
|
||||
SELECT DISTINCT POW(COUNT(distinct a), @a:=(SELECT 1 FROM t1 LEFT JOIN t1 AS t2 ON @a limit 1)) AS b FROM t1 GROUP BY a;
|
||||
SELECT @a;
|
||||
DROP TABLE t1;
|
||||
|
||||
--echo End of 5.2 tests
|
||||
|
@ -54,7 +54,7 @@ const char * NEAR globerrs[GLOBERRS]=
|
||||
"File '%s' (fileno: %d) was not closed",
|
||||
"Can't change ownership of the file '%s' (Errcode: %d)",
|
||||
"Can't change permissions of the file '%s' (Errcode: %d)",
|
||||
"Can't seek in file '%s' (Errcode: %d)"
|
||||
"Can't seek in file '%s' (Errcode: %d)",
|
||||
"Can't change mode for file '%s' to 0x%lx (Error: %d)",
|
||||
"Warning: Can't copy ownership for file '%s' (Error: %d)"
|
||||
};
|
||||
@ -108,12 +108,12 @@ void init_glob_errs()
|
||||
void wait_for_free_space(const char *filename, int errors)
|
||||
{
|
||||
if (errors == 0)
|
||||
my_error(EE_DISK_FULL,MYF(ME_BELL | ME_NOREFRESH),
|
||||
my_error(EE_DISK_FULL,MYF(ME_BELL | ME_NOREFRESH | ME_JUST_WARNING),
|
||||
filename,my_errno,MY_WAIT_FOR_USER_TO_FIX_PANIC);
|
||||
if (!(errors % MY_WAIT_GIVE_USER_A_MESSAGE))
|
||||
my_printf_error(EE_DISK_FULL,
|
||||
"Retry in %d secs. Message reprinted in %d secs",
|
||||
MYF(ME_BELL | ME_NOREFRESH),
|
||||
MYF(ME_BELL | ME_NOREFRESH | ME_JUST_WARNING),
|
||||
MY_WAIT_FOR_USER_TO_FIX_PANIC,
|
||||
MY_WAIT_GIVE_USER_A_MESSAGE * MY_WAIT_FOR_USER_TO_FIX_PANIC );
|
||||
VOID(sleep(MY_WAIT_FOR_USER_TO_FIX_PANIC));
|
||||
|
@ -1,4 +1,5 @@
|
||||
/* Copyright (C) 2000 MySQL AB
|
||||
/* Copyright (c) 2000, 2013, Oracle and/or its affiliates.
|
||||
Copyright (c) 2012, 2013, Monty Program Ab.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
@ -11,7 +12,8 @@
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program; if not, write to the Free Software
|
||||
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
|
||||
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#include "mysys_priv.h"
|
||||
#include <m_string.h>
|
||||
@ -519,10 +521,10 @@ char *intern_filename(char *to, const char *from)
|
||||
char buff[FN_REFLEN + 1];
|
||||
if (from == to)
|
||||
{ /* Dirname may destroy from */
|
||||
strmov(buff,from);
|
||||
(void) strnmov(buff, from, FN_REFLEN);
|
||||
from=buff;
|
||||
}
|
||||
length= dirname_part(to, from, &to_length); /* Copy dirname & fix chars */
|
||||
(void) strmov(to + to_length,from+length);
|
||||
(void) strnmov(to + to_length, from + length, FN_REFLEN - to_length);
|
||||
return (to);
|
||||
} /* intern_filename */
|
||||
|
@ -1,4 +1,4 @@
|
||||
/* Copyright (C) 2000 MySQL AB
|
||||
/* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
@ -11,7 +11,8 @@
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program; if not, write to the Free Software
|
||||
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
|
||||
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
/* TODO: check for overrun of memory for names. */
/* Convert MSDOS-TIME to standard time_t (still needed?) */
|
||||
@ -103,7 +104,7 @@ MY_DIR *my_dir(const char *path, myf MyFlags)
|
||||
MEM_ROOT *names_storage;
|
||||
DIR *dirp;
|
||||
struct dirent *dp;
|
||||
char tmp_path[FN_REFLEN+1],*tmp_file;
|
||||
char tmp_path[FN_REFLEN + 2], *tmp_file;
|
||||
#ifdef THREAD
|
||||
char dirent_tmp[sizeof(struct dirent)+_POSIX_PATH_MAX+1];
|
||||
#endif
|
||||
@ -215,10 +216,11 @@ char * directory_file_name (char * dst, const char *src)
|
||||
/* Process as Unix format: just remove test the final slash. */
|
||||
|
||||
char * end;
|
||||
DBUG_ASSERT(strlen(src) < (FN_REFLEN + 1));
|
||||
|
||||
if (src[0] == 0)
|
||||
src= (char*) "."; /* Use empty as current */
|
||||
end=strmov(dst, src);
|
||||
end= strnmov(dst, src, FN_REFLEN + 1);
|
||||
if (end[-1] != FN_LIBCHAR)
|
||||
{
|
||||
end[0]=FN_LIBCHAR; /* Add last '/' */
|
||||
|
@ -46,7 +46,7 @@ size_t my_write(int Filedes, const uchar *Buffer, size_t Count, myf MyFlags)
|
||||
and the number pf written bytes to -1.
|
||||
*/
|
||||
DBUG_EXECUTE_IF ("simulate_file_write_error",
|
||||
{
|
||||
if (!errors) {
|
||||
errno= ENOSPC;
|
||||
writenbytes= (size_t) -1;
|
||||
});
|
||||
|
@ -397,7 +397,7 @@ then
|
||||
log_notice "Logging to '$err_log'."
|
||||
logging=file
|
||||
|
||||
if [ ! -e "$err_log" ]; then # if error log already exists,
|
||||
if [ ! -f "$err_log" ]; then # if error log already exists,
|
||||
touch "$err_log" # we just append. otherwise,
|
||||
chmod "$fmode" "$err_log" # fix the permissions here!
|
||||
fi
|
||||
@ -607,7 +607,7 @@ do
|
||||
|
||||
eval_log_error "$cmd"
|
||||
|
||||
if [ $want_syslog -eq 0 -a ! -e "$err_log" ]; then
|
||||
if [ $want_syslog -eq 0 -a ! -f "$err_log" ]; then
|
||||
touch "$err_log" # hypothetical: log was renamed but not
|
||||
chown $user "$err_log" # flushed yet. we'd recreate it with
|
||||
chmod "$fmode" "$err_log" # wrong owner next time we log, so set
|
||||
|
@ -9,4 +9,5 @@ mysqlbinlog
|
||||
mysqlbinlog
|
||||
mysqld
|
||||
sql_yacc.cc
|
||||
sql_yacc.hh
|
||||
sql_yacc.h
|
||||
|
@ -150,7 +150,7 @@ DEFS = -DMYSQL_SERVER \
|
||||
-DHAVE_EVENT_SCHEDULER \
|
||||
@DEFS@
|
||||
|
||||
BUILT_MAINT_SRC = sql_yacc.cc sql_yacc.h
|
||||
BUILT_MAINT_SRC = sql_yacc.cc sql_yacc.$(YACC_HEXT)
|
||||
BUILT_SOURCES = $(BUILT_MAINT_SRC) lex_hash.h link_sources
|
||||
EXTRA_DIST = udf_example.c udf_example.def $(BUILT_MAINT_SRC) \
|
||||
nt_servc.cc nt_servc.h mysql_install_db.cc mysql_upgrade_service.cc \
|
||||
|
@ -185,7 +185,7 @@ static enum_field_types field_types_merge_rules [FIELDTYPE_NUM][FIELDTYPE_NUM]=
|
||||
//MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP
|
||||
MYSQL_TYPE_LONG, MYSQL_TYPE_VARCHAR,
|
||||
//MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24
|
||||
MYSQL_TYPE_LONGLONG, MYSQL_TYPE_INT24,
|
||||
MYSQL_TYPE_LONGLONG, MYSQL_TYPE_LONG,
|
||||
//MYSQL_TYPE_DATE MYSQL_TYPE_TIME
|
||||
MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR,
|
||||
//MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR
|
||||
@ -216,7 +216,7 @@ static enum_field_types field_types_merge_rules [FIELDTYPE_NUM][FIELDTYPE_NUM]=
|
||||
//MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP
|
||||
MYSQL_TYPE_FLOAT, MYSQL_TYPE_VARCHAR,
|
||||
//MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24
|
||||
MYSQL_TYPE_FLOAT, MYSQL_TYPE_INT24,
|
||||
MYSQL_TYPE_FLOAT, MYSQL_TYPE_FLOAT,
|
||||
//MYSQL_TYPE_DATE MYSQL_TYPE_TIME
|
||||
MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR,
|
||||
//MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR
|
||||
@ -247,7 +247,7 @@ static enum_field_types field_types_merge_rules [FIELDTYPE_NUM][FIELDTYPE_NUM]=
|
||||
//MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP
|
||||
MYSQL_TYPE_DOUBLE, MYSQL_TYPE_VARCHAR,
|
||||
//MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24
|
||||
MYSQL_TYPE_DOUBLE, MYSQL_TYPE_INT24,
|
||||
MYSQL_TYPE_DOUBLE, MYSQL_TYPE_DOUBLE,
|
||||
//MYSQL_TYPE_DATE MYSQL_TYPE_TIME
|
||||
MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR,
|
||||
//MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR
|
||||
@ -278,7 +278,7 @@ static enum_field_types field_types_merge_rules [FIELDTYPE_NUM][FIELDTYPE_NUM]=
|
||||
//MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP
|
||||
MYSQL_TYPE_NULL, MYSQL_TYPE_TIMESTAMP,
|
||||
//MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24
|
||||
MYSQL_TYPE_LONGLONG, MYSQL_TYPE_INT24,
|
||||
MYSQL_TYPE_LONGLONG, MYSQL_TYPE_LONGLONG,
|
||||
//MYSQL_TYPE_DATE MYSQL_TYPE_TIME
|
||||
MYSQL_TYPE_NEWDATE, MYSQL_TYPE_TIME,
|
||||
//MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR
|
||||
|
@ -4155,6 +4155,7 @@ bool ha_partition::init_record_priority_queue()
|
||||
{
|
||||
if (bitmap_is_set(&m_part_info->used_partitions, i))
|
||||
{
|
||||
DBUG_PRINT("info", ("init rec-buf for part %u", i));
|
||||
int2store(ptr, i);
|
||||
ptr+= m_rec_length + PARTITION_BYTES_IN_POS;
|
||||
}
|
||||
@ -5032,11 +5033,27 @@ int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order)
|
||||
m_top_entry= NO_CURRENT_PART_ID;
|
||||
queue_remove_all(&m_queue);
|
||||
|
||||
DBUG_PRINT("info", ("m_part_spec.start_part %d", m_part_spec.start_part));
|
||||
for (i= m_part_spec.start_part; i <= m_part_spec.end_part; i++)
|
||||
/*
|
||||
Position part_rec_buf_ptr to point to the first used partition >=
start_part. There may be partitions marked in used_partitions that
come before start_part. Those partitions have allocated record buffers
but are dynamically pruned, so their buffers must be skipped.
|
||||
*/
|
||||
uint first_used_part= bitmap_get_first_set(&m_part_info->used_partitions);
|
||||
for (; first_used_part < m_part_spec.start_part; first_used_part++)
|
||||
{
|
||||
if (bitmap_is_set(&(m_part_info->used_partitions), first_used_part))
|
||||
part_rec_buf_ptr+= m_rec_length + PARTITION_BYTES_IN_POS;
|
||||
}
|
||||
DBUG_PRINT("info", ("m_part_spec.start_part %u first_used_part %u",
|
||||
m_part_spec.start_part, first_used_part));
|
||||
for (i= first_used_part; i <= m_part_spec.end_part; i++)
|
||||
{
|
||||
if (!(bitmap_is_set(&(m_part_info->used_partitions), i)))
|
||||
continue;
|
||||
DBUG_PRINT("info", ("reading from part %u (scan_type: %u)",
|
||||
i, m_index_scan_type));
|
||||
DBUG_ASSERT(i == uint2korr(part_rec_buf_ptr));
|
||||
uchar *rec_buf_ptr= part_rec_buf_ptr + PARTITION_BYTES_IN_POS;
|
||||
int error;
|
||||
handler *file= m_file[i];
|
||||
|
@ -4216,7 +4216,7 @@ void Item_func_set_user_var::save_item_result(Item *item)
|
||||
{
|
||||
DBUG_ENTER("Item_func_set_user_var::save_item_result");
|
||||
|
||||
switch (cached_result_type) {
|
||||
switch (args[0]->result_type()) {
|
||||
case REAL_RESULT:
|
||||
save_result.vreal= item->val_result();
|
||||
break;
|
||||
|
@ -2678,8 +2678,12 @@ int xml_enter(MY_XML_PARSER *st,const char *attr, size_t len)
|
||||
|
||||
node.parent= data->parent; // Set parent for the new node to old parent
|
||||
data->parent= numnodes; // Remember current node as new parent
|
||||
DBUG_ASSERT(data->level <= MAX_LEVEL);
|
||||
data->pos[data->level]= numnodes;
|
||||
node.level= data->level++;
|
||||
if (data->level < MAX_LEVEL)
|
||||
node.level= data->level++;
|
||||
else
|
||||
return MY_XML_ERROR;
|
||||
node.type= st->current_node_type; // TAG or ATTR
|
||||
node.beg= attr;
|
||||
node.end= attr + len;
|
||||
|
@ -4191,7 +4191,6 @@ Format_description_log_event::do_shall_skip(Relay_log_info *rli)
|
||||
into 'server_version_split':
|
||||
X.Y.Zabc (X,Y,Z numbers, a not a digit) -> {X,Y,Z}
|
||||
X.Yabc -> {X,Y,0}
|
||||
Xabc -> {X,0,0}
|
||||
'server_version_split' is then used for lookups to find if the server which
|
||||
created this event has some known bug.
|
||||
*/
|
||||
@ -4202,10 +4201,21 @@ void Format_description_log_event::calc_server_version_split()
|
||||
for (uint i= 0; i<=2; i++)
|
||||
{
|
||||
number= strtoul(p, &r, 10);
|
||||
server_version_split[i]= (uchar)number;
|
||||
DBUG_ASSERT(number < 256); // fit in uchar
|
||||
/*
|
||||
It is an invalid version if any version number is greater than 255 or
the first number is not followed by '.'.
|
||||
*/
|
||||
if (number < 256 && (*r == '.' || i != 0))
|
||||
server_version_split[i]= (uchar)number;
|
||||
else
|
||||
{
|
||||
server_version_split[0]= 0;
|
||||
server_version_split[1]= 0;
|
||||
server_version_split[2]= 0;
|
||||
break;
|
||||
}
|
||||
|
||||
p= r;
|
||||
DBUG_ASSERT(!((i == 0) && (*r != '.'))); // should be true in practice
|
||||
if (*r == '.')
|
||||
p++; // skip the dot
|
||||
}
|
||||
|
@ -977,7 +977,7 @@ public:
|
||||
return thd ? thd->db : 0;
|
||||
}
|
||||
#else
|
||||
Log_event() : temp_buf(0) {}
|
||||
Log_event() : temp_buf(0), flags(0) {}
|
||||
/* avoid having to link mysqlbinlog against libpthread */
|
||||
static Log_event* read_log_event(IO_CACHE* file,
|
||||
const Format_description_log_event
|
||||
@ -2259,12 +2259,26 @@ public:
|
||||
#ifndef MYSQL_CLIENT
|
||||
bool write(IO_CACHE* file);
|
||||
#endif
|
||||
bool is_valid() const
|
||||
bool header_is_valid() const
|
||||
{
|
||||
return ((common_header_len >= ((binlog_version==1) ? OLD_HEADER_LEN :
|
||||
LOG_EVENT_MINIMAL_HEADER_LEN)) &&
|
||||
(post_header_len != NULL));
|
||||
}
|
||||
|
||||
bool version_is_valid() const
|
||||
{
|
||||
/* It is invalid only when all version numbers are 0 */
|
||||
return !(server_version_split[0] == 0 &&
|
||||
server_version_split[1] == 0 &&
|
||||
server_version_split[2] == 0);
|
||||
}
|
||||
|
||||
bool is_valid() const
|
||||
{
|
||||
return header_is_valid() && version_is_valid();
|
||||
}
|
||||
|
||||
int get_data_size()
|
||||
{
|
||||
/*
|
||||
|
@ -1346,6 +1346,31 @@ bool mysql_truncate(THD *thd, TABLE_LIST *table_list, bool dont_send_ok);
|
||||
bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create);
|
||||
uint create_table_def_key(THD *thd, char *key, TABLE_LIST *table_list,
|
||||
bool tmp_table);
|
||||
|
||||
/**
|
||||
Create a table cache key for non-temporary table.
|
||||
|
||||
@param key Buffer for key (must be at least NAME_LEN*2+2 bytes).
|
||||
@param db Database name.
|
||||
@param table_name Table name.
|
||||
|
||||
@return Length of key.
|
||||
|
||||
@sa create_table_def_key(thd, char *, table_list, bool)
|
||||
*/
|
||||
|
||||
inline uint
|
||||
create_table_def_key(char *key, const char *db, const char *table_name)
|
||||
{
|
||||
/*
|
||||
In theory caller should ensure that both db and table_name are
|
||||
not longer than NAME_LEN bytes. In practice we play safe to avoid
|
||||
buffer overruns.
|
||||
*/
|
||||
return (uint)(strmake(strmake(key, db, NAME_LEN) + 1, table_name,
|
||||
NAME_LEN) - key + 1);
|
||||
}
|
||||
|
||||
TABLE_SHARE *get_table_share(THD *thd, TABLE_LIST *table_list, char *key,
|
||||
uint key_length, uint db_flags, int *error);
|
||||
void release_table_share(TABLE_SHARE *share, enum release_type type);
|
||||
@ -1649,7 +1674,7 @@ int lock_tables(THD *thd, TABLE_LIST *tables, uint counter, bool *need_reopen);
|
||||
int decide_logging_format(THD *thd, TABLE_LIST *tables);
|
||||
TABLE *open_temporary_table(THD *thd, const char *path, const char *db,
|
||||
const char *table_name, bool link_in_list);
|
||||
bool rm_temporary_table(handlerton *base, char *path);
|
||||
bool rm_temporary_table(handlerton *base, const char *path);
|
||||
void free_io_cache(TABLE *entry);
|
||||
void intern_close_table(TABLE *entry);
|
||||
bool close_thread_table(THD *thd, TABLE **table_ptr);
|
||||
|
@ -901,6 +901,7 @@ static int test_if_case_insensitive(const char *dir_name);
|
||||
static void register_mutex_order();
|
||||
|
||||
#ifndef EMBEDDED_LIBRARY
|
||||
static bool pid_file_created= false;
|
||||
static void usage(void);
|
||||
static void start_signal_handler(void);
|
||||
static void close_server_sock();
|
||||
@ -909,6 +910,7 @@ static void wait_for_signal_thread_to_end(void);
|
||||
static void create_pid_file();
|
||||
static void end_ssl();
|
||||
#endif
|
||||
static void delete_pid_file(myf flags);
|
||||
|
||||
|
||||
#ifndef EMBEDDED_LIBRARY
|
||||
@ -1395,7 +1397,6 @@ void clean_up(bool print_message)
|
||||
lex_free(); /* Free some memory */
|
||||
item_create_cleanup();
|
||||
set_var_free();
|
||||
free_charsets();
|
||||
if (!opt_noacl)
|
||||
{
|
||||
#ifdef HAVE_DLOPEN
|
||||
@ -1450,15 +1451,13 @@ void clean_up(bool print_message)
|
||||
#ifdef USE_REGEX
|
||||
my_regex_end();
|
||||
#endif
|
||||
free_charsets();
|
||||
#if defined(ENABLED_DEBUG_SYNC)
|
||||
/* End the debug sync facility. See debug_sync.cc. */
|
||||
debug_sync_end();
|
||||
#endif /* defined(ENABLED_DEBUG_SYNC) */
|
||||
|
||||
#if !defined(EMBEDDED_LIBRARY)
|
||||
if (!opt_bootstrap)
|
||||
(void) my_delete(pidfile_name,MYF(0)); // This may not always exist
|
||||
#endif
|
||||
delete_pid_file(MYF(0));
|
||||
if (print_message && errmesg && server_start_time)
|
||||
sql_print_information(ER(ER_SHUTDOWN_COMPLETE),my_progname);
|
||||
thread_scheduler.end();
|
||||
@ -2092,7 +2091,7 @@ static bool cache_thread()
|
||||
this thread for handling of new THD object/connection.
|
||||
*/
|
||||
thd->mysys_var->abort= 0;
|
||||
thd->thr_create_utime= my_micro_time();
|
||||
thd->thr_create_utime= thd->start_utime= my_micro_time();
|
||||
threads.append(thd);
|
||||
return(1);
|
||||
}
|
||||
@ -4594,9 +4593,7 @@ we force server id to 2, but this MySQL server will not act as a slave.");
|
||||
(void) pthread_kill(signal_thread, MYSQL_KILL_SIGNAL);
|
||||
#endif /* __NETWARE__ */
|
||||
|
||||
if (!opt_bootstrap)
|
||||
(void) my_delete(pidfile_name,MYF(MY_WME)); // Not needed anymore
|
||||
|
||||
delete_pid_file(MYF(MY_WME));
|
||||
if (unix_sock != INVALID_SOCKET)
|
||||
unlink(mysqld_unix_port);
|
||||
exit(1);
|
||||
@ -9667,12 +9664,13 @@ static void create_pid_file()
|
||||
if ((file = my_create(pidfile_name,0664,
|
||||
O_WRONLY | O_TRUNC, MYF(MY_WME))) >= 0)
|
||||
{
|
||||
char buff[21], *end;
|
||||
char buff[MAX_BIGINT_WIDTH + 1], *end;
|
||||
end= int10_to_str((long) getpid(), buff, 10);
|
||||
*end++= '\n';
|
||||
if (!my_write(file, (uchar*) buff, (uint) (end-buff), MYF(MY_WME | MY_NABP)))
|
||||
{
|
||||
(void) my_close(file, MYF(0));
|
||||
pid_file_created= true;
|
||||
return;
|
||||
}
|
||||
(void) my_close(file, MYF(0));
|
||||
@ -9682,6 +9680,26 @@ static void create_pid_file()
|
||||
}
|
||||
#endif /* EMBEDDED_LIBRARY */
|
||||
|
||||
|
||||
/**
|
||||
Remove the process' pid file.
|
||||
|
||||
@param flags file operation flags
|
||||
*/
|
||||
|
||||
static void delete_pid_file(myf flags)
|
||||
{
|
||||
#ifndef EMBEDDED_LIBRARY
|
||||
if (opt_bootstrap || !pid_file_created)
|
||||
return;
|
||||
|
||||
my_delete(pidfile_name, flags);
|
||||
pid_file_created= false;
|
||||
#endif /* EMBEDDED_LIBRARY */
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
/** Clear most status variables. */
|
||||
void refresh_status(THD *thd)
|
||||
{
|
||||
|
@ -9490,6 +9490,13 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree)
|
||||
else
|
||||
goto next_index;
|
||||
}
|
||||
/*
|
||||
This function is called on the precondition that the index is covering.
|
||||
Therefore if the GROUP BY list contains more elements than the index,
|
||||
these are duplicates. The GROUP BY list cannot be a prefix of the index.
|
||||
*/
|
||||
if (cur_part == end_part && tmp_group)
|
||||
goto next_index;
|
||||
}
|
||||
/*
|
||||
Check (GA2) if this is a DISTINCT query.
|
||||
|
@ -243,12 +243,9 @@ static void check_unused(void)
|
||||
uint create_table_def_key(THD *thd, char *key, TABLE_LIST *table_list,
|
||||
bool tmp_table)
|
||||
{
|
||||
char *db_end= strnmov(key, table_list->db, MAX_DBKEY_LENGTH - 2);
|
||||
*db_end++= '\0';
|
||||
char *table_end= strnmov(db_end, table_list->table_name,
|
||||
key + MAX_DBKEY_LENGTH - 1 - db_end);
|
||||
*table_end++= '\0';
|
||||
uint key_length= (uint) (table_end-key);
|
||||
uint key_length= create_table_def_key(key, table_list->db,
|
||||
table_list->table_name);
|
||||
|
||||
if (tmp_table)
|
||||
{
|
||||
int4store(key + key_length, thd->server_id);
|
||||
@ -624,13 +621,10 @@ void release_table_share(TABLE_SHARE *share, enum release_type type)
|
||||
TABLE_SHARE *get_cached_table_share(const char *db, const char *table_name)
|
||||
{
|
||||
char key[SAFE_NAME_LEN*2+2];
|
||||
TABLE_LIST table_list;
|
||||
uint key_length;
|
||||
safe_mutex_assert_owner(&LOCK_open);
|
||||
|
||||
table_list.db= (char*) db;
|
||||
table_list.table_name= (char*) table_name;
|
||||
key_length= create_table_def_key((THD*) 0, key, &table_list, 0);
|
||||
key_length= create_table_def_key(key, db, table_name);
|
||||
return (TABLE_SHARE*) hash_search(&table_def_cache,(uchar*) key, key_length);
|
||||
}
|
||||
|
||||
@ -2455,7 +2449,7 @@ bool lock_table_name_if_not_cached(THD *thd, const char *db,
|
||||
uint key_length;
|
||||
DBUG_ENTER("lock_table_name_if_not_cached");
|
||||
|
||||
key_length= (uint)(strmov(strmov(key, db) + 1, table_name) - key) + 1;
|
||||
key_length= create_table_def_key(key, db, table_name);
|
||||
VOID(pthread_mutex_lock(&LOCK_open));
|
||||
|
||||
if (hash_search(&open_cache, (uchar *)key, key_length))
|
||||
@ -3066,7 +3060,7 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root,
|
||||
TABLE *find_locked_table(THD *thd, const char *db,const char *table_name)
|
||||
{
|
||||
char key[MAX_DBKEY_LENGTH];
|
||||
uint key_length=(uint) (strmov(strmov(key,db)+1,table_name)-key)+1;
|
||||
uint key_length= create_table_def_key(key, db, table_name);
|
||||
|
||||
for (TABLE *table=thd->open_tables; table ; table=table->next)
|
||||
{
|
||||
@ -5793,17 +5787,27 @@ TABLE *open_temporary_table(THD *thd, const char *path, const char *db,
|
||||
}
|
||||
|
||||
|
||||
bool rm_temporary_table(handlerton *base, char *path)
|
||||
/**
|
||||
Delete a temporary table.
|
||||
|
||||
@param base Handlerton for table to be deleted.
|
||||
@param path Path to the table to be deleted (i.e. path
|
||||
to its .frm without an extension).
|
||||
|
||||
@retval false - success.
|
||||
@retval true - failure.
|
||||
*/
|
||||
|
||||
bool rm_temporary_table(handlerton *base, const char *path)
|
||||
{
|
||||
bool error=0;
|
||||
handler *file;
|
||||
char *ext;
|
||||
char frm_path[FN_REFLEN + 1];
|
||||
DBUG_ENTER("rm_temporary_table");
|
||||
|
||||
strmov(ext= strend(path), reg_ext);
|
||||
if (my_delete(path,MYF(0)))
|
||||
strxnmov(frm_path, sizeof(frm_path) - 1, path, reg_ext, NullS);
|
||||
if (my_delete(frm_path, MYF(0)))
|
||||
error=1; /* purecov: inspected */
|
||||
*ext= 0; // remove extension
|
||||
file= get_new_handler((TABLE_SHARE*) 0, current_thd->mem_root, base);
|
||||
if (file && file->ha_delete_table(path))
|
||||
{
|
||||
@ -8813,7 +8817,7 @@ bool remove_table_from_cache(THD *thd, const char *db, const char *table_name,
|
||||
DBUG_ENTER("remove_table_from_cache");
|
||||
DBUG_PRINT("enter", ("table: '%s'.'%s' flags: %u", db, table_name, flags));
|
||||
|
||||
key_length=(uint) (strmov(strmov(key,db)+1,table_name)-key)+1;
|
||||
key_length= create_table_def_key(key, db, table_name);
|
||||
for (;;)
|
||||
{
|
||||
HASH_SEARCH_STATE state;
|
||||
@ -9021,12 +9025,14 @@ open_new_frm(THD *thd, TABLE_SHARE *share, const char *alias,
|
||||
{
|
||||
LEX_STRING pathstr;
|
||||
File_parser *parser;
|
||||
char path[FN_REFLEN];
|
||||
char path[FN_REFLEN+1];
|
||||
DBUG_ENTER("open_new_frm");
|
||||
|
||||
/* Create path with extension */
|
||||
pathstr.length= (uint) (strxmov(path, share->normalized_path.str, reg_ext,
|
||||
NullS)- path);
|
||||
pathstr.length= (uint) (strxnmov(path, sizeof(path) - 1,
|
||||
share->normalized_path.str,
|
||||
reg_ext,
|
||||
NullS) - path);
|
||||
pathstr.str= path;
|
||||
|
||||
if ((parser= sql_parse_prepare(&pathstr, mem_root, 1)))
|
||||
@ -9167,7 +9173,7 @@ void mysql_wait_completed_table(ALTER_PARTITION_PARAM_TYPE *lpt, TABLE *my_table
|
||||
TABLE *table;
|
||||
DBUG_ENTER("mysql_wait_completed_table");
|
||||
|
||||
key_length=(uint) (strmov(strmov(key,lpt->db)+1,lpt->table_name)-key)+1;
|
||||
key_length= create_table_def_key(key, lpt->db, lpt->table_name);
|
||||
VOID(pthread_mutex_lock(&LOCK_open));
|
||||
HASH_SEARCH_STATE state;
|
||||
for (table= (TABLE*) hash_first(&open_cache,(uchar*) key,key_length,
|
||||
|
@ -2750,8 +2750,8 @@ void Query_cache::invalidate_table(THD *thd, TABLE_LIST *table_list)
|
||||
char key[MAX_DBKEY_LENGTH];
|
||||
uint key_length;
|
||||
|
||||
key_length=(uint) (strmov(strmov(key,table_list->db)+1,
|
||||
table_list->table_name) -key)+ 1;
|
||||
key_length= create_table_def_key(key, table_list->db,
|
||||
table_list->table_name);
|
||||
|
||||
// We don't store temporary tables => no key_length+=4 ...
|
||||
invalidate_table(thd, (uchar *)key, key_length);
|
||||
@ -2872,8 +2872,8 @@ Query_cache::register_tables_from_list(TABLE_LIST *tables_used,
|
||||
DBUG_PRINT("qcache", ("view: %s db: %s",
|
||||
tables_used->view_name.str,
|
||||
tables_used->view_db.str));
|
||||
key_length= (uint) (strmov(strmov(key, tables_used->view_db.str) + 1,
|
||||
tables_used->view_name.str) - key) + 1;
|
||||
key_length= create_table_def_key(key, tables_used->view_db.str,
|
||||
tables_used->view_name.str);
|
||||
/*
|
||||
There are not callback function for for VIEWs
|
||||
*/
|
||||
@ -3935,14 +3935,13 @@ my_bool Query_cache::move_by_type(uchar **border,
|
||||
case Query_cache_block::RESULT:
|
||||
{
|
||||
DBUG_PRINT("qcache", ("block 0x%lx RES* (%d)", (ulong) block,
|
||||
(int) block->type));
|
||||
(int) block->type));
|
||||
if (*border == 0)
|
||||
break;
|
||||
Query_cache_block *query_block = block->result()->parent(),
|
||||
*next = block->next,
|
||||
*prev = block->prev;
|
||||
Query_cache_block::block_type type = block->type;
|
||||
Query_cache_block *query_block= block->result()->parent();
|
||||
BLOCK_LOCK_WR(query_block);
|
||||
Query_cache_block *next= block->next, *prev= block->prev;
|
||||
Query_cache_block::block_type type= block->type;
|
||||
ulong len = block->length, used = block->used;
|
||||
Query_cache_block *pprev = block->pprev,
|
||||
*pnext = block->pnext,
|
||||
@ -4104,8 +4103,9 @@ uint Query_cache::filename_2_table_key (char *key, const char *path,
|
||||
*db_length= (filename - dbname) - 1;
|
||||
DBUG_PRINT("qcache", ("table '%-.*s.%s'", *db_length, dbname, filename));
|
||||
|
||||
DBUG_RETURN((uint) (strmov(strmake(key, dbname, *db_length) + 1,
|
||||
filename) -key) + 1);
|
||||
DBUG_RETURN((uint) (strmake(strmake(key, dbname,
|
||||
min(*db_length, NAME_LEN)) + 1,
|
||||
filename, NAME_LEN) - key) + 1);
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
|
@ -3072,9 +3072,9 @@ int select_dumpvar::send_data(List<Item> &items)
|
||||
else
|
||||
{
|
||||
Item_func_set_user_var *suv= new Item_func_set_user_var(mv->s, item);
|
||||
suv->save_item_result(item);
|
||||
if (suv->fix_fields(thd, 0))
|
||||
DBUG_RETURN (1);
|
||||
suv->save_item_result(item);
|
||||
if (suv->update())
|
||||
DBUG_RETURN (1);
|
||||
}
|
||||
|
@ -47,7 +47,11 @@ class Event_parse_data;
|
||||
#else
|
||||
#include "lex_symbol.h"
|
||||
#if MYSQL_LEX
|
||||
#include "sql_yacc.h"
|
||||
# if YACC_HEXT_HH
|
||||
# include "sql_yacc.hh"
|
||||
# else
|
||||
# include "sql_yacc.h"
|
||||
# endif
|
||||
#define LEX_YYSTYPE YYSTYPE *
|
||||
#else
|
||||
#define LEX_YYSTYPE void *
|
||||
|
@ -875,6 +875,8 @@ int start_slave(THD* thd , Master_info* mi, bool net_report)
|
||||
|
||||
if (thd->lex->mi.pos)
|
||||
{
|
||||
if (thd->lex->mi.relay_log_pos)
|
||||
slave_errno=ER_BAD_SLAVE_UNTIL_COND;
|
||||
mi->rli.until_condition= Relay_log_info::UNTIL_MASTER_POS;
|
||||
mi->rli.until_log_pos= thd->lex->mi.pos;
|
||||
/*
|
||||
@ -886,6 +888,8 @@ int start_slave(THD* thd , Master_info* mi, bool net_report)
|
||||
}
|
||||
else if (thd->lex->mi.relay_log_pos)
|
||||
{
|
||||
if (thd->lex->mi.pos)
|
||||
slave_errno=ER_BAD_SLAVE_UNTIL_COND;
|
||||
mi->rli.until_condition= Relay_log_info::UNTIL_RELAY_POS;
|
||||
mi->rli.until_log_pos= thd->lex->mi.relay_log_pos;
|
||||
strmake(mi->rli.until_log_name, thd->lex->mi.relay_log_name,
|
||||
|
255
sql/sql_table.cc
@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright (c) 2000, 2011, Oracle and/or its affiliates.
|
||||
Copyright (c) 2000, 2012, Oracle and/or its affiliates.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
@ -612,13 +612,6 @@ uint build_tmptable_filename(THD* thd, char *buff, size_t bufflen)
|
||||
|
||||
struct st_global_ddl_log
|
||||
{
|
||||
/*
|
||||
We need to adjust buffer size to be able to handle downgrades/upgrades
|
||||
where IO_SIZE has changed. We'll set the buffer size such that we can
|
||||
handle that the buffer size was upto 4 times bigger in the version
|
||||
that wrote the DDL log.
|
||||
*/
|
||||
char file_entry_buf[4*IO_SIZE];
|
||||
char file_name_str[FN_REFLEN];
|
||||
char *file_name;
|
||||
DDL_LOG_MEMORY_ENTRY *first_free;
|
||||
@ -646,51 +639,60 @@ pthread_mutex_t LOCK_gdl;
#define DDL_LOG_NUM_ENTRY_POS 0
#define DDL_LOG_NAME_LEN_POS 4
#define DDL_LOG_IO_SIZE_POS 8
#define DDL_LOG_HEADER_SIZE 12

/*
Read one entry from ddl log file
SYNOPSIS
read_ddl_log_file_entry()
entry_no Entry number to read
RETURN VALUES
TRUE Error
FALSE Success
/**
Read one entry from ddl log file.
@param[out] file_entry_buf Buffer to read into
@param entry_no Entry number to read
@param size Number of bytes of the entry to read

@return Operation status
@retval true Error
@retval false Success
*/

static bool read_ddl_log_file_entry(uint entry_no)
static bool read_ddl_log_file_entry(uchar *file_entry_buf,
uint entry_no,
uint size)
{
bool error= FALSE;
File file_id= global_ddl_log.file_id;
uchar *file_entry_buf= (uchar*)global_ddl_log.file_entry_buf;
uint io_size= global_ddl_log.io_size;
DBUG_ENTER("read_ddl_log_file_entry");
DBUG_ASSERT(io_size >= size);

if (my_pread(file_id, file_entry_buf, io_size, io_size * entry_no,
MYF(MY_WME)) != io_size)
if (my_pread(file_id, file_entry_buf, size, io_size * entry_no,
MYF(MY_WME)) != size)
error= TRUE;
DBUG_RETURN(error);
}
/*
Write one entry from ddl log file
SYNOPSIS
write_ddl_log_file_entry()
entry_no Entry number to write
RETURN VALUES
TRUE Error
FALSE Success
/**
Write one entry to ddl log file.

@param file_entry_buf Buffer to write
@param entry_no Entry number to write
@param size Number of bytes of the entry to write

@return Operation status
@retval true Error
@retval false Success
*/

static bool write_ddl_log_file_entry(uint entry_no)
static bool write_ddl_log_file_entry(uchar *file_entry_buf,
uint entry_no,
uint size)
{
bool error= FALSE;
File file_id= global_ddl_log.file_id;
char *file_entry_buf= (char*)global_ddl_log.file_entry_buf;
uint io_size= global_ddl_log.io_size;
DBUG_ENTER("write_ddl_log_file_entry");
DBUG_ASSERT(io_size >= size);

if (my_pwrite(file_id, (uchar*)file_entry_buf,
IO_SIZE, IO_SIZE * entry_no, MYF(MY_WME)) != IO_SIZE)
if (my_pwrite(file_id, file_entry_buf, size,
io_size * entry_no, MYF(MY_WME)) != size)
error= TRUE;
DBUG_RETURN(error);
}
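Both helpers above reduce to fixed-size slot I/O at byte offset io_size * entry_no; a small standalone sketch of that access pattern, using plain POSIX pread/pwrite rather than the server's my_pread/my_pwrite wrappers (file name and slot size below are illustrative only):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define SLOT_SIZE 512   /* stand-in for global_ddl_log.io_size */

/* Write `size` bytes of `buf` into slot `entry_no`; returns 0 on success. */
static int write_slot(int fd, const char *buf, unsigned entry_no, size_t size)
{
    return pwrite(fd, buf, size, (off_t) SLOT_SIZE * entry_no) == (ssize_t) size ? 0 : 1;
}

/* Read `size` bytes of slot `entry_no` into `buf`; returns 0 on success. */
static int read_slot(int fd, char *buf, unsigned entry_no, size_t size)
{
    return pread(fd, buf, size, (off_t) SLOT_SIZE * entry_no) == (ssize_t) size ? 0 : 1;
}

int main(void)
{
    char buf[SLOT_SIZE];
    int fd = open("ddl_log_demo.bin", O_RDWR | O_CREAT, 0600);
    if (fd < 0)
        return 1;
    memset(buf, 0, sizeof buf);
    strcpy(buf, "entry #3");
    if (write_slot(fd, buf, 3, sizeof buf) || read_slot(fd, buf, 3, sizeof buf))
        return 1;
    printf("%s\n", buf);
    close(fd);
    return 0;
}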
@ -709,17 +711,20 @@ static bool write_ddl_log_header()
{
uint16 const_var;
bool error= FALSE;
uchar file_entry_buf[DDL_LOG_HEADER_SIZE];
DBUG_ENTER("write_ddl_log_header");
DBUG_ASSERT((DDL_LOG_NAME_POS + 3 * global_ddl_log.name_len)
<= global_ddl_log.io_size);

int4store(&global_ddl_log.file_entry_buf[DDL_LOG_NUM_ENTRY_POS],
int4store(&file_entry_buf[DDL_LOG_NUM_ENTRY_POS],
global_ddl_log.num_entries);
const_var= FN_LEN;
int4store(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_LEN_POS],
const_var= global_ddl_log.name_len;
int4store(&file_entry_buf[DDL_LOG_NAME_LEN_POS],
(ulong) const_var);
const_var= IO_SIZE;
int4store(&global_ddl_log.file_entry_buf[DDL_LOG_IO_SIZE_POS],
const_var= global_ddl_log.io_size;
int4store(&file_entry_buf[DDL_LOG_IO_SIZE_POS],
(ulong) const_var);
if (write_ddl_log_file_entry(0UL))
if (write_ddl_log_file_entry(file_entry_buf, 0UL, DDL_LOG_HEADER_SIZE))
{
sql_print_error("Error writing ddl log header");
DBUG_RETURN(TRUE);
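The header writer above packs three 32-bit values at fixed offsets with int4store; a standalone sketch of the same little-endian packing (only the 0/4/8 offsets and the 12-byte header size come from the hunk, the stored values are illustrative):

#include <stdio.h>
#include <string.h>

/* Minimal stand-in for int4store(): store a 32-bit value least significant
   byte first, as the ddl log header layout above expects. */
static void store4(unsigned char *p, unsigned long v)
{
    p[0] = (unsigned char) (v);
    p[1] = (unsigned char) (v >> 8);
    p[2] = (unsigned char) (v >> 16);
    p[3] = (unsigned char) (v >> 24);
}

int main(void)
{
    unsigned char header[12];            /* DDL_LOG_HEADER_SIZE */
    memset(header, 0, sizeof header);
    store4(header + 0, 42);              /* number of entries */
    store4(header + 4, 512);             /* name length       */
    store4(header + 8, 1024);            /* io size           */
    for (size_t i = 0; i < sizeof header; i++)
        printf("%02x ", header[i]);
    printf("\n");
    return 0;
}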
@ -759,17 +764,19 @@ static inline void create_ddl_log_file_name(char *file_name)

static uint read_ddl_log_header()
{
char *file_entry_buf= (char*)global_ddl_log.file_entry_buf;
char file_entry_buf[DDL_LOG_HEADER_SIZE];
char file_name[FN_REFLEN];
uint entry_no;
bool successful_open= FALSE;
DBUG_ENTER("read_ddl_log_header");
DBUG_ASSERT(global_ddl_log.io_size <= IO_SIZE);

create_ddl_log_file_name(file_name);
if ((global_ddl_log.file_id= my_open(file_name,
O_RDWR | O_BINARY, MYF(0))) >= 0)
{
if (read_ddl_log_file_entry(0UL))
if (read_ddl_log_file_entry((uchar *) file_entry_buf, 0UL,
DDL_LOG_HEADER_SIZE))
{
/* Write message into error log */
sql_print_error("Failed to read ddl log file in recovery");
@ -782,8 +789,6 @@ static uint read_ddl_log_header()
entry_no= uint4korr(&file_entry_buf[DDL_LOG_NUM_ENTRY_POS]);
global_ddl_log.name_len= uint4korr(&file_entry_buf[DDL_LOG_NAME_LEN_POS]);
global_ddl_log.io_size= uint4korr(&file_entry_buf[DDL_LOG_IO_SIZE_POS]);
DBUG_ASSERT(global_ddl_log.io_size <=
sizeof(global_ddl_log.file_entry_buf));
}
else
{
@ -798,30 +803,22 @@ static uint read_ddl_log_header()
}
/*
|
||||
Read a ddl log entry
|
||||
SYNOPSIS
|
||||
read_ddl_log_entry()
|
||||
read_entry Number of entry to read
|
||||
out:entry_info Information from entry
|
||||
RETURN VALUES
|
||||
TRUE Error
|
||||
FALSE Success
|
||||
DESCRIPTION
|
||||
Read a specified entry in the ddl log
|
||||
/**
|
||||
Set ddl log entry struct from buffer
|
||||
@param read_entry Entry number
|
||||
@param file_entry_buf Buffer to use
|
||||
@param ddl_log_entry Entry to be set
|
||||
|
||||
@note Pointers in ddl_log_entry will point into file_entry_buf!
|
||||
*/
|
||||
|
||||
bool read_ddl_log_entry(uint read_entry, DDL_LOG_ENTRY *ddl_log_entry)
|
||||
static void set_ddl_log_entry_from_buf(uint read_entry,
|
||||
uchar *file_entry_buf,
|
||||
DDL_LOG_ENTRY *ddl_log_entry)
|
||||
{
|
||||
char *file_entry_buf= (char*)&global_ddl_log.file_entry_buf;
|
||||
uint inx;
|
||||
uchar single_char;
|
||||
DBUG_ENTER("read_ddl_log_entry");
|
||||
|
||||
if (read_ddl_log_file_entry(read_entry))
|
||||
{
|
||||
DBUG_RETURN(TRUE);
|
||||
}
|
||||
DBUG_ENTER("set_ddl_log_entry_from_buf");
|
||||
ddl_log_entry->entry_pos= read_entry;
|
||||
single_char= file_entry_buf[DDL_LOG_ENTRY_TYPE_POS];
|
||||
ddl_log_entry->entry_type= (enum ddl_log_entry_code)single_char;
|
||||
@ -829,14 +826,14 @@ bool read_ddl_log_entry(uint read_entry, DDL_LOG_ENTRY *ddl_log_entry)
|
||||
ddl_log_entry->action_type= (enum ddl_log_action_code)single_char;
|
||||
ddl_log_entry->phase= file_entry_buf[DDL_LOG_PHASE_POS];
|
||||
ddl_log_entry->next_entry= uint4korr(&file_entry_buf[DDL_LOG_NEXT_ENTRY_POS]);
|
||||
ddl_log_entry->name= &file_entry_buf[DDL_LOG_NAME_POS];
|
||||
ddl_log_entry->name= (char*) &file_entry_buf[DDL_LOG_NAME_POS];
|
||||
inx= DDL_LOG_NAME_POS + global_ddl_log.name_len;
|
||||
ddl_log_entry->from_name= &file_entry_buf[inx];
|
||||
ddl_log_entry->from_name= (char*) &file_entry_buf[inx];
|
||||
inx+= global_ddl_log.name_len;
|
||||
ddl_log_entry->handler_name= &file_entry_buf[inx];
|
||||
DBUG_RETURN(FALSE);
|
||||
ddl_log_entry->handler_name= (char*) &file_entry_buf[inx];
|
||||
DBUG_VOID_RETURN;
|
||||
}
|
||||
|
||||
|
||||
|
||||
/*
|
||||
Initialise ddl log
|
||||
@ -1039,6 +1036,7 @@ static bool get_free_ddl_log_entry(DDL_LOG_MEMORY_ENTRY **active_entry,
|
||||
DDL_LOG_MEMORY_ENTRY *first_used= global_ddl_log.first_used;
|
||||
DBUG_ENTER("get_free_ddl_log_entry");
|
||||
|
||||
safe_mutex_assert_owner(&LOCK_gdl);
|
||||
if (global_ddl_log.first_free == NULL)
|
||||
{
|
||||
if (!(used_entry= (DDL_LOG_MEMORY_ENTRY*)my_malloc(
|
||||
@ -1095,34 +1093,36 @@ bool write_ddl_log_entry(DDL_LOG_ENTRY *ddl_log_entry,
|
||||
DDL_LOG_MEMORY_ENTRY **active_entry)
|
||||
{
|
||||
bool error, write_header;
|
||||
char file_entry_buf[IO_SIZE];
|
||||
DBUG_ENTER("write_ddl_log_entry");
|
||||
|
||||
if (init_ddl_log())
|
||||
{
|
||||
DBUG_RETURN(TRUE);
|
||||
}
|
||||
global_ddl_log.file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]=
|
||||
memset(file_entry_buf, 0, sizeof(file_entry_buf));
|
||||
file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]=
|
||||
(char)DDL_LOG_ENTRY_CODE;
|
||||
global_ddl_log.file_entry_buf[DDL_LOG_ACTION_TYPE_POS]=
|
||||
file_entry_buf[DDL_LOG_ACTION_TYPE_POS]=
|
||||
(char)ddl_log_entry->action_type;
|
||||
global_ddl_log.file_entry_buf[DDL_LOG_PHASE_POS]= 0;
|
||||
int4store(&global_ddl_log.file_entry_buf[DDL_LOG_NEXT_ENTRY_POS],
|
||||
file_entry_buf[DDL_LOG_PHASE_POS]= 0;
|
||||
int4store(&file_entry_buf[DDL_LOG_NEXT_ENTRY_POS],
|
||||
ddl_log_entry->next_entry);
|
||||
DBUG_ASSERT(strlen(ddl_log_entry->name) < FN_LEN);
|
||||
strmake(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS],
|
||||
ddl_log_entry->name, FN_LEN - 1);
|
||||
DBUG_ASSERT(strlen(ddl_log_entry->name) < global_ddl_log.name_len);
|
||||
strmake(&file_entry_buf[DDL_LOG_NAME_POS], ddl_log_entry->name,
|
||||
global_ddl_log.name_len - 1);
|
||||
if (ddl_log_entry->action_type == DDL_LOG_RENAME_ACTION ||
|
||||
ddl_log_entry->action_type == DDL_LOG_REPLACE_ACTION)
|
||||
{
|
||||
DBUG_ASSERT(strlen(ddl_log_entry->from_name) < FN_LEN);
|
||||
strmake(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + FN_LEN],
|
||||
ddl_log_entry->from_name, FN_LEN - 1);
|
||||
DBUG_ASSERT(strlen(ddl_log_entry->from_name) < global_ddl_log.name_len);
|
||||
strmake(&file_entry_buf[DDL_LOG_NAME_POS + global_ddl_log.name_len],
|
||||
ddl_log_entry->from_name, global_ddl_log.name_len - 1);
|
||||
}
|
||||
else
|
||||
global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + FN_LEN]= 0;
|
||||
DBUG_ASSERT(strlen(ddl_log_entry->handler_name) < FN_LEN);
|
||||
strmake(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + (2*FN_LEN)],
|
||||
ddl_log_entry->handler_name, FN_LEN - 1);
|
||||
file_entry_buf[DDL_LOG_NAME_POS + global_ddl_log.name_len]= 0;
|
||||
DBUG_ASSERT(strlen(ddl_log_entry->handler_name) < global_ddl_log.name_len);
|
||||
strmake(&file_entry_buf[DDL_LOG_NAME_POS + (2*global_ddl_log.name_len)],
|
||||
ddl_log_entry->handler_name, global_ddl_log.name_len - 1);
|
||||
if (get_free_ddl_log_entry(active_entry, &write_header))
|
||||
{
|
||||
DBUG_RETURN(TRUE);
|
||||
@ -1130,14 +1130,15 @@ bool write_ddl_log_entry(DDL_LOG_ENTRY *ddl_log_entry,
|
||||
error= FALSE;
|
||||
DBUG_PRINT("ddl_log",
|
||||
("write type %c next %u name '%s' from_name '%s' handler '%s'",
|
||||
(char) global_ddl_log.file_entry_buf[DDL_LOG_ACTION_TYPE_POS],
|
||||
(char) file_entry_buf[DDL_LOG_ACTION_TYPE_POS],
|
||||
ddl_log_entry->next_entry,
|
||||
(char*) &global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS],
|
||||
(char*) &global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS
|
||||
+ FN_LEN],
|
||||
(char*) &global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS
|
||||
+ (2*FN_LEN)]));
|
||||
if (write_ddl_log_file_entry((*active_entry)->entry_pos))
|
||||
(char*) &file_entry_buf[DDL_LOG_NAME_POS],
|
||||
(char*) &file_entry_buf[DDL_LOG_NAME_POS +
|
||||
global_ddl_log.name_len],
|
||||
(char*) &file_entry_buf[DDL_LOG_NAME_POS +
|
||||
(2*global_ddl_log.name_len)]));
|
||||
if (write_ddl_log_file_entry((uchar*) file_entry_buf,
|
||||
(*active_entry)->entry_pos, IO_SIZE))
|
||||
{
|
||||
error= TRUE;
|
||||
sql_print_error("Failed to write entry_no = %u",
|
||||
@ -1187,13 +1188,14 @@ bool write_execute_ddl_log_entry(uint first_entry,
|
||||
DDL_LOG_MEMORY_ENTRY **active_entry)
|
||||
{
|
||||
bool write_header= FALSE;
|
||||
char *file_entry_buf= (char*)global_ddl_log.file_entry_buf;
|
||||
char file_entry_buf[IO_SIZE];
|
||||
DBUG_ENTER("write_execute_ddl_log_entry");
|
||||
|
||||
if (init_ddl_log())
|
||||
{
|
||||
DBUG_RETURN(TRUE);
|
||||
}
|
||||
memset(file_entry_buf, 0, sizeof(file_entry_buf));
|
||||
if (!complete)
|
||||
{
|
||||
/*
|
||||
@ -1207,12 +1209,7 @@ bool write_execute_ddl_log_entry(uint first_entry,
|
||||
}
|
||||
else
|
||||
file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]= (char)DDL_IGNORE_LOG_ENTRY_CODE;
|
||||
file_entry_buf[DDL_LOG_ACTION_TYPE_POS]= 0; /* Ignored for execute entries */
|
||||
file_entry_buf[DDL_LOG_PHASE_POS]= 0;
|
||||
int4store(&file_entry_buf[DDL_LOG_NEXT_ENTRY_POS], first_entry);
|
||||
file_entry_buf[DDL_LOG_NAME_POS]= 0;
|
||||
file_entry_buf[DDL_LOG_NAME_POS + FN_LEN]= 0;
|
||||
file_entry_buf[DDL_LOG_NAME_POS + 2*FN_LEN]= 0;
|
||||
if (!(*active_entry))
|
||||
{
|
||||
if (get_free_ddl_log_entry(active_entry, &write_header))
|
||||
@ -1220,7 +1217,9 @@ bool write_execute_ddl_log_entry(uint first_entry,
|
||||
DBUG_RETURN(TRUE);
|
||||
}
|
||||
}
|
||||
if (write_ddl_log_file_entry((*active_entry)->entry_pos))
|
||||
if (write_ddl_log_file_entry((uchar*) file_entry_buf,
|
||||
(*active_entry)->entry_pos,
|
||||
IO_SIZE))
|
||||
{
|
||||
sql_print_error("Error writing execute entry in ddl log");
|
||||
release_ddl_log_memory_entry(*active_entry);
|
||||
@ -1265,10 +1264,16 @@ bool write_execute_ddl_log_entry(uint first_entry,
|
||||
|
||||
bool deactivate_ddl_log_entry(uint entry_no)
|
||||
{
|
||||
char *file_entry_buf= (char*)global_ddl_log.file_entry_buf;
|
||||
uchar file_entry_buf[DDL_LOG_NAME_POS];
|
||||
DBUG_ENTER("deactivate_ddl_log_entry");
|
||||
|
||||
if (!read_ddl_log_file_entry(entry_no))
|
||||
|
||||
/*
|
||||
Only need to read and write the first bytes of the entry, where
|
||||
ENTRY_TYPE, ACTION_TYPE and PHASE reside. Using DDL_LOG_NAME_POS
|
||||
to include all info except for the names.
|
||||
*/
|
||||
if (!read_ddl_log_file_entry(file_entry_buf, entry_no, DDL_LOG_NAME_POS))
|
||||
{
|
||||
if (file_entry_buf[DDL_LOG_ENTRY_TYPE_POS] == DDL_LOG_ENTRY_CODE)
|
||||
{
|
||||
@ -1286,7 +1291,7 @@ bool deactivate_ddl_log_entry(uint entry_no)
|
||||
{
|
||||
DBUG_ASSERT(0);
|
||||
}
|
||||
if (write_ddl_log_file_entry(entry_no))
|
||||
if (write_ddl_log_file_entry(file_entry_buf, entry_no, DDL_LOG_NAME_POS))
|
||||
{
|
||||
sql_print_error("Error in deactivating log entry. Position = %u",
|
||||
entry_no);
|
||||
@ -1347,6 +1352,7 @@ void release_ddl_log_memory_entry(DDL_LOG_MEMORY_ENTRY *log_entry)
|
||||
DDL_LOG_MEMORY_ENTRY *next_log_entry= log_entry->next_log_entry;
|
||||
DDL_LOG_MEMORY_ENTRY *prev_log_entry= log_entry->prev_log_entry;
|
||||
DBUG_ENTER("release_ddl_log_memory_entry");
|
||||
safe_mutex_assert_owner(&LOCK_gdl);
|
||||
|
||||
global_ddl_log.first_free= log_entry;
|
||||
log_entry->next_log_entry= first_free;
|
||||
@ -1376,24 +1382,26 @@ bool execute_ddl_log_entry(THD *thd, uint first_entry)
|
||||
{
|
||||
DDL_LOG_ENTRY ddl_log_entry;
|
||||
uint read_entry= first_entry;
|
||||
uchar file_entry_buf[IO_SIZE];
|
||||
DBUG_ENTER("execute_ddl_log_entry");
|
||||
|
||||
pthread_mutex_lock(&LOCK_gdl);
|
||||
do
|
||||
{
|
||||
if (read_ddl_log_entry(read_entry, &ddl_log_entry))
|
||||
if (read_ddl_log_file_entry(file_entry_buf, read_entry, IO_SIZE))
|
||||
{
|
||||
/* Write to error log and continue with next log entry */
|
||||
/* Print the error to the log and continue with next log entry */
|
||||
sql_print_error("Failed to read entry = %u from ddl log",
|
||||
read_entry);
|
||||
break;
|
||||
}
|
||||
set_ddl_log_entry_from_buf(read_entry, file_entry_buf, &ddl_log_entry);
|
||||
DBUG_ASSERT(ddl_log_entry.entry_type == DDL_LOG_ENTRY_CODE ||
|
||||
ddl_log_entry.entry_type == DDL_IGNORE_LOG_ENTRY_CODE);
|
||||
|
||||
if (execute_ddl_log_action(thd, &ddl_log_entry))
|
||||
{
|
||||
/* Write to error log and continue with next log entry */
|
||||
/* Print the error to the log and continue with next log entry */
|
||||
sql_print_error("Failed to execute action for entry = %u from ddl log",
|
||||
read_entry);
|
||||
break;
|
||||
@ -1438,13 +1446,14 @@ void execute_ddl_log_recovery()
|
||||
uint num_entries, i;
|
||||
THD *thd;
|
||||
DDL_LOG_ENTRY ddl_log_entry;
|
||||
uchar *file_entry_buf;
|
||||
uint io_size;
|
||||
char file_name[FN_REFLEN];
|
||||
DBUG_ENTER("execute_ddl_log_recovery");
|
||||
|
||||
/*
|
||||
Initialise global_ddl_log struct
|
||||
*/
|
||||
bzero(global_ddl_log.file_entry_buf, sizeof(global_ddl_log.file_entry_buf));
|
||||
global_ddl_log.inited= FALSE;
|
||||
global_ddl_log.recovery_phase= TRUE;
|
||||
global_ddl_log.io_size= IO_SIZE;
|
||||
@ -1459,14 +1468,23 @@ void execute_ddl_log_recovery()
|
||||
thd->store_globals();
|
||||
|
||||
num_entries= read_ddl_log_header();
|
||||
io_size= global_ddl_log.io_size;
|
||||
file_entry_buf= (uchar*) my_malloc(io_size, MYF(0));
|
||||
if (!file_entry_buf)
|
||||
{
|
||||
sql_print_error("Failed to allocate buffer for recover ddl log");
|
||||
DBUG_VOID_RETURN;
|
||||
}
|
||||
for (i= 1; i < num_entries + 1; i++)
|
||||
{
|
||||
if (read_ddl_log_entry(i, &ddl_log_entry))
|
||||
if (read_ddl_log_file_entry(file_entry_buf, i, io_size))
|
||||
{
|
||||
sql_print_error("Failed to read entry no = %u from ddl log",
|
||||
i);
|
||||
continue;
|
||||
}
|
||||
|
||||
set_ddl_log_entry_from_buf(i, file_entry_buf, &ddl_log_entry);
|
||||
if (ddl_log_entry.entry_type == DDL_LOG_EXECUTE_CODE)
|
||||
{
|
||||
if (execute_ddl_log_entry(thd, ddl_log_entry.next_entry))
|
||||
@ -1481,6 +1499,7 @@ void execute_ddl_log_recovery()
|
||||
VOID(my_delete(file_name, MYF(0)));
|
||||
global_ddl_log.recovery_phase= FALSE;
|
||||
delete thd;
|
||||
my_free(file_entry_buf, MYF(0));
|
||||
/* Remember that we don't have a THD */
|
||||
my_pthread_setspecific_ptr(THR_THD, 0);
|
||||
DBUG_VOID_RETURN;
|
||||
@ -1497,14 +1516,16 @@ void execute_ddl_log_recovery()
|
||||
|
||||
void release_ddl_log()
|
||||
{
|
||||
DDL_LOG_MEMORY_ENTRY *free_list= global_ddl_log.first_free;
|
||||
DDL_LOG_MEMORY_ENTRY *used_list= global_ddl_log.first_used;
|
||||
DDL_LOG_MEMORY_ENTRY *free_list;
|
||||
DDL_LOG_MEMORY_ENTRY *used_list;
|
||||
DBUG_ENTER("release_ddl_log");
|
||||
|
||||
if (!global_ddl_log.do_release)
|
||||
DBUG_VOID_RETURN;
|
||||
|
||||
pthread_mutex_lock(&LOCK_gdl);
|
||||
free_list= global_ddl_log.first_free;
|
||||
used_list= global_ddl_log.first_used;
|
||||
while (used_list)
|
||||
{
|
||||
DDL_LOG_MEMORY_ENTRY *tmp= used_list->next_log_entry;
|
||||
@ -5042,6 +5063,11 @@ send_result_message:
|
||||
|
||||
case HA_ADMIN_TRY_ALTER:
|
||||
{
|
||||
uint save_flags;
|
||||
Alter_info *alter_info= &lex->alter_info;
|
||||
|
||||
/* Store the original value of alter_info->flags */
|
||||
save_flags= alter_info->flags;
|
||||
/*
|
||||
This is currently used only by InnoDB. ha_innobase::optimize() answers
|
||||
"try with alter", so here we close the table, do an ALTER TABLE,
|
||||
@ -5049,9 +5075,18 @@ send_result_message:
|
||||
We have to end the row, so analyze could return more rows.
|
||||
*/
|
||||
protocol->store(STRING_WITH_LEN("note"), system_charset_info);
|
||||
protocol->store(STRING_WITH_LEN(
|
||||
"Table does not support optimize, doing recreate + analyze instead"),
|
||||
system_charset_info);
|
||||
if(alter_info->flags & ALTER_ADMIN_PARTITION)
|
||||
{
|
||||
protocol->store(STRING_WITH_LEN(
|
||||
"Table does not support optimize on partitions. All partitions "
|
||||
"will be rebuilt and analyzed."),system_charset_info);
|
||||
}
|
||||
else
|
||||
{
|
||||
protocol->store(STRING_WITH_LEN(
|
||||
"Table does not support optimize, doing recreate + analyze instead"),
|
||||
system_charset_info);
|
||||
}
|
||||
if (protocol->write())
|
||||
goto err;
|
||||
DBUG_PRINT("info", ("HA_ADMIN_TRY_ALTER, trying analyze..."));
|
||||
@ -5065,9 +5100,15 @@ send_result_message:
|
||||
close_thread_tables(thd);
|
||||
if (!result_code) // recreation went ok
|
||||
{
|
||||
/*
|
||||
Reset the ALTER_ADMIN_PARTITION bit in alter_info->flags
|
||||
to force analyze on all partitions.
|
||||
*/
|
||||
alter_info->flags &= ~(ALTER_ADMIN_PARTITION);
|
||||
if ((table->table= open_ltable(thd, table, lock_type, 0)) &&
|
||||
((result_code= table->table->file->ha_analyze(thd, check_opt)) > 0))
|
||||
result_code= 0; // analyze went ok
|
||||
alter_info->flags= save_flags;
|
||||
}
|
||||
/* Start a new row for the final status row */
|
||||
protocol->prepare_for_resend();
|
||||
|
@ -1982,9 +1982,7 @@ bool Table_triggers_list::change_table_name(THD *thd, const char *db,
*/
#ifndef DBUG_OFF
uchar key[MAX_DBKEY_LENGTH];
uint key_length= (uint) (strmov(strmov((char*)&key[0], db)+1,
old_table)-(char*)&key[0])+1;

uint key_length= create_table_def_key((char *)key, db, old_table);
if (!is_table_name_exclusively_locked_by_this_thread(thd, key, key_length))
safe_mutex_assert_owner(&LOCK_open);
#endif
@ -1808,7 +1808,7 @@ static Time_zone*
|
||||
tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
|
||||
{
|
||||
TABLE *table= 0;
|
||||
TIME_ZONE_INFO *tz_info;
|
||||
TIME_ZONE_INFO *tz_info= NULL;
|
||||
Tz_names_entry *tmp_tzname;
|
||||
Time_zone *return_val= 0;
|
||||
int res;
|
||||
@ -1818,7 +1818,8 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
|
||||
uchar keybuff[32];
|
||||
Field *field;
|
||||
String abbr(buff, sizeof(buff), &my_charset_latin1);
|
||||
char *alloc_buff, *tz_name_buff;
|
||||
char *alloc_buff= NULL;
|
||||
char *tz_name_buff= NULL;
|
||||
/*
|
||||
Temporary arrays that are used for loading of data for filling
|
||||
TIME_ZONE_INFO structure
|
||||
@ -1838,22 +1839,6 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
|
||||
|
||||
DBUG_ENTER("tz_load_from_open_tables");
|
||||
|
||||
/* Prepare tz_info for loading also let us make copy of time zone name */
|
||||
if (!(alloc_buff= (char*) alloc_root(&tz_storage, sizeof(TIME_ZONE_INFO) +
|
||||
tz_name->length() + 1)))
|
||||
{
|
||||
sql_print_error("Out of memory while loading time zone description");
|
||||
return 0;
|
||||
}
|
||||
tz_info= (TIME_ZONE_INFO *)alloc_buff;
|
||||
bzero(tz_info, sizeof(TIME_ZONE_INFO));
|
||||
tz_name_buff= alloc_buff + sizeof(TIME_ZONE_INFO);
|
||||
/*
|
||||
By writing zero to the end we guarantee that we can call ptr()
|
||||
instead of c_ptr() for time zone name.
|
||||
*/
|
||||
strmake(tz_name_buff, tz_name->ptr(), tz_name->length());
|
||||
|
||||
/*
|
||||
Let us find out time zone id by its name (there is only one index
|
||||
and it is specifically for this purpose).
|
||||
|
@ -47,7 +47,6 @@ int heap_delete(HP_INFO *info, const uchar *record)
|
||||
share->del_link=pos;
|
||||
pos[share->reclength]=0; /* Record deleted */
|
||||
share->deleted++;
|
||||
info->current_hash_ptr=0;
|
||||
#if !defined(DBUG_OFF) && defined(EXTRA_HEAP_DEBUG)
|
||||
DBUG_EXECUTE("check_heap",heap_check_heap(info, 0););
|
||||
#endif
|
||||
@ -104,7 +103,7 @@ int hp_rb_delete_key(HP_INFO *info, register HP_KEYDEF *keyinfo,
|
||||
int hp_delete_key(HP_INFO *info, register HP_KEYDEF *keyinfo,
|
||||
const uchar *record, uchar *recpos, int flag)
|
||||
{
|
||||
ulong blength,pos2,pos_hashnr,lastpos_hashnr;
|
||||
ulong blength, pos2, pos_hashnr, lastpos_hashnr, key_pos;
|
||||
HASH_INFO *lastpos,*gpos,*pos,*pos3,*empty,*last_ptr;
|
||||
HP_SHARE *share=info->s;
|
||||
DBUG_ENTER("hp_delete_key");
|
||||
@ -116,9 +115,9 @@ int hp_delete_key(HP_INFO *info, register HP_KEYDEF *keyinfo,
|
||||
last_ptr=0;
|
||||
|
||||
/* Search after record with key */
|
||||
pos= hp_find_hash(&keyinfo->block,
|
||||
hp_mask(hp_rec_hashnr(keyinfo, record), blength,
|
||||
share->records + 1));
|
||||
key_pos= hp_mask(hp_rec_hashnr(keyinfo, record), blength, share->records + 1);
|
||||
pos= hp_find_hash(&keyinfo->block, key_pos);
|
||||
|
||||
gpos = pos3 = 0;
|
||||
|
||||
while (pos->ptr_to_rec != recpos)
|
||||
@ -180,21 +179,50 @@ int hp_delete_key(HP_INFO *info, register HP_KEYDEF *keyinfo,
|
||||
}
|
||||
pos2= hp_mask(lastpos_hashnr, blength, share->records + 1);
|
||||
if (pos2 == hp_mask(pos_hashnr, blength, share->records + 1))
|
||||
{ /* Identical key-positions */
|
||||
{
|
||||
/* lastpos and the row in the main bucket entry (pos) has the same hash */
|
||||
if (pos2 != share->records)
|
||||
{
|
||||
empty[0]=lastpos[0];
|
||||
/*
|
||||
The bucket entry was not deleted. Copy lastpos over the
|
||||
deleted entry and update previous link to point to it.
|
||||
*/
|
||||
empty[0]= lastpos[0];
|
||||
hp_movelink(lastpos, pos, empty);
|
||||
if (last_ptr == lastpos)
|
||||
{
|
||||
/*
|
||||
We moved the row that info->current_hash_ptr points to.
|
||||
Update info->current_hash_ptr to point to the new position.
|
||||
*/
|
||||
info->current_hash_ptr= empty;
|
||||
}
|
||||
DBUG_RETURN(0);
|
||||
}
|
||||
pos3= pos; /* Link pos->next after lastpos */
|
||||
}
|
||||
else
|
||||
{
|
||||
pos3= 0; /* Different positions merge */
|
||||
keyinfo->hash_buckets--;
|
||||
/*
|
||||
Shrinking the hash table deleted the main bucket entry for this hash.
|
||||
In this case the last entry was the first key in the key chain.
|
||||
We move things around so that we keep the original key order to ensure
|
||||
that heap_rnext() works.
|
||||
|
||||
- Move the row at the main bucket entry to the empty spot.
|
||||
- Move the last entry first in the new chain.
|
||||
- Link in the first element of the hash.
|
||||
*/
|
||||
empty[0]= pos[0];
|
||||
pos[0]= lastpos[0];
|
||||
hp_movelink(pos, pos, empty);
|
||||
|
||||
/* Update current_hash_ptr if the entry moved */
|
||||
if (last_ptr == lastpos)
|
||||
info->current_hash_ptr= pos;
|
||||
else if (last_ptr == pos)
|
||||
info->current_hash_ptr= empty;
|
||||
DBUG_RETURN(0);
|
||||
}
|
||||
|
||||
pos3= 0; /* Different positions merge */
|
||||
keyinfo->hash_buckets--;
|
||||
empty[0]=lastpos[0];
|
||||
hp_movelink(pos3, empty, pos->next_key);
|
||||
pos->next_key=empty;
|
||||
|
@ -1973,6 +1973,7 @@ btr_lift_page_up(
|
||||
ulint root_page_no;
|
||||
ulint ancestors;
|
||||
ulint i;
|
||||
ibool lift_father_up = FALSE;
|
||||
|
||||
ut_ad(btr_page_get_prev(page, mtr) == FIL_NULL);
|
||||
ut_ad(btr_page_get_next(page, mtr) == FIL_NULL);
|
||||
@ -2007,6 +2008,27 @@ btr_lift_page_up(
|
||||
pages[ancestors++] = iter_page;
|
||||
}
|
||||
|
||||
if (ancestors > 1 && page_level == 0) {
|
||||
/* The father page also should be the only on its level (not
|
||||
root). We should lift up the father page at first.
|
||||
Because the leaf page should be lifted up only for root page.
|
||||
The freeing page is based on page_level (==0 or !=0)
|
||||
to choose segment. If the page_level is changed ==0 from !=0,
|
||||
later freeing of the page doesn't find the page allocation
|
||||
to be freed.*/
|
||||
|
||||
lift_father_up = TRUE;
|
||||
page = father_page;
|
||||
page_level = btr_page_get_level(page, mtr);
|
||||
|
||||
ut_ad(btr_page_get_prev(page, mtr) == FIL_NULL);
|
||||
ut_ad(btr_page_get_next(page, mtr) == FIL_NULL);
|
||||
ut_ad(mtr_memo_contains(mtr, buf_block_align(page),
|
||||
MTR_MEMO_PAGE_X_FIX));
|
||||
|
||||
father_page = pages[1];
|
||||
}
|
||||
|
||||
btr_search_drop_page_hash_index(page);
|
||||
|
||||
/* Make the father empty */
|
||||
@ -2018,7 +2040,7 @@ btr_lift_page_up(
|
||||
lock_update_copy_and_discard(father_page, page);
|
||||
|
||||
/* Go upward to root page, decreasing levels by one. */
|
||||
for (i = 0; i < ancestors; i++) {
|
||||
for (i = lift_father_up ? 1 : 0; i < ancestors; i++) {
|
||||
iter_page = pages[i];
|
||||
|
||||
ut_ad(btr_page_get_level(iter_page, mtr) == (page_level + 1));
|
||||
|
@ -49,6 +49,10 @@ ulint btr_cur_n_sea = 0;
|
||||
ulint btr_cur_n_non_sea_old = 0;
|
||||
ulint btr_cur_n_sea_old = 0;
|
||||
|
||||
#ifdef UNIV_DEBUG
|
||||
uint btr_cur_limit_optimistic_insert_debug = 0;
|
||||
#endif /* UNIV_DEBUG */
|
||||
|
||||
/* In the optimistic insert, if the insert does not fit, but this much space
|
||||
can be released by page reorganize, then it is reorganized */
|
||||
|
||||
@ -1022,6 +1026,9 @@ calculate_sizes_again:
|
||||
goto calculate_sizes_again;
|
||||
}
|
||||
|
||||
LIMIT_OPTIMISTIC_INSERT_DEBUG(page_get_n_recs(page),
|
||||
goto fail);
|
||||
|
||||
/* If there have been many consecutive inserts, and we are on the leaf
|
||||
level, check if we have to split the page to reserve enough free space
|
||||
for future updates of records. */
|
||||
@ -1034,7 +1041,9 @@ calculate_sizes_again:
|
||||
&& (0 == level)
|
||||
&& (btr_page_get_split_rec_to_right(cursor, &dummy_rec)
|
||||
|| btr_page_get_split_rec_to_left(cursor, &dummy_rec))) {
|
||||
|
||||
#ifdef UNIV_DEBUG
|
||||
fail:
|
||||
#endif /* UNIV_DEBUG */
|
||||
if (big_rec_vec) {
|
||||
dtuple_convert_back_big_rec(index, entry, big_rec_vec);
|
||||
}
|
||||
|
@ -495,6 +495,12 @@ static SHOW_VAR innodb_status_variables[]= {
|
||||
(char*) &export_vars.innodb_rows_read, SHOW_LONG},
|
||||
{"rows_updated",
|
||||
(char*) &export_vars.innodb_rows_updated, SHOW_LONG},
|
||||
#ifdef UNIV_DEBUG
|
||||
{"purge_trx_id_age",
|
||||
(char*) &export_vars.innodb_purge_trx_id_age, SHOW_LONG},
|
||||
{"purge_view_trx_id_age",
|
||||
(char*) &export_vars.innodb_purge_view_trx_id_age, SHOW_LONG},
|
||||
#endif /* UNIV_DEBUG */
|
||||
{NullS, NullS, SHOW_LONG}
|
||||
};
|
||||
|
||||
@ -9274,6 +9280,18 @@ static MYSQL_SYSVAR_UINT(trx_rseg_n_slots_debug, trx_rseg_n_slots_debug,
|
||||
PLUGIN_VAR_RQCMDARG,
|
||||
"Debug flags for InnoDB to limit TRX_RSEG_N_SLOTS for trx_rsegf_undo_find_free()",
|
||||
NULL, NULL, 0, 0, 1024, 0);
|
||||
|
||||
static MYSQL_SYSVAR_UINT(limit_optimistic_insert_debug,
|
||||
btr_cur_limit_optimistic_insert_debug, PLUGIN_VAR_RQCMDARG,
|
||||
"Artificially limit the number of records per B-tree page (0=unlimited).",
|
||||
NULL, NULL, 0, 0, UINT_MAX32, 0);
|
||||
|
||||
static MYSQL_SYSVAR_BOOL(trx_purge_view_update_only_debug,
|
||||
srv_purge_view_update_only_debug, PLUGIN_VAR_NOCMDARG,
|
||||
"Pause actual purging any delete-marked records, but merely update the purge view. "
|
||||
"It is to create artificially the situation the purge view have been updated "
|
||||
"but the each purges were not done yet.",
|
||||
NULL, NULL, FALSE);
|
||||
#endif /* UNIV_DEBUG */
|
||||
|
||||
static struct st_mysql_sys_var* innobase_system_variables[]= {
|
||||
@ -9323,6 +9341,8 @@ static struct st_mysql_sys_var* innobase_system_variables[]= {
|
||||
#endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
|
||||
#ifdef UNIV_DEBUG
|
||||
MYSQL_SYSVAR(trx_rseg_n_slots_debug),
|
||||
MYSQL_SYSVAR(limit_optimistic_insert_debug),
|
||||
MYSQL_SYSVAR(trx_purge_view_update_only_debug),
|
||||
#endif /* UNIV_DEBUG */
|
||||
NULL
|
||||
};
|
||||
|
@ -703,6 +703,11 @@ extern ulint btr_cur_n_sea;
|
||||
extern ulint btr_cur_n_non_sea_old;
|
||||
extern ulint btr_cur_n_sea_old;
|
||||
|
||||
#ifdef UNIV_DEBUG
|
||||
/* Flag to limit optimistic insert records */
|
||||
extern uint btr_cur_limit_optimistic_insert_debug;
|
||||
#endif /* UNIV_DEBUG */
|
||||
|
||||
#ifndef UNIV_NONINL
|
||||
#include "btr0cur.ic"
|
||||
#endif
|
||||
|
@ -8,6 +8,16 @@ Created 10/16/1994 Heikki Tuuri
|
||||
|
||||
#include "btr0btr.h"
|
||||
|
||||
#ifdef UNIV_DEBUG
|
||||
# define LIMIT_OPTIMISTIC_INSERT_DEBUG(NREC, CODE)\
|
||||
if (btr_cur_limit_optimistic_insert_debug\
|
||||
&& (NREC) >= (ulint)btr_cur_limit_optimistic_insert_debug) {\
|
||||
CODE;\
|
||||
}
|
||||
#else
|
||||
# define LIMIT_OPTIMISTIC_INSERT_DEBUG(NREC, CODE)
|
||||
#endif /* UNIV_DEBUG */
|
||||
|
||||
/*************************************************************
|
||||
Returns the page cursor component of a tree cursor. */
|
||||
UNIV_INLINE
|
||||
@ -100,6 +110,9 @@ btr_cur_compress_recommendation(
|
||||
|
||||
page = btr_cur_get_page(cursor);
|
||||
|
||||
LIMIT_OPTIMISTIC_INSERT_DEBUG(page_get_n_recs(page) * 2,
|
||||
return(FALSE));
|
||||
|
||||
if ((page_get_data_size(page) < BTR_CUR_PAGE_COMPRESS_LIMIT)
|
||||
|| ((btr_page_get_next(page, mtr) == FIL_NULL)
|
||||
&& (btr_page_get_prev(page, mtr) == FIL_NULL))) {
|
||||
|
@ -685,6 +685,13 @@ extern lock_sys_t* lock_sys;
|
||||
remains set when the waiting lock is granted,
|
||||
or if the lock is inherited to a neighboring
|
||||
record */
|
||||
#define LOCK_CONV_BY_OTHER 4096 /* this bit is set when the lock is created
|
||||
by other transaction */
|
||||
/* Checks if this is a waiting lock created by lock->trx itself.
|
||||
@param type_mode lock->type_mode
|
||||
@return whether it is a waiting lock belonging to lock->trx */
|
||||
#define lock_is_wait_not_by_other(type_mode) \
|
||||
((type_mode & (LOCK_CONV_BY_OTHER | LOCK_WAIT)) == LOCK_WAIT)
|
||||
|
||||
/* When lock bits are reset, the following flags are available: */
|
||||
#define LOCK_RELEASE_WAIT 1
|
||||
|
@ -176,6 +176,10 @@ extern ulint srv_fatal_semaphore_wait_threshold;
|
||||
#define SRV_SEMAPHORE_WAIT_EXTENSION 7200
|
||||
extern ulint srv_dml_needed_delay;
|
||||
|
||||
#ifdef UNIV_DEBUG
|
||||
extern my_bool srv_purge_view_update_only_debug;
|
||||
#endif /* UNIV_DEBUG */
|
||||
|
||||
extern mutex_t* kernel_mutex_temp;/* mutex protecting the server, trx structs,
|
||||
query threads, and lock table: we allocate
|
||||
it from dynamic memory to get it to the
|
||||
@ -569,6 +573,10 @@ struct export_var_struct{
|
||||
ulint innodb_rows_inserted;
|
||||
ulint innodb_rows_updated;
|
||||
ulint innodb_rows_deleted;
|
||||
#ifdef UNIV_DEBUG
|
||||
ulint innodb_purge_trx_id_age;
|
||||
ulint innodb_purge_view_trx_id_age;
|
||||
#endif /* UNIV_DEBUG */
|
||||
};
|
||||
|
||||
/* The server system struct */
|
||||
|
@ -133,6 +133,10 @@ struct trx_purge_struct{
|
||||
than this */
|
||||
dulint purge_undo_no; /* Purge has advanced past all records
|
||||
whose undo number is less than this */
|
||||
#ifdef UNIV_DEBUG
|
||||
dulint done_trx_no; /* Indicate 'purge pointer' which have
|
||||
purged already accurately. */
|
||||
#endif /* UNIV_DEBUG */
|
||||
/*-----------------------------*/
|
||||
ibool next_stored; /* TRUE if the info of the next record
|
||||
to purge is stored below: if yes, then
|
||||
|
@ -725,12 +725,16 @@ lock_reset_lock_and_trx_wait(
|
||||
/*=========================*/
|
||||
lock_t* lock) /* in: record lock */
|
||||
{
|
||||
ut_ad((lock->trx)->wait_lock == lock);
|
||||
ut_ad(lock_get_wait(lock));
|
||||
|
||||
/* Reset the back pointer in trx to this waiting lock request */
|
||||
|
||||
(lock->trx)->wait_lock = NULL;
|
||||
if (!(lock->type_mode & LOCK_CONV_BY_OTHER)) {
|
||||
ut_ad((lock->trx)->wait_lock == lock);
|
||||
(lock->trx)->wait_lock = NULL;
|
||||
} else {
|
||||
ut_ad(lock_get_type(lock) == LOCK_REC);
|
||||
}
|
||||
lock->type_mode = lock->type_mode & ~LOCK_WAIT;
|
||||
}
|
||||
|
||||
@ -1437,9 +1441,9 @@ lock_rec_has_expl(
|
||||
|
||||
while (lock) {
|
||||
if (lock->trx == trx
|
||||
&& !lock_is_wait_not_by_other(lock->type_mode)
|
||||
&& lock_mode_stronger_or_eq(lock_get_mode(lock),
|
||||
precise_mode & LOCK_MODE_MASK)
|
||||
&& !lock_get_wait(lock)
|
||||
&& (!lock_rec_get_rec_not_gap(lock)
|
||||
|| (precise_mode & LOCK_REC_NOT_GAP)
|
||||
|| page_rec_is_supremum(rec))
|
||||
@ -1723,7 +1727,7 @@ lock_rec_create(
|
||||
|
||||
HASH_INSERT(lock_t, hash, lock_sys->rec_hash,
|
||||
lock_rec_fold(space, page_no), lock);
|
||||
if (type_mode & LOCK_WAIT) {
|
||||
if (lock_is_wait_not_by_other(type_mode)) {
|
||||
|
||||
lock_set_lock_and_trx_wait(lock, trx);
|
||||
}
|
||||
@ -1752,10 +1756,11 @@ lock_rec_enqueue_waiting(
|
||||
lock request is set when performing an
|
||||
insert of an index record */
|
||||
rec_t* rec, /* in: record */
|
||||
lock_t* lock, /* in: lock object; NULL if a new
|
||||
one should be created. */
|
||||
dict_index_t* index, /* in: index of record */
|
||||
que_thr_t* thr) /* in: query thread */
|
||||
{
|
||||
lock_t* lock;
|
||||
trx_t* trx;
|
||||
|
||||
ut_ad(mutex_own(&kernel_mutex));
|
||||
@ -1785,8 +1790,16 @@ lock_rec_enqueue_waiting(
|
||||
stderr);
|
||||
}
|
||||
|
||||
/* Enqueue the lock request that will wait to be granted */
|
||||
lock = lock_rec_create(type_mode | LOCK_WAIT, rec, index, trx);
|
||||
if (lock == NULL) {
|
||||
/* Enqueue the lock request that will wait to be granted */
|
||||
lock = lock_rec_create(type_mode | LOCK_WAIT, rec, index, trx);
|
||||
} else {
|
||||
ut_ad(lock->type_mode & LOCK_WAIT);
|
||||
ut_ad(lock->type_mode & LOCK_CONV_BY_OTHER);
|
||||
|
||||
lock->type_mode &= ~LOCK_CONV_BY_OTHER;
|
||||
lock_set_lock_and_trx_wait(lock, trx);
|
||||
}
|
||||
|
||||
/* Check if a deadlock occurs: if yes, remove the lock request and
|
||||
return an error code */
|
||||
@ -2011,6 +2024,7 @@ lock_rec_lock_slow(
|
||||
que_thr_t* thr) /* in: query thread */
|
||||
{
|
||||
trx_t* trx;
|
||||
lock_t* lock;
|
||||
|
||||
ut_ad(mutex_own(&kernel_mutex));
|
||||
ut_ad((LOCK_MODE_MASK & mode) != LOCK_S
|
||||
@ -2025,7 +2039,27 @@ lock_rec_lock_slow(
|
||||
|
||||
trx = thr_get_trx(thr);
|
||||
|
||||
if (lock_rec_has_expl(mode, rec, trx)) {
|
||||
lock = lock_rec_has_expl(mode, rec, trx);
|
||||
if (lock) {
|
||||
if (lock->type_mode & LOCK_CONV_BY_OTHER) {
|
||||
/* This lock or lock waiting was created by the other
|
||||
transaction, not by the transaction (trx) itself.
|
||||
So, the transaction (trx) should treat it collectly
|
||||
according as whether granted or not. */
|
||||
|
||||
if (lock->type_mode & LOCK_WAIT) {
|
||||
/* This lock request was not granted yet.
|
||||
Should wait for granted. */
|
||||
|
||||
goto enqueue_waiting;
|
||||
} else {
|
||||
/* This lock request was already granted.
|
||||
Just clearing the flag. */
|
||||
|
||||
lock->type_mode &= ~LOCK_CONV_BY_OTHER;
|
||||
}
|
||||
}
|
||||
|
||||
/* The trx already has a strong enough lock on rec: do
|
||||
nothing */
|
||||
|
||||
@ -2035,7 +2069,9 @@ lock_rec_lock_slow(
|
||||
the queue, as this transaction does not have a lock strong
|
||||
enough already granted on the record, we have to wait. */
|
||||
|
||||
return(lock_rec_enqueue_waiting(mode, rec, index, thr));
|
||||
ut_ad(lock == NULL);
|
||||
enqueue_waiting:
|
||||
return(lock_rec_enqueue_waiting(mode, rec, lock, index, thr));
|
||||
} else if (!impl) {
|
||||
/* Set the requested lock on the record */
|
||||
|
||||
@ -2171,7 +2207,8 @@ lock_grant(
|
||||
TRX_QUE_LOCK_WAIT state, and there is no need to end the lock wait
|
||||
for it */
|
||||
|
||||
if (lock->trx->que_state == TRX_QUE_LOCK_WAIT) {
|
||||
if (!(lock->type_mode & LOCK_CONV_BY_OTHER)
|
||||
&& lock->trx->que_state == TRX_QUE_LOCK_WAIT) {
|
||||
trx_end_lock_wait(lock->trx);
|
||||
}
|
||||
}
|
||||
@ -2188,6 +2225,7 @@ lock_rec_cancel(
|
||||
{
|
||||
ut_ad(mutex_own(&kernel_mutex));
|
||||
ut_ad(lock_get_type(lock) == LOCK_REC);
|
||||
ut_ad(!(lock->type_mode & LOCK_CONV_BY_OTHER));
|
||||
|
||||
/* Reset the bit (there can be only one set bit) in the lock bitmap */
|
||||
lock_rec_reset_nth_bit(lock, lock_rec_find_set_bit(lock));
|
||||
@ -2331,8 +2369,12 @@ lock_rec_reset_and_release_wait(
|
||||
lock = lock_rec_get_first(rec);
|
||||
|
||||
while (lock != NULL) {
|
||||
if (lock_get_wait(lock)) {
|
||||
if (lock_is_wait_not_by_other(lock->type_mode)) {
|
||||
lock_rec_cancel(lock);
|
||||
} else if (lock_get_wait(lock)) {
|
||||
/* just reset LOCK_WAIT */
|
||||
lock_rec_reset_nth_bit(lock, heap_no);
|
||||
lock_reset_lock_and_trx_wait(lock);
|
||||
} else {
|
||||
lock_rec_reset_nth_bit(lock, heap_no);
|
||||
}
|
||||
@ -3383,6 +3425,7 @@ lock_table_create(
|
||||
|
||||
ut_ad(table && trx);
|
||||
ut_ad(mutex_own(&kernel_mutex));
|
||||
ut_ad(!(type_mode & LOCK_CONV_BY_OTHER));
|
||||
|
||||
if ((type_mode & LOCK_MODE_MASK) == LOCK_AUTO_INC) {
|
||||
++table->n_waiting_or_granted_auto_inc_locks;
|
||||
@ -3900,6 +3943,7 @@ lock_cancel_waiting_and_release(
|
||||
lock_t* lock) /* in: waiting lock request */
|
||||
{
|
||||
ut_ad(mutex_own(&kernel_mutex));
|
||||
ut_ad(!(lock->type_mode & LOCK_CONV_BY_OTHER));
|
||||
|
||||
if (lock_get_type(lock) == LOCK_REC) {
|
||||
|
||||
@ -4871,7 +4915,7 @@ lock_rec_insert_check_and_lock(
|
||||
/* Note that we may get DB_SUCCESS also here! */
|
||||
err = lock_rec_enqueue_waiting(LOCK_X | LOCK_GAP
|
||||
| LOCK_INSERT_INTENTION,
|
||||
next_rec, index, thr);
|
||||
next_rec, NULL, index, thr);
|
||||
} else {
|
||||
err = DB_SUCCESS;
|
||||
}
|
||||
@ -4941,10 +4985,23 @@ lock_rec_convert_impl_to_expl(
|
||||
|
||||
if (!lock_rec_has_expl(LOCK_X | LOCK_REC_NOT_GAP, rec,
|
||||
impl_trx)) {
|
||||
ulint type_mode = (LOCK_REC | LOCK_X
|
||||
| LOCK_REC_NOT_GAP);
|
||||
|
||||
/* If the delete-marked record was locked already,
|
||||
we should reserve lock waiting for impl_trx as
|
||||
implicit lock. Because cannot lock at this moment.*/
|
||||
|
||||
if (rec_get_deleted_flag(rec, rec_offs_comp(offsets))
|
||||
&& lock_rec_other_has_conflicting(
|
||||
LOCK_X | LOCK_REC_NOT_GAP,
|
||||
rec, impl_trx)) {
|
||||
|
||||
type_mode |= (LOCK_WAIT | LOCK_CONV_BY_OTHER);
|
||||
}
|
||||
|
||||
lock_rec_add_to_queue(
|
||||
LOCK_REC | LOCK_X | LOCK_REC_NOT_GAP,
|
||||
rec, index, impl_trx);
|
||||
type_mode, rec, index, impl_trx);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -2195,7 +2195,10 @@ row_ins_index_entry(
|
||||
err = row_ins_index_entry_low(BTR_MODIFY_LEAF, index, entry,
|
||||
ext_vec, n_ext_vec, thr);
|
||||
if (err != DB_FAIL) {
|
||||
|
||||
if (index == dict_table_get_first_index(index->table)
|
||||
&& thr_get_trx(thr)->mysql_thd != 0) {
|
||||
DEBUG_SYNC_C("row_ins_clust_index_entry_leaf_after");
|
||||
}
|
||||
return(err);
|
||||
}
|
||||
|
||||
|
@ -48,6 +48,10 @@ Created 10/8/1995 Heikki Tuuri
|
||||
#include "srv0start.h"
|
||||
#include "row0mysql.h"
|
||||
#include "ha_prototypes.h"
|
||||
#include "read0read.h"
|
||||
|
||||
#include "m_string.h" /* for my_sys.h */
|
||||
#include "my_sys.h" /* DEBUG_SYNC_C */
|
||||
|
||||
/* This is set to TRUE if the MySQL user has set it in MySQL; currently
|
||||
affects only FOREIGN KEY definition parsing */
|
||||
@ -1435,6 +1439,10 @@ srv_suspend_mysql_thread(
|
||||
|
||||
trx = thr_get_trx(thr);
|
||||
|
||||
if (trx->mysql_thd != 0) {
|
||||
DEBUG_SYNC_C("srv_suspend_mysql_thread_enter");
|
||||
}
|
||||
|
||||
os_event_set(srv_lock_timeout_thread_event);
|
||||
|
||||
mutex_enter(&kernel_mutex);
|
||||
@ -1913,6 +1921,25 @@ srv_export_innodb_status(void)
export_vars.innodb_rows_updated = srv_n_rows_updated;
export_vars.innodb_rows_deleted = srv_n_rows_deleted;

#ifdef UNIV_DEBUG
if (ut_dulint_cmp(trx_sys->max_trx_id, purge_sys->done_trx_no) < 0) {
export_vars.innodb_purge_trx_id_age = 0;
} else {
export_vars.innodb_purge_trx_id_age =
ut_dulint_minus(trx_sys->max_trx_id, purge_sys->done_trx_no);
}

if (!purge_sys->view
|| ut_dulint_cmp(trx_sys->max_trx_id,
purge_sys->view->up_limit_id) < 0) {
export_vars.innodb_purge_view_trx_id_age = 0;
} else {
export_vars.innodb_purge_view_trx_id_age =
ut_dulint_minus(trx_sys->max_trx_id,
purge_sys->view->up_limit_id);
}
#endif /* UNIV_DEBUG */

mutex_exit(&srv_innodb_monitor_mutex);
}
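The exported ages are simply "how far max_trx_id has run ahead of the purge position", clamped at zero; a standalone sketch of the same computation on plain 64-bit integers instead of dulint (values are illustrative):

#include <stdio.h>

typedef unsigned long long trx_id_t;

/* Mirrors the ut_dulint_cmp/ut_dulint_minus logic above: age is the distance
   the system trx id counter has advanced past the purge pointer, or 0. */
static trx_id_t purge_age(trx_id_t max_trx_id, trx_id_t done_trx_no)
{
    return max_trx_id < done_trx_no ? 0 : max_trx_id - done_trx_no;
}

int main(void)
{
    printf("%llu\n", purge_age(1050, 1000));  /* 50 */
    printf("%llu\n", purge_age(1000, 1050));  /* 0  */
    return 0;
}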
|
||||
@ -2387,6 +2414,29 @@ loop:
|
||||
+ buf_pool->n_pages_written;
|
||||
srv_main_thread_op_info = "sleeping";
|
||||
|
||||
#ifdef UNIV_DEBUG
|
||||
if (btr_cur_limit_optimistic_insert_debug) {
|
||||
/* If btr_cur_limit_optimistic_insert_debug is enabled
|
||||
and no purge_threads, purge opportunity is increased
|
||||
by x100 (1purge/100msec), to speed up debug scripts
|
||||
which should wait for purged. */
|
||||
|
||||
if (!skip_sleep) {
|
||||
os_thread_sleep(100000);
|
||||
}
|
||||
|
||||
do {
|
||||
if (srv_fast_shutdown
|
||||
&& srv_shutdown_state > 0) {
|
||||
goto background_loop;
|
||||
}
|
||||
|
||||
srv_main_thread_op_info = "purging";
|
||||
n_pages_purged = trx_purge();
|
||||
|
||||
} while (n_pages_purged);
|
||||
} else
|
||||
#endif /* UNIV_DEBUG */
|
||||
if (!skip_sleep) {
|
||||
|
||||
os_thread_sleep(1000000);
|
||||
|
@ -34,6 +34,10 @@ trx_purge_t* purge_sys = NULL;
|
||||
which needs no purge */
|
||||
trx_undo_rec_t trx_purge_dummy_rec;
|
||||
|
||||
#ifdef UNIV_DEBUG
|
||||
my_bool srv_purge_view_update_only_debug;
|
||||
#endif /* UNIV_DEBUG */
|
||||
|
||||
/*********************************************************************
|
||||
Checks if trx_id is >= purge_view: then it is guaranteed that its update
|
||||
undo log still exists in the system. */
|
||||
@ -209,6 +213,7 @@ trx_purge_sys_create(void)
|
||||
purge_sys->purge_trx_no = ut_dulint_zero;
|
||||
purge_sys->purge_undo_no = ut_dulint_zero;
|
||||
purge_sys->next_stored = FALSE;
|
||||
ut_d(purge_sys->done_trx_no = ut_dulint_zero);
|
||||
|
||||
rw_lock_create(&purge_sys->latch, SYNC_PURGE_LATCH);
|
||||
|
||||
@ -576,6 +581,7 @@ trx_purge_truncate_if_arr_empty(void)
|
||||
ut_ad(mutex_own(&(purge_sys->mutex)));
|
||||
|
||||
if (purge_sys->arr->n_used == 0) {
|
||||
ut_d(purge_sys->done_trx_no = purge_sys->purge_trx_no);
|
||||
|
||||
trx_purge_truncate_history();
|
||||
|
||||
@ -1077,6 +1083,13 @@ trx_purge(void)
|
||||
|
||||
rw_lock_x_unlock(&(purge_sys->latch));
|
||||
|
||||
#ifdef UNIV_DEBUG
|
||||
if (srv_purge_view_update_only_debug) {
|
||||
mutex_exit(&(purge_sys->mutex));
|
||||
return(0);
|
||||
}
|
||||
#endif
|
||||
|
||||
purge_sys->state = TRX_PURGE_ON;
|
||||
|
||||
/* Handle at most 20 undo log pages in one purge batch */
|
||||
|
@ -1,3 +1,22 @@
2012-12-18 The InnoDB Team

* include/univ.i:
Fix Bug#13463493 INNODB PLUGIN WERE CHANGED, BUT STILL USE THE
SAME VERSION NUMBER 1.0.17

2012-12-13 The InnoDB Team

* buf/buf0buf.c:
Fix Bug#14329288 IS THE CALL TO IBUF_MERGE_OR_DELETE_FOR_PAGE FROM
BUF_PAGE_GET_GEN REDUNDANT?

2012-11-15 The InnoDB Team

* include/data0type.ic, include/rem0rec.h,
rem/rem0rec.c, row/row0merge.c:
Fix Bug#15874001 CREATE INDEX ON A UTF8 CHAR COLUMN FAILS WITH
ROW_FORMAT=REDUNDANT

2012-10-18 The InnoDB Team

* row/row0sel.c:
|
@ -3072,6 +3072,8 @@ btr_lift_page_up(
|
||||
buf_block_t* blocks[BTR_MAX_LEVELS];
|
||||
ulint n_blocks; /*!< last used index in blocks[] */
|
||||
ulint i;
|
||||
ibool lift_father_up = FALSE;
|
||||
buf_block_t* block_orig = block;
|
||||
|
||||
ut_ad(btr_page_get_prev(page, mtr) == FIL_NULL);
|
||||
ut_ad(btr_page_get_next(page, mtr) == FIL_NULL);
|
||||
@ -3082,11 +3084,13 @@ btr_lift_page_up(
|
||||
|
||||
{
|
||||
btr_cur_t cursor;
|
||||
mem_heap_t* heap = mem_heap_create(100);
|
||||
ulint* offsets;
|
||||
ulint* offsets = NULL;
|
||||
mem_heap_t* heap = mem_heap_create(
|
||||
sizeof(*offsets)
|
||||
* (REC_OFFS_HEADER_SIZE + 1 + 1 + index->n_fields));
|
||||
buf_block_t* b;
|
||||
|
||||
offsets = btr_page_get_father_block(NULL, heap, index,
|
||||
offsets = btr_page_get_father_block(offsets, heap, index,
|
||||
block, mtr, &cursor);
|
||||
father_block = btr_cur_get_block(&cursor);
|
||||
father_page_zip = buf_block_get_page_zip(father_block);
|
||||
@ -3110,6 +3114,29 @@ btr_lift_page_up(
|
||||
blocks[n_blocks++] = b = btr_cur_get_block(&cursor);
|
||||
}
|
||||
|
||||
if (n_blocks && page_level == 0) {
|
||||
/* The father page also should be the only on its level (not
|
||||
root). We should lift up the father page at first.
|
||||
Because the leaf page should be lifted up only for root page.
|
||||
The freeing page is based on page_level (==0 or !=0)
|
||||
to choose segment. If the page_level is changed ==0 from !=0,
|
||||
later freeing of the page doesn't find the page allocation
|
||||
to be freed.*/
|
||||
|
||||
lift_father_up = TRUE;
|
||||
block = father_block;
|
||||
page = buf_block_get_frame(block);
|
||||
page_level = btr_page_get_level(page, mtr);
|
||||
|
||||
ut_ad(btr_page_get_prev(page, mtr) == FIL_NULL);
|
||||
ut_ad(btr_page_get_next(page, mtr) == FIL_NULL);
|
||||
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
|
||||
|
||||
father_block = blocks[0];
|
||||
father_page_zip = buf_block_get_page_zip(father_block);
|
||||
father_page = buf_block_get_frame(father_block);
|
||||
}
|
||||
|
||||
mem_heap_free(heap);
|
||||
}
|
||||
|
||||
@ -3117,6 +3144,7 @@ btr_lift_page_up(
|
||||
|
||||
/* Make the father empty */
|
||||
btr_page_empty(father_block, father_page_zip, index, page_level, mtr);
|
||||
page_level++;
|
||||
|
||||
/* Copy the records to the father page one by one. */
|
||||
if (0
|
||||
@ -3149,7 +3177,7 @@ btr_lift_page_up(
|
||||
lock_update_copy_and_discard(father_block, block);
|
||||
|
||||
/* Go upward to root page, decrementing levels by one. */
|
||||
for (i = 0; i < n_blocks; i++, page_level++) {
|
||||
for (i = lift_father_up ? 1 : 0; i < n_blocks; i++, page_level++) {
|
||||
page_t* page = buf_block_get_frame(blocks[i]);
|
||||
page_zip_des_t* page_zip= buf_block_get_page_zip(blocks[i]);
|
||||
|
||||
@ -3171,7 +3199,7 @@ btr_lift_page_up(
|
||||
ut_ad(page_validate(father_page, index));
|
||||
ut_ad(btr_check_node_ptr(index, father_block, mtr));
|
||||
|
||||
return(father_block);
|
||||
return(lift_father_up ? block_orig : father_block);
|
||||
}
|
||||
|
||||
/*************************************************************//**
|
||||
|
@ -86,6 +86,11 @@ srv_refresh_innodb_monitor_stats(). Referenced by
|
||||
srv_printf_innodb_monitor(). */
|
||||
UNIV_INTERN ulint btr_cur_n_sea_old = 0;
|
||||
|
||||
#ifdef UNIV_DEBUG
|
||||
/* Flag to limit optimistic insert records */
|
||||
UNIV_INTERN uint btr_cur_limit_optimistic_insert_debug = 0;
|
||||
#endif /* UNIV_DEBUG */
|
||||
|
||||
/** In the optimistic insert, if the insert does not fit, but this much space
|
||||
can be released by page reorganize, then it is reorganized */
|
||||
#define BTR_CUR_PAGE_REORGANIZE_LIMIT (UNIV_PAGE_SIZE / 32)
|
||||
@ -1171,6 +1176,9 @@ btr_cur_optimistic_insert(
|
||||
}
|
||||
}
|
||||
|
||||
LIMIT_OPTIMISTIC_INSERT_DEBUG(page_get_n_recs(page),
|
||||
goto fail);
|
||||
|
||||
/* If there have been many consecutive inserts, and we are on the leaf
|
||||
level, check if we have to split the page to reserve enough free space
|
||||
for future updates of records. */
|
||||
|
@ -1108,32 +1108,21 @@ buf_page_make_young(
|
||||
}
|
||||
|
||||
/********************************************************************//**
|
||||
Sets the time of the first access of a page and moves a page to the
|
||||
start of the buffer pool LRU list if it is too old. This high-level
|
||||
function can be used to prevent an important page from slipping
|
||||
out of the buffer pool. */
|
||||
Moves a page to the start of the buffer pool LRU list if it is too old.
|
||||
This high-level function can be used to prevent an important page from
|
||||
slipping out of the buffer pool. */
|
||||
static
|
||||
void
|
||||
buf_page_set_accessed_make_young(
|
||||
/*=============================*/
|
||||
buf_page_t* bpage, /*!< in/out: buffer block of a
|
||||
buf_page_make_young_if_needed(
|
||||
/*==========================*/
|
||||
buf_page_t* bpage) /*!< in/out: buffer block of a
|
||||
file page */
|
||||
unsigned access_time) /*!< in: bpage->access_time
|
||||
read under mutex protection,
|
||||
or 0 if unknown */
|
||||
{
|
||||
ut_ad(!buf_pool_mutex_own());
|
||||
ut_a(buf_page_in_file(bpage));
|
||||
|
||||
if (buf_page_peek_if_too_old(bpage)) {
|
||||
buf_pool_mutex_enter();
|
||||
buf_LRU_make_block_young(bpage);
|
||||
buf_pool_mutex_exit();
|
||||
} else if (!access_time) {
|
||||
ulint time_ms = ut_time_ms();
|
||||
buf_pool_mutex_enter();
|
||||
buf_page_set_accessed(bpage, time_ms);
|
||||
buf_pool_mutex_exit();
|
||||
buf_page_make_young(bpage);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1217,7 +1206,6 @@ buf_page_get_zip(
|
||||
buf_page_t* bpage;
|
||||
mutex_t* block_mutex;
|
||||
ibool must_read;
|
||||
unsigned access_time;
|
||||
|
||||
#ifndef UNIV_LOG_DEBUG
|
||||
ut_ad(!ibuf_inside());
|
||||
@ -1284,13 +1272,14 @@ err_exit:
|
||||
|
||||
got_block:
|
||||
must_read = buf_page_get_io_fix(bpage) == BUF_IO_READ;
|
||||
access_time = buf_page_is_accessed(bpage);
|
||||
|
||||
buf_pool_mutex_exit();
|
||||
|
||||
buf_page_set_accessed(bpage);
|
||||
|
||||
mutex_exit(block_mutex);
|
||||
|
||||
buf_page_set_accessed_make_young(bpage, access_time);
|
||||
buf_page_make_young_if_needed(bpage);
|
||||
|
||||
#if defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG
|
||||
ut_a(!bpage->file_page_was_freed);
|
||||
@ -1789,22 +1778,29 @@ wait_until_unfixed:
|
||||
|
||||
UNIV_MEM_INVALID(bpage, sizeof *bpage);
|
||||
|
||||
buf_pool->n_pend_unzip++;
|
||||
buf_pool_mutex_exit();
|
||||
|
||||
access_time = buf_page_is_accessed(&block->page);
|
||||
mutex_exit(&block->mutex);
|
||||
mutex_exit(&buf_pool_zip_mutex);
|
||||
buf_pool->n_pend_unzip++;
|
||||
|
||||
buf_pool_mutex_exit();
|
||||
|
||||
buf_page_free_descriptor(bpage);
|
||||
|
||||
/* Decompress the page and apply buffered operations
|
||||
while not holding buf_pool_mutex or block->mutex. */
|
||||
/* Decompress the page while not holding
|
||||
buf_pool_mutex or block->mutex. */
|
||||
success = buf_zip_decompress(block, srv_use_checksums);
|
||||
ut_a(success);
|
||||
|
||||
if (UNIV_LIKELY(!recv_no_ibuf_operations)) {
|
||||
ibuf_merge_or_delete_for_page(block, space, offset,
|
||||
zip_size, TRUE);
|
||||
if (access_time) {
|
||||
#ifdef UNIV_IBUF_COUNT_DEBUG
|
||||
ut_a(ibuf_count_get(space, offset) == 0);
|
||||
#endif /* UNIV_IBUF_COUNT_DEBUG */
|
||||
} else {
|
||||
ibuf_merge_or_delete_for_page(
|
||||
block, space, offset, zip_size, TRUE);
|
||||
}
|
||||
}
|
||||
|
||||
/* Unfix and unlatch the block. */
|
||||
@ -1861,16 +1857,16 @@ wait_until_unfixed:
|
||||
|
||||
buf_block_buf_fix_inc(block, file, line);
|
||||
|
||||
mutex_exit(&block->mutex);
|
||||
|
||||
/* Check if this is the first access to the page */
|
||||
buf_pool_mutex_exit();
|
||||
|
||||
access_time = buf_page_is_accessed(&block->page);
|
||||
|
||||
buf_pool_mutex_exit();
|
||||
buf_page_set_accessed(&block->page);
|
||||
|
||||
mutex_exit(&block->mutex);
|
||||
|
||||
if (UNIV_LIKELY(mode != BUF_PEEK_IF_IN_POOL)) {
|
||||
buf_page_set_accessed_make_young(&block->page, access_time);
|
||||
buf_page_make_young_if_needed(&block->page);
|
||||
}
|
||||
|
||||
#if defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG
|
||||
@ -1925,7 +1921,7 @@ wait_until_unfixed:
|
||||
|
||||
mtr_memo_push(mtr, block, fix_type);
|
||||
|
||||
if (UNIV_LIKELY(mode != BUF_PEEK_IF_IN_POOL) && !access_time) {
|
||||
if (mode != BUF_PEEK_IF_IN_POOL && !access_time) {
|
||||
/* In the case of a first access, try to apply linear
|
||||
read-ahead */
|
||||
|
||||
@ -1975,15 +1971,13 @@ buf_page_optimistic_get(
|
||||
|
||||
buf_block_buf_fix_inc(block, file, line);
|
||||
|
||||
access_time = buf_page_is_accessed(&block->page);
|
||||
|
||||
buf_page_set_accessed(&block->page);
|
||||
|
||||
mutex_exit(&block->mutex);
|
||||
|
||||
/* Check if this is the first access to the page.
|
||||
We do a dirty read on purpose, to avoid mutex contention.
|
||||
This field is only used for heuristic purposes; it does not
|
||||
affect correctness. */
|
||||
|
||||
access_time = buf_page_is_accessed(&block->page);
|
||||
buf_page_set_accessed_make_young(&block->page, access_time);
|
||||
buf_page_make_young_if_needed(&block->page);
|
||||
|
||||
ut_ad(!ibuf_inside()
|
||||
|| ibuf_page(buf_block_get_space(block),
|
||||
@ -2035,7 +2029,7 @@ buf_page_optimistic_get(
|
||||
#if defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG
|
||||
ut_a(block->page.file_page_was_freed == FALSE);
|
||||
#endif
|
||||
if (UNIV_UNLIKELY(!access_time)) {
|
||||
if (!access_time) {
|
||||
/* In the case of a first access, try to apply linear
|
||||
read-ahead */
|
||||
|
||||
@ -2095,22 +2089,12 @@ buf_page_get_known_nowait(
|
||||
|
||||
buf_block_buf_fix_inc(block, file, line);
|
||||
|
||||
buf_page_set_accessed(&block->page);
|
||||
|
||||
mutex_exit(&block->mutex);
|
||||
|
||||
if (mode == BUF_MAKE_YOUNG && buf_page_peek_if_too_old(&block->page)) {
|
||||
buf_pool_mutex_enter();
|
||||
buf_LRU_make_block_young(&block->page);
|
||||
buf_pool_mutex_exit();
|
||||
} else if (!buf_page_is_accessed(&block->page)) {
|
||||
/* Above, we do a dirty read on purpose, to avoid
|
||||
mutex contention. The field buf_page_t::access_time
|
||||
is only used for heuristic purposes. Writes to the
|
||||
field must be protected by mutex, however. */
|
||||
ulint time_ms = ut_time_ms();
|
||||
|
||||
buf_pool_mutex_enter();
|
||||
buf_page_set_accessed(&block->page, time_ms);
|
||||
buf_pool_mutex_exit();
|
||||
if (mode == BUF_MAKE_YOUNG) {
|
||||
buf_page_make_young_if_needed(&block->page);
|
||||
}
|
||||
|
||||
ut_ad(!ibuf_inside() || (mode == BUF_KEEP_OLD));
|
||||
@ -2542,7 +2526,6 @@ buf_page_create(
|
||||
buf_frame_t* frame;
|
||||
buf_block_t* block;
|
||||
buf_block_t* free_block = NULL;
|
||||
ulint time_ms = ut_time_ms();
|
||||
|
||||
ut_ad(mtr);
|
||||
ut_ad(mtr->state == MTR_ACTIVE);
|
||||
@ -2627,12 +2610,12 @@ buf_page_create(
|
||||
rw_lock_x_unlock(&block->lock);
|
||||
}
|
||||
|
||||
buf_page_set_accessed(&block->page, time_ms);
|
||||
|
||||
buf_pool_mutex_exit();
|
||||
|
||||
mtr_memo_push(mtr, block, MTR_MEMO_BUF_FIX);
|
||||
|
||||
buf_page_set_accessed(&block->page);
|
||||
|
||||
mutex_exit(&block->mutex);
|
||||
|
||||
/* Delete possible entries for the page from the insert buffer:
|
||||
|
@ -2619,7 +2619,7 @@ retry:
mutex_exit(&fil_system->mutex);

#ifndef UNIV_HOTBACKUP
if (success) {
if (success && !recv_recovery_on) {
mtr_t mtr;

mtr_start(&mtr);
@ -4853,3 +4853,28 @@ fil_close(void)

fil_system = NULL;
}

/****************************************************************//**
Generate redo logs for swapping two .ibd files */
UNIV_INTERN
void
fil_mtr_rename_log(
/*===============*/
ulint old_space_id, /*!< in: tablespace id of the old
table. */
const char* old_name, /*!< in: old table name */
ulint new_space_id, /*!< in: tablespace id of the new
table */
const char* new_name, /*!< in: new table name */
const char* tmp_name) /*!< in: temp table name used while
swapping */
{
mtr_t mtr;
mtr_start(&mtr);
fil_op_write_log(MLOG_FILE_RENAME, old_space_id,
0, 0, old_name, tmp_name, &mtr);
fil_op_write_log(MLOG_FILE_RENAME, new_space_id,
0, 0, new_name, old_name, &mtr);
mtr_commit(&mtr);
}
|
||||
|
@ -577,6 +577,12 @@ static SHOW_VAR innodb_status_variables[]= {
(char*) &export_vars.innodb_rows_read, SHOW_LONG},
{"rows_updated",
(char*) &export_vars.innodb_rows_updated, SHOW_LONG},
#ifdef UNIV_DEBUG
{"purge_trx_id_age",
(char*) &export_vars.innodb_purge_trx_id_age, SHOW_LONG},
{"purge_view_trx_id_age",
(char*) &export_vars.innodb_purge_view_trx_id_age, SHOW_LONG},
#endif /* UNIV_DEBUG */
{NullS, NullS, SHOW_LONG}
};

@ -923,6 +929,8 @@ convert_error_code_to_mysql(
#endif /* HA_ERR_TOO_MANY_CONCURRENT_TRXS */
case DB_UNSUPPORTED:
return(HA_ERR_UNSUPPORTED);
case DB_OUT_OF_MEMORY:
return(HA_ERR_OUT_OF_MEM);
}
}

@ -1132,6 +1140,11 @@ innobase_mysql_tmpfile(void)

DBUG_ENTER("innobase_mysql_tmpfile");

DBUG_EXECUTE_IF(
"innobase_tmpfile_creation_failure",
DBUG_RETURN(-1);
);

tmpdir = my_tmpdir(&mysql_tmpdir_list);

/* The tmpdir parameter can not be NULL for GetTempFileName. */
@ -1193,7 +1206,15 @@ innobase_mysql_tmpfile(void)
/*========================*/
{
int fd2 = -1;
File fd = mysql_tmpfile("ib");
File fd;

DBUG_EXECUTE_IF(
"innobase_tmpfile_creation_failure",
return(-1);
);

fd = mysql_tmpfile("ib");

if (fd >= 0) {
/* Copy the file descriptor, so that the additional resources
allocated by create_temp_file() can be freed by invoking
@ -11263,6 +11284,18 @@ static MYSQL_SYSVAR_UINT(trx_rseg_n_slots_debug, trx_rseg_n_slots_debug,
PLUGIN_VAR_RQCMDARG,
"Debug flags for InnoDB to limit TRX_RSEG_N_SLOTS for trx_rsegf_undo_find_free()",
NULL, NULL, 0, 0, 1024, 0);

static MYSQL_SYSVAR_UINT(limit_optimistic_insert_debug,
btr_cur_limit_optimistic_insert_debug, PLUGIN_VAR_RQCMDARG,
"Artificially limit the number of records per B-tree page (0=unlimited).",
NULL, NULL, 0, 0, UINT_MAX32, 0);

static MYSQL_SYSVAR_BOOL(trx_purge_view_update_only_debug,
srv_purge_view_update_only_debug, PLUGIN_VAR_NOCMDARG,
"Pause actual purging any delete-marked records, but merely update the purge view. "
"It is to create artificially the situation the purge view have been updated "
"but the each purges were not done yet.",
NULL, NULL, FALSE);
#endif /* UNIV_DEBUG */

static struct st_mysql_sys_var* innobase_system_variables[]= {
@ -11328,6 +11361,8 @@ static struct st_mysql_sys_var* innobase_system_variables[]= {
MYSQL_SYSVAR(io_capacity),
#ifdef UNIV_DEBUG
MYSQL_SYSVAR(trx_rseg_n_slots_debug),
MYSQL_SYSVAR(limit_optimistic_insert_debug),
MYSQL_SYSVAR(trx_purge_view_update_only_debug),
#endif /* UNIV_DEBUG */
NULL
};

@ -98,7 +98,6 @@ innobase_col_to_mysql(
case DATA_MYSQL:
ut_ad(flen >= len);
ut_ad(col->mbmaxlen >= col->mbminlen);
ut_ad(col->mbmaxlen > col->mbminlen || flen == len);
memcpy(dest, data, len);
break;

@ -795,6 +795,11 @@ srv_printf_innodb_monitor(). */
extern ulint btr_cur_n_sea_old;
#endif /* !UNIV_HOTBACKUP */

#ifdef UNIV_DEBUG
/* Flag to limit optimistic insert records */
extern uint btr_cur_limit_optimistic_insert_debug;
#endif /* UNIV_DEBUG */

#ifndef UNIV_NONINL
#include "btr0cur.ic"
#endif

@ -26,6 +26,16 @@ Created 10/16/1994 Heikki Tuuri
#ifndef UNIV_HOTBACKUP
#include "btr0btr.h"

#ifdef UNIV_DEBUG
# define LIMIT_OPTIMISTIC_INSERT_DEBUG(NREC, CODE)\
if (btr_cur_limit_optimistic_insert_debug\
&& (NREC) >= (ulint)btr_cur_limit_optimistic_insert_debug) {\
CODE;\
}
#else
# define LIMIT_OPTIMISTIC_INSERT_DEBUG(NREC, CODE)
#endif /* UNIV_DEBUG */

#ifdef UNIV_DEBUG
/*********************************************************//**
Returns the page cursor component of a tree cursor.
@ -146,6 +156,9 @@ btr_cur_compress_recommendation(

page = btr_cur_get_page(cursor);

LIMIT_OPTIMISTIC_INSERT_DEBUG(page_get_n_recs(page) * 2,
return(FALSE));

if ((page_get_data_size(page) < BTR_CUR_PAGE_COMPRESS_LIMIT)
|| ((btr_page_get_next(page, mtr) == FIL_NULL)
&& (btr_page_get_prev(page, mtr) == FIL_NULL))) {

@ -927,8 +927,7 @@ UNIV_INLINE
void
buf_page_set_accessed(
/*==================*/
buf_page_t* bpage, /*!< in/out: control block */
ulint time_ms) /*!< in: ut_time_ms() */
buf_page_t* bpage) /*!< in/out: control block */
__attribute__((nonnull));
/*********************************************************************//**
Gets the buf_block_t handle of a buffered file block if an uncompressed
@ -1260,10 +1259,11 @@ struct buf_page_struct{
to read this for heuristic
purposes without holding any
mutex or latch */
unsigned access_time:32; /*!< time of first access, or
0 if the block was never accessed
in the buffer pool */
/* @} */
unsigned access_time; /*!< time of first access, or
0 if the block was never accessed
in the buffer pool. Protected by
block mutex */
# if defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG
ibool file_page_was_freed;
/*!< this is set to TRUE when fsp

@ -531,15 +531,15 @@ UNIV_INLINE
void
buf_page_set_accessed(
/*==================*/
buf_page_t* bpage, /*!< in/out: control block */
ulint time_ms) /*!< in: ut_time_ms() */
buf_page_t* bpage) /*!< in/out: control block */
{
ut_a(buf_page_in_file(bpage));
ut_ad(buf_pool_mutex_own());
ut_ad(!buf_pool_mutex_own());
ut_ad(mutex_own(buf_page_get_mutex(bpage)));

if (!bpage->access_time) {
/* Make this the time of the first access. */
bpage->access_time = time_ms;
bpage->access_time = ut_time_ms();
}
}

@ -1,6 +1,6 @@
/*****************************************************************************

Copyright (c) 1996, 2010, Innobase Oy. All Rights Reserved.
Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@ -439,34 +439,16 @@ dtype_get_fixed_size_low(
} else if (!comp) {
return(len);
} else {
/* We play it safe here and ask MySQL for
mbminlen and mbmaxlen. Although
mbminlen and mbmaxlen are
initialized if and only if prtype
is (in one of the 3 functions in this file),
it could be that none of these functions
has been called. */

#ifdef UNIV_DEBUG
ulint i_mbminlen, i_mbmaxlen;

innobase_get_cset_width(
dtype_get_charset_coll(prtype),
&i_mbminlen, &i_mbmaxlen);

if (UNIV_UNLIKELY(mbminlen != i_mbminlen)
|| UNIV_UNLIKELY(mbmaxlen != i_mbmaxlen)) {

ut_print_timestamp(stderr);
fprintf(stderr, " InnoDB: "
"mbminlen=%lu, "
"mbmaxlen=%lu, "
"type->mbminlen=%lu, "
"type->mbmaxlen=%lu\n",
(ulong) i_mbminlen,
(ulong) i_mbmaxlen,
(ulong) mbminlen,
(ulong) mbmaxlen);
}
ut_ad(mbminlen == i_mbminlen);
ut_ad(mbmaxlen == i_mbmaxlen);
#endif /* UNIV_DEBUG */
if (mbminlen == mbmaxlen) {
return(len);
}

@ -726,6 +726,21 @@ fil_tablespace_is_being_deleted(
/*============================*/
ulint id); /*!< in: space id */

/****************************************************************//**
Generate redo logs for swapping two .ibd files */
UNIV_INTERN
void
fil_mtr_rename_log(
/*===============*/
ulint old_space_id, /*!< in: tablespace id of the old
table. */
const char* old_name, /*!< in: old table name */
ulint new_space_id, /*!< in: tablespace id of the new
table */
const char* new_name, /*!< in: new table name */
const char* tmp_name); /*!< in: temp table name used while
swapping */

typedef struct fil_space_struct fil_space_t;

#endif

@ -796,14 +796,22 @@ lock_rec_get_page_no(
remains set when the waiting lock is granted,
or if the lock is inherited to a neighboring
record */
#if (LOCK_WAIT|LOCK_GAP|LOCK_REC_NOT_GAP|LOCK_INSERT_INTENTION)&LOCK_MODE_MASK
#define LOCK_CONV_BY_OTHER 4096 /*!< this bit is set when the lock is created
by other transaction */
#if (LOCK_WAIT|LOCK_GAP|LOCK_REC_NOT_GAP|LOCK_INSERT_INTENTION|LOCK_CONV_BY_OTHER)&LOCK_MODE_MASK
# error
#endif
#if (LOCK_WAIT|LOCK_GAP|LOCK_REC_NOT_GAP|LOCK_INSERT_INTENTION)&LOCK_TYPE_MASK
#if (LOCK_WAIT|LOCK_GAP|LOCK_REC_NOT_GAP|LOCK_INSERT_INTENTION|LOCK_CONV_BY_OTHER)&LOCK_TYPE_MASK
# error
#endif
/* @} */

/** Checks if this is a waiting lock created by lock->trx itself.
@param type_mode lock->type_mode
@return whether it is a waiting lock belonging to lock->trx */
#define lock_is_wait_not_by_other(type_mode) \
((type_mode & (LOCK_CONV_BY_OTHER | LOCK_WAIT)) == LOCK_WAIT)

/** Lock operation struct */
typedef struct lock_op_struct lock_op_t;
/** Lock operation struct */

@ -361,24 +361,6 @@ rec_get_offsets_func(
#define rec_get_offsets(rec,index,offsets,n,heap) \
rec_get_offsets_func(rec,index,offsets,n,heap,__FILE__,__LINE__)

/******************************************************//**
Determine the offset to each field in a leaf-page record
in ROW_FORMAT=COMPACT. This is a special case of
rec_init_offsets() and rec_get_offsets_func(). */
UNIV_INTERN
void
rec_init_offsets_comp_ordinary(
/*===========================*/
const rec_t* rec, /*!< in: physical record in
ROW_FORMAT=COMPACT */
ulint extra, /*!< in: number of bytes to reserve
between the record header and
the data payload
(usually REC_N_NEW_EXTRA_BYTES) */
const dict_index_t* index, /*!< in: record descriptor */
ulint* offsets);/*!< in/out: array of offsets;
in: n=rec_offs_n_fields(offsets) */

/******************************************************//**
The following function determines the offsets to each field
in the record. It can reuse a previously allocated array. */
@ -639,8 +621,48 @@ rec_copy(
/*=====*/
void* buf, /*!< in: buffer */
const rec_t* rec, /*!< in: physical record */
const ulint* offsets);/*!< in: array returned by rec_get_offsets() */
const ulint* offsets)/*!< in: array returned by rec_get_offsets() */
__attribute__((nonnull));
#ifndef UNIV_HOTBACKUP
/**********************************************************//**
Determines the size of a data tuple prefix in a temporary file.
@return total size */
UNIV_INTERN
ulint
rec_get_converted_size_temp(
/*========================*/
const dict_index_t* index, /*!< in: record descriptor */
const dfield_t* fields, /*!< in: array of data fields */
ulint n_fields,/*!< in: number of data fields */
ulint* extra) /*!< out: extra size */
__attribute__((warn_unused_result, nonnull));

/******************************************************//**
Determine the offset to each field in temporary file.
@see rec_convert_dtuple_to_temp() */
UNIV_INTERN
void
rec_init_offsets_temp(
/*==================*/
const rec_t* rec, /*!< in: temporary file record */
const dict_index_t* index, /*!< in: record descriptor */
ulint* offsets)/*!< in/out: array of offsets;
in: n=rec_offs_n_fields(offsets) */
__attribute__((nonnull));

/*********************************************************//**
Builds a temporary file record out of a data tuple.
@see rec_init_offsets_temp() */
UNIV_INTERN
void
rec_convert_dtuple_to_temp(
/*=======================*/
rec_t* rec, /*!< out: record */
const dict_index_t* index, /*!< in: record descriptor */
const dfield_t* fields, /*!< in: array of data fields */
ulint n_fields) /*!< in: number of fields */
__attribute__((nonnull));

/**************************************************************//**
Copies the first n fields of a physical record to a new physical record in
a buffer.
@ -675,21 +697,6 @@ rec_fold(
__attribute__((pure));
#endif /* !UNIV_HOTBACKUP */
/*********************************************************//**
Builds a ROW_FORMAT=COMPACT record out of a data tuple. */
UNIV_INTERN
void
rec_convert_dtuple_to_rec_comp(
/*===========================*/
rec_t* rec, /*!< in: origin of record */
ulint extra, /*!< in: number of bytes to
reserve between the record
header and the data payload
(normally REC_N_NEW_EXTRA_BYTES) */
const dict_index_t* index, /*!< in: record descriptor */
ulint status, /*!< in: status bits of the record */
const dfield_t* fields, /*!< in: array of data fields */
ulint n_fields);/*!< in: number of data fields */
/*********************************************************//**
Builds a physical record out of a data tuple and
stores it into the given buffer.
@return pointer to the origin of physical record */
@ -722,10 +729,7 @@ UNIV_INTERN
ulint
rec_get_converted_size_comp_prefix(
/*===============================*/
const dict_index_t* index, /*!< in: record descriptor;
dict_table_is_comp() is
assumed to hold, even if
it does not */
const dict_index_t* index, /*!< in: record descriptor */
const dfield_t* fields, /*!< in: array of data fields */
ulint n_fields,/*!< in: number of data fields */
ulint* extra); /*!< out: extra size */