Merge c-8808e253.1238-1-64736c10.cust.bredbandsbolaget.se:/home/pappa/mysql-5.1-new
into c-8808e253.1238-1-64736c10.cust.bredbandsbolaget.se:/home/pappa/wl2826

configure.in: Auto merged
dbug/dbug.c: Auto merged
include/my_sys.h: Auto merged
sql/ha_ndbcluster.cc: Auto merged
sql/ha_ndbcluster.h: Auto merged
sql/ha_partition.h: Auto merged
sql/mysql_priv.h: Auto merged
sql/set_var.cc: Auto merged
sql/sql_base.cc: Auto merged
sql/sql_class.cc: Auto merged
sql/sql_class.h: Auto merged
sql/sql_partition.cc: Auto merged
sql/sql_table.cc: Auto merged
sql/sql_yacc.yy: Auto merged
sql/table.cc: Auto merged
sql/unireg.cc: Auto merged
sql/ha_heap.cc: manual merge
sql/ha_myisammrg.cc: manual merge
sql/ha_partition.cc: manual merge
sql/handler.h: manual merge
sql/log.cc: manual merge
sql/mysqld.cc: manual merge
sql/share/errmsg.txt: manual merge
commit 88a1cff6ef
@@ -71,6 +71,7 @@ pentium_cflags="$check_cpu_cflags"
 pentium64_cflags="$check_cpu_cflags -m64"
 ppc_cflags="$check_cpu_cflags"
 sparc_cflags=""
+error_inject="--with-error-inject "
 
 # be as fast as we can be without losing our ability to backtrace
 fast_cflags="-O3 -fno-omit-frame-pointer"
@@ -6,6 +6,6 @@ path=`dirname $0`
 extra_flags="$pentium_cflags $debug_cflags $max_cflags"
 c_warnings="$c_warnings $debug_extra_warnings"
 cxx_warnings="$cxx_warnings $debug_extra_warnings"
-extra_configs="$pentium_configs $debug_configs $max_configs"
+extra_configs="$pentium_configs $debug_configs $max_configs $error_inject"
 
 . "$path/FINISH.sh"
configure.in | 13
@@ -666,6 +666,7 @@ else
   AC_MSG_RESULT([no])
 fi
 
+
 MYSQL_SYS_LARGEFILE
 
 # Types that must be checked AFTER large file support is checked
@@ -1566,6 +1567,18 @@ then
   DEBUG_OPTIMIZE_CXX=""
 fi
 
+# If we should allow error injection tests
+AC_ARG_WITH(error-inject,
+    [ --with-error-inject Enable error injection in MySQL Server],
+    [ with_error_inject=$withval ],
+    [ with_error_inject=no ])
+
+if test "$with_error_inject" = "yes"
+then
+  CFLAGS="-DERROR_INJECT_SUPPORT $CFLAGS"
+  CXXFLAGS="-DERROR_INJECT_SUPPORT $CXXFLAGS"
+fi
+
 AC_ARG_WITH(debug,
     [ --with-debug Add debug code
       --with-debug=full Add debug code (adds memory checker, very slow)],
@@ -559,7 +559,7 @@ extern File my_register_filename(File fd, const char *FileName,
                                  enum file_type type_of_file,
                                  uint error_message_number, myf MyFlags);
 extern File my_create(const char *FileName,int CreateFlags,
-                      int AccsesFlags, myf MyFlags);
+                      int AccessFlags, myf MyFlags);
 extern int my_close(File Filedes,myf MyFlags);
 extern File my_dup(File file, myf MyFlags);
 extern int my_mkdir(const char *dir, int Flags, myf MyFlags);
@@ -4546,7 +4546,9 @@ int ha_ndbcluster::create(const char *name,
   DBUG_RETURN(my_errno);
 }
 
-int ha_ndbcluster::create_handler_files(const char *file)
+int ha_ndbcluster::create_handler_files(const char *file,
+                                        const char *old_name,
+                                        bool rename_flag)
 {
   const char *name;
   Ndb* ndb;
@@ -4557,6 +4559,10 @@ int ha_ndbcluster::create_handler_files(const char *file)
 
   DBUG_ENTER("create_handler_files");
 
+  if (rename_flag)
+  {
+    DBUG_RETURN(FALSE);
+  }
   if (!(ndb= get_ndb()))
     DBUG_RETURN(HA_ERR_NO_CONNECTION);
 
@@ -611,7 +611,8 @@ class ha_ndbcluster: public handler
   int rename_table(const char *from, const char *to);
   int delete_table(const char *name);
   int create(const char *name, TABLE *form, HA_CREATE_INFO *info);
-  int create_handler_files(const char *file);
+  int create_handler_files(const char *file, const char *old_name,
+                           bool rename_flag);
   int get_default_no_partitions(ulonglong max_rows);
   bool get_no_parts(const char *name, uint *no_parts);
   void set_auto_partitions(partition_info *part_info);
@@ -103,8 +103,8 @@ handlerton partition_hton = {
   NULL, /* Alter Tablespace */
   NULL, /* Fill FILES table */
   HTON_NOT_USER_SELECTABLE | HTON_HIDDEN,
-  NULL, /* binlog_func */
-  NULL /* binlog_log_query */
+  NULL, /* binlog function */
+  NULL /* binlog query */
 };
 
 /*
@@ -395,88 +395,6 @@ int ha_partition::ha_initialise()
 /****************************************************************************
                 MODULE meta data changes
 ****************************************************************************/
-/*
-  Create partition names
-
-  SYNOPSIS
-    create_partition_name()
-    out:out                   Created partition name string
-    in1                       First part
-    in2                       Second part
-    name_variant              Normal, temporary or renamed partition name
-
-  RETURN VALUE
-    NONE
-
-  DESCRIPTION
-    This method is used to calculate the partition name, service routine to
-    the del_ren_cre_table method.
-*/
-
-#define NORMAL_PART_NAME 0
-#define TEMP_PART_NAME 1
-#define RENAMED_PART_NAME 2
-static void create_partition_name(char *out, const char *in1,
-                                  const char *in2, uint name_variant,
-                                  bool translate)
-{
-  char transl_part_name[FN_REFLEN];
-  const char *transl_part;
-
-  if (translate)
-  {
-    tablename_to_filename(in2, transl_part_name, FN_REFLEN);
-    transl_part= transl_part_name;
-  }
-  else
-    transl_part= in2;
-  if (name_variant == NORMAL_PART_NAME)
-    strxmov(out, in1, "#P#", transl_part, NullS);
-  else if (name_variant == TEMP_PART_NAME)
-    strxmov(out, in1, "#P#", transl_part, "#TMP#", NullS);
-  else if (name_variant == RENAMED_PART_NAME)
-    strxmov(out, in1, "#P#", transl_part, "#REN#", NullS);
-}
-
-/*
-  Create subpartition name
-
-  SYNOPSIS
-    create_subpartition_name()
-    out:out                   Created partition name string
-    in1                       First part
-    in2                       Second part
-    in3                       Third part
-    name_variant              Normal, temporary or renamed partition name
-
-  RETURN VALUE
-    NONE
-
-  DESCRIPTION
-    This method is used to calculate the subpartition name, service routine to
-    the del_ren_cre_table method.
-*/
-
-static void create_subpartition_name(char *out, const char *in1,
-                                     const char *in2, const char *in3,
-                                     uint name_variant)
-{
-  char transl_part_name[FN_REFLEN], transl_subpart_name[FN_REFLEN];
-
-  tablename_to_filename(in2, transl_part_name, FN_REFLEN);
-  tablename_to_filename(in3, transl_subpart_name, FN_REFLEN);
-  if (name_variant == NORMAL_PART_NAME)
-    strxmov(out, in1, "#P#", transl_part_name,
-            "#SP#", transl_subpart_name, NullS);
-  else if (name_variant == TEMP_PART_NAME)
-    strxmov(out, in1, "#P#", transl_part_name,
-            "#SP#", transl_subpart_name, "#TMP#", NullS);
-  else if (name_variant == RENAMED_PART_NAME)
-    strxmov(out, in1, "#P#", transl_part_name,
-            "#SP#", transl_subpart_name, "#REN#", NullS);
-}
-
-
 /*
   Delete a table
 
@@ -567,7 +485,9 @@ int ha_partition::rename_table(const char *from, const char *to)
   and types of engines in the partitions.
 */
 
-int ha_partition::create_handler_files(const char *name)
+int ha_partition::create_handler_files(const char *path,
+                                       const char *old_path,
+                                       bool rename_flag)
 {
   DBUG_ENTER("ha_partition::create_handler_files()");
 
|
|||||||
We need to update total number of parts since we might write the handler
|
We need to update total number of parts since we might write the handler
|
||||||
file as part of a partition management command
|
file as part of a partition management command
|
||||||
*/
|
*/
|
||||||
if (create_handler_file(name))
|
if (rename_flag)
|
||||||
{
|
{
|
||||||
my_error(ER_CANT_CREATE_HANDLER_FILE, MYF(0));
|
char name[FN_REFLEN];
|
||||||
DBUG_RETURN(1);
|
char old_name[FN_REFLEN];
|
||||||
|
|
||||||
|
strxmov(name, path, ha_par_ext, NullS);
|
||||||
|
strxmov(old_name, old_path, ha_par_ext, NullS);
|
||||||
|
if (my_delete(name, MYF(MY_WME)) ||
|
||||||
|
my_rename(old_name, name, MYF(MY_WME)))
|
||||||
|
{
|
||||||
|
DBUG_RETURN(TRUE);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
if (create_handler_file(path))
|
||||||
|
{
|
||||||
|
my_error(ER_CANT_CREATE_HANDLER_FILE, MYF(0));
|
||||||
|
DBUG_RETURN(1);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
DBUG_RETURN(0);
|
DBUG_RETURN(0);
|
||||||
}
|
}
|
||||||
@@ -644,45 +580,26 @@ int ha_partition::create(const char *name, TABLE *table_arg,
 int ha_partition::drop_partitions(const char *path)
 {
   List_iterator<partition_element> part_it(m_part_info->partitions);
-  List_iterator<partition_element> temp_it(m_part_info->temp_partitions);
   char part_name_buff[FN_REFLEN];
   uint no_parts= m_part_info->partitions.elements;
   uint part_count= 0;
   uint no_subparts= m_part_info->no_subparts;
   uint i= 0;
   uint name_variant;
-  int error= 1;
-  bool reorged_parts= (m_reorged_parts > 0);
-  bool temp_partitions= (m_part_info->temp_partitions.elements > 0);
+  int ret_error;
+  int error= 0;
   DBUG_ENTER("ha_partition::drop_partitions");
 
-  if (temp_partitions)
-    no_parts= m_part_info->temp_partitions.elements;
   do
   {
-    partition_element *part_elem;
-    if (temp_partitions)
-    {
-      /*
-        We need to remove the reorganised partitions that were put in the
-        temp_partitions-list.
-      */
-      part_elem= temp_it++;
-      DBUG_ASSERT(part_elem->part_state == PART_TO_BE_DROPPED);
-    }
-    else
-      part_elem= part_it++;
-    if (part_elem->part_state == PART_TO_BE_DROPPED ||
-        part_elem->part_state == PART_IS_CHANGED)
+    partition_element *part_elem= part_it++;
+    if (part_elem->part_state == PART_TO_BE_DROPPED)
     {
       handler *file;
       /*
         This part is to be dropped, meaning the part or all its subparts.
       */
       name_variant= NORMAL_PART_NAME;
-      if (part_elem->part_state == PART_IS_CHANGED ||
-          (part_elem->part_state == PART_TO_BE_DROPPED && temp_partitions))
-        name_variant= RENAMED_PART_NAME;
       if (m_is_sub_partitioned)
       {
         List_iterator<partition_element> sub_it(part_elem->subpartitions);
@@ -694,12 +611,10 @@ int ha_partition::drop_partitions(const char *path)
           create_subpartition_name(part_name_buff, path,
                                    part_elem->partition_name,
                                    sub_elem->partition_name, name_variant);
-          if (reorged_parts)
-            file= m_reorged_file[part_count++];
-          else
-            file= m_file[part];
+          file= m_file[part];
           DBUG_PRINT("info", ("Drop subpartition %s", part_name_buff));
-          error= file->delete_table((const char *) part_name_buff);
+          if ((ret_error= file->delete_table((const char *) part_name_buff)))
+            error= ret_error;
         } while (++j < no_subparts);
       }
       else
@@ -707,12 +622,10 @@ int ha_partition::drop_partitions(const char *path)
         create_partition_name(part_name_buff, path,
                               part_elem->partition_name, name_variant,
                               TRUE);
-        if (reorged_parts)
-          file= m_reorged_file[part_count++];
-        else
-          file= m_file[i];
+        file= m_file[i];
         DBUG_PRINT("info", ("Drop partition %s", part_name_buff));
-        error= file->delete_table((const char *) part_name_buff);
+        if ((ret_error= file->delete_table((const char *) part_name_buff)))
+          error= ret_error;
       }
       if (part_elem->part_state == PART_IS_CHANGED)
         part_elem->part_state= PART_NORMAL;
@@ -754,7 +667,8 @@ int ha_partition::rename_partitions(const char *path)
   uint no_subparts= m_part_info->no_subparts;
   uint i= 0;
   uint j= 0;
-  int error= 1;
+  int error= 0;
+  int ret_error;
   uint temp_partitions= m_part_info->temp_partitions.elements;
   handler *file;
   partition_element *part_elem, *sub_elem;
@@ -762,6 +676,14 @@ int ha_partition::rename_partitions(const char *path)
 
   if (temp_partitions)
   {
+    /*
+      These are the reorganised partitions that have already been copied.
+      We delete the partitions and log the delete by inactivating the
+      delete log entry in the table log. We only need to synchronise
+      these writes before moving to the next loop since there is no
+      interaction among reorganised partitions, they cannot have the
+      same name.
+    */
     do
     {
       part_elem= temp_it++;
@@ -772,39 +694,59 @@ int ha_partition::rename_partitions(const char *path)
         {
           sub_elem= sub_it++;
           file= m_reorged_file[part_count++];
-          create_subpartition_name(part_name_buff, path,
-                                   part_elem->partition_name,
-                                   sub_elem->partition_name,
-                                   RENAMED_PART_NAME);
           create_subpartition_name(norm_name_buff, path,
                                    part_elem->partition_name,
                                    sub_elem->partition_name,
                                    NORMAL_PART_NAME);
-          DBUG_PRINT("info", ("Rename subpartition from %s to %s",
-                              norm_name_buff, part_name_buff));
-          error= file->rename_table((const char *) norm_name_buff,
-                                    (const char *) part_name_buff);
+          DBUG_PRINT("info", ("Delete subpartition %s", norm_name_buff));
+          if ((ret_error= file->delete_table((const char *) norm_name_buff)))
+            error= ret_error;
+          else if (inactivate_table_log_entry(sub_elem->log_entry->entry_pos))
+            error= 1;
+          else
+            sub_elem->log_entry= NULL; /* Indicate success */
        } while (++j < no_subparts);
      }
      else
      {
        file= m_reorged_file[part_count++];
-        create_partition_name(part_name_buff, path,
-                              part_elem->partition_name, RENAMED_PART_NAME,
-                              TRUE);
        create_partition_name(norm_name_buff, path,
                              part_elem->partition_name, NORMAL_PART_NAME,
                              TRUE);
-        DBUG_PRINT("info", ("Rename partition from %s to %s",
-                            norm_name_buff, part_name_buff));
-        error= file->rename_table((const char *) norm_name_buff,
-                                  (const char *) part_name_buff);
+        DBUG_PRINT("info", ("Delete partition %s", norm_name_buff));
+        if ((ret_error= file->delete_table((const char *) norm_name_buff)))
+          error= ret_error;
+        else if (inactivate_table_log_entry(part_elem->log_entry->entry_pos))
+          error= 1;
+        else
+          part_elem->log_entry= NULL; /* Indicate success */
      }
    } while (++i < temp_partitions);
+    VOID(sync_table_log());
  }
  i= 0;
  do
  {
+    /*
+      When state is PART_IS_CHANGED it means that we have created a new
+      TEMP partition that is to be renamed to normal partition name and
+      we are to delete the old partition with currently the normal name.
+
+      We perform this operation by
+      1) Delete old partition with normal partition name
+      2) Signal this in table log entry
+      3) Synch table log to ensure we have consistency in crashes
+      4) Rename temporary partition name to normal partition name
+      5) Signal this to table log entry
+      It is not necessary to synch the last state since a new rename
+      should not corrupt things if there was no temporary partition.
+
+      The only other parts we need to cater for are new parts that
+      replace reorganised parts. The reorganised parts were deleted
+      by the code above that goes through the temp_partitions list.
+      Thus the synch above makes it safe to simply perform step 4 and 5
+      for those entries.
+    */
    part_elem= part_it++;
    if (part_elem->part_state == PART_IS_CHANGED ||
        (part_elem->part_state == PART_IS_ADDED && temp_partitions))
|
|||||||
if (part_elem->part_state == PART_IS_CHANGED)
|
if (part_elem->part_state == PART_IS_CHANGED)
|
||||||
{
|
{
|
||||||
file= m_reorged_file[part_count++];
|
file= m_reorged_file[part_count++];
|
||||||
create_subpartition_name(part_name_buff, path,
|
DBUG_PRINT("info", ("Delete subpartition %s", norm_name_buff));
|
||||||
part_elem->partition_name,
|
if ((ret_error= file->delete_table((const char *) norm_name_buff)))
|
||||||
sub_elem->partition_name,
|
error= ret_error;
|
||||||
RENAMED_PART_NAME);
|
else if (inactivate_table_log_entry(sub_elem->log_entry->entry_pos))
|
||||||
DBUG_PRINT("info", ("Rename subpartition from %s to %s",
|
error= 1;
|
||||||
norm_name_buff, part_name_buff));
|
VOID(sync_table_log());
|
||||||
error= file->rename_table((const char *) norm_name_buff,
|
|
||||||
(const char *) part_name_buff);
|
|
||||||
}
|
}
|
||||||
file= m_new_file[part];
|
file= m_new_file[part];
|
||||||
create_subpartition_name(part_name_buff, path,
|
create_subpartition_name(part_name_buff, path,
|
||||||
@@ -842,8 +782,13 @@ int ha_partition::rename_partitions(const char *path)
                                  TEMP_PART_NAME);
         DBUG_PRINT("info", ("Rename subpartition from %s to %s",
                             part_name_buff, norm_name_buff));
-        error= file->rename_table((const char *) part_name_buff,
-                                  (const char *) norm_name_buff);
+        if ((ret_error= file->rename_table((const char *) part_name_buff,
+                                           (const char *) norm_name_buff)))
+          error= ret_error;
+        else if (inactivate_table_log_entry(sub_elem->log_entry->entry_pos))
+          error= 1;
+        else
+          sub_elem->log_entry= NULL;
       } while (++j < no_subparts);
     }
     else
@@ -854,13 +799,12 @@ int ha_partition::rename_partitions(const char *path)
       if (part_elem->part_state == PART_IS_CHANGED)
       {
         file= m_reorged_file[part_count++];
-        create_partition_name(part_name_buff, path,
-                              part_elem->partition_name, RENAMED_PART_NAME,
-                              TRUE);
-        DBUG_PRINT("info", ("Rename partition from %s to %s",
-                            norm_name_buff, part_name_buff));
-        error= file->rename_table((const char *) norm_name_buff,
-                                  (const char *) part_name_buff);
+        DBUG_PRINT("info", ("Delete partition %s", norm_name_buff));
+        if ((ret_error= file->delete_table((const char *) norm_name_buff)))
+          error= ret_error;
+        else if (inactivate_table_log_entry(part_elem->log_entry->entry_pos))
+          error= 1;
+        VOID(sync_table_log());
       }
       file= m_new_file[i];
       create_partition_name(part_name_buff, path,
@@ -868,11 +812,17 @@ int ha_partition::rename_partitions(const char *path)
                             TRUE);
       DBUG_PRINT("info", ("Rename partition from %s to %s",
                           part_name_buff, norm_name_buff));
-      error= file->rename_table((const char *) part_name_buff,
-                                (const char *) norm_name_buff);
+      if ((ret_error= file->rename_table((const char *) part_name_buff,
+                                         (const char *) norm_name_buff)))
+        error= ret_error;
+      else if (inactivate_table_log_entry(part_elem->log_entry->entry_pos))
+        error= 1;
+      else
+        part_elem->log_entry= NULL;
     }
   }
 } while (++i < no_parts);
+VOID(sync_table_log());
 DBUG_RETURN(error);
 }
 
@@ -1864,8 +1814,8 @@ bool ha_partition::create_handler_file(const char *name)
   {
     part_elem= part_it++;
     if (part_elem->part_state != PART_NORMAL &&
-        part_elem->part_state != PART_IS_ADDED &&
-        part_elem->part_state != PART_IS_CHANGED)
+        part_elem->part_state != PART_TO_BE_ADDED &&
+        part_elem->part_state != PART_CHANGED)
       continue;
     tablename_to_filename(part_elem->partition_name, part_name,
                           FN_REFLEN);
@@ -1916,8 +1866,8 @@ bool ha_partition::create_handler_file(const char *name)
   {
     part_elem= part_it++;
     if (part_elem->part_state != PART_NORMAL &&
-        part_elem->part_state != PART_IS_ADDED &&
-        part_elem->part_state != PART_IS_CHANGED)
+        part_elem->part_state != PART_TO_BE_ADDED &&
+        part_elem->part_state != PART_CHANGED)
       continue;
     if (!m_is_sub_partitioned)
     {
@@ -179,7 +179,8 @@ public:
   virtual int rename_table(const char *from, const char *to);
   virtual int create(const char *name, TABLE *form,
                      HA_CREATE_INFO *create_info);
-  virtual int create_handler_files(const char *name);
+  virtual int create_handler_files(const char *name,
+                                   const char *old_name, bool rename_flag);
   virtual void update_create_info(HA_CREATE_INFO *create_info);
   virtual char *update_table_comment(const char *comment);
   virtual int change_partitions(HA_CREATE_INFO *create_info,
@@ -631,6 +631,7 @@ typedef struct {
 
 #define UNDEF_NODEGROUP 65535
 class Item;
+struct st_table_log_memory_entry;
 
 class partition_info;
 
@@ -638,7 +639,6 @@ struct st_partition_iter;
 #define NOT_A_PARTITION_ID ((uint32)-1)
 
 
-
 typedef struct st_ha_create_information
 {
   CHARSET_INFO *table_charset, *default_table_charset;
@@ -1375,7 +1375,11 @@ public:
   virtual void drop_table(const char *name);
 
   virtual int create(const char *name, TABLE *form, HA_CREATE_INFO *info)=0;
-  virtual int create_handler_files(const char *name) { return FALSE;}
+  virtual int create_handler_files(const char *name, const char *old_name,
+                                   bool rename_flag)
+  {
+    return FALSE;
+  }
 
   virtual int change_partitions(HA_CREATE_INFO *create_info,
                                 const char *path,
sql/mysql_priv.h | 128
@@ -615,6 +615,67 @@ struct Query_cache_query_flags
 #define query_cache_invalidate_by_MyISAM_filename_ref NULL
 #endif /*HAVE_QUERY_CACHE*/
 
+/*
+  Error injector Macros to enable easy testing of recovery after failures
+  in various error cases.
+*/
+#ifndef ERROR_INJECT_SUPPORT
+
+#define ERROR_INJECT(x) 0
+#define ERROR_INJECT_ACTION(x,action) 0
+#define ERROR_INJECT_CRASH(x) 0
+#define ERROR_INJECT_VALUE(x) 0
+#define ERROR_INJECT_VALUE_ACTION(x,action) 0
+#define ERROR_INJECT_VALUE_CRASH(x) 0
+#define SET_ERROR_INJECT_VALUE(x)
+
+#else
+
+#define SET_ERROR_INJECT_VALUE(x) \
+  current_thd->error_inject_value= (x)
+
+inline bool
+my_error_inject_name(const char *dbug_str)
+{
+  const char *extra_str= "-d,";
+  char total_str[200];
+  if (_db_strict_keyword_ (dbug_str))
+  {
+    strxmov(total_str, extra_str, dbug_str, NullS);
+    DBUG_SET(total_str);
+    return 1;
+  }
+  return 0;
+}
+
+
+inline bool
+my_error_inject(int value)
+{
+  THD *thd= current_thd;
+  if (thd->error_inject_value == (uint)value)
+  {
+    thd->error_inject_value= 0;
+    return 1;
+  }
+  return 0;
+}
+
+#define ERROR_INJECT_CRASH(code) \
+  DBUG_EVALUATE_IF(code, (abort(), 0), 0)
+#define ERROR_INJECT_ACTION(code, action) \
+  (my_error_inject_name(code) ? ((action), 0) : 0)
+#define ERROR_INJECT(code) \
+  my_error_inject_name(code)
+#define ERROR_INJECT_VALUE(value) \
+  my_error_inject(value)
+#define ERROR_INJECT_VALUE_ACTION(value,action) \
+  (my_error_inject(value) ? (action) : 0)
+#define ERROR_INJECT_VALUE_CRASH(value) \
+  (my_error_inject(value) ? abort() : 0)
+
+#endif
+
 uint build_table_path(char *buff, size_t bufflen, const char *db,
                       const char *table, const char *ext);
 void write_bin_log(THD *thd, bool clear_error,
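Note: the block above adds two flavours of error injection to sql/mysql_priv.h, a DBUG-keyword flavour (ERROR_INJECT, ERROR_INJECT_ACTION, ERROR_INJECT_CRASH) and a value flavour where a test first arms THD::error_inject_value via SET_ERROR_INJECT_VALUE() and the next matching ERROR_INJECT_VALUE() check fires exactly once and clears it. The stand-alone C++ sketch below only illustrates that one-shot value behaviour in isolation; it is not MySQL code, and every name in it is invented for the example.

#include <cstdio>

/* Stand-in for the per-session value that SET_ERROR_INJECT_VALUE() arms. */
static unsigned long error_inject_value= 0;

/* Mirrors my_error_inject() above: fire once when armed, then clear. */
static bool error_inject(int value)
{
  if (error_inject_value == (unsigned long) value)
  {
    error_inject_value= 0;      /* one-shot: later checks do not fire again */
    return true;
  }
  return false;
}

/* A step that a recovery test might want to fail on demand. */
static int write_meta_data_step()
{
  if (error_inject(6001))       /* plays the role of ERROR_INJECT_VALUE(6001) */
    return 1;                   /* simulated failure */
  return 0;                     /* normal path */
}

int main()
{
  printf("unarmed: %d\n", write_meta_data_step());  /* prints 0 */
  error_inject_value= 6001;                         /* like SET_ERROR_INJECT_VALUE(6001) */
  printf("armed:   %d\n", write_meta_data_step());  /* prints 1, injected failure */
  printf("again:   %d\n", write_meta_data_step());  /* prints 0, value was cleared */
  return 0;
}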
@@ -1091,6 +1152,16 @@ uint prep_alter_part_table(THD *thd, TABLE *table, ALTER_INFO *alter_info,
 bool remove_table_from_cache(THD *thd, const char *db, const char *table,
                              uint flags);
 
+#define NORMAL_PART_NAME 0
+#define TEMP_PART_NAME 1
+#define RENAMED_PART_NAME 2
+void create_partition_name(char *out, const char *in1,
+                           const char *in2, uint name_variant,
+                           bool translate);
+void create_subpartition_name(char *out, const char *in1,
+                              const char *in2, const char *in3,
+                              uint name_variant);
+
 typedef struct st_lock_param_type
 {
   ulonglong copied;
@@ -1110,14 +1181,62 @@ typedef struct st_lock_param_type
   uint key_count;
   uint db_options;
   uint pack_frm_len;
+  partition_info *part_info;
 } ALTER_PARTITION_PARAM_TYPE;
 
 void mem_alloc_error(size_t size);
-#define WFRM_INITIAL_WRITE 1
-#define WFRM_CREATE_HANDLER_FILES 2
+
+typedef struct st_table_log_entry
+{
+  const char *name;
+  const char *from_name;
+  const char *handler_type;
+  uint next_entry;
+  uint entry_pos;
+  char action_type;
+  char entry_type;
+  char phase;
+  char not_used;
+} TABLE_LOG_ENTRY;
+
+typedef struct st_table_log_memory_entry
+{
+  uint entry_pos;
+  struct st_table_log_memory_entry *next_log_entry;
+  struct st_table_log_memory_entry *prev_log_entry;
+  struct st_table_log_memory_entry *next_active_log_entry;
+} TABLE_LOG_MEMORY_ENTRY;
+
+#define TLOG_EXECUTE_CODE 'e'
+#define TLOG_LOG_ENTRY_CODE 'l'
+#define TLOG_IGNORE_LOG_ENTRY_CODE 'i'
+
+#define TLOG_DELETE_ACTION_CODE 'd'
+#define TLOG_RENAME_ACTION_CODE 'r'
+#define TLOG_REPLACE_ACTION_CODE 's'
+
+#define TLOG_HANDLER_TYPE_LEN 32
+
+bool write_table_log_entry(TABLE_LOG_ENTRY *table_log_entry,
+                           TABLE_LOG_MEMORY_ENTRY **active_entry);
+bool write_execute_table_log_entry(uint first_entry,
+                                   bool complete,
+                                   TABLE_LOG_MEMORY_ENTRY **active_entry);
+bool inactivate_table_log_entry(uint entry_no);
+void release_table_log_memory_entry(TABLE_LOG_MEMORY_ENTRY *log_entry);
+bool sync_table_log();
+void release_table_log();
+void execute_table_log_recovery();
+bool execute_table_log_entry(uint first_entry);
+void lock_global_table_log();
+void unlock_global_table_log();
+
+#define WFRM_WRITE_SHADOW 1
+#define WFRM_INSTALL_SHADOW 2
 #define WFRM_PACK_FRM 4
 bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags);
-bool abort_and_upgrade_lock(ALTER_PARTITION_PARAM_TYPE *lpt);
+bool abort_and_upgrade_lock(ALTER_PARTITION_PARAM_TYPE *lpt,
+                            bool can_be_killed);
 void close_open_tables_and_downgrade(ALTER_PARTITION_PARAM_TYPE *lpt);
 void mysql_wait_completed_table(ALTER_PARTITION_PARAM_TYPE *lpt, TABLE *my_table);
 
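Note: the declarations above form the new table log API that makes multi-step partition DDL recoverable after a crash. The fragment below is only a sketch of the intended call pattern; it assumes it sits inside the server tree with mysql_priv.h included, error handling is reduced to early returns, and the exact field and argument semantics are assumptions inferred from the declarations and from how rename_partitions() in sql/ha_partition.cc pairs each delete/rename with inactivate_table_log_entry() and sync_table_log().

/* Sketch only: log a file delete so recovery can redo it after a crash. */
static bool logged_delete_example(const char *path, const char *engine_name)
{
  TABLE_LOG_ENTRY entry;
  TABLE_LOG_MEMORY_ENTRY *log_entry;
  TABLE_LOG_MEMORY_ENTRY *exec_entry;

  entry.action_type=  TLOG_DELETE_ACTION_CODE;   /* assumed: redo action is "delete name" */
  entry.entry_type=   TLOG_LOG_ENTRY_CODE;
  entry.next_entry=   0;
  entry.phase=        0;
  entry.name=         path;
  entry.from_name=    NULL;
  entry.handler_type= engine_name;

  lock_global_table_log();
  if (write_table_log_entry(&entry, &log_entry) ||
      write_execute_table_log_entry(log_entry->entry_pos, TRUE, &exec_entry) ||
      sync_table_log())
  {
    unlock_global_table_log();
    return TRUE;
  }
  unlock_global_table_log();

  /* ... perform the actual delete of 'path' here ... */

  /* Once the step has completed, deactivate the entry and sync again. */
  if (inactivate_table_log_entry(log_entry->entry_pos))
    return TRUE;
  VOID(sync_table_log());
  release_table_log_memory_entry(log_entry);
  return FALSE;
}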
@@ -1288,6 +1407,9 @@ extern ulong delayed_insert_timeout;
 extern ulong delayed_insert_limit, delayed_queue_size;
 extern ulong delayed_insert_threads, delayed_insert_writes;
 extern ulong delayed_rows_in_use,delayed_insert_errors;
+#ifdef ERROR_INJECT_SUPPORT
+extern ulong error_inject_value;
+#endif
 extern ulong slave_open_temp_tables;
 extern ulong query_cache_size, query_cache_min_res_unit;
 extern ulong slow_launch_threads, slow_launch_time;
@@ -3665,6 +3665,7 @@ we force server id to 2, but this MySQL server will not act as a slave.");
       unireg_abort(1);
     }
   }
+  execute_table_log_recovery();
 
   create_shutdown_thread();
   create_maintenance_thread();
@@ -3695,6 +3696,7 @@ we force server id to 2, but this MySQL server will not act as a slave.");
   /* (void) pthread_attr_destroy(&connection_attrib); */
 
   DBUG_PRINT("quit",("Exiting main thread"));
+  release_table_log();
 
 #ifndef __WIN__
 #ifdef EXTRA_DEBUG2
@@ -5826,3 +5826,5 @@ ER_NDB_CANT_SWITCH_BINLOG_FORMAT
         eng "The NDB cluster engine does not support changing the binlog format on the fly yet"
 ER_PARTITION_NO_TEMPORARY
         eng "Cannot create temporary table with partitions"
+ER_TABLE_LOG_ERROR
+        eng "Error in table log"
@@ -6124,7 +6124,8 @@ bool is_equal(const LEX_STRING *a, const LEX_STRING *b)
     old_lock_level                  Old lock level
 */
 
-bool abort_and_upgrade_lock(ALTER_PARTITION_PARAM_TYPE *lpt)
+bool abort_and_upgrade_lock(ALTER_PARTITION_PARAM_TYPE *lpt,
+                            bool can_be_killed)
 {
   uint flags= RTFC_WAIT_OTHER_THREAD_FLAG | RTFC_CHECK_KILLED_FLAG;
   int error= FALSE;
@@ -6134,7 +6135,7 @@ bool abort_and_upgrade_lock(ALTER_PARTITION_PARAM_TYPE *lpt)
   VOID(pthread_mutex_lock(&LOCK_open));
   mysql_lock_abort(lpt->thd, lpt->table, TRUE);
   VOID(remove_table_from_cache(lpt->thd, lpt->db, lpt->table_name, flags));
-  if (lpt->thd->killed)
+  if (can_be_killed && lpt->thd->killed)
   {
     lpt->thd->no_warnings_for_error= 0;
     error= TRUE;
@@ -223,6 +223,9 @@ THD::THD()
   cuted_fields= sent_row_count= 0L;
   limit_found_rows= 0;
   statement_id_counter= 0UL;
+#ifdef ERROR_INJECT_SUPPORT
+  error_inject_value= 0UL;
+#endif
   // Must be reset to handle error with THD's created for init of mysqld
   lex->current_select= 0;
   start_time=(time_t) 0;
@@ -1119,6 +1119,9 @@ public:
   query_id_t query_id, warn_id;
   ulong thread_id, col_access;
 
+#ifdef ERROR_INJECT_SUPPORT
+  ulong error_inject_value;
+#endif
   /* Statement id is thread-wide. This counter is used to generate ids */
   ulong statement_id_counter;
   ulong rand_saved_seed1, rand_saved_seed2;
sql/sql_partition.cc | 1207 (diff suppressed because it is too large)
sql/sql_table.cc | 1037 (diff suppressed because it is too large)
@@ -3557,75 +3557,14 @@ part_definition:
           LEX *lex= Lex;
           partition_info *part_info= lex->part_info;
           partition_element *p_elem= new partition_element();
-          uint part_id= part_info->partitions.elements +
-                        part_info->temp_partitions.elements;
-          enum partition_state part_state;
+          uint part_id= part_info->partitions.elements;
 
-          if (part_info->part_state)
-            part_state= (enum partition_state)part_info->part_state[part_id];
-          else
-            part_state= PART_NORMAL;
-          switch (part_state)
+          if (!p_elem || part_info->partitions.push_back(p_elem))
           {
-            case PART_TO_BE_DROPPED:
-              /*
-                This part is currently removed so we keep it in a
-                temporary list for REPAIR TABLE to be able to handle
-                failures during drop partition process.
-              */
-            case PART_TO_BE_ADDED:
-              /*
-                This part is currently being added so we keep it in a
-                temporary list for REPAIR TABLE to be able to handle
-                failures during add partition process.
-              */
-              if (!p_elem || part_info->temp_partitions.push_back(p_elem))
-              {
-                mem_alloc_error(sizeof(partition_element));
-                YYABORT;
-              }
-              break;
-            case PART_IS_ADDED:
-              /*
-                Part has been added and is now a normal partition
-              */
-            case PART_TO_BE_REORGED:
-              /*
-                This part is currently reorganised, it is still however
-                used so we keep it in the list of partitions. We do
-                however need the state to be able to handle REPAIR TABLE
-                after failures in the reorganisation process.
-              */
-            case PART_REORGED_DROPPED:
-              /*
-                This part is currently reorganised as part of a
-                COALESCE PARTITION and it will be dropped without a new
-                replacement partition after completing the reorganisation.
-              */
-            case PART_CHANGED:
-              /*
-                This part is currently split or merged as part of ADD
-                PARTITION for a hash partition or as part of COALESCE
-                PARTITION for a hash partitioned table.
-              */
-            case PART_IS_CHANGED:
-              /*
-                This part has been split or merged as part of ADD
-                PARTITION for a hash partition or as part of COALESCE
-                PARTITION for a hash partitioned table.
-              */
-            case PART_NORMAL:
-              if (!p_elem || part_info->partitions.push_back(p_elem))
-              {
-                mem_alloc_error(sizeof(partition_element));
-                YYABORT;
-              }
-              break;
-            default:
-              mem_alloc_error((part_id * 1000) + part_state);
-              YYABORT;
+            mem_alloc_error(sizeof(partition_element));
+            YYABORT;
           }
-          p_elem->part_state= part_state;
+          p_elem->part_state= PART_NORMAL;
           part_info->curr_part_elem= p_elem;
           part_info->current_partition= p_elem;
           part_info->use_default_partitions= FALSE;

sql/table.cc | 35
@@ -667,36 +667,15 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
 #endif
       next_chunk+= 5 + partition_info_len;
     }
-    if (share->mysql_version > 50105 && next_chunk + 5 < buff_end)
+#if 0
+    if (share->mysql_version == 50106)
     {
       /*
-        Partition state was introduced to support partition management in version 5.1.5
+        Partition state array was here in version 5.1.6, this code makes
+        it possible to load a 5.1.6 table in later versions. Can most
+        likely be removed at some point in time.
       */
-      uint32 part_state_len= uint4korr(next_chunk);
-#ifdef WITH_PARTITION_STORAGE_ENGINE
-      if ((share->part_state_len= part_state_len))
-        if (!(share->part_state=
-              (uchar*) memdup_root(&share->mem_root, next_chunk + 4,
-                                   part_state_len)))
-        {
-          my_free(buff, MYF(0));
-          goto err;
-        }
-#else
-      if (part_state_len)
-      {
-        DBUG_PRINT("info", ("WITH_PARTITION_STORAGE_ENGINE is not defined"));
-        my_free(buff, MYF(0));
-        goto err;
-      }
-#endif
-      next_chunk+= 4 + part_state_len;
+      next_chunk+= 4;
     }
-#ifdef WITH_PARTITION_STORAGE_ENGINE
-    else
-    {
-      share->part_state_len= 0;
-      share->part_state= NULL;
-    }
 #endif
     keyinfo= share->key_info;
@@ -136,7 +136,6 @@ bool mysql_create_frm(THD *thd, const char *file_name,
   if (part_info)
   {
     create_info->extra_size+= part_info->part_info_len;
-    create_info->extra_size+= part_info->part_state_len;
   }
 #endif
 
@@ -209,12 +208,6 @@ bool mysql_create_frm(THD *thd, const char *file_name,
         my_write(file, (const byte*)part_info->part_info_string,
                  part_info->part_info_len + 1, MYF_RW))
       goto err;
-    DBUG_PRINT("info", ("Part state len = %d", part_info->part_state_len));
-    int4store(buff, part_info->part_state_len);
-    if (my_write(file, (const byte*)buff, 4, MYF_RW) ||
-        my_write(file, (const byte*)part_info->part_state,
-                 part_info->part_state_len, MYF_RW))
-      goto err;
   }
   else
 #endif
@@ -330,7 +323,7 @@ int rea_create_table(THD *thd, const char *path,
 
   // Make sure mysql_create_frm din't remove extension
   DBUG_ASSERT(*fn_rext(frm_name));
-  if (file->create_handler_files(path))
+  if (file->create_handler_files(path, NULL, FALSE))
     goto err_handler;
   if (!create_info->frm_only && ha_create_table(thd, path, db, table_name,
                                                 create_info,0))