Merge rkalimullin@bk-internal.mysql.com:/home/bk/mysql-4.1
into mysql.com:/usr/home/ram/work/4.1.b13659
commit a63df041da
@@ -190,7 +190,7 @@ static int com_quit(String *str,char*),
 com_connect(String *str,char*), com_status(String *str,char*),
 com_use(String *str,char*), com_source(String *str, char*),
 com_rehash(String *str, char*), com_tee(String *str, char*),
-com_notee(String *str, char*),
+com_notee(String *str, char*), com_charset(String *str,char*),
 com_prompt(String *str, char*), com_delimiter(String *str, char*);

 #ifdef USE_POPEN
@@ -263,6 +263,8 @@ static COMMANDS commands[] = {
 "Set outfile [to_outfile]. Append everything into given outfile." },
 { "use", 'u', com_use, 1,
 "Use another database. Takes database name as argument." },
+{ "charset_name", 'C', com_charset, 1,
+"Switch to another charset. Might be needed for processing binlog." },
 /* Get bash-like expansion for some commands */
 { "create table", 0, 0, 0, ""},
 { "create database", 0, 0, 0, ""},
@@ -1850,6 +1852,28 @@ com_clear(String *buffer,char *line __attribute__((unused)))
 return 0;
 }

+/* ARGSUSED */
+static int
+com_charset(String *buffer __attribute__((unused)), char *line)
+{
+char buff[256], *param;
+CHARSET_INFO * new_cs;
+strmake(buff, line, sizeof(buff) - 1);
+param= get_arg(buff, 0);
+if (!param || !*param)
+{
+return put_info("Usage: \\C char_setname | charset charset_name",
+INFO_ERROR, 0);
+}
+new_cs= get_charset_by_csname(param, MY_CS_PRIMARY, MYF(MY_WME));
+if (new_cs)
+{
+charset_info= new_cs;
+put_info("Charset changed", INFO_INFO);
+}
+else put_info("Charset is not found", INFO_INFO);
+return 0;
+}

 /*
 Execute command
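Illustrative note, not part of the patch: the new command is bound to the one-character shortcut 'C', so an interactive client session could switch character sets (for example, while replaying a binlog in a different charset) roughly like this, where cp932 is only a stand-in for any charset name the client knows:

    mysql> \C cp932
    Charset changed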
@@ -2557,8 +2557,11 @@ static const char *check_if_ignore_table(const char *table_name)
 mysql_free_result(res);
 return 0; /* assume table is ok */
 }
-if (strcmp(row[1], (result= "MRG_MyISAM")) &&
-strcmp(row[1], (result= "MRG_ISAM")))
+/* Some forward-compatibility: don't dump data from a VIEW */
+if (!row[1])
+result= "VIEW";
+else if (strcmp(row[1], (result= "MRG_MyISAM")) &&
+strcmp(row[1], (result= "MRG_ISAM")))
 result= 0;
 mysql_free_result(res);
 return result;
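Illustrative note, not part of the patch: row[1] here holds the engine column of the table metadata that mysqldump fetches (presumably via SHOW TABLE STATUS), and on a server that supports views that column comes back as NULL for a view, which is what the new branch keys on. A hypothetical session against such a server:

    create view v1 as select 1;
    show table status like 'v1';
    # the Engine/Type column reported for v1 is NULL, so mysqldump treats it as a VIEW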
@@ -115,8 +115,8 @@ enum {OPT_MANAGER_USER=256,OPT_MANAGER_HOST,OPT_MANAGER_PASSWD,
 The list of error codes to --error are stored in an internal array of
 structs. This struct can hold numeric SQL error codes or SQLSTATE codes
 as strings. The element next to the last active element in the list is
-set to type ERR_EMPTY. When an SQL statement return an error we use
-this list to check if this is an expected error.
+set to type ERR_EMPTY. When an SQL statement returns an error, we use
+this list to check if this is an expected error.
 */

 enum match_err_type
@@ -320,13 +320,6 @@ const char *command_names[]=
 "connection",
 "query",
 "connect",
-/* the difference between sleep and real_sleep is that sleep will use
-the delay from command line (--sleep) if there is one.
-real_sleep always uses delay from mysqltest's command line argument.
-the logic is that sometimes delays are cpu-dependent (and --sleep
-can be used to set this delay. real_sleep is used for cpu-independent
-delays
-*/
 "sleep",
 "real_sleep",
 "inc",
@@ -986,8 +979,8 @@ int do_source(struct st_query *query)
 *p++= 0;
 query->last_argument= p;
 /*
-If this file has already been sourced, dont source it again.
-It's already available in the q_lines cache
+If this file has already been sourced, don't source it again.
+It's already available in the q_lines cache.
 */
 if (parser.current_line < (parser.read_lines - 1))
 return 0;
@@ -1536,11 +1529,19 @@ int do_disable_rpl_parse(struct st_query *query __attribute__((unused)))
 do_sleep()
 q called command
 real_sleep use the value from opt_sleep as number of seconds to sleep
 if real_sleep is false

 DESCRIPTION
 sleep <seconds>
-real_sleep
+real_sleep <seconds>
+
+The difference between the sleep and real_sleep commands is that sleep
+uses the delay from the --sleep command-line option if there is one.
+(If the --sleep option is not given, the sleep command uses the delay
+specified by its argument.) The real_sleep command always uses the
+delay specified by its argument. The logic is that sometimes delays are
+cpu-dependent, and --sleep can be used to set this delay. real_sleep is
+used for cpu-independent delays.
 */

 int do_sleep(struct st_query *query, my_bool real_sleep)
@@ -1549,18 +1550,19 @@ int do_sleep(struct st_query *query, my_bool real_sleep)
 char *p= query->first_argument;
 char *sleep_start, *sleep_end= query->end;
 double sleep_val;
+char *cmd = (real_sleep ? "real_sleep" : "sleep");

 while (my_isspace(charset_info, *p))
 p++;
 if (!*p)
-die("Missing argument to sleep");
+die("Missing argument to %s", cmd);
 sleep_start= p;
 /* Check that arg starts with a digit, not handled by my_strtod */
 if (!my_isdigit(charset_info, *sleep_start))
-die("Invalid argument to sleep \"%s\"", query->first_argument);
+die("Invalid argument to %s \"%s\"", cmd, query->first_argument);
 sleep_val= my_strtod(sleep_start, &sleep_end, &error);
 if (error)
-die("Invalid argument to sleep \"%s\"", query->first_argument);
+die("Invalid argument to %s \"%s\"", cmd, query->first_argument);

 /* Fixed sleep time selected by --sleep option */
 if (opt_sleep && !real_sleep)
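Illustrative sketch, not part of the patch: in a hypothetical test run started as mysqltest --sleep=1, the two commands documented above behave differently:

    # pauses 1 second, because the --sleep value overrides the argument
    sleep 10;
    # pauses 10 seconds, because real_sleep always uses its own argument
    real_sleep 10;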
@@ -2151,7 +2153,7 @@ my_bool end_of_query(int c)
 Normally that means it will read lines until it reaches the
 "delimiter" that marks end of query. Default delimiter is ';'
 The function should be smart enough not to detect delimiter's
-found inside strings sorrounded with '"' and '\'' escaped strings.
+found inside strings surrounded with '"' and '\'' escaped strings.

 If the first line in a query starts with '#' or '-' this line is treated
 as a comment. A comment is always terminated when end of line '\n' is
@@ -2485,7 +2487,7 @@ static struct my_option my_long_options[] =
 {"result-file", 'R', "Read/Store result from/in this file.",
 (gptr*) &result_file, (gptr*) &result_file, 0, GET_STR, REQUIRED_ARG,
 0, 0, 0, 0, 0, 0},
-{"server-arg", 'A', "Send enbedded server this as a paramenter.",
+{"server-arg", 'A', "Send option value to embedded server as a parameter.",
 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
 {"server-file", 'F', "Read embedded server arguments from file.",
 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
@@ -2966,8 +2968,8 @@ static int run_query_normal(MYSQL* mysql, struct st_query* q, int flags)
 warn_res= mysql_store_result(mysql);
 }
 if (!warn_res)
-verbose_msg("Warning count is %u but didn't get any warnings\n",
-count);
+die("Warning count is %u but didn't get any warnings\n",
+count);
 else
 {
 dynstr_append_mem(ds, "Warnings:\n", 10);
@@ -3446,8 +3448,8 @@ static void run_query_stmt_handle_warnings(MYSQL *mysql, DYNAMIC_STRING *ds)
 {
 MYSQL_RES *warn_res= mysql_store_result(mysql);
 if (!warn_res)
-verbose_msg("Warning count is %u but didn't get any warnings\n",
-count);
+die("Warning count is %u but didn't get any warnings\n",
+count);
 else
 {
 dynstr_append_mem(ds, "Warnings:\n", 10);
@@ -4074,8 +4076,8 @@ int main(int argc, char **argv)
 /*
 my_stat() successful on result file. Check if we have not run a
 single query, but we do have a result file that contains data.
-Note that we don't care, if my_stat() fails. For example for
-non-existing or non-readable file we assume it's fine to have
+Note that we don't care, if my_stat() fails. For example, for a
+non-existing or non-readable file, we assume it's fine to have
 no query output from the test file, e.g. regarded as no error.
 */
 if (res_info.st_size)
@@ -5,7 +5,7 @@ AC_INIT(sql/mysqld.cc)
 AC_CANONICAL_SYSTEM
 # The Docs Makefile.am parses this line!
 # remember to also change ndb version below and update version.c in ndb
-AM_INIT_AUTOMAKE(mysql, 4.1.18)
+AM_INIT_AUTOMAKE(mysql, 4.1.19)
 AM_CONFIG_HEADER(config.h)

 PROTOCOL_VERSION=10
@@ -17,7 +17,7 @@ SHARED_LIB_VERSION=$SHARED_LIB_MAJOR_VERSION:0:0
 # ndb version
 NDB_VERSION_MAJOR=4
 NDB_VERSION_MINOR=1
-NDB_VERSION_BUILD=18
+NDB_VERSION_BUILD=19
 NDB_VERSION_STATUS=""

 # Set all version vars based on $VERSION. How do we do this more elegant ?
@@ -435,7 +435,7 @@ fi

 # libmysqlclient versioning when linked with GNU ld.
 if $LD --version 2>/dev/null|grep -q GNU; then
-LD_VERSION_SCRIPT="-Wl,--version-script=\$(top_srcdir)/libmysql/libmysql.ver"
+LD_VERSION_SCRIPT="-Wl,--version-script=\$(top_builddir)/libmysql/libmysql.ver"
 AC_CONFIG_FILES(libmysql/libmysql.ver)
 fi
 AC_SUBST(LD_VERSION_SCRIPT)
@ -104,6 +104,7 @@ int heap_create(const char *name, uint keys, HP_KEYDEF *keydef,
|
||||
DBUG_RETURN(1);
|
||||
}
|
||||
share->keydef= (HP_KEYDEF*) (share + 1);
|
||||
share->key_stat_version= 1;
|
||||
keyseg= (HA_KEYSEG*) (share->keydef + keys);
|
||||
init_block(&share->block, reclength + 1, min_records, max_records);
|
||||
/* Fix keys */
|
||||
|
@ -136,6 +136,7 @@ typedef struct st_heap_share
|
||||
HP_KEYDEF *keydef;
|
||||
ulong min_records,max_records; /* Params to open */
|
||||
ulong data_length,index_length,max_table_size;
|
||||
uint key_stat_version; /* version to indicate insert/delete */
|
||||
uint records; /* records */
|
||||
uint blength; /* records rounded up to 2^n */
|
||||
uint deleted; /* Deleted records in database */
|
||||
|
@ -191,7 +191,7 @@ static
|
||||
void
|
||||
btr_search_info_update_hash(
|
||||
/*========================*/
|
||||
btr_search_t* info, /* in: search info */
|
||||
btr_search_t* info, /* in/out: search info */
|
||||
btr_cur_t* cursor) /* in: cursor which was just positioned */
|
||||
{
|
||||
dict_index_t* index;
|
||||
@ -443,7 +443,7 @@ Updates the search info. */
|
||||
void
|
||||
btr_search_info_update_slow(
|
||||
/*========================*/
|
||||
btr_search_t* info, /* in: search info */
|
||||
btr_search_t* info, /* in/out: search info */
|
||||
btr_cur_t* cursor) /* in: cursor which was just positioned */
|
||||
{
|
||||
buf_block_t* block;
|
||||
@ -931,7 +931,7 @@ btr_search_drop_page_hash_index(
|
||||
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
|
||||
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
|
||||
#endif /* UNIV_SYNC_DEBUG */
|
||||
|
||||
retry:
|
||||
rw_lock_s_lock(&btr_search_latch);
|
||||
|
||||
block = buf_block_align(page);
|
||||
@ -1007,6 +1007,24 @@ next_rec:
|
||||
|
||||
rw_lock_x_lock(&btr_search_latch);
|
||||
|
||||
if (!block->is_hashed) {
|
||||
/* Someone else has meanwhile dropped the hash index */
|
||||
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
if (block->curr_n_fields != n_fields
|
||||
|| block->curr_n_bytes != n_bytes) {
|
||||
|
||||
/* Someone else has meanwhile built a new hash index on the
|
||||
page, with different parameters */
|
||||
|
||||
rw_lock_x_unlock(&btr_search_latch);
|
||||
|
||||
mem_free(folds);
|
||||
goto retry;
|
||||
}
|
||||
|
||||
for (i = 0; i < n_cached; i++) {
|
||||
|
||||
ha_remove_all_nodes_to_page(table, folds[i], page);
|
||||
@ -1014,7 +1032,22 @@ next_rec:
|
||||
|
||||
block->is_hashed = FALSE;
|
||||
|
||||
rw_lock_x_unlock(&btr_search_latch);
|
||||
cleanup:
|
||||
if (block->n_pointers) {
|
||||
/* Corruption */
|
||||
ut_print_timestamp(stderr);
|
||||
fprintf(stderr,
|
||||
" InnoDB: Corruption of adaptive hash index. After dropping\n"
|
||||
"InnoDB: the hash index to a page of %lu %lu, still %lu hash nodes remain.\n",
|
||||
(ulong) ut_dulint_get_high(tree_id),
|
||||
(ulong) ut_dulint_get_low(tree_id),
|
||||
(ulong) block->n_pointers);
|
||||
rw_lock_x_unlock(&btr_search_latch);
|
||||
|
||||
btr_search_validate();
|
||||
} else {
|
||||
rw_lock_x_unlock(&btr_search_latch);
|
||||
}
|
||||
|
||||
mem_free(folds);
|
||||
}
|
||||
|
@ -2755,7 +2755,8 @@ dict_table_get_highest_foreign_id(
|
||||
if (ut_strlen(foreign->id) > ((sizeof dict_ibfk) - 1) + len
|
||||
&& 0 == ut_memcmp(foreign->id, table->name, len)
|
||||
&& 0 == ut_memcmp(foreign->id + len,
|
||||
dict_ibfk, (sizeof dict_ibfk) - 1)) {
|
||||
dict_ibfk, (sizeof dict_ibfk) - 1)
|
||||
&& foreign->id[len + ((sizeof dict_ibfk) - 1)] != '0') {
|
||||
/* It is of the >= 4.0.18 format */
|
||||
|
||||
id = strtoul(foreign->id + len + ((sizeof dict_ibfk) - 1),
|
||||
|
@ -179,6 +179,11 @@ struct fil_space_struct {
|
||||
hash_node_t name_hash;/* hash chain the name_hash table */
|
||||
rw_lock_t latch; /* latch protecting the file space storage
|
||||
allocation */
|
||||
UT_LIST_NODE_T(fil_space_t) unflushed_spaces;
|
||||
/* list of spaces with at least one unflushed
|
||||
file we have written to */
|
||||
ibool is_in_unflushed_spaces; /* TRUE if this space is
|
||||
currently in the list above */
|
||||
UT_LIST_NODE_T(fil_space_t) space_list;
|
||||
/* list of all spaces */
|
||||
ibuf_data_t* ibuf_data;
|
||||
@ -211,6 +216,12 @@ struct fil_system_struct {
|
||||
not put to this list: they are opened
|
||||
after the startup, and kept open until
|
||||
shutdown */
|
||||
UT_LIST_BASE_NODE_T(fil_space_t) unflushed_spaces;
|
||||
/* base node for the list of those
|
||||
tablespaces whose files contain
|
||||
unflushed writes; those spaces have
|
||||
at least one file node where
|
||||
modification_counter > flush_counter */
|
||||
ulint n_open; /* number of files currently open */
|
||||
ulint max_n_open; /* n_open is not allowed to exceed
|
||||
this */
|
||||
@ -387,6 +398,36 @@ fil_space_get_ibuf_data(
|
||||
return(space->ibuf_data);
|
||||
}
|
||||
|
||||
/**************************************************************************
|
||||
Checks if all the file nodes in a space are flushed. The caller must hold
|
||||
the fil_system mutex. */
|
||||
static
|
||||
ibool
|
||||
fil_space_is_flushed(
|
||||
/*=================*/
|
||||
/* out: TRUE if all are flushed */
|
||||
fil_space_t* space) /* in: space */
|
||||
{
|
||||
fil_node_t* node;
|
||||
|
||||
#ifdef UNIV_SYNC_DEBUG
|
||||
ut_ad(mutex_own(&(fil_system->mutex)));
|
||||
#endif /* UNIV_SYNC_DEBUG */
|
||||
|
||||
node = UT_LIST_GET_FIRST(space->chain);
|
||||
|
||||
while (node) {
|
||||
if (node->modification_counter > node->flush_counter) {
|
||||
|
||||
return(FALSE);
|
||||
}
|
||||
|
||||
node = UT_LIST_GET_NEXT(chain, node);
|
||||
}
|
||||
|
||||
return(TRUE);
|
||||
}
|
||||
|
||||
/***********************************************************************
|
||||
Appends a new file to the chain of files of a space. File must be closed. */
|
||||
|
||||
@ -517,7 +558,7 @@ fil_node_open_file(
|
||||
if (size_bytes < FIL_IBD_FILE_INITIAL_SIZE * UNIV_PAGE_SIZE) {
|
||||
fprintf(stderr,
|
||||
"InnoDB: Error: the size of single-table tablespace file %s\n"
|
||||
"InnoDB: is only %lu %lu, should be at least %lu!", node->name,
|
||||
"InnoDB: is only %lu %lu, should be at least %lu!\n", node->name,
|
||||
(ulong) size_high,
|
||||
(ulong) size_low, (ulong) (4 * UNIV_PAGE_SIZE));
|
||||
|
||||
@ -687,8 +728,8 @@ fil_try_to_close_file_in_LRU(
|
||||
ut_print_filename(stderr, node->name);
|
||||
fprintf(stderr,
|
||||
", because mod_count %ld != fl_count %ld\n",
|
||||
(ulong) node->modification_counter,
|
||||
(ulong) node->flush_counter);
|
||||
(long) node->modification_counter,
|
||||
(long) node->flush_counter);
|
||||
}
|
||||
|
||||
node = UT_LIST_GET_PREV(LRU, node);
|
||||
@ -839,6 +880,16 @@ fil_node_free(
|
||||
|
||||
node->modification_counter = node->flush_counter;
|
||||
|
||||
if (space->is_in_unflushed_spaces
|
||||
&& fil_space_is_flushed(space)) {
|
||||
|
||||
space->is_in_unflushed_spaces = FALSE;
|
||||
|
||||
UT_LIST_REMOVE(unflushed_spaces,
|
||||
system->unflushed_spaces,
|
||||
space);
|
||||
}
|
||||
|
||||
fil_node_close_file(node, system);
|
||||
}
|
||||
|
||||
@ -1002,6 +1053,8 @@ try_again:
|
||||
|
||||
HASH_INSERT(fil_space_t, name_hash, system->name_hash,
|
||||
ut_fold_string(name), space);
|
||||
space->is_in_unflushed_spaces = FALSE;
|
||||
|
||||
UT_LIST_ADD_LAST(space_list, system->space_list, space);
|
||||
|
||||
mutex_exit(&(system->mutex));
|
||||
@ -1097,6 +1150,13 @@ fil_space_free(
|
||||
HASH_DELETE(fil_space_t, name_hash, system->name_hash,
|
||||
ut_fold_string(space->name), space);
|
||||
|
||||
if (space->is_in_unflushed_spaces) {
|
||||
space->is_in_unflushed_spaces = FALSE;
|
||||
|
||||
UT_LIST_REMOVE(unflushed_spaces, system->unflushed_spaces,
|
||||
space);
|
||||
}
|
||||
|
||||
UT_LIST_REMOVE(space_list, system->space_list, space);
|
||||
|
||||
ut_a(space->magic_n == FIL_SPACE_MAGIC_N);
|
||||
@ -1248,6 +1308,7 @@ fil_system_create(
|
||||
|
||||
system->tablespace_version = 0;
|
||||
|
||||
UT_LIST_INIT(system->unflushed_spaces);
|
||||
UT_LIST_INIT(system->space_list);
|
||||
|
||||
return(system);
|
||||
@ -2612,12 +2673,12 @@ fil_open_single_table_tablespace(
|
||||
fputs("!\n"
|
||||
"InnoDB: Have you moved InnoDB .ibd files around without using the\n"
|
||||
"InnoDB: commands DISCARD TABLESPACE and IMPORT TABLESPACE?\n"
|
||||
"InnoDB: It is also possible that this is a table created with\n"
|
||||
"InnoDB: CREATE TEMPORARY TABLE, and MySQL removed the .ibd file for this.\n"
|
||||
"InnoDB: It is also possible that this is a temporary table #sql...,\n"
|
||||
"InnoDB: and MySQL removed the .ibd file for this.\n"
|
||||
"InnoDB: Please refer to\n"
|
||||
"InnoDB:"
|
||||
" http://dev.mysql.com/doc/mysql/en/InnoDB_troubleshooting_datadict.html\n"
|
||||
"InnoDB: how to resolve the issue.\n", stderr);
|
||||
"InnoDB: for how to resolve the issue.\n", stderr);
|
||||
|
||||
mem_free(filepath);
|
||||
|
||||
@ -2657,7 +2718,7 @@ fil_open_single_table_tablespace(
|
||||
"InnoDB: Please refer to\n"
|
||||
"InnoDB:"
|
||||
" http://dev.mysql.com/doc/mysql/en/InnoDB_troubleshooting_datadict.html\n"
|
||||
"InnoDB: how to resolve the issue.\n", (ulong) space_id, (ulong) id);
|
||||
"InnoDB: for how to resolve the issue.\n", (ulong) space_id, (ulong) id);
|
||||
|
||||
ret = FALSE;
|
||||
|
||||
@ -3292,7 +3353,7 @@ fil_space_for_table_exists_in_mem(
|
||||
ut_print_filename(stderr, name);
|
||||
fprintf(stderr, "\n"
|
||||
"InnoDB: in InnoDB data dictionary has tablespace id %lu,\n"
|
||||
"InnoDB: but tablespace with that id does not exist. There is\n"
|
||||
"InnoDB: but a tablespace with that id does not exist. There is\n"
|
||||
"InnoDB: a tablespace of name %s and id %lu, though. Have\n"
|
||||
"InnoDB: you deleted or moved .ibd files?\n",
|
||||
(ulong) id, namespace->name,
|
||||
@ -3303,7 +3364,7 @@ fil_space_for_table_exists_in_mem(
|
||||
"InnoDB: Please refer to\n"
|
||||
"InnoDB:"
|
||||
" http://dev.mysql.com/doc/mysql/en/InnoDB_troubleshooting_datadict.html\n"
|
||||
"InnoDB: how to resolve the issue.\n", stderr);
|
||||
"InnoDB: for how to resolve the issue.\n", stderr);
|
||||
|
||||
mem_free(path);
|
||||
mutex_exit(&(system->mutex));
|
||||
@ -3317,7 +3378,7 @@ fil_space_for_table_exists_in_mem(
|
||||
ut_print_filename(stderr, name);
|
||||
fprintf(stderr, "\n"
|
||||
"InnoDB: in InnoDB data dictionary has tablespace id %lu,\n"
|
||||
"InnoDB: but tablespace with that id has name %s.\n"
|
||||
"InnoDB: but the tablespace with that id has name %s.\n"
|
||||
"InnoDB: Have you deleted or moved .ibd files?\n", (ulong) id, space->name);
|
||||
|
||||
if (namespace != NULL) {
|
||||
@ -3732,6 +3793,14 @@ fil_node_complete_io(
|
||||
if (type == OS_FILE_WRITE) {
|
||||
system->modification_counter++;
|
||||
node->modification_counter = system->modification_counter;
|
||||
|
||||
if (!node->space->is_in_unflushed_spaces) {
|
||||
|
||||
node->space->is_in_unflushed_spaces = TRUE;
|
||||
UT_LIST_ADD_FIRST(unflushed_spaces,
|
||||
system->unflushed_spaces,
|
||||
node->space);
|
||||
}
|
||||
}
|
||||
|
||||
if (node->n_pending == 0 && node->space->purpose == FIL_TABLESPACE
|
||||
@ -4145,6 +4214,16 @@ retry:
|
||||
skip_flush:
|
||||
if (node->flush_counter < old_mod_counter) {
|
||||
node->flush_counter = old_mod_counter;
|
||||
|
||||
if (space->is_in_unflushed_spaces
|
||||
&& fil_space_is_flushed(space)) {
|
||||
|
||||
space->is_in_unflushed_spaces = FALSE;
|
||||
|
||||
UT_LIST_REMOVE(unflushed_spaces,
|
||||
system->unflushed_spaces,
|
||||
space);
|
||||
}
|
||||
}
|
||||
|
||||
if (space->purpose == FIL_TABLESPACE) {
|
||||
@ -4176,7 +4255,7 @@ fil_flush_file_spaces(
|
||||
|
||||
mutex_enter(&(system->mutex));
|
||||
|
||||
space = UT_LIST_GET_FIRST(system->space_list);
|
||||
space = UT_LIST_GET_FIRST(system->unflushed_spaces);
|
||||
|
||||
while (space) {
|
||||
if (space->purpose == purpose && !space->is_being_deleted) {
|
||||
@ -4192,7 +4271,7 @@ fil_flush_file_spaces(
|
||||
|
||||
space->n_pending_flushes--;
|
||||
}
|
||||
space = UT_LIST_GET_NEXT(space_list, space);
|
||||
space = UT_LIST_GET_NEXT(unflushed_spaces, space);
|
||||
}
|
||||
|
||||
mutex_exit(&(system->mutex));
|
||||
|
@ -16,7 +16,7 @@ Updates the search info. */
|
||||
void
|
||||
btr_search_info_update_slow(
|
||||
/*========================*/
|
||||
btr_search_t* info, /* in: search info */
|
||||
btr_search_t* info, /* in/out: search info */
|
||||
btr_cur_t* cursor);/* in: cursor which was just positioned */
|
||||
|
||||
/************************************************************************
|
||||
|
@ -4628,6 +4628,12 @@ my_bool STDCALL mysql_stmt_reset(MYSQL_STMT *stmt)
|
||||
/* If statement hasnt been prepared there is nothing to reset */
|
||||
if ((int) stmt->state < (int) MYSQL_STMT_PREPARE_DONE)
|
||||
DBUG_RETURN(0);
|
||||
if (!stmt->mysql)
|
||||
{
|
||||
/* mysql can be reset in mysql_close called from mysql_reconnect */
|
||||
set_stmt_error(stmt, CR_SERVER_LOST, unknown_sqlstate);
|
||||
DBUG_RETURN(1);
|
||||
}
|
||||
|
||||
mysql= stmt->mysql->last_used_con;
|
||||
int4store(buff, stmt->stmt_id); /* Send stmt id to server */
|
||||
|
@@ -1,45 +1,51 @@
-This directory contains a test suite for mysql daemon. To run
+This directory contains a test suite for the MySQL daemon. To run
 the currently existing test cases, simply execute ./mysql-test-run in
 this directory. It will fire up the newly built mysqld and test it.

-If you want to run a test with a running MySQL server use the --extern
-option to mysql-test-run. Please note that in this mode the test suite
-expects user to specify test names to run. Otherwise it falls back to the
-normal "non-extern" behaviour. The reason is that some tests
-could not run with external server. Here is the sample command
-to test "alias" and "analyze" tests on external server:
-
-mysql-test-run --extern alias analyze
-
-To match your setup you might also need to provide --socket, --user and
-other relevant options.
-
-Note that you do not have to have to do make install, and you could
-actually have a co-existing MySQL installation - the tests will not
+Note that you do not have to have to do "make install", and you could
+actually have a co-existing MySQL installation. The tests will not
 conflict with it.

 All tests must pass. If one or more of them fail on your system, please
-read the following manual section of how to report the problem:
+read the following manual section for instructions on how to report the
+problem:

-http://dev.mysql.com/doc/mysql/en/MySQL_test_suite.html
+http://dev.mysql.com/doc/mysql/en/mysql-test-suite.html

+If you want to use an already running MySQL server for specific tests,
+use the --extern option to mysql-test-run. Please note that in this mode,
+the test suite expects you to provide the names of the tests to run.
+For example, here is the command to run the "alias" and "analyze" tests
+with an external server:
+
+mysql-test-run --extern alias analyze
+
+To match your setup, you might also need to provide --socket, --user, and
+other relevant options.
+
+With no test cases named on the command line, mysql-test-run falls back
+to the normal "non-extern" behavior. The reason for this is that some
+tests cannot run with an external server.
+

-You can create your own test cases. To create a test case:
+You can create your own test cases. To create a test case, create a new
+file in the t subdirectory using a text editor. The file should have a .test
+extension. For example:

 xemacs t/test_case_name.test

-in the file, put a set of SQL commands that will create some tables,
-load test data, run some queries to manipulate it.
+In the file, put a set of SQL statements that create some tables,
+load test data, and run some queries to manipulate it.

-We would appreciate if the test tables were called t1, t2, t3 ... (to not
+We would appreciate it if you name your test tables t1, t2, t3 ... (to not
 conflict too much with existing tables).

 Your test should begin by dropping the tables you are going to create and
-end by dropping them again. This will ensure that one can run the test
-over and over again.
+end by dropping them again. This ensures that you can run the test over
+and over again.

 If you are using mysqltest commands (like result file names) in your
-test case you should do create the result file as follows:
+test case, you should create the result file as follows:

 mysql-test-run --record test_case_name

@@ -47,8 +53,8 @@ You can create your own test cases. To create a test case:

 mysqltest --record < t/test_case_name.test

-If you only have a simple test cases consistent of SQL commands and comments
-you can create the test case one of the following ways:
+If you only have a simple test cases consisting of SQL statements and
+comments, you can create the test case in one of the following ways:

 mysql-test-run --record test_case_name

@@ -57,11 +63,11 @@ You can create your own test cases. To create a test case:
 mysqltest --record --record-file=r/test_case_name.result < t/test_case_name.test

 When this is done, take a look at r/test_case_name.result
-- If the result is wrong, you have found a bug; In this case you should
+- If the result is incorrect, you have found a bug. In this case, you should
 edit the test result to the correct results so that we can verify
 that the bug is corrected in future releases.

 To submit your test case, put your .test file and .result file(s) into
 a tar.gz archive, add a README that explains the problem, ftp the
-archive to ftp://support.mysql.com/pub/mysql/secret/ and send a mail
+archive to ftp://support.mysql.com/pub/mysql/secret/ and send a mail
 to bugs@lists.mysql.com
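Illustrative sketch, not part of the patch: a minimal test case of the kind described above, placed in a hypothetical file t/example.test, could be as small as the following (it drops its table first and last, and sticks to the t1 naming convention):

    --disable_warnings
    drop table if exists t1;
    --enable_warnings
    create table t1 (a int);
    insert into t1 values (1),(2),(3);
    select a from t1 order by a;
    drop table t1;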
@@ -1,11 +1,13 @@
 To be able to see the level of coverage with the current test suite,
 do the following:

-- make sure gcov is installed
-- compile with BUILD/compile-pentium-gcov ( if your machine is not pentium, hack
-this script, or just live with the pentium-specific stuff)
-- ./mysql-test-run -gcov
-- to see the level of coverage for a given source file:
+- Make sure gcov is installed
+- Compile the MySQL distribution with BUILD/compile-pentium-gcov (if your
+machine does not have a pentium CPU, hack this script, or just live with
+the pentium-specific stuff)
+- In the mysql-test directory, run this command: ./mysql-test-run -gcov
+- To see the level of coverage for a given source file:
 grep source_file_name /tmp/gcov.out
-- to see which lines are not yet covered, look at source_file_name.gcov in the source tree. Then think hard about a test case that will cover those
-lines, and write one!
+- To see which lines are not yet covered, look at source_file_name.gcov in
+the source tree. Then think hard about a test case that will cover those
+lines, and write one!
@@ -770,7 +770,15 @@ sub mtr_record_dead_children () {
 }

 sub start_reap_all {
-$SIG{CHLD}= 'IGNORE'; # FIXME is this enough?
+# This causes terminating processes to not become zombies, avoiding
+# the need for (or possibility of) explicit waitpid().
+$SIG{CHLD}= 'IGNORE';
+
+# On some platforms (Linux, QNX, OSX, ...) there is potential race
+# here. If a process terminated before setting $SIG{CHLD} (but after
+# any attempt to waitpid() it), it will still be a zombie. So we
+# have to handle any such process here.
+while(waitpid(-1, &WNOHANG) > 0) { };
 }

 sub stop_reap_all {
@ -184,6 +184,7 @@ our $opt_big_test= 0; # Send --big-test to mysqltest
|
||||
|
||||
our @opt_extra_mysqld_opt;
|
||||
|
||||
our $opt_comment;
|
||||
our $opt_compress;
|
||||
our $opt_current_test;
|
||||
our $opt_ddd;
|
||||
@ -463,11 +464,21 @@ sub command_line_setup () {
|
||||
my $opt_slave_myport= 9308;
|
||||
$opt_ndbcluster_port= 9350;
|
||||
|
||||
#
|
||||
# To make it easier for different devs to work on the same host,
|
||||
# an environment variable can be used to control all ports. A small
|
||||
# number is to be used, 0 - 16 or similar.
|
||||
#
|
||||
# Note the MASTER_MYPORT has to be set the same in all 4.x and 5.x
|
||||
# versions of this script, else a 4.0 test run might conflict with a
|
||||
# 5.1 test run, even if different MTR_BUILD_THREAD is used. This means
|
||||
# all port numbers might not be used in this version of the script.
|
||||
#
|
||||
if ( $ENV{'MTR_BUILD_THREAD'} )
|
||||
{
|
||||
$opt_master_myport= $ENV{'MTR_BUILD_THREAD'} * 40 + 8120;
|
||||
$opt_slave_myport= $opt_master_myport + 16;
|
||||
$opt_ndbcluster_port= $opt_master_myport + 24;
|
||||
$opt_master_myport= $ENV{'MTR_BUILD_THREAD'} * 10 + 10000;
|
||||
$opt_slave_myport= $opt_master_myport + 2; # and 3 4
|
||||
$opt_ndbcluster_port= $opt_master_myport + 5;
|
||||
}
|
||||
|
||||
# Read the command line
|
||||
@ -526,6 +537,7 @@ sub command_line_setup () {
|
||||
|
||||
# Misc
|
||||
'big-test' => \$opt_big_test,
|
||||
'comment=s' => \$opt_comment,
|
||||
'compress' => \$opt_compress,
|
||||
'debug' => \$opt_debug,
|
||||
'fast' => \$opt_fast,
|
||||
@ -561,6 +573,14 @@ sub command_line_setup () {
|
||||
usage("");
|
||||
}
|
||||
|
||||
if ( $opt_comment )
|
||||
{
|
||||
print "\n";
|
||||
print '#' x 78, "\n";
|
||||
print "# $opt_comment\n";
|
||||
print '#' x 78, "\n\n";
|
||||
}
|
||||
|
||||
foreach my $arg ( @ARGV )
|
||||
{
|
||||
if ( $arg =~ /^--skip-/ )
|
||||
@ -975,12 +995,14 @@ sub environment_setup () {
|
||||
$ENV{'USE_RUNNING_SERVER'}= $glob_use_running_server;
|
||||
$ENV{'MYSQL_TEST_DIR'}= $glob_mysql_test_dir;
|
||||
$ENV{'MYSQL_TEST_WINDIR'}= $glob_mysql_test_dir;
|
||||
$ENV{'MASTER_MYSOCK'}= $master->[0]->{'path_mysock'};
|
||||
$ENV{'MASTER_WINMYSOCK'}= $master->[0]->{'path_mysock'};
|
||||
$ENV{'MASTER_MYSOCK'}= $master->[0]->{'path_mysock'};
|
||||
$ENV{'MASTER_MYSOCK1'}= $master->[1]->{'path_mysock'};
|
||||
$ENV{'MASTER_MYPORT'}= $master->[0]->{'path_myport'};
|
||||
$ENV{'MASTER_MYPORT1'}= $master->[1]->{'path_myport'};
|
||||
$ENV{'SLAVE_MYPORT'}= $slave->[0]->{'path_myport'};
|
||||
$ENV{'SLAVE_MYPORT1'}= $slave->[1]->{'path_myport'};
|
||||
$ENV{'SLAVE_MYPORT2'}= $slave->[2]->{'path_myport'};
|
||||
# $ENV{'MYSQL_TCP_PORT'}= '@MYSQL_TCP_PORT@'; # FIXME
|
||||
$ENV{'MYSQL_TCP_PORT'}= 3306;
|
||||
|
||||
@ -994,11 +1016,15 @@ sub environment_setup () {
|
||||
}
|
||||
}
|
||||
|
||||
$ENV{MTR_BUILD_THREAD}= 0 unless $ENV{MTR_BUILD_THREAD}; # Set if not set
|
||||
|
||||
# We are nice and report a bit about our settings
|
||||
print "Using MTR_BUILD_THREAD = ",$ENV{MTR_BUILD_THREAD} || 0,"\n";
|
||||
print "Using MTR_BUILD_THREAD = $ENV{MTR_BUILD_THREAD}\n";
|
||||
print "Using MASTER_MYPORT = $ENV{MASTER_MYPORT}\n";
|
||||
print "Using MASTER_MYPORT1 = $ENV{MASTER_MYPORT1}\n";
|
||||
print "Using SLAVE_MYPORT = $ENV{SLAVE_MYPORT}\n";
|
||||
print "Using SLAVE_MYPORT1 = $ENV{SLAVE_MYPORT1}\n";
|
||||
print "Using SLAVE_MYPORT2 = $ENV{SLAVE_MYPORT2}\n";
|
||||
print "Using NDBCLUSTER_PORT = $opt_ndbcluster_port\n";
|
||||
}
|
||||
|
||||
@ -2382,6 +2408,7 @@ Misc options
|
||||
|
||||
verbose Verbose output from this script
|
||||
script-debug Debug this script itself
|
||||
comment=STR Write STR to the output
|
||||
compress Use the compressed protocol between client and server
|
||||
timer Show test case execution time
|
||||
start-and-exit Only initiate and start the "mysqld" servers, use the startup
|
||||
|
@ -215,11 +215,16 @@ MYSQL_MANAGER_USER=root
|
||||
# an environment variable can be used to control all ports. A small
|
||||
# number is to be used, 0 - 16 or similar.
|
||||
#
|
||||
# Note the MASTER_MYPORT has to be set the same in all 4.x and 5.x
|
||||
# versions of this script, else a 4.0 test run might conflict with a
|
||||
# 5.1 test run, even if different MTR_BUILD_THREAD is used. This means
|
||||
# all port numbers might not be used in this version of the script.
|
||||
#
|
||||
if [ -n "$MTR_BUILD_THREAD" ] ; then
|
||||
MASTER_MYPORT=`expr $MTR_BUILD_THREAD '*' 5 + 10000`
|
||||
MASTER_MYPORT=`expr $MTR_BUILD_THREAD '*' 10 + 10000`
|
||||
MYSQL_MANAGER_PORT=`expr $MASTER_MYPORT + 2`
|
||||
SLAVE_MYPORT=`expr $MASTER_MYPORT + 3`
|
||||
NDBCLUSTER_PORT=`expr $MASTER_MYPORT + 4`
|
||||
NDBCLUSTER_PORT=`expr $MASTER_MYPORT + 6`
|
||||
|
||||
echo "Using MTR_BUILD_THREAD = $MTR_BUILD_THREAD"
|
||||
echo "Using MASTER_MYPORT = $MASTER_MYPORT"
|
||||
@ -471,6 +476,13 @@ while test $# -gt 0; do
|
||||
--fast)
|
||||
FAST_START=1
|
||||
;;
|
||||
--comment=*)
|
||||
TMP=`$ECHO "$1" | $SED -e "s;--comment=;;"`
|
||||
echo
|
||||
echo '############################################'
|
||||
echo "# $TMP"
|
||||
echo '############################################'
|
||||
;;
|
||||
-- ) shift; break ;;
|
||||
--* ) $ECHO "Unrecognized option: $1"; exit 1 ;;
|
||||
* ) break ;;
|
||||
@ -784,7 +796,7 @@ show_failed_diff ()
|
||||
$DIFF -c $result_file $reject_file
|
||||
echo "-------------------------------------------------------"
|
||||
echo "Please follow the instructions outlined at"
|
||||
echo "http://www.mysql.com/doc/en/Reporting_mysqltest_bugs.html"
|
||||
echo "http://dev.mysql.com/doc/mysql/en/reporting-mysqltest-bugs.html"
|
||||
echo "to find the reason to this problem and how to report this."
|
||||
echo ""
|
||||
fi
|
||||
@ -879,7 +891,7 @@ report_stats () {
|
||||
$ECHO "The log files in $MY_LOG_DIR may give you some hint"
|
||||
$ECHO "of what when wrong."
|
||||
$ECHO "If you want to report this error, please read first the documentation at"
|
||||
$ECHO "http://www.mysql.com/doc/en/MySQL_test_suite.html"
|
||||
$ECHO "http://dev.mysql.com/doc/mysql/en/mysql-test-suite.html"
|
||||
fi
|
||||
|
||||
if test -z "$USE_RUNNING_SERVER"
|
||||
|
@ -436,3 +436,14 @@ SELECT a FROM t1 WHERE MATCH a AGAINST('testword\'\'' IN BOOLEAN MODE);
|
||||
a
|
||||
testword''
|
||||
DROP TABLE t1;
|
||||
CREATE TABLE t1 (a TEXT, FULLTEXT KEY(a));
|
||||
INSERT INTO t1 VALUES('test'),('test1'),('test');
|
||||
PREPARE stmt from "SELECT a, MATCH(a) AGAINST('test1 test') FROM t1 WHERE MATCH(a) AGAINST('test1 test')";
|
||||
EXECUTE stmt;
|
||||
a MATCH(a) AGAINST('test1 test')
|
||||
test1 0.68526661396027
|
||||
EXECUTE stmt;
|
||||
a MATCH(a) AGAINST('test1 test')
|
||||
test1 0.68526661396027
|
||||
DEALLOCATE PREPARE stmt;
|
||||
DROP TABLE t1;
|
||||
|
@ -182,7 +182,7 @@ SELECT * FROM t1 WHERE a=NULL;
|
||||
a b
|
||||
explain SELECT * FROM t1 WHERE a IS NULL;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 ref a a 5 const 1 Using where
|
||||
1 SIMPLE t1 ref a a 5 const 2 Using where
|
||||
SELECT * FROM t1 WHERE a<=>NULL;
|
||||
a b
|
||||
NULL 99
|
||||
@ -296,3 +296,13 @@ insert into t1 values ("abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcd
|
||||
insert into t1 values ("abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz");
|
||||
ERROR 23000: Duplicate entry 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijkl' for key 1
|
||||
drop table t1;
|
||||
CREATE TABLE t1 (a int, key(a)) engine=heap;
|
||||
insert into t1 values (0);
|
||||
delete from t1;
|
||||
select * from t1;
|
||||
a
|
||||
insert into t1 values (0), (1);
|
||||
select * from t1 where a = 0;
|
||||
a
|
||||
0
|
||||
drop table t1;
|
||||
|
@ -182,7 +182,7 @@ SELECT * FROM t1 WHERE a=NULL;
|
||||
a b
|
||||
explain SELECT * FROM t1 WHERE a IS NULL;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 ref a a 5 const 1 Using where
|
||||
1 SIMPLE t1 ref a a 5 const 2 Using where
|
||||
SELECT * FROM t1 WHERE a<=>NULL;
|
||||
a b
|
||||
NULL 99
|
||||
@ -220,16 +220,16 @@ insert into t1 values ('aaag', 'prefill-hash=3',0);
|
||||
insert into t1 values ('aaah', 'prefill-hash=6',0);
|
||||
explain select * from t1 where a='aaaa';
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 ref a a 8 const 1 Using where
|
||||
1 SIMPLE t1 ref a a 8 const 2 Using where
|
||||
explain select * from t1 where a='aaab';
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 ref a a 8 const 1 Using where
|
||||
1 SIMPLE t1 ref a a 8 const 2 Using where
|
||||
explain select * from t1 where a='aaac';
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 ref a a 8 const 1 Using where
|
||||
1 SIMPLE t1 ref a a 8 const 2 Using where
|
||||
explain select * from t1 where a='aaad';
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 ref a a 8 const 1 Using where
|
||||
1 SIMPLE t1 ref a a 8 const 2 Using where
|
||||
insert into t1 select * from t1;
|
||||
flush tables;
|
||||
explain select * from t1 where a='aaaa';
|
||||
@ -291,25 +291,25 @@ insert into t1 (name) values ('Matt'), ('Lilu'), ('Corbin'), ('Carly'),
|
||||
insert into t2 select * from t1;
|
||||
explain select * from t1 where name='matt';
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 ref heap_idx,btree_idx heap_idx 20 const 1 Using where
|
||||
1 SIMPLE t1 ref heap_idx,btree_idx btree_idx 20 const 1 Using where
|
||||
explain select * from t2 where name='matt';
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t2 ref btree_idx,heap_idx btree_idx 20 const 1 Using where
|
||||
explain select * from t1 where name='Lilu';
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 ref heap_idx,btree_idx heap_idx 20 const 1 Using where
|
||||
1 SIMPLE t1 ref heap_idx,btree_idx btree_idx 20 const 1 Using where
|
||||
explain select * from t2 where name='Lilu';
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t2 ref btree_idx,heap_idx btree_idx 20 const 1 Using where
|
||||
explain select * from t1 where name='Phil';
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 ref heap_idx,btree_idx heap_idx 20 const 1 Using where
|
||||
1 SIMPLE t1 ref heap_idx,btree_idx btree_idx 20 const 1 Using where
|
||||
explain select * from t2 where name='Phil';
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t2 ref btree_idx,heap_idx btree_idx 20 const 1 Using where
|
||||
explain select * from t1 where name='Lilu';
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 ref heap_idx,btree_idx heap_idx 20 const 1 Using where
|
||||
1 SIMPLE t1 ref heap_idx,btree_idx btree_idx 20 const 1 Using where
|
||||
explain select * from t2 where name='Lilu';
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t2 ref btree_idx,heap_idx btree_idx 20 const 1 Using where
|
||||
@ -364,5 +364,5 @@ a
|
||||
3
|
||||
explain select a from t1 where a in (1,3);
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 range a a 5 NULL 2 Using where
|
||||
1 SIMPLE t1 range a a 5 NULL 4 Using where
|
||||
drop table t1;
|
||||
|
@ -1794,3 +1794,16 @@ a hex(b)
|
||||
7 D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B2
|
||||
update t1 set b = 'three' where a = 6;
|
||||
drop table t1;
|
||||
CREATE TABLE t1(a INT, PRIMARY KEY(a)) ENGINE=InnoDB;
|
||||
CREATE TABLE t2(a INT) ENGINE=InnoDB;
|
||||
ALTER TABLE t2 ADD FOREIGN KEY (a) REFERENCES t1(a);
|
||||
ALTER TABLE t2 DROP FOREIGN KEY t2_ibfk_1;
|
||||
ALTER TABLE t2 ADD CONSTRAINT t2_ibfk_0 FOREIGN KEY (a) REFERENCES t1(a);
|
||||
ALTER TABLE t2 DROP FOREIGN KEY t2_ibfk_0;
|
||||
SHOW CREATE TABLE t2;
|
||||
Table Create Table
|
||||
t2 CREATE TABLE `t2` (
|
||||
`a` int(11) default NULL,
|
||||
KEY `t2_ibfk_0` (`a`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=latin1
|
||||
DROP TABLE t2,t1;
|
||||
|
@ -1,4 +1,4 @@
|
||||
drop table if exists t1;
|
||||
drop table if exists t1, t2, t3;
|
||||
create table t1 (kill_id int);
|
||||
insert into t1 values(connection_id());
|
||||
select ((@id := kill_id) - kill_id) from t1;
|
||||
@ -17,3 +17,15 @@ select 4;
|
||||
4
|
||||
4
|
||||
drop table t1;
|
||||
create table t1 (id int primary key);
|
||||
create table t2 (id int unsigned not null);
|
||||
insert into t2 select id from t1;
|
||||
create table t3 (kill_id int);
|
||||
insert into t3 values(connection_id());
|
||||
select id from t1 where id in (select distinct id from t2);
|
||||
select ((@id := kill_id) - kill_id) from t3;
|
||||
((@id := kill_id) - kill_id)
|
||||
0
|
||||
kill @id;
|
||||
Got one of the listed errors
|
||||
drop table t1, t2, t3;
|
||||
|
@ -554,7 +554,7 @@ Warnings:
|
||||
Note 1031 Table storage engine for 't1' doesn't have this option
|
||||
show keys from t1;
|
||||
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment
|
||||
t1 1 a 1 a NULL 1000 NULL NULL YES HASH
|
||||
t1 1 a 1 a NULL 500 NULL NULL YES HASH
|
||||
drop table t1,t2;
|
||||
create table t1 ( a tinytext, b char(1), index idx (a(1),b) );
|
||||
insert into t1 values (null,''), (null,'');
|
||||
|
@ -84,4 +84,21 @@ SET INSERT_ID=1;
|
||||
use test;
|
||||
SET TIMESTAMP=1000000000;
|
||||
insert into t1 values ("Alas");
|
||||
drop table t1, t2;
|
||||
flush logs;
|
||||
create table t3 (f text character set utf8);
|
||||
create table t4 (f text character set cp932);
|
||||
flush logs;
|
||||
rename table t3 to t03, t4 to t04;
|
||||
select HEX(f) from t03;
|
||||
HEX(f)
|
||||
E382BD
|
||||
select HEX(f) from t3;
|
||||
HEX(f)
|
||||
E382BD
|
||||
select HEX(f) from t04;
|
||||
HEX(f)
|
||||
835C
|
||||
select HEX(f) from t4;
|
||||
HEX(f)
|
||||
835C
|
||||
drop table t1, t2, t03, t04, t3, t4;
|
||||
|
@ -280,7 +280,9 @@ here is the sourced script
|
||||
In loop
|
||||
here is the sourced script
|
||||
mysqltest: At line 1: Missing argument to sleep
|
||||
mysqltest: At line 1: Missing argument to real_sleep
|
||||
mysqltest: At line 1: Invalid argument to sleep "abc"
|
||||
mysqltest: At line 1: Invalid argument to real_sleep "abc"
|
||||
1
|
||||
2
|
||||
101
|
||||
|
@ -671,3 +671,58 @@ CREATE TABLE t1 ( b INT ) PACK_KEYS = 0 ENGINE = ndb;
|
||||
select * from t1;
|
||||
b
|
||||
drop table t1;
|
||||
create table t1 (a int) engine=ndb;
|
||||
create table t2 (a int) engine=ndb;
|
||||
insert into t1 values (1);
|
||||
insert into t2 values (1);
|
||||
delete t1.* from t1, t2 where t1.a = t2.a;
|
||||
select * from t1;
|
||||
a
|
||||
select * from t2;
|
||||
a
|
||||
1
|
||||
drop table t1;
|
||||
drop table t2;
|
||||
CREATE TABLE t1 (
|
||||
i INT,
|
||||
j INT,
|
||||
x INT,
|
||||
y INT,
|
||||
z INT
|
||||
) engine=ndb;
|
||||
CREATE TABLE t2 (
|
||||
i INT,
|
||||
k INT,
|
||||
x INT,
|
||||
y INT,
|
||||
z INT
|
||||
) engine=ndb;
|
||||
CREATE TABLE t3 (
|
||||
j INT,
|
||||
k INT,
|
||||
x INT,
|
||||
y INT,
|
||||
z INT
|
||||
) engine=ndb;
|
||||
INSERT INTO t1 VALUES ( 1, 2,13,14,15);
|
||||
INSERT INTO t2 VALUES ( 1, 3,23,24,25);
|
||||
INSERT INTO t3 VALUES ( 2, 3, 1,34,35), ( 2, 3, 1,34,36);
|
||||
UPDATE t1 AS a
|
||||
INNER JOIN t2 AS b
|
||||
ON a.i = b.i
|
||||
INNER JOIN t3 AS c
|
||||
ON a.j = c.j AND b.k = c.k
|
||||
SET a.x = b.x,
|
||||
a.y = b.y,
|
||||
a.z = (
|
||||
SELECT sum(z)
|
||||
FROM t3
|
||||
WHERE y = 34
|
||||
)
|
||||
WHERE b.x = 23;
|
||||
select * from t1;
|
||||
i j x y z
|
||||
1 2 23 24 71
|
||||
drop table t1;
|
||||
drop table t2;
|
||||
drop table t3;
|
||||
|
@ -428,6 +428,13 @@ delete from t1;
|
||||
select * from t1;
|
||||
a b
|
||||
commit;
|
||||
replace t1 set a=2, b='y';
|
||||
select * from t1;
|
||||
a b
|
||||
2 y
|
||||
delete from t1;
|
||||
select * from t1;
|
||||
a b
|
||||
drop table t1;
|
||||
set autocommit=0;
|
||||
create table t1 (
|
||||
|
mysql-test/r/ndb_load.result (new file, 80 lines)
@@ -0,0 +1,80 @@
|
||||
DROP TABLE IF EXISTS t1;
|
||||
CREATE TABLE t1 (word CHAR(20) NOT NULL PRIMARY KEY) ENGINE=NDB;
|
||||
LOAD DATA INFILE '../../std_data/words.dat' INTO TABLE t1 ;
|
||||
ERROR 23000: Can't write; duplicate key in table 't1'
|
||||
DROP TABLE t1;
|
||||
CREATE TABLE t1 (word CHAR(20) NOT NULL) ENGINE=NDB;
|
||||
LOAD DATA INFILE '../../std_data/words.dat' INTO TABLE t1 ;
|
||||
SELECT * FROM t1 ORDER BY word;
|
||||
word
|
||||
Aarhus
|
||||
Aarhus
|
||||
Aaron
|
||||
Aaron
|
||||
Ababa
|
||||
Ababa
|
||||
aback
|
||||
aback
|
||||
abaft
|
||||
abaft
|
||||
abandon
|
||||
abandon
|
||||
abandoned
|
||||
abandoned
|
||||
abandoning
|
||||
abandoning
|
||||
abandonment
|
||||
abandonment
|
||||
abandons
|
||||
abandons
|
||||
abase
|
||||
abased
|
||||
abasement
|
||||
abasements
|
||||
abases
|
||||
abash
|
||||
abashed
|
||||
abashes
|
||||
abashing
|
||||
abasing
|
||||
abate
|
||||
abated
|
||||
abatement
|
||||
abatements
|
||||
abater
|
||||
abates
|
||||
abating
|
||||
Abba
|
||||
abbe
|
||||
abbey
|
||||
abbeys
|
||||
abbot
|
||||
abbots
|
||||
Abbott
|
||||
abbreviate
|
||||
abbreviated
|
||||
abbreviates
|
||||
abbreviating
|
||||
abbreviation
|
||||
abbreviations
|
||||
Abby
|
||||
abdomen
|
||||
abdomens
|
||||
abdominal
|
||||
abduct
|
||||
abducted
|
||||
abduction
|
||||
abductions
|
||||
abductor
|
||||
abductors
|
||||
abducts
|
||||
Abe
|
||||
abed
|
||||
Abel
|
||||
Abelian
|
||||
Abelson
|
||||
Aberdeen
|
||||
Abernathy
|
||||
aberrant
|
||||
aberration
|
||||
DROP TABLE t1;
|
mysql-test/r/rpl_ignore_table.result (new file, 16 lines)
@@ -0,0 +1,16 @@
|
||||
stop slave;
|
||||
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
|
||||
reset master;
|
||||
reset slave;
|
||||
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
|
||||
start slave;
|
||||
**** Test case for BUG#16487 ****
|
||||
**** Master ****
|
||||
CREATE TABLE test.t4 (a int);
|
||||
CREATE TABLE test.t1 (a int);
|
||||
UPDATE test.t4 NATURAL JOIN test.t1 SET t1.a=5;
|
||||
**** Slave ****
|
||||
SELECT * FROM t4;
|
||||
a
|
||||
DROP TABLE t1;
|
||||
DROP TABLE t4;
|
mysql-test/r/rpl_multi_update4.result (new file, 25 lines)
@@ -0,0 +1,25 @@
|
||||
stop slave;
|
||||
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
|
||||
reset master;
|
||||
reset slave;
|
||||
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
|
||||
start slave;
|
||||
drop database if exists d1;
|
||||
drop database if exists d2;
|
||||
drop database if exists d2;
|
||||
create database d1;
|
||||
create table d1.t0 (id int);
|
||||
create database d2;
|
||||
use d2;
|
||||
create table t1 (id int);
|
||||
create table t2 (id int);
|
||||
insert into t1 values (1), (2), (3), (4), (5);
|
||||
insert into t2 select id + 3 from t1;
|
||||
update t1 join t2 using (id) set t1.id = 0;
|
||||
insert into d1.t0 values (0);
|
||||
use d1;
|
||||
select * from t0 where id=0;
|
||||
id
|
||||
0
|
||||
drop database d1;
|
||||
drop database d2;
|
@ -358,3 +358,22 @@ update t2,t1 set f1=3,f2=3 where f1=f2 and f1=1;
|
||||
affected rows: 3
|
||||
info: Rows matched: 3 Changed: 3 Warnings: 0
|
||||
drop table t1,t2;
|
||||
create table t1 (a int);
|
||||
insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
|
||||
create table t2 (a int, filler1 char(200), filler2 char(200), key(a));
|
||||
insert into t2 select A.a + 10*B.a, 'filler','filler' from t1 A, t1 B;
|
||||
flush status;
|
||||
update t2 set a=3 where a=2;
|
||||
show status like 'handler_read%';
|
||||
Variable_name Value
|
||||
Handler_read_first 0
|
||||
Handler_read_key 1
|
||||
Handler_read_next 1
|
||||
Handler_read_prev 0
|
||||
Handler_read_rnd 1
|
||||
Handler_read_rnd_next 0
|
||||
drop table t1, t2;
|
||||
create table t1(f1 int, `*f2` int);
|
||||
insert into t1 values (1,1);
|
||||
update t1 set `*f2`=1;
|
||||
drop table t1;
|
||||
|
@ -422,7 +422,9 @@ drop table t1;
|
||||
#
|
||||
# Bug #14583 Bug on query using a LIKE on indexed field with ucs2_bin collation
|
||||
#
|
||||
--disable_warnings
|
||||
create table t1(f1 varchar(5) CHARACTER SET ucs2 COLLATE ucs2_bin NOT NULL) engine=InnoDB;
|
||||
--enable_warnings
|
||||
insert into t1 values('a');
|
||||
create index t1f1 on t1(f1);
|
||||
select f1 from t1 where f1 like 'a%';
|
||||
|
@ -13,4 +13,3 @@
|
||||
rpl_relayrotate : Unstable test case, bug#12429
|
||||
rpl_until : Unstable test case, bug#12429
|
||||
rpl_deadlock : Unstable test case, bug#12429
|
||||
kill : Unstable test case, bug#9712
|
||||
|
@ -357,4 +357,16 @@ SELECT a FROM t1 WHERE MATCH a AGAINST('testword' IN BOOLEAN MODE);
|
||||
SELECT a FROM t1 WHERE MATCH a AGAINST('testword\'\'' IN BOOLEAN MODE);
|
||||
DROP TABLE t1;
|
||||
|
||||
#
|
||||
# BUG#14496: Crash or strange results with prepared statement,
|
||||
# MATCH and FULLTEXT
|
||||
#
|
||||
CREATE TABLE t1 (a TEXT, FULLTEXT KEY(a));
|
||||
INSERT INTO t1 VALUES('test'),('test1'),('test');
|
||||
PREPARE stmt from "SELECT a, MATCH(a) AGAINST('test1 test') FROM t1 WHERE MATCH(a) AGAINST('test1 test')";
|
||||
EXECUTE stmt;
|
||||
EXECUTE stmt;
|
||||
DEALLOCATE PREPARE stmt;
|
||||
DROP TABLE t1;
|
||||
|
||||
# End of 4.1 tests
|
||||
|
@ -234,4 +234,15 @@ insert into t1 values ("abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcd
|
||||
insert into t1 values ("abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz");
|
||||
drop table t1;
|
||||
|
||||
#
|
||||
# Bug 12796: Record doesn't show when selecting through index
|
||||
#
|
||||
CREATE TABLE t1 (a int, key(a)) engine=heap;
|
||||
insert into t1 values (0);
|
||||
delete from t1;
|
||||
select * from t1;
|
||||
insert into t1 values (0), (1);
|
||||
select * from t1 where a = 0;
|
||||
drop table t1;
|
||||
|
||||
# End of 4.1 tests
|
||||
|
@ -1365,4 +1365,17 @@ insert into t1 values(7,_utf8 0xD0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1
|
||||
select a,hex(b) from t1 order by b;
|
||||
update t1 set b = 'three' where a = 6;
|
||||
drop table t1;
|
||||
|
||||
# Ensure that <tablename>_ibfk_0 is not mistreated as a
|
||||
# generated foreign key identifier. (Bug #16387)
|
||||
|
||||
CREATE TABLE t1(a INT, PRIMARY KEY(a)) ENGINE=InnoDB;
|
||||
CREATE TABLE t2(a INT) ENGINE=InnoDB;
|
||||
ALTER TABLE t2 ADD FOREIGN KEY (a) REFERENCES t1(a);
|
||||
ALTER TABLE t2 DROP FOREIGN KEY t2_ibfk_1;
|
||||
ALTER TABLE t2 ADD CONSTRAINT t2_ibfk_0 FOREIGN KEY (a) REFERENCES t1(a);
|
||||
ALTER TABLE t2 DROP FOREIGN KEY t2_ibfk_0;
|
||||
SHOW CREATE TABLE t2;
|
||||
DROP TABLE t2,t1;
|
||||
|
||||
# End of 4.1 tests
|
||||
|
@ -12,7 +12,7 @@ connect (con2, localhost, root,,);
|
||||
#remember id of con1
|
||||
connection con1;
|
||||
--disable_warnings
|
||||
drop table if exists t1;
|
||||
drop table if exists t1, t2, t3;
|
||||
--enable_warnings
|
||||
|
||||
create table t1 (kill_id int);
|
||||
@ -40,4 +40,51 @@ connection con2;
|
||||
select 4;
|
||||
drop table t1;
|
||||
|
||||
disconnect con2;
|
||||
connection default;
|
||||
#
|
||||
# BUG#14851: killing long running subquery processed via a temporary table.
|
||||
#
|
||||
create table t1 (id int primary key);
|
||||
create table t2 (id int unsigned not null);
|
||||
|
||||
connect (conn1, localhost, root,,);
|
||||
connection conn1;
|
||||
|
||||
-- disable_result_log
|
||||
-- disable_query_log
|
||||
let $1 = 4096;
|
||||
while ($1)
|
||||
{
|
||||
eval insert into t1 values ($1);
|
||||
dec $1;
|
||||
}
|
||||
-- enable_query_log
|
||||
-- enable_result_log
|
||||
|
||||
insert into t2 select id from t1;
|
||||
|
||||
create table t3 (kill_id int);
|
||||
insert into t3 values(connection_id());
|
||||
|
||||
-- disable_result_log
|
||||
send select id from t1 where id in (select distinct id from t2);
|
||||
-- enable_result_log
|
||||
|
||||
connect (conn2, localhost, root,,);
|
||||
connection conn2;
|
||||
select ((@id := kill_id) - kill_id) from t3;
|
||||
-- sleep 1
|
||||
kill @id;
|
||||
|
||||
connection conn1;
|
||||
-- error 1053,2013
|
||||
reap;
|
||||
|
||||
disconnect conn1;
|
||||
disconnect conn2;
|
||||
connection default;
|
||||
|
||||
drop table t1, t2, t3;
|
||||
|
||||
# End of 4.1 tests
|
||||
|
@ -98,7 +98,24 @@ select "--- --position --" as "";
|
||||
--replace_result $MYSQL_TEST_DIR MYSQL_TEST_DIR
|
||||
--exec $MYSQL_BINLOG --short-form --local-load=$MYSQL_TEST_DIR/var/tmp/ --read-from-remote-server --position=27 --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000002
|
||||
|
||||
|
||||
# Bug#16217 (mysql client did not know how not switch its internal charset)
|
||||
flush logs;
|
||||
create table t3 (f text character set utf8);
|
||||
create table t4 (f text character set cp932);
|
||||
--exec $MYSQL --default-character-set=utf8 test -e "insert into t3 values(_utf8'ソ')"
|
||||
--exec $MYSQL --default-character-set=cp932 test -e "insert into t4 values(_cp932'ƒ\');"
|
||||
flush logs;
|
||||
rename table t3 to t03, t4 to t04;
|
||||
--exec $MYSQL_BINLOG --short-form $MYSQL_TEST_DIR/var/log/master-bin.000004 | $MYSQL --default-character-set=utf8
|
||||
# original and recovered data must be equal
|
||||
select HEX(f) from t03;
|
||||
select HEX(f) from t3;
|
||||
select HEX(f) from t04;
|
||||
select HEX(f) from t4;
|
||||
|
||||
|
||||
# clean up
|
||||
drop table t1, t2;
|
||||
drop table t1, t2, t03, t04, t3, t4;
|
||||
|
||||
# End of 4.1 tests

@ -605,10 +605,14 @@ real_sleep 1;
# Missing parameter
--error 1
--exec echo "sleep ;" | $MYSQL_TEST 2>&1
--error 1
--exec echo "real_sleep ;" | $MYSQL_TEST 2>&1

# Illegal parameter
--error 1
--exec echo "sleep abc;" | $MYSQL_TEST 2>&1
--error 1
--exec echo "real_sleep abc;" | $MYSQL_TEST 2>&1

# ----------------------------------------------------------------------------
# Test inc

@ -614,4 +614,70 @@ CREATE TABLE t1 ( b INT ) PACK_KEYS = 0 ENGINE = ndb;
select * from t1;
drop table t1;

#
# Bug #17249 delete statement with join where clause fails
# when table do not have pk
#

create table t1 (a int) engine=ndb;
create table t2 (a int) engine=ndb;
insert into t1 values (1);
insert into t2 values (1);
delete t1.* from t1, t2 where t1.a = t2.a;
select * from t1;
select * from t2;
drop table t1;
drop table t2;

#
# Bug #17257 update fails for inner joins if tables
# do not have Primary Key
#

CREATE TABLE t1 (
i INT,
j INT,
x INT,
y INT,
z INT
) engine=ndb;

CREATE TABLE t2 (
i INT,
k INT,
x INT,
y INT,
z INT
) engine=ndb;

CREATE TABLE t3 (
j INT,
k INT,
x INT,
y INT,
z INT
) engine=ndb;

INSERT INTO t1 VALUES ( 1, 2,13,14,15);
INSERT INTO t2 VALUES ( 1, 3,23,24,25);
INSERT INTO t3 VALUES ( 2, 3, 1,34,35), ( 2, 3, 1,34,36);

UPDATE t1 AS a
INNER JOIN t2 AS b
ON a.i = b.i
INNER JOIN t3 AS c
ON a.j = c.j AND b.k = c.k
SET a.x = b.x,
a.y = b.y,
a.z = (
SELECT sum(z)
FROM t3
WHERE y = 34
)
WHERE b.x = 23;
select * from t1;
drop table t1;
drop table t2;
drop table t3;

# End of 4.1 tests

@ -338,7 +338,7 @@ select * from t1 order by a;
drop table t1;
drop database test2;

# -- bug-5252 tinytext crashes plus no-commit result --
# -- bug-5252 tinytext crashes + no-commit result + replace --

set autocommit=0;
create table t1 (

@ -352,6 +352,10 @@ select * from t1;
delete from t1;
select * from t1;
commit;
replace t1 set a=2, b='y';
select * from t1;
delete from t1;
select * from t1;
drop table t1;

# -- bug-5013 insert empty string to text --

24 mysql-test/t/ndb_load.test Normal file
@ -0,0 +1,24 @@
-- source include/have_ndb.inc
-- source include/not_embedded.inc

--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings

#
# Basic test for different types of loading data
#

# should give duplicate key
CREATE TABLE t1 (word CHAR(20) NOT NULL PRIMARY KEY) ENGINE=NDB;
--error 1022
LOAD DATA INFILE '../../std_data/words.dat' INTO TABLE t1 ;
DROP TABLE t1;

# now without a primary key we should be ok
CREATE TABLE t1 (word CHAR(20) NOT NULL) ENGINE=NDB;
LOAD DATA INFILE '../../std_data/words.dat' INTO TABLE t1 ;
SELECT * FROM t1 ORDER BY word;
DROP TABLE t1;

# End of 4.1 tests

1 mysql-test/t/rpl_ignore_table-slave.opt Normal file
@ -0,0 +1 @@
--replicate-ignore-table=test.t1 --replicate-ignore-table=test.t2 --replicate-ignore-table=test.t3

28 mysql-test/t/rpl_ignore_table.test Normal file
@ -0,0 +1,28 @@
source include/master-slave.inc;

#
# BUG#16487
#
# Requirement:
# Multi-updates on ignored tables should not fail even if the slave does
# not have the ignored tables.
#
# Note table t1, t2, and t3 are ignored in the option file to this test.
#

--echo **** Test case for BUG#16487 ****
--echo **** Master ****
connection master;
CREATE TABLE test.t4 (a int);
CREATE TABLE test.t1 (a int);

# Expect: The row must *not* by updated on slave, since t1 is ignored
UPDATE test.t4 NATURAL JOIN test.t1 SET t1.a=5;

--echo **** Slave ****
sync_slave_with_master;
SELECT * FROM t4;

connection master;
DROP TABLE t1;
DROP TABLE t4;

1 mysql-test/t/rpl_multi_update4-slave.opt Normal file
@ -0,0 +1 @@
--replicate-wild-do-table=d1.%

44 mysql-test/t/rpl_multi_update4.test Normal file
@ -0,0 +1,44 @@
# Let's verify that multi-update is not always skipped by slave if
# some replicate-* rules exist.
# (BUG#15699)

source include/master-slave.inc;

### Clean-up

connection master;
--disable_warnings
drop database if exists d1;
drop database if exists d2;

connection slave;
drop database if exists d2;
--enable_warnings

### Test

connection master;
create database d1; # accepted by slave
create table d1.t0 (id int);
create database d2; # ignored by slave
use d2;
create table t1 (id int);
create table t2 (id int);
insert into t1 values (1), (2), (3), (4), (5);
insert into t2 select id + 3 from t1;
# a problematic query which must be filter out by slave
update t1 join t2 using (id) set t1.id = 0;
insert into d1.t0 values (0); # replication works

sync_slave_with_master;
use d1;
#connection slave;
select * from t0 where id=0; # must find

### Clean-up
connection master;
drop database d1;
drop database d2;

# End of test

@ -287,4 +287,23 @@ update t1 set f1=1 where f1=3;
update t2,t1 set f1=3,f2=3 where f1=f2 and f1=1;
--disable_info
drop table t1,t2;

# BUG#15935
create table t1 (a int);
insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
create table t2 (a int, filler1 char(200), filler2 char(200), key(a));
insert into t2 select A.a + 10*B.a, 'filler','filler' from t1 A, t1 B;
flush status;
update t2 set a=3 where a=2;
show status like 'handler_read%';
drop table t1, t2;

#
# Bug #16510 Updating field named like '*name' caused server crash
#
create table t1(f1 int, `*f2` int);
insert into t1 values (1,1);
update t1 set `*f2`=1;
drop table t1;
# End of 4.1 tests

@ -152,6 +152,7 @@ void my_end(int infoflag)
DBUG_PRINT("error",("%s",errbuff[0]));
}
}
free_charsets();
my_once_free();

if ((infoflag & MY_GIVE_INFO) || print_info)

@ -85,6 +85,11 @@
#define CFG_DB_BACKUP_DATADIR 158

#define CFG_DB_MAX_OPEN_FILES 159
#define CFG_DB_DISK_PAGE_BUFFER_MEMORY 160 /* used from 5.1 */
#define CFG_DB_STRING_MEMORY 161 /* used from 5.1 */
#define CFG_DB_INITIAL_OPEN_FILES 162 /* used from 5.1 */

#define CFG_DB_DATA_MEM_2 199 /* used in special build in 5.1 */

#define CFG_NODE_ARBIT_RANK 200
#define CFG_NODE_ARBIT_DELAY 201

@ -2397,7 +2397,19 @@ Dbdict::restartCreateTab_readTableConf(Signal* signal,
Uint32 sz = c_readTableRecord.noOfPages * ZSIZE_OF_PAGES_IN_WORDS;
SimplePropertiesLinearReader r(&pageRecPtr.p->word[0], sz);
handleTabInfoInit(r, &parseRecord);
ndbrequire(parseRecord.errorCode == 0);
if (parseRecord.errorCode != 0)
{
char buf[255];
BaseString::snprintf(buf, sizeof(buf),
"Unable to restart, fail while creating table %d"
" error: %d. Most likely change of configuration",
c_readTableRecord.tableId,
parseRecord.errorCode);
progError(__LINE__,
ERR_INVALID_CONFIG,
buf);
ndbrequire(parseRecord.errorCode == 0);
}

/* ---------------------------------------------------------------- */
// We have read the table description from disk as part of system restart.

@ -774,7 +774,7 @@ private:
//------------------------------------
// Methods for LCP functionality
//------------------------------------
void checkKeepGci(Uint32 replicaStartIndex);
void checkKeepGci(TabRecordPtr, Uint32, Fragmentstore*, Uint32);
void checkLcpStart(Signal *, Uint32 lineNo);
void checkStartMoreLcp(Signal *, Uint32 nodeId);
bool reportLcpCompletion(const class LcpFragRep *);

@ -1292,7 +1292,7 @@ private:
}

Uint32 lcpStart;
Uint32 lcpStartGcp;
Uint32 lcpStopGcp;
Uint32 keepGci; /* USED TO CALCULATE THE GCI TO KEEP AFTER A LCP */
Uint32 oldestRestorableGci;

@ -1361,7 +1361,8 @@ private:
Uint32 cstarttype;
Uint32 csystemnodes;
Uint32 currentgcp;

Uint32 c_newest_restorable_gci;

enum GcpMasterTakeOverState {
GMTOS_IDLE = 0,
GMTOS_INITIAL = 1,

@ -674,6 +674,7 @@ void Dbdih::execCOPY_GCIREQ(Signal* signal)
jam();
coldgcp = SYSFILE->newestRestorableGCI;
crestartGci = SYSFILE->newestRestorableGCI;
c_newest_restorable_gci = SYSFILE->newestRestorableGCI;
Sysfile::setRestartOngoing(SYSFILE->systemRestartBits);
currentgcp = coldgcp + 1;
cnewgcp = coldgcp + 1;

@ -692,6 +693,7 @@ void Dbdih::execCOPY_GCIREQ(Signal* signal)
ok = true;
jam();
cgcpParticipantState = GCP_PARTICIPANT_COPY_GCI_RECEIVED;
c_newest_restorable_gci = SYSFILE->newestRestorableGCI;
setNodeInfo(signal);
break;
}//if

@ -7749,6 +7751,8 @@ void Dbdih::execCOPY_GCICONF(Signal* signal)
signal->theData[1] = coldgcp;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);

c_newest_restorable_gci = coldgcp;

CRASH_INSERTION(7004);
emptyWaitGCPMasterQueue(signal);
cgcpStatus = GCP_READY;

@ -9155,7 +9159,7 @@ void Dbdih::checkTcCounterLab(Signal* signal)
}//if
c_lcpState.ctimer += 32;
if ((c_nodeStartMaster.blockLcp == true) ||
((c_lcpState.lcpStartGcp + 1) > currentgcp)) {
(c_lcpState.lcpStopGcp >= c_newest_restorable_gci)) {
jam();
/* --------------------------------------------------------------------- */
// No reason to start juggling the states and checking for start of LCP if

@ -9238,7 +9242,6 @@ void Dbdih::execTCGETOPSIZECONF(Signal* signal)
/* ----------------------------------------------------------------------- */
c_lcpState.ctimer = 0;
c_lcpState.keepGci = coldgcp;
c_lcpState.lcpStartGcp = currentgcp;
/* ----------------------------------------------------------------------- */
/* UPDATE THE NEW LATEST LOCAL CHECKPOINT ID. */
/* ----------------------------------------------------------------------- */

@ -9310,7 +9313,7 @@ void Dbdih::calculateKeepGciLab(Signal* signal, Uint32 tableId, Uint32 fragId)
cnoOfActiveTables++;
FragmentstorePtr fragPtr;
getFragstore(tabPtr.p, fragId, fragPtr);
checkKeepGci(fragPtr.p->storedReplicas);
checkKeepGci(tabPtr, fragId, fragPtr.p, fragPtr.p->storedReplicas);
fragId++;
if (fragId >= tabPtr.p->totalfragments) {
jam();

@ -10168,6 +10171,7 @@ void Dbdih::allNodesLcpCompletedLab(Signal* signal)
signal->theData[0] = EventReport::LocalCheckpointCompleted; //Event type
signal->theData[1] = SYSFILE->latestLCP_ID;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
c_lcpState.lcpStopGcp = c_newest_restorable_gci;

/**
* Start checking for next LCP

@ -10522,7 +10526,8 @@ void Dbdih::checkEscalation()
/* DESCRIPTION: CHECK FOR MINIMUM GCI RESTORABLE WITH NEW LOCAL */
/* CHECKPOINT. */
/*************************************************************************/
void Dbdih::checkKeepGci(Uint32 replicaStartIndex)
void Dbdih::checkKeepGci(TabRecordPtr tabPtr, Uint32 fragId, Fragmentstore*,
Uint32 replicaStartIndex)
{
ReplicaRecordPtr ckgReplicaPtr;
ckgReplicaPtr.i = replicaStartIndex;

@ -10544,7 +10549,6 @@ void Dbdih::checkKeepGci(Uint32 replicaStartIndex)
if (oldestRestorableGci > c_lcpState.oldestRestorableGci) {
jam();
c_lcpState.oldestRestorableGci = oldestRestorableGci;
ndbrequire(((int)c_lcpState.oldestRestorableGci) >= 0);
}//if
ckgReplicaPtr.i = ckgReplicaPtr.p->nextReplica;
}//while

@ -10838,7 +10842,7 @@ void Dbdih::findMinGci(ReplicaRecordPtr fmgReplicaPtr,
do {
ndbrequire(lcpNo < MAX_LCP_STORED);
if (fmgReplicaPtr.p->lcpStatus[lcpNo] == ZVALID &&
fmgReplicaPtr.p->maxGciStarted[lcpNo] <= coldgcp)
fmgReplicaPtr.p->maxGciStarted[lcpNo] < c_newest_restorable_gci)
{
jam();
keepGci = fmgReplicaPtr.p->maxGciCompleted[lcpNo];

@ -10960,7 +10964,7 @@ void Dbdih::initCommonData()

c_lcpState.clcpDelay = 0;
c_lcpState.lcpStart = ZIDLE;
c_lcpState.lcpStartGcp = 0;
c_lcpState.lcpStopGcp = 0;
c_lcpState.setLcpStatus(LCP_STATUS_IDLE, __LINE__);
c_lcpState.currentFragment.tableId = 0;
c_lcpState.currentFragment.fragmentId = 0;

@ -10996,6 +11000,7 @@ void Dbdih::initCommonData()
csystemnodes = 0;
c_updateToLock = RNIL;
currentgcp = 0;
c_newest_restorable_gci = 0;
cverifyQueueCounter = 0;
cwaitLcpSr = false;

@ -11071,6 +11076,7 @@ void Dbdih::initRestartInfo()
currentgcp = 2;
cnewgcp = 2;
crestartGci = 1;
c_newest_restorable_gci = 1;

SYSFILE->keepGCI = 1;
SYSFILE->oldestRestorableGCI = 1;

@ -13042,9 +13048,9 @@ Dbdih::execDUMP_STATE_ORD(Signal* signal)
if (signal->theData[0] == 7001) {
infoEvent("c_lcpState.keepGci = %d",
c_lcpState.keepGci);
infoEvent("c_lcpState.lcpStatus = %d, clcpStartGcp = %d",
infoEvent("c_lcpState.lcpStatus = %d, clcpStopGcp = %d",
c_lcpState.lcpStatus,
c_lcpState.lcpStartGcp);
c_lcpState.lcpStopGcp);
infoEvent("cgcpStartCounter = %d, cimmediateLcpStart = %d",
cgcpStartCounter, c_lcpState.immediateLcpStart);
}//if

@ -13225,8 +13231,8 @@ Dbdih::execDUMP_STATE_ORD(Signal* signal)
infoEvent("lcpStatus = %d (update place = %d) ",
c_lcpState.lcpStatus, c_lcpState.lcpStatusUpdatedPlace);
infoEvent
("lcpStart = %d lcpStartGcp = %d keepGci = %d oldestRestorable = %d",
c_lcpState.lcpStart, c_lcpState.lcpStartGcp,
("lcpStart = %d lcpStopGcp = %d keepGci = %d oldestRestorable = %d",
c_lcpState.lcpStart, c_lcpState.lcpStopGcp,
c_lcpState.keepGci, c_lcpState.oldestRestorableGci);

infoEvent

@ -14767,7 +14767,9 @@ void Dblqh::execSr(Signal* signal)
signal->theData[4] = logFilePtr.p->currentFilepage;
signal->theData[5] = logFilePtr.p->currentMbyte;
signal->theData[6] = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
sendSignal(cownref, GSN_DEBUG_SIG, signal, 7, JBA);
signal->theData[7] = ~0;
signal->theData[8] = __LINE__;
sendSignal(cownref, GSN_DEBUG_SIG, signal, 9, JBA);
return;
}//if
}//if

@ -14833,7 +14835,8 @@ void Dblqh::execSr(Signal* signal)
signal->theData[5] = logFilePtr.p->currentFilepage;
signal->theData[6] = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
signal->theData[7] = logWord;
sendSignal(cownref, GSN_DEBUG_SIG, signal, 8, JBA);
signal->theData[8] = __LINE__;
sendSignal(cownref, GSN_DEBUG_SIG, signal, 9, JBA);
return;
break;
}//switch

@ -14862,8 +14865,9 @@ void Dblqh::execDEBUG_SIG(Signal* signal)

char buf[100];
BaseString::snprintf(buf, 100,
"Error while reading REDO log.\n"
"Error while reading REDO log. from %d\n"
"D=%d, F=%d Mb=%d FP=%d W1=%d W2=%d",
signal->theData[8],
signal->theData[2], signal->theData[3], signal->theData[4],
signal->theData[5], signal->theData[6], signal->theData[7]);

@ -15439,6 +15443,10 @@ void Dblqh::readSrFourthZeroLab(Signal* signal)
// to read a page from file.
lfoPtr.p->lfoState = LogFileOperationRecord::WRITE_SR_INVALIDATE_PAGES;

/**
* Make sure we dont release zero page
*/
seizeLogpage(signal);
invalidateLogAfterLastGCI(signal);
return;
}//Dblqh::readSrFourthZeroLab()

@ -16096,8 +16104,22 @@ void Dblqh::findLogfile(Signal* signal,
}//if
locLogFilePtr.i = locLogFilePtr.p->nextLogFile;
loopCount++;
if (loopCount >= flfLogPartPtr.p->noLogFiles &&
getNodeState().startLevel != NodeState::SL_STARTED)
{
goto error;
}
ndbrequire(loopCount < flfLogPartPtr.p->noLogFiles);
}//while

error:
char buf[255];
BaseString::snprintf(buf, sizeof(buf),
"Unable to restart, failed while reading redo."
" Likely invalid change of configuration");
progError(__LINE__,
ERR_INVALID_CONFIG,
buf);
}//Dblqh::findLogfile()

/* ------------------------------------------------------------------------- */

@ -959,6 +959,8 @@ int
NdbBlob::deletePartsUnknown(Uint32 part)
{
DBG("deletePartsUnknown [in] part=" << part << " count=all");
if (thePartSize == 0) // tinyblob
return 0;
static const unsigned maxbat = 256;
static const unsigned minbat = 1;
unsigned bat = minbat;

@ -841,9 +841,6 @@ insertPk(int style)
CHK(g_con->execute(NoCommit) == 0);
CHK(writeBlobData(tup) == 0);
}
// just another trap
if (urandom(10) == 0)
CHK(g_con->execute(NoCommit) == 0);
if (++n == g_opt.m_batch) {
CHK(g_con->execute(Commit) == 0);
g_ndb->closeTransaction(g_con);

@ -965,21 +962,31 @@ static int
deletePk()
{
DBG("--- deletePk ---");
unsigned n = 0;
CHK((g_con = g_ndb->startTransaction()) != 0);
for (unsigned k = 0; k < g_opt.m_rows; k++) {
Tup& tup = g_tups[k];
DBG("deletePk pk1=" << hex << tup.m_pk1);
CHK((g_con = g_ndb->startTransaction()) != 0);
CHK((g_opr = g_con->getNdbOperation(g_opt.m_tname)) != 0);
CHK(g_opr->deleteTuple() == 0);
CHK(g_opr->equal("PK1", tup.m_pk1) == 0);
if (g_opt.m_pk2len != 0)
CHK(g_opr->equal("PK2", tup.m_pk2) == 0);
CHK(g_con->execute(Commit) == 0);
g_ndb->closeTransaction(g_con);
if (++n == g_opt.m_batch) {
CHK(g_con->execute(Commit) == 0);
g_ndb->closeTransaction(g_con);
CHK((g_con = g_ndb->startTransaction()) != 0);
n = 0;
}
g_opr = 0;
g_con = 0;
tup.m_exists = false;
}
if (n != 0) {
CHK(g_con->execute(Commit) == 0);
n = 0;
}
g_ndb->closeTransaction(g_con);
g_con = 0;
return 0;
}

@ -1082,19 +1089,27 @@ static int
deleteIdx()
{
DBG("--- deleteIdx ---");
unsigned n = 0;
CHK((g_con = g_ndb->startTransaction()) != 0);
for (unsigned k = 0; k < g_opt.m_rows; k++) {
Tup& tup = g_tups[k];
DBG("deleteIdx pk1=" << hex << tup.m_pk1);
CHK((g_con = g_ndb->startTransaction()) != 0);
CHK((g_opx = g_con->getNdbIndexOperation(g_opt.m_x1name, g_opt.m_tname)) != 0);
CHK(g_opx->deleteTuple() == 0);
CHK(g_opx->equal("PK2", tup.m_pk2) == 0);
CHK(g_con->execute(Commit) == 0);
g_ndb->closeTransaction(g_con);
if (++n == g_opt.m_batch) {
CHK(g_con->execute(Commit) == 0);
g_ndb->closeTransaction(g_con);
CHK((g_con = g_ndb->startTransaction()) != 0);
n = 0;
}
g_opx = 0;
g_con = 0;
tup.m_exists = false;
}
if (n != 0) {
CHK(g_con->execute(Commit) == 0);
n = 0;
}
return 0;
}

@ -1225,20 +1240,49 @@ deleteScan(bool idx)
CHK(g_ops->getValue("PK2", tup.m_pk2) != 0);
CHK(g_con->execute(NoCommit) == 0);
unsigned rows = 0;
unsigned n = 0;
while (1) {
int ret;
tup.m_pk1 = (Uint32)-1;
memset(tup.m_pk2, 'x', g_opt.m_pk2len);
CHK((ret = rs->nextResult()) == 0 || ret == 1);
CHK((ret = rs->nextResult(true)) == 0 || ret == 1);
if (ret == 1)
break;
DBG("deleteScan" << (idx ? "Idx" : "") << " pk1=" << hex << tup.m_pk1);
CHK(rs->deleteTuple() == 0);
CHK(g_con->execute(NoCommit) == 0);
Uint32 k = tup.m_pk1 - g_opt.m_pk1off;
CHK(k < g_opt.m_rows && g_tups[k].m_exists);
g_tups[k].m_exists = false;
rows++;
while (1) {
DBG("deleteScan" << (idx ? "Idx" : "") << " pk1=" << hex << tup.m_pk1);
Uint32 k = tup.m_pk1 - g_opt.m_pk1off;
CHK(k < g_opt.m_rows && g_tups[k].m_exists);
g_tups[k].m_exists = false;
CHK(rs->deleteTuple() == 0);
rows++;
tup.m_pk1 = (Uint32)-1;
memset(tup.m_pk2, 'x', g_opt.m_pk2len);
CHK((ret = rs->nextResult(false)) == 0 || ret == 1 || ret == 2);
if (++n == g_opt.m_batch || ret == 2) {
DBG("execute batch: n=" << n << " ret=" << ret);
switch (0) {
case 0: // works normally
CHK(g_con->execute(NoCommit) == 0);
CHK(true || g_con->restart() == 0);
break;
case 1: // nonsense - g_con is invalid for 2nd batch
CHK(g_con->execute(Commit) == 0);
CHK(true || g_con->restart() == 0);
break;
case 2: // DBTC sendSignalErrorRefuseLab
CHK(g_con->execute(NoCommit) == 0);
CHK(g_con->restart() == 0);
break;
case 3: // 266 time-out
CHK(g_con->execute(Commit) == 0);
CHK(g_con->restart() == 0);
break;
}
n = 0;
}
if (ret == 2)
break;
}
}
CHK(g_con->execute(Commit) == 0);
g_ndb->closeTransaction(g_con);

@ -1488,13 +1532,16 @@ testperf()
// insert char (one trans)
{
DBG("--- insert char ---");
char b[20];
t1.on();
CHK((g_con = g_ndb->startTransaction()) != 0);
for (Uint32 k = 0; k < g_opt.m_rowsperf; k++) {
CHK((g_opr = g_con->getNdbOperation(tab.getName())) != 0);
CHK(g_opr->insertTuple() == 0);
CHK(g_opr->equal(cA, (char*)&k) == 0);
CHK(g_opr->setValue(cB, "b") == 0);
memset(b, 0x20, sizeof(b));
b[0] = 'b';
CHK(g_opr->setValue(cB, b) == 0);
CHK(g_con->execute(NoCommit) == 0);
}
t1.off(g_opt.m_rowsperf);

@ -1531,12 +1578,15 @@ testperf()
{
DBG("--- insert for read test ---");
unsigned n = 0;
char b[20];
CHK((g_con = g_ndb->startTransaction()) != 0);
for (Uint32 k = 0; k < g_opt.m_rowsperf; k++) {
CHK((g_opr = g_con->getNdbOperation(tab.getName())) != 0);
CHK(g_opr->insertTuple() == 0);
CHK(g_opr->equal(cA, (char*)&k) == 0);
CHK(g_opr->setValue(cB, "b") == 0);
memset(b, 0x20, sizeof(b));
b[0] = 'b';
CHK(g_opr->setValue(cB, b) == 0);
CHK((g_bh1 = g_opr->getBlobHandle(cC)) != 0);
CHK((g_bh1->setValue("c", 1) == 0));
if (++n == g_opt.m_batch) {

@ -1570,7 +1620,7 @@ testperf()
a = (Uint32)-1;
b[0] = 0;
CHK(g_con->execute(NoCommit) == 0);
CHK(a == k && strcmp(b, "b") == 0);
CHK(a == k && b[0] == 'b');
}
CHK(g_con->execute(Commit) == 0);
t1.off(g_opt.m_rowsperf);

@ -1596,7 +1646,7 @@ testperf()
CHK(g_con->execute(NoCommit) == 0);
Uint32 m = 20;
CHK(g_bh1->readData(c, m) == 0);
CHK(a == k && m == 1 && strcmp(c, "c") == 0);
CHK(a == k && m == 1 && c[0] == 'c');
}
CHK(g_con->execute(Commit) == 0);
t2.off(g_opt.m_rowsperf);

@ -1629,7 +1679,7 @@ testperf()
CHK((ret = rs->nextResult(true)) == 0 || ret == 1);
if (ret == 1)
break;
CHK(a < g_opt.m_rowsperf && strcmp(b, "b") == 0);
CHK(a < g_opt.m_rowsperf && b[0] == 'b');
n++;
}
CHK(n == g_opt.m_rowsperf);

@ -1661,7 +1711,7 @@ testperf()
break;
Uint32 m = 20;
CHK(g_bh1->readData(c, m) == 0);
CHK(a < g_opt.m_rowsperf && m == 1 && strcmp(c, "c") == 0);
CHK(a < g_opt.m_rowsperf && m == 1 && c[0] == 'c');
n++;
}
CHK(n == g_opt.m_rowsperf);

@ -22,7 +22,8 @@
#include <NdbSleep.h>
#include <NDBT.hpp>

static int clear_table(Ndb* pNdb, const NdbDictionary::Table* pTab, int parallelism=240);
static int clear_table(Ndb* pNdb, const NdbDictionary::Table* pTab,
bool commit_across_open_cursor, int parallelism=240);

NDB_STD_OPTS_VARS;

@ -81,8 +82,18 @@ int main(int argc, char** argv){
ndbout << " Table " << argv[i] << " does not exist!" << endl;
return NDBT_ProgramExit(NDBT_WRONGARGS);
}
// Check if we have any blobs
bool commit_across_open_cursor = true;
for (int j = 0; j < pTab->getNoOfColumns(); j++) {
NdbDictionary::Column::Type t = pTab->getColumn(j)->getType();
if (t == NdbDictionary::Column::Blob ||
t == NdbDictionary::Column::Text) {
commit_across_open_cursor = false;
break;
}
}
ndbout << "Deleting all from " << argv[i] << "...";
if(clear_table(&MyNdb, pTab) == NDBT_FAILED){
if(clear_table(&MyNdb, pTab, commit_across_open_cursor) == NDBT_FAILED){
res = NDBT_FAILED;
ndbout << "FAILED" << endl;
}

@ -91,7 +102,8 @@ int main(int argc, char** argv){
}

int clear_table(Ndb* pNdb, const NdbDictionary::Table* pTab, int parallelism)
int clear_table(Ndb* pNdb, const NdbDictionary::Table* pTab,
bool commit_across_open_cursor, int parallelism)
{
// Scan all records exclusive and delete
// them one by one

@ -153,8 +165,12 @@ int clear_table(Ndb* pNdb, const NdbDictionary::Table* pTab, int parallelism)
} while((check = rs->nextResult(false)) == 0);

if(check != -1){
check = pTrans->execute(Commit);
pTrans->restart();
if (commit_across_open_cursor) {
check = pTrans->execute(Commit);
pTrans->restart(); // new tx id
} else {
check = pTrans->execute(NoCommit);
}
}

err = pTrans->getNdbError();

@ -180,6 +196,10 @@ int clear_table(Ndb* pNdb, const NdbDictionary::Table* pTab, int parallelism)
}
goto failed;
}
if (! commit_across_open_cursor && pTrans->execute(Commit) != 0) {
err = pTrans->getNdbError();
goto failed;
}
pNdb->closeTransaction(pTrans);
return NDBT_OK;
}

@ -315,7 +315,7 @@ BASE=$BASE2
if [ x"@GXX@" = x"yes" ] ; then
gcclib=`@CC@ --print-libgcc-file`
if [ $? -ne 0 ] ; then
print "Warning: Couldn't find libgcc.a!"
echo "Warning: Couldn't find libgcc.a!"
else
$CP $gcclib $BASE/lib/libmygcc.a
fi

@ -68,7 +68,7 @@ int ha_heap::open(const char *name, int mode, uint test_if_locked)
ha_heap::info(), which is always called before key statistics are
used.
*/
key_stats_ok= FALSE;
key_stat_version= file->s->key_stat_version-1;
}
return (file ? 0 : 1);
}

@ -114,14 +114,21 @@ void ha_heap::update_key_stats()
continue;
if (key->algorithm != HA_KEY_ALG_BTREE)
{
ha_rows hash_buckets= file->s->keydef[i].hash_buckets;
key->rec_per_key[key->key_parts-1]=
hash_buckets ? file->s->records/hash_buckets : 0;
if (key->flags & HA_NOSAME)
key->rec_per_key[key->key_parts-1]= 1;
else
{
ha_rows hash_buckets= file->s->keydef[i].hash_buckets;
uint no_records= hash_buckets ? file->s->records/hash_buckets : 2;
if (no_records < 2)
no_records= 2;
key->rec_per_key[key->key_parts-1]= no_records;
}
}
}
records_changed= 0;
/* At the end of update_key_stats() we can proudly claim they are OK. */
key_stats_ok= TRUE;
key_stat_version= file->s->key_stat_version;
}

int ha_heap::write_row(byte * buf)
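
Side note, not part of the commit: at the SQL level the hunk above changes the optimizer's row estimate for equality lookups on HASH keys of MEMORY/HEAP tables. A UNIQUE hash key is costed at exactly 1 row; a non-unique one at roughly records/hash_buckets with a floor of 2, where the old formula could yield 0. The session below is a hypothetical illustration (table name and data invented), showing where that estimate surfaces.

-- Illustrative only: "rows" in EXPLAIN for the ref lookup is driven by the
-- rec_per_key value computed in update_key_stats() above.
CREATE TABLE heap_est (a INT NOT NULL, KEY (a)) ENGINE=MEMORY;
INSERT INTO heap_est VALUES (1),(1),(1),(2),(2),(3);
EXPLAIN SELECT * FROM heap_est WHERE a = 1;
DROP TABLE heap_est;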

@ -135,7 +142,13 @@ int ha_heap::write_row(byte * buf)
res= heap_write(file,buf);
if (!res && ++records_changed*HEAP_STATS_UPDATE_THRESHOLD >
file->s->records)
key_stats_ok= FALSE;
{
/*
We can perform this safely since only one writer at the time is
allowed on the table.
*/
file->s->key_stat_version++;
}
return res;
}

@ -148,7 +161,13 @@ int ha_heap::update_row(const byte * old_data, byte * new_data)
res= heap_update(file,old_data,new_data);
if (!res && ++records_changed*HEAP_STATS_UPDATE_THRESHOLD >
file->s->records)
key_stats_ok= FALSE;
{
/*
We can perform this safely since only one writer at the time is
allowed on the table.
*/
file->s->key_stat_version++;
}
return res;
}

@ -159,7 +178,13 @@ int ha_heap::delete_row(const byte * buf)
res= heap_delete(file,buf);
if (!res && table->tmp_table == NO_TMP_TABLE &&
++records_changed*HEAP_STATS_UPDATE_THRESHOLD > file->s->records)
key_stats_ok= FALSE;
{
/*
We can perform this safely since only one writer at the time is
allowed on the table.
*/
file->s->key_stat_version++;
}
return res;
}

@ -277,7 +302,7 @@ void ha_heap::info(uint flag)
have to update the key statistics. Hoping that a table lock is now
in place.
*/
if (! key_stats_ok)
if (key_stat_version != file->s->key_stat_version)
update_key_stats();
}

@ -290,7 +315,13 @@ int ha_heap::delete_all_rows()
{
heap_clear(file);
if (table->tmp_table == NO_TMP_TABLE)
key_stats_ok= FALSE;
{
/*
We can perform this safely since only one writer at the time is
allowed on the table.
*/
file->s->key_stat_version++;
}
return 0;
}

@ -451,9 +482,14 @@ ha_rows ha_heap::records_in_range(uint inx, key_range *min_key,
return HA_POS_ERROR; // Can only use exact keys
else
{
/* Assert that info() did run. We need current statistics here. */
DBUG_ASSERT(key_stats_ok);
return key->rec_per_key[key->key_parts-1];
if (records <= 1)
return records;
else
{
/* Assert that info() did run. We need current statistics here. */
DBUG_ASSERT(key_stat_version == file->s->key_stat_version);
return key->rec_per_key[key->key_parts-1];
}
}
}

@ -29,10 +29,10 @@ class ha_heap: public handler
key_map btree_keys;
/* number of records changed since last statistics update */
uint records_changed;
bool key_stats_ok;
uint key_stat_version;
public:
ha_heap(TABLE *table): handler(table), file(0), records_changed(0),
key_stats_ok(0) {}
key_stat_version(0) {}
~ha_heap() {}
const char *table_type() const { return "HEAP"; }
const char *index_type(uint inx)

@ -45,8 +45,6 @@ static const int max_transactions= 256;

static const char *ha_ndb_ext=".ndb";

#define NDB_HIDDEN_PRIMARY_KEY_LENGTH 8

#define NDB_FAILED_AUTO_INCREMENT ~(Uint64)0
#define NDB_AUTO_INCREMENT_RETRIES 10

@ -747,7 +745,7 @@ int ha_ndbcluster::get_ndb_value(NdbOperation *ndb_op, Field *field,
}

// Used for hidden key only
m_value[fieldnr].rec= ndb_op->getValue(fieldnr, NULL);
m_value[fieldnr].rec= ndb_op->getValue(fieldnr, m_ref);
DBUG_RETURN(m_value[fieldnr].rec == NULL);
}

@ -2098,13 +2096,10 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
DBUG_PRINT("info", ("Using hidden key"));

// Require that the PK for this record has previously been
// read into m_value
uint no_fields= table->fields;
NdbRecAttr* rec= m_value[no_fields].rec;
DBUG_ASSERT(rec);
DBUG_DUMP("key", (char*)rec->aRef(), NDB_HIDDEN_PRIMARY_KEY_LENGTH);
// read into m_ref
DBUG_DUMP("key", m_ref, NDB_HIDDEN_PRIMARY_KEY_LENGTH);

if (set_hidden_key(op, no_fields, rec->aRef()))
if (set_hidden_key(op, table->fields, m_ref))
ERR_RETURN(op->getNdbError());
}
else

@ -2181,11 +2176,8 @@ int ha_ndbcluster::delete_row(const byte *record)
{
// This table has no primary key, use "hidden" primary key
DBUG_PRINT("info", ("Using hidden key"));
uint no_fields= table->fields;
NdbRecAttr* rec= m_value[no_fields].rec;
DBUG_ASSERT(rec != NULL);

if (set_hidden_key(op, no_fields, rec->aRef()))
if (set_hidden_key(op, table->fields, m_ref))
ERR_RETURN(op->getNdbError());
}
else

@ -2792,7 +2784,7 @@ void ha_ndbcluster::position(const byte *record)
hidden_col->getAutoIncrement() &&
rec != NULL &&
ref_length == NDB_HIDDEN_PRIMARY_KEY_LENGTH);
memcpy(ref, (const void*)rec->aRef(), ref_length);
memcpy(ref, m_ref, ref_length);
}

DBUG_DUMP("ref", (char*)ref, ref_length);

@ -3046,9 +3038,26 @@ int ha_ndbcluster::end_bulk_insert()
"rows_inserted:%d, bulk_insert_rows: %d",
(int) m_rows_inserted, (int) m_bulk_insert_rows));
m_bulk_insert_not_flushed= FALSE;
if (execute_no_commit(this,trans) != 0) {
no_uncommitted_rows_execute_failure();
my_errno= error= ndb_err(trans);
if (m_transaction_on)
{
if (execute_no_commit(this, trans) != 0)
{
no_uncommitted_rows_execute_failure();
my_errno= error= ndb_err(trans);
}
}
else
{
if (execute_commit(this, trans) != 0)
{
no_uncommitted_rows_execute_failure();
my_errno= error= ndb_err(trans);
}
else
{
int res= trans->restart();
DBUG_ASSERT(res == 0);
}
}
}

@ -4867,7 +4876,7 @@ bool ha_ndbcluster::low_byte_first() const
}
bool ha_ndbcluster::has_transactions()
{
return m_transaction_on;
return TRUE;
}
const char* ha_ndbcluster::index_type(uint key_number)
{

@ -27,6 +27,8 @@

#include <ndbapi_limits.h>

#define NDB_HIDDEN_PRIMARY_KEY_LENGTH 8

class Ndb; // Forward declaration
class NdbOperation; // Forward declaration
class NdbConnection; // Forward declaration

@ -226,6 +228,7 @@ class ha_ndbcluster: public handler
// NdbRecAttr has no reference to blob
typedef union { NdbRecAttr *rec; NdbBlob *blob; void *ptr; } NdbValue;
NdbValue m_value[NDB_MAX_ATTRIBUTES_IN_TABLE];
byte m_ref[NDB_HIDDEN_PRIMARY_KEY_LENGTH];
bool m_use_write;
bool m_ignore_dup_key;
bool m_primary_key_update;

@ -1056,16 +1056,13 @@ public:
if (!master && ft_handler)
{
ft_handler->please->close_search(ft_handler);
ft_handler=0;
if (join_key)
table->file->ft_handler=0;
table->fulltext_searched=0;
}
if (concat)
{
delete concat;
concat= 0;
}
ft_handler= 0;
DBUG_VOID_RETURN;
}
enum Functype functype() const { return FT_FUNC; }

@ -949,6 +949,7 @@ Query_log_event::Query_log_event(const char* buf, int event_len,
void Query_log_event::print(FILE* file, bool short_form, char* last_db)
{
char buff[40],*end; // Enough for SET TIMESTAMP
const uint set_len= sizeof("SET ONE_SHOT CHARACTER_SET_CLIENT=") - 1;
if (!short_form)
{
print_header(file);

@ -978,6 +979,17 @@ void Query_log_event::print(FILE* file, bool short_form, char* last_db)
my_fwrite(file, (byte*) buff, (uint) (end-buff),MYF(MY_NABP | MY_WME));
if (flags & LOG_EVENT_THREAD_SPECIFIC_F)
fprintf(file,"SET @@session.pseudo_thread_id=%lu;\n",(ulong)thread_id);
/* charset_name command for mysql client */
if (!strncmp(query, "SET ONE_SHOT CHARACTER_SET_CLIENT=", set_len))
{
char * endptr;
int cs_number= strtoul(query + set_len, &endptr, 10);
DBUG_ASSERT(*endptr == ',');
CHARSET_INFO *cs_info= get_charset(cs_number, MYF(MY_WME));
if (cs_info) {
fprintf(file, "/*!\\C %s */;\n", cs_info->csname);
}
}
my_fwrite(file, (byte*) query, q_len, MYF(MY_NABP | MY_WME));
fprintf(file, ";\n");
}
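
Side note, not part of the commit: with the hunk above, a query event whose statement begins with SET ONE_SHOT CHARACTER_SET_CLIENT=<id>,... is preceded in the printed output by a /*!\C <csname> */; line, so a mysql client that understands the \C command switches its own charset before replaying the statement. The fragment below is an assumed example of the printed stream; the id 33 / name utf8 pairing and the collation numbers are illustrative (they come from get_charset()'s mapping, not from this diff).

/*!\C utf8 */;
SET ONE_SHOT CHARACTER_SET_CLIENT=33,COLLATION_CONNECTION=33,COLLATION_DATABASE=33,COLLATION_SERVER=8;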

@ -988,6 +988,8 @@ TABLE *open_table(THD *thd,const char *db,const char *table_name,
table->status=STATUS_NO_RECORD;
table->keys_in_use_for_query= table->keys_in_use;
table->used_keys= table->keys_for_keyread;
table->file->ft_handler=0;
table->fulltext_searched=0;
if (table->timestamp_field)
table->timestamp_field_type= table->timestamp_field->get_auto_set_type();
DBUG_ASSERT(table->key_read == 0);

@ -1681,7 +1681,10 @@ bool select_dumpvar::send_eof()

void TMP_TABLE_PARAM::init()
{
DBUG_ENTER("TMP_TABLE_PARAM::init");
DBUG_PRINT("enter", ("this: 0x%lx", (ulong)this));
field_count= sum_func_count= func_count= hidden_field_count= 0;
group_parts= group_length= group_null_parts= 0;
quick_group= 1;
DBUG_VOID_RETURN;
}

@ -388,6 +388,12 @@ bool load_db_opt(THD *thd, const char *path, HA_CREATE_INFO *create)
silent Used by replication when internally creating a database.
In this case the entry should not be logged.

SIDE-EFFECTS
1. Report back to client that command succeeded (send_ok)
2. Report errors to client
3. Log event to binary log
(The 'silent' flags turns off 1 and 3.)

RETURN VALUES
0 ok
-1 Error

@ -421,16 +427,17 @@ int mysql_create_db(THD *thd, char *db, HA_CREATE_INFO *create_info,

if (my_stat(path,&stat_info,MYF(0)))
{
if (!(create_options & HA_LEX_CREATE_IF_NOT_EXISTS))
if (!(create_options & HA_LEX_CREATE_IF_NOT_EXISTS))
{
my_error(ER_DB_CREATE_EXISTS,MYF(0),db);
error= -1;
goto exit;
}
push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
ER_DB_CREATE_EXISTS, ER(ER_DB_CREATE_EXISTS), db);
ER_DB_CREATE_EXISTS, ER(ER_DB_CREATE_EXISTS), db);
if (!silent)
send_ok(thd);
error= 0;
send_ok(thd);
goto exit;
}
else

@ -285,8 +285,11 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
else
error=read_sep_field(thd,info,table,fields,read_info,*enclosed,
skip_lines);
if (table->file->end_bulk_insert())
error=1; /* purecov: inspected */
if (table->file->end_bulk_insert() && !error)
{
table->file->print_error(my_errno, MYF(0));
error= 1;
}
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
table->next_number_field=0;
}

@ -2840,21 +2840,26 @@ unsent_create_error:
if ((res= multi_update_precheck(thd, tables)))
break;

if ((res= mysql_multi_update_lock(thd, tables, &select_lex->item_list,
select_lex)))
break;

res= mysql_multi_update_lock(thd, tables, &select_lex->item_list,
select_lex);
#ifdef HAVE_REPLICATION
/* Check slave filtering rules */
if (thd->slave_thread)
if (all_tables_not_ok(thd,tables))
{
if (res!= 0)
{
res= 0; /* don't care of prev failure */
thd->clear_error(); /* filters are of highest prior */
}
/* we warn the slave SQL thread */
my_error(ER_SLAVE_IGNORED_TABLE, MYF(0));
break;
}
#endif /* HAVE_REPLICATION */

if (res)
break;

res= mysql_multi_update(thd,tables,
&select_lex->item_list,
&lex->value_list,

@ -4107,6 +4107,20 @@ JOIN::join_free(bool full)
problems in free_elements() as some of the elements are then deleted.
*/
tmp_table_param.copy_funcs.empty();
/*
If we have tmp_join and 'this' JOIN is not tmp_join and
tmp_table_param.copy_field's of them are equal then we have to remove
pointer to tmp_table_param.copy_field from tmp_join, because it qill
be removed in tmp_table_param.cleanup().
*/
if (tmp_join &&
tmp_join != this &&
tmp_join->tmp_table_param.copy_field ==
tmp_table_param.copy_field)
{
tmp_join->tmp_table_param.copy_field=
tmp_join->tmp_table_param.save_copy_field= 0;
}
tmp_table_param.cleanup();
}
DBUG_VOID_RETURN;

@ -227,7 +227,14 @@ class JOIN :public Sql_alloc
{
init(thd_arg, fields_arg, select_options_arg, result_arg);
}

JOIN(JOIN &join)
:fields_list(join.fields_list)
{
init(join.thd, join.fields_list, join.select_options,
join.result);
}

void init(THD *thd_arg, List<Item> &fields_arg, ulong select_options_arg,
select_result *result_arg)
{

@ -272,7 +279,7 @@ class JOIN :public Sql_alloc

fields_list= fields_arg;
bzero((char*) &keyuse,sizeof(keyuse));
tmp_table_param.copy_field=0;
tmp_table_param.init();
tmp_table_param.end_write_records= HA_POS_ERROR;
rollup.state= ROLLUP::STATE_NONE;
}

@ -212,7 +212,6 @@ int mysql_update(THD *thd,
SORT_FIELD *sortorder;
ha_rows examined_rows;

used_index= MAX_KEY; // For call to init_read_record()
table->sort.io_cache = (IO_CACHE *) my_malloc(sizeof(IO_CACHE),
MYF(MY_FAE | MY_ZEROFILL));
if (!(sortorder=make_unireg_sortorder(order, &length)) ||

@ -244,7 +243,17 @@ int mysql_update(THD *thd,
DISK_BUFFER_SIZE, MYF(MY_WME)))
goto err;

if (used_index == MAX_KEY)
/*
When we get here, we have one of the following options:
A. used_index == MAX_KEY
This means we should use full table scan, and start it with
init_read_record call
B. used_index != MAX_KEY
B.1 quick select is used, start the scan with init_read_record
B.2 quick select is not used, this is full index scan (with LIMIT)
Full index scan must be started with init_read_record_idx
*/
if (used_index == MAX_KEY || (select && select->quick))
init_read_record(&info,thd,table,select,0,1);
else
init_read_record_idx(&info, thd, table, 1, used_index);

@ -309,7 +309,7 @@ BuildMySQL "--enable-shared \
--with-comment=\"MySQL Community Edition - Max (GPL)\" \
--with-server-suffix='-Max'"

make test-force || true
make -i test-force || true

# Save mysqld-max
mv sql/mysqld sql/mysqld-max

@ -363,7 +363,7 @@ BuildMySQL "--disable-shared \
--without-openssl"
nm --numeric-sort sql/mysqld > sql/mysqld.sym

make test-force || true
make -i test-force || true

%install
RBR=$RPM_BUILD_ROOT

@ -689,6 +689,11 @@ fi
# itself - note that they must be ordered by date (important when
# merging BK trees)
%changelog
* Fri Jan 10 2006 Joerg Bruehe <joerg@mysql.com>

- Use "-i" on "make test-force";
this is essential for later evaluation of this log file.

* Mon Dec 05 2005 Joerg Bruehe <joerg@mysql.com>

- Avoid using the "bundled" zlib on "shared" builds:

@ -11706,6 +11706,37 @@ static void test_bug12001()
DIE_UNLESS(res==1);
}

static void test_bug12744()
{
MYSQL_STMT *prep_stmt = NULL;
int rc;
myheader("test_bug12744");

prep_stmt= mysql_stmt_init(mysql);
rc= mysql_stmt_prepare(prep_stmt, "SELECT 1", 8);
DIE_UNLESS(rc==0);

rc= mysql_kill(mysql, mysql_thread_id(mysql));
DIE_UNLESS(rc==0);

if (rc= mysql_stmt_execute(prep_stmt))
{
if (rc= mysql_stmt_reset(prep_stmt))
printf("OK!\n");
else
{
printf("Error!");
DIE_UNLESS(1==0);
}
}
else
{
fprintf(stderr, "expected error but no error occured\n");
DIE_UNLESS(1==0);
}
rc= mysql_stmt_close(prep_stmt);
}

/*
Bug#11718: query with function, join and order by returns wrong type
*/

@ -12054,6 +12085,7 @@ static struct my_tests_st my_tests[]= {
{ "test_bug8378", test_bug8378 },
{ "test_bug9735", test_bug9735 },
{ "test_bug11183", test_bug11183 },
{ "test_bug12744", test_bug12744 },
{ "test_bug12001", test_bug12001 },
{ "test_bug11718", test_bug11718 },
{ "test_bug12925", test_bug12925 },