Merge branch '10.4' into 10.5

Oleksandr Byelkin 2023-01-27 13:54:14 +01:00
commit 7fa02f5c0b
114 changed files with 3222 additions and 636 deletions

View File

@ -9,6 +9,7 @@ MariaDB Corporation https://www.mariadb.com (2013)
Microsoft https://microsoft.com/ (2017) Microsoft https://microsoft.com/ (2017)
ServiceNow https://servicenow.com (2019) ServiceNow https://servicenow.com (2019)
SIT https://sit.org (2022) SIT https://sit.org (2022)
Tencent Cloud https://cloud.tencent.com (2017)
Development Bank of Singapore https://dbs.com (2016) Development Bank of Singapore https://dbs.com (2016)
IBM https://www.ibm.com (2017) IBM https://www.ibm.com (2017)
Automattic https://automattic.com (2019) Automattic https://automattic.com (2019)

View File

@ -3606,7 +3606,10 @@ print_table_data(MYSQL_RES *result)
{ {
print_field_types(result); print_field_types(result);
if (!mysql_num_rows(result)) if (!mysql_num_rows(result))
{
my_afree((uchar*) num_flag);
return; return;
}
mysql_field_seek(result,0); mysql_field_seek(result,0);
} }
separator.copy("+",1,charset_info); separator.copy("+",1,charset_info);

View File

@ -575,8 +575,8 @@ static int search_dir(const char * base_path, const char *tool_name,
char new_path[FN_REFLEN]; char new_path[FN_REFLEN];
char source_path[FN_REFLEN]; char source_path[FN_REFLEN];
strcpy(source_path, base_path); safe_strcpy(source_path, sizeof(source_path), base_path);
strcat(source_path, subdir); safe_strcat(source_path, sizeof(source_path), subdir);
fn_format(new_path, tool_name, source_path, "", MY_UNPACK_FILENAME); fn_format(new_path, tool_name, source_path, "", MY_UNPACK_FILENAME);
if (file_exists(new_path)) if (file_exists(new_path))
{ {
@ -632,7 +632,7 @@ static int load_plugin_data(char *plugin_name, char *config_file)
FILE *file_ptr; FILE *file_ptr;
char path[FN_REFLEN]; char path[FN_REFLEN];
char line[1024]; char line[1024];
char *reason= 0; const char *reason= 0;
char *res; char *res;
int i= -1; int i= -1;
@ -643,14 +643,14 @@ static int load_plugin_data(char *plugin_name, char *config_file)
} }
if (!file_exists(opt_plugin_ini)) if (!file_exists(opt_plugin_ini))
{ {
reason= (char *)"File does not exist."; reason= "File does not exist.";
goto error; goto error;
} }
file_ptr= fopen(opt_plugin_ini, "r"); file_ptr= fopen(opt_plugin_ini, "r");
if (file_ptr == NULL) if (file_ptr == NULL)
{ {
reason= (char *)"Cannot open file."; reason= "Cannot open file.";
goto error; goto error;
} }
@ -660,17 +660,20 @@ static int load_plugin_data(char *plugin_name, char *config_file)
/* Read plugin components */ /* Read plugin components */
while (i < 16) while (i < 16)
{ {
size_t line_len;
res= fgets(line, sizeof(line), file_ptr); res= fgets(line, sizeof(line), file_ptr);
line_len= strlen(line);
/* strip \n */ /* strip \n */
if (line[strlen(line)-1] == '\n') if (line[line_len - 1] == '\n')
{ line[line_len - 1]= '\0';
line[strlen(line)-1]= '\0';
}
if (res == NULL) if (res == NULL)
{ {
if (i < 1) if (i < 1)
{ {
reason= (char *)"Bad format in plugin configuration file."; reason= "Bad format in plugin configuration file.";
fclose(file_ptr); fclose(file_ptr);
goto error; goto error;
} }
@ -683,14 +686,19 @@ static int load_plugin_data(char *plugin_name, char *config_file)
if (i == -1) /* if first pass, read this line as so_name */ if (i == -1) /* if first pass, read this line as so_name */
{ {
/* Add proper file extension for soname */ /* Add proper file extension for soname */
strcat(line, FN_SOEXT); if (safe_strcpy(line + line_len - 1, sizeof(line), FN_SOEXT))
{
reason= "Plugin name too long.";
fclose(file_ptr);
goto error;
}
/* save so_name */ /* save so_name */
plugin_data.so_name= my_strdup(PSI_NOT_INSTRUMENTED, line, MYF(MY_WME|MY_ZEROFILL)); plugin_data.so_name= my_strdup(PSI_NOT_INSTRUMENTED, line, MYF(MY_WME|MY_ZEROFILL));
i++; i++;
} }
else else
{ {
if (strlen(line) > 0) if (line_len > 0)
{ {
plugin_data.components[i]= my_strdup(PSI_NOT_INSTRUMENTED, line, MYF(MY_WME)); plugin_data.components[i]= my_strdup(PSI_NOT_INSTRUMENTED, line, MYF(MY_WME));
i++; i++;
@ -779,14 +787,13 @@ static int check_options(int argc, char **argv, char *operation)
/* read the plugin config file and check for match against argument */ /* read the plugin config file and check for match against argument */
else else
{ {
if (strlen(argv[i]) + 4 + 1 > FN_REFLEN) if (safe_strcpy(plugin_name, sizeof(plugin_name), argv[i]) ||
safe_strcpy(config_file, sizeof(config_file), argv[i]) ||
safe_strcat(config_file, sizeof(config_file), ".ini"))
{ {
fprintf(stderr, "ERROR: argument is too long.\n"); fprintf(stderr, "ERROR: argument is too long.\n");
return 1; return 1;
} }
strcpy(plugin_name, argv[i]);
strcpy(config_file, argv[i]);
strcat(config_file, ".ini");
} }
} }
@ -855,35 +862,30 @@ static int check_options(int argc, char **argv, char *operation)
static int process_options(int argc, char *argv[], char *operation) static int process_options(int argc, char *argv[], char *operation)
{ {
int error= 0; int error= 0;
int i= 0;
/* Parse and execute command-line options */ /* Parse and execute command-line options */
if ((error= handle_options(&argc, &argv, my_long_options, get_one_option))) if ((error= handle_options(&argc, &argv, my_long_options, get_one_option)))
goto exit; return error;
/* If the print defaults option used, exit. */ /* If the print defaults option used, exit. */
if (opt_print_defaults) if (opt_print_defaults)
{ return -1;
error= -1;
goto exit;
}
/* Add a trailing directory separator if not present */ /* Add a trailing directory separator if not present */
if (opt_basedir) if (opt_basedir)
{ {
i= (int)strlength(opt_basedir); size_t basedir_len= strlength(opt_basedir);
if (opt_basedir[i-1] != FN_LIBCHAR || opt_basedir[i-1] != FN_LIBCHAR2) if (opt_basedir[basedir_len - 1] != FN_LIBCHAR ||
opt_basedir[basedir_len - 1] != FN_LIBCHAR2)
{ {
char buff[FN_REFLEN]; char buff[FN_REFLEN];
memset(buff, 0, sizeof(buff)); if (basedir_len + 2 > FN_REFLEN)
return -1;
memcpy(buff, opt_basedir, basedir_len);
buff[basedir_len]= '/';
buff[basedir_len + 1]= '\0';
strncpy(buff, opt_basedir, sizeof(buff) - 1);
#ifdef __WIN__
strncat(buff, "/", sizeof(buff) - strlen(buff) - 1);
#else
strncat(buff, FN_DIRSEP, sizeof(buff) - strlen(buff) - 1);
#endif
buff[sizeof(buff) - 1]= 0;
my_free(opt_basedir); my_free(opt_basedir);
opt_basedir= my_strdup(PSI_NOT_INSTRUMENTED, buff, MYF(MY_FAE)); opt_basedir= my_strdup(PSI_NOT_INSTRUMENTED, buff, MYF(MY_FAE));
} }
@ -895,10 +897,7 @@ static int process_options(int argc, char *argv[], char *operation)
generated when the defaults were read from the file, exit. generated when the defaults were read from the file, exit.
*/ */
if (!opt_no_defaults && ((error= get_default_values()))) if (!opt_no_defaults && ((error= get_default_values())))
{ return -1;
error= -1;
goto exit;
}
/* /*
Check to ensure required options are present and validate the operation. Check to ensure required options are present and validate the operation.
@ -906,11 +905,9 @@ static int process_options(int argc, char *argv[], char *operation)
read a configuration file named <plugin_name>.ini from the --plugin-dir read a configuration file named <plugin_name>.ini from the --plugin-dir
or --plugin-ini location if the --plugin-ini option presented. or --plugin-ini location if the --plugin-ini option presented.
*/ */
strcpy(operation, ""); operation[0]= '\0';
if ((error= check_options(argc, argv, operation))) if ((error= check_options(argc, argv, operation)))
{ return error;
goto exit;
}
if (opt_verbose) if (opt_verbose)
{ {
@ -922,8 +919,7 @@ static int process_options(int argc, char *argv[], char *operation)
printf("# lc_messages_dir = %s\n", opt_lc_messages_dir); printf("# lc_messages_dir = %s\n", opt_lc_messages_dir);
} }
exit: return 0;
return error;
} }

View File

@ -2515,7 +2515,7 @@ static uint dump_events_for_db(char *db)
if (mysql_query_with_error_report(mysql, &event_list_res, "show events")) if (mysql_query_with_error_report(mysql, &event_list_res, "show events"))
DBUG_RETURN(0); DBUG_RETURN(0);
strcpy(delimiter, ";"); safe_strcpy(delimiter, sizeof(delimiter), ";");
if (mysql_num_rows(event_list_res) > 0) if (mysql_num_rows(event_list_res) > 0)
{ {
if (opt_xml) if (opt_xml)

View File

@ -6167,7 +6167,9 @@ int do_done(struct st_command *command)
if (*cur_block->delim) if (*cur_block->delim)
{ {
/* Restore "old" delimiter after false if block */ /* Restore "old" delimiter after false if block */
strcpy (delimiter, cur_block->delim); if (safe_strcpy(delimiter, sizeof(delimiter), cur_block->delim))
die("Delimiter too long, truncated");
delimiter_length= strlen(delimiter); delimiter_length= strlen(delimiter);
} }
/* Pop block from stack, goto next line */ /* Pop block from stack, goto next line */
@ -6422,10 +6424,12 @@ void do_block(enum block_cmd cmd, struct st_command* command)
if (cur_block->ok) if (cur_block->ok)
{ {
cur_block->delim[0]= '\0'; cur_block->delim[0]= '\0';
} else }
else
{ {
/* Remember "old" delimiter if entering a false if block */ /* Remember "old" delimiter if entering a false if block */
strcpy (cur_block->delim, delimiter); if (safe_strcpy(cur_block->delim, sizeof(cur_block->delim), delimiter))
die("Delimiter too long, truncated");
} }
DBUG_PRINT("info", ("OK: %d", cur_block->ok)); DBUG_PRINT("info", ("OK: %d", cur_block->ok));
@ -11769,9 +11773,8 @@ static int setenv(const char *name, const char *value, int overwrite)
char *envvar= (char *)malloc(buflen); char *envvar= (char *)malloc(buflen);
if(!envvar) if(!envvar)
return ENOMEM; return ENOMEM;
strcpy(envvar, name);
strcat(envvar, "="); snprintf(envvar, buflen, "%s=%s", name, value);
strcat(envvar, value);
putenv(envvar); putenv(envvar);
return 0; return 0;
} }

View File

@ -180,7 +180,7 @@ IF(WIN32)
MARK_AS_ADVANCED(SIGNCODE) MARK_AS_ADVANCED(SIGNCODE)
IF(SIGNCODE) IF(SIGNCODE)
SET(SIGNTOOL_PARAMETERS SET(SIGNTOOL_PARAMETERS
/a /t http://timestamp.globalsign.com/?signature=sha2 /a /fd SHA256 /t http://timestamp.globalsign.com/?signature=sha2
CACHE STRING "parameters for signtool (list)") CACHE STRING "parameters for signtool (list)")
IF(NOT SIGNTOOL_EXECUTABLE) IF(NOT SIGNTOOL_EXECUTABLE)
FILE(GLOB path_list FILE(GLOB path_list

View File

@ -508,7 +508,7 @@ static int DbugParse(CODE_STATE *cs, const char *control)
stack->delay= stack->next->delay; stack->delay= stack->next->delay;
stack->maxdepth= stack->next->maxdepth; stack->maxdepth= stack->next->maxdepth;
stack->sub_level= stack->next->sub_level; stack->sub_level= stack->next->sub_level;
strcpy(stack->name, stack->next->name); safe_strcpy(stack->name, sizeof(stack->name), stack->next->name);
stack->out_file= stack->next->out_file; stack->out_file= stack->next->out_file;
stack->out_file->used++; stack->out_file->used++;
if (stack->next == &init_settings) if (stack->next == &init_settings)

View File

@ -837,7 +837,7 @@ parse_page(
{ {
unsigned long long id; unsigned long long id;
uint16_t undo_page_type; uint16_t undo_page_type;
char str[20]={'\0'}; const char *str;
ulint n_recs; ulint n_recs;
uint32_t page_no, left_page_no, right_page_no; uint32_t page_no, left_page_no, right_page_no;
ulint data_bytes; ulint data_bytes;
@ -845,11 +845,7 @@ parse_page(
ulint size_range_id; ulint size_range_id;
/* Check whether page is doublewrite buffer. */ /* Check whether page is doublewrite buffer. */
if(skip_page) { str = skip_page ? "Double_write_buffer" : "-";
strcpy(str, "Double_write_buffer");
} else {
strcpy(str, "-");
}
switch (fil_page_get_type(page)) { switch (fil_page_get_type(page)) {

View File

@ -57,6 +57,9 @@ Street, Fifth Floor, Boston, MA 02110-1335 USA
#include "backup_copy.h" #include "backup_copy.h"
#include "backup_mysql.h" #include "backup_mysql.h"
#include <btr0btr.h> #include <btr0btr.h>
#ifdef _WIN32
#include <direct.h> /* rmdir */
#endif
#define ROCKSDB_BACKUP_DIR "#rocksdb" #define ROCKSDB_BACKUP_DIR "#rocksdb"
@ -1618,7 +1621,49 @@ bool backup_finish()
return(true); return(true);
} }
bool
/*
Drop all empty database directories in the base backup
that do not exist in the incremental backup.
This effectively re-plays all DROP DATABASE statements that happened
between the base backup and the incremental backup creation time.
Note, only checking if base_dir/db/ is empty is not enough,
because inc_dir/db/db.opt might have been dropped for some reason,
which may also result in an empty base_dir/db/.
Only the fact that at the same time:
- base_dir/db/ exists
- inc_dir/db/ does not exist
means that DROP DATABASE happened.
*/
static void
ibx_incremental_drop_databases(const char *base_dir,
const char *inc_dir)
{
datadir_node_t node;
datadir_node_init(&node);
datadir_iter_t *it = datadir_iter_new(base_dir);
while (datadir_iter_next(it, &node)) {
if (node.is_empty_dir) {
char path[FN_REFLEN];
snprintf(path, sizeof(path), "%s/%s",
inc_dir, node.filepath_rel);
if (!directory_exists(path, false)) {
msg("Removing %s", node.filepath);
rmdir(node.filepath);
}
}
}
datadir_iter_free(it);
datadir_node_free(&node);
}
static bool
ibx_copy_incremental_over_full() ibx_copy_incremental_over_full()
{ {
const char *ext_list[] = {"frm", "isl", "MYD", "MYI", "MAD", "MAI", const char *ext_list[] = {"frm", "isl", "MYD", "MYI", "MAD", "MAI",
@ -1701,6 +1746,8 @@ ibx_copy_incremental_over_full()
} }
copy_or_move_dir(path, ROCKSDB_BACKUP_DIR, true, true); copy_or_move_dir(path, ROCKSDB_BACKUP_DIR, true, true);
} }
ibx_incremental_drop_databases(xtrabackup_target_dir,
xtrabackup_incremental_dir);
} }

View File

@ -1676,8 +1676,11 @@ container_list_add_object(container_list *list, const char *name,
list->object_count += object_count_step; list->object_count += object_count_step;
} }
assert(list->idx <= list->object_count); assert(list->idx <= list->object_count);
strcpy(list->objects[list->idx].name, name); safe_strcpy(list->objects[list->idx].name,
strcpy(list->objects[list->idx].hash, hash); sizeof(list->objects[list->idx].name), name);
safe_strcpy(list->objects[list->idx].hash,
sizeof(list->objects[list->idx].hash), hash);
list->objects[list->idx].bytes = bytes; list->objects[list->idx].bytes = bytes;
++list->idx; ++list->idx;
} }

View File

@ -4410,10 +4410,12 @@ static bool xtrabackup_backup_low()
} }
if (!xtrabackup_incremental) { if (!xtrabackup_incremental) {
strcpy(metadata_type, "full-backuped"); safe_strcpy(metadata_type, sizeof(metadata_type),
"full-backuped");
metadata_from_lsn = 0; metadata_from_lsn = 0;
} else { } else {
strcpy(metadata_type, "incremental"); safe_strcpy(metadata_type, sizeof(metadata_type),
"incremental");
metadata_from_lsn = incremental_lsn; metadata_from_lsn = incremental_lsn;
} }
metadata_last_lsn = log_copy_scanned_lsn; metadata_last_lsn = log_copy_scanned_lsn;
@ -6074,7 +6076,8 @@ static bool xtrabackup_prepare_func(char** argv)
if (ok) { if (ok) {
char filename[FN_REFLEN]; char filename[FN_REFLEN];
strcpy(metadata_type, "log-applied"); safe_strcpy(metadata_type, sizeof(metadata_type),
"log-applied");
if(xtrabackup_incremental if(xtrabackup_incremental
&& metadata_to_lsn < incremental_to_lsn) && metadata_to_lsn < incremental_to_lsn)

View File

@ -226,6 +226,44 @@ static inline void lex_string_set3(LEX_CSTRING *lex_str, const char *c_str,
lex_str->length= len; lex_str->length= len;
} }
/*
Copies src into dst and ensures dst is a NULL-terminated C string.
Returns 1 if the src string was truncated because dst was too small.
Returns 0 if src fit completely within dst. Pads the remainder of dst with '\0'.
Note: dst_size must be > 0
*/
static inline int safe_strcpy(char *dst, size_t dst_size, const char *src)
{
memset(dst, '\0', dst_size);
strncpy(dst, src, dst_size - 1);
/*
If the first condition is true, we are guaranteed to have src length
>= (dst_size - 1), hence safe to access src[dst_size - 1].
*/
if (dst[dst_size - 2] != '\0' && src[dst_size - 1] != '\0')
return 1; /* Truncation of src. */
return 0;
}
/*
Appends src to dst and ensures dst is a NULL-terminated C string.
Returns 1 if the src string was truncated because dst was too small.
Returns 0 if src fit completely within the remaining dst space. Pads the
remainder of dst with '\0'.
Note: dst_size must be > 0
*/
static inline int safe_strcat(char *dst, size_t dst_size, const char *src)
{
size_t init_len= strlen(dst);
if (unlikely(init_len >= dst_size - 1))
return 1;
return safe_strcpy(dst + init_len, dst_size - init_len, src);
}
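A minimal usage sketch of the two helpers above (not part of this commit; it assumes the safe_strcpy()/safe_strcat() definitions in this header are in scope, and that the header pulls in <string.h> for them). The return codes and buffer contents in the comments follow directly from the definitions above:

#include <stdio.h>

/* Hypothetical standalone example, assuming this header is included. */
int main(void)
{
  char buf[8];

  /* "abc" (3 chars + NUL) fits in 8 bytes: returns 0, buf = "abc", rest zero-padded. */
  int rc1= safe_strcpy(buf, sizeof(buf), "abc");

  /* "abc" + "defgh" + NUL would need 9 bytes, only 8 available: returns 1, buf = "abcdefg". */
  int rc2= safe_strcat(buf, sizeof(buf), "defgh");

  printf("rc1=%d rc2=%d buf=%s\n", rc1, rc2, buf); /* prints: rc1=0 rc2=1 buf=abcdefg */
  return 0;
}

This mirrors how the call sites in this commit react to a non-zero return: mysqltest.cc calls die("Delimiter too long, truncated") and mysql_plugin.c jumps to its error label.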
#ifdef __cplusplus #ifdef __cplusplus
static inline char *safe_str(char *str) static inline char *safe_str(char *str)
{ return str ? str : const_cast<char*>(""); } { return str ? str : const_cast<char*>(""); }

View File

@ -5603,6 +5603,178 @@ r
3 3
drop table t1,t2,t3,x; drop table t1,t2,t3,x;
# #
# MDEV-30248: Embedded non-recursive CTE referring to base table 'x'
# within a CTE with name 'x' used in a subquery from
# select list of another CTE
#
CREATE TABLE x (a int) ENGINE=MyISAM;
INSERT INTO x VALUES (3),(7),(1);
CREATE TABLE t1 (b int) ENGINE=MYISAM;
INSERT INTO t1 VALUES (1);
WITH cte AS
(
SELECT
(
WITH x AS
(WITH x AS (SELECT a FROM x AS t) SELECT 1 AS b)
SELECT b FROM x AS r
) AS c
)
SELECT cte.c FROM cte;
c
1
WITH cte AS
(
SELECT
(
WITH x AS
(WITH x AS (SELECT a FROM x AS t) SELECT b FROM t1)
SELECT b FROM x AS r
) AS c
)
SELECT cte.c FROM cte;
c
1
WITH cte AS
(
SELECT
(
WITH x AS
(WITH y AS (SELECT a FROM x AS t) SELECT b FROM t1)
SELECT b FROM x AS r
) AS c
)
SELECT cte.c FROM cte;
c
1
WITH cte AS
(
SELECT
(
WITH x AS
(WITH y(b) AS (SELECT a FROM x AS t LIMIT 1) SELECT b FROM y)
SELECT b FROM x AS r
) AS c
)
SELECT cte.c FROM cte;
c
3
WITH cte AS
(
SELECT
(
WITH x AS
(WITH x(b) AS (SELECT a FROM x AS t LIMIT 1) SELECT b FROM x)
SELECT b FROM x AS r
) AS c
)
SELECT cte.c FROM cte;
c
3
WITH x AS
(
SELECT
(
WITH x AS
(WITH x AS (SELECT a FROM x AS t) SELECT 1 AS b)
SELECT b FROM x AS r
) AS c
)
SELECT x.c from x;
c
1
WITH cte AS
(
SELECT
(
WITH x AS
(WITH x AS (SELECT a FROM x AS t) SELECT 2 AS b)
SELECT r1.b FROM x AS r1, x AS r2 WHERE r1.b=r2.b
) AS c
)
SELECT cte.c from cte;
c
2
DROP TABLE x;
WITH cte AS
(
SELECT
(
WITH x AS
(WITH x AS (SELECT a FROM x AS t) SELECT 1 AS b)
SELECT b FROM x AS r
) AS c
)
SELECT cte.c FROM cte;
ERROR 42S02: Table 'test.x' doesn't exist
WITH cte AS
(
SELECT
(
WITH x AS
(WITH x AS (SELECT a FROM x AS t) SELECT b FROM t1)
SELECT b FROM x AS r
) AS c
)
SELECT cte.c FROM cte;
ERROR 42S02: Table 'test.x' doesn't exist
WITH cte AS
(
SELECT
(
WITH x AS
(WITH y AS (SELECT a FROM x AS t) SELECT b FROM t1)
SELECT b FROM x AS r
) AS c
)
SELECT cte.c FROM cte;
ERROR 42S02: Table 'test.x' doesn't exist
WITH cte AS
(
SELECT
(
WITH x AS
(WITH y(b) AS (SELECT a FROM x AS t LIMIT 1) SELECT b FROM y)
SELECT b FROM x AS r
) AS c
)
SELECT cte.c FROM cte;
ERROR 42S02: Table 'test.x' doesn't exist
WITH cte AS
(
SELECT
(
WITH x AS
(WITH x(b) AS (SELECT a FROM x AS t LIMIT 1) SELECT b FROM x)
SELECT b FROM x AS r
) AS c
)
SELECT cte.c FROM cte;
ERROR 42S02: Table 'test.x' doesn't exist
WITH x AS
(
SELECT
(
WITH x AS
(WITH x AS (SELECT a FROM x AS t) SELECT 1 AS b)
SELECT b FROM x AS r
) AS c
)
SELECT x.c from x;
ERROR 42S02: Table 'test.x' doesn't exist
WITH cte AS
(
SELECT
(
WITH x AS
(WITH x AS (SELECT a FROM x AS t) SELECT 2 AS b)
SELECT r1.b FROM x AS r1, x AS r2 WHERE r1.b=r2.b
) AS c
)
SELECT cte.c from cte;
ERROR 42S02: Table 'test.x' doesn't exist
DROP TABLE t1;
#
# End of 10.3 tests # End of 10.3 tests
# #
# #

View File

@ -3874,6 +3874,129 @@ select * from cte;
drop table t1,t2,t3,x; drop table t1,t2,t3,x;
--echo #
--echo # MDEV-30248: Embedded non-recursive CTE referring to base table 'x'
--echo # within a CTE with name 'x' used in a subquery from
--echo # select list of another CTE
--echo #
CREATE TABLE x (a int) ENGINE=MyISAM;
INSERT INTO x VALUES (3),(7),(1);
CREATE TABLE t1 (b int) ENGINE=MYISAM;
INSERT INTO t1 VALUES (1);
let $q1=
WITH cte AS
(
SELECT
(
WITH x AS
(WITH x AS (SELECT a FROM x AS t) SELECT 1 AS b)
SELECT b FROM x AS r
) AS c
)
SELECT cte.c FROM cte;
eval $q1;
let $q2=
WITH cte AS
(
SELECT
(
WITH x AS
(WITH x AS (SELECT a FROM x AS t) SELECT b FROM t1)
SELECT b FROM x AS r
) AS c
)
SELECT cte.c FROM cte;
eval $q2;
let $q3=
WITH cte AS
(
SELECT
(
WITH x AS
(WITH y AS (SELECT a FROM x AS t) SELECT b FROM t1)
SELECT b FROM x AS r
) AS c
)
SELECT cte.c FROM cte;
eval $q3;
let $q4=
WITH cte AS
(
SELECT
(
WITH x AS
(WITH y(b) AS (SELECT a FROM x AS t LIMIT 1) SELECT b FROM y)
SELECT b FROM x AS r
) AS c
)
SELECT cte.c FROM cte;
eval $q4;
let $q5=
WITH cte AS
(
SELECT
(
WITH x AS
(WITH x(b) AS (SELECT a FROM x AS t LIMIT 1) SELECT b FROM x)
SELECT b FROM x AS r
) AS c
)
SELECT cte.c FROM cte;
eval $q5;
let $q6=
WITH x AS
(
SELECT
(
WITH x AS
(WITH x AS (SELECT a FROM x AS t) SELECT 1 AS b)
SELECT b FROM x AS r
) AS c
)
SELECT x.c from x;
eval $q6;
let $q7=
WITH cte AS
(
SELECT
(
WITH x AS
(WITH x AS (SELECT a FROM x AS t) SELECT 2 AS b)
SELECT r1.b FROM x AS r1, x AS r2 WHERE r1.b=r2.b
) AS c
)
SELECT cte.c from cte;
eval $q7;
DROP TABLE x;
--ERROR ER_NO_SUCH_TABLE
eval $q1;
--ERROR ER_NO_SUCH_TABLE
eval $q2;
--ERROR ER_NO_SUCH_TABLE
eval $q3;
--ERROR ER_NO_SUCH_TABLE
eval $q4;
--ERROR ER_NO_SUCH_TABLE
eval $q5;
--ERROR ER_NO_SUCH_TABLE
eval $q6;
--ERROR ER_NO_SUCH_TABLE
eval $q7;
DROP TABLE t1;
--echo # --echo #
--echo # End of 10.3 tests --echo # End of 10.3 tests
--echo # --echo #

View File

@ -11383,6 +11383,181 @@ a
# End of 10.3 tests # End of 10.3 tests
# #
# #
# Start of 10.4 tests
#
#
# MDEV-27653 long uniques don't work with unicode collations
#
SET NAMES utf8mb3;
CREATE TABLE t1 (
a CHAR(30) COLLATE utf8mb3_general_ci,
UNIQUE KEY(a) USING HASH
);
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` char(30) CHARACTER SET utf8 COLLATE utf8_general_ci DEFAULT NULL,
UNIQUE KEY `a` (`a`) USING HASH
) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
INSERT INTO t1 VALUES ('a');
INSERT INTO t1 VALUES ('ä');
ERROR 23000: Duplicate entry 'ä' for key 'a'
SELECT * FROM t1;
a
a
DROP TABLE t1;
CREATE TABLE t1 (
a CHAR(30) COLLATE utf8mb3_general_ci,
UNIQUE KEY(a(10)) USING HASH
);
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` char(30) CHARACTER SET utf8 COLLATE utf8_general_ci DEFAULT NULL,
UNIQUE KEY `a` (`a`(10)) USING HASH
) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
INSERT INTO t1 VALUES ('a');
INSERT INTO t1 VALUES ('ä');
ERROR 23000: Duplicate entry 'ä' for key 'a'
SELECT * FROM t1;
a
a
DROP TABLE t1;
CREATE TABLE t1 (
a VARCHAR(30) COLLATE utf8mb3_general_ci,
UNIQUE KEY(a) USING HASH
);
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` varchar(30) CHARACTER SET utf8 COLLATE utf8_general_ci DEFAULT NULL,
UNIQUE KEY `a` (`a`) USING HASH
) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
INSERT INTO t1 VALUES ('a');
INSERT INTO t1 VALUES ('ä');
ERROR 23000: Duplicate entry 'ä' for key 'a'
SELECT * FROM t1;
a
a
DROP TABLE t1;
CREATE TABLE t1 (
a VARCHAR(30) COLLATE utf8mb3_general_ci,
UNIQUE KEY(a(10)) USING HASH
);
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` varchar(30) CHARACTER SET utf8 COLLATE utf8_general_ci DEFAULT NULL,
UNIQUE KEY `a` (`a`(10)) USING HASH
) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
INSERT INTO t1 VALUES ('a');
INSERT INTO t1 VALUES ('ä');
ERROR 23000: Duplicate entry 'ä' for key 'a'
SELECT * FROM t1;
a
a
DROP TABLE t1;
CREATE TABLE t1 (a TEXT COLLATE utf8mb3_general_ci UNIQUE);
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` text CHARACTER SET utf8 COLLATE utf8_general_ci DEFAULT NULL,
UNIQUE KEY `a` (`a`) USING HASH
) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
INSERT INTO t1 VALUES ('a');
INSERT INTO t1 VALUES ('ä');
ERROR 23000: Duplicate entry 'ä' for key 'a'
SELECT * FROM t1;
a
a
DROP TABLE t1;
CREATE TABLE t1 (
a LONGTEXT COLLATE utf8mb3_general_ci,
UNIQUE KEY(a(10)) USING HASH
);
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` longtext CHARACTER SET utf8 COLLATE utf8_general_ci DEFAULT NULL,
UNIQUE KEY `a` (`a`(10)) USING HASH
) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
INSERT INTO t1 VALUES ('a');
INSERT INTO t1 VALUES ('ä');
ERROR 23000: Duplicate entry 'ä' for key 'a'
SELECT * FROM t1;
a
a
DROP TABLE t1;
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` text CHARACTER SET utf8 COLLATE utf8_general_ci DEFAULT NULL,
UNIQUE KEY `a` (`a`) USING HASH
) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
SELECT a, OCTET_LENGTH(a) FROM t1 ORDER BY BINARY a;
a OCTET_LENGTH(a)
a 1
ä 2
CHECK TABLE t1;
Table Op Msg_type Msg_text
test.t1 check error Upgrade required. Please do "REPAIR TABLE `t1`" or dump/reload to fix it!
INSERT INTO t1 VALUES ('A');
ERROR 23000: Duplicate entry 'A' for key 'a'
INSERT INTO t1 VALUES ('Ä');
ERROR 23000: Duplicate entry 'Ä' for key 'a'
INSERT INTO t1 VALUES ('Ấ');
SELECT a, OCTET_LENGTH(a) FROM t1 ORDER BY BINARY a;
a OCTET_LENGTH(a)
a 1
ä 2
Ấ 3
CHECK TABLE t1;
Table Op Msg_type Msg_text
test.t1 check error Upgrade required. Please do "REPAIR TABLE `t1`" or dump/reload to fix it!
ALTER TABLE t1 FORCE;
ERROR 23000: Duplicate entry 'ä' for key 'a'
DELETE FROM t1 WHERE OCTET_LENGTH(a)>1;
ALTER TABLE t1 FORCE;
INSERT INTO t1 VALUES ('ä');
ERROR 23000: Duplicate entry 'ä' for key 'a'
DROP TABLE t1;
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` text CHARACTER SET utf8 COLLATE utf8_general_ci DEFAULT NULL,
UNIQUE KEY `a` (`a`) USING HASH
) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
SELECT a, OCTET_LENGTH(a) FROM t1 ORDER BY BINARY a;
a OCTET_LENGTH(a)
a 1
ä 2
ALTER IGNORE TABLE t1 FORCE;
SELECT a, OCTET_LENGTH(a) FROM t1 ORDER BY BINARY a;
a OCTET_LENGTH(a)
a 1
DROP TABLE t1;
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` text CHARACTER SET utf8 COLLATE utf8_general_ci DEFAULT NULL,
UNIQUE KEY `a` (`a`) USING HASH
) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
SELECT a, OCTET_LENGTH(a) FROM t1 ORDER BY BINARY a;
a OCTET_LENGTH(a)
a 1
ä 2
REPAIR TABLE t1;
Table Op Msg_type Msg_text
test.t1 repair Warning Number of rows changed from 2 to 1
test.t1 repair status OK
SELECT a, OCTET_LENGTH(a) FROM t1 ORDER BY BINARY a;
a OCTET_LENGTH(a)
a 1
DROP TABLE t1;
#
# End of 10.4 tests
#
#
# Start of 10.5 tests # Start of 10.5 tests
# #
# #

View File

@ -2313,6 +2313,164 @@ VALUES (_latin1 0xDF) UNION VALUES(_utf8'a' COLLATE utf8_bin);
--echo # --echo #
--echo #
--echo # Start of 10.4 tests
--echo #
--echo #
--echo # MDEV-27653 long uniques don't work with unicode collations
--echo #
SET NAMES utf8mb3;
# CHAR
CREATE TABLE t1 (
a CHAR(30) COLLATE utf8mb3_general_ci,
UNIQUE KEY(a) USING HASH
);
SHOW CREATE TABLE t1;
INSERT INTO t1 VALUES ('a');
--error ER_DUP_ENTRY
INSERT INTO t1 VALUES ('ä');
SELECT * FROM t1;
DROP TABLE t1;
CREATE TABLE t1 (
a CHAR(30) COLLATE utf8mb3_general_ci,
UNIQUE KEY(a(10)) USING HASH
);
SHOW CREATE TABLE t1;
INSERT INTO t1 VALUES ('a');
--error ER_DUP_ENTRY
INSERT INTO t1 VALUES ('ä');
SELECT * FROM t1;
DROP TABLE t1;
# VARCHAR
CREATE TABLE t1 (
a VARCHAR(30) COLLATE utf8mb3_general_ci,
UNIQUE KEY(a) USING HASH
);
SHOW CREATE TABLE t1;
INSERT INTO t1 VALUES ('a');
--error ER_DUP_ENTRY
INSERT INTO t1 VALUES ('ä');
SELECT * FROM t1;
DROP TABLE t1;
CREATE TABLE t1 (
a VARCHAR(30) COLLATE utf8mb3_general_ci,
UNIQUE KEY(a(10)) USING HASH
);
SHOW CREATE TABLE t1;
INSERT INTO t1 VALUES ('a');
--error ER_DUP_ENTRY
INSERT INTO t1 VALUES ('ä');
SELECT * FROM t1;
DROP TABLE t1;
# TEXT
CREATE TABLE t1 (a TEXT COLLATE utf8mb3_general_ci UNIQUE);
SHOW CREATE TABLE t1;
INSERT INTO t1 VALUES ('a');
--error ER_DUP_ENTRY
INSERT INTO t1 VALUES ('ä');
SELECT * FROM t1;
DROP TABLE t1;
CREATE TABLE t1 (
a LONGTEXT COLLATE utf8mb3_general_ci,
UNIQUE KEY(a(10)) USING HASH
);
SHOW CREATE TABLE t1;
INSERT INTO t1 VALUES ('a');
--error ER_DUP_ENTRY
INSERT INTO t1 VALUES ('ä');
SELECT * FROM t1;
DROP TABLE t1;
# Testing upgrade:
# Prior to MDEV-27653, the UNIQUE HASH function erroneously
# took into account string octet length.
# Old tables should still open and work, but with wrong results.
copy_file std_data/mysql_upgrade/mdev27653_100422_myisam_text.frm $MYSQLD_DATADIR/test/t1.frm;
copy_file std_data/mysql_upgrade/mdev27653_100422_myisam_text.MYD $MYSQLD_DATADIR/test/t1.MYD;
copy_file std_data/mysql_upgrade/mdev27653_100422_myisam_text.MYI $MYSQLD_DATADIR/test/t1.MYI;
SHOW CREATE TABLE t1;
SELECT a, OCTET_LENGTH(a) FROM t1 ORDER BY BINARY a;
CHECK TABLE t1;
# There is already a one-byte value 'a' in the table
--error ER_DUP_ENTRY
INSERT INTO t1 VALUES ('A');
# There is already a two-byte value 'ä' in the table
--error ER_DUP_ENTRY
INSERT INTO t1 VALUES ('Ä');
# There were no three-byte values in the table so far.
# The below value violates UNIQUE, but it gets inserted.
# This is wrong but expected for a pre-MDEV-27653 table.
INSERT INTO t1 VALUES ('Ấ');
SELECT a, OCTET_LENGTH(a) FROM t1 ORDER BY BINARY a;
CHECK TABLE t1;
# ALTER FORCE fails: it tries to rebuild the table
# with a correct UNIQUE HASH function, but there are duplicates!
--error ER_DUP_ENTRY
ALTER TABLE t1 FORCE;
# Let's remove all duplicate values, so only the one-byte 'a' stays.
# ALTER..FORCE should work after that.
DELETE FROM t1 WHERE OCTET_LENGTH(a)>1;
ALTER TABLE t1 FORCE;
# Make sure that 'a' and 'ä' cannot co-exist any more,
# because the table was recreated with a correct UNIQUE HASH function.
--error ER_DUP_ENTRY
INSERT INTO t1 VALUES ('ä');
DROP TABLE t1;
#
# Testing an old table with ALTER IGNORE.
# The table is expected to be rebuilt with a new hash function,
# so the duplicates go away.
#
copy_file std_data/mysql_upgrade/mdev27653_100422_myisam_text.frm $MYSQLD_DATADIR/test/t1.frm;
copy_file std_data/mysql_upgrade/mdev27653_100422_myisam_text.MYD $MYSQLD_DATADIR/test/t1.MYD;
copy_file std_data/mysql_upgrade/mdev27653_100422_myisam_text.MYI $MYSQLD_DATADIR/test/t1.MYI;
SHOW CREATE TABLE t1;
SELECT a, OCTET_LENGTH(a) FROM t1 ORDER BY BINARY a;
ALTER IGNORE TABLE t1 FORCE;
SELECT a, OCTET_LENGTH(a) FROM t1 ORDER BY BINARY a;
DROP TABLE t1;
#
# Testing an old table with REPAIR.
# The table is expected to be rebuilt with a new hash function,
# so the duplicates go away.
#
copy_file std_data/mysql_upgrade/mdev27653_100422_myisam_text.frm $MYSQLD_DATADIR/test/t1.frm;
copy_file std_data/mysql_upgrade/mdev27653_100422_myisam_text.MYD $MYSQLD_DATADIR/test/t1.MYD;
copy_file std_data/mysql_upgrade/mdev27653_100422_myisam_text.MYI $MYSQLD_DATADIR/test/t1.MYI;
SHOW CREATE TABLE t1;
SELECT a, OCTET_LENGTH(a) FROM t1 ORDER BY BINARY a;
REPAIR TABLE t1;
SELECT a, OCTET_LENGTH(a) FROM t1 ORDER BY BINARY a;
DROP TABLE t1;
--echo #
--echo # End of 10.4 tests
--echo #
--echo # --echo #
--echo # Start of 10.5 tests --echo # Start of 10.5 tests
--echo # --echo #

View File

@ -18162,6 +18162,69 @@ DROP TABLE transaction_items;
DROP TABLE transactions; DROP TABLE transactions;
DROP TABLE charges; DROP TABLE charges;
DROP TABLE ledgers; DROP TABLE ledgers;
#
# MDEV-30081: Splitting from a constant mergeable derived table
# used in inner part of an outer join.
#
CREATE TABLE t1 ( id int PRIMARY KEY ) ENGINE=MyISAM;
INSERT INTO t1 VALUES (3),(4),(7);
CREATE TABLE t2 (
id int, id1 int, wid int, PRIMARY KEY (id), KEY (id1), KEY (wid)
) ENGINE=MyISAM;
INSERT INTO t2 VALUES (4,4,6),(7,7,7);
CREATE TABLE t3 (
wid int, wtid int, otid int, oid int,
PRIMARY KEY (wid), KEY (wtid), KEY (otid), KEY (oid)
) ENGINE=MyISAM;
INSERT INTO t3 VALUES (6,30,6,6),(7,17,7,7);
CREATE TABLE t4 ( id int, a int, PRIMARY KEY (id), KEY (a) ) ENGINE=MyISAM;
INSERT INTO t4 VALUES (1,17),(2,15),(3,49),(4,3),(5,45),(6,38),(7,17);
CREATE TABLE t5 (
id int, id1 int, PRIMARY KEY (id), KEY id1 (id1)
) ENGINE=MyISAM ;
INSERT INTO t5 VALUES (1,17),(2,15),(3,49),(4,3),(5,45),(6,38),(7,17);
ANALYZE TABLE t1,t2,t3,t4,t5;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
test.t3 analyze status Engine-independent statistics collected
test.t3 analyze status OK
test.t4 analyze status Engine-independent statistics collected
test.t4 analyze status OK
test.t5 analyze status Engine-independent statistics collected
test.t5 analyze status OK
CREATE VIEW v1 AS (SELECT id1 FROM t5 GROUP BY id1);
SELECT t3.*, t1.id AS t1_id, t2.id AS t2_id, dt.*, v1.*
FROM
t1, t2, t3
LEFT JOIN
(SELECT t4.* FROM t4 WHERE t4.a=3) dt
ON t3.oid = dt.id AND t3.otid = 14
LEFT JOIN v1
ON (v1.id1 = dt.a)
WHERE t3.oid = t1.id AND t3.oid = t2.id AND t3.wid = 7;
wid wtid otid oid t1_id t2_id id a id1
7 17 7 7 7 7 NULL NULL NULL
EXPLAIN SELECT t3.*, t1.id AS t1_id, t2.id AS t2_id, dt.*, v1.*
FROM
t1, t2, t3
LEFT JOIN
(SELECT t4.* FROM t4 WHERE t4.a=3) dt
ON t3.oid = dt.id AND t3.otid = 14
LEFT JOIN v1
ON (v1.id1 = dt.a)
WHERE t3.oid = t1.id AND t3.oid = t2.id AND t3.wid = 7;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t3 const PRIMARY,oid PRIMARY 4 const 1
1 PRIMARY t1 const PRIMARY PRIMARY 4 const 1 Using index
1 PRIMARY t2 const PRIMARY PRIMARY 4 const 1 Using index
1 PRIMARY t4 const PRIMARY,a NULL NULL NULL 1 Impossible ON condition
1 PRIMARY <derived3> ref key0 key0 5 const 0 Using where
3 LATERAL DERIVED t5 ref id1 id1 5 const 0 Using index
DROP VIEW v1;
DROP TABLE t1,t2,t3,t4,t5;
# End of 10.3 tests # End of 10.3 tests
# #
# MDEV-18679: materialized view with SELECT S containing materialized # MDEV-18679: materialized view with SELECT S containing materialized

View File

@ -3870,6 +3870,55 @@ DROP TABLE transactions;
DROP TABLE charges; DROP TABLE charges;
DROP TABLE ledgers; DROP TABLE ledgers;
--echo #
--echo # MDEV-30081: Splitting from a constant mergeable derived table
--echo # used in inner part of an outer join.
--echo #
CREATE TABLE t1 ( id int PRIMARY KEY ) ENGINE=MyISAM;
INSERT INTO t1 VALUES (3),(4),(7);
CREATE TABLE t2 (
id int, id1 int, wid int, PRIMARY KEY (id), KEY (id1), KEY (wid)
) ENGINE=MyISAM;
INSERT INTO t2 VALUES (4,4,6),(7,7,7);
CREATE TABLE t3 (
wid int, wtid int, otid int, oid int,
PRIMARY KEY (wid), KEY (wtid), KEY (otid), KEY (oid)
) ENGINE=MyISAM;
INSERT INTO t3 VALUES (6,30,6,6),(7,17,7,7);
CREATE TABLE t4 ( id int, a int, PRIMARY KEY (id), KEY (a) ) ENGINE=MyISAM;
INSERT INTO t4 VALUES (1,17),(2,15),(3,49),(4,3),(5,45),(6,38),(7,17);
CREATE TABLE t5 (
id int, id1 int, PRIMARY KEY (id), KEY id1 (id1)
) ENGINE=MyISAM ;
INSERT INTO t5 VALUES (1,17),(2,15),(3,49),(4,3),(5,45),(6,38),(7,17);
ANALYZE TABLE t1,t2,t3,t4,t5;
CREATE VIEW v1 AS (SELECT id1 FROM t5 GROUP BY id1);
let $q=
SELECT t3.*, t1.id AS t1_id, t2.id AS t2_id, dt.*, v1.*
FROM
t1, t2, t3
LEFT JOIN
(SELECT t4.* FROM t4 WHERE t4.a=3) dt
ON t3.oid = dt.id AND t3.otid = 14
LEFT JOIN v1
ON (v1.id1 = dt.a)
WHERE t3.oid = t1.id AND t3.oid = t2.id AND t3.wid = 7;
eval $q;
eval EXPLAIN $q;
DROP VIEW v1;
DROP TABLE t1,t2,t3,t4,t5;
--echo # End of 10.3 tests --echo # End of 10.3 tests
--echo # --echo #

View File

@ -1,3 +1,6 @@
#
# MDEV-18707 Server crash in my_hash_sort_bin, ASAN heap-use-after-free in Field::is_null, server hang, corrupted double-linked list
#
create table t1 (a int, b int, c int, d int, e int); create table t1 (a int, b int, c int, d int, e int);
insert into t1 () values insert into t1 () values
(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(), (),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),
@ -11,6 +14,9 @@ load data infile 'load.data' into table tmp;
delete from tmp; delete from tmp;
drop table t1; drop table t1;
drop table tmp; drop table tmp;
#
# MDEV-18712 InnoDB indexes are inconsistent with what defined in .frm for table after rebuilding table with index on blob
#
create table t1 (b blob) engine=innodb; create table t1 (b blob) engine=innodb;
alter table t1 add unique (b); alter table t1 add unique (b);
alter table t1 force; alter table t1 force;
@ -21,12 +27,18 @@ t1 CREATE TABLE `t1` (
UNIQUE KEY `b` (`b`) USING HASH UNIQUE KEY `b` (`b`) USING HASH
) ENGINE=InnoDB DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci ) ENGINE=InnoDB DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
drop table t1; drop table t1;
#
# MDEV-18713 Assertion `strcmp(share->unique_file_name,filename) || share->last_version' failed in test_if_reopen upon REPLACE into table with key on blob
#
create table t1 (pk int, b blob, primary key(pk), unique(b)) engine=myisam; create table t1 (pk int, b blob, primary key(pk), unique(b)) engine=myisam;
insert into t1 values (1,'foo'); insert into t1 values (1,'foo');
replace into t1 (pk) values (1); replace into t1 (pk) values (1);
alter table t1 force; alter table t1 force;
replace into t1 (pk) values (1); replace into t1 (pk) values (1);
drop table t1; drop table t1;
#
# MDEV-18722 Assertion `templ->mysql_null_bit_mask' failed in row_sel_store_mysql_rec upon modifying indexed column into blob
#
create table t1 (t time, unique(t)) engine=innodb; create table t1 (t time, unique(t)) engine=innodb;
insert into t1 values (null),(null); insert into t1 values (null),(null);
alter ignore table t1 modify t text not null default ''; alter ignore table t1 modify t text not null default '';
@ -34,6 +46,9 @@ Warnings:
Warning 1265 Data truncated for column 't' at row 1 Warning 1265 Data truncated for column 't' at row 1
Warning 1265 Data truncated for column 't' at row 2 Warning 1265 Data truncated for column 't' at row 2
drop table t1; drop table t1;
#
# MDEV-18720 Assertion `inited==NONE' failed in ha_index_init upon update on versioned table with key on blob
#
create table t1 ( pk int, f text, primary key (pk), unique(f)) with system versioning; create table t1 ( pk int, f text, primary key (pk), unique(f)) with system versioning;
insert into t1 values (1,'foo'); insert into t1 values (1,'foo');
update t1 set f = 'bar'; update t1 set f = 'bar';
@ -50,20 +65,32 @@ pk f row_end > DATE'2030-01-01'
1 foo 0 1 foo 0
1 bar 0 1 bar 0
drop table t1; drop table t1;
#
# MDEV-18747 InnoDB: Failing assertion: table->get_ref_count() == 0 upon dropping temporary table with unique blob
#
create temporary table t1 (f blob, unique(f)) engine=innodb; create temporary table t1 (f blob, unique(f)) engine=innodb;
insert into t1 values (1); insert into t1 values (1);
replace into t1 values (1); replace into t1 values (1);
drop table t1; drop table t1;
#
# MDEV-18748 REPLACE doesn't work with unique blobs on MyISAM table
#
create table t (b blob, unique(b)) engine=myisam; create table t (b blob, unique(b)) engine=myisam;
insert into t values ('foo'); insert into t values ('foo');
replace into t values ('foo'); replace into t values ('foo');
drop table t; drop table t;
#
# MDEV-18790 Server crash in fields_in_hash_keyinfo after unsuccessful attempt to drop BLOB with long index
#
CREATE TABLE t1 (f INT, x BLOB, UNIQUE (x)); CREATE TABLE t1 (f INT, x BLOB, UNIQUE (x));
INSERT INTO t1 VALUES (1,'foo'); INSERT INTO t1 VALUES (1,'foo');
ALTER TABLE t1 DROP x, ALGORITHM=INPLACE; ALTER TABLE t1 DROP x, ALGORITHM=INPLACE;
ERROR 0A000: ALGORITHM=INPLACE is not supported for this operation. Try ALGORITHM=COPY ERROR 0A000: ALGORITHM=INPLACE is not supported for this operation. Try ALGORITHM=COPY
UPDATE t1 SET x = 'bar'; UPDATE t1 SET x = 'bar';
DROP TABLE t1; DROP TABLE t1;
#
# MDEV-18799 Long unique does not work after failed alter table
#
create table t1(a blob unique , b blob); create table t1(a blob unique , b blob);
insert into t1 values(1,1),(2,1); insert into t1 values(1,1),(2,1);
alter table t1 add unique(b); alter table t1 add unique(b);
@ -85,16 +112,26 @@ Index_comment
insert into t1 values(1,1); insert into t1 values(1,1);
ERROR 23000: Duplicate entry '1' for key 'a' ERROR 23000: Duplicate entry '1' for key 'a'
DROP TABLE t1; DROP TABLE t1;
#
# MDEV-18792 ASAN unknown-crash in _mi_pack_key upon UPDATE after failed ALTER on a table with long BLOB key
#
CREATE TABLE t1 (a TEXT, b INT, UNIQUE(a)) ENGINE=MyISAM; CREATE TABLE t1 (a TEXT, b INT, UNIQUE(a)) ENGINE=MyISAM;
ALTER TABLE t1 DROP x; ALTER TABLE t1 DROP x;
ERROR 42000: Can't DROP COLUMN `x`; check that it exists ERROR 42000: Can't DROP COLUMN `x`; check that it exists
UPDATE t1 SET b = 0 WHERE a = 'foo'; UPDATE t1 SET b = 0 WHERE a = 'foo';
DROP TABLE t1; DROP TABLE t1;
#
# MDEV-18793 Assertion `0' failed in row_sel_convert_mysql_key_to_innobase, ASAN unknown-crash in
# row_mysql_store_col_in_innobase_format, warning " InnoDB: Using a partial-field key prefix in search"
#
CREATE TABLE t1 (a TEXT, b INT, UNIQUE(a)) ENGINE=InnoDB; CREATE TABLE t1 (a TEXT, b INT, UNIQUE(a)) ENGINE=InnoDB;
ALTER TABLE t1 DROP x; ALTER TABLE t1 DROP x;
ERROR 42000: Can't DROP COLUMN `x`; check that it exists ERROR 42000: Can't DROP COLUMN `x`; check that it exists
UPDATE t1 SET b = 0 WHERE a = 'foo'; UPDATE t1 SET b = 0 WHERE a = 'foo';
DROP TABLE t1; DROP TABLE t1;
#
# MDEV-18795 InnoDB: Failing assertion: field->prefix_len > 0 upon DML on table with BLOB index
#
CREATE TEMPORARY TABLE t1 (f BLOB, UNIQUE(f)) ENGINE=InnoDB ROW_FORMAT=COMPACT; CREATE TEMPORARY TABLE t1 (f BLOB, UNIQUE(f)) ENGINE=InnoDB ROW_FORMAT=COMPACT;
ALTER TABLE t1 ADD KEY (f); ALTER TABLE t1 ADD KEY (f);
ERROR HY000: Index column size too large. The maximum column size is 767 bytes ERROR HY000: Index column size too large. The maximum column size is 767 bytes
@ -102,17 +139,29 @@ TRUNCATE TABLE t1;
SELECT * FROM t1 WHERE f LIKE 'foo'; SELECT * FROM t1 WHERE f LIKE 'foo';
f f
DROP TABLE t1; DROP TABLE t1;
#
# MDEV-18798 InnoDB: No matching column for `DB_ROW_HASH_1`and server crash in
# ha_innobase::commit_inplace_alter_table upon ALTER on table with UNIQUE key
#
CREATE TABLE t1 (a INT, UNIQUE ind USING HASH (a)) ENGINE=InnoDB; CREATE TABLE t1 (a INT, UNIQUE ind USING HASH (a)) ENGINE=InnoDB;
ALTER TABLE t1 CHANGE COLUMN IF EXISTS b a INT; ALTER TABLE t1 CHANGE COLUMN IF EXISTS b a INT;
Warnings: Warnings:
Note 1054 Unknown column 'b' in 't1' Note 1054 Unknown column 'b' in 't1'
DROP TABLE t1; DROP TABLE t1;
#
# MDEV-18801 InnoDB: Failing assertion: field->col->mtype == type or ASAN heap-buffer-overflow
# in row_sel_convert_mysql_key_to_innobase upon SELECT on table with long index
#
CREATE TABLE t1 (f VARCHAR(4096), UNIQUE(f)) ENGINE=InnoDB; CREATE TABLE t1 (f VARCHAR(4096), UNIQUE(f)) ENGINE=InnoDB;
ALTER TABLE t1 DROP x; ALTER TABLE t1 DROP x;
ERROR 42000: Can't DROP COLUMN `x`; check that it exists ERROR 42000: Can't DROP COLUMN `x`; check that it exists
SELECT * FROM t1 WHERE f LIKE 'foo'; SELECT * FROM t1 WHERE f LIKE 'foo';
f f
DROP TABLE t1; DROP TABLE t1;
#
# MDEV-18800 Server crash in instant_alter_column_possible or
# Assertion `!pk->has_virtual()' failed in instant_alter_column_possible upon adding key
#
CREATE TABLE t1 (pk INT, PRIMARY KEY USING HASH (pk)) ENGINE=InnoDB; CREATE TABLE t1 (pk INT, PRIMARY KEY USING HASH (pk)) ENGINE=InnoDB;
show keys from t1;; show keys from t1;;
Table t1 Table t1
@ -130,6 +179,9 @@ Comment
Index_comment Index_comment
ALTER TABLE t1 ADD INDEX (pk); ALTER TABLE t1 ADD INDEX (pk);
DROP TABLE t1; DROP TABLE t1;
#
# MDEV-18922 Alter on long unique varchar column makes result null
#
CREATE TABLE t1 (b int, a varchar(4000)); CREATE TABLE t1 (b int, a varchar(4000));
INSERT INTO t1 VALUES (1, 2),(2,3),(3,4); INSERT INTO t1 VALUES (1, 2),(2,3),(3,4);
ALTER TABLE t1 ADD UNIQUE INDEX (a); ALTER TABLE t1 ADD UNIQUE INDEX (a);
@ -144,6 +196,10 @@ a
3 3
4 4
drop table t1; drop table t1;
#
# MDEV-18809 Server crash in fields_in_hash_keyinfo or Assertion `key_info->key_part->field->flags
# & (1<< 30)' failed in setup_keyinfo_hash
#
CREATE TABLE t1 (f VARCHAR(4096), UNIQUE(f)) ENGINE=InnoDB; CREATE TABLE t1 (f VARCHAR(4096), UNIQUE(f)) ENGINE=InnoDB;
ALTER TABLE t1 DROP KEY f, ADD INDEX idx1(f), ALGORITHM=INSTANT; ALTER TABLE t1 DROP KEY f, ADD INDEX idx1(f), ALGORITHM=INSTANT;
ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: ADD INDEX. Try ALGORITHM=NOCOPY ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: ADD INDEX. Try ALGORITHM=NOCOPY
@ -159,6 +215,9 @@ insert into t1 values(1,1);
ERROR 23000: Duplicate entry '1-1' for key 'a' ERROR 23000: Duplicate entry '1-1' for key 'a'
alter table t1 add column c int; alter table t1 add column c int;
drop table t1; drop table t1;
#
# MDEV-18889 Long unique on virtual fields crashes server
#
create table t1(a blob , b blob as (a) unique); create table t1(a blob , b blob as (a) unique);
insert into t1 values(1, default); insert into t1 values(1, default);
insert into t1 values(1, default); insert into t1 values(1, default);
@ -172,6 +231,9 @@ insert into t1(a,b) values(2,2);
insert into t1(a,b) values(2,3); insert into t1(a,b) values(2,3);
insert into t1(a,b) values(3,2); insert into t1(a,b) values(3,2);
drop table t1; drop table t1;
#
# MDEV-18888 Server crashes in Item_field::register_field_in_read_map upon MODIFY COLUMN
#
CREATE TABLE t1 ( CREATE TABLE t1 (
a CHAR(128), a CHAR(128),
b CHAR(128) AS (a), b CHAR(128) AS (a),
@ -187,6 +249,9 @@ c varchar(5000),
UNIQUE(c,b(64)) UNIQUE(c,b(64))
) ENGINE=InnoDB; ) ENGINE=InnoDB;
drop table t1; drop table t1;
#
# MDEV-18967 Load data in system version with long unique does not work
#
CREATE TABLE t1 (data VARCHAR(4), unique(data) using hash) with system versioning; CREATE TABLE t1 (data VARCHAR(4), unique(data) using hash) with system versioning;
INSERT INTO t1 VALUES ('A'); INSERT INTO t1 VALUES ('A');
SELECT * INTO OUTFILE 'load.data' from t1; SELECT * INTO OUTFILE 'load.data' from t1;
@ -196,6 +261,9 @@ select * from t1;
data data
A A
DROP TABLE t1; DROP TABLE t1;
#
# MDEV-18901 Wrong results after ADD UNIQUE INDEX(blob_column)
#
CREATE TABLE t1 (data VARCHAR(7961)) ENGINE=InnoDB; CREATE TABLE t1 (data VARCHAR(7961)) ENGINE=InnoDB;
INSERT INTO t1 VALUES ('f'), ('o'), ('o'); INSERT INTO t1 VALUES ('f'), ('o'), ('o');
SELECT * INTO OUTFILE 'load.data' from t1; SELECT * INTO OUTFILE 'load.data' from t1;
@ -214,12 +282,16 @@ SELECT * FROM t1;
data data
f f
o o
# This should be equivalent to the REPLACE above
LOAD DATA INFILE 'load.data' REPLACE INTO TABLE t1; LOAD DATA INFILE 'load.data' REPLACE INTO TABLE t1;
SELECT * FROM t1; SELECT * FROM t1;
data data
f f
o o
DROP TABLE t1; DROP TABLE t1;
#
# MDEV-18953 Hash index on partial char field not working
#
create table t1 ( create table t1 (
c char(10) character set utf8mb4, c char(10) character set utf8mb4,
unique key a using hash (c(1)) unique key a using hash (c(1))
@ -236,10 +308,16 @@ ERROR 23000: Duplicate entry '
insert into t1 values ('ббб'); insert into t1 values ('ббб');
ERROR 23000: Duplicate entry 'Ð' for key 'a' ERROR 23000: Duplicate entry 'Ð' for key 'a'
drop table t1; drop table t1;
#
# MDEV-18904 Assertion `m_part_spec.start_part >= m_part_spec.end_part' failed in ha_partition::index_read_idx_map
#
CREATE TABLE t1 (a INT, UNIQUE USING HASH (a)) PARTITION BY HASH (a) PARTITIONS 2; CREATE TABLE t1 (a INT, UNIQUE USING HASH (a)) PARTITION BY HASH (a) PARTITIONS 2;
INSERT INTO t1 VALUES (2); INSERT INTO t1 VALUES (2);
REPLACE INTO t1 VALUES (2); REPLACE INTO t1 VALUES (2);
DROP TABLE t1; DROP TABLE t1;
#
# MDEV-18820 Assertion `lock_table_has(trx, index->table, LOCK_IX)' failed in lock_rec_insert_check_and_lock upon INSERT into table with blob key'
#
set innodb_lock_wait_timeout= 10; set innodb_lock_wait_timeout= 10;
CREATE TABLE t1 ( CREATE TABLE t1 (
id int primary key, id int primary key,
@ -266,11 +344,20 @@ ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
disconnect con1; disconnect con1;
connection default; connection default;
DROP TABLE t1, t2; DROP TABLE t1, t2;
#
# MDEV-18791 Wrong error upon creating Aria table with long index on BLOB
#
CREATE TABLE t1 (a TEXT, UNIQUE(a)) ENGINE=Aria; CREATE TABLE t1 (a TEXT, UNIQUE(a)) ENGINE=Aria;
ERROR 42000: Specified key was too long; max key length is 2000 bytes ERROR 42000: Specified key was too long; max key length is 2000 bytes
#
# MDEV-20001 Potential dangerous regression: INSERT INTO >=100 rows fail for myisam table with HASH indexes
#
create table t1(a int, unique(a) using hash); create table t1(a int, unique(a) using hash);
#BULK insert > 100 rows (MI_MIN_ROWS_TO_DISABLE_INDEXES) #BULK insert > 100 rows (MI_MIN_ROWS_TO_DISABLE_INDEXES)
drop table t1; drop table t1;
#
# MDEV-21804 Assertion `marked_for_read()' failed upon INSERT into table with long unique blob under binlog_row_image=NOBLOB
#
SET binlog_row_image= NOBLOB; SET binlog_row_image= NOBLOB;
CREATE TABLE t1 (pk INT PRIMARY KEY, a text ,UNIQUE(a) using hash); CREATE TABLE t1 (pk INT PRIMARY KEY, a text ,UNIQUE(a) using hash);
INSERT INTO t1 VALUES (1,'foo'); INSERT INTO t1 VALUES (1,'foo');
@ -278,6 +365,9 @@ create table t2(id int primary key, a blob, b varchar(20) as (LEFT(a,2)));
INSERT INTO t2 VALUES (1, 'foo', default); INSERT INTO t2 VALUES (1, 'foo', default);
DROP TABLE t1, t2; DROP TABLE t1, t2;
SET binlog_row_image= FULL; SET binlog_row_image= FULL;
#
# MDEV-22719 Long unique keys are not created when individual key_part->length < max_key_length but SUM(key_parts->length) > max_key_length
#
CREATE TABLE t1 (a int, b VARCHAR(1000), UNIQUE (a,b)) ENGINE=MyISAM; CREATE TABLE t1 (a int, b VARCHAR(1000), UNIQUE (a,b)) ENGINE=MyISAM;
show index from t1; show index from t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
@ -315,6 +405,9 @@ ERROR 23000: Duplicate entry '1' for key 'v2'
update t1,t2 set v1 = v2 , v5 = 0; update t1,t2 set v1 = v2 , v5 = 0;
ERROR 23000: Duplicate entry '-128' for key 'v1' ERROR 23000: Duplicate entry '-128' for key 'v1'
drop table t1, t2; drop table t1, t2;
#
# MDEV-23264 Unique blobs allow duplicate values upon UPDATE
#
CREATE TABLE t1 (f TEXT UNIQUE); CREATE TABLE t1 (f TEXT UNIQUE);
INSERT INTO t1 VALUES (NULL),(NULL); INSERT INTO t1 VALUES (NULL),(NULL);
UPDATE t1 SET f = ''; UPDATE t1 SET f = '';
@ -344,6 +437,18 @@ partition n0 values less than (10),
partition n1 values less than (50)); partition n1 values less than (50));
drop table t1; drop table t1;
# #
# MDEV-29199 Unique hash key is ignored upon INSERT ... SELECT into non-empty MyISAM table
#
create table t1 (a int, b text, unique(b)) engine=MyISAM;
insert into t1 values (0,'aa');
insert into t1 (a,b) select 1,'xxx' from seq_1_to_5;
ERROR 23000: Duplicate entry 'xxx' for key 'b'
select * from t1;
a b
0 aa
1 xxx
drop table t1;
#
# End of 10.4 tests # End of 10.4 tests
# #
# #

View File

@ -1,9 +1,10 @@
--source include/have_innodb.inc --source include/have_innodb.inc
--source include/have_partition.inc --source include/have_partition.inc
--source include/have_sequence.inc
# --echo #
# MDEV-18707 Server crash in my_hash_sort_bin, ASAN heap-use-after-free in Field::is_null, server hang, corrupted double-linked list --echo # MDEV-18707 Server crash in my_hash_sort_bin, ASAN heap-use-after-free in Field::is_null, server hang, corrupted double-linked list
# --echo #
create table t1 (a int, b int, c int, d int, e int); create table t1 (a int, b int, c int, d int, e int);
insert into t1 () values insert into t1 () values
(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(), (),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),
@ -20,18 +21,18 @@ drop table t1;
--remove_file $datadir/test/load.data --remove_file $datadir/test/load.data
drop table tmp; drop table tmp;
# --echo #
# MDEV-18712 InnoDB indexes are inconsistent with what defined in .frm for table after rebuilding table with index on blob --echo # MDEV-18712 InnoDB indexes are inconsistent with what defined in .frm for table after rebuilding table with index on blob
# --echo #
create table t1 (b blob) engine=innodb; create table t1 (b blob) engine=innodb;
alter table t1 add unique (b); alter table t1 add unique (b);
alter table t1 force; alter table t1 force;
show create table t1; show create table t1;
drop table t1; drop table t1;
# --echo #
# MDEV-18713 Assertion `strcmp(share->unique_file_name,filename) || share->last_version' failed in test_if_reopen upon REPLACE into table with key on blob --echo # MDEV-18713 Assertion `strcmp(share->unique_file_name,filename) || share->last_version' failed in test_if_reopen upon REPLACE into table with key on blob
# --echo #
create table t1 (pk int, b blob, primary key(pk), unique(b)) engine=myisam; create table t1 (pk int, b blob, primary key(pk), unique(b)) engine=myisam;
insert into t1 values (1,'foo'); insert into t1 values (1,'foo');
replace into t1 (pk) values (1); replace into t1 (pk) values (1);
@ -39,17 +40,17 @@ alter table t1 force;
replace into t1 (pk) values (1); replace into t1 (pk) values (1);
drop table t1; drop table t1;
# --echo #
# MDEV-18722 Assertion `templ->mysql_null_bit_mask' failed in row_sel_store_mysql_rec upon modifying indexed column into blob --echo # MDEV-18722 Assertion `templ->mysql_null_bit_mask' failed in row_sel_store_mysql_rec upon modifying indexed column into blob
# --echo #
create table t1 (t time, unique(t)) engine=innodb; create table t1 (t time, unique(t)) engine=innodb;
insert into t1 values (null),(null); insert into t1 values (null),(null);
alter ignore table t1 modify t text not null default ''; alter ignore table t1 modify t text not null default '';
drop table t1; drop table t1;
# --echo #
# MDEV-18720 Assertion `inited==NONE' failed in ha_index_init upon update on versioned table with key on blob --echo # MDEV-18720 Assertion `inited==NONE' failed in ha_index_init upon update on versioned table with key on blob
# --echo #
create table t1 ( pk int, f text, primary key (pk), unique(f)) with system versioning; create table t1 ( pk int, f text, primary key (pk), unique(f)) with system versioning;
insert into t1 values (1,'foo'); insert into t1 values (1,'foo');
update t1 set f = 'bar'; update t1 set f = 'bar';
@ -59,25 +60,25 @@ select * from t1;
select pk, f, row_end > DATE'2030-01-01' from t1 for system_time all; select pk, f, row_end > DATE'2030-01-01' from t1 for system_time all;
drop table t1; drop table t1;
# --echo #
# MDEV-18747 InnoDB: Failing assertion: table->get_ref_count() == 0 upon dropping temporary table with unique blob --echo # MDEV-18747 InnoDB: Failing assertion: table->get_ref_count() == 0 upon dropping temporary table with unique blob
# --echo #
create temporary table t1 (f blob, unique(f)) engine=innodb; create temporary table t1 (f blob, unique(f)) engine=innodb;
insert into t1 values (1); insert into t1 values (1);
replace into t1 values (1); replace into t1 values (1);
drop table t1; drop table t1;
# --echo #
# MDEV-18748 REPLACE doesn't work with unique blobs on MyISAM table --echo # MDEV-18748 REPLACE doesn't work with unique blobs on MyISAM table
# --echo #
create table t (b blob, unique(b)) engine=myisam; create table t (b blob, unique(b)) engine=myisam;
insert into t values ('foo'); insert into t values ('foo');
replace into t values ('foo'); replace into t values ('foo');
drop table t; drop table t;
# --echo #
# MDEV-18790 Server crash in fields_in_hash_keyinfo after unsuccessful attempt to drop BLOB with long index --echo # MDEV-18790 Server crash in fields_in_hash_keyinfo after unsuccessful attempt to drop BLOB with long index
# --echo #
CREATE TABLE t1 (f INT, x BLOB, UNIQUE (x)); CREATE TABLE t1 (f INT, x BLOB, UNIQUE (x));
INSERT INTO t1 VALUES (1,'foo'); INSERT INTO t1 VALUES (1,'foo');
--error ER_ALTER_OPERATION_NOT_SUPPORTED --error ER_ALTER_OPERATION_NOT_SUPPORTED
@ -85,9 +86,9 @@ ALTER TABLE t1 DROP x, ALGORITHM=INPLACE;
UPDATE t1 SET x = 'bar'; UPDATE t1 SET x = 'bar';
DROP TABLE t1; DROP TABLE t1;
# --echo #
# MDEV-18799 Long unique does not work after failed alter table --echo # MDEV-18799 Long unique does not work after failed alter table
# --echo #
create table t1(a blob unique , b blob); create table t1(a blob unique , b blob);
insert into t1 values(1,1),(2,1); insert into t1 values(1,1),(2,1);
--error ER_DUP_ENTRY --error ER_DUP_ENTRY
@ -97,28 +98,28 @@ alter table t1 add unique(b);
insert into t1 values(1,1); insert into t1 values(1,1);
DROP TABLE t1; DROP TABLE t1;
# --echo #
# MDEV-18792 ASAN unknown-crash in _mi_pack_key upon UPDATE after failed ALTER on a table with long BLOB key --echo # MDEV-18792 ASAN unknown-crash in _mi_pack_key upon UPDATE after failed ALTER on a table with long BLOB key
# --echo #
CREATE TABLE t1 (a TEXT, b INT, UNIQUE(a)) ENGINE=MyISAM; CREATE TABLE t1 (a TEXT, b INT, UNIQUE(a)) ENGINE=MyISAM;
--error ER_CANT_DROP_FIELD_OR_KEY --error ER_CANT_DROP_FIELD_OR_KEY
ALTER TABLE t1 DROP x; ALTER TABLE t1 DROP x;
UPDATE t1 SET b = 0 WHERE a = 'foo'; UPDATE t1 SET b = 0 WHERE a = 'foo';
DROP TABLE t1; DROP TABLE t1;
# --echo #
# MDEV-18793 Assertion `0' failed in row_sel_convert_mysql_key_to_innobase, ASAN unknown-crash in --echo # MDEV-18793 Assertion `0' failed in row_sel_convert_mysql_key_to_innobase, ASAN unknown-crash in
# row_mysql_store_col_in_innobase_format, warning " InnoDB: Using a partial-field key prefix in search" --echo # row_mysql_store_col_in_innobase_format, warning " InnoDB: Using a partial-field key prefix in search"
# --echo #
CREATE TABLE t1 (a TEXT, b INT, UNIQUE(a)) ENGINE=InnoDB; CREATE TABLE t1 (a TEXT, b INT, UNIQUE(a)) ENGINE=InnoDB;
--error ER_CANT_DROP_FIELD_OR_KEY --error ER_CANT_DROP_FIELD_OR_KEY
ALTER TABLE t1 DROP x; ALTER TABLE t1 DROP x;
UPDATE t1 SET b = 0 WHERE a = 'foo'; UPDATE t1 SET b = 0 WHERE a = 'foo';
DROP TABLE t1; DROP TABLE t1;
# --echo #
# MDEV-18795 InnoDB: Failing assertion: field->prefix_len > 0 upon DML on table with BLOB index --echo # MDEV-18795 InnoDB: Failing assertion: field->prefix_len > 0 upon DML on table with BLOB index
# --echo #
CREATE TEMPORARY TABLE t1 (f BLOB, UNIQUE(f)) ENGINE=InnoDB ROW_FORMAT=COMPACT; CREATE TEMPORARY TABLE t1 (f BLOB, UNIQUE(f)) ENGINE=InnoDB ROW_FORMAT=COMPACT;
--error ER_INDEX_COLUMN_TOO_LONG --error ER_INDEX_COLUMN_TOO_LONG
ALTER TABLE t1 ADD KEY (f); ALTER TABLE t1 ADD KEY (f);
@ -126,36 +127,36 @@ TRUNCATE TABLE t1;
SELECT * FROM t1 WHERE f LIKE 'foo'; SELECT * FROM t1 WHERE f LIKE 'foo';
DROP TABLE t1; DROP TABLE t1;
# --echo #
# MDEV-18798 InnoDB: No matching column for `DB_ROW_HASH_1`and server crash in --echo # MDEV-18798 InnoDB: No matching column for `DB_ROW_HASH_1`and server crash in
# ha_innobase::commit_inplace_alter_table upon ALTER on table with UNIQUE key --echo # ha_innobase::commit_inplace_alter_table upon ALTER on table with UNIQUE key
# --echo #
CREATE TABLE t1 (a INT, UNIQUE ind USING HASH (a)) ENGINE=InnoDB; CREATE TABLE t1 (a INT, UNIQUE ind USING HASH (a)) ENGINE=InnoDB;
ALTER TABLE t1 CHANGE COLUMN IF EXISTS b a INT; ALTER TABLE t1 CHANGE COLUMN IF EXISTS b a INT;
DROP TABLE t1; DROP TABLE t1;
# --echo #
# MDEV-18801 InnoDB: Failing assertion: field->col->mtype == type or ASAN heap-buffer-overflow --echo # MDEV-18801 InnoDB: Failing assertion: field->col->mtype == type or ASAN heap-buffer-overflow
# in row_sel_convert_mysql_key_to_innobase upon SELECT on table with long index --echo # in row_sel_convert_mysql_key_to_innobase upon SELECT on table with long index
# --echo #
CREATE TABLE t1 (f VARCHAR(4096), UNIQUE(f)) ENGINE=InnoDB; CREATE TABLE t1 (f VARCHAR(4096), UNIQUE(f)) ENGINE=InnoDB;
--error ER_CANT_DROP_FIELD_OR_KEY --error ER_CANT_DROP_FIELD_OR_KEY
ALTER TABLE t1 DROP x; ALTER TABLE t1 DROP x;
SELECT * FROM t1 WHERE f LIKE 'foo'; SELECT * FROM t1 WHERE f LIKE 'foo';
DROP TABLE t1; DROP TABLE t1;
# --echo #
# MDEV-18800 Server crash in instant_alter_column_possible or --echo # MDEV-18800 Server crash in instant_alter_column_possible or
# Assertion `!pk->has_virtual()' failed in instant_alter_column_possible upon adding key --echo # Assertion `!pk->has_virtual()' failed in instant_alter_column_possible upon adding key
# --echo #
CREATE TABLE t1 (pk INT, PRIMARY KEY USING HASH (pk)) ENGINE=InnoDB; CREATE TABLE t1 (pk INT, PRIMARY KEY USING HASH (pk)) ENGINE=InnoDB;
--query_vertical show keys from t1; --query_vertical show keys from t1;
ALTER TABLE t1 ADD INDEX (pk); ALTER TABLE t1 ADD INDEX (pk);
DROP TABLE t1; DROP TABLE t1;
# --echo #
# MDEV-18922 Alter on long unique varchar column makes result null --echo # MDEV-18922 Alter on long unique varchar column makes result null
# --echo #
CREATE TABLE t1 (b int, a varchar(4000)); CREATE TABLE t1 (b int, a varchar(4000));
INSERT INTO t1 VALUES (1, 2),(2,3),(3,4); INSERT INTO t1 VALUES (1, 2),(2,3),(3,4);
ALTER TABLE t1 ADD UNIQUE INDEX (a); ALTER TABLE t1 ADD UNIQUE INDEX (a);
@ -163,10 +164,10 @@ SELECT * FROM t1;
SELECT a FROM t1; SELECT a FROM t1;
drop table t1; drop table t1;
# --echo #
# MDEV-18809 Server crash in fields_in_hash_keyinfo or Assertion `key_info->key_part->field->flags --echo # MDEV-18809 Server crash in fields_in_hash_keyinfo or Assertion `key_info->key_part->field->flags
# & (1<< 30)' failed in setup_keyinfo_hash --echo # & (1<< 30)' failed in setup_keyinfo_hash
# --echo #
CREATE TABLE t1 (f VARCHAR(4096), UNIQUE(f)) ENGINE=InnoDB; CREATE TABLE t1 (f VARCHAR(4096), UNIQUE(f)) ENGINE=InnoDB;
--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON --error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
ALTER TABLE t1 DROP KEY f, ADD INDEX idx1(f), ALGORITHM=INSTANT; ALTER TABLE t1 DROP KEY f, ADD INDEX idx1(f), ALGORITHM=INSTANT;
@ -181,9 +182,9 @@ insert into t1 values(1,1);
alter table t1 add column c int; alter table t1 add column c int;
drop table t1; drop table t1;
# --echo #
# MDEV-18889 Long unique on virtual fields crashes server --echo # MDEV-18889 Long unique on virtual fields crashes server
# --echo #
create table t1(a blob , b blob as (a) unique); create table t1(a blob , b blob as (a) unique);
insert into t1 values(1, default); insert into t1 values(1, default);
--error ER_DUP_ENTRY --error ER_DUP_ENTRY
@ -199,9 +200,9 @@ insert into t1(a,b) values(2,3);
insert into t1(a,b) values(3,2); insert into t1(a,b) values(3,2);
drop table t1; drop table t1;
# --echo #
# MDEV-18888 Server crashes in Item_field::register_field_in_read_map upon MODIFY COLUMN --echo # MDEV-18888 Server crashes in Item_field::register_field_in_read_map upon MODIFY COLUMN
# --echo #
CREATE TABLE t1 ( CREATE TABLE t1 (
a CHAR(128), a CHAR(128),
b CHAR(128) AS (a), b CHAR(128) AS (a),
@ -218,9 +219,9 @@ CREATE TABLE t1 (
) ENGINE=InnoDB; ) ENGINE=InnoDB;
drop table t1; drop table t1;
# --echo #
# MDEV-18967 Load data in system version with long unique does not work --echo # MDEV-18967 Load data in system version with long unique does not work
# --echo #
CREATE TABLE t1 (data VARCHAR(4), unique(data) using hash) with system versioning; CREATE TABLE t1 (data VARCHAR(4), unique(data) using hash) with system versioning;
INSERT INTO t1 VALUES ('A'); INSERT INTO t1 VALUES ('A');
SELECT * INTO OUTFILE 'load.data' from t1; SELECT * INTO OUTFILE 'load.data' from t1;
@ -231,9 +232,9 @@ DROP TABLE t1;
--let $datadir= `select @@datadir` --let $datadir= `select @@datadir`
--remove_file $datadir/test/load.data --remove_file $datadir/test/load.data
# --echo #
# MDEV-18901 Wrong results after ADD UNIQUE INDEX(blob_column) --echo # MDEV-18901 Wrong results after ADD UNIQUE INDEX(blob_column)
# --echo #
CREATE TABLE t1 (data VARCHAR(7961)) ENGINE=InnoDB; CREATE TABLE t1 (data VARCHAR(7961)) ENGINE=InnoDB;
INSERT INTO t1 VALUES ('f'), ('o'), ('o'); INSERT INTO t1 VALUES ('f'), ('o'), ('o');
@ -245,16 +246,16 @@ ALTER TABLE t1 ADD SYSTEM VERSIONING ;
SELECT * FROM t1; SELECT * FROM t1;
REPLACE INTO t1 VALUES ('f'), ('o'), ('o'); REPLACE INTO t1 VALUES ('f'), ('o'), ('o');
SELECT * FROM t1; SELECT * FROM t1;
# This should be equivalent to the REPLACE above --echo # This should be equivalent to the REPLACE above
LOAD DATA INFILE 'load.data' REPLACE INTO TABLE t1; LOAD DATA INFILE 'load.data' REPLACE INTO TABLE t1;
SELECT * FROM t1; SELECT * FROM t1;
DROP TABLE t1; DROP TABLE t1;
--let $datadir= `select @@datadir` --let $datadir= `select @@datadir`
--remove_file $datadir/test/load.data --remove_file $datadir/test/load.data
# --echo #
# MDEV-18953 Hash index on partial char field not working --echo # MDEV-18953 Hash index on partial char field not working
# --echo #
create table t1 ( create table t1 (
c char(10) character set utf8mb4, c char(10) character set utf8mb4,
unique key a using hash (c(1)) unique key a using hash (c(1))
@ -267,17 +268,17 @@ insert into t1 values ('бб');
insert into t1 values ('ббб'); insert into t1 values ('ббб');
drop table t1; drop table t1;
# --echo #
# MDEV-18904 Assertion `m_part_spec.start_part >= m_part_spec.end_part' failed in ha_partition::index_read_idx_map --echo # MDEV-18904 Assertion `m_part_spec.start_part >= m_part_spec.end_part' failed in ha_partition::index_read_idx_map
# --echo #
CREATE TABLE t1 (a INT, UNIQUE USING HASH (a)) PARTITION BY HASH (a) PARTITIONS 2; CREATE TABLE t1 (a INT, UNIQUE USING HASH (a)) PARTITION BY HASH (a) PARTITIONS 2;
INSERT INTO t1 VALUES (2); INSERT INTO t1 VALUES (2);
REPLACE INTO t1 VALUES (2); REPLACE INTO t1 VALUES (2);
DROP TABLE t1; DROP TABLE t1;
# --echo #
# MDEV-18820 Assertion `lock_table_has(trx, index->table, LOCK_IX)' failed in lock_rec_insert_check_and_lock upon INSERT into table with blob key' --echo # MDEV-18820 Assertion `lock_table_has(trx, index->table, LOCK_IX)' failed in lock_rec_insert_check_and_lock upon INSERT into table with blob key'
# --echo #
set innodb_lock_wait_timeout= 10; set innodb_lock_wait_timeout= 10;
@ -317,15 +318,15 @@ INSERT IGNORE INTO t1 VALUES (4, 1)/*4*/;
--connection default --connection default
DROP TABLE t1, t2; DROP TABLE t1, t2;
# --echo #
# MDEV-18791 Wrong error upon creating Aria table with long index on BLOB --echo # MDEV-18791 Wrong error upon creating Aria table with long index on BLOB
# --echo #
--error ER_TOO_LONG_KEY --error ER_TOO_LONG_KEY
CREATE TABLE t1 (a TEXT, UNIQUE(a)) ENGINE=Aria; CREATE TABLE t1 (a TEXT, UNIQUE(a)) ENGINE=Aria;
# --echo #
# MDEV-20001 Potential dangerous regression: INSERT INTO >=100 rows fail for myisam table with HASH indexes --echo # MDEV-20001 Potential dangerous regression: INSERT INTO >=100 rows fail for myisam table with HASH indexes
# --echo #
create table t1(a int, unique(a) using hash); create table t1(a int, unique(a) using hash);
--let $count=150 --let $count=150
--let insert_stmt= insert into t1 values(200) --let insert_stmt= insert into t1 values(200)
@ -340,9 +341,9 @@ while ($count)
--enable_query_log --enable_query_log
drop table t1; drop table t1;
# --echo #
# MDEV-21804 Assertion `marked_for_read()' failed upon INSERT into table with long unique blob under binlog_row_image=NOBLOB --echo # MDEV-21804 Assertion `marked_for_read()' failed upon INSERT into table with long unique blob under binlog_row_image=NOBLOB
# --echo #
--source include/have_binlog_format_row.inc --source include/have_binlog_format_row.inc
SET binlog_row_image= NOBLOB; SET binlog_row_image= NOBLOB;
@ -352,20 +353,17 @@ INSERT INTO t1 VALUES (1,'foo');
create table t2(id int primary key, a blob, b varchar(20) as (LEFT(a,2))); create table t2(id int primary key, a blob, b varchar(20) as (LEFT(a,2)));
INSERT INTO t2 VALUES (1, 'foo', default); INSERT INTO t2 VALUES (1, 'foo', default);
# Cleanup
DROP TABLE t1, t2; DROP TABLE t1, t2;
SET binlog_row_image= FULL; SET binlog_row_image= FULL;
# --echo #
# MDEV-22719 Long unique keys are not created when individual key_part->length < max_key_length but SUM(key_parts->length) > max_key_length --echo # MDEV-22719 Long unique keys are not created when individual key_part->length < max_key_length but SUM(key_parts->length) > max_key_length
# --echo #
CREATE TABLE t1 (a int, b VARCHAR(1000), UNIQUE (a,b)) ENGINE=MyISAM; CREATE TABLE t1 (a int, b VARCHAR(1000), UNIQUE (a,b)) ENGINE=MyISAM;
show index from t1; show index from t1;
CREATE TABLE t2 (a varchar(900), b VARCHAR(900), UNIQUE (a,b)) ENGINE=MyISAM; CREATE TABLE t2 (a varchar(900), b VARCHAR(900), UNIQUE (a,b)) ENGINE=MyISAM;
show index from t2; show index from t2;
# Cleanup
DROP TABLE t1,t2; DROP TABLE t1,t2;
--echo # --echo #
@ -397,9 +395,9 @@ update t1 set v2 = 1, v3 = -128;
update t1,t2 set v1 = v2 , v5 = 0; update t1,t2 set v1 = v2 , v5 = 0;
drop table t1, t2; drop table t1, t2;
# --echo #
# MDEV-23264 Unique blobs allow duplicate values upon UPDATE --echo # MDEV-23264 Unique blobs allow duplicate values upon UPDATE
# --echo #
CREATE TABLE t1 (f TEXT UNIQUE); CREATE TABLE t1 (f TEXT UNIQUE);
INSERT INTO t1 VALUES (NULL),(NULL); INSERT INTO t1 VALUES (NULL),(NULL);
@ -435,6 +433,16 @@ alter table t1 reorganize partition p1 into (
drop table t1; drop table t1;
--echo #
--echo # MDEV-29199 Unique hash key is ignored upon INSERT ... SELECT into non-empty MyISAM table
--echo #
create table t1 (a int, b text, unique(b)) engine=MyISAM;
insert into t1 values (0,'aa');
--error ER_DUP_ENTRY
insert into t1 (a,b) select 1,'xxx' from seq_1_to_5;
select * from t1;
drop table t1;
--echo # --echo #
--echo # End of 10.4 tests --echo # End of 10.4 tests
--echo # --echo #

View File

@ -338,7 +338,7 @@ WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
o_totalprice between 200000 and 230000; o_totalprice between 200000 and 230000;
id select_type table type possible_keys key key_len ref rows Extra id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_shipdate 4 NULL 98 Using index condition 1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_shipdate 4 NULL 98 Using index condition
1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 Using where 1 SIMPLE orders eq_ref|filter PRIMARY,i_o_totalprice PRIMARY|i_o_totalprice 4|9 dbt3_s001.lineitem.l_orderkey 1 (5%) Using where; Using rowid filter
set statement optimizer_switch='rowid_filter=on' for EXPLAIN FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice set statement optimizer_switch='rowid_filter=on' for EXPLAIN FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
FROM orders JOIN lineitem ON o_orderkey=l_orderkey FROM orders JOIN lineitem ON o_orderkey=l_orderkey
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
@ -371,6 +371,14 @@ EXPLAIN
"key_length": "4", "key_length": "4",
"used_key_parts": ["o_orderkey"], "used_key_parts": ["o_orderkey"],
"ref": ["dbt3_s001.lineitem.l_orderkey"], "ref": ["dbt3_s001.lineitem.l_orderkey"],
"rowid_filter": {
"range": {
"key": "i_o_totalprice",
"used_key_parts": ["o_totalprice"]
},
"rows": 69,
"selectivity_pct": 4.6
},
"rows": 1, "rows": 1,
"filtered": 4.599999905, "filtered": 4.599999905,
"attached_condition": "orders.o_totalprice between 200000 and 230000" "attached_condition": "orders.o_totalprice between 200000 and 230000"
@ -383,7 +391,7 @@ WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
o_totalprice between 200000 and 230000; o_totalprice between 200000 and 230000;
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_shipdate 4 NULL 98 98.00 100.00 100.00 Using index condition 1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_shipdate 4 NULL 98 98.00 100.00 100.00 Using index condition
1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 1.00 4.60 11.22 Using where 1 SIMPLE orders eq_ref|filter PRIMARY,i_o_totalprice PRIMARY|i_o_totalprice 4|9 dbt3_s001.lineitem.l_orderkey 1 (5%) 0.11 (10%) 4.60 100.00 Using where; Using rowid filter
set statement optimizer_switch='rowid_filter=on' for ANALYZE FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice set statement optimizer_switch='rowid_filter=on' for ANALYZE FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
FROM orders JOIN lineitem ON o_orderkey=l_orderkey FROM orders JOIN lineitem ON o_orderkey=l_orderkey
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
@ -423,13 +431,26 @@ ANALYZE
"key_length": "4", "key_length": "4",
"used_key_parts": ["o_orderkey"], "used_key_parts": ["o_orderkey"],
"ref": ["dbt3_s001.lineitem.l_orderkey"], "ref": ["dbt3_s001.lineitem.l_orderkey"],
"rowid_filter": {
"range": {
"key": "i_o_totalprice",
"used_key_parts": ["o_totalprice"]
},
"rows": 69,
"selectivity_pct": 4.6,
"r_rows": 71,
"r_lookups": 96,
"r_selectivity_pct": 10.41666667,
"r_buffer_size": "REPLACED",
"r_filling_time_ms": "REPLACED"
},
"r_loops": 98, "r_loops": 98,
"rows": 1, "rows": 1,
"r_rows": 1, "r_rows": 0.112244898,
"r_table_time_ms": "REPLACED", "r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED", "r_other_time_ms": "REPLACED",
"filtered": 4.599999905, "filtered": 4.599999905,
"r_filtered": 11.2244898, "r_filtered": 100,
"attached_condition": "orders.o_totalprice between 200000 and 230000" "attached_condition": "orders.o_totalprice between 200000 and 230000"
} }
} }
@ -575,7 +596,7 @@ l_quantity > 45 AND
o_totalprice between 180000 and 230000; o_totalprice between 180000 and 230000;
id select_type table type possible_keys key key_len ref rows Extra id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE lineitem range|filter PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity,i_l_quantity i_l_shipdate|i_l_quantity 4|9 NULL 509 (12%) Using index condition; Using where; Using rowid filter 1 SIMPLE lineitem range|filter PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity,i_l_quantity i_l_shipdate|i_l_quantity 4|9 NULL 509 (12%) Using index condition; Using where; Using rowid filter
1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 Using where 1 SIMPLE orders eq_ref|filter PRIMARY,i_o_totalprice PRIMARY|i_o_totalprice 4|9 dbt3_s001.lineitem.l_orderkey 1 (9%) Using where; Using rowid filter
set statement optimizer_switch='rowid_filter=on' for EXPLAIN FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice set statement optimizer_switch='rowid_filter=on' for EXPLAIN FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
FROM orders JOIN lineitem ON o_orderkey=l_orderkey FROM orders JOIN lineitem ON o_orderkey=l_orderkey
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
@ -619,6 +640,14 @@ EXPLAIN
"key_length": "4", "key_length": "4",
"used_key_parts": ["o_orderkey"], "used_key_parts": ["o_orderkey"],
"ref": ["dbt3_s001.lineitem.l_orderkey"], "ref": ["dbt3_s001.lineitem.l_orderkey"],
"rowid_filter": {
"range": {
"key": "i_o_totalprice",
"used_key_parts": ["o_totalprice"]
},
"rows": 139,
"selectivity_pct": 9.266666667
},
"rows": 1, "rows": 1,
"filtered": 9.266666412, "filtered": 9.266666412,
"attached_condition": "orders.o_totalprice between 180000 and 230000" "attached_condition": "orders.o_totalprice between 180000 and 230000"
@ -632,7 +661,7 @@ l_quantity > 45 AND
o_totalprice between 180000 and 230000; o_totalprice between 180000 and 230000;
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
1 SIMPLE lineitem range|filter PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity,i_l_quantity i_l_shipdate|i_l_quantity 4|9 NULL 509 (12%) 60.00 (11%) 11.69 100.00 Using index condition; Using where; Using rowid filter 1 SIMPLE lineitem range|filter PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity,i_l_quantity i_l_shipdate|i_l_quantity 4|9 NULL 509 (12%) 60.00 (11%) 11.69 100.00 Using index condition; Using where; Using rowid filter
1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 1.00 9.27 26.67 Using where 1 SIMPLE orders eq_ref|filter PRIMARY,i_o_totalprice PRIMARY|i_o_totalprice 4|9 dbt3_s001.lineitem.l_orderkey 1 (9%) 0.27 (25%) 9.27 100.00 Using where; Using rowid filter
set statement optimizer_switch='rowid_filter=on' for ANALYZE FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice set statement optimizer_switch='rowid_filter=on' for ANALYZE FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
FROM orders JOIN lineitem ON o_orderkey=l_orderkey FROM orders JOIN lineitem ON o_orderkey=l_orderkey
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
@ -688,13 +717,26 @@ ANALYZE
"key_length": "4", "key_length": "4",
"used_key_parts": ["o_orderkey"], "used_key_parts": ["o_orderkey"],
"ref": ["dbt3_s001.lineitem.l_orderkey"], "ref": ["dbt3_s001.lineitem.l_orderkey"],
"rowid_filter": {
"range": {
"key": "i_o_totalprice",
"used_key_parts": ["o_totalprice"]
},
"rows": 139,
"selectivity_pct": 9.266666667,
"r_rows": 144,
"r_lookups": 59,
"r_selectivity_pct": 25.42372881,
"r_buffer_size": "REPLACED",
"r_filling_time_ms": "REPLACED"
},
"r_loops": 60, "r_loops": 60,
"rows": 1, "rows": 1,
"r_rows": 1, "r_rows": 0.266666667,
"r_table_time_ms": "REPLACED", "r_table_time_ms": "REPLACED",
"r_other_time_ms": "REPLACED", "r_other_time_ms": "REPLACED",
"filtered": 9.266666412, "filtered": 9.266666412,
"r_filtered": 26.66666667, "r_filtered": 100,
"attached_condition": "orders.o_totalprice between 180000 and 230000" "attached_condition": "orders.o_totalprice between 180000 and 230000"
} }
} }

View File

@ -3616,7 +3616,7 @@ t3.a=t2.a AND t3.c IN ('bb','ee');
id select_type table type possible_keys key key_len ref rows Extra id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1 1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
1 SIMPLE t2 range si si 5 NULL 4 Using index condition; Using where 1 SIMPLE t2 range si si 5 NULL 4 Using index condition; Using where
1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where 1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using rowid filter
EXPLAIN EXPLAIN
SELECT t3.a FROM t1,t2,t3 SELECT t3.a FROM t1,t2,t3
WHERE t1.id = 8 AND t2.i BETWEEN t1.b AND t1.e AND WHERE t1.id = 8 AND t2.i BETWEEN t1.b AND t1.e AND
@ -3624,7 +3624,7 @@ t3.a=t2.a AND t3.c IN ('bb','ee') ;
id select_type table type possible_keys key key_len ref rows Extra id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1 1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
1 SIMPLE t2 range si,ai si 5 NULL 4 Using index condition; Using where 1 SIMPLE t2 range si,ai si 5 NULL 4 Using index condition; Using where
1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where 1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using rowid filter
EXPLAIN EXPLAIN
SELECT t3.a FROM t1,t2 FORCE INDEX (si),t3 SELECT t3.a FROM t1,t2 FORCE INDEX (si),t3
WHERE t1.id = 8 AND (t2.i=t1.b OR t2.i=t1.e) AND t3.a=t2.a AND WHERE t1.id = 8 AND (t2.i=t1.b OR t2.i=t1.e) AND t3.a=t2.a AND
@ -3632,7 +3632,7 @@ t3.c IN ('bb','ee');
id select_type table type possible_keys key key_len ref rows Extra id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1 1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
1 SIMPLE t2 range si si 5 NULL 2 Using index condition; Using where 1 SIMPLE t2 range si si 5 NULL 2 Using index condition; Using where
1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where 1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using rowid filter
EXPLAIN EXPLAIN
SELECT t3.a FROM t1,t2,t3 SELECT t3.a FROM t1,t2,t3
WHERE t1.id = 8 AND (t2.i=t1.b OR t2.i=t1.e) AND t3.a=t2.a AND WHERE t1.id = 8 AND (t2.i=t1.b OR t2.i=t1.e) AND t3.a=t2.a AND
@ -3640,7 +3640,7 @@ t3.c IN ('bb','ee');
id select_type table type possible_keys key key_len ref rows Extra id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1 1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
1 SIMPLE t2 range si,ai si 5 NULL 2 Using index condition; Using where 1 SIMPLE t2 range si,ai si 5 NULL 2 Using index condition; Using where
1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where 1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using rowid filter
DROP TABLE t1,t2,t3; DROP TABLE t1,t2,t3;
CREATE TABLE t1 ( f1 int primary key, f2 int, f3 int, f4 int, f5 int, f6 int, checked_out int); CREATE TABLE t1 ( f1 int primary key, f2 int, f3 int, f4 int, f5 int, f6 int, checked_out int);
CREATE TABLE t2 ( f11 int PRIMARY KEY ); CREATE TABLE t2 ( f11 int PRIMARY KEY );
@ -5639,4 +5639,60 @@ EXECUTE stmt;
COUNT(DISTINCT a) COUNT(DISTINCT a)
3 3
DROP TABLE t1; DROP TABLE t1;
#
# MDEV-29294: Assertion `functype() == ((Item_cond *) new_item)->functype()'
# failed in Item_cond::remove_eq_conds on SELECT
#
CREATE TABLE t1 (a INT);
INSERT INTO t1 VALUES (1),(2),(3);
# Test for nested OR conditions:
SELECT * FROM t1 WHERE a = 1 AND
(3 = 0 OR (SELECT a = 1 OR (SELECT 3 WHERE a = a) = 3));
a
1
EXPLAIN EXTENDED
SELECT * FROM t1 WHERE a = 1 AND
(3 = 0 OR (SELECT a = 1 OR (SELECT 3 WHERE a = a) = 3));
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where; Using temporary
3 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1
Note 1276 Field or reference 'test.t1.a' of SELECT #3 was resolved in SELECT #1
Note 1276 Field or reference 'test.t1.a' of SELECT #3 was resolved in SELECT #1
Note 1249 Select 2 was reduced during optimization
Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = 1 and (1 or <expr_cache><`test`.`t1`.`a`>((/* select#3 */ select 3 from DUAL where `test`.`t1`.`a` = `test`.`t1`.`a`)) = 3)
PREPARE stmt FROM 'SELECT * FROM t1 WHERE a = 1 AND
(3 = 0 OR (SELECT a = 1 OR (SELECT 3 WHERE a = a) = 3))';
EXECUTE stmt;
a
1
EXECUTE stmt;
a
1
CREATE VIEW v1 AS SELECT * FROM t1 WHERE a = 1 AND
(3 = 0 OR (SELECT a = 1 OR (SELECT 3 WHERE a = a) = 3));
SELECT * FROM v1;
a
1
# Test for nested AND conditions:
SELECT * FROM t1 WHERE a = 1 OR
(3 = 3 AND (SELECT a = 1 AND (SELECT 3 WHERE a = a) = 3));
a
1
PREPARE stmt FROM 'SELECT * FROM t1 WHERE a = 1 OR
(3 = 3 AND (SELECT a = 1 AND (SELECT 3 WHERE a = a) = 3))';
EXECUTE stmt;
a
1
EXECUTE stmt;
a
1
CREATE VIEW v2 AS SELECT * FROM t1 WHERE a = 1 OR
(3 = 3 AND (SELECT a = 1 AND (SELECT 3 WHERE a = a) = 3));
SELECT * FROM v2;
a
1
DROP TABLE t1;
DROP VIEW v1, v2;
End of 10.0 tests End of 10.0 tests

View File

@ -4742,4 +4742,44 @@ EXECUTE stmt;
--enable_warnings --enable_warnings
DROP TABLE t1; DROP TABLE t1;
--echo #
--echo # MDEV-29294: Assertion `functype() == ((Item_cond *) new_item)->functype()'
--echo # failed in Item_cond::remove_eq_conds on SELECT
--echo #
CREATE TABLE t1 (a INT);
INSERT INTO t1 VALUES (1),(2),(3);
--echo # Test for nested OR conditions:
SELECT * FROM t1 WHERE a = 1 AND
(3 = 0 OR (SELECT a = 1 OR (SELECT 3 WHERE a = a) = 3));
EXPLAIN EXTENDED
SELECT * FROM t1 WHERE a = 1 AND
(3 = 0 OR (SELECT a = 1 OR (SELECT 3 WHERE a = a) = 3));
PREPARE stmt FROM 'SELECT * FROM t1 WHERE a = 1 AND
(3 = 0 OR (SELECT a = 1 OR (SELECT 3 WHERE a = a) = 3))';
EXECUTE stmt;
EXECUTE stmt;
CREATE VIEW v1 AS SELECT * FROM t1 WHERE a = 1 AND
(3 = 0 OR (SELECT a = 1 OR (SELECT 3 WHERE a = a) = 3));
SELECT * FROM v1;
--echo # Test for nested AND conditions:
SELECT * FROM t1 WHERE a = 1 OR
(3 = 3 AND (SELECT a = 1 AND (SELECT 3 WHERE a = a) = 3));
PREPARE stmt FROM 'SELECT * FROM t1 WHERE a = 1 OR
(3 = 3 AND (SELECT a = 1 AND (SELECT 3 WHERE a = a) = 3))';
EXECUTE stmt;
EXECUTE stmt;
CREATE VIEW v2 AS SELECT * FROM t1 WHERE a = 1 OR
(3 = 3 AND (SELECT a = 1 AND (SELECT 3 WHERE a = a) = 3));
SELECT * FROM v2;
DROP TABLE t1;
DROP VIEW v1, v2;
--echo End of 10.0 tests --echo End of 10.0 tests

View File

@ -3627,7 +3627,7 @@ t3.a=t2.a AND t3.c IN ('bb','ee');
id select_type table type possible_keys key key_len ref rows Extra id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1 1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
1 SIMPLE t2 range si si 5 NULL 4 Using index condition; Using where; Rowid-ordered scan 1 SIMPLE t2 range si si 5 NULL 4 Using index condition; Using where; Rowid-ordered scan
1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan 1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan; Using rowid filter
EXPLAIN EXPLAIN
SELECT t3.a FROM t1,t2,t3 SELECT t3.a FROM t1,t2,t3
WHERE t1.id = 8 AND t2.i BETWEEN t1.b AND t1.e AND WHERE t1.id = 8 AND t2.i BETWEEN t1.b AND t1.e AND
@ -3635,7 +3635,7 @@ t3.a=t2.a AND t3.c IN ('bb','ee') ;
id select_type table type possible_keys key key_len ref rows Extra id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1 1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
1 SIMPLE t2 range si,ai si 5 NULL 4 Using index condition; Using where; Rowid-ordered scan 1 SIMPLE t2 range si,ai si 5 NULL 4 Using index condition; Using where; Rowid-ordered scan
1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan 1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan; Using rowid filter
EXPLAIN EXPLAIN
SELECT t3.a FROM t1,t2 FORCE INDEX (si),t3 SELECT t3.a FROM t1,t2 FORCE INDEX (si),t3
WHERE t1.id = 8 AND (t2.i=t1.b OR t2.i=t1.e) AND t3.a=t2.a AND WHERE t1.id = 8 AND (t2.i=t1.b OR t2.i=t1.e) AND t3.a=t2.a AND
@ -3643,7 +3643,7 @@ t3.c IN ('bb','ee');
id select_type table type possible_keys key key_len ref rows Extra id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1 1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
1 SIMPLE t2 range si si 5 NULL 2 Using index condition; Using where; Rowid-ordered scan 1 SIMPLE t2 range si si 5 NULL 2 Using index condition; Using where; Rowid-ordered scan
1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan 1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan; Using rowid filter
EXPLAIN EXPLAIN
SELECT t3.a FROM t1,t2,t3 SELECT t3.a FROM t1,t2,t3
WHERE t1.id = 8 AND (t2.i=t1.b OR t2.i=t1.e) AND t3.a=t2.a AND WHERE t1.id = 8 AND (t2.i=t1.b OR t2.i=t1.e) AND t3.a=t2.a AND
@ -3651,7 +3651,7 @@ t3.c IN ('bb','ee');
id select_type table type possible_keys key key_len ref rows Extra id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1 1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
1 SIMPLE t2 range si,ai si 5 NULL 2 Using index condition; Using where; Rowid-ordered scan 1 SIMPLE t2 range si,ai si 5 NULL 2 Using index condition; Using where; Rowid-ordered scan
1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan 1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan; Using rowid filter
DROP TABLE t1,t2,t3; DROP TABLE t1,t2,t3;
CREATE TABLE t1 ( f1 int primary key, f2 int, f3 int, f4 int, f5 int, f6 int, checked_out int); CREATE TABLE t1 ( f1 int primary key, f2 int, f3 int, f4 int, f5 int, f6 int, checked_out int);
CREATE TABLE t2 ( f11 int PRIMARY KEY ); CREATE TABLE t2 ( f11 int PRIMARY KEY );
@ -5650,6 +5650,62 @@ EXECUTE stmt;
COUNT(DISTINCT a) COUNT(DISTINCT a)
3 3
DROP TABLE t1; DROP TABLE t1;
#
# MDEV-29294: Assertion `functype() == ((Item_cond *) new_item)->functype()'
# failed in Item_cond::remove_eq_conds on SELECT
#
CREATE TABLE t1 (a INT);
INSERT INTO t1 VALUES (1),(2),(3);
# Test for nested OR conditions:
SELECT * FROM t1 WHERE a = 1 AND
(3 = 0 OR (SELECT a = 1 OR (SELECT 3 WHERE a = a) = 3));
a
1
EXPLAIN EXTENDED
SELECT * FROM t1 WHERE a = 1 AND
(3 = 0 OR (SELECT a = 1 OR (SELECT 3 WHERE a = a) = 3));
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where; Using temporary
3 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1
Note 1276 Field or reference 'test.t1.a' of SELECT #3 was resolved in SELECT #1
Note 1276 Field or reference 'test.t1.a' of SELECT #3 was resolved in SELECT #1
Note 1249 Select 2 was reduced during optimization
Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = 1 and (1 or <expr_cache><`test`.`t1`.`a`>((/* select#3 */ select 3 from DUAL where `test`.`t1`.`a` = `test`.`t1`.`a`)) = 3)
PREPARE stmt FROM 'SELECT * FROM t1 WHERE a = 1 AND
(3 = 0 OR (SELECT a = 1 OR (SELECT 3 WHERE a = a) = 3))';
EXECUTE stmt;
a
1
EXECUTE stmt;
a
1
CREATE VIEW v1 AS SELECT * FROM t1 WHERE a = 1 AND
(3 = 0 OR (SELECT a = 1 OR (SELECT 3 WHERE a = a) = 3));
SELECT * FROM v1;
a
1
# Test for nested AND conditions:
SELECT * FROM t1 WHERE a = 1 OR
(3 = 3 AND (SELECT a = 1 AND (SELECT 3 WHERE a = a) = 3));
a
1
PREPARE stmt FROM 'SELECT * FROM t1 WHERE a = 1 OR
(3 = 3 AND (SELECT a = 1 AND (SELECT 3 WHERE a = a) = 3))';
EXECUTE stmt;
a
1
EXECUTE stmt;
a
1
CREATE VIEW v2 AS SELECT * FROM t1 WHERE a = 1 OR
(3 = 3 AND (SELECT a = 1 AND (SELECT 3 WHERE a = a) = 3));
SELECT * FROM v2;
a
1
DROP TABLE t1;
DROP VIEW v1, v2;
End of 10.0 tests End of 10.0 tests
set join_cache_level=default; set join_cache_level=default;
set @@optimizer_switch=@save_optimizer_switch_jcl6; set @@optimizer_switch=@save_optimizer_switch_jcl6;

View File

@ -3616,7 +3616,7 @@ t3.a=t2.a AND t3.c IN ('bb','ee');
id select_type table type possible_keys key key_len ref rows Extra id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1 1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
1 SIMPLE t2 range si si 5 NULL 4 Using index condition; Using where 1 SIMPLE t2 range si si 5 NULL 4 Using index condition; Using where
1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where 1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using rowid filter
EXPLAIN EXPLAIN
SELECT t3.a FROM t1,t2,t3 SELECT t3.a FROM t1,t2,t3
WHERE t1.id = 8 AND t2.i BETWEEN t1.b AND t1.e AND WHERE t1.id = 8 AND t2.i BETWEEN t1.b AND t1.e AND
@ -3624,7 +3624,7 @@ t3.a=t2.a AND t3.c IN ('bb','ee') ;
id select_type table type possible_keys key key_len ref rows Extra id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1 1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
1 SIMPLE t2 range si,ai si 5 NULL 4 Using index condition; Using where 1 SIMPLE t2 range si,ai si 5 NULL 4 Using index condition; Using where
1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where 1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using rowid filter
EXPLAIN EXPLAIN
SELECT t3.a FROM t1,t2 FORCE INDEX (si),t3 SELECT t3.a FROM t1,t2 FORCE INDEX (si),t3
WHERE t1.id = 8 AND (t2.i=t1.b OR t2.i=t1.e) AND t3.a=t2.a AND WHERE t1.id = 8 AND (t2.i=t1.b OR t2.i=t1.e) AND t3.a=t2.a AND
@ -3632,7 +3632,7 @@ t3.c IN ('bb','ee');
id select_type table type possible_keys key key_len ref rows Extra id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1 1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
1 SIMPLE t2 range si si 5 NULL 2 Using index condition; Using where 1 SIMPLE t2 range si si 5 NULL 2 Using index condition; Using where
1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where 1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using rowid filter
EXPLAIN EXPLAIN
SELECT t3.a FROM t1,t2,t3 SELECT t3.a FROM t1,t2,t3
WHERE t1.id = 8 AND (t2.i=t1.b OR t2.i=t1.e) AND t3.a=t2.a AND WHERE t1.id = 8 AND (t2.i=t1.b OR t2.i=t1.e) AND t3.a=t2.a AND
@ -3640,7 +3640,7 @@ t3.c IN ('bb','ee');
id select_type table type possible_keys key key_len ref rows Extra id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1 1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
1 SIMPLE t2 range si,ai si 5 NULL 2 Using index condition; Using where 1 SIMPLE t2 range si,ai si 5 NULL 2 Using index condition; Using where
1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where 1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using rowid filter
DROP TABLE t1,t2,t3; DROP TABLE t1,t2,t3;
CREATE TABLE t1 ( f1 int primary key, f2 int, f3 int, f4 int, f5 int, f6 int, checked_out int); CREATE TABLE t1 ( f1 int primary key, f2 int, f3 int, f4 int, f5 int, f6 int, checked_out int);
CREATE TABLE t2 ( f11 int PRIMARY KEY ); CREATE TABLE t2 ( f11 int PRIMARY KEY );
@ -5639,4 +5639,60 @@ EXECUTE stmt;
COUNT(DISTINCT a) COUNT(DISTINCT a)
3 3
DROP TABLE t1; DROP TABLE t1;
#
# MDEV-29294: Assertion `functype() == ((Item_cond *) new_item)->functype()'
# failed in Item_cond::remove_eq_conds on SELECT
#
CREATE TABLE t1 (a INT);
INSERT INTO t1 VALUES (1),(2),(3);
# Test for nested OR conditions:
SELECT * FROM t1 WHERE a = 1 AND
(3 = 0 OR (SELECT a = 1 OR (SELECT 3 WHERE a = a) = 3));
a
1
EXPLAIN EXTENDED
SELECT * FROM t1 WHERE a = 1 AND
(3 = 0 OR (SELECT a = 1 OR (SELECT 3 WHERE a = a) = 3));
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where; Using temporary
3 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1
Note 1276 Field or reference 'test.t1.a' of SELECT #3 was resolved in SELECT #1
Note 1276 Field or reference 'test.t1.a' of SELECT #3 was resolved in SELECT #1
Note 1249 Select 2 was reduced during optimization
Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = 1 and (1 or <expr_cache><`test`.`t1`.`a`>((/* select#3 */ select 3 from DUAL where `test`.`t1`.`a` = `test`.`t1`.`a`)) = 3)
PREPARE stmt FROM 'SELECT * FROM t1 WHERE a = 1 AND
(3 = 0 OR (SELECT a = 1 OR (SELECT 3 WHERE a = a) = 3))';
EXECUTE stmt;
a
1
EXECUTE stmt;
a
1
CREATE VIEW v1 AS SELECT * FROM t1 WHERE a = 1 AND
(3 = 0 OR (SELECT a = 1 OR (SELECT 3 WHERE a = a) = 3));
SELECT * FROM v1;
a
1
# Test for nested AND conditions:
SELECT * FROM t1 WHERE a = 1 OR
(3 = 3 AND (SELECT a = 1 AND (SELECT 3 WHERE a = a) = 3));
a
1
PREPARE stmt FROM 'SELECT * FROM t1 WHERE a = 1 OR
(3 = 3 AND (SELECT a = 1 AND (SELECT 3 WHERE a = a) = 3))';
EXECUTE stmt;
a
1
EXECUTE stmt;
a
1
CREATE VIEW v2 AS SELECT * FROM t1 WHERE a = 1 OR
(3 = 3 AND (SELECT a = 1 AND (SELECT 3 WHERE a = a) = 3));
SELECT * FROM v2;
a
1
DROP TABLE t1;
DROP VIEW v1, v2;
End of 10.0 tests End of 10.0 tests

View File

@ -132,7 +132,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t3 eq_ref PRIMARY,FFOLDERID_IDX,CMFLDRPARNT_IDX PRIMARY 34 test.t3.PARENTID 1 Using where 1 PRIMARY t3 eq_ref PRIMARY,FFOLDERID_IDX,CMFLDRPARNT_IDX PRIMARY 34 test.t3.PARENTID 1 Using where
1 PRIMARY t3 eq_ref PRIMARY,FFOLDERID_IDX,CMFLDRPARNT_IDX PRIMARY 34 test.t3.PARENTID 1 Using where 1 PRIMARY t3 eq_ref PRIMARY,FFOLDERID_IDX,CMFLDRPARNT_IDX PRIMARY 34 test.t3.PARENTID 1 Using where
1 PRIMARY t3 eq_ref PRIMARY,FFOLDERID_IDX,CMFLDRPARNT_IDX PRIMARY 34 test.t3.PARENTID 1 Using where 1 PRIMARY t3 eq_ref PRIMARY,FFOLDERID_IDX,CMFLDRPARNT_IDX PRIMARY 34 test.t3.PARENTID 1 Using where
1 PRIMARY t3 eq_ref PRIMARY,FFOLDERID_IDX,CMFLDRPARNT_IDX PRIMARY 34 test.t3.PARENTID 1 Using where 1 PRIMARY t3 ref|filter PRIMARY,FFOLDERID_IDX,CMFLDRPARNT_IDX FFOLDERID_IDX|CMFLDRPARNT_IDX 34|35 test.t3.PARENTID 1 (29%) Using where; Using rowid filter
drop table t1, t2, t3, t4; drop table t1, t2, t3, t4;
CREATE TABLE t1 (a int(10) , PRIMARY KEY (a)) Engine=InnoDB; CREATE TABLE t1 (a int(10) , PRIMARY KEY (a)) Engine=InnoDB;
INSERT INTO t1 VALUES (1),(2); INSERT INTO t1 VALUES (1),(2);

View File

@ -1320,6 +1320,28 @@ CASE WHEN a THEN DEFAULT(a) END
DROP TABLE t1; DROP TABLE t1;
SET timestamp=DEFAULT; SET timestamp=DEFAULT;
# #
# MDEV-27653 long uniques don't work with unicode collations
#
CREATE TABLE t1 (a timestamp, UNIQUE KEY(a) USING HASH);
SET time_zone='+00:00';
INSERT INTO t1 VALUES ('2001-01-01 10:20:30');
SET time_zone='+01:00';
INSERT INTO t1 SELECT MAX(a) FROM t1;
ERROR 23000: Duplicate entry '2001-01-01 11:20:30' for key 'a'
SELECT * FROM t1;
a
2001-01-01 11:20:30
DROP TABLE t1;
CREATE TABLE t1 (a timestamp, UNIQUE KEY(a) USING HASH);
SET time_zone='+00:00';
INSERT INTO t1 VALUES ('2001-01-01 10:20:30');
SET time_zone='+01:00';
CHECK TABLE t1;
Table Op Msg_type Msg_text
test.t1 check status OK
DROP TABLE t1;
SET time_zone=DEFAULT;
#
# End of 10.4 tests # End of 10.4 tests
# #
# #

View File

@ -877,6 +877,27 @@ SELECT CASE WHEN a THEN DEFAULT(a) END FROM t1;
DROP TABLE t1; DROP TABLE t1;
SET timestamp=DEFAULT; SET timestamp=DEFAULT;
--echo #
--echo # MDEV-27653 long uniques don't work with unicode collations
--echo #
CREATE TABLE t1 (a timestamp, UNIQUE KEY(a) USING HASH);
SET time_zone='+00:00';
INSERT INTO t1 VALUES ('2001-01-01 10:20:30');
SET time_zone='+01:00';
--error ER_DUP_ENTRY
INSERT INTO t1 SELECT MAX(a) FROM t1;
SELECT * FROM t1;
DROP TABLE t1;
CREATE TABLE t1 (a timestamp, UNIQUE KEY(a) USING HASH);
SET time_zone='+00:00';
INSERT INTO t1 VALUES ('2001-01-01 10:20:30');
SET time_zone='+01:00';
CHECK TABLE t1;
DROP TABLE t1;
SET time_zone=DEFAULT;
--echo # --echo #
--echo # End of 10.4 tests --echo # End of 10.4 tests
--echo # --echo #

View File

@ -4273,11 +4273,13 @@ GROUP BY
LEFT((SYSDATE()), 'foo') LEFT((SYSDATE()), 'foo')
WITH ROLLUP; WITH ROLLUP;
SUM(b) OVER (PARTITION BY a) ROW_NUMBER() OVER (PARTITION BY b) SUM(b) OVER (PARTITION BY a) ROW_NUMBER() OVER (PARTITION BY b)
NULL 1 0 1
NULL 1 0 2
Warnings: Warnings:
Warning 1292 Truncated incorrect INTEGER value: 'foo' Warning 1292 Truncated incorrect INTEGER value: 'foo'
Warning 1292 Truncated incorrect INTEGER value: 'foo' Warning 1292 Truncated incorrect INTEGER value: 'foo'
Warning 1292 Truncated incorrect DOUBLE value: 'bar'
Warning 1292 Truncated incorrect DOUBLE value: 'bar'
drop table t1; drop table t1;
# #
# #
@ -4336,6 +4338,46 @@ pk a bit_or
DROP TABLE t2; DROP TABLE t2;
DROP TABLE t1; DROP TABLE t1;
# #
# MDEV-15178: Filesort::make_sortorder: Assertion `pos->field != __null |
#
CREATE TABLE t1 (i1 int, a int);
INSERT INTO t1 VALUES (1, 1), (2, 2),(3, 3);
CREATE TABLE t2 (i2 int);
INSERT INTO t2 VALUES (1),(2),(5),(1),(7),(4),(3);
SELECT
a,
RANK() OVER (ORDER BY SUM(DISTINCT i1))
FROM
t1, t2 WHERE t2.i2 = t1.i1
GROUP BY
a;
a RANK() OVER (ORDER BY SUM(DISTINCT i1))
1 1
2 2
3 3
DROP TABLE t1, t2;
#
# MDEV-17014: Crash server using ROW_NUMBER() OVER (PARTITION ..)
#
CREATE TABLE t1 (UID BIGINT);
CREATE TABLE t2 (UID BIGINT);
CREATE TABLE t3 (UID BIGINT);
insert into t1 VALUES (1),(2);
insert into t2 VALUES (1),(2);
insert into t3 VALUES (1),(2);
SELECT
ROW_NUMBER() OVER (PARTITION BY GROUP_CONCAT(TT1.UID))
FROM t1 TT1,
t2 TT2,
t3 TT3
WHERE TT3.UID = TT1.UID AND TT2.UID = TT3.UID
GROUP BY TT1.UID
;
ROW_NUMBER() OVER (PARTITION BY GROUP_CONCAT(TT1.UID))
1
1
DROP TABLE t1, t2, t3;
#
# End of 10.3 tests # End of 10.3 tests
# #
# #

View File

@ -2816,6 +2816,46 @@ DROP TABLE t2;
DROP TABLE t1; DROP TABLE t1;
--echo #
--echo # MDEV-15178: Filesort::make_sortorder: Assertion `pos->field != __null |
--echo #
CREATE TABLE t1 (i1 int, a int);
INSERT INTO t1 VALUES (1, 1), (2, 2),(3, 3);
CREATE TABLE t2 (i2 int);
INSERT INTO t2 VALUES (1),(2),(5),(1),(7),(4),(3);
SELECT
a,
RANK() OVER (ORDER BY SUM(DISTINCT i1))
FROM
t1, t2 WHERE t2.i2 = t1.i1
GROUP BY
a;
DROP TABLE t1, t2;
--echo #
--echo # MDEV-17014: Crash server using ROW_NUMBER() OVER (PARTITION ..)
--echo #
CREATE TABLE t1 (UID BIGINT);
CREATE TABLE t2 (UID BIGINT);
CREATE TABLE t3 (UID BIGINT);
insert into t1 VALUES (1),(2);
insert into t2 VALUES (1),(2);
insert into t3 VALUES (1),(2);
SELECT
ROW_NUMBER() OVER (PARTITION BY GROUP_CONCAT(TT1.UID))
FROM t1 TT1,
t2 TT2,
t3 TT3
WHERE TT3.UID = TT1.UID AND TT2.UID = TT3.UID
GROUP BY TT1.UID
;
DROP TABLE t1, t2, t3;
--echo # --echo #
--echo # End of 10.3 tests --echo # End of 10.3 tests

View File

@ -24,3 +24,66 @@ pk count(a) over (order by pk rows between 2 preceding and 2 following)
28 5 28 5
27 5 27 5
drop table t0,t1; drop table t0,t1;
#
# MDEV-30052: Crash with a query containing nested WINDOW clauses
#
CREATE TABLE t1 (c INT);
insert into t1 values (1),(2);
UPDATE t1 SET c=1
WHERE c=2
ORDER BY
(1 IN ((
SELECT *
FROM (SELECT * FROM t1) AS v1
GROUP BY c
WINDOW v2 AS (ORDER BY
(SELECT *
FROM t1
GROUP BY c
WINDOW v3 AS (PARTITION BY c)
)
)
))
);
drop table t1;
#
# MDEV-29359: Server crashed with heap-use-after-free in
# Field::is_null(long long) const (Just testcase)
#
CREATE TABLE t1 (id int);
INSERT INTO t1 VALUES (-1),(0),(84);
SELECT
id IN (SELECT id
FROM t1
WINDOW w AS (ORDER BY (SELECT 1
FROM t1
WHERE
EXISTS ( SELECT id
FROM t1
GROUP BY id
WINDOW w2 AS (ORDER BY id)
)
)
)
)
FROM t1;
id IN (SELECT id
FROM t1
WINDOW w AS (ORDER BY (SELECT 1
FROM t1
WHERE
EXISTS ( SELECT id
FROM t1
GROUP BY id
WINDOW w2 AS (ORDER BY id)
)
)
)
)
1
1
1
DROP TABLE t1;
#
# End of 10.3 tests
#

View File

@ -33,3 +33,58 @@ limit 4;
--disable_view_protocol --disable_view_protocol
drop table t0,t1; drop table t0,t1;
--echo #
--echo # MDEV-30052: Crash with a query containing nested WINDOW clauses
--echo #
CREATE TABLE t1 (c INT);
insert into t1 values (1),(2);
UPDATE t1 SET c=1
WHERE c=2
ORDER BY
(1 IN ((
SELECT *
FROM (SELECT * FROM t1) AS v1
GROUP BY c
WINDOW v2 AS (ORDER BY
(SELECT *
FROM t1
GROUP BY c
WINDOW v3 AS (PARTITION BY c)
)
)
))
);
drop table t1;
--echo #
--echo # MDEV-29359: Server crashed with heap-use-after-free in
--echo # Field::is_null(long long) const (Just testcase)
--echo #
CREATE TABLE t1 (id int);
INSERT INTO t1 VALUES (-1),(0),(84);
SELECT
id IN (SELECT id
FROM t1
WINDOW w AS (ORDER BY (SELECT 1
FROM t1
WHERE
EXISTS ( SELECT id
FROM t1
GROUP BY id
WINDOW w2 AS (ORDER BY id)
)
)
)
)
FROM t1;
DROP TABLE t1;
--echo #
--echo # End of 10.3 tests
--echo #

View File

@ -0,0 +1,8 @@
connection default;
set @@global.binlog_checksum=none;
set @@session.debug_dbug='d,crash_before_write_second_checkpoint_event';
set @@global.binlog_checksum=crc32;
ERROR HY000: Lost connection to MySQL server during query
connection default;
NOT FOUND /Replication event checksum verification failed/ in mysqld.1.err
End of the tests

View File

@ -0,0 +1,89 @@
--source include/have_innodb.inc
--source include/have_debug.inc
--source include/have_debug_sync.inc
--source include/have_binlog_format_mixed.inc
--source include/count_sessions.inc
# MDEV-30010 merely adds a Read-Committed version of the MDEV-30225 test,
# solely to prove that the RC isolation level yields ROW binlog format as it
# is supposed to:
# https://mariadb.com/kb/en/unsafe-statements-for-statement-based-replication/#isolation-levels.
# The original MDEV-30225 test is adapted to RC to create a similarly
# sophisticated scenario which, however, does not lead to any deadlock.
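#
# (Illustrative aside, not part of the original test; the table name below is
# hypothetical.) The rule referenced above means that under MIXED binlog
# format a READ COMMITTED transaction modifying an InnoDB table is logged as
# row events rather than as the statement itself, roughly:
#   SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
#   SET SESSION binlog_format= MIXED;
#   CREATE TABLE demo (a INT PRIMARY KEY) ENGINE=InnoDB;
#   INSERT INTO demo VALUES (1);
#   UPDATE demo SET a= 2;   # binlogged as Table_map + Update_rows events
#   DROP TABLE demo;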
--connect (pause_purge,localhost,root)
START TRANSACTION WITH CONSISTENT SNAPSHOT;
--connection default
CREATE TABLE t (pk int PRIMARY KEY, sk INT UNIQUE) ENGINE=InnoDB;
INSERT INTO t VALUES (10, 100);
--connect (con1,localhost,root)
BEGIN; # trx 0
SELECT * FROM t WHERE sk = 100 FOR UPDATE;
--connect (con2,localhost,root)
SET DEBUG_SYNC="lock_wait_suspend_thread_enter SIGNAL insert_wait_started";
# trx 1 blocks trying to read the record in the secondary index during the
# duplicate check. It is the first in the waiting queue, so it will be woken
# up first when trx 0 commits.
--send INSERT INTO t VALUES (5, 100) # trx 1
--connect (con3,localhost,root)
SET TRANSACTION ISOLATION LEVEL READ COMMITTED;
SET DEBUG_SYNC="now WAIT_FOR insert_wait_started";
SET DEBUG_SYNC="lock_wait_suspend_thread_enter SIGNAL delete_started_waiting";
# trx 2 can delete (5, 100) on the master, but not on the slave, as on the
# slave trx 1 can insert (5, 100) after trx 2 has positioned its cursor.
# Trx 2's lock is placed in the waiting queue after trx 1's lock, but its
# persistent cursor position was stored on the (100, 10) record in the
# secondary index before suspending. After trx 1 commits, trx 2 restores its
# persistent cursor position on (100, 10). As the (100, 5) secondary index
# record was inserted before (100, 10) in logical order, and the (100, 10)
# record is delete-marked, trx 2 just continues scanning.
#
# Note: there can be several records with the same key in a unique secondary
# index, but at most one of them is non-delete-marked. That is why, when we do
# a point query, the cursor is positioned on the first record in logical
# order, and records are then iterated until either a non-delete-marked record
# is found or all records with the same unique fields have been iterated.
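#
# (Illustrative aside, not part of the original test; the table name below is
# hypothetical.) A unique secondary index may transiently hold several entries
# for the same key value, at most one of them non-delete-marked, e.g.:
#   CREATE TABLE demo (pk INT PRIMARY KEY, sk INT UNIQUE) ENGINE=InnoDB;
#   INSERT INTO demo VALUES (1, 100);
#   REPLACE INTO demo VALUES (2, 100);
# Until purge runs, the index on sk contains both (100, 1), delete-marked, and
# (100, 2), the live entry, and a point query on sk = 100 skips the
# delete-marked entry exactly as described above.
#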
# Prepare for showing the interesting binlog events below
--let $binlog_start= query_get_value(SHOW MASTER STATUS, Position, 1)
BEGIN;
--send UPDATE t SET sk = 200 WHERE sk = 100; # trx 2
--connection con1
SET DEBUG_SYNC="now WAIT_FOR delete_started_waiting";
DELETE FROM t WHERE sk=100; # trx 0
COMMIT;
--disconnect con1
--connection con2
--reap
--disconnect con2
--connection con3
--error 0
--reap
if (`SELECT ROW_COUNT() > 0`)
{
--echo unexpected effective UPDATE
--die
}
--echo must be logged in ROW format as the only event of trx 2 (con3)
INSERT INTO t VALUES (11, 101);
COMMIT;
--source include/show_binlog_events.inc
--disconnect con3
--connection default
# If the bug is not fixed, we will see the row inserted by trx 1 here. This can
# cause a duplicate key error on the slave when some other trx tries to insert
# a row with the same secondary key as the one inserted by trx 1 and not
# deleted by trx 2.
SELECT * FROM t;
--disconnect pause_purge
SET DEBUG_SYNC="RESET";
DROP TABLE t;
--source include/wait_until_count_sessions.inc

View File

@ -4279,11 +4279,13 @@ GROUP BY
LEFT((SYSDATE()), 'foo') LEFT((SYSDATE()), 'foo')
WITH ROLLUP; WITH ROLLUP;
SUM(b) OVER (PARTITION BY a) ROW_NUMBER() OVER (PARTITION BY b) SUM(b) OVER (PARTITION BY a) ROW_NUMBER() OVER (PARTITION BY b)
NULL 1 0 1
NULL 1 0 2
Warnings: Warnings:
Warning 1292 Truncated incorrect INTEGER value: 'foo' Warning 1292 Truncated incorrect INTEGER value: 'foo'
Warning 1292 Truncated incorrect INTEGER value: 'foo' Warning 1292 Truncated incorrect INTEGER value: 'foo'
Warning 1292 Truncated incorrect DOUBLE value: 'bar'
Warning 1292 Truncated incorrect DOUBLE value: 'bar'
drop table t1; drop table t1;
# #
# #
@ -4342,6 +4344,46 @@ pk a bit_or
DROP TABLE t2; DROP TABLE t2;
DROP TABLE t1; DROP TABLE t1;
# #
# MDEV-15178: Filesort::make_sortorder: Assertion `pos->field != __null |
#
CREATE TABLE t1 (i1 int, a int);
INSERT INTO t1 VALUES (1, 1), (2, 2),(3, 3);
CREATE TABLE t2 (i2 int);
INSERT INTO t2 VALUES (1),(2),(5),(1),(7),(4),(3);
SELECT
a,
RANK() OVER (ORDER BY SUM(DISTINCT i1))
FROM
t1, t2 WHERE t2.i2 = t1.i1
GROUP BY
a;
a RANK() OVER (ORDER BY SUM(DISTINCT i1))
1 1
2 2
3 3
DROP TABLE t1, t2;
#
# MDEV-17014: Crash server using ROW_NUMBER() OVER (PARTITION ..)
#
CREATE TABLE t1 (UID BIGINT);
CREATE TABLE t2 (UID BIGINT);
CREATE TABLE t3 (UID BIGINT);
insert into t1 VALUES (1),(2);
insert into t2 VALUES (1),(2);
insert into t3 VALUES (1),(2);
SELECT
ROW_NUMBER() OVER (PARTITION BY GROUP_CONCAT(TT1.UID))
FROM t1 TT1,
t2 TT2,
t3 TT3
WHERE TT3.UID = TT1.UID AND TT2.UID = TT3.UID
GROUP BY TT1.UID
;
ROW_NUMBER() OVER (PARTITION BY GROUP_CONCAT(TT1.UID))
1
1
DROP TABLE t1, t2, t3;
#
# End of 10.3 tests # End of 10.3 tests
# #
# #

View File

@ -205,8 +205,3 @@ CALL mtr.add_suppression("conflict state 7 after post commit");
# Warning happens when the cluster is started for the first time # Warning happens when the cluster is started for the first time
CALL mtr.add_suppression("Skipped GCache ring buffer recovery"); CALL mtr.add_suppression("Skipped GCache ring buffer recovery");
--connection node_2
call mtr.add_suppression("Error in Log_event::read_log_event():.*");
CALL mtr.add_suppression("Skipped GCache ring buffer recovery");

View File

@ -0,0 +1,30 @@
call mtr.add_suppression("InnoDB: New log files created");
#
# Start of 10.3 tests
#
#
# MDEV-23335 MariaBackup Incremental Does Not Reflect Dropped/Created Databases
#
CREATE DATABASE db1;
CREATE DATABASE db2;
CREATE TABLE db1.t1 (a INT) ENGINE=MyISAM;
CREATE TABLE db1.t2 (a INT) ENGINE=InnoDB;
# Create base backup
DROP DATABASE db1;
# Create incremental backup
# Remove incremental_dir/db2/db.opt file to make incremental_dir/db2/ empty
# Prepare base backup, apply incremental one
# shutdown server
# remove datadir
# xtrabackup move back
# restart
# Expect no 'db1' in the output, because it was really dropped.
# Expect 'db2' in the output, because it was not dropped!
# (its incremental directory was emptied only)
SHOW DATABASES LIKE 'db%';
Database (db%)
db2
DROP DATABASE db2;
#
# End of 10.3 tests
#

View File

@ -0,0 +1,68 @@
--source include/have_innodb.inc
call mtr.add_suppression("InnoDB: New log files created");
--echo #
--echo # Start of 10.3 tests
--echo #
--echo #
--echo # MDEV-23335 MariaBackup Incremental Does Not Reflect Dropped/Created Databases
--echo #
--let $datadir=`SELECT @@datadir`
--let $basedir=$MYSQLTEST_VARDIR/tmp/backup
--let $incremental_dir=$MYSQLTEST_VARDIR/tmp/backup_inc1
# Create two databases:
# - db1 is dropped normally below
# - db2 is used to cover a corner case: its db.opt file is removed
# Incremental backup contains:
# - no directory for db1
# - an empty directory for db2 (after we remove db2/db.opt)
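# Resulting backup layout (an illustrative restatement of the above):
#   $basedir          -- db1/ and db2/ both present (base backup)
#   $incremental_dir  -- no db1/ directory at all; db2/ left empty once
#                        db2/db.opt is removed below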
CREATE DATABASE db1;
CREATE DATABASE db2;
# Add some tables to db1
CREATE TABLE db1.t1 (a INT) ENGINE=MyISAM;
CREATE TABLE db1.t2 (a INT) ENGINE=InnoDB;
--echo # Create base backup
--disable_result_log
--exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$basedir
--enable_result_log
DROP DATABASE db1;
--echo # Create incremental backup
--exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$incremental_dir --incremental-basedir=$basedir
--echo # Remove incremental_dir/db2/db.opt file to make incremental_dir/db2/ empty
--remove_file $incremental_dir/db2/db.opt
--echo # Prepare base backup, apply incremental one
--disable_result_log
--exec $XTRABACKUP --prepare --target-dir=$basedir
--exec $XTRABACKUP --prepare --target-dir=$basedir --incremental-dir=$incremental_dir
--enable_result_log
--let $targetdir=$basedir
--source include/restart_and_restore.inc
--enable_result_log
--echo # Expect no 'db1' in the output, because it was really dropped.
--echo # Expect 'db2' in the output, because it was not dropped!
--echo # (its incremental directory was emptied only)
SHOW DATABASES LIKE 'db%';
DROP DATABASE db2;
--rmdir $basedir
--rmdir $incremental_dir
--echo #
--echo # End of 10.3 tests
--echo #

View File

@ -521,3 +521,10 @@ TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'show status like \'server_audit_
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'show variables like \'server_audit%\'',0 TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'show variables like \'server_audit%\'',0
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,plugin, TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,plugin,
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'uninstall plugin server_audit',0 TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'uninstall plugin server_audit',0
#
# MDEV-27631 Assertion `global_status_var.global_memory_used == 0' failed in mysqld_exit
#
install plugin server_audit soname 'server_audit';
uninstall plugin server_audit;
Warnings:
Warning 1620 Plugin is busy and will be uninstalled on shutdown

View File

@ -235,3 +235,8 @@ uninstall plugin server_audit;
cat_file $MYSQLD_DATADIR/server_audit.log; cat_file $MYSQLD_DATADIR/server_audit.log;
remove_file $MYSQLD_DATADIR/server_audit.log; remove_file $MYSQLD_DATADIR/server_audit.log;
--echo #
--echo # MDEV-27631 Assertion `global_status_var.global_memory_used == 0' failed in mysqld_exit
--echo #
install plugin server_audit soname 'server_audit';
uninstall plugin server_audit;

View File

@ -0,0 +1,75 @@
# Creates or drops a stored function used as part of a debug_sync-based
# synchronization mechanism between replication servers.
#
# Parameters:
#   $create_or_drop = [create]
#   $server_master  = [master]
#   $server_slave   = [slave]
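#
# Usage sketch (illustrative only; the include file name below is a
# hypothetical placeholder for this file's actual name):
#   --let $create_or_drop= create
#   --source include/create_or_drop_sync_func.inc
#   ... run the test ...
#   --let $create_or_drop= drop
#   --source include/create_or_drop_sync_func.inc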
if (!$create_or_drop)
{
--let $create_or_drop=create
}
if (`select strcmp('$create_or_drop', 'create') = 0`)
{
if (!$server_master)
{
--let $server_master=master
}
if (!$server_slave)
{
--let $server_slave=slave
}
--connection $server_master
# Use a stored function to inject a debug_sync into the appropriate THD.
# The function does nothing on the master, and on the slave it injects the
# desired debug_sync action(s).
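# For example (hypothetical signal name, shown for illustration only), a
# master-side statement such as
#   INSERT INTO t1 VALUES (foo(1, 'now SIGNAL slave_ready', ''));
# replicates unchanged, and only the slave-side definition of foo() passes its
# arguments to SET debug_sync.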
SET sql_log_bin=0;
--delimiter ||
CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500))
RETURNS INT DETERMINISTIC
BEGIN
RETURN x;
END
||
--delimiter ;
SET sql_log_bin=1;
--connection $server_slave
SET sql_log_bin=0;
--delimiter ||
CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500))
RETURNS INT DETERMINISTIC
BEGIN
IF d1 != '' THEN
SET debug_sync = d1;
END IF;
IF d2 != '' THEN
SET debug_sync = d2;
END IF;
RETURN x;
END
||
--delimiter ;
SET sql_log_bin=1;
}
if (`select strcmp('$create_or_drop', 'drop') = 0`)
{
if (!$server_slave)
{
--let $server_slave=slave
}
if (!$server_master)
{
--let $server_master=master
}
--connection $server_slave
SET DEBUG_SYNC='RESET';
--connection $server_master
SET DEBUG_SYNC='RESET';
DROP FUNCTION foo;
}

View File

@ -0,0 +1,60 @@
include/master-slave.inc
[connection master]
connection slave;
include/stop_slave.inc
change master to master_delay=3, master_use_gtid=Slave_Pos;
set @@GLOBAL.slave_parallel_threads=2;
include/start_slave.inc
connection master;
create table t1 (a int);
include/sync_slave_sql_with_master.inc
#
# Pt 1) Ensure SBM is updated immediately upon arrival of the next event
# Lock t1 on slave so the first received transaction does not complete/commit
connection slave;
LOCK TABLES t1 WRITE;
connection master;
# Sleep 2 to allow a buffer between events for SBM check
insert into t1 values (0);
include/save_master_gtid.inc
connection slave;
# Waiting for transaction to arrive on slave and begin SQL Delay..
# Validating SBM is updated on event arrival..
# ..done
connection slave;
UNLOCK TABLES;
include/sync_with_master_gtid.inc
#
# Pt 2) If the SQL thread has not entered an idle state, ensure
# following events do not update SBM
# Stop slave IO thread so it receives both events together on restart
connection slave;
include/stop_slave_io.inc
connection master;
# Sleep 2 to allow a buffer between events for SBM check
insert into t1 values (1);
# Sleep 3 to create gap between events
insert into t1 values (2);
connection slave;
LOCK TABLES t1 WRITE;
START SLAVE IO_THREAD;
# Wait for first transaction to complete SQL delay and begin execution..
# Validate SBM calculation doesn't use the second transaction because SQL thread shouldn't have gone idle..
# ..and that SBM wasn't calculated using prior committed transactions
# ..done
connection slave;
UNLOCK TABLES;
#
# Cleanup
# Reset master_delay
include/stop_slave.inc
CHANGE MASTER TO master_delay=0;
set @@GLOBAL.slave_parallel_threads=4;
include/start_slave.inc
connection master;
DROP TABLE t1;
include/save_master_gtid.inc
connection slave;
include/sync_with_master_gtid.inc
include/rpl_end.inc
# End of rpl_delayed_parallel_slave_sbm.test

View File

@ -0,0 +1,76 @@
include/master-slave.inc
[connection master]
# Initialize
connection slave;
ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB;
# Setup data
connection master;
CREATE TABLE t1 (a int PRIMARY KEY) ENGINE=InnoDB;
CREATE TABLE ta (a int);
include/save_master_gtid.inc
connection slave;
include/sync_with_master_gtid.inc
connection master;
SET sql_log_bin=0;
CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500))
RETURNS INT DETERMINISTIC
BEGIN
RETURN x;
END
||
SET sql_log_bin=1;
connection slave;
SET sql_log_bin=0;
CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500))
RETURNS INT DETERMINISTIC
BEGIN
IF d1 != '' THEN
SET debug_sync = d1;
END IF;
IF d2 != '' THEN
SET debug_sync = d2;
END IF;
RETURN x;
END
||
SET sql_log_bin=1;
include/stop_slave.inc
SET @old_parallel_threads =@@GLOBAL.slave_parallel_threads;
SET @old_parallel_mode =@@GLOBAL.slave_parallel_mode;
SET @old_gtid_strict_mode =@@GLOBAL.gtid_strict_mode;
SET GLOBAL slave_parallel_threads=10;
SET GLOBAL slave_parallel_mode=conservative;
SET GLOBAL gtid_strict_mode=ON;
include/start_slave.inc
connection master;
SET @old_format= @@SESSION.binlog_format;
SET binlog_format=statement;
INSERT INTO t1 VALUES (foo(1, 'rpl_parallel_after_mark_start_commit WAIT_FOR sig_go', ''));
ANALYZE TABLE ta;
Table Op Msg_type Msg_text
test.ta analyze status Engine-independent statistics collected
test.ta analyze status Table is already up to date
include/save_master_gtid.inc
connection slave;
SELECT info FROM information_schema.processlist WHERE state = "Waiting for prior transaction to commit";
info
ANALYZE TABLE ta
set @@debug_sync="now signal sig_go";
include/sync_with_master_gtid.inc
# Cleanup
connection master;
DROP TABLE t1,ta;
connection slave;
SET DEBUG_SYNC='RESET';
connection master;
SET DEBUG_SYNC='RESET';
DROP FUNCTION foo;
include/save_master_gtid.inc
connection slave;
include/sync_with_master_gtid.inc
include/stop_slave.inc
SET @@GLOBAL.slave_parallel_threads=@old_parallel_threads;
SET @@GLOBAL.slave_parallel_mode =@old_parallel_mode;
SET @@GLOBAL.gtid_strict_mode =@old_gtid_strict_mode;
include/start_slave.inc
include/rpl_end.inc

View File

@ -0,0 +1 @@
--slave-parallel-threads=4

View File

@ -0,0 +1,133 @@
#
# This test ensures that after a delayed parallel slave has idled, i.e.
# executed everything in its relay log, the next event group that the SQL
# thread reads from the relay log is immediately reflected in
# Seconds_Behind_Master. In particular, it ensures that the calculation for
# Seconds_Behind_Master is based on the timestamp of the new transaction,
# rather than the last committed transaction.
#
# References:
# MDEV-29639: Seconds_Behind_Master is incorrect for Delayed, Parallel
# Replicas
#
--source include/master-slave.inc
--connection slave
--source include/stop_slave.inc
--let $master_delay= 3
--eval change master to master_delay=$master_delay, master_use_gtid=Slave_Pos
--let $old_slave_threads= `SELECT @@GLOBAL.slave_parallel_threads`
set @@GLOBAL.slave_parallel_threads=2;
--source include/start_slave.inc
--connection master
create table t1 (a int);
--source include/sync_slave_sql_with_master.inc
--echo #
--echo # Pt 1) Ensure SBM is updated immediately upon arrival of the next event
--echo # Lock t1 on slave so the first received transaction does not complete/commit
--connection slave
LOCK TABLES t1 WRITE;
--connection master
--echo # Sleep 2 to allow a buffer between events for SBM check
sleep 2;
--let $ts_trx_before_ins= `SELECT UNIX_TIMESTAMP()`
--let insert_ctr= 0
--eval insert into t1 values ($insert_ctr)
--inc $insert_ctr
--source include/save_master_gtid.inc
--connection slave
--echo # Waiting for transaction to arrive on slave and begin SQL Delay..
--let $wait_condition= SELECT count(*) FROM information_schema.processlist WHERE state LIKE 'Waiting until MASTER_DELAY seconds after master executed event';
--source include/wait_condition.inc
--echo # Validating SBM is updated on event arrival..
--let $sbm_trx1_arrive= query_get_value(SHOW SLAVE STATUS, Seconds_Behind_Master, 1)
--let $seconds_since_idling= `SELECT UNIX_TIMESTAMP() - $ts_trx_before_ins`
if (`SELECT $sbm_trx1_arrive > ($seconds_since_idling + 1)`)
{
--echo # SBM was $sbm_trx1_arrive yet shouldn't have been larger than $seconds_since_idling + 1 (for possible negative clock_diff_with_master)
--die Seconds_Behind_Master should reset after idling
}
--echo # ..done
--connection slave
UNLOCK TABLES;
--source include/sync_with_master_gtid.inc
--echo #
--echo # Pt 2) If the SQL thread has not entered an idle state, ensure
--echo # following events do not update SBM
--echo # Stop slave IO thread so it receives both events together on restart
--connection slave
--source include/stop_slave_io.inc
--connection master
--echo # Sleep 2 to allow a buffer between events for SBM check
sleep 2;
--let $ts_trxpt2_before_ins= `SELECT UNIX_TIMESTAMP()`
--eval insert into t1 values ($insert_ctr)
--inc $insert_ctr
--echo # Sleep 3 to create gap between events
sleep 3;
--eval insert into t1 values ($insert_ctr)
--inc $insert_ctr
--let $ts_trx_after_ins= `SELECT UNIX_TIMESTAMP()`
--connection slave
LOCK TABLES t1 WRITE;
START SLAVE IO_THREAD;
--echo # Wait for first transaction to complete SQL delay and begin execution..
--let $wait_condition= SELECT count(*) FROM information_schema.processlist WHERE state LIKE 'Waiting for table metadata lock%' AND command LIKE 'Slave_Worker';
--source include/wait_condition.inc
--echo # Validate SBM calculation doesn't use the second transaction because SQL thread shouldn't have gone idle..
--let $sbm_after_trx_no_idle= query_get_value(SHOW SLAVE STATUS, Seconds_Behind_Master, 1)
--let $timestamp_trxpt2_arrive= `SELECT UNIX_TIMESTAMP()`
if (`SELECT $sbm_after_trx_no_idle < $timestamp_trxpt2_arrive - $ts_trx_after_ins`)
{
--let $cmpv= `SELECT $timestamp_trxpt2_arrive - $ts_trx_after_ins`
--echo # SBM $sbm_after_trx_no_idle was more recent than time since last transaction ($cmpv seconds)
--die Seconds_Behind_Master should not have used second transaction timestamp
}
--let $seconds_since_idling= `SELECT ($timestamp_trxpt2_arrive - $ts_trxpt2_before_ins)`
--echo # ..and that SBM wasn't calculated using prior committed transactions
if (`SELECT $sbm_after_trx_no_idle > ($seconds_since_idling + 1)`)
{
--echo # SBM was $sbm_after_trx_no_idle yet shouldn't have been larger than $seconds_since_idling + 1 (for possible negative clock_diff_with_master)
--die Seconds_Behind_Master calculation should not have used prior committed transaction
}
--echo # ..done
--connection slave
UNLOCK TABLES;
--echo #
--echo # Cleanup
--echo # Reset master_delay
--source include/stop_slave.inc
--eval CHANGE MASTER TO master_delay=0
--eval set @@GLOBAL.slave_parallel_threads=$old_slave_threads
--source include/start_slave.inc
--connection master
DROP TABLE t1;
--source include/save_master_gtid.inc
--connection slave
--source include/sync_with_master_gtid.inc
--source include/rpl_end.inc
--echo # End of rpl_delayed_parallel_slave_sbm.test
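For context on what the checks above measure, here is a sketch of how the server conventionally derives Seconds_Behind_Master from the current time, the last applied master timestamp and the clock difference measured at connect time; treat the exact clamping and the helper name as assumptions of this sketch, not the server's code:

#include <ctime>

static long seconds_behind_master_sketch(bool sql_thread_caught_up,
                                         std::time_t last_master_timestamp,
                                         long clock_diff_with_master)
{
  if (sql_thread_caught_up || last_master_timestamp == 0)
    return 0;                                    // an idle slave reports zero
  long sbm= (long) std::difftime(std::time(nullptr), last_master_timestamp)
            - clock_diff_with_master;
  return sbm > 0 ? sbm : 0;                      // never report a negative lag
}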

View File

@ -0,0 +1,84 @@
# This test file verifies the fix for
# MDEV-30323 Some DDLs like ANALYZE can complete on parallel slave out of order
# Debug-sync tests targeting parallel replication of ADMIN commands
# are welcome here.
--source include/have_innodb.inc
--source include/have_debug_sync.inc
--source include/master-slave.inc
--echo # Initialize
--connection slave
ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB;
--echo # Setup data
--connection master
CREATE TABLE t1 (a int PRIMARY KEY) ENGINE=InnoDB;
CREATE TABLE ta (a int);
--let $pre_load_gtid=`SELECT @@last_gtid`
--source include/save_master_gtid.inc
--connection slave
--source include/sync_with_master_gtid.inc
--source suite/rpl/include/create_or_drop_sync_func.inc
# configure MDEV-30323 slave
--source include/stop_slave.inc
SET @old_parallel_threads =@@GLOBAL.slave_parallel_threads;
SET @old_parallel_mode =@@GLOBAL.slave_parallel_mode;
SET @old_gtid_strict_mode =@@GLOBAL.gtid_strict_mode;
SET GLOBAL slave_parallel_threads=10;
SET GLOBAL slave_parallel_mode=conservative;
SET GLOBAL gtid_strict_mode=ON;
--source include/start_slave.inc
# The MDEV-30323 setup needs two groups of events, the first of which is a DML
# and the second is ANALYZE.
# The latter is made to race ahead of the DML in slave execution thanks
# to a simulated DML latency.
# In the fixed case the race should not be a problem: ultimately
# ANALYZE must wait for its turn to update the slave's @@global.gtid_binlog_pos.
# Otherwise the reported out-of-order (OOO) error must be issued.
--connection master
SET @old_format= @@SESSION.binlog_format;
SET binlog_format=statement;
INSERT INTO t1 VALUES (foo(1, 'rpl_parallel_after_mark_start_commit WAIT_FOR sig_go', ''));
ANALYZE TABLE ta;
--source include/save_master_gtid.inc
--connection slave
--let $wait_condition= SELECT COUNT(*) = 1 FROM information_schema.processlist WHERE state = "Waiting for prior transaction to commit"
--source include/wait_condition.inc
SELECT info FROM information_schema.processlist WHERE state = "Waiting for prior transaction to commit";
if (`select strcmp(@@global.gtid_binlog_pos, '$pre_load_gtid') <> 0 or strcmp(@@global.gtid_slave_pos, '$pre_load_gtid') <> 0`)
{
--let $bs=`SELECT @@global.gtid_binlog_pos`
--let $es=`SELECT @@global.gtid_slave_pos`
--echo Mismatch between the expected $pre_load_gtid state and the actual binlog state @@global.gtid_binlog_pos = $bs and/or slave execution state @@global.gtid_slave_pos = $es.
--die
}
set @@debug_sync="now signal sig_go";
--source include/sync_with_master_gtid.inc
--echo # Cleanup
--connection master
DROP TABLE t1,ta;
--let $create_or_drop=drop
--source suite/rpl/include/create_or_drop_sync_func.inc
--source include/save_master_gtid.inc
--connection slave
--source include/sync_with_master_gtid.inc
--source include/stop_slave.inc
SET @@GLOBAL.slave_parallel_threads=@old_parallel_threads;
SET @@GLOBAL.slave_parallel_mode =@old_parallel_mode;
SET @@GLOBAL.gtid_strict_mode =@old_gtid_strict_mode;
--source include/start_slave.inc
--source include/rpl_end.inc

View File

@ -228,6 +228,15 @@ FOREACH(se aria partition perfschema sql_sequence wsrep)
ENDIF() ENDIF()
ENDFOREACH() ENDFOREACH()
IF(VISIBILITY_HIDDEN_FLAG AND TARGET partition AND WITH_UBSAN)
# the spider plugin needs some partition symbols from inside mysqld
# when built with ubsan, in which case we need to remove
# -fvisibility=hidden from partition
GET_TARGET_PROPERTY(f partition COMPILE_FLAGS)
STRING(REPLACE "${VISIBILITY_HIDDEN_FLAG}" "" f ${f})
SET_TARGET_PROPERTIES(partition PROPERTIES COMPILE_FLAGS "${f}")
ENDIF()
IF(WIN32) IF(WIN32)
SET(MYSQLD_SOURCE main.cc message.rc) SET(MYSQLD_SOURCE main.cc message.rc)
ELSE() ELSE()

View File

@ -28,7 +28,7 @@ int writefile(const char *path, const char *db, const char *table,
inline void deletefrm(const char *path) inline void deletefrm(const char *path)
{ {
char frm_name[FN_REFLEN]; char frm_name[FN_REFLEN];
strxmov(frm_name, path, reg_ext, NullS); strxnmov(frm_name, sizeof(frm_name)-1, path, reg_ext, NullS);
mysql_file_delete(key_file_frm, frm_name, MYF(0)); mysql_file_delete(key_file_frm, frm_name, MYF(0));
} }

View File

@ -1869,17 +1869,11 @@ Field::Field(uchar *ptr_arg,uint32 length_arg,uchar *null_ptr_arg,
} }
void Field::hash(ulong *nr, ulong *nr2) void Field::hash_not_null(Hasher *hasher)
{ {
if (is_null()) DBUG_ASSERT(marked_for_read());
{ DBUG_ASSERT(!is_null());
*nr^= (*nr << 1) | 1; hasher->add(sort_charset(), ptr, pack_length());
}
else
{
uint len= pack_length();
sort_charset()->hash_sort(ptr, len, nr, nr2);
}
} }
size_t size_t
@ -8268,17 +8262,12 @@ bool Field_varstring::is_equal(const Column_definition &new_field) const
} }
void Field_varstring::hash(ulong *nr, ulong *nr2) void Field_varstring::hash_not_null(Hasher *hasher)
{
if (is_null())
{
*nr^= (*nr << 1) | 1;
}
else
{ {
DBUG_ASSERT(marked_for_read());
DBUG_ASSERT(!is_null());
uint len= length_bytes == 1 ? (uint) *ptr : uint2korr(ptr); uint len= length_bytes == 1 ? (uint) *ptr : uint2korr(ptr);
charset()->hash_sort(ptr + length_bytes, len, nr, nr2); hasher->add(charset(), ptr + length_bytes, len);
}
} }
@ -8653,6 +8642,17 @@ oom_error:
} }
void Field_blob::hash_not_null(Hasher *hasher)
{
DBUG_ASSERT(marked_for_read());
DBUG_ASSERT(!is_null());
char *blob;
memcpy(&blob, ptr + packlength, sizeof(char*));
if (blob)
hasher->add(Field_blob::charset(), blob, get_length(ptr));
}
double Field_blob::val_real(void) double Field_blob::val_real(void)
{ {
DBUG_ASSERT(marked_for_read()); DBUG_ASSERT(marked_for_read());
@ -9726,20 +9726,27 @@ const DTCollation & Field_bit::dtcollation() const
} }
void Field_bit::hash(ulong *nr, ulong *nr2) /*
This method always calculates hash over 8 bytes.
This is different from how the HEAP engine calculates a hash:
HEAP takes into account the actual octet size, so say for BIT(18)
it calculates hash over three bytes only:
- the incomplete byte with bits 16..17
- the two full bytes with bits 0..15
See hp_rec_hashnr(), hp_hashnr() for details.
The HEAP way is more efficient, especially for short lengths.
Let's consider fixing Field_bit eventually to do it in the HEAP way,
with proper measures to make upgrading partitioned tables easy.
*/
void Field_bit::hash_not_null(Hasher *hasher)
{ {
if (is_null()) DBUG_ASSERT(marked_for_read());
{ DBUG_ASSERT(!is_null());
*nr^= (*nr << 1) | 1;
}
else
{
CHARSET_INFO *cs= &my_charset_bin;
longlong value= Field_bit::val_int(); longlong value= Field_bit::val_int();
uchar tmp[8]; uchar tmp[8];
mi_int8store(tmp,value); mi_int8store(tmp,value);
cs->hash_sort(tmp, 8, nr, nr2); hasher->add(&my_charset_bin, tmp, 8);
}
} }
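A tiny worked illustration of the length difference described in the comment above, using the BIT(18) example from that comment (nothing below is taken from the server, it is plain arithmetic):

#include <cstdio>

int main()
{
  unsigned bits= 18;                     // BIT(18), the example from the comment
  unsigned heap_octets= (bits + 7) / 8;  // HEAP would hash only the used octets: 3
  unsigned field_bit_octets= 8;          // Field_bit::hash_not_null() always hashes 8
  printf("BIT(%u): HEAP hashes %u bytes, Field_bit hashes %u bytes\n",
         bits, heap_octets, field_bit_octets);
  return 0;
}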

View File

@ -1827,7 +1827,14 @@ public:
key_map get_possible_keys(); key_map get_possible_keys();
/* Hash value */ /* Hash value */
virtual void hash(ulong *nr, ulong *nr2); void hash(Hasher *hasher)
{
if (is_null())
hasher->add_null();
else
hash_not_null(hasher);
}
virtual void hash_not_null(Hasher *hasher);
/** /**
Get the upper limit of the MySQL integral and floating-point type. Get the upper limit of the MySQL integral and floating-point type.
@ -4196,7 +4203,7 @@ public:
uchar *new_ptr, uint32 length, uchar *new_ptr, uint32 length,
uchar *new_null_ptr, uint new_null_bit) override; uchar *new_null_ptr, uint new_null_bit) override;
bool is_equal(const Column_definition &new_field) const override; bool is_equal(const Column_definition &new_field) const override;
void hash(ulong *nr, ulong *nr2) override; void hash_not_null(Hasher *hasher) override;
uint length_size() const override { return length_bytes; } uint length_size() const override { return length_bytes; }
void print_key_value(String *out, uint32 length) override; void print_key_value(String *out, uint32 length) override;
Binlog_type_info binlog_type_info() const override; Binlog_type_info binlog_type_info() const override;
@ -4456,6 +4463,7 @@ public:
bool make_empty_rec_store_default_value(THD *thd, Item *item) override; bool make_empty_rec_store_default_value(THD *thd, Item *item) override;
int store(const char *to, size_t length, CHARSET_INFO *charset) override; int store(const char *to, size_t length, CHARSET_INFO *charset) override;
using Field_str::store; using Field_str::store;
void hash_not_null(Hasher *hasher) override;
double val_real() override; double val_real() override;
longlong val_int() override; longlong val_int() override;
String *val_str(String *, String *) override; String *val_str(String *, String *) override;
@ -5032,7 +5040,7 @@ public:
if (bit_ptr) if (bit_ptr)
bit_ptr= ADD_TO_PTR(bit_ptr, ptr_diff, uchar*); bit_ptr= ADD_TO_PTR(bit_ptr, ptr_diff, uchar*);
} }
void hash(ulong *nr, ulong *nr2) override; void hash_not_null(Hasher *hasher) override;
SEL_ARG *get_mm_leaf(RANGE_OPT_PARAM *param, KEY_PART *key_part, SEL_ARG *get_mm_leaf(RANGE_OPT_PARAM *param, KEY_PART *key_part,
const Item_bool_func *cond, const Item_bool_func *cond,

View File

@ -9980,8 +9980,7 @@ uint8 ha_partition::table_cache_type()
uint32 ha_partition::calculate_key_hash_value(Field **field_array) uint32 ha_partition::calculate_key_hash_value(Field **field_array)
{ {
ulong nr1= 1; Hasher hasher;
ulong nr2= 4;
bool use_51_hash; bool use_51_hash;
use_51_hash= MY_TEST((*field_array)->table->part_info->key_algorithm == use_51_hash= MY_TEST((*field_array)->table->part_info->key_algorithm ==
partition_info::KEY_ALGORITHM_51); partition_info::KEY_ALGORITHM_51);
@ -10008,12 +10007,12 @@ uint32 ha_partition::calculate_key_hash_value(Field **field_array)
{ {
if (field->is_null()) if (field->is_null())
{ {
nr1^= (nr1 << 1) | 1; hasher.add_null();
continue; continue;
} }
/* Force this to my_hash_sort_bin, which was used in 5.1! */ /* Force this to my_hash_sort_bin, which was used in 5.1! */
uint len= field->pack_length(); uint len= field->pack_length();
my_charset_bin.hash_sort(field->ptr, len, &nr1, &nr2); hasher.add(&my_charset_bin, field->ptr, len);
/* Done with this field, continue with next one. */ /* Done with this field, continue with next one. */
continue; continue;
} }
@ -10031,12 +10030,12 @@ uint32 ha_partition::calculate_key_hash_value(Field **field_array)
{ {
if (field->is_null()) if (field->is_null())
{ {
nr1^= (nr1 << 1) | 1; hasher.add_null();
continue; continue;
} }
/* Force this to my_hash_sort_bin, which was used in 5.1! */ /* Force this to my_hash_sort_bin, which was used in 5.1! */
uint len= field->pack_length(); uint len= field->pack_length();
my_charset_latin1.hash_sort(field->ptr, len, &nr1, &nr2); hasher.add(&my_charset_latin1, field->ptr, len);
continue; continue;
} }
/* New types in mysql-5.6. */ /* New types in mysql-5.6. */
@ -10063,9 +10062,9 @@ uint32 ha_partition::calculate_key_hash_value(Field **field_array)
} }
/* fall through, use collation based hashing. */ /* fall through, use collation based hashing. */
} }
field->hash(&nr1, &nr2); field->hash(&hasher);
} while (*(++field_array)); } while (*(++field_array));
return (uint32) nr1; return (uint32) hasher.finalize();
} }
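For readability, here is a minimal self-contained sketch of the Hasher interface these call sites rely on (add_null(), add(), finalize()). The real class ships with the server and hashes through the collation's hash_sort(); the class name below is hypothetical, the seeds are taken from the old nr1/nr2 code visible above, and the byte mixing is only a stand-in so the sketch compiles on its own:

#include <cstddef>
#include <cstdint>

class HasherSketch
{
  unsigned long m_nr1= 1, m_nr2= 4;               // seeds used by the old code path
public:
  void add_null() { m_nr1^= (m_nr1 << 1) | 1; }   // same NULL mixing as before
  void add(const unsigned char *ptr, std::size_t len)  // real add() also takes a CHARSET_INFO*
  {
    for (std::size_t i= 0; i < len; i++)          // stand-in for cs->hash_sort(ptr, len, &nr1, &nr2)
    {
      m_nr1^= (((m_nr1 & 63) + m_nr2) * ptr[i]) + (m_nr1 << 8);
      m_nr2+= 3;
    }
  }
  std::uint32_t finalize() const { return (std::uint32_t) m_nr1; }
};

The partition code above then simply feeds each field into the hasher and returns finalize() where it previously returned nr1 directly.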

View File

@ -4356,6 +4356,35 @@ int handler::check_collation_compatibility()
} }
int handler::check_long_hash_compatibility() const
{
if (!table->s->old_long_hash_function())
return 0;
KEY *key= table->key_info;
KEY *key_end= key + table->s->keys;
for ( ; key < key_end; key++)
{
if (key->algorithm == HA_KEY_ALG_LONG_HASH)
{
/*
The old (pre-MDEV-27653) hash function was wrong.
So the long hash unique constraint can have some
duplicate records. REPAIR TABLE can't fix this,
it will fail on a duplicate key error.
Only "ALTER IGNORE TABLE .. FORCE" can fix this.
So we need to return HA_ADMIN_NEEDS_ALTER here,
(not HA_ADMIN_NEEDS_UPGRADE which is used elsewhere),
to properly send the error message text corresponding
to ER_TABLE_NEEDS_REBUILD (rather than to ER_TABLE_NEEDS_UPGRADE)
to the user.
*/
return HA_ADMIN_NEEDS_ALTER;
}
}
return 0;
}
int handler::ha_check_for_upgrade(HA_CHECK_OPT *check_opt) int handler::ha_check_for_upgrade(HA_CHECK_OPT *check_opt)
{ {
int error; int error;
@ -4394,6 +4423,9 @@ int handler::ha_check_for_upgrade(HA_CHECK_OPT *check_opt)
if (unlikely((error= check_collation_compatibility()))) if (unlikely((error= check_collation_compatibility())))
return error; return error;
if (unlikely((error= check_long_hash_compatibility())))
return error;
return check_for_upgrade(check_opt); return check_for_upgrade(check_opt);
} }

View File

@ -3416,6 +3416,7 @@ public:
} }
int check_collation_compatibility(); int check_collation_compatibility();
int check_long_hash_compatibility() const;
int ha_check_for_upgrade(HA_CHECK_OPT *check_opt); int ha_check_for_upgrade(HA_CHECK_OPT *check_opt);
/** to be actually called to get 'check()' functionality*/ /** to be actually called to get 'check()' functionality*/
int ha_check(THD *thd, HA_CHECK_OPT *check_opt); int ha_check(THD *thd, HA_CHECK_OPT *check_opt);

View File

@ -10755,7 +10755,7 @@ table_map Item_direct_view_ref::used_tables() const
table_map used= (*ref)->used_tables(); table_map used= (*ref)->used_tables();
return (used ? return (used ?
used : used :
((null_ref_table != NO_NULL_TABLE) ? (null_ref_table != NO_NULL_TABLE && !null_ref_table->const_table ?
null_ref_table->map : null_ref_table->map :
(table_map)0 )); (table_map)0 ));
} }

View File

@ -1295,6 +1295,12 @@ public:
*/ */
inline ulonglong val_uint() { return (ulonglong) val_int(); } inline ulonglong val_uint() { return (ulonglong) val_int(); }
virtual bool hash_not_null(Hasher *hasher)
{
DBUG_ASSERT(0);
return true;
}
/* /*
Return string representation of this item object. Return string representation of this item object.
@ -3506,6 +3512,13 @@ public:
{ {
return Sql_mode_dependency(0, field->value_depends_on_sql_mode()); return Sql_mode_dependency(0, field->value_depends_on_sql_mode());
} }
bool hash_not_null(Hasher *hasher)
{
if (field->is_null())
return true;
field->hash_not_null(hasher);
return false;
}
longlong val_int_endpoint(bool left_endp, bool *incl_endp) override; longlong val_int_endpoint(bool left_endp, bool *incl_endp) override;
bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) override; bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) override;
bool get_date_result(THD *thd, MYSQL_TIME *ltime,date_mode_t fuzzydate) bool get_date_result(THD *thd, MYSQL_TIME *ltime,date_mode_t fuzzydate)

View File

@ -4870,38 +4870,18 @@ Item_cond::fix_fields(THD *thd, Item **ref)
if (check_stack_overrun(thd, STACK_MIN_SIZE, buff)) if (check_stack_overrun(thd, STACK_MIN_SIZE, buff))
return TRUE; // Fatal error flag is set! return TRUE; // Fatal error flag is set!
/*
The following optimization reduces the depth of an AND-OR tree. while (li++)
E.g. a WHERE clause like
F1 AND (F2 AND (F2 AND F4))
is parsed into a tree with the same nested structure as defined
by braces. This optimization will transform such tree into
AND (F1, F2, F3, F4).
Trees of OR items are flattened as well:
((F1 OR F2) OR (F3 OR F4)) => OR (F1, F2, F3, F4)
Items for removed AND/OR levels will dangle until the death of the
entire statement.
The optimization is currently prepared statements and stored procedures
friendly as it doesn't allocate any memory and its effects are durable
(i.e. do not depend on PS/SP arguments).
*/
while ((item=li++))
{ {
while (item->type() == Item::COND_ITEM && merge_sub_condition(li);
((Item_cond*) item)->functype() == functype() && item= *li.ref();
!((Item_cond*) item)->list.is_empty())
{ // Identical function
li.replace(((Item_cond*) item)->list);
((Item_cond*) item)->list.empty();
item= *li.ref(); // new current item
}
if (abort_on_null) if (abort_on_null)
item->top_level_item(); item->top_level_item();
/* /*
replace degraded condition: replace degraded condition:
was: <field> was: <field>
become: <field> = 1 become: <field> != 0
*/ */
Item::Type type= item->type(); Item::Type type= item->type();
if (type == Item::FIELD_ITEM || type == Item::REF_ITEM) if (type == Item::FIELD_ITEM || type == Item::REF_ITEM)
@ -4917,7 +4897,9 @@ Item_cond::fix_fields(THD *thd, Item **ref)
if (item->fix_fields_if_needed_for_bool(thd, li.ref())) if (item->fix_fields_if_needed_for_bool(thd, li.ref()))
return TRUE; /* purecov: inspected */ return TRUE; /* purecov: inspected */
item= *li.ref(); // item can be substituted in fix_fields merge_sub_condition(li);
item= *li.ref(); // may be substituted in fix_fields/merge_item_if_possible
used_tables_cache|= item->used_tables(); used_tables_cache|= item->used_tables();
if (item->const_item() && !item->with_param && if (item->const_item() && !item->with_param &&
!item->is_expensive() && !cond_has_datetime_is_null(item)) !item->is_expensive() && !cond_has_datetime_is_null(item))
@ -4969,6 +4951,55 @@ Item_cond::fix_fields(THD *thd, Item **ref)
return FALSE; return FALSE;
} }
/**
@brief
Merge a lower-level condition pointed by the iterator into this Item_cond
if possible
@param li list iterator pointing to condition that must be
examined and merged if possible.
@details
If an item pointed by the iterator is an instance of Item_cond with the
same functype() as this Item_cond (i.e. both are Item_cond_and or both are
Item_cond_or) then the arguments of that lower-level item can be merged
into the list of arguments of this upper-level Item_cond.
This optimization reduces the depth of an AND-OR tree.
E.g. a WHERE clause like
F1 AND (F2 AND (F3 AND F4))
is parsed into a tree with the same nested structure as defined
by braces. This optimization will transform such tree into
AND (F1, F2, F3, F4).
Trees of OR items are flattened as well:
((F1 OR F2) OR (F3 OR F4)) => OR (F1, F2, F3, F4)
Items for removed AND/OR levels will dangle until the death of the
entire statement.
The optimization is currently prepared statements and stored procedures
friendly as it doesn't allocate any memory and its effects are durable
(i.e. do not depend on PS/SP arguments).
*/
void Item_cond::merge_sub_condition(List_iterator<Item>& li)
{
Item *item= *li.ref();
/*
The check for list.is_empty() is to catch empty Item_cond_and() items.
We may encounter Item_cond_and with an empty list, because optimizer code
strips multiple equalities, combines items, then adds multiple equalities
back
*/
while (item->type() == Item::COND_ITEM &&
((Item_cond*) item)->functype() == functype() &&
!((Item_cond*) item)->list.is_empty())
{
li.replace(((Item_cond*) item)->list);
((Item_cond*) item)->list.empty();
item= *li.ref();
}
}
bool bool
Item_cond::eval_not_null_tables(void *opt_arg) Item_cond::eval_not_null_tables(void *opt_arg)
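A self-contained toy version of the flattening described in the comment above, over a generic n-ary tree instead of the server's List<Item> (names and types below are illustrative only):

#include <cassert>
#include <cstddef>
#include <vector>

struct Cond
{
  char op;                          // 'A' (AND), 'O' (OR), or 0 for a leaf
  std::vector<Cond*> args;
};

// Splice the children of same-op sub-nodes into the parent's argument list,
// mirroring what merge_sub_condition() does at the iterator position.
static void flatten(Cond *c)
{
  for (std::size_t i= 0; i < c->args.size(); )
  {
    Cond *a= c->args[i];
    if (a->op == c->op && !a->args.empty())
    {
      c->args.erase(c->args.begin() + i);
      c->args.insert(c->args.begin() + i, a->args.begin(), a->args.end());
      a->args.clear();              // the emptied node dangles, as in the server
      continue;                     // re-examine the spliced-in position
    }
    flatten(a);                     // different operator: flatten it separately
    i++;
  }
}

int main()
{
  Cond f1{0, {}}, f2{0, {}}, f3{0, {}}, f4{0, {}};
  Cond inner2{'A', {&f3, &f4}};
  Cond inner1{'A', {&f2, &inner2}};
  Cond top{'A', {&f1, &inner1}};    // F1 AND (F2 AND (F3 AND F4))
  flatten(&top);
  assert(top.args.size() == 4);     // AND(F1, F2, F3, F4)
  return 0;
}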

View File

@ -3049,6 +3049,9 @@ public:
Item *build_clone(THD *thd); Item *build_clone(THD *thd);
bool excl_dep_on_table(table_map tab_map); bool excl_dep_on_table(table_map tab_map);
bool excl_dep_on_grouping_fields(st_select_lex *sel); bool excl_dep_on_grouping_fields(st_select_lex *sel);
private:
void merge_sub_condition(List_iterator<Item>& li);
}; };
template <template<class> class LI, class T> class Item_equal_iterator; template <template<class> class LI, class T> class Item_equal_iterator;

View File

@ -1768,7 +1768,7 @@ static void calc_hash_for_unique(ulong &nr1, ulong &nr2, String *str)
cs->hash_sort((uchar *)str->ptr(), str->length(), &nr1, &nr2); cs->hash_sort((uchar *)str->ptr(), str->length(), &nr1, &nr2);
} }
longlong Item_func_hash::val_int() longlong Item_func_hash_mariadb_100403::val_int()
{ {
DBUG_EXECUTE_IF("same_long_unique_hash", return 9;); DBUG_EXECUTE_IF("same_long_unique_hash", return 9;);
unsigned_flag= true; unsigned_flag= true;
@ -1789,6 +1789,24 @@ longlong Item_func_hash::val_int()
} }
longlong Item_func_hash::val_int()
{
DBUG_EXECUTE_IF("same_long_unique_hash", return 9;);
unsigned_flag= true;
Hasher hasher;
for(uint i= 0;i<arg_count;i++)
{
if (args[i]->hash_not_null(&hasher))
{
null_value= 1;
return 0;
}
}
null_value= 0;
return (longlong) hasher.finalize();
}
bool Item_func_hash::fix_length_and_dec() bool Item_func_hash::fix_length_and_dec()
{ {
decimals= 0; decimals= 0;

View File

@ -1213,6 +1213,18 @@ public:
const char *func_name() const { return "<hash>"; } const char *func_name() const { return "<hash>"; }
}; };
class Item_func_hash_mariadb_100403: public Item_func_hash
{
public:
Item_func_hash_mariadb_100403(THD *thd, List<Item> &item)
:Item_func_hash(thd, item)
{}
longlong val_int();
Item *get_copy(THD *thd)
{ return get_item_copy<Item_func_hash_mariadb_100403>(thd, this); }
const char *func_name() const { return "<hash_mariadb_100403>"; }
};
class Item_longlong_func: public Item_int_func class Item_longlong_func: public Item_int_func
{ {
public: public:

View File

@ -1612,6 +1612,18 @@ bool Item_func_ucase::fix_length_and_dec()
} }
bool Item_func_left::hash_not_null(Hasher *hasher)
{
StringBuffer<STRING_BUFFER_USUAL_SIZE> buf;
String *str= val_str(&buf);
DBUG_ASSERT((str == NULL) == null_value);
if (!str)
return true;
hasher->add(collation.collation, str->ptr(), str->length());
return false;
}
String *Item_func_left::val_str(String *str) String *Item_func_left::val_str(String *str)
{ {
DBUG_ASSERT(fixed == 1); DBUG_ASSERT(fixed == 1);

View File

@ -459,6 +459,7 @@ class Item_func_left :public Item_str_func
String tmp_value; String tmp_value;
public: public:
Item_func_left(THD *thd, Item *a, Item *b): Item_str_func(thd, a, b) {} Item_func_left(THD *thd, Item *a, Item *b): Item_str_func(thd, a, b) {}
bool hash_not_null(Hasher *hasher);
String *val_str(String *); String *val_str(String *);
bool fix_length_and_dec(); bool fix_length_and_dec();
const char *func_name() const { return "left"; } const char *func_name() const { return "left"; }

View File

@ -367,7 +367,14 @@ public:
int8 aggr_level; /* nesting level of the aggregating subquery */ int8 aggr_level; /* nesting level of the aggregating subquery */
int8 max_arg_level; /* max level of unbound column references */ int8 max_arg_level; /* max level of unbound column references */
int8 max_sum_func_level;/* max level of aggregation for embedded functions */ int8 max_sum_func_level;/* max level of aggregation for embedded functions */
bool quick_group; /* If incremental update of fields */
/*
true (the default value) means this aggregate function can be computed
with TemporaryTableWithPartialSums algorithm (see end_update()).
false means this aggregate function needs OrderedGroupBy algorithm (see
end_write_group()).
*/
bool quick_group;
/* /*
This list is used by the check for mixing non aggregated fields and This list is used by the check for mixing non aggregated fields and
sum functions in the ONLY_FULL_GROUP_BY_MODE. We save all outer fields sum functions in the ONLY_FULL_GROUP_BY_MODE. We save all outer fields

View File

@ -263,7 +263,7 @@ static char *get_plugindir()
{ {
static char plugin_dir[2*MAX_PATH]; static char plugin_dir[2*MAX_PATH];
get_basedir(plugin_dir, sizeof(plugin_dir), mysqld_path); get_basedir(plugin_dir, sizeof(plugin_dir), mysqld_path);
strcat(plugin_dir, "/" STR(INSTALL_PLUGINDIR)); safe_strcat(plugin_dir, sizeof(plugin_dir), "/" STR(INSTALL_PLUGINDIR));
if (access(plugin_dir, 0) == 0) if (access(plugin_dir, 0) == 0)
return plugin_dir; return plugin_dir;

View File

@ -4852,12 +4852,11 @@ static int init_server_components()
else // full wsrep initialization else // full wsrep initialization
{ {
// add basedir/bin to PATH to resolve wsrep script names // add basedir/bin to PATH to resolve wsrep script names
char* const tmp_path= (char*)my_alloca(strlen(mysql_home) + size_t tmp_path_size= strlen(mysql_home) + 5; /* including "/bin" */
strlen("/bin") + 1); char* const tmp_path= (char*)my_alloca(tmp_path_size);
if (tmp_path) if (tmp_path)
{ {
strcpy(tmp_path, mysql_home); snprintf(tmp_path, tmp_path_size, "%s/bin", mysql_home);
strcat(tmp_path, "/bin");
wsrep_prepend_PATH(tmp_path); wsrep_prepend_PATH(tmp_path);
} }
else else
@ -5668,8 +5667,9 @@ int mysqld_main(int argc, char **argv)
char real_server_version[2 * SERVER_VERSION_LENGTH + 10]; char real_server_version[2 * SERVER_VERSION_LENGTH + 10];
set_server_version(real_server_version, sizeof(real_server_version)); set_server_version(real_server_version, sizeof(real_server_version));
strcat(real_server_version, "' as '"); safe_strcat(real_server_version, sizeof(real_server_version), "' as '");
strcat(real_server_version, server_version); safe_strcat(real_server_version, sizeof(real_server_version),
server_version);
sql_print_information(ER_DEFAULT(ER_STARTUP), my_progname, sql_print_information(ER_DEFAULT(ER_STARTUP), my_progname,
real_server_version, real_server_version,
@ -7916,7 +7916,8 @@ static int mysql_init_variables(void)
} }
else else
my_path(prg_dev, my_progname, "mysql/bin"); my_path(prg_dev, my_progname, "mysql/bin");
strcat(prg_dev,"/../"); // Remove 'bin' to get base dir // Remove 'bin' to get base dir
safe_strcat(prg_dev, sizeof(prg_dev), "/../");
cleanup_dirname(mysql_home,prg_dev); cleanup_dirname(mysql_home,prg_dev);
} }
#else #else

View File

@ -56,8 +56,7 @@ rpt_handle_event(rpl_parallel_thread::queued_event *qev,
rgi->event_relay_log_pos= qev->event_relay_log_pos; rgi->event_relay_log_pos= qev->event_relay_log_pos;
rgi->future_event_relay_log_pos= qev->future_event_relay_log_pos; rgi->future_event_relay_log_pos= qev->future_event_relay_log_pos;
strcpy(rgi->future_event_master_log_name, qev->future_event_master_log_name); strcpy(rgi->future_event_master_log_name, qev->future_event_master_log_name);
if (!(ev->is_artificial_event() || ev->is_relay_log_event() || if (event_can_update_last_master_timestamp(ev))
(ev->when == 0)))
rgi->last_master_timestamp= ev->when + (time_t)ev->exec_time; rgi->last_master_timestamp= ev->when + (time_t)ev->exec_time;
err= apply_event_and_update_pos_for_parallel(ev, thd, rgi); err= apply_event_and_update_pos_for_parallel(ev, thd, rgi);

View File

@ -4192,10 +4192,10 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli,
the user might be surprised to see a claim that the slave is up to date the user might be surprised to see a claim that the slave is up to date
long before those queued events are actually executed. long before those queued events are actually executed.
*/ */
if (!rli->mi->using_parallel() && if ((!rli->mi->using_parallel()) && event_can_update_last_master_timestamp(ev))
!(ev->is_artificial_event() || ev->is_relay_log_event() || (ev->when == 0)))
{ {
rli->last_master_timestamp= ev->when + (time_t) ev->exec_time; rli->last_master_timestamp= ev->when + (time_t) ev->exec_time;
rli->sql_thread_caught_up= false;
DBUG_ASSERT(rli->last_master_timestamp >= 0); DBUG_ASSERT(rli->last_master_timestamp >= 0);
} }
@ -4247,6 +4247,17 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli,
if (rli->mi->using_parallel()) if (rli->mi->using_parallel())
{ {
if (unlikely((rli->last_master_timestamp == 0 ||
rli->sql_thread_caught_up) &&
event_can_update_last_master_timestamp(ev)))
{
if (rli->last_master_timestamp < ev->when)
{
rli->last_master_timestamp= ev->when;
rli->sql_thread_caught_up= false;
}
}
int res= rli->parallel.do_event(serial_rgi, ev, event_size); int res= rli->parallel.do_event(serial_rgi, ev, event_size);
/* /*
In parallel replication, we need to update the relay log position In parallel replication, we need to update the relay log position
@ -4267,7 +4278,7 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli,
This is the case for pre-10.0 events without GTID, and for handling This is the case for pre-10.0 events without GTID, and for handling
slave_skip_counter. slave_skip_counter.
*/ */
if (!(ev->is_artificial_event() || ev->is_relay_log_event() || (ev->when == 0))) if (event_can_update_last_master_timestamp(ev))
{ {
/* /*
Ignore FD's timestamp as it does not reflect the slave execution Ignore FD's timestamp as it does not reflect the slave execution
@ -4275,7 +4286,8 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli,
data modification event execution last long all this time data modification event execution last long all this time
Seconds_Behind_Master is zero. Seconds_Behind_Master is zero.
*/ */
if (ev->get_type_code() != FORMAT_DESCRIPTION_EVENT) if (ev->get_type_code() != FORMAT_DESCRIPTION_EVENT &&
rli->last_master_timestamp < ev->when)
rli->last_master_timestamp= ev->when + (time_t) ev->exec_time; rli->last_master_timestamp= ev->when + (time_t) ev->exec_time;
DBUG_ASSERT(rli->last_master_timestamp >= 0); DBUG_ASSERT(rli->last_master_timestamp >= 0);
@ -7615,7 +7627,6 @@ static Log_event* next_event(rpl_group_info *rgi, ulonglong *event_size)
if (hot_log) if (hot_log)
mysql_mutex_unlock(log_lock); mysql_mutex_unlock(log_lock);
rli->sql_thread_caught_up= false;
DBUG_RETURN(ev); DBUG_RETURN(ev);
} }
if (opt_reckless_slave) // For mysql-test if (opt_reckless_slave) // For mysql-test
@ -7779,7 +7790,6 @@ static Log_event* next_event(rpl_group_info *rgi, ulonglong *event_size)
rli->relay_log.wait_for_update_relay_log(rli->sql_driver_thd); rli->relay_log.wait_for_update_relay_log(rli->sql_driver_thd);
// re-acquire data lock since we released it earlier // re-acquire data lock since we released it earlier
mysql_mutex_lock(&rli->data_lock); mysql_mutex_lock(&rli->data_lock);
rli->sql_thread_caught_up= false;
continue; continue;
} }
/* /*
@ -7970,12 +7980,19 @@ event(errno: %d cur_log->error: %d)",
{ {
sql_print_information("Error reading relay log event: %s", sql_print_information("Error reading relay log event: %s",
"slave SQL thread was killed"); "slave SQL thread was killed");
DBUG_RETURN(0); goto end;
} }
err: err:
if (errmsg) if (errmsg)
sql_print_error("Error reading relay log event: %s", errmsg); sql_print_error("Error reading relay log event: %s", errmsg);
end:
/*
Set that we are not caught up so if there is a hang/problem on restart,
Seconds_Behind_Master will still grow.
*/
rli->sql_thread_caught_up= false;
DBUG_RETURN(0); DBUG_RETURN(0);
} }
#ifdef WITH_WSREP #ifdef WITH_WSREP

View File

@ -49,6 +49,7 @@
#include "rpl_filter.h" #include "rpl_filter.h"
#include "rpl_tblmap.h" #include "rpl_tblmap.h"
#include "rpl_gtid.h" #include "rpl_gtid.h"
#include "log_event.h"
#define SLAVE_NET_TIMEOUT 60 #define SLAVE_NET_TIMEOUT 60
@ -293,6 +294,17 @@ extern char *report_host, *report_password;
extern I_List<THD> threads; extern I_List<THD> threads;
/*
Check that a binlog event (read from the relay log) is valid to update
last_master_timestamp. That is, a valid event is one with a consistent
timestamp which originated from a primary server.
*/
static inline bool event_can_update_last_master_timestamp(Log_event *ev)
{
return ev && !(ev->is_artificial_event() || ev->is_relay_log_event() ||
(ev->when == 0));
}
#else #else
#define close_active_mi() /* no-op */ #define close_active_mi() /* no-op */
#endif /* HAVE_REPLICATION */ #endif /* HAVE_REPLICATION */

View File

@ -37,7 +37,8 @@ const LEX_CSTRING msg_status= {STRING_WITH_LEN("status")};
/* Prepare, run and cleanup for mysql_recreate_table() */ /* Prepare, run and cleanup for mysql_recreate_table() */
static bool admin_recreate_table(THD *thd, TABLE_LIST *table_list) static bool admin_recreate_table(THD *thd, TABLE_LIST *table_list,
Recreate_info *recreate_info)
{ {
bool result_code; bool result_code;
DBUG_ENTER("admin_recreate_table"); DBUG_ENTER("admin_recreate_table");
@ -58,7 +59,7 @@ static bool admin_recreate_table(THD *thd, TABLE_LIST *table_list)
DEBUG_SYNC(thd, "ha_admin_try_alter"); DEBUG_SYNC(thd, "ha_admin_try_alter");
tmp_disable_binlog(thd); // binlogging is done by caller if wanted tmp_disable_binlog(thd); // binlogging is done by caller if wanted
result_code= (thd->open_temporary_tables(table_list) || result_code= (thd->open_temporary_tables(table_list) ||
mysql_recreate_table(thd, table_list, false)); mysql_recreate_table(thd, table_list, recreate_info, false));
reenable_binlog(thd); reenable_binlog(thd);
/* /*
mysql_recreate_table() can push OK or ERROR. mysql_recreate_table() can push OK or ERROR.
@ -528,6 +529,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
bool open_error; bool open_error;
bool collect_eis= FALSE; bool collect_eis= FALSE;
bool open_for_modify= org_open_for_modify; bool open_for_modify= org_open_for_modify;
Recreate_info recreate_info;
DBUG_PRINT("admin", ("table: '%s'.'%s'", db, table->table_name.str)); DBUG_PRINT("admin", ("table: '%s'.'%s'", db, table->table_name.str));
DEBUG_SYNC(thd, "admin_command_kill_before_modify"); DEBUG_SYNC(thd, "admin_command_kill_before_modify");
@ -787,7 +789,8 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
{ {
/* We use extra_open_options to be able to open crashed tables */ /* We use extra_open_options to be able to open crashed tables */
thd->open_options|= extra_open_options; thd->open_options|= extra_open_options;
result_code= admin_recreate_table(thd, table); result_code= admin_recreate_table(thd, table, &recreate_info) ?
HA_ADMIN_FAILED : HA_ADMIN_OK;
thd->open_options&= ~extra_open_options; thd->open_options&= ~extra_open_options;
goto send_result; goto send_result;
} }
@ -968,12 +971,31 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
repair was not implemented and we need to upgrade the table repair was not implemented and we need to upgrade the table
to a new version so we recreate the table with ALTER TABLE to a new version so we recreate the table with ALTER TABLE
*/ */
result_code= admin_recreate_table(thd, table); result_code= admin_recreate_table(thd, table, &recreate_info);
} }
send_result: send_result:
lex->cleanup_after_one_table_open(); lex->cleanup_after_one_table_open();
thd->clear_error(); // these errors shouldn't get client thd->clear_error(); // these errors shouldn't get client
if (recreate_info.records_duplicate())
{
protocol->prepare_for_resend();
protocol->store(&table_name, system_charset_info);
protocol->store((char*) operator_name, system_charset_info);
protocol->store(warning_level_names[Sql_condition::WARN_LEVEL_WARN].str,
warning_level_names[Sql_condition::WARN_LEVEL_WARN].length,
system_charset_info);
char buf[80];
size_t length= my_snprintf(buf, sizeof(buf),
"Number of rows changed from %u to %u",
(uint) recreate_info.records_processed(),
(uint) recreate_info.records_copied());
protocol->store(buf, length, system_charset_info);
if (protocol->write())
goto err;
}
{ {
Diagnostics_area::Sql_condition_iterator it= Diagnostics_area::Sql_condition_iterator it=
thd->get_stmt_da()->sql_conditions(); thd->get_stmt_da()->sql_conditions();
@ -1083,7 +1105,7 @@ send_result_message:
*save_next_global= table->next_global; *save_next_global= table->next_global;
table->next_local= table->next_global= 0; table->next_local= table->next_global= 0;
result_code= admin_recreate_table(thd, table); result_code= admin_recreate_table(thd, table, &recreate_info);
trans_commit_stmt(thd); trans_commit_stmt(thd);
trans_commit(thd); trans_commit(thd);
close_thread_tables(thd); close_thread_tables(thd);
@ -1278,6 +1300,8 @@ send_result_message:
goto err; goto err;
DEBUG_SYNC(thd, "admin_command_kill_after_modify"); DEBUG_SYNC(thd, "admin_command_kill_after_modify");
} }
thd->resume_subsequent_commits(suspended_wfc);
DBUG_EXECUTE_IF("inject_analyze_table_sleep", my_sleep(500000););
if (is_table_modified && is_cmd_replicated && if (is_table_modified && is_cmd_replicated &&
(!opt_readonly || thd->slave_thread) && !thd->lex->no_write_to_binlog) (!opt_readonly || thd->slave_thread) && !thd->lex->no_write_to_binlog)
{ {
@ -1287,10 +1311,8 @@ send_result_message:
if (res) if (res)
goto err; goto err;
} }
my_eof(thd); my_eof(thd);
thd->resume_subsequent_commits(suspended_wfc);
DBUG_EXECUTE_IF("inject_analyze_table_sleep", my_sleep(500000););
DBUG_RETURN(FALSE); DBUG_RETURN(FALSE);
err: err:
@ -1438,6 +1460,7 @@ bool Sql_cmd_optimize_table::execute(THD *thd)
LEX *m_lex= thd->lex; LEX *m_lex= thd->lex;
TABLE_LIST *first_table= m_lex->first_select_lex()->table_list.first; TABLE_LIST *first_table= m_lex->first_select_lex()->table_list.first;
bool res= TRUE; bool res= TRUE;
Recreate_info recreate_info;
DBUG_ENTER("Sql_cmd_optimize_table::execute"); DBUG_ENTER("Sql_cmd_optimize_table::execute");
if (check_table_access(thd, SELECT_ACL | INSERT_ACL, first_table, if (check_table_access(thd, SELECT_ACL | INSERT_ACL, first_table,
@ -1446,7 +1469,7 @@ bool Sql_cmd_optimize_table::execute(THD *thd)
WSREP_TO_ISOLATION_BEGIN_WRTCHK(NULL, NULL, first_table); WSREP_TO_ISOLATION_BEGIN_WRTCHK(NULL, NULL, first_table);
res= (specialflag & SPECIAL_NO_NEW_FUNC) ? res= (specialflag & SPECIAL_NO_NEW_FUNC) ?
mysql_recreate_table(thd, first_table, true) : mysql_recreate_table(thd, first_table, &recreate_info, true) :
mysql_admin_table(thd, first_table, &m_lex->check_opt, mysql_admin_table(thd, first_table, &m_lex->check_opt,
"optimize", TL_WRITE, 1, 0, 0, 0, "optimize", TL_WRITE, 1, 0, 0, 0,
&handler::ha_optimize, 0, true); &handler::ha_optimize, 0, true);

View File

@ -550,9 +550,11 @@ bool Sql_cmd_alter_table::execute(THD *thd)
thd->work_part_info= 0; thd->work_part_info= 0;
#endif #endif
Recreate_info recreate_info;
result= mysql_alter_table(thd, &select_lex->db, &lex->name, result= mysql_alter_table(thd, &select_lex->db, &lex->name,
&create_info, &create_info,
first_table, first_table,
&recreate_info,
&alter_info, &alter_info,
select_lex->order_list.elements, select_lex->order_list.elements,
select_lex->order_list.first, select_lex->order_list.first,

View File

@ -8276,6 +8276,20 @@ bool THD::timestamp_to_TIME(MYSQL_TIME *ltime, my_time_t ts,
return 0; return 0;
} }
void THD::my_ok_with_recreate_info(const Recreate_info &info,
ulong warn_count)
{
char buf[80];
my_snprintf(buf, sizeof(buf),
ER_THD(this, ER_INSERT_INFO),
(ulong) info.records_processed(),
(ulong) info.records_duplicate(),
warn_count);
my_ok(this, info.records_processed(), 0L, buf);
}
THD_list_iterator *THD_list_iterator::iterator() THD_list_iterator *THD_list_iterator::iterator()
{ {
return &server_threads; return &server_threads;

View File

@ -243,6 +243,29 @@ public:
}; };
class Recreate_info
{
ha_rows m_records_copied;
ha_rows m_records_duplicate;
public:
Recreate_info()
:m_records_copied(0),
m_records_duplicate(0)
{ }
Recreate_info(ha_rows records_copied,
ha_rows records_duplicate)
:m_records_copied(records_copied),
m_records_duplicate(records_duplicate)
{ }
ha_rows records_copied() const { return m_records_copied; }
ha_rows records_duplicate() const { return m_records_duplicate; }
ha_rows records_processed() const
{
return m_records_copied + m_records_duplicate;
}
};
#define TC_HEURISTIC_RECOVER_COMMIT 1 #define TC_HEURISTIC_RECOVER_COMMIT 1
#define TC_HEURISTIC_RECOVER_ROLLBACK 2 #define TC_HEURISTIC_RECOVER_ROLLBACK 2
extern ulong tc_heuristic_recover; extern ulong tc_heuristic_recover;
@ -4102,6 +4125,8 @@ public:
inline bool vio_ok() const { return TRUE; } inline bool vio_ok() const { return TRUE; }
inline bool is_connected() { return TRUE; } inline bool is_connected() { return TRUE; }
#endif #endif
void my_ok_with_recreate_info(const Recreate_info &info, ulong warn_count);
/** /**
Mark the current error as fatal. Warning: this does not Mark the current error as fatal. Warning: this does not
set any error, it sets a property of the error, so must be set any error, it sets a property of the error, so must be
@ -5963,6 +5988,12 @@ public:
uint sum_func_count; uint sum_func_count;
uint hidden_field_count; uint hidden_field_count;
uint group_parts,group_length,group_null_parts; uint group_parts,group_length,group_null_parts;
/*
If we're doing a GROUP BY operation, shows which one is used:
true TemporaryTableWithPartialSums algorithm (see end_update()).
false OrderedGroupBy algorithm (see end_write_group()).
*/
uint quick_group; uint quick_group;
/** /**
Enabled when we have atleast one outer_sum_func. Needed when used Enabled when we have atleast one outer_sum_func. Needed when used

View File

@ -100,49 +100,6 @@ bool LEX::check_dependencies_in_with_clauses()
} }
/**
@brief
Resolve references to CTE in specification of hanging CTE
@details
A CTE to which there are no references in the query is called hanging CTE.
Although such CTE is not used for execution its specification must be
subject to context analysis. All errors concerning references to
non-existing tables or fields occurred in the specification must be
reported as well as all other errors caught at the prepare stage.
The specification of a hanging CTE might contain references to other
CTE outside of the specification and within it if the specification
contains a with clause. This function resolves all such references for
all hanging CTEs encountered in the processed query.
@retval
false on success
true on failure
*/
bool
LEX::resolve_references_to_cte_in_hanging_cte()
{
for (With_clause *with_clause= with_clauses_list;
with_clause; with_clause= with_clause->next_with_clause)
{
for (With_element *with_elem= with_clause->with_list.first;
with_elem; with_elem= with_elem->next)
{
if (!with_elem->is_referenced())
{
TABLE_LIST *first_tbl=
with_elem->spec->first_select()->table_list.first;
TABLE_LIST **with_elem_end_pos= with_elem->head->tables_pos.end_pos;
if (first_tbl && resolve_references_to_cte(first_tbl, with_elem_end_pos))
return true;
}
}
}
return false;
}
/** /**
@brief @brief
Resolve table references to CTE from a sub-chain of table references Resolve table references to CTE from a sub-chain of table references
@ -289,8 +246,6 @@ LEX::check_cte_dependencies_and_resolve_references()
return false; return false;
if (resolve_references_to_cte(query_tables, query_tables_last)) if (resolve_references_to_cte(query_tables, query_tables_last))
return true; return true;
if (resolve_references_to_cte_in_hanging_cte())
return true;
return false; return false;
} }
@ -489,47 +444,33 @@ With_element *find_table_def_in_with_clauses(TABLE_LIST *tbl,
st_unit_ctxt_elem *ctxt) st_unit_ctxt_elem *ctxt)
{ {
With_element *found= 0; With_element *found= 0;
st_select_lex_unit *top_unit= 0;
for (st_unit_ctxt_elem *unit_ctxt_elem= ctxt; for (st_unit_ctxt_elem *unit_ctxt_elem= ctxt;
unit_ctxt_elem; unit_ctxt_elem;
unit_ctxt_elem= unit_ctxt_elem->prev) unit_ctxt_elem= unit_ctxt_elem->prev)
{ {
st_select_lex_unit *unit= unit_ctxt_elem->unit; st_select_lex_unit *unit= unit_ctxt_elem->unit;
With_clause *with_clause= unit->with_clause; With_clause *with_clause= unit->with_clause;
/*
First look for the table definition in the with clause attached to 'unit'
if there is any such clause.
*/
if (with_clause) if (with_clause)
{ {
found= with_clause->find_table_def(tbl, NULL); /*
if (found) If the reference to tbl that has to be resolved belongs to
break; the FROM clause of a descendant of top_unit->with_element
} and this with element belongs to with_clause then this
/* element must be used as the barrier for the search in the
If 'unit' is the unit that defines a with element then reset 'unit' the list of CTEs from with_clause unless the clause contains
to the unit whose attached with clause contains this with element. RECURSIVE.
*/ */
With_element *with_elem= unit->with_element; With_element *barrier= 0;
if (with_elem) if (top_unit && !with_clause->with_recursive &&
{ top_unit->with_element &&
if (!(unit_ctxt_elem= unit_ctxt_elem->prev)) top_unit->with_element->get_owner() == with_clause)
break; barrier= top_unit->with_element;
unit= unit_ctxt_elem->unit; found= with_clause->find_table_def(tbl, barrier);
}
with_clause= unit->with_clause;
/*
Now look for the table definition in this with clause. If the with clause
contains RECURSIVE the search is performed through all CTE definitions in
clause, otherwise up to the definition of 'with_elem' unless it is NULL.
*/
if (with_clause)
{
found= with_clause->find_table_def(tbl,
with_clause->with_recursive ?
NULL : with_elem);
if (found) if (found)
break; break;
} }
top_unit= unit;
} }
return found; return found;
} }

View File

@ -326,8 +326,6 @@ public:
friend friend
bool LEX::resolve_references_to_cte(TABLE_LIST *tables, bool LEX::resolve_references_to_cte(TABLE_LIST *tables,
TABLE_LIST **tables_last); TABLE_LIST **tables_last);
friend
bool LEX::resolve_references_to_cte_in_hanging_cte();
}; };
const uint max_number_of_elements_in_with_clause= sizeof(table_map)*8; const uint max_number_of_elements_in_with_clause= sizeof(table_map)*8;
@ -441,9 +439,6 @@ public:
friend friend
bool LEX::check_dependencies_in_with_clauses(); bool LEX::check_dependencies_in_with_clauses();
friend
bool LEX::resolve_references_to_cte_in_hanging_cte();
}; };
inline inline

View File

@ -3962,7 +3962,8 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
lex->current_select->join->select_options|= OPTION_BUFFER_RESULT; lex->current_select->join->select_options|= OPTION_BUFFER_RESULT;
} }
else if (!(lex->current_select->options & OPTION_BUFFER_RESULT) && else if (!(lex->current_select->options & OPTION_BUFFER_RESULT) &&
thd->locked_tables_mode <= LTM_LOCK_TABLES) thd->locked_tables_mode <= LTM_LOCK_TABLES &&
!table->s->long_unique_table)
{ {
/* /*
We must not yet prepare the result table if it is the same as one of the We must not yet prepare the result table if it is the same as one of the

View File

@ -1308,8 +1308,6 @@ void LEX::start(THD *thd_arg)
stmt_var_list.empty(); stmt_var_list.empty();
proc_list.elements=0; proc_list.elements=0;
save_group_list.empty();
save_order_list.empty();
win_ref= NULL; win_ref= NULL;
win_frame= NULL; win_frame= NULL;
frame_top_bound= NULL; frame_top_bound= NULL;

View File

@ -1116,6 +1116,7 @@ public:
group_list_ptrs, and re-establish the original list before each execution. group_list_ptrs, and re-establish the original list before each execution.
*/ */
SQL_I_List<ORDER> group_list; SQL_I_List<ORDER> group_list;
SQL_I_List<ORDER> save_group_list;
Group_list_ptrs *group_list_ptrs; Group_list_ptrs *group_list_ptrs;
List<Item> item_list; /* list of fields & expressions */ List<Item> item_list; /* list of fields & expressions */
@ -1181,6 +1182,7 @@ public:
const char *type; /* type of select for EXPLAIN */ const char *type; /* type of select for EXPLAIN */
SQL_I_List<ORDER> order_list; /* ORDER clause */ SQL_I_List<ORDER> order_list; /* ORDER clause */
SQL_I_List<ORDER> save_order_list;
SQL_I_List<ORDER> gorder_list; SQL_I_List<ORDER> gorder_list;
Item *select_limit, *offset_limit; /* LIMIT clause parameters */ Item *select_limit, *offset_limit; /* LIMIT clause parameters */
bool is_set_query_expr_tail; bool is_set_query_expr_tail;
@ -3537,8 +3539,6 @@ public:
} }
SQL_I_List<ORDER> save_group_list;
SQL_I_List<ORDER> save_order_list;
LEX_CSTRING *win_ref; LEX_CSTRING *win_ref;
Window_frame *win_frame; Window_frame *win_frame;
Window_frame_bound *frame_top_bound; Window_frame_bound *frame_top_bound;
@ -4778,12 +4778,11 @@ public:
const LEX_CSTRING *constraint_name, const LEX_CSTRING *constraint_name,
Table_ident *ref_table_name, Table_ident *ref_table_name,
DDL_options ddl_options); DDL_options ddl_options);
bool check_dependencies_in_with_clauses(); bool check_dependencies_in_with_clauses();
bool resolve_references_to_cte_in_hanging_cte();
bool check_cte_dependencies_and_resolve_references(); bool check_cte_dependencies_and_resolve_references();
bool resolve_references_to_cte(TABLE_LIST *tables, bool resolve_references_to_cte(TABLE_LIST *tables,
TABLE_LIST **tables_last); TABLE_LIST **tables_last);
}; };

View File

@ -54,7 +54,7 @@ public:
{ {
elements= tmp.elements; elements= tmp.elements;
first= tmp.first; first= tmp.first;
next= tmp.next; next= elements ? tmp.next : &first;
return *this; return *this;
} }
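The one-line change above only matters for empty lists: next stores the address of the slot where the following element pointer goes, so an empty copy must point at the destination's own first slot rather than into the source object. A self-contained toy illustration of the invariant (not the server's SQL_I_List):

#include <cassert>

struct Node { Node *next; };

struct ToyList
{
  unsigned elements= 0;
  Node *first= nullptr;
  Node **next= &first;                  // empty list: the insertion slot is 'first'

  void push_back(Node *n)
  {
    n->next= nullptr;
    *next= n;                           // store into the current insertion slot
    next= &n->next;
    elements++;
  }
  ToyList &operator=(const ToyList &tmp)
  {
    elements= tmp.elements;
    first= tmp.first;
    next= elements ? tmp.next : &first; // the fix: an empty copy points at itself
    return *this;
  }
};

int main()
{
  ToyList src, dst;                     // both empty
  dst= src;                             // without the fix dst.next would be &src.first
  Node n{};
  dst.push_back(&n);                    // must land in dst, not in src
  assert(dst.first == &n && src.first == nullptr);
  return 0;
}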

View File

@ -4270,8 +4270,10 @@ mysql_execute_command(THD *thd)
WSREP_TO_ISOLATION_BEGIN(first_table->db.str, first_table->table_name.str, NULL); WSREP_TO_ISOLATION_BEGIN(first_table->db.str, first_table->table_name.str, NULL);
Recreate_info recreate_info;
res= mysql_alter_table(thd, &first_table->db, &first_table->table_name, res= mysql_alter_table(thd, &first_table->db, &first_table->table_name,
&create_info, first_table, &alter_info, &create_info, first_table,
&recreate_info, &alter_info,
0, (ORDER*) 0, 0, lex->if_exists()); 0, (ORDER*) 0, 0, lex->if_exists());
break; break;
} }
@ -8894,8 +8896,8 @@ TABLE_LIST *st_select_lex::convert_right_join()
void st_select_lex::prepare_add_window_spec(THD *thd) void st_select_lex::prepare_add_window_spec(THD *thd)
{ {
LEX *lex= thd->lex; LEX *lex= thd->lex;
lex->save_group_list= group_list; save_group_list= group_list;
lex->save_order_list= order_list; save_order_list= order_list;
lex->win_ref= NULL; lex->win_ref= NULL;
lex->win_frame= NULL; lex->win_frame= NULL;
lex->frame_top_bound= NULL; lex->frame_top_bound= NULL;
@ -8922,8 +8924,8 @@ bool st_select_lex::add_window_def(THD *thd,
win_part_list_ptr, win_part_list_ptr,
win_order_list_ptr, win_order_list_ptr,
win_frame); win_frame);
group_list= thd->lex->save_group_list; group_list= save_group_list;
order_list= thd->lex->save_order_list; order_list= save_order_list;
if (parsing_place != SELECT_LIST) if (parsing_place != SELECT_LIST)
{ {
fields_in_window_functions+= win_part_list_ptr->elements + fields_in_window_functions+= win_part_list_ptr->elements +
@ -8949,8 +8951,8 @@ bool st_select_lex::add_window_spec(THD *thd,
win_part_list_ptr, win_part_list_ptr,
win_order_list_ptr, win_order_list_ptr,
win_frame); win_frame);
group_list= thd->lex->save_group_list; group_list= save_group_list;
order_list= thd->lex->save_order_list; order_list= save_order_list;
if (parsing_place != SELECT_LIST) if (parsing_place != SELECT_LIST)
{ {
fields_in_window_functions+= win_part_list_ptr->elements + fields_in_window_functions+= win_part_list_ptr->elements +

View File

@ -342,7 +342,7 @@ static bool register_builtin(struct st_maria_plugin *, struct st_plugin_int *,
struct st_plugin_int **); struct st_plugin_int **);
static void unlock_variables(THD *thd, struct system_variables *vars); static void unlock_variables(THD *thd, struct system_variables *vars);
static void cleanup_variables(struct system_variables *vars); static void cleanup_variables(struct system_variables *vars);
static void plugin_vars_free_values(sys_var *vars); static void plugin_vars_free_values(st_mysql_sys_var **vars);
static void restore_ptr_backup(uint n, st_ptr_backup *backup); static void restore_ptr_backup(uint n, st_ptr_backup *backup);
static void intern_plugin_unlock(LEX *lex, plugin_ref plugin); static void intern_plugin_unlock(LEX *lex, plugin_ref plugin);
static void reap_plugins(void); static void reap_plugins(void);
@ -1290,7 +1290,7 @@ static void plugin_del(struct st_plugin_int *plugin)
DBUG_ENTER("plugin_del"); DBUG_ENTER("plugin_del");
mysql_mutex_assert_owner(&LOCK_plugin); mysql_mutex_assert_owner(&LOCK_plugin);
/* Free allocated strings before deleting the plugin. */ /* Free allocated strings before deleting the plugin. */
plugin_vars_free_values(plugin->system_vars); plugin_vars_free_values(plugin->plugin->system_vars);
restore_ptr_backup(plugin->nbackups, plugin->ptr_backup); restore_ptr_backup(plugin->nbackups, plugin->ptr_backup);
if (plugin->plugin_dl) if (plugin->plugin_dl)
{ {
@ -2941,6 +2941,7 @@ sys_var *find_sys_var(THD *thd, const char *str, size_t length,
/* /*
called by register_var, construct_options and test_plugin_options. called by register_var, construct_options and test_plugin_options.
Returns the 'bookmark' for the named variable. Returns the 'bookmark' for the named variable.
returns null for non thd-local variables.
LOCK_system_variables_hash should be at least read locked LOCK_system_variables_hash should be at least read locked
*/ */
static st_bookmark *find_bookmark(const char *plugin, const char *name, static st_bookmark *find_bookmark(const char *plugin, const char *name,
@ -2997,7 +2998,6 @@ static size_t var_storage_size(int flags)
/* /*
returns a bookmark for thd-local variables, creating if necessary. returns a bookmark for thd-local variables, creating if necessary.
returns null for non thd-local variables.
Requires that a write lock is obtained on LOCK_system_variables_hash Requires that a write lock is obtained on LOCK_system_variables_hash
*/ */
static st_bookmark *register_var(const char *plugin, const char *name, static st_bookmark *register_var(const char *plugin, const char *name,
@ -3351,27 +3351,35 @@ void plugin_thdvar_cleanup(THD *thd)
variables are no longer accessible and the value space is lost. Note variables are no longer accessible and the value space is lost. Note
that only string values with PLUGIN_VAR_MEMALLOC are allocated and that only string values with PLUGIN_VAR_MEMALLOC are allocated and
must be freed. must be freed.
@param[in] vars Chain of system variables of a plugin
*/ */
static void plugin_vars_free_values(sys_var *vars) static void plugin_vars_free_values(st_mysql_sys_var **vars)
{ {
DBUG_ENTER("plugin_vars_free_values"); DBUG_ENTER("plugin_vars_free_values");
for (sys_var *var= vars; var; var= var->next) if (!vars)
DBUG_VOID_RETURN;
while(st_mysql_sys_var *var= *vars++)
{ {
sys_var_pluginvar *piv= var->cast_pluginvar(); if ((var->flags & PLUGIN_VAR_TYPEMASK) == PLUGIN_VAR_STR &&
if (piv && var->flags & PLUGIN_VAR_MEMALLOC)
((piv->plugin_var->flags & PLUGIN_VAR_TYPEMASK) == PLUGIN_VAR_STR) &&
(piv->plugin_var->flags & PLUGIN_VAR_MEMALLOC))
{ {
/* Free the string from global_system_variables. */ char **val;
char **valptr= (char**) piv->real_value_ptr(NULL, OPT_GLOBAL); if (var->flags & PLUGIN_VAR_THDLOCAL)
{
st_bookmark *v= find_bookmark(0, var->name, var->flags);
if (!v)
continue;
val= (char**)(global_system_variables.dynamic_variables_ptr + v->offset);
}
else
val= *(char***) (var + 1);
DBUG_PRINT("plugin", ("freeing value for: '%s' addr: %p", DBUG_PRINT("plugin", ("freeing value for: '%s' addr: %p",
var->name.str, valptr)); var->name, val));
my_free(*valptr); my_free(*val);
*valptr= NULL; *val= NULL;
} }
} }
DBUG_VOID_RETURN; DBUG_VOID_RETURN;
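
The rewritten plugin_vars_free_values() walks the plugin's own st_mysql_sys_var array and, for global (non-THDLOCAL) string variables, reaches the value slot with *(char***)(var + 1). That works because the sysvar structs place the value pointer directly after the common header. The following is a layout sketch only, with hypothetical struct names standing in for the real plugin.h definitions:

    // Layout sketch only -- hypothetical struct names, not the real plugin.h
    // definitions. It illustrates why, for a non-thread-local string sysvar,
    // the value pointer can be reached as *(char***)(header + 1): the value
    // field sits right after the common header in the struct.
    #include <cassert>
    #include <cstdlib>
    #include <cstring>

    struct sysvar_header {        // stand-in for st_mysql_sys_var's common part
      int flags;
      const char *name;
      const char *comment;
    };

    struct sysvar_str {           // stand-in for a global (non-THDLOCAL) string sysvar
      sysvar_header head;
      char **value;               // points at the plugin's global char* variable
    };

    int main() {
      char *plugin_value = static_cast<char *>(std::malloc(6));   // PLUGIN_VAR_MEMALLOC-style value
      std::memcpy(plugin_value, "hello", 6);

      sysvar_str var = {{0, "demo_var", "demo"}, &plugin_value};
      sysvar_header *header = &var.head;

      // Same pointer arithmetic as in the hunk above: step over the common
      // header, then read the value slot laid out right after it.
      char **val = *(char ***)(header + 1);
      assert(val == &plugin_value);

      std::free(*val);            // what plugin_vars_free_values() does at unload time
      *val = nullptr;
      return 0;
    }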
@ -4031,7 +4039,7 @@ static my_option *construct_help_options(MEM_ROOT *mem_root,
bzero(opts, sizeof(my_option) * count); bzero(opts, sizeof(my_option) * count);
/** /**
some plugin variables (those that don't have PLUGIN_VAR_NOSYSVAR flag) some plugin variables
have their names prefixed with the plugin name. Restore the names here have their names prefixed with the plugin name. Restore the names here
to get the correct (not double-prefixed) help text. to get the correct (not double-prefixed) help text.
We won't need @@sysvars anymore and don't care about their proper names. We won't need @@sysvars anymore and don't care about their proper names.
@ -4143,9 +4151,6 @@ static int test_plugin_options(MEM_ROOT *tmp_root, struct st_plugin_int *tmp,
char *varname; char *varname;
sys_var *v; sys_var *v;
if (o->flags & PLUGIN_VAR_NOSYSVAR)
continue;
tmp_backup[tmp->nbackups++].save(&o->name); tmp_backup[tmp->nbackups++].save(&o->name);
if ((var= find_bookmark(tmp->name.str, o->name, o->flags))) if ((var= find_bookmark(tmp->name.str, o->name, o->flags)))
{ {
@ -4161,6 +4166,12 @@ static int test_plugin_options(MEM_ROOT *tmp_root, struct st_plugin_int *tmp,
my_casedn_str(&my_charset_latin1, varname); my_casedn_str(&my_charset_latin1, varname);
convert_dash_to_underscore(varname, len-1); convert_dash_to_underscore(varname, len-1);
} }
if (o->flags & PLUGIN_VAR_NOSYSVAR)
{
o->name= varname;
continue;
}
const char *s= o->flags & PLUGIN_VAR_DEPRECATED ? "" : NULL; const char *s= o->flags & PLUGIN_VAR_DEPRECATED ? "" : NULL;
v= new (mem_root) sys_var_pluginvar(&chain, varname, tmp, o, s); v= new (mem_root) sys_var_pluginvar(&chain, varname, tmp, o, s);
v->test_load= (var ? &var->loaded : &static_unload); v->test_load= (var ? &var->loaded : &static_unload);

View File

@ -3539,14 +3539,25 @@ bool JOIN::make_aggr_tables_info()
/* /*
If we have different sort & group then we must sort the data by group If we have different sort & group then we must sort the data by group
and copy it to another tmp table and copy it to another tmp table.
This code is also used if we are using distinct something This code is also used if we are using distinct something
we haven't been able to store in the temporary table yet we haven't been able to store in the temporary table yet
like SEC_TO_TIME(SUM(...)). like SEC_TO_TIME(SUM(...)).
3. Also, this is used when
- the query has Window functions,
- the GROUP BY operation is done with OrderedGroupBy algorithm.
In this case, the first temptable will contain pre-GROUP-BY data. Force
the creation of the second temporary table. Post-GROUP-BY dataset will be
written there, and then Window Function processing code will be able to
process it.
*/ */
if ((group_list && if ((group_list &&
(!test_if_subpart(group_list, order) || select_distinct)) || (!test_if_subpart(group_list, order) || select_distinct)) ||
(select_distinct && tmp_table_param.using_outer_summary_function)) (select_distinct && tmp_table_param.using_outer_summary_function) ||
(group_list && !tmp_table_param.quick_group && // (3)
select_lex->have_window_funcs())) // (3)
{ /* Must copy to another table */ { /* Must copy to another table */
DBUG_PRINT("info",("Creating group table")); DBUG_PRINT("info",("Creating group table"));
@ -7992,7 +8003,27 @@ best_access_path(JOIN *join,
(table->file->index_flags(start_key->key,0,1) & (table->file->index_flags(start_key->key,0,1) &
HA_DO_RANGE_FILTER_PUSHDOWN)) HA_DO_RANGE_FILTER_PUSHDOWN))
{ {
double rows= record_count * records; double rows;
if (type == JT_EQ_REF)
{
/*
Treat EQ_REF access in a special way:
1. We have no cost for index-only read. Assume its cost is 50% of
the cost of the full read.
2. A regular ref access will do #record_count lookups, but eq_ref
has "lookup cache" which reduces the number of lookups made.
The estimation code uses prev_record_reads() call to estimate:
tmp = prev_record_reads(join_positions, idx, found_ref);
Set the effective number of rows from "tmp" here.
*/
keyread_tmp= tmp/ 2;
rows= tmp;
}
else
rows= record_count * records;
/* /*
If we use filter F with selectivity s the cost of fetching data If we use filter F with selectivity s the cost of fetching data
@ -8035,10 +8066,6 @@ best_access_path(JOIN *join,
we cannot use filters as the cost calculation below would cause we cannot use filters as the cost calculation below would cause
tmp to become negative. The future resolution is to not limit tmp to become negative. The future resolution is to not limit
cost with worst_seek. cost with worst_seek.
We cannot use filter with JT_EQ_REF as in this case 'tmp' is
number of rows from prev_record_read() and keyread_tmp is 0. These
numbers are not usable with rowid filter code.
*/ */
double access_cost_factor= MY_MIN((rows - keyread_tmp) / rows, 1.0); double access_cost_factor= MY_MIN((rows - keyread_tmp) / rows, 1.0);
if (!(records < s->worst_seeks && if (!(records < s->worst_seeks &&
@ -8046,7 +8073,7 @@ best_access_path(JOIN *join,
trace_access_idx.add("rowid_filter_skipped", "worst/max seeks clipping"); trace_access_idx.add("rowid_filter_skipped", "worst/max seeks clipping");
else if (access_cost_factor <= 0.0) else if (access_cost_factor <= 0.0)
trace_access_idx.add("rowid_filter_skipped", "cost_factor <= 0"); trace_access_idx.add("rowid_filter_skipped", "cost_factor <= 0");
else if (type != JT_EQ_REF) else
{ {
filter= filter=
table->best_range_rowid_filter_for_partial_join(start_key->key, table->best_range_rowid_filter_for_partial_join(start_key->key,
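
With the change above, JT_EQ_REF gets rows = tmp (the prev_record_reads() estimate) and keyread_tmp = tmp / 2, so the access_cost_factor computed a few lines below is now well defined for eq_ref access and the old `type != JT_EQ_REF` guard can go. A tiny arithmetic sketch with made-up numbers shows the factor coming out positive:

    // Illustrative arithmetic only (made-up numbers, not taken from the optimizer):
    // with keyread_tmp = tmp / 2 and rows = tmp, the factor (rows - keyread_tmp) / rows
    // evaluates to 0.5, so the "cost_factor <= 0" and EQ_REF guards no longer block
    // rowid filtering for eq_ref access.
    #include <algorithm>
    #include <cstdio>

    int main() {
      double tmp = 40.0;                 // hypothetical prev_record_reads() estimate
      double keyread_tmp = tmp / 2;      // assumed cost of the index-only half of the read
      double rows = tmp;                 // effective number of lookups for JT_EQ_REF

      double access_cost_factor = std::min((rows - keyread_tmp) / rows, 1.0);
      std::printf("access_cost_factor = %.2f\n", access_cost_factor);   // prints 0.50
      return access_cost_factor > 0.0 ? 0 : 1;                          // filter is considered
    }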
@ -22380,11 +22407,17 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
/* /*
@brief @brief
Perform a GROUP BY operation over a stream of rows ordered by their group. Perform OrderedGroupBy operation and write the output into join->result.
The result is sent into join->result.
@detail @detail
Also applies HAVING, etc. The input stream is ordered by the GROUP BY expression, so groups come
one after another. We only need to accumulate the aggregate value; when
a GROUP BY group ends, check the HAVING and send the group.
Note that the output comes in the GROUP BY order, which is required by
MySQL's GROUP BY semantics. No further sorting is needed.
@seealso end_write_group() also implements SortAndGroup
*/ */
enum_nested_loop_state enum_nested_loop_state
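
For readers less familiar with the server internals, the OrderedGroupBy scheme the new comment describes boils down to a single pass over key-sorted input. A standalone sketch of that loop (hypothetical, not server code):

    // Standalone sketch of the OrderedGroupBy idea described above: the input is
    // already sorted by the grouping key, so one pass with a single accumulator is
    // enough; a group is emitted (after a HAVING-style check) as soon as the key
    // changes, and the output naturally comes in GROUP BY order.
    #include <cstdio>
    #include <utility>
    #include <vector>

    int main() {
      // (group key, value) pairs, pre-sorted by key -- as the comment assumes.
      std::vector<std::pair<int, int>> rows = {{1, 10}, {1, 5}, {2, 7}, {3, 1}, {3, 2}};
      auto having = [](long sum) { return sum > 2; };   // stand-in for the HAVING clause

      long sum = 0;
      int current_key = rows.front().first;
      auto emit = [&](int key, long s) {
        if (having(s))
          std::printf("group %d -> %ld\n", key, s);     // "send the group"
      };

      for (auto &[key, value] : rows) {
        if (key != current_key) {        // group boundary: previous group is complete
          emit(current_key, sum);
          current_key = key;
          sum = 0;
        }
        sum += value;                    // accumulate the aggregate value
      }
      emit(current_key, sum);            // flush the last group
      return 0;
    }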
@ -22574,13 +22607,26 @@ end:
/* /*
@brief @brief
Perform a GROUP BY operation over rows coming in arbitrary order. Perform GROUP BY operation over rows coming in arbitrary order: use
TemporaryTableWithPartialSums algorithm.
This is done by looking up the group in a temp.table and updating group @detail
values. The TemporaryTableWithPartialSums algorithm is:
CREATE TEMPORARY TABLE tmp (
group_by_columns PRIMARY KEY,
partial_sum
);
for each row R in join output {
INSERT INTO tmp (R.group_by_columns, R.sum_value)
ON DUPLICATE KEY UPDATE partial_sum=partial_sum + R.sum_value;
}
@detail @detail
Also applies HAVING, etc. Also applies HAVING, etc.
@seealso end_unique_update()
*/ */
static enum_nested_loop_state static enum_nested_loop_state
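
The TemporaryTableWithPartialSums pseudo-code above maps naturally onto a uniquely keyed lookup structure. A standalone sketch (hypothetical, not server code) that models the temporary table with a hash map:

    // Standalone sketch of TemporaryTableWithPartialSums: the unique-keyed temporary
    // table is modelled by a hash map keyed by the GROUP BY columns; each incoming
    // row either inserts a new group or updates its partial sum, mirroring the
    // INSERT ... ON DUPLICATE KEY UPDATE pseudo-code above.
    #include <cstdio>
    #include <string>
    #include <unordered_map>
    #include <utility>
    #include <vector>

    int main() {
      // Rows arrive in arbitrary order: (group key, value).
      std::vector<std::pair<std::string, int>> rows =
          {{"b", 7}, {"a", 10}, {"b", 1}, {"a", 5}, {"c", 3}};

      std::unordered_map<std::string, long> tmp;   // "temporary table": key -> partial_sum
      for (auto &[key, value] : rows)
        tmp[key] += value;                         // insert-or-update the partial sum

      for (auto &[key, sum] : tmp)                 // note: output order is NOT the GROUP BY order
        std::printf("%s -> %ld\n", key.c_str(), sum);
      return 0;
    }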
@ -22730,13 +22776,15 @@ end_unique_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
/* /*
@brief @brief
Perform a GROUP BY operation over a stream of rows ordered by their group. Perform OrderedGroupBy operation and write the output into the temporary
Write the result into a temporary table. table (join_tab->table).
@detail @detail
Also applies HAVING, etc. The input stream is ordered by the GROUP BY expression, so groups come
one after another. We only need to accumulate the aggregate value; when
a GROUP BY group ends, check the HAVING and write the group.
The rows are written into temptable so e.g. filesort can read them. @seealso end_send_group() also implements OrderedGroupBy
*/ */
enum_nested_loop_state enum_nested_loop_state

View File

@ -1434,12 +1434,30 @@ public:
(set in make_join_statistics()) (set in make_join_statistics())
*/ */
bool impossible_where; bool impossible_where;
List<Item> all_fields; ///< to store all fields that used in query
/*
All fields used in the query processing.
Initially this is a list of fields from the query's SQL text.
Then, ORDER/GROUP BY and Window Function code add columns that need to
be saved to be available in the post-group-by context. These extra columns
are added to the front, because this->all_fields points to the suffix of
this list.
*/
List<Item> all_fields;
///Above list changed to use temporary table ///Above list changed to use temporary table
List<Item> tmp_all_fields1, tmp_all_fields2, tmp_all_fields3; List<Item> tmp_all_fields1, tmp_all_fields2, tmp_all_fields3;
///Part, shared with list above, emulate following list ///Part, shared with list above, emulate following list
List<Item> tmp_fields_list1, tmp_fields_list2, tmp_fields_list3; List<Item> tmp_fields_list1, tmp_fields_list2, tmp_fields_list3;
List<Item> &fields_list; ///< hold field list passed to mysql_select
/*
The original field list as it was passed to mysql_select(). This refers
to select_lex->item_list.
CAUTION: this list is a suffix of this->all_fields list, that is, it shares
elements with that list!
*/
List<Item> &fields_list;
List<Item> procedure_fields_list; List<Item> procedure_fields_list;
int error; int error;

View File

@ -10018,6 +10018,7 @@ bool mysql_alter_table(THD *thd, const LEX_CSTRING *new_db,
const LEX_CSTRING *new_name, const LEX_CSTRING *new_name,
HA_CREATE_INFO *create_info, HA_CREATE_INFO *create_info,
TABLE_LIST *table_list, TABLE_LIST *table_list,
Recreate_info *recreate_info,
Alter_info *alter_info, Alter_info *alter_info,
uint order_num, ORDER *order, bool ignore, uint order_num, ORDER *order, bool ignore,
bool if_exists) bool if_exists)
@ -11302,11 +11303,10 @@ end_inplace:
end_temporary: end_temporary:
thd->variables.option_bits&= ~OPTION_BIN_COMMIT_OFF; thd->variables.option_bits&= ~OPTION_BIN_COMMIT_OFF;
my_snprintf(alter_ctx.tmp_buff, sizeof(alter_ctx.tmp_buff), *recreate_info= Recreate_info(copied, deleted);
ER_THD(thd, ER_INSERT_INFO), thd->my_ok_with_recreate_info(*recreate_info,
(ulong) (copied + deleted), (ulong) deleted, (ulong) thd->get_stmt_da()->
(ulong) thd->get_stmt_da()->current_statement_warn_count()); current_statement_warn_count());
my_ok(thd, copied + deleted, 0L, alter_ctx.tmp_buff);
DEBUG_SYNC(thd, "alter_table_inplace_trans_commit"); DEBUG_SYNC(thd, "alter_table_inplace_trans_commit");
DBUG_RETURN(false); DBUG_RETURN(false);
@ -11816,7 +11816,8 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
Like mysql_alter_table(). Like mysql_alter_table().
*/ */
bool mysql_recreate_table(THD *thd, TABLE_LIST *table_list, bool table_copy) bool mysql_recreate_table(THD *thd, TABLE_LIST *table_list,
Recreate_info *recreate_info, bool table_copy)
{ {
HA_CREATE_INFO create_info; HA_CREATE_INFO create_info;
Alter_info alter_info; Alter_info alter_info;
@ -11842,8 +11843,11 @@ bool mysql_recreate_table(THD *thd, TABLE_LIST *table_list, bool table_copy)
Alter_info::ALTER_TABLE_ALGORITHM_COPY); Alter_info::ALTER_TABLE_ALGORITHM_COPY);
bool res= mysql_alter_table(thd, &null_clex_str, &null_clex_str, &create_info, bool res= mysql_alter_table(thd, &null_clex_str, &null_clex_str, &create_info,
table_list, &alter_info, 0, table_list, recreate_info, &alter_info, 0,
(ORDER *) 0, 0, 0); (ORDER *) 0,
// Ignore duplicate records on REPAIR
thd->lex->sql_command == SQLCOM_REPAIR,
0);
table_list->next_global= next_table; table_list->next_global= next_table;
DBUG_RETURN(res); DBUG_RETURN(res);
} }

View File

@ -224,6 +224,7 @@ bool mysql_alter_table(THD *thd, const LEX_CSTRING *new_db,
const LEX_CSTRING *new_name, const LEX_CSTRING *new_name,
HA_CREATE_INFO *create_info, HA_CREATE_INFO *create_info,
TABLE_LIST *table_list, TABLE_LIST *table_list,
class Recreate_info *recreate_info,
Alter_info *alter_info, Alter_info *alter_info,
uint order_num, ORDER *order, bool ignore, uint order_num, ORDER *order, bool ignore,
bool if_exists); bool if_exists);
@ -231,7 +232,8 @@ bool mysql_compare_tables(TABLE *table,
Alter_info *alter_info, Alter_info *alter_info,
HA_CREATE_INFO *create_info, HA_CREATE_INFO *create_info,
bool *metadata_equal); bool *metadata_equal);
bool mysql_recreate_table(THD *thd, TABLE_LIST *table_list, bool table_copy); bool mysql_recreate_table(THD *thd, TABLE_LIST *table_list,
class Recreate_info *recreate_info, bool table_copy);
bool mysql_create_like_table(THD *thd, TABLE_LIST *table, bool mysql_create_like_table(THD *thd, TABLE_LIST *table,
TABLE_LIST *src_table, TABLE_LIST *src_table,
Table_specification_st *create_info); Table_specification_st *create_info);

View File

@ -124,6 +124,32 @@ enum scalar_comparison_op
}; };
class Hasher
{
ulong m_nr1;
ulong m_nr2;
public:
Hasher(): m_nr1(1), m_nr2(4)
{ }
void add_null()
{
m_nr1^= (m_nr1 << 1) | 1;
}
void add(CHARSET_INFO *cs, const uchar *str, size_t length)
{
cs->coll->hash_sort(cs, str, length, &m_nr1, &m_nr2);
}
void add(CHARSET_INFO *cs, const char *str, size_t length)
{
add(cs, (const uchar *) str, length);
}
uint32 finalize() const
{
return (uint32) m_nr1;
}
};
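
A possible calling pattern for the new Hasher helper, assuming it is compiled inside the server tree where CHARSET_INFO and my_charset_bin are available; this is an illustrative sketch, not an excerpt of an actual caller:

    // Usage sketch for the Hasher helper above. Assumes the header that declares
    // Hasher (the file shown in this hunk) is included along with the charset
    // machinery from m_ctype.h. It shows the intended calling pattern -- mix NULLs
    // and column bytes, then finalize.
    #include <my_global.h>
    #include <m_ctype.h>
    #include <stdio.h>

    static uint32 hash_two_columns(const char *col1, size_t len1, bool col2_is_null)
    {
      Hasher hasher;
      hasher.add(&my_charset_bin, col1, len1);   // first column: raw bytes
      if (col2_is_null)
        hasher.add_null();                       // second column: SQL NULL
      return hasher.finalize();
    }

    int main()
    {
      printf("%u\n", hash_two_columns("abc", 3, true));
      return 0;
    }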
enum partition_value_print_mode_t enum partition_value_print_mode_t
{ {
PARTITION_VALUE_PRINT_MODE_SHOW= 0, PARTITION_VALUE_PRINT_MODE_SHOW= 0,

View File

@ -1097,6 +1097,18 @@ static void mysql57_calculate_null_position(TABLE_SHARE *share,
} }
} }
Item_func_hash *TABLE_SHARE::make_long_hash_func(THD *thd,
MEM_ROOT *mem_root,
List<Item> *field_list)
const
{
if (old_long_hash_function())
return new (mem_root) Item_func_hash_mariadb_100403(thd, *field_list);
return new (mem_root) Item_func_hash(thd, *field_list);
}
/** Parse TABLE_SHARE::vcol_defs /** Parse TABLE_SHARE::vcol_defs
unpack_vcol_info_from_frm unpack_vcol_info_from_frm
@ -1308,7 +1320,10 @@ bool parse_vcol_defs(THD *thd, MEM_ROOT *mem_root, TABLE *table,
list_item= new (mem_root) Item_field(thd, keypart->field); list_item= new (mem_root) Item_field(thd, keypart->field);
field_list->push_back(list_item, mem_root); field_list->push_back(list_item, mem_root);
} }
Item_func_hash *hash_item= new(mem_root)Item_func_hash(thd, *field_list);
Item_func_hash *hash_item= table->s->make_long_hash_func(thd, mem_root,
field_list);
Virtual_column_info *v= new (mem_root) Virtual_column_info(); Virtual_column_info *v= new (mem_root) Virtual_column_info();
field->vcol_info= v; field->vcol_info= v;
field->vcol_info->expr= hash_item; field->vcol_info->expr= hash_item;

View File

@ -55,6 +55,7 @@ class Item; /* Needed by ORDER */
typedef Item (*Item_ptr); typedef Item (*Item_ptr);
class Item_subselect; class Item_subselect;
class Item_field; class Item_field;
class Item_func_hash;
class GRANT_TABLE; class GRANT_TABLE;
class st_select_lex_unit; class st_select_lex_unit;
class st_select_lex; class st_select_lex;
@ -1165,6 +1166,21 @@ struct TABLE_SHARE
void free_frm_image(const uchar *frm); void free_frm_image(const uchar *frm);
void set_overlapped_keys(); void set_overlapped_keys();
bool old_long_hash_function() const
{
return mysql_version < 100428 ||
(mysql_version >= 100500 && mysql_version < 100519) ||
(mysql_version >= 100600 && mysql_version < 100612) ||
(mysql_version >= 100700 && mysql_version < 100708) ||
(mysql_version >= 100800 && mysql_version < 100807) ||
(mysql_version >= 100900 && mysql_version < 100905) ||
(mysql_version >= 101000 && mysql_version < 101003) ||
(mysql_version >= 101100 && mysql_version < 101102);
}
Item_func_hash *make_long_hash_func(THD *thd,
MEM_ROOT *mem_root,
List<Item> *field_list) const;
}; };
/* not NULL, but cannot be dereferenced */ /* not NULL, but cannot be dereferenced */
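
old_long_hash_function() keys off TABLE_SHARE::mysql_version, i.e. the server version that wrote the .frm file. The standalone restatement below copies the version windows from the hunk and checks a couple of boundary values; the helper name is made up:

    // Standalone restatement of the version windows above (helper name is made up).
    // A table created by a server version inside one of these windows keeps the old
    // (pre-fix) long-unique hash; anything newer uses the corrected Item_func_hash.
    #include <cassert>

    static bool uses_old_long_hash(unsigned long mysql_version)
    {
      return mysql_version < 100428 ||
             (mysql_version >= 100500 && mysql_version < 100519) ||
             (mysql_version >= 100600 && mysql_version < 100612) ||
             (mysql_version >= 100700 && mysql_version < 100708) ||
             (mysql_version >= 100800 && mysql_version < 100807) ||
             (mysql_version >= 100900 && mysql_version < 100905) ||
             (mysql_version >= 101000 && mysql_version < 101003) ||
             (mysql_version >= 101100 && mysql_version < 101102);
    }

    int main()
    {
      assert(uses_old_long_hash(100427));   // .frm written by 10.4.27: old hash
      assert(!uses_old_long_hash(100428));  // 10.4.28: new hash
      assert(uses_old_long_hash(100518));   // 10.5.18: old hash
      assert(!uses_old_long_hash(100519));  // 10.5.19: new hash
      return 0;
    }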

View File

@ -995,13 +995,19 @@ void wsrep_init_startup (bool sst_first)
With mysqldump SST (!sst_first) wait until the server reaches With mysqldump SST (!sst_first) wait until the server reaches
joiner state and proceed to accepting connections. joiner state and proceed to accepting connections.
*/ */
int err= 0;
if (sst_first) if (sst_first)
{ {
server_state.wait_until_state(Wsrep_server_state::s_initializing); err= server_state.wait_until_state(Wsrep_server_state::s_initializing);
} }
else else
{ {
server_state.wait_until_state(Wsrep_server_state::s_joiner); err= server_state.wait_until_state(Wsrep_server_state::s_joiner);
}
if (err)
{
WSREP_ERROR("Wsrep startup was interrupted");
unireg_abort(1);
} }
} }
@ -1107,7 +1113,11 @@ void wsrep_stop_replication(THD *thd)
{ {
WSREP_DEBUG("Disconnect provider"); WSREP_DEBUG("Disconnect provider");
Wsrep_server_state::instance().disconnect(); Wsrep_server_state::instance().disconnect();
Wsrep_server_state::instance().wait_until_state(Wsrep_server_state::s_disconnected); if (Wsrep_server_state::instance().wait_until_state(
Wsrep_server_state::s_disconnected))
{
WSREP_WARN("Wsrep interrupted while waiting for disconnected state");
}
} }
/* my connection, should not terminate with wsrep_close_client_connection(), /* my connection, should not terminate with wsrep_close_client_connection(),
@ -1129,7 +1139,11 @@ void wsrep_shutdown_replication()
{ {
WSREP_DEBUG("Disconnect provider"); WSREP_DEBUG("Disconnect provider");
Wsrep_server_state::instance().disconnect(); Wsrep_server_state::instance().disconnect();
Wsrep_server_state::instance().wait_until_state(Wsrep_server_state::s_disconnected); if (Wsrep_server_state::instance().wait_until_state(
Wsrep_server_state::s_disconnected))
{
WSREP_WARN("Wsrep interrupted while waiting for disconnected state");
}
} }
wsrep_close_client_connections(TRUE); wsrep_close_client_connections(TRUE);

View File

@ -336,10 +336,15 @@ static bool wsrep_sst_complete (THD* thd,
if ((state == Wsrep_server_state::s_joiner || if ((state == Wsrep_server_state::s_joiner ||
state == Wsrep_server_state::s_initialized)) state == Wsrep_server_state::s_initialized))
{ {
Wsrep_server_state::instance().sst_received(client_service, if (Wsrep_server_state::instance().sst_received(client_service, rcode))
rcode); {
failed= true;
}
else
{
WSREP_INFO("SST succeeded for position %s", start_pos_buf); WSREP_INFO("SST succeeded for position %s", start_pos_buf);
} }
}
else else
{ {
WSREP_ERROR("SST failed for position %s initialized %d server_state %s", WSREP_ERROR("SST failed for position %s initialized %d server_state %s",

View File

@ -975,13 +975,13 @@ PSZ ARRAY::MakeArrayList(PGLOBAL g)
xtrc(1, "Arraylist: len=%d\n", len); xtrc(1, "Arraylist: len=%d\n", len);
p = (char *)PlugSubAlloc(g, NULL, len); p = (char *)PlugSubAlloc(g, NULL, len);
strcpy(p, "("); safe_strcpy(p, len, "(");
for (i = 0; i < Nval;) { for (i = 0; i < Nval;) {
Value->SetValue_pvblk(Vblp, i); Value->SetValue_pvblk(Vblp, i);
Value->Prints(g, tp, z); Value->Prints(g, tp, z);
strcat(p, tp); safe_strcat(p, len, tp);
strcat(p, (++i == Nval) ? ")" : ","); safe_strcat(p, len, (++i == Nval) ? ")" : ",");
} // enfor i } // enfor i
xtrc(1, "Arraylist: newlen=%d\n", strlen(p)); xtrc(1, "Arraylist: newlen=%d\n", strlen(p));

View File

@ -10,6 +10,7 @@
/* Include relevant sections of the MariaDB header file. */ /* Include relevant sections of the MariaDB header file. */
/***********************************************************************/ /***********************************************************************/
#include <my_global.h> #include <my_global.h>
#include <m_string.h>
/***********************************************************************/ /***********************************************************************/
/* Include application header files: */ /* Include application header files: */
@ -598,7 +599,7 @@ PSZ BDOC::Serialize(PGLOBAL g, PBVAL bvp, char* fn, int pretty)
try { try {
if (!bvp) { if (!bvp) {
strcpy(g->Message, "Null json tree"); safe_strcpy(g->Message, sizeof(g->Message), "Null json tree");
throw 1; throw 1;
} else if (!fn) { } else if (!fn) {
// Serialize to a string // Serialize to a string
@ -606,9 +607,8 @@ PSZ BDOC::Serialize(PGLOBAL g, PBVAL bvp, char* fn, int pretty)
b = pretty == 1; b = pretty == 1;
} else { } else {
if (!(fs = fopen(fn, "wb"))) { if (!(fs = fopen(fn, "wb"))) {
snprintf(g->Message, sizeof(g->Message), MSG(OPEN_MODE_ERROR), snprintf(g->Message, sizeof(g->Message), MSG(OPEN_MODE_ERROR) ": %s",
"w", (int)errno, fn); "w", (int)errno, fn, strerror(errno));
strcat(strcat(g->Message, ": "), strerror(errno));
throw 2; throw 2;
} else if (pretty >= 2) { } else if (pretty >= 2) {
// Serialize to a pretty file // Serialize to a pretty file

View File

@ -4910,7 +4910,7 @@ char *bbin_make_array(UDF_INIT *initid, UDF_ARGS *args, char *result,
} // endfor i } // endfor i
if ((bsp = BbinAlloc(bnx.G, initid->max_length, arp))) { if ((bsp = BbinAlloc(bnx.G, initid->max_length, arp))) {
strcat(bsp->Msg, " array"); safe_strcat(bsp->Msg, sizeof(bsp->Msg), " array");
// Keep result of constant function // Keep result of constant function
g->Xchk = (initid->const_item) ? bsp : NULL; g->Xchk = (initid->const_item) ? bsp : NULL;
@ -5108,8 +5108,9 @@ char *bbin_array_grp(UDF_INIT *initid, UDF_ARGS *, char *result,
PUSH_WARNING("Result truncated to json_grp_size values"); PUSH_WARNING("Result truncated to json_grp_size values");
if (arp) if (arp)
if ((bsp = BbinAlloc(g, initid->max_length, arp))) if ((bsp = BbinAlloc(g, initid->max_length, arp))) {
strcat(bsp->Msg, " array"); safe_strcat(bsp->Msg, sizeof(bsp->Msg), " array");
}
if (!bsp) { if (!bsp) {
*res_length = 0; *res_length = 0;
@ -5155,8 +5156,9 @@ char *bbin_object_grp(UDF_INIT *initid, UDF_ARGS *, char *result,
PUSH_WARNING("Result truncated to json_grp_size values"); PUSH_WARNING("Result truncated to json_grp_size values");
if (bop) if (bop)
if ((bsp = BbinAlloc(g, initid->max_length, bop))) if ((bsp = BbinAlloc(g, initid->max_length, bop))) {
strcat(bsp->Msg, " object"); safe_strcat(bsp->Msg, sizeof(bsp->Msg), " object");
}
if (!bsp) { if (!bsp) {
*res_length = 0; *res_length = 0;
@ -5200,7 +5202,7 @@ char *bbin_make_object(UDF_INIT *initid, UDF_ARGS *args, char *result,
bnx.SetKeyValue(objp, bnx.MakeValue(args, i), bnx.MakeKey(args, i)); bnx.SetKeyValue(objp, bnx.MakeValue(args, i), bnx.MakeKey(args, i));
if ((bsp = BbinAlloc(bnx.G, initid->max_length, objp))) { if ((bsp = BbinAlloc(bnx.G, initid->max_length, objp))) {
strcat(bsp->Msg, " object"); safe_strcat(bsp->Msg, sizeof(bsp->Msg), " object");
// Keep result of constant function // Keep result of constant function
g->Xchk = (initid->const_item) ? bsp : NULL; g->Xchk = (initid->const_item) ? bsp : NULL;
@ -5255,7 +5257,7 @@ char *bbin_object_nonull(UDF_INIT *initid, UDF_ARGS *args, char *result,
bnx.SetKeyValue(objp, jvp, bnx.MakeKey(args, i)); bnx.SetKeyValue(objp, jvp, bnx.MakeKey(args, i));
if ((bsp = BbinAlloc(bnx.G, initid->max_length, objp))) { if ((bsp = BbinAlloc(bnx.G, initid->max_length, objp))) {
strcat(bsp->Msg, " object"); safe_strcat(bsp->Msg, sizeof(bsp->Msg), " object");
// Keep result of constant function // Keep result of constant function
g->Xchk = (initid->const_item) ? bsp : NULL; g->Xchk = (initid->const_item) ? bsp : NULL;
@ -5314,7 +5316,7 @@ char *bbin_object_key(UDF_INIT *initid, UDF_ARGS *args, char *result,
bnx.SetKeyValue(objp, bnx.MakeValue(args, i + 1), MakePSZ(g, args, i)); bnx.SetKeyValue(objp, bnx.MakeValue(args, i + 1), MakePSZ(g, args, i));
if ((bsp = BbinAlloc(bnx.G, initid->max_length, objp))) { if ((bsp = BbinAlloc(bnx.G, initid->max_length, objp))) {
strcat(bsp->Msg, " object"); safe_strcat(bsp->Msg, sizeof(bsp->Msg), " object");
// Keep result of constant function // Keep result of constant function
g->Xchk = (initid->const_item) ? bsp : NULL; g->Xchk = (initid->const_item) ? bsp : NULL;
@ -6077,7 +6079,7 @@ char *bbin_file(UDF_INIT *initid, UDF_ARGS *args, char *result,
// pretty = pty; // pretty = pty;
if ((bsp = BbinAlloc(bnx.G, len, jsp))) { if ((bsp = BbinAlloc(bnx.G, len, jsp))) {
strcat(bsp->Msg, " file"); safe_strcat(bsp->Msg, sizeof(bsp->Msg), " file");
bsp->Filename = fn; bsp->Filename = fn;
bsp->Pretty = pretty; bsp->Pretty = pretty;
} else { } else {

View File

@ -442,7 +442,7 @@ PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, PTOS topt, bool info)
hp->Headlen, hp->Filedate[0], hp->Filedate[1], hp->Headlen, hp->Filedate[0], hp->Filedate[1],
hp->Filedate[2]); hp->Filedate[2]);
strcat(g->Message, buf); safe_strcat(g->Message, sizeof(g->Message), buf);
} // endif info } // endif info
#endif // 0 #endif // 0

View File

@ -36,6 +36,8 @@
#include <fcntl.h> #include <fcntl.h>
#endif // !_WIN32 #endif // !_WIN32
#include <m_string.h>
/***********************************************************************/ /***********************************************************************/
/* Include application header files: */ /* Include application header files: */
/* global.h is header containing all global declarations. */ /* global.h is header containing all global declarations. */
@ -881,7 +883,6 @@ bool BGXFAM::OpenTableFile(PGLOBAL g)
FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM | FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM |
FORMAT_MESSAGE_IGNORE_INSERTS, NULL, rc, 0, FORMAT_MESSAGE_IGNORE_INSERTS, NULL, rc, 0,
(LPTSTR)filename, sizeof(filename), NULL); (LPTSTR)filename, sizeof(filename), NULL);
strcat(g->Message, filename);
} else } else
rc = 0; rc = 0;
@ -1002,7 +1003,7 @@ int BGXFAM::Cardinality(PGLOBAL g)
FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM | FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM |
FORMAT_MESSAGE_IGNORE_INSERTS, NULL, rc, 0, FORMAT_MESSAGE_IGNORE_INSERTS, NULL, rc, 0,
(LPTSTR)filename, sizeof(filename), NULL); (LPTSTR)filename, sizeof(filename), NULL);
strcat(g->Message, filename); safe_strcat(g->Message, sizeof(g->Message), filename);
return -1; return -1;
} else } else
return 0; // File does not exist return 0; // File does not exist
@ -1382,7 +1383,8 @@ bool BGXFAM::OpenTempFile(PGLOBAL g)
/*********************************************************************/ /*********************************************************************/
tempname = (char*)PlugSubAlloc(g, NULL, _MAX_PATH); tempname = (char*)PlugSubAlloc(g, NULL, _MAX_PATH);
PlugSetPath(tempname, To_File, Tdbp->GetPath()); PlugSetPath(tempname, To_File, Tdbp->GetPath());
strcat(PlugRemoveType(tempname, tempname), ".t"); PlugRemoveType(tempname, tempname);
safe_strcat(tempname, _MAX_PATH, ".t");
remove(tempname); // Be sure it does not exist yet remove(tempname); // Be sure it does not exist yet
#if defined(_WIN32) #if defined(_WIN32)
@ -1391,11 +1393,12 @@ bool BGXFAM::OpenTempFile(PGLOBAL g)
if (Tfile == INVALID_HANDLE_VALUE) { if (Tfile == INVALID_HANDLE_VALUE) {
DWORD rc = GetLastError(); DWORD rc = GetLastError();
snprintf(g->Message, sizeof(g->Message), MSG(OPEN_ERROR), rc, MODE_INSERT, tempname); snprintf(g->Message, sizeof(g->Message), MSG(OPEN_ERROR), rc, MODE_INSERT,
tempname);
FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM | FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM |
FORMAT_MESSAGE_IGNORE_INSERTS, NULL, rc, 0, FORMAT_MESSAGE_IGNORE_INSERTS, NULL, rc, 0,
(LPTSTR)tempname, _MAX_PATH, NULL); (LPTSTR)tempname, _MAX_PATH, NULL);
strcat(g->Message, tempname); safe_strcat(g->Message, sizeof(g->Message), tempname);
return true; return true;
} // endif Tfile } // endif Tfile
#else // UNIX #else // UNIX
@ -1403,8 +1406,8 @@ bool BGXFAM::OpenTempFile(PGLOBAL g)
if (Tfile == INVALID_HANDLE_VALUE) { if (Tfile == INVALID_HANDLE_VALUE) {
int rc = errno; int rc = errno;
snprintf(g->Message, sizeof(g->Message), MSG(OPEN_ERROR), rc, MODE_INSERT, tempname); snprintf(g->Message, sizeof(g->Message), MSG(OPEN_ERROR)" %s", rc,
strcat(g->Message, strerror(errno)); MODE_INSERT, tempname, strerror(errno));
return true; return true;
} //endif Tfile } //endif Tfile
#endif // UNIX #endif // UNIX

View File

@ -33,6 +33,8 @@
#include <fcntl.h> #include <fcntl.h>
#endif // !_WIN32 #endif // !_WIN32
#include <m_string.h>
/***********************************************************************/ /***********************************************************************/
/* Include application header files: */ /* Include application header files: */
/* global.h is header containing all global declarations. */ /* global.h is header containing all global declarations. */
@ -128,12 +130,13 @@ int GZFAM::GetFileLength(PGLOBAL g)
/***********************************************************************/ /***********************************************************************/
bool GZFAM::OpenTableFile(PGLOBAL g) bool GZFAM::OpenTableFile(PGLOBAL g)
{ {
char opmode[4], filename[_MAX_PATH]; const char *opmode;
char filename[_MAX_PATH];
MODE mode = Tdbp->GetMode(); MODE mode = Tdbp->GetMode();
switch (mode) { switch (mode) {
case MODE_READ: case MODE_READ:
strcpy(opmode, "r"); opmode = "rb";
break; break;
case MODE_UPDATE: case MODE_UPDATE:
/*****************************************************************/ /*****************************************************************/
@ -147,7 +150,7 @@ bool GZFAM::OpenTableFile(PGLOBAL g)
DelRows = Cardinality(g); DelRows = Cardinality(g);
// This will erase the entire file // This will erase the entire file
strcpy(opmode, "w"); opmode = "wb";
// Block = 0; // For ZBKFAM // Block = 0; // For ZBKFAM
// Last = Nrec; // For ZBKFAM // Last = Nrec; // For ZBKFAM
Tdbp->ResetSize(); Tdbp->ResetSize();
@ -158,7 +161,7 @@ bool GZFAM::OpenTableFile(PGLOBAL g)
break; break;
case MODE_INSERT: case MODE_INSERT:
strcpy(opmode, "a+"); opmode = "a+b";
break; break;
default: default:
snprintf(g->Message, sizeof(g->Message), MSG(BAD_OPEN_MODE), mode); snprintf(g->Message, sizeof(g->Message), MSG(BAD_OPEN_MODE), mode);
@ -170,13 +173,11 @@ bool GZFAM::OpenTableFile(PGLOBAL g)
/* Use specific zlib functions. */ /* Use specific zlib functions. */
/* Treat files as binary. */ /* Treat files as binary. */
/*********************************************************************/ /*********************************************************************/
strcat(opmode, "b");
Zfile = gzopen(PlugSetPath(filename, To_File, Tdbp->GetPath()), opmode); Zfile = gzopen(PlugSetPath(filename, To_File, Tdbp->GetPath()), opmode);
if (Zfile == NULL) { if (Zfile == NULL) {
snprintf(g->Message, sizeof(g->Message), MSG(GZOPEN_ERROR), snprintf(g->Message, sizeof(g->Message), MSG(GZOPEN_ERROR) ": %s",
opmode, (int)errno, filename); opmode, (int)errno, filename, strerror(errno));
strcat(strcat(g->Message, ": "), strerror(errno));
return (mode == MODE_READ && errno == ENOENT) return (mode == MODE_READ && errno == ENOENT)
? PushWarning(g, Tdbp) : true; ? PushWarning(g, Tdbp) : true;
} // endif Zfile } // endif Zfile

View File

@ -38,6 +38,8 @@
#include <fcntl.h> #include <fcntl.h>
#endif // !_WIN32 #endif // !_WIN32
#include <m_string.h>
/***********************************************************************/ /***********************************************************************/
/* Include application header files: */ /* Include application header files: */
/* global.h is header containing all global declarations. */ /* global.h is header containing all global declarations. */
@ -593,7 +595,7 @@ bool DOSFAM::OpenTableFile(PGLOBAL g)
} // endswitch Mode } // endswitch Mode
// For blocked I/O or for moving lines, open the table in binary // For blocked I/O or for moving lines, open the table in binary
strcat(opmode, (Bin) ? "b" : "t"); safe_strcat(opmode, sizeof(opmode), (Bin) ? "b" : "t");
// Now open the file stream // Now open the file stream
PlugSetPath(filename, To_File, Tdbp->GetPath()); PlugSetPath(filename, To_File, Tdbp->GetPath());
@ -1081,7 +1083,8 @@ bool DOSFAM::OpenTempFile(PGLOBAL g)
/* Open the temporary file, Spos is at the beginning of file. */ /* Open the temporary file, Spos is at the beginning of file. */
/*********************************************************************/ /*********************************************************************/
PlugSetPath(tempname, To_File, Tdbp->GetPath()); PlugSetPath(tempname, To_File, Tdbp->GetPath());
strcat(PlugRemoveType(tempname, tempname), ".t"); PlugRemoveType(tempname, tempname);
safe_strcat(tempname, sizeof(tempname), ".t");
if (!(T_Stream = PlugOpenFile(g, tempname, "wb"))) { if (!(T_Stream = PlugOpenFile(g, tempname, "wb"))) {
if (trace(1)) if (trace(1))
@ -1170,7 +1173,8 @@ int DOSFAM::RenameTempFile(PGLOBAL g)
if (!Abort) { if (!Abort) {
PlugSetPath(filename, To_File, Tdbp->GetPath()); PlugSetPath(filename, To_File, Tdbp->GetPath());
strcat(PlugRemoveType(filetemp, filename), ".ttt"); PlugRemoveType(filetemp, filename);
safe_strcat(filetemp, sizeof(filetemp), ".ttt");
remove(filetemp); // May still be there from previous error remove(filetemp); // May still be there from previous error
if (rename(filename, filetemp)) { // Save file for security if (rename(filename, filetemp)) { // Save file for security

View File

@ -42,6 +42,8 @@
#include <fcntl.h> #include <fcntl.h>
#endif // !_WIN32 #endif // !_WIN32
#include <m_string.h>
/***********************************************************************/ /***********************************************************************/
/* Include application header files: */ /* Include application header files: */
/* global.h is header containing all global declarations. */ /* global.h is header containing all global declarations. */
@ -194,7 +196,7 @@ int VCTFAM::GetBlockInfo(PGLOBAL g)
if (Header == 2) if (Header == 2)
{ {
PlugRemoveType(filename, filename); PlugRemoveType(filename, filename);
strncat(filename, ".blk", _MAX_PATH - strlen(filename)); safe_strcat(filename, sizeof(filename), ".blk");
} }
if ((h = global_open(g, MSGID_CANNOT_OPEN, filename, O_RDONLY)) == -1 if ((h = global_open(g, MSGID_CANNOT_OPEN, filename, O_RDONLY)) == -1
@ -251,7 +253,7 @@ bool VCTFAM::SetBlockInfo(PGLOBAL g)
} else { // Header == 2 } else { // Header == 2
PlugRemoveType(filename, filename); PlugRemoveType(filename, filename);
strncat(filename, ".blk", _MAX_PATH - strlen(filename)); safe_strcat(filename, sizeof(filename), ".blk");
s= global_fopen(g, MSGID_CANNOT_OPEN, filename, "wb"); s= global_fopen(g, MSGID_CANNOT_OPEN, filename, "wb");
} // endif Header } // endif Header
@ -587,7 +589,7 @@ bool VCTFAM::InitInsert(PGLOBAL g)
htrc("Exception %d: %s\n", n, g->Message); htrc("Exception %d: %s\n", n, g->Message);
rc = true; rc = true;
} catch (const char *msg) { } catch (const char *msg) {
strncpy(g->Message, msg, sizeof(g->Message)); safe_strcpy(g->Message, sizeof(g->Message), msg);
rc = true; rc = true;
} // end catch } // end catch
@ -891,8 +893,7 @@ bool VCTFAM::OpenTempFile(PGLOBAL g)
/*********************************************************************/ /*********************************************************************/
PlugSetPath(tempname, To_File, Tdbp->GetPath()); PlugSetPath(tempname, To_File, Tdbp->GetPath());
PlugRemoveType(tempname, tempname); PlugRemoveType(tempname, tempname);
strncat(tempname, ".t", _MAX_PATH - strlen(tempname)); safe_strcat(tempname, sizeof(tempname), ".t");
if (MaxBlk) { if (MaxBlk) {
if (MakeEmptyFile(g, tempname)) if (MakeEmptyFile(g, tempname))
return true; return true;
@ -1562,7 +1563,7 @@ bool VCMFAM::InitInsert(PGLOBAL g)
htrc("Exception %d: %s\n", n, g->Message); htrc("Exception %d: %s\n", n, g->Message);
rc = true; rc = true;
} catch (const char *msg) { } catch (const char *msg) {
strncpy(g->Message, msg, sizeof(g->Message)); safe_strcpy(g->Message, sizeof(g->Message), msg);
rc = true; rc = true;
} // end catch } // end catch
@ -2082,10 +2083,10 @@ bool VECFAM::AllocateBuffer(PGLOBAL g)
// Allocate all that is needed to move lines and make Temp // Allocate all that is needed to move lines and make Temp
if (UseTemp) { if (UseTemp) {
Tempat = (char*)PlugSubAlloc(g, NULL, _MAX_PATH); Tempat = (char*)PlugSubAlloc(g, NULL, _MAX_PATH);
strcpy(Tempat, Colfn); safe_strcpy(Tempat, _MAX_PATH, Colfn);
PlugSetPath(Tempat, Tempat, Tdbp->GetPath()); PlugSetPath(Tempat, Tempat, Tdbp->GetPath());
PlugRemoveType(Tempat, Tempat); PlugRemoveType(Tempat, Tempat);
strncat(Tempat, ".t", _MAX_PATH - strlen(Tempat)); safe_strcat(Tempat, _MAX_PATH, ".t");
T_Fbs = (PFBLOCK *)PlugSubAlloc(g, NULL, Ncol * sizeof(PFBLOCK)); T_Fbs = (PFBLOCK *)PlugSubAlloc(g, NULL, Ncol * sizeof(PFBLOCK));
} // endif UseTemp } // endif UseTemp
@ -2460,7 +2461,7 @@ int VECFAM::RenameTempFile(PGLOBAL g)
snprintf(filename, _MAX_PATH, Colfn, i+1); snprintf(filename, _MAX_PATH, Colfn, i+1);
PlugSetPath(filename, filename, Tdbp->GetPath()); PlugSetPath(filename, filename, Tdbp->GetPath());
PlugRemoveType(filetemp, filename); PlugRemoveType(filetemp, filename);
strncat(filetemp, ".ttt", _MAX_PATH - strlen(filetemp)); safe_strcat(filetemp, sizeof(filetemp), ".ttt");
remove(filetemp); // May still be there from previous error remove(filetemp); // May still be there from previous error
if (rename(filename, filetemp)) { // Save file for security if (rename(filename, filetemp)) { // Save file for security
@ -3221,7 +3222,7 @@ int BGVFAM::GetBlockInfo(PGLOBAL g)
if (Header == 2) if (Header == 2)
{ {
PlugRemoveType(filename, filename); PlugRemoveType(filename, filename);
strncat(filename, ".blk", _MAX_PATH - strlen(filename)); safe_strcat(filename, sizeof(filename), ".blk");
} }
#if defined(_WIN32) #if defined(_WIN32)
@ -3300,7 +3301,7 @@ bool BGVFAM::SetBlockInfo(PGLOBAL g)
} else // Header == 2 } else // Header == 2
{ {
PlugRemoveType(filename, filename); PlugRemoveType(filename, filename);
strncat(filename, ".blk", _MAX_PATH - strlen(filename)); safe_strcat(filename, sizeof(filename), ".blk");
} }
if (h == INVALID_HANDLE_VALUE) { if (h == INVALID_HANDLE_VALUE) {
@ -3398,7 +3399,7 @@ bool BGVFAM::MakeEmptyFile(PGLOBAL g, PCSZ fn)
FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM | FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM |
FORMAT_MESSAGE_IGNORE_INSERTS, NULL, rc, 0, FORMAT_MESSAGE_IGNORE_INSERTS, NULL, rc, 0,
(LPTSTR)filename, sizeof(filename), NULL); (LPTSTR)filename, sizeof(filename), NULL);
strncat(g->Message, filename, sizeof(g->Message) - strlen(g->Message)); safe_strcat(g->Message, sizeof(g->Message), filename);
if (h != INVALID_HANDLE_VALUE) if (h != INVALID_HANDLE_VALUE)
CloseHandle(h); CloseHandle(h);
@ -3534,7 +3535,7 @@ bool BGVFAM::OpenTableFile(PGLOBAL g)
FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM | FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM |
FORMAT_MESSAGE_IGNORE_INSERTS, NULL, rc, 0, FORMAT_MESSAGE_IGNORE_INSERTS, NULL, rc, 0,
(LPTSTR)filename, sizeof(filename), NULL); (LPTSTR)filename, sizeof(filename), NULL);
strncat(g->Message, filename, sizeof(g->Message) - strlen(g->Message)); safe_strcat(g->Message, sizeof(g->Message), filename);
} // endif Hfile } // endif Hfile
if (trace(1)) if (trace(1))
@ -3622,8 +3623,8 @@ bool BGVFAM::OpenTableFile(PGLOBAL g)
if (Hfile == INVALID_HANDLE_VALUE) { if (Hfile == INVALID_HANDLE_VALUE) {
rc = errno; rc = errno;
snprintf(g->Message, sizeof(g->Message), MSG(OPEN_ERROR), rc, mode, filename); snprintf(g->Message, sizeof(g->Message), MSG(OPEN_ERROR)"%s", rc, mode,
strncat(g->Message, strerror(errno), sizeof(g->Message) - strlen(g->Message)); filename, strerror(errno));
} // endif Hfile } // endif Hfile
if (trace(1)) if (trace(1))
@ -3967,7 +3968,7 @@ bool BGVFAM::OpenTempFile(PGLOBAL g)
tempname = (char*)PlugSubAlloc(g, NULL, _MAX_PATH); tempname = (char*)PlugSubAlloc(g, NULL, _MAX_PATH);
PlugSetPath(tempname, To_File, Tdbp->GetPath()); PlugSetPath(tempname, To_File, Tdbp->GetPath());
PlugRemoveType(tempname, tempname); PlugRemoveType(tempname, tempname);
strncat(tempname, ".t", _MAX_PATH - strlen(tempname)); safe_strcat(tempname, _MAX_PATH, ".t");
if (!MaxBlk) if (!MaxBlk)
remove(tempname); // Be sure it does not exist yet remove(tempname); // Be sure it does not exist yet
@ -3986,7 +3987,7 @@ bool BGVFAM::OpenTempFile(PGLOBAL g)
FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM | FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM |
FORMAT_MESSAGE_IGNORE_INSERTS, NULL, rc, 0, FORMAT_MESSAGE_IGNORE_INSERTS, NULL, rc, 0,
(LPTSTR)tempname, _MAX_PATH, NULL); (LPTSTR)tempname, _MAX_PATH, NULL);
strncat(g->Message, tempname, sizeof(g->Message) - strlen(g->Message)); safe_strcat(g->Message, sizeof(g->Message), tempname);
return true; return true;
} // endif Tfile } // endif Tfile
#else // UNIX #else // UNIX
@ -3996,8 +3997,8 @@ bool BGVFAM::OpenTempFile(PGLOBAL g)
if (Tfile == INVALID_HANDLE_VALUE) { if (Tfile == INVALID_HANDLE_VALUE) {
int rc = errno; int rc = errno;
snprintf(g->Message, sizeof(g->Message), MSG(OPEN_ERROR), rc, MODE_INSERT, tempname); snprintf(g->Message, sizeof(g->Message), MSG(OPEN_ERROR) "%s", rc, MODE_INSERT,
strncat(g->Message, strerror(errno), sizeof(g->Message) - strlen(g->Message)); tempname, strerror(errno));
return true; return true;
} //endif Tfile } //endif Tfile
#endif // UNIX #endif // UNIX

View File

@ -29,6 +29,7 @@
#include <fcntl.h> #include <fcntl.h>
#endif // !_WIN32 #endif // !_WIN32
#include <time.h> #include <time.h>
#include <m_string.h>
/***********************************************************************/ /***********************************************************************/
/* Include application header files: */ /* Include application header files: */
@ -181,7 +182,8 @@ static bool ZipFiles(PGLOBAL g, ZIPUTIL *zutp, PCSZ pat, char *buf)
while (true) { while (true) {
if (!(FileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)) { if (!(FileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)) {
strcat(strcat(strcpy(filename, drive), direc), FileData.cFileName); snprintf(filename, sizeof(filename), "%s%s%s",
drive, direc, FileData.cFileName);
if (ZipFile(g, zutp, filename, FileData.cFileName, buf)) { if (ZipFile(g, zutp, filename, FileData.cFileName, buf)) {
FindClose(hSearch); FindClose(hSearch);
@ -217,7 +219,7 @@ static bool ZipFiles(PGLOBAL g, ZIPUTIL *zutp, PCSZ pat, char *buf)
struct dirent *entry; struct dirent *entry;
_splitpath(filename, NULL, direc, pattern, ftype); _splitpath(filename, NULL, direc, pattern, ftype);
strcat(pattern, ftype); safe_strcat(pattern, sizeof(pattern), ftype);
// Start searching files in the target directory. // Start searching files in the target directory.
if (!(dir = opendir(direc))) { if (!(dir = opendir(direc))) {
@ -226,7 +228,7 @@ static bool ZipFiles(PGLOBAL g, ZIPUTIL *zutp, PCSZ pat, char *buf)
} // endif dir } // endif dir
while ((entry = readdir(dir))) { while ((entry = readdir(dir))) {
strcat(strcpy(fn, direc), entry->d_name); snprintf(fn, sizeof(fn), "%s%s", direc, entry->d_name);
if (lstat(fn, &fileinfo) < 0) { if (lstat(fn, &fileinfo) < 0) {
snprintf(g->Message, sizeof(g->Message), "%s: %s", fn, strerror(errno)); snprintf(g->Message, sizeof(g->Message), "%s: %s", fn, strerror(errno));
@ -240,7 +242,7 @@ static bool ZipFiles(PGLOBAL g, ZIPUTIL *zutp, PCSZ pat, char *buf)
if (fnmatch(pattern, entry->d_name, 0)) if (fnmatch(pattern, entry->d_name, 0))
continue; // Not a match continue; // Not a match
strcat(strcpy(filename, direc), entry->d_name); snprintf(filename, sizeof(filename), "%s%s", direc, entry->d_name);
if (ZipFile(g, zutp, filename, entry->d_name, buf)) { if (ZipFile(g, zutp, filename, entry->d_name, buf)) {
closedir(dir); closedir(dir);

Some files were not shown because too many files have changed in this diff