Patrick Crews 2008-12-10 11:50:01 -05:00
commit d571e652a7
29 changed files with 501 additions and 134 deletions

View File

@ -6,7 +6,7 @@
let $counter= 500; let $counter= 500;
while ($mysql_errno) while ($mysql_errno)
{ {
--error 0,2002,2006,2013 --error 0,2002,2003,2006,2013
show status; show status;
dec $counter; dec $counter;

View File

@ -21,6 +21,7 @@ flush logs;
*** must be a warning master-bin.000001 was not found *** *** must be a warning master-bin.000001 was not found ***
Warnings: Warnings:
Warning 1612 Being purged log MYSQLTEST_VARDIR/log/master-bin.000001 was not found Warning 1612 Being purged log MYSQLTEST_VARDIR/log/master-bin.000001 was not found
Warning 1612 Being purged log MYSQLTEST_VARDIR/log/master-bin.000001 was not found
*** must show one record, of the active binlog, left in the index file after PURGE *** *** must show one record, of the active binlog, left in the index file after PURGE ***
show binary logs; show binary logs;
Log_name File_size Log_name File_size

View File

@ -1707,4 +1707,9 @@ where a.VARIABLE_NAME = b.VARIABLE_NAME;
a.VARIABLE_VALUE - b.VARIABLE_VALUE a.VARIABLE_VALUE - b.VARIABLE_VALUE
2 2
drop table t0; drop table t0;
CREATE TABLE t1(a INT) KEY_BLOCK_SIZE=1;
SELECT CREATE_OPTIONS FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME='t1';
CREATE_OPTIONS
KEY_BLOCK_SIZE=1
DROP TABLE t1;
End of 5.1 tests. End of 5.1 tests.

View File

@ -1,4 +1,11 @@
DROP TABLE IF EXISTS t1; DROP TABLE IF EXISTS t1;
CREATE TABLE t1 (a INT PRIMARY KEY)
ENGINE MYISAM
PARTITION BY HASH (a)
PARTITIONS 1;
INSERT INTO t1 VALUES (1),(2),(3),(4),(5);
ALTER TABLE t1 REORGANIZE PARTITION;
DROP TABLE t1;
create table t1 (a int) create table t1 (a int)
partition by range (a) partition by range (a)
subpartition by key (a) subpartition by key (a)

View File

@ -4358,3 +4358,29 @@ a
4 4
5 5
DROP TABLE t1; DROP TABLE t1;
CREATE TABLE A (date_key date);
CREATE TABLE C (
pk int,
int_nokey int,
int_key int,
date_key date NOT NULL,
date_nokey date,
varchar_key varchar(1)
);
INSERT INTO C VALUES
(1,1,1,'0000-00-00',NULL,NULL),
(1,1,1,'0000-00-00',NULL,NULL);
SELECT 1 FROM C WHERE pk > ANY (SELECT 1 FROM C);
1
SELECT COUNT(DISTINCT 1) FROM C
WHERE date_key = (SELECT 1 FROM A WHERE C.date_key IS NULL) GROUP BY pk;
COUNT(DISTINCT 1)
SELECT date_nokey FROM C
WHERE int_key IN (SELECT 1 FROM A)
HAVING date_nokey = '10:41:7'
ORDER BY date_key;
date_nokey
Warnings:
Warning 1292 Incorrect date value: '10:41:7' for column 'date_nokey' at row 1
DROP TABLE A,C;
End of 5.1 tests

View File

@ -393,15 +393,11 @@ f1 + 0e0
-1.0000000150475e+30 -1.0000000150475e+30
drop table t1; drop table t1;
create table t1(d double, u bigint unsigned); create table t1(d double, u bigint unsigned);
insert into t1(d) values (9.2233720368547777e+18), insert into t1(d) values (9.22337203685479e18),
(9.223372036854779e18),
(9.22337203685479e18),
(1.84e19); (1.84e19);
update t1 set u = d; update t1 set u = d;
select u from t1; select u from t1;
u u
9223372036854775808
9223372036854779904
9223372036854790144 9223372036854790144
18400000000000000000 18400000000000000000
drop table t1; drop table t1;

View File

@ -1029,4 +1029,28 @@ SELECT 1 FROM t1 ORDER BY(UPDATEXML(a, '1', '1'));
1 1
1 1
DROP TABLE t1; DROP TABLE t1;
SET @xml=
'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<title> Title - document with document declaration</title>
</head>
<body> Hi, Im a webpage with document a declaration </body>
</html>';
SELECT ExtractValue(@xml, 'html/head/title');
ExtractValue(@xml, 'html/head/title')
Title - document with document declaration
SELECT ExtractValue(@xml, 'html/body');
ExtractValue(@xml, 'html/body')
Hi, Im a webpage with document a declaration
SELECT ExtractValue('<xml "xxx" "yyy">CharData</xml>', '/xml');
ExtractValue('<xml "xxx" "yyy">CharData</xml>', '/xml')
NULL
Warnings:
Warning 1525 Incorrect XML value: 'parse error at line 1 pos 11: STRING unexpected ('>' wanted)'
SELECT ExtractValue('<xml xxx "yyy">CharData</xml>', '/xml');
ExtractValue('<xml xxx "yyy">CharData</xml>', '/xml')
NULL
Warnings:
Warning 1525 Incorrect XML value: 'parse error at line 1 pos 17: STRING unexpected ('>' wanted)'
End of 5.1 tests End of 5.1 tests

View File

@ -213,5 +213,10 @@ START TRANSACTION;
INSERT INTO t1 VALUES (NULL, 'first row t2'); INSERT INTO t1 VALUES (NULL, 'first row t2');
SET autocommit=OFF; SET autocommit=OFF;
ALTER TABLE t1 AUTO_INCREMENT = 10; ALTER TABLE t1 AUTO_INCREMENT = 10;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
INSERT INTO t1 VALUES (NULL, 'second row t2'); INSERT INTO t1 VALUES (NULL, 'second row t2');
SELECT a,b FROM t1 ORDER BY a;
a b
1 first row t2
2 second row t2
DROP TABLE t1; DROP TABLE t1;

View File

@ -68,10 +68,12 @@ INSERT INTO t1 VALUES (NULL, 'first row t2');
--connection con2 --connection con2
SET autocommit=OFF; SET autocommit=OFF;
--error ER_LOCK_WAIT_TIMEOUT
ALTER TABLE t1 AUTO_INCREMENT = 10; ALTER TABLE t1 AUTO_INCREMENT = 10;
--connection con1 --connection con1
INSERT INTO t1 VALUES (NULL, 'second row t2'); INSERT INTO t1 VALUES (NULL, 'second row t2');
SELECT a,b FROM t1 ORDER BY a;
--disconnect con2 --disconnect con2
--disconnect con1 --disconnect con1
--connection default --connection default

View File

@ -0,0 +1,14 @@
stop slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
reset master;
reset slave;
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
start slave;
stop slave;
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 (id INT);
start slave;
SET GLOBAL debug= "+d,crash_before_purge_logs";
FLUSH LOGS;
ERROR HY000: Lost connection to MySQL server during query
start slave;

View File

@ -0,0 +1,57 @@
--source include/master-slave.inc
--source include/have_debug.inc
--disable_reconnect
# We have to sync with master, to ensure slave had time to start properly
# before we stop it. If not, we get errors about UNIX_TIMESTAMP() in the
# log.
sync_slave_with_master;
stop slave;
--source include/wait_for_slave_to_stop.inc
# ON MASTER
connection master;
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings
CREATE TABLE t1 (id INT);
let $1=100;
disable_query_log;
begin;
while ($1)
{
eval INSERT INTO t1 VALUES( $1 );
dec $1;
}
DROP TABLE t1;
save_master_pos;
enable_query_log;
## ON SLAVE
connection slave;
start slave;
--source include/wait_for_slave_to_start.inc
sync_with_master 0;
connection master;
save_master_pos;
connection slave;
--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/slave0.expect
SET GLOBAL debug= "+d,crash_before_purge_logs";
--error 2013
# try to rotate logs
FLUSH LOGS;
--enable_reconnect
--source include/wait_until_connected_again.inc
start slave;
--source include/wait_for_slave_to_start.inc
sync_with_master 0;
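The test leans on the server honouring the injected debug keyword: once SET GLOBAL debug= "+d,crash_before_purge_logs" is active, the next FLUSH LOGS deliberately aborts the server, the client sees error 2013, and the restarted server must come back with a usable log index before the slave is started again. A minimal standalone sketch of such a keyword-gated crash point, written in plain C++ rather than with the DBUG package (all names below are hypothetical):

#include <cstdlib>
#include <set>
#include <string>

// Keywords switched on by something like SET GLOBAL debug= "+d,<keyword>".
static std::set<std::string> active_debug_keywords;

// Run `action` only when the named keyword has been activated;
// stands in for DBUG_EXECUTE_IF("crash_before_purge_logs", abort();).
template <class F>
static void execute_if(const std::string &keyword, F action)
{
  if (active_debug_keywords.count(keyword))
    action();
}

static void purge_first_log_sketch()
{
  // ... the relay log info would be flushed to disk here ...
  execute_if("crash_before_purge_logs", [] { std::abort(); });
  // ... the actual purge and index update would follow ...
}

int main()
{
  active_debug_keywords.insert("crash_before_purge_logs");  // "+d,..."
  purge_first_log_sketch();  // aborts here, as FLUSH LOGS does in the test
}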

View File

@ -1383,4 +1383,11 @@ select a.VARIABLE_VALUE - b.VARIABLE_VALUE from t0 b, information_schema.global_
where a.VARIABLE_NAME = b.VARIABLE_NAME; where a.VARIABLE_NAME = b.VARIABLE_NAME;
drop table t0; drop table t0;
#
# Bug#35275 INFORMATION_SCHEMA.TABLES.CREATE_OPTIONS omits KEY_BLOCK_SIZE
#
CREATE TABLE t1(a INT) KEY_BLOCK_SIZE=1;
SELECT CREATE_OPTIONS FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME='t1';
DROP TABLE t1;
--echo End of 5.1 tests. --echo End of 5.1 tests.

View File

@ -3,6 +3,17 @@
DROP TABLE IF EXISTS t1; DROP TABLE IF EXISTS t1;
--enable_warnings --enable_warnings
#
# Bug 40389: REORGANIZE PARTITION crashes when only using one partition
#
CREATE TABLE t1 (a INT PRIMARY KEY)
ENGINE MYISAM
PARTITION BY HASH (a)
PARTITIONS 1;
INSERT INTO t1 VALUES (1),(2),(3),(4),(5);
ALTER TABLE t1 REORGANIZE PARTITION;
DROP TABLE t1;
# #
# Bug 21143: mysqld hang when error in number of subparts in # Bug 21143: mysqld hang when error in number of subparts in
# REORGANIZE command # REORGANIZE command

View File

@ -3701,3 +3701,36 @@ SELECT a FROM t1 ORDER BY a LIMIT 2;
SELECT a FROM t1 ORDER BY a LIMIT 2,4294967296; SELECT a FROM t1 ORDER BY a LIMIT 2,4294967296;
SELECT a FROM t1 ORDER BY a LIMIT 2,4294967297; SELECT a FROM t1 ORDER BY a LIMIT 2,4294967297;
DROP TABLE t1; DROP TABLE t1;
#
# Bug #37936: ASSERT_COLUMN_MARKED_FOR_WRITE in Field_datetime::store ,
# Field_varstring::store
#
CREATE TABLE A (date_key date);
CREATE TABLE C (
pk int,
int_nokey int,
int_key int,
date_key date NOT NULL,
date_nokey date,
varchar_key varchar(1)
);
INSERT INTO C VALUES
(1,1,1,'0000-00-00',NULL,NULL),
(1,1,1,'0000-00-00',NULL,NULL);
SELECT 1 FROM C WHERE pk > ANY (SELECT 1 FROM C);
SELECT COUNT(DISTINCT 1) FROM C
WHERE date_key = (SELECT 1 FROM A WHERE C.date_key IS NULL) GROUP BY pk;
SELECT date_nokey FROM C
WHERE int_key IN (SELECT 1 FROM A)
HAVING date_nokey = '10:41:7'
ORDER BY date_key;
DROP TABLE A,C;
--echo End of 5.1 tests

View File

@ -259,9 +259,7 @@ drop table t1;
create table t1(d double, u bigint unsigned); create table t1(d double, u bigint unsigned);
insert into t1(d) values (9.2233720368547777e+18), insert into t1(d) values (9.22337203685479e18),
(9.223372036854779e18),
(9.22337203685479e18),
(1.84e19); (1.84e19);
update t1 set u = d; update t1 set u = d;

View File

@ -551,4 +551,29 @@ INSERT INTO t1 VALUES (0), (0);
SELECT 1 FROM t1 ORDER BY(UPDATEXML(a, '1', '1')); SELECT 1 FROM t1 ORDER BY(UPDATEXML(a, '1', '1'));
DROP TABLE t1; DROP TABLE t1;
#
# BUG#38227 EXTRACTVALUE doesn't work with DTD declarations
#
# Check that quoted strings work fine in DOCTYPE declaration.
#
SET @xml=
'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<title> Title - document with document declaration</title>
</head>
<body> Hi, Im a webpage with document a declaration </body>
</html>';
SELECT ExtractValue(@xml, 'html/head/title');
SELECT ExtractValue(@xml, 'html/body');
# These two documents will fail.
# Quoted strings are not allowed in regular tags
#
SELECT ExtractValue('<xml "xxx" "yyy">CharData</xml>', '/xml');
SELECT ExtractValue('<xml xxx "yyy">CharData</xml>', '/xml');
--echo End of 5.1 tests --echo End of 5.1 tests

View File

@ -3431,7 +3431,7 @@ int ha_partition::rnd_next(uchar *buf)
while (TRUE) while (TRUE)
{ {
int result= file->rnd_next(buf); result= file->rnd_next(buf);
if (!result) if (!result)
{ {
m_last_part= part_id; m_last_part= part_id;

View File

@ -394,19 +394,16 @@ static bool convert_constant_item(THD *thd, Item_field *field_item,
TABLE *table= field->table; TABLE *table= field->table;
ulong orig_sql_mode= thd->variables.sql_mode; ulong orig_sql_mode= thd->variables.sql_mode;
enum_check_fields orig_count_cuted_fields= thd->count_cuted_fields; enum_check_fields orig_count_cuted_fields= thd->count_cuted_fields;
my_bitmap_map *old_write_map; my_bitmap_map *old_maps[2];
my_bitmap_map *old_read_map;
ulonglong orig_field_val; /* original field value if valid */ ulonglong orig_field_val; /* original field value if valid */
LINT_INIT(old_write_map); LINT_INIT(old_maps[0]);
LINT_INIT(old_read_map); LINT_INIT(old_maps[1]);
LINT_INIT(orig_field_val); LINT_INIT(orig_field_val);
if (table) if (table)
{ dbug_tmp_use_all_columns(table, old_maps,
old_write_map= dbug_tmp_use_all_columns(table, table->write_set); table->read_set, table->write_set);
old_read_map= dbug_tmp_use_all_columns(table, table->read_set);
}
/* For comparison purposes allow invalid dates like 2000-01-32 */ /* For comparison purposes allow invalid dates like 2000-01-32 */
thd->variables.sql_mode= (orig_sql_mode & ~MODE_NO_ZERO_DATE) | thd->variables.sql_mode= (orig_sql_mode & ~MODE_NO_ZERO_DATE) |
MODE_INVALID_DATES; MODE_INVALID_DATES;
@ -441,10 +438,7 @@ static bool convert_constant_item(THD *thd, Item_field *field_item,
thd->variables.sql_mode= orig_sql_mode; thd->variables.sql_mode= orig_sql_mode;
thd->count_cuted_fields= orig_count_cuted_fields; thd->count_cuted_fields= orig_count_cuted_fields;
if (table) if (table)
{ dbug_tmp_restore_column_maps(table->read_set, table->write_set, old_maps);
dbug_tmp_restore_column_map(table->write_set, old_write_map);
dbug_tmp_restore_column_map(table->read_set, old_read_map);
}
} }
return result; return result;
} }

View File

@ -2314,6 +2314,7 @@ MYSQL_BIN_LOG::MYSQL_BIN_LOG()
*/ */
index_file_name[0] = 0; index_file_name[0] = 0;
bzero((char*) &index_file, sizeof(index_file)); bzero((char*) &index_file, sizeof(index_file));
bzero((char*) &purge_temp, sizeof(purge_temp));
} }
/* this is called only once */ /* this is called only once */
@ -2908,6 +2909,7 @@ err:
int MYSQL_BIN_LOG::purge_first_log(Relay_log_info* rli, bool included) int MYSQL_BIN_LOG::purge_first_log(Relay_log_info* rli, bool included)
{ {
int error; int error;
char *to_purge_if_included= NULL;
DBUG_ENTER("purge_first_log"); DBUG_ENTER("purge_first_log");
DBUG_ASSERT(is_open()); DBUG_ASSERT(is_open());
@ -2915,36 +2917,20 @@ int MYSQL_BIN_LOG::purge_first_log(Relay_log_info* rli, bool included)
DBUG_ASSERT(!strcmp(rli->linfo.log_file_name,rli->event_relay_log_name)); DBUG_ASSERT(!strcmp(rli->linfo.log_file_name,rli->event_relay_log_name));
pthread_mutex_lock(&LOCK_index); pthread_mutex_lock(&LOCK_index);
pthread_mutex_lock(&rli->log_space_lock); to_purge_if_included= my_strdup(rli->group_relay_log_name, MYF(0));
rli->relay_log.purge_logs(rli->group_relay_log_name, included,
0, 0, &rli->log_space_total);
// Tell the I/O thread to take the relay_log_space_limit into account
rli->ignore_log_space_limit= 0;
pthread_mutex_unlock(&rli->log_space_lock);
/*
Ok to broadcast after the critical region as there is no risk of
the mutex being destroyed by this thread later - this helps save
context switches
*/
pthread_cond_broadcast(&rli->log_space_cond);
/* /*
Read the next log file name from the index file and pass it back to Read the next log file name from the index file and pass it back to
the caller the caller.
If included is true, we want the first relay log;
otherwise we want the one after event_relay_log_name.
*/ */
if ((included && (error=find_log_pos(&rli->linfo, NullS, 0))) || if((error=find_log_pos(&rli->linfo, rli->event_relay_log_name, 0)) ||
(!included && (error=find_next_log(&rli->linfo, 0)))
((error=find_log_pos(&rli->linfo, rli->event_relay_log_name, 0)) ||
(error=find_next_log(&rli->linfo, 0)))))
{ {
char buff[22]; char buff[22];
sql_print_error("next log error: %d offset: %s log: %s included: %d", sql_print_error("next log error: %d offset: %s log: %s included: %d",
error, error,
llstr(rli->linfo.index_file_offset,buff), llstr(rli->linfo.index_file_offset,buff),
rli->group_relay_log_name, rli->event_relay_log_name,
included); included);
goto err; goto err;
} }
@ -2972,7 +2958,42 @@ int MYSQL_BIN_LOG::purge_first_log(Relay_log_info* rli, bool included)
/* Store where we are in the new file for the execution thread */ /* Store where we are in the new file for the execution thread */
flush_relay_log_info(rli); flush_relay_log_info(rli);
DBUG_EXECUTE_IF("crash_before_purge_logs", abort(););
pthread_mutex_lock(&rli->log_space_lock);
rli->relay_log.purge_logs(to_purge_if_included, included,
0, 0, &rli->log_space_total);
// Tell the I/O thread to take the relay_log_space_limit into account
rli->ignore_log_space_limit= 0;
pthread_mutex_unlock(&rli->log_space_lock);
/*
Ok to broadcast after the critical region as there is no risk of
the mutex being destroyed by this thread later - this helps save
context switches
*/
pthread_cond_broadcast(&rli->log_space_cond);
/*
* Need to update the log pos because purge logs has been called
* after initially fetching the log pos at the beginning of the method.
*/
if(error=find_log_pos(&rli->linfo, rli->event_relay_log_name, 0))
{
char buff[22];
sql_print_error("next log error: %d offset: %s log: %s included: %d",
error,
llstr(rli->linfo.index_file_offset,buff),
rli->group_relay_log_name,
included);
goto err;
}
/* If included was passed, rli->linfo should be the first entry. */
DBUG_ASSERT(!included || rli->linfo.index_file_start_offset == 0);
err: err:
my_free(to_purge_if_included, MYF(0));
pthread_mutex_unlock(&LOCK_index); pthread_mutex_unlock(&LOCK_index);
DBUG_RETURN(error); DBUG_RETURN(error);
} }
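The comment about broadcasting after the critical region captures a common pattern: the shared log-space counter is updated while rli->log_space_lock is held, but the wake-up is issued only after the lock has been released, so woken threads are not forced to block again on a mutex the signaller still holds. A small standalone sketch of the same ordering with std::condition_variable (the names are illustrative, not MySQL's):

#include <condition_variable>
#include <mutex>

static std::mutex              space_lock;           // stands in for rli->log_space_lock
static std::condition_variable space_cond;           // stands in for rli->log_space_cond
static unsigned long long      log_space_total = 1;  // shared state guarded by space_lock

static void release_log_space(unsigned long long freed)
{
  {
    std::lock_guard<std::mutex> guard(space_lock);   // critical region
    log_space_total -= freed;
  }                                                  // mutex released here
  // Broadcasting after the unlock is safe as long as the condition variable
  // outlives this call, and it spares waiters an immediate block on the mutex.
  space_cond.notify_all();
}

int main() { release_log_space(1); }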
@ -3032,8 +3053,36 @@ int MYSQL_BIN_LOG::purge_logs(const char *to_log,
if (need_mutex) if (need_mutex)
pthread_mutex_lock(&LOCK_index); pthread_mutex_lock(&LOCK_index);
if ((error=find_log_pos(&log_info, to_log, 0 /*no mutex*/))) if ((error=find_log_pos(&log_info, to_log, 0 /*no mutex*/)))
{
sql_print_error("MYSQL_LOG::purge_logs was called with file %s not "
"listed in the index.", to_log);
goto err; goto err;
}
/*
For crash recovery reasons the index needs to be updated before
any files are deleted. Move files to be deleted into a temp file
to be processed after the index is updated.
*/
if (!my_b_inited(&purge_temp))
{
if (error=open_cached_file(&purge_temp, mysql_tmpdir, TEMP_PREFIX,
DISK_BUFFER_SIZE, MYF(MY_WME)))
{
sql_print_error("MYSQL_LOG::purge_logs failed to open purge_temp");
goto err;
}
}
else
{
if (error=reinit_io_cache(&purge_temp, WRITE_CACHE, 0, 0, 1))
{
sql_print_error("MYSQL_LOG::purge_logs failed to reinit purge_temp "
"for write");
goto err;
}
}
/* /*
File name exists in index file; delete until we find this file File name exists in index file; delete until we find this file
@ -3044,6 +3093,61 @@ int MYSQL_BIN_LOG::purge_logs(const char *to_log,
while ((strcmp(to_log,log_info.log_file_name) || (exit_loop=included)) && while ((strcmp(to_log,log_info.log_file_name) || (exit_loop=included)) &&
!log_in_use(log_info.log_file_name)) !log_in_use(log_info.log_file_name))
{ {
if ((error=my_b_write(&purge_temp, (const uchar*)log_info.log_file_name,
strlen(log_info.log_file_name))) ||
(error=my_b_write(&purge_temp, (const uchar*)"\n", 1)))
{
sql_print_error("MYSQL_LOG::purge_logs failed to copy %s to purge_temp",
log_info.log_file_name);
goto err;
}
if (find_next_log(&log_info, 0) || exit_loop)
break;
}
/* We know how many files to delete. Update index file. */
if (error=update_log_index(&log_info, need_update_threads))
{
sql_print_error("MSYQL_LOG::purge_logs failed to update the index file");
goto err;
}
DBUG_EXECUTE_IF("crash_after_update_index", abort(););
/* Switch purge_temp for read. */
if (error=reinit_io_cache(&purge_temp, READ_CACHE, 0, 0, 0))
{
sql_print_error("MSYQL_LOG::purge_logs failed to reinit purge_temp "
"for read");
goto err;
}
/* Read each entry from purge_temp and delete the file. */
for (;;)
{
uint length;
if ((length=my_b_gets(&purge_temp, log_info.log_file_name,
FN_REFLEN)) <= 1)
{
if (purge_temp.error)
{
error= purge_temp.error;
sql_print_error("MSYQL_LOG::purge_logs error %d reading from "
"purge_temp", error);
goto err;
}
/* Reached EOF */
break;
}
/* Get rid of the trailing '\n' */
log_info.log_file_name[length-1]= 0;
ha_binlog_index_purge_file(current_thd, log_info.log_file_name);
MY_STAT s; MY_STAT s;
if (!my_stat(log_info.log_file_name, &s, MYF(0))) if (!my_stat(log_info.log_file_name, &s, MYF(0)))
{ {
@ -3144,20 +3248,6 @@ int MYSQL_BIN_LOG::purge_logs(const char *to_log,
} }
} }
} }
ha_binlog_index_purge_file(current_thd, log_info.log_file_name);
if (find_next_log(&log_info, 0) || exit_loop)
break;
}
/*
If we get killed -9 here, the sysadmin would have to edit
the log index file after restart - otherwise, this should be safe
*/
error= update_log_index(&log_info, need_update_threads);
if (error == 0) {
error = ret;
} }
err: err:
@ -3171,7 +3261,7 @@ err:
index file. index file.
@param thd Thread pointer @param thd Thread pointer
@param before_date Delete all log files before given date. @param purge_time Delete all log files before given date.
@note @note
If any of the logs before the deleted one is in use, If any of the logs before the deleted one is in use,
@ -3188,6 +3278,7 @@ err:
int MYSQL_BIN_LOG::purge_logs_before_date(time_t purge_time) int MYSQL_BIN_LOG::purge_logs_before_date(time_t purge_time)
{ {
int error; int error;
char to_log[FN_REFLEN];
LOG_INFO log_info; LOG_INFO log_info;
MY_STAT stat_area; MY_STAT stat_area;
THD *thd= current_thd; THD *thd= current_thd;
@ -3195,12 +3286,8 @@ int MYSQL_BIN_LOG::purge_logs_before_date(time_t purge_time)
DBUG_ENTER("purge_logs_before_date"); DBUG_ENTER("purge_logs_before_date");
pthread_mutex_lock(&LOCK_index); pthread_mutex_lock(&LOCK_index);
to_log[0]= 0;
/*
Delete until we find curren file
or a file that is used or a file
that is older than purge_time.
*/
if ((error=find_log_pos(&log_info, NullS, 0 /*no mutex*/))) if ((error=find_log_pos(&log_info, NullS, 0 /*no mutex*/)))
goto err; goto err;
@ -3250,55 +3337,18 @@ int MYSQL_BIN_LOG::purge_logs_before_date(time_t purge_time)
} }
else else
{ {
if (stat_area.st_mtime >= purge_time) if (stat_area.st_mtime < purge_time)
strmake(to_log,
log_info.log_file_name,
sizeof(log_info.log_file_name));
else
break; break;
if (my_delete(log_info.log_file_name, MYF(0)))
{
if (my_errno == ENOENT)
{
/* It's not fatal even if we can't delete a log file */
if (thd)
{
push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_LOG_PURGE_NO_FILE, ER(ER_LOG_PURGE_NO_FILE),
log_info.log_file_name);
}
sql_print_information("Failed to delete file '%s'",
log_info.log_file_name);
my_errno= 0;
}
else
{
if (thd)
{
push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
ER_BINLOG_PURGE_FATAL_ERR,
"a problem with deleting %s; "
"consider examining correspondence "
"of your binlog index file "
"to the actual binlog files",
log_info.log_file_name);
}
else
{
sql_print_information("Failed to delete log file '%s'",
log_info.log_file_name);
}
error= LOG_INFO_FATAL;
goto err;
}
}
ha_binlog_index_purge_file(current_thd, log_info.log_file_name);
} }
if (find_next_log(&log_info, 0)) if (find_next_log(&log_info, 0))
break; break;
} }
/* error= (to_log[0] ? purge_logs(to_log, 1, 0, 1, (ulonglong *) 0) : 0);
If we get killed -9 here, the sysadmin would have to edit
the log index file after restart - otherwise, this should be safe
*/
error= update_log_index(&log_info, 1);
err: err:
pthread_mutex_unlock(&LOCK_index); pthread_mutex_unlock(&LOCK_index);
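The reworked purge_logs follows the ordering spelled out in its comment: the names of the logs to drop are first staged in the purge_temp cache, the index file is rewritten, and only then are the staged files removed, so a crash in between (the crash_after_update_index point) leaves at worst unreferenced files on disk rather than an index naming deleted logs. A minimal standalone sketch of that ordering, using the C++ standard library instead of IO_CACHE and the my_* wrappers (the file layout and names below are hypothetical):

#include <filesystem>
#include <fstream>
#include <string>
#include <system_error>
#include <vector>

namespace fs = std::filesystem;

static void purge_logs_sketch(const std::string &index_path,
                              const std::string &purge_up_to)  // first log to keep
{
  std::vector<std::string> keep, doomed;
  {
    std::ifstream index(index_path);
    std::string name;
    bool purging = true;
    while (std::getline(index, name))
    {
      if (name == purge_up_to)
        purging = false;                        // keep this one and everything after it
      (purging ? doomed : keep).push_back(name);
    }
  }

  // Step 1: rewrite the index so it no longer references the doomed logs.
  {
    std::ofstream index(index_path, std::ios::trunc);
    for (const std::string &name : keep)
      index << name << '\n';
  }

  // Step 2: only now remove the staged files; a file that is already gone
  // is not fatal (compare the ER_LOG_PURGE_NO_FILE warning in the results).
  for (const std::string &name : doomed)
  {
    std::error_code ec;
    fs::remove(name, ec);
  }
}

int main()
{
  purge_logs_sketch("binlog.index", "binlog.000004");
}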

View File

@ -233,6 +233,13 @@ class MYSQL_BIN_LOG: public TC_LOG, private MYSQL_LOG
pthread_cond_t update_cond; pthread_cond_t update_cond;
ulonglong bytes_written; ulonglong bytes_written;
IO_CACHE index_file; IO_CACHE index_file;
/*
purge_temp is a temp file used in purge_logs so that the index file
can be updated before deleting files from disk, yielding better crash
recovery. It is created on demand the first time purge_logs is called
and then reused for subsequent calls. It is cleaned up in cleanup().
*/
IO_CACHE purge_temp;
char index_file_name[FN_REFLEN]; char index_file_name[FN_REFLEN];
/* /*
The max size before rotation (usable only if log_type == LOG_BIN: binary The max size before rotation (usable only if log_type == LOG_BIN: binary

View File

@ -2668,7 +2668,7 @@ bool prune_partitions(THD *thd, TABLE *table, Item *pprune_cond)
PART_PRUNE_PARAM prune_param; PART_PRUNE_PARAM prune_param;
MEM_ROOT alloc; MEM_ROOT alloc;
RANGE_OPT_PARAM *range_par= &prune_param.range_param; RANGE_OPT_PARAM *range_par= &prune_param.range_param;
my_bitmap_map *old_read_set, *old_write_set; my_bitmap_map *old_sets[2];
prune_param.part_info= part_info; prune_param.part_info= part_info;
init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0); init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0);
@ -2682,8 +2682,8 @@ bool prune_partitions(THD *thd, TABLE *table, Item *pprune_cond)
DBUG_RETURN(FALSE); DBUG_RETURN(FALSE);
} }
old_write_set= dbug_tmp_use_all_columns(table, table->write_set); dbug_tmp_use_all_columns(table, old_sets,
old_read_set= dbug_tmp_use_all_columns(table, table->read_set); table->read_set, table->write_set);
range_par->thd= thd; range_par->thd= thd;
range_par->table= table; range_par->table= table;
/* range_par->cond doesn't need initialization */ /* range_par->cond doesn't need initialization */
@ -2773,8 +2773,7 @@ all_used:
retval= FALSE; // some partitions are used retval= FALSE; // some partitions are used
mark_all_partitions_as_used(prune_param.part_info); mark_all_partitions_as_used(prune_param.part_info);
end: end:
dbug_tmp_restore_column_map(table->write_set, old_write_set); dbug_tmp_restore_column_maps(table->read_set, table->write_set, old_sets);
dbug_tmp_restore_column_map(table->read_set, old_read_set);
thd->no_errors=0; thd->no_errors=0;
thd->mem_root= range_par->old_root; thd->mem_root= range_par->old_root;
free_root(&alloc,MYF(0)); // Return memory & allocator free_root(&alloc,MYF(0)); // Return memory & allocator
@ -11145,9 +11144,9 @@ print_key(KEY_PART *key_part, const uchar *key, uint used_length)
String tmp(buff,sizeof(buff),&my_charset_bin); String tmp(buff,sizeof(buff),&my_charset_bin);
uint store_length; uint store_length;
TABLE *table= key_part->field->table; TABLE *table= key_part->field->table;
my_bitmap_map *old_write_set, *old_read_set; my_bitmap_map *old_sets[2];
old_write_set= dbug_tmp_use_all_columns(table, table->write_set);
old_read_set= dbug_tmp_use_all_columns(table, table->read_set); dbug_tmp_use_all_columns(table, old_sets, table->read_set, table->write_set);
for (; key < key_end; key+=store_length, key_part++) for (; key < key_end; key+=store_length, key_part++)
{ {
@ -11173,8 +11172,7 @@ print_key(KEY_PART *key_part, const uchar *key, uint used_length)
if (key+store_length < key_end) if (key+store_length < key_end)
fputc('/',DBUG_FILE); fputc('/',DBUG_FILE);
} }
dbug_tmp_restore_column_map(table->write_set, old_write_set); dbug_tmp_restore_column_maps(table->read_set, table->write_set, old_sets);
dbug_tmp_restore_column_map(table->read_set, old_read_set);
} }
@ -11182,18 +11180,16 @@ static void print_quick(QUICK_SELECT_I *quick, const key_map *needed_reg)
{ {
char buf[MAX_KEY/8+1]; char buf[MAX_KEY/8+1];
TABLE *table; TABLE *table;
my_bitmap_map *old_read_map, *old_write_map; my_bitmap_map *old_sets[2];
DBUG_ENTER("print_quick"); DBUG_ENTER("print_quick");
if (!quick) if (!quick)
DBUG_VOID_RETURN; DBUG_VOID_RETURN;
DBUG_LOCK_FILE; DBUG_LOCK_FILE;
table= quick->head; table= quick->head;
old_read_map= dbug_tmp_use_all_columns(table, table->read_set); dbug_tmp_use_all_columns(table, old_sets, table->read_set, table->write_set);
old_write_map= dbug_tmp_use_all_columns(table, table->write_set);
quick->dbug_dump(0, TRUE); quick->dbug_dump(0, TRUE);
dbug_tmp_restore_column_map(table->read_set, old_read_map); dbug_tmp_restore_column_maps(table->read_set, table->write_set, old_sets);
dbug_tmp_restore_column_map(table->write_set, old_write_map);
fprintf(DBUG_FILE,"other_keys: 0x%s:\n", needed_reg->print(buf)); fprintf(DBUG_FILE,"other_keys: 0x%s:\n", needed_reg->print(buf));

View File

@ -867,6 +867,7 @@ bool partition_info::check_partition_info(THD *thd, handlerton **eng_type,
if (part_type != HASH_PARTITION || !list_of_part_fields) if (part_type != HASH_PARTITION || !list_of_part_fields)
{ {
DBUG_ASSERT(part_expr);
err= part_expr->walk(&Item::check_partition_func_processor, 0, err= part_expr->walk(&Item::check_partition_func_processor, 0,
NULL); NULL);
if (!err && is_sub_partitioned() && !list_of_subpart_fields) if (!err && is_sub_partitioned() && !list_of_subpart_fields)

View File

@ -85,6 +85,7 @@ class Materialized_cursor: public Server_side_cursor
List<Item> item_list; List<Item> item_list;
ulong fetch_limit; ulong fetch_limit;
ulong fetch_count; ulong fetch_count;
bool is_rnd_inited;
public: public:
Materialized_cursor(select_result *result, TABLE *table); Materialized_cursor(select_result *result, TABLE *table);
@ -190,7 +191,11 @@ int mysql_open_cursor(THD *thd, uint flags, select_result *result,
such command is SHOW VARIABLES or SHOW STATUS. such command is SHOW VARIABLES or SHOW STATUS.
*/ */
if (rc) if (rc)
{
if (result_materialize->materialized_cursor)
delete result_materialize->materialized_cursor;
goto err_open; goto err_open;
}
if (sensitive_cursor->is_open()) if (sensitive_cursor->is_open())
{ {
@ -542,7 +547,8 @@ Materialized_cursor::Materialized_cursor(select_result *result_arg,
:Server_side_cursor(&table_arg->mem_root, result_arg), :Server_side_cursor(&table_arg->mem_root, result_arg),
table(table_arg), table(table_arg),
fetch_limit(0), fetch_limit(0),
fetch_count(0) fetch_count(0),
is_rnd_inited(0)
{ {
fake_unit.init_query(); fake_unit.init_query();
fake_unit.thd= table->in_use; fake_unit.thd= table->in_use;
@ -599,11 +605,12 @@ int Materialized_cursor::open(JOIN *join __attribute__((unused)))
THD *thd= fake_unit.thd; THD *thd= fake_unit.thd;
int rc; int rc;
Query_arena backup_arena; Query_arena backup_arena;
thd->set_n_backup_active_arena(this, &backup_arena); thd->set_n_backup_active_arena(this, &backup_arena);
/* Create a list of fields and start sequential scan */ /* Create a list of fields and start sequential scan */
rc= (result->prepare(item_list, &fake_unit) || rc= result->prepare(item_list, &fake_unit);
table->file->ha_rnd_init(TRUE)); if (!rc && !(rc= table->file->ha_rnd_init(TRUE)))
is_rnd_inited= 1;
thd->restore_active_arena(this, &backup_arena); thd->restore_active_arena(this, &backup_arena);
if (rc == 0) if (rc == 0)
{ {
@ -678,7 +685,8 @@ void Materialized_cursor::close()
{ {
/* Free item_list items */ /* Free item_list items */
free_items(); free_items();
(void) table->file->ha_rnd_end(); if (is_rnd_inited)
(void) table->file->ha_rnd_end();
/* /*
We need to grab table->mem_root to prevent free_tmp_table from freeing: We need to grab table->mem_root to prevent free_tmp_table from freeing:
the cursor object was allocated in this memory. the cursor object was allocated in this memory.

View File

@ -4233,9 +4233,8 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info,
after the change as before. Thus we can reply ok immediately after the change as before. Thus we can reply ok immediately
without any changes at all. without any changes at all.
*/ */
DBUG_RETURN(fast_end_partition(thd, ULL(0), ULL(0), *fast_alter_partition= TRUE;
table, NULL, DBUG_RETURN(FALSE);
TRUE, NULL, FALSE));
} }
else if (new_part_no > curr_part_no) else if (new_part_no > curr_part_no)
{ {

View File

@ -1667,8 +1667,11 @@ JOIN::exec()
(zero_result_cause?zero_result_cause:"No tables used")); (zero_result_cause?zero_result_cause:"No tables used"));
else else
{ {
result->send_fields(*columns_list, if (result->send_fields(*columns_list,
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF); Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
{
DBUG_VOID_RETURN;
}
/* /*
We have to test for 'conds' here as the WHERE may not be constant We have to test for 'conds' here as the WHERE may not be constant
even if we don't have any tables for prepared statements or if even if we don't have any tables for prepared statements or if

View File

@ -3589,6 +3589,11 @@ static int get_schema_tables_record(THD *thd, TABLE_LIST *tables,
(share->transactional == HA_CHOICE_YES ? "1" : "0"), (share->transactional == HA_CHOICE_YES ? "1" : "0"),
NullS); NullS);
} }
if (share->key_block_size)
{
ptr= strmov(ptr, " KEY_BLOCK_SIZE=");
ptr= longlong10_to_str(share->key_block_size, ptr, 10);
}
#ifdef WITH_PARTITION_STORAGE_ENGINE #ifdef WITH_PARTITION_STORAGE_ENGINE
if (is_partitioned) if (is_partitioned)
ptr= strmov(ptr, " partitioned"); ptr= strmov(ptr, " partitioned");

View File

@ -1691,5 +1691,35 @@ static inline void dbug_tmp_restore_column_map(MY_BITMAP *bitmap,
#endif #endif
} }
/*
Variant of the above: handle both read and write sets.
Provide for the possibility of the read set being the same as the write set
*/
static inline void dbug_tmp_use_all_columns(TABLE *table,
my_bitmap_map **save,
MY_BITMAP *read_set,
MY_BITMAP *write_set)
{
#ifndef DBUG_OFF
save[0]= read_set->bitmap;
save[1]= write_set->bitmap;
(void) tmp_use_all_columns(table, read_set);
(void) tmp_use_all_columns(table, write_set);
#endif
}
static inline void dbug_tmp_restore_column_maps(MY_BITMAP *read_set,
MY_BITMAP *write_set,
my_bitmap_map **old)
{
#ifndef DBUG_OFF
tmp_restore_column_map(read_set, old[0]);
tmp_restore_column_map(write_set, old[1]);
#endif
}
size_t max_row_length(TABLE *table, const uchar *data); size_t max_row_length(TABLE *table, const uchar *data);
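The paired helpers above let callers save and restore the read and write column maps together, and they stay correct when both members point at the same bitmap because both original pointers are captured before either is redirected. A simplified standalone sketch of that save/restore pattern (Bitmap and the function names are stand-ins, not MySQL's types):

// Capture both originals first, then redirect; restore from the saved array.
struct Bitmap
{
  const unsigned *bits;                         // plays the role of MY_BITMAP::bitmap
};

static inline void use_all_columns_pair(const unsigned *save[2],
                                        Bitmap *read_set, Bitmap *write_set,
                                        const unsigned *all_set)
{
  save[0] = read_set->bits;                     // both originals saved up front ...
  save[1] = write_set->bits;
  read_set->bits  = all_set;                    // ... then both redirected, so the
  write_set->bits = all_set;                    // order is safe even if they alias
}

static inline void restore_column_maps_pair(Bitmap *read_set, Bitmap *write_set,
                                            const unsigned *const save[2])
{
  read_set->bits  = save[0];
  write_set->bits = save[1];
}

int main()
{
  unsigned narrow = 0x1, all = ~0u;
  Bitmap set{&narrow};                          // read and write sets alias here
  const unsigned *saved[2];
  use_all_columns_pair(saved, &set, &set, &all);
  restore_column_maps_pair(&set, &set, saved);  // set.bits points at narrow again
}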

View File

@ -328,7 +328,7 @@ int my_xml_parse(MY_XML_PARSER *p,const char *str, size_t len)
} }
while ((MY_XML_IDENT == (lex=my_xml_scan(p,&a))) || while ((MY_XML_IDENT == (lex=my_xml_scan(p,&a))) ||
(MY_XML_STRING == lex)) ((MY_XML_STRING == lex && exclam)))
{ {
MY_XML_ATTR b; MY_XML_ATTR b;
if (MY_XML_EQ == (lex=my_xml_scan(p,&b))) if (MY_XML_EQ == (lex=my_xml_scan(p,&b)))
@ -349,13 +349,22 @@ int my_xml_parse(MY_XML_PARSER *p,const char *str, size_t len)
return MY_XML_ERROR; return MY_XML_ERROR;
} }
} }
else if ((MY_XML_STRING == lex) || (MY_XML_IDENT == lex)) else if (MY_XML_IDENT == lex)
{ {
p->current_node_type= MY_XML_NODE_ATTR; p->current_node_type= MY_XML_NODE_ATTR;
if ((MY_XML_OK != my_xml_enter(p,a.beg,(size_t) (a.end-a.beg))) || if ((MY_XML_OK != my_xml_enter(p,a.beg,(size_t) (a.end-a.beg))) ||
(MY_XML_OK != my_xml_leave(p,a.beg,(size_t) (a.end-a.beg)))) (MY_XML_OK != my_xml_leave(p,a.beg,(size_t) (a.end-a.beg))))
return MY_XML_ERROR; return MY_XML_ERROR;
} }
else if ((MY_XML_STRING == lex) && exclam)
{
/*
We are in <!DOCTYPE>, e.g.
<!DOCTYPE name SYSTEM "SystemLiteral">
<!DOCTYPE name PUBLIC "PublidLiteral" "SystemLiteral">
Just skip "SystemLiteral" and "PublicidLiteral"
*/
}
else else
break; break;
} }
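The parser change encodes a simple acceptance rule: a bare quoted string in a tag's attribute area is skipped when the tag opened with "<!" (the SystemLiteral and PubidLiteral of a DOCTYPE) and rejected everywhere else, which is why the DOCTYPE document above parses while '<xml "xxx" "yyy">' draws a warning. A toy standalone illustration of that rule, not MySQL's my_xml lexer (the names and the whitespace tokenizing are simplifications):

#include <iostream>
#include <sstream>
#include <string>

// Return true when the attribute area of one tag is acceptable.
// `exclam` is true when the tag opened with "<!", as in <!DOCTYPE ...>.
static bool attributes_ok(const std::string &attrs, bool exclam)
{
  std::istringstream in(attrs);
  std::string token;
  while (in >> token)
  {
    bool is_string = !token.empty() && token.front() == '"';
    if (is_string && !exclam)
      return false;            // quoted literal outside <!...>: parse error
    // identifiers, name=value pairs and, inside <!...>, literals are fine
  }
  return true;
}

int main()
{
  std::cout << attributes_ok("HTML PUBLIC \"-//W3C//DTD\" \"loose.dtd\"", true)   // 1
            << attributes_ok("\"xxx\" \"yyy\"", false) << '\n';                   // 0
}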

View File

@ -16552,6 +16552,59 @@ static void test_change_user()
DBUG_VOID_RETURN; DBUG_VOID_RETURN;
} }
/**
Bug#37956 memory leak and / or crash with geometry and prepared statements!
*/
static void test_bug37956(void)
{
const char *query="select point(?,?)";
MYSQL_STMT *stmt=NULL;
unsigned int val=0;
MYSQL_BIND bind_param[2];
unsigned char buff[2]= { 134, 211 };
DBUG_ENTER("test_bug37956");
myheader("test_bug37956");
stmt= mysql_simple_prepare(mysql, query);
check_stmt(stmt);
val=1;
mysql_stmt_attr_set(stmt, STMT_ATTR_UPDATE_MAX_LENGTH, (void *)&val);
val=CURSOR_TYPE_READ_ONLY;
mysql_stmt_attr_set(stmt, STMT_ATTR_CURSOR_TYPE, (void *)&val);
val=0;
mysql_stmt_attr_set(stmt, STMT_ATTR_PREFETCH_ROWS, (void *)&val);
memset(bind_param, 0, sizeof(bind_param));
bind_param[0].buffer_type=MYSQL_TYPE_TINY;
bind_param[0].buffer= (void *)buff;
bind_param[0].is_null=NULL;
bind_param[0].error=NULL;
bind_param[0].is_unsigned=1;
bind_param[1].buffer_type=MYSQL_TYPE_TINY;
bind_param[1].buffer= (void *)(buff+1);
bind_param[1].is_null=NULL;
bind_param[1].error=NULL;
bind_param[1].is_unsigned=1;
if (mysql_stmt_bind_param(stmt, bind_param))
{
mysql_stmt_close(stmt);
DIE_UNLESS(0);
}
if (mysql_stmt_execute(stmt))
{
mysql_stmt_close(stmt);
DBUG_VOID_RETURN;
}
/* Should never reach here: execution returns an error. */
mysql_stmt_close(stmt);
DIE_UNLESS(0);
DBUG_VOID_RETURN;
}
/* /*
Bug#27592 (stack overrun when storing datetime value using prepared statements) Bug#27592 (stack overrun when storing datetime value using prepared statements)
*/ */
@ -17967,6 +18020,7 @@ static struct my_tests_st my_tests[]= {
{ "test_wl4166_2", test_wl4166_2 }, { "test_wl4166_2", test_wl4166_2 },
{ "test_bug38486", test_bug38486 }, { "test_bug38486", test_bug38486 },
{ "test_bug40365", test_bug40365 }, { "test_bug40365", test_bug40365 },
{ "test_bug37956", test_bug37956 },
{ 0, 0 } { 0, 0 }
}; };