merge from 5.1 main

Bjorn Munch 2011-04-29 10:55:08 +02:00
commit cc78ab77c9
34 changed files with 697 additions and 195 deletions

View File

@ -42,6 +42,10 @@
*.vcxproj
*.vcxproj.filters
*/*.dir/*
*.dir
Debug
MySql.sdf
Win32
*/*_pure_*warnings
*/.deps
*/.libs/*
@ -611,6 +615,7 @@ include/mysql_h.ic
include/mysql_version.h
include/mysqld_ername.h
include/mysqld_error.h
include/mysqld_error.h.rule
include/openssl
include/readline
include/readline/*.h
@ -1883,7 +1888,9 @@ scripts/mysql_find_rows
scripts/mysql_fix_extensions
scripts/mysql_fix_privilege_tables
scripts/mysql_fix_privilege_tables.sql
scripts/mysql_fix_privilege_tables.sql.rule
scripts/mysql_fix_privilege_tables_sql.c
scripts/mysql_fix_privilege_tables_sql.c.rule
scripts/mysql_install_db
scripts/mysql_secure_installation
scripts/mysql_setpermission
@ -2120,6 +2127,7 @@ sql/handlerton.cc
sql/html
sql/latex
sql/lex_hash.h
sql/lex_hash.h.rule
sql/link_sources
sql/max/*
sql/message.h
@ -2151,6 +2159,7 @@ sql/sql_builtin.cc
sql/sql_select.cc.orig
sql/sql_yacc.cc
sql/sql_yacc.h
sql/sql_yacc.h.rule
sql/sql_yacc.output
sql/sql_yacc.yy.orig
sql/test_time

View File

@ -1012,8 +1012,10 @@ vi_histedit(EditLine *el, int c __attribute__((__unused__)))
if (fd < 0)
return CC_ERROR;
cp = el->el_line.buffer;
write(fd, cp, el->el_line.lastchar - cp +0u);
write(fd, "\n", 1);
if (write(fd, cp, el->el_line.lastchar - cp +0u) == -1)
goto error;
if (write(fd, "\n", 1) == -1)
goto error;
pid = fork();
switch (pid) {
case -1:
@ -1041,6 +1043,12 @@ vi_histedit(EditLine *el, int c __attribute__((__unused__)))
unlink(tempfile);
/* return CC_REFRESH; */
return ed_newline(el, 0);
/* XXXMYSQL: Avoid compiler warnings. */
error:
close(fd);
unlink(tempfile);
return CC_ERROR;
}
/* vi_history_word():

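The libedit hunk above replaces the two unchecked write() calls with checked ones that branch to a shared error label which closes the descriptor and removes the temporary file. A minimal standalone sketch of that pattern follows (hypothetical helper, not the libedit code; assumes only POSIX write/close/unlink):

#include <stddef.h>
#include <unistd.h>

/* Write one line to an already-open temp file; on any failed write, fall
   through to shared cleanup that closes the descriptor, removes the file,
   and reports failure to the caller. */
static int dump_line(int fd, const char *buf, size_t len, const char *tempfile)
{
  if (write(fd, buf, len) == -1)
    goto error;
  if (write(fd, "\n", 1) == -1)
    goto error;
  return 0;

error:
  close(fd);
  unlink(tempfile);
  return -1;
}
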
View File

@ -2,8 +2,7 @@
# in alphabetical order. This also helps with merge conflict resolution.
binlog.binlog_multi_engine # joro : NDB tests marked as experimental as agreed with bochklin
binlog.binlog_bug23533 # WL#5867: skozlov: test case moved from unused bugs suite
binlog.binlog_bug36391 # WL#5867: skozlov: test case moved from unused bugs suite
binlog.binlog_bug23533 # skozlov: BUG#12371924
funcs_1.charset_collation_1 # depends on compile-time decisions
@ -26,9 +25,7 @@ ndb.* # joro : NDB tests marked as experiment
rpl.rpl_innodb_bug28430 @solaris # Bug#46029
rpl.rpl_row_sp011 @solaris # Joro : Bug #45445
rpl.rpl_stop_slave @freebsd # Sven : BUG#12345981
rpl.rpl_bug37426 # WL#5867: skozlov: test case moved from unused bugs suite
rpl.rpl_stop_slave # Sven : BUG#12345981
rpl_ndb.* # joro : NDB tests marked as experimental as agreed with bochklin
rpl_ndb.rpl_ndb_log # Bug#38998

View File

@ -518,4 +518,26 @@ CREATE TABLE t1 SELECT CEIL(LINESTRINGFROMWKB(1) DIV NULL);
DROP TABLE t1;
CREATE TABLE t1 SELECT FLOOR(LINESTRINGFROMWKB(1) DIV NULL);
DROP TABLE t1;
#
# Bug#11765923 58937: MANY VALGRIND ERRORS AFTER GROUPING BY RESULT OF DECIMAL COLUMN FUNCTION
#
CREATE TABLE t1(f1 DECIMAL(22,1));
INSERT INTO t1 VALUES (0),(1);
SELECT ROUND(f1, f1) FROM t1;
ROUND(f1, f1)
0.0
1.0
SELECT ROUND(f1, f1) FROM t1 GROUP BY 1;
ROUND(f1, f1)
0.0
1.0
DROP TABLE t1;
#
# Bug#11764671 57533: UNINITIALISED VALUES IN COPY_AND_CONVERT (SQL_STRING.CC) WITH CERTAIN CHA
#
SELECT ROUND(LEAST(15, -4939092, 0.2704), STDDEV('a'));
ROUND(LEAST(15, -4939092, 0.2704), STDDEV('a'))
-4939092.0000
Warnings:
Warning 1292 Truncated incorrect DOUBLE value: 'a'
End of 5.1 tests

View File

@ -1405,4 +1405,16 @@ NULL
SELECT ADDDATE(MONTH(FROM_UNIXTIME(NULL)),INTERVAL 1 HOUR);
ADDDATE(MONTH(FROM_UNIXTIME(NULL)),INTERVAL 1 HOUR)
NULL
#
# Bug#11889186 60503: CRASH IN MAKE_DATE_TIME WITH DATE_FORMAT / STR_TO_DATE COMBINATION
#
SELECT DATE_FORMAT('0000-00-11', '%W');
DATE_FORMAT('0000-00-11', '%W')
NULL
SELECT DATE_FORMAT('0000-00-11', '%a');
DATE_FORMAT('0000-00-11', '%a')
NULL
SELECT DATE_FORMAT('0000-00-11', '%w');
DATE_FORMAT('0000-00-11', '%w')
NULL
End of 5.1 tests

View File

@ -545,4 +545,26 @@ FROM t1 JOIN t2 ON t2.f2 LIKE 'x'
HAVING field1 < 7;
field1
DROP TABLE t1,t2;
#
# Bug#48916 Server incorrectly processing HAVING clauses with an ORDER BY clause
#
CREATE TABLE t1 (f1 INT, f2 INT);
INSERT INTO t1 VALUES (1, 0), (2, 1), (3, 2);
CREATE TABLE t2 (f1 INT, f2 INT);
SELECT t1.f1
FROM t1
HAVING (3, 2) IN (SELECT f1, f2 FROM t2) AND t1.f1 >= 0
ORDER BY t1.f1;
f1
SELECT t1.f1
FROM t1
HAVING (3, 2) IN (SELECT 4, 2) AND t1.f1 >= 0
ORDER BY t1.f1;
f1
SELECT t1.f1
FROM t1
HAVING 2 IN (SELECT f2 FROM t2) AND t1.f1 >= 0
ORDER BY t1.f1;
f1
DROP TABLE t1,t2;
End of 5.1 tests

View File

@ -539,4 +539,13 @@ CREATE TABLE t1(f1 INT);
SELECT 0xE1BB30 INTO OUTFILE 't1.dat';
LOAD DATA INFILE 't1.dat' IGNORE INTO TABLE t1 CHARACTER SET utf8;
DROP TABLE t1;
#
# Bug#11765141 - 58072: LOAD DATA INFILE: LEAKS IO CACHE MEMORY
# WHEN ERROR OCCURS
#
SELECT '1\n' INTO DUMPFILE 'MYSQLTEST_VARDIR/tmp/bug11735141.txt';
create table t1(a point);
LOAD DATA INFILE 'MYSQLTEST_VARDIR/tmp/bug11735141.txt' INTO TABLE t1;
ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field
drop table t1;
End of 5.1 tests

View File

@ -79,3 +79,12 @@ a
DROP TABLE t1;
# Should not be any files left here
# End of bug#30102 test.
# Test of post-push fix for bug#11766249/59316
CREATE TABLE t1 (a INT, b VARCHAR(255), PRIMARY KEY (a))
ENGINE = MyISAM
PARTITION BY RANGE (a)
(PARTITION p0 VALUES LESS THAN (0) MAX_ROWS=100,
PARTITION p1 VALUES LESS THAN (100) MAX_ROWS=100,
PARTITION pMax VALUES LESS THAN MAXVALUE);
INSERT INTO t1 VALUES (1, "Partition p1, first row");
DROP TABLE t1;

View File

@ -547,4 +547,67 @@ a
2000-01-01 00:00:01
2000-01-01 00:00:01
DROP TABLE t1;
#
# Bug#50774: failed to get the correct resultset when timestamp values
# are appended with .0
#
CREATE TABLE t1 ( a TIMESTAMP, KEY ( a ) );
INSERT INTO t1 VALUES( '2010-02-01 09:31:01' );
INSERT INTO t1 VALUES( '2010-02-01 09:31:02' );
INSERT INTO t1 VALUES( '2010-02-01 09:31:03' );
INSERT INTO t1 VALUES( '2010-02-01 09:31:04' );
SELECT * FROM t1 WHERE a >= '2010-02-01 09:31:02.0';
a
2010-02-01 09:31:02
2010-02-01 09:31:03
2010-02-01 09:31:04
SELECT * FROM t1 WHERE '2010-02-01 09:31:02.0' <= a;
a
2010-02-01 09:31:02
2010-02-01 09:31:03
2010-02-01 09:31:04
SELECT * FROM t1 WHERE a <= '2010-02-01 09:31:02.0';
a
2010-02-01 09:31:01
2010-02-01 09:31:02
SELECT * FROM t1 WHERE '2010-02-01 09:31:02.0' >= a;
a
2010-02-01 09:31:01
2010-02-01 09:31:02
EXPLAIN
SELECT * FROM t1 WHERE a >= '2010-02-01 09:31:02.0';
id select_type table type possible_keys key key_len ref rows Extra
x x x range x x x x x x
SELECT * FROM t1 WHERE a >= '2010-02-01 09:31:02.0';
a
2010-02-01 09:31:02
2010-02-01 09:31:03
2010-02-01 09:31:04
CREATE TABLE t2 ( a TIMESTAMP, KEY ( a DESC ) );
INSERT INTO t2 VALUES( '2010-02-01 09:31:01' );
INSERT INTO t2 VALUES( '2010-02-01 09:31:02' );
INSERT INTO t2 VALUES( '2010-02-01 09:31:03' );
INSERT INTO t2 VALUES( '2010-02-01 09:31:04' );
INSERT INTO t2 VALUES( '2010-02-01 09:31:05' );
INSERT INTO t2 VALUES( '2010-02-01 09:31:06' );
INSERT INTO t2 VALUES( '2010-02-01 09:31:07' );
INSERT INTO t2 VALUES( '2010-02-01 09:31:08' );
INSERT INTO t2 VALUES( '2010-02-01 09:31:09' );
INSERT INTO t2 VALUES( '2010-02-01 09:31:10' );
INSERT INTO t2 VALUES( '2010-02-01 09:31:11' );
# The bug would cause the range optimizer's comparison to use an open
# interval here. This reveals itself only in the number of reads
# performed.
FLUSH STATUS;
EXPLAIN
SELECT * FROM t2 WHERE a < '2010-02-01 09:31:02.0';
id select_type table type possible_keys key key_len ref rows Extra
x x x range x x x x x x
SELECT * FROM t2 WHERE a < '2010-02-01 09:31:02.0';
a
2010-02-01 09:31:01
SHOW STATUS LIKE 'Handler_read_next';
Variable_name Value
Handler_read_next 1
DROP TABLE t1, t2;
End of 5.1 tests

View File

@ -3,7 +3,7 @@ CREATE TABLE t1 (a INT NOT NULL AUTO_INCREMENT, b TEXT, PRIMARY KEY(a)) ENGINE=I
SELECT COUNT(*) FROM t1;
COUNT(*)
1000
SET @saved_max_binlog_cache_size=@@max_binlog_cache_size;
SET GLOBAL binlog_cache_size=4096;
SET GLOBAL max_binlog_cache_size=4096;
START TRANSACTION;
CREATE TABLE t2 SELECT * FROM t1;
@ -12,5 +12,4 @@ COMMIT;
SHOW TABLES LIKE 't%';
Tables_in_test (t%)
t1
SET GLOBAL max_binlog_cache_size=@saved_max_binlog_cache_size;
DROP TABLE t1;

View File

@ -15,16 +15,24 @@ CREATE TABLE t1 (a INT NOT NULL AUTO_INCREMENT, b TEXT, PRIMARY KEY(a)) ENGINE=I
let $i= 1000;
while ($i)
{
BEGIN;
eval INSERT INTO t1 VALUES($i, REPEAT('x', 4096));
COMMIT;
dec $i;
}
--enable_query_log
SELECT COUNT(*) FROM t1;
# Set small value for max_binlog_cache_size
SET @saved_max_binlog_cache_size=@@max_binlog_cache_size;
let $saved_binlog_cache_size= query_get_value(SELECT @@binlog_cache_size AS Value, Value, 1);
let $saved_max_binlog_cache_size= query_get_value(SELECT @@max_binlog_cache_size AS Value, Value, 1);
SET GLOBAL binlog_cache_size=4096;
SET GLOBAL max_binlog_cache_size=4096;
# The new value of max_binlog_cache_size will apply to the new session
disconnect default;
connect(default,localhost,root,,test);
# The data copied from t1 into t2 is larger than max_binlog_cache_size
START TRANSACTION;
--error 1197
@ -33,5 +41,10 @@ COMMIT;
SHOW TABLES LIKE 't%';
# 5.1 End of Test
SET GLOBAL max_binlog_cache_size=@saved_max_binlog_cache_size;
--disable_query_log
eval SET GLOBAL max_binlog_cache_size=$saved_max_binlog_cache_size;
eval SET GLOBAL binlog_cache_size=$saved_binlog_cache_size;
--enable_query_log
DROP TABLE t1;
disconnect default;
connect(default,localhost,root,,test);

View File

@ -333,4 +333,20 @@ DROP TABLE t1;
CREATE TABLE t1 SELECT FLOOR(LINESTRINGFROMWKB(1) DIV NULL);
DROP TABLE t1;
--echo #
--echo # Bug#11765923 58937: MANY VALGRIND ERRORS AFTER GROUPING BY RESULT OF DECIMAL COLUMN FUNCTION
--echo #
CREATE TABLE t1(f1 DECIMAL(22,1));
INSERT INTO t1 VALUES (0),(1);
SELECT ROUND(f1, f1) FROM t1;
SELECT ROUND(f1, f1) FROM t1 GROUP BY 1;
DROP TABLE t1;
--echo #
--echo # Bug#11764671 57533: UNINITIALISED VALUES IN COPY_AND_CONVERT (SQL_STRING.CC) WITH CERTAIN CHA
--echo #
SELECT ROUND(LEAST(15, -4939092, 0.2704), STDDEV('a'));
--echo End of 5.1 tests

View File

@ -913,4 +913,12 @@ SELECT CAST((MONTH(FROM_UNIXTIME(@@GLOBAL.SQL_MODE))) AS BINARY(1025));
SELECT ADDDATE(MONTH(FROM_UNIXTIME(NULL)),INTERVAL 1 HOUR);
--echo #
--echo # Bug#11889186 60503: CRASH IN MAKE_DATE_TIME WITH DATE_FORMAT / STR_TO_DATE COMBINATION
--echo #
SELECT DATE_FORMAT('0000-00-11', '%W');
SELECT DATE_FORMAT('0000-00-11', '%a');
SELECT DATE_FORMAT('0000-00-11', '%w');
--echo End of 5.1 tests

View File

@ -564,4 +564,30 @@ HAVING field1 < 7;
DROP TABLE t1,t2;
--echo #
--echo # Bug#48916 Server incorrectly processing HAVING clauses with an ORDER BY clause
--echo #
CREATE TABLE t1 (f1 INT, f2 INT);
INSERT INTO t1 VALUES (1, 0), (2, 1), (3, 2);
CREATE TABLE t2 (f1 INT, f2 INT);
SELECT t1.f1
FROM t1
HAVING (3, 2) IN (SELECT f1, f2 FROM t2) AND t1.f1 >= 0
ORDER BY t1.f1;
SELECT t1.f1
FROM t1
HAVING (3, 2) IN (SELECT 4, 2) AND t1.f1 >= 0
ORDER BY t1.f1;
SELECT t1.f1
FROM t1
HAVING 2 IN (SELECT f2 FROM t2) AND t1.f1 >= 0
ORDER BY t1.f1;
DROP TABLE t1,t2;
--echo End of 5.1 tests

View File

@ -625,4 +625,19 @@ DROP TABLE t1;
let $MYSQLD_DATADIR= `select @@datadir`;
remove_file $MYSQLD_DATADIR/test/t1.dat;
--echo #
--echo # Bug#11765141 - 58072: LOAD DATA INFILE: LEAKS IO CACHE MEMORY
--echo # WHEN ERROR OCCURS
--echo #
--let $file=$MYSQLTEST_VARDIR/tmp/bug11735141.txt
--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
--eval SELECT '1\n' INTO DUMPFILE '$file'
create table t1(a point);
--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
--error ER_CANT_CREATE_GEOMETRY_OBJECT
--eval LOAD DATA INFILE '$file' INTO TABLE t1
drop table t1;
--echo End of 5.1 tests

View File

@ -1,5 +1,4 @@
-- source include/have_partition.inc
-- source include/not_embedded.inc
--disable_warnings
DROP TABLE IF EXISTS t1, t2;
--enable_warnings
@ -51,3 +50,13 @@ DROP TABLE t1;
--list_files $MYSQLD_DATADIR/test t1*
--list_files $MYSQLD_DATADIR/test t2*
--echo # End of bug#30102 test.
--echo # Test of post-push fix for bug#11766249/59316
CREATE TABLE t1 (a INT, b VARCHAR(255), PRIMARY KEY (a))
ENGINE = MyISAM
PARTITION BY RANGE (a)
(PARTITION p0 VALUES LESS THAN (0) MAX_ROWS=100,
PARTITION p1 VALUES LESS THAN (100) MAX_ROWS=100,
PARTITION pMax VALUES LESS THAN MAXVALUE);
INSERT INTO t1 VALUES (1, "Partition p1, first row");
DROP TABLE t1;

View File

@ -373,4 +373,51 @@ SELECT a FROM t1 WHERE a >= '20000101000000';
DROP TABLE t1;
--echo #
--echo # Bug#50774: failed to get the correct resultset when timestamp values
--echo # are appended with .0
--echo #
CREATE TABLE t1 ( a TIMESTAMP, KEY ( a ) );
INSERT INTO t1 VALUES( '2010-02-01 09:31:01' );
INSERT INTO t1 VALUES( '2010-02-01 09:31:02' );
INSERT INTO t1 VALUES( '2010-02-01 09:31:03' );
INSERT INTO t1 VALUES( '2010-02-01 09:31:04' );
SELECT * FROM t1 WHERE a >= '2010-02-01 09:31:02.0';
SELECT * FROM t1 WHERE '2010-02-01 09:31:02.0' <= a;
SELECT * FROM t1 WHERE a <= '2010-02-01 09:31:02.0';
SELECT * FROM t1 WHERE '2010-02-01 09:31:02.0' >= a;
--replace_column 1 x 2 x 3 x 5 x 6 x 7 x 8 x 9 x 10 x
EXPLAIN
SELECT * FROM t1 WHERE a >= '2010-02-01 09:31:02.0';
SELECT * FROM t1 WHERE a >= '2010-02-01 09:31:02.0';
CREATE TABLE t2 ( a TIMESTAMP, KEY ( a DESC ) );
INSERT INTO t2 VALUES( '2010-02-01 09:31:01' );
INSERT INTO t2 VALUES( '2010-02-01 09:31:02' );
INSERT INTO t2 VALUES( '2010-02-01 09:31:03' );
INSERT INTO t2 VALUES( '2010-02-01 09:31:04' );
INSERT INTO t2 VALUES( '2010-02-01 09:31:05' );
INSERT INTO t2 VALUES( '2010-02-01 09:31:06' );
INSERT INTO t2 VALUES( '2010-02-01 09:31:07' );
INSERT INTO t2 VALUES( '2010-02-01 09:31:08' );
INSERT INTO t2 VALUES( '2010-02-01 09:31:09' );
INSERT INTO t2 VALUES( '2010-02-01 09:31:10' );
INSERT INTO t2 VALUES( '2010-02-01 09:31:11' );
--echo # The bug would cause the range optimizer's comparison to use an open
--echo # interval here. This reveals itself only in the number of reads
--echo # performed.
FLUSH STATUS;
--replace_column 1 x 2 x 3 x 5 x 6 x 7 x 8 x 9 x 10 x
EXPLAIN
SELECT * FROM t2 WHERE a < '2010-02-01 09:31:02.0';
SELECT * FROM t2 WHERE a < '2010-02-01 09:31:02.0';
SHOW STATUS LIKE 'Handler_read_next';
DROP TABLE t1, t2;
--echo End of 5.1 tests

View File

@ -772,7 +772,7 @@ long calc_daynr(uint year,uint month,uint day)
int y= year; /* may be < 0 temporarily */
DBUG_ENTER("calc_daynr");
if (y == 0 && month == 0 && day == 0)
if (y == 0 && month == 0)
DBUG_RETURN(0); /* Skip errors */
/* Cast to int to be able to handle month == 0 */
delsum= (long) (365 * y + 31 *((int) month - 1) + (int) day);
@ -783,6 +783,7 @@ long calc_daynr(uint year,uint month,uint day)
temp=(int) ((y/100+1)*3)/4;
DBUG_PRINT("exit",("year: %d month: %d day: %d -> daynr: %ld",
y+(month <= 2),month,day,delsum+y/4-temp));
DBUG_ASSERT(delsum+(int) y/4-temp > 0);
DBUG_RETURN(delsum+(int) y/4-temp);
} /* calc_daynr */
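
The guard change above is what makes DATE_FORMAT on zero-month dates safe: with year and month both zero the day number is pinned to 0 instead of going negative. A self-contained sketch of the same arithmetic (renamed helper, not sql/time.cc itself):

static long calc_daynr_sketch(unsigned year, unsigned month, unsigned day)
{
  long delsum;
  int temp;
  int y= (int) year;                       /* may be < 0 temporarily */

  if (y == 0 && month == 0)
    return 0;                              /* '0000-00-DD': no meaningful day number */
  delsum= (long) (365 * y + 31 * ((int) month - 1) + (int) day);
  if (month <= 2)
    y--;
  else
    delsum-= (long) ((int) month * 4 + 23) / 10;
  temp= (int) ((y / 100 + 1) * 3) / 4;
  return delsum + (int) y / 4 - temp;      /* the new DBUG_ASSERT checks this stays > 0 */
}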

View File

@ -163,8 +163,7 @@ const uint ha_partition::NO_CURRENT_PART_ID= 0xFFFFFFFF;
*/
ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share)
:handler(hton, share), m_part_info(NULL), m_create_handler(FALSE),
m_is_sub_partitioned(0)
:handler(hton, share)
{
DBUG_ENTER("ha_partition::ha_partition(table)");
init_handler_variables();
@ -184,15 +183,44 @@ ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share)
*/
ha_partition::ha_partition(handlerton *hton, partition_info *part_info)
:handler(hton, NULL), m_part_info(part_info), m_create_handler(TRUE),
m_is_sub_partitioned(m_part_info->is_sub_partitioned())
:handler(hton, NULL)
{
DBUG_ENTER("ha_partition::ha_partition(part_info)");
DBUG_ASSERT(part_info);
init_handler_variables();
DBUG_ASSERT(m_part_info);
m_part_info= part_info;
m_create_handler= TRUE;
m_is_sub_partitioned= m_part_info->is_sub_partitioned();
DBUG_VOID_RETURN;
}
/**
ha_partition constructor method used by ha_partition::clone()
@param hton Handlerton (partition_hton)
@param share Table share object
@param part_info_arg partition_info to use
@param clone_arg ha_partition to clone
@param clone_mem_root_arg MEM_ROOT to use
@return New partition handler
*/
ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share,
partition_info *part_info_arg,
ha_partition *clone_arg,
MEM_ROOT *clone_mem_root_arg)
:handler(hton, share)
{
DBUG_ENTER("ha_partition::ha_partition(clone)");
init_handler_variables();
m_part_info= part_info_arg;
m_create_handler= TRUE;
m_is_sub_partitioned= m_part_info->is_sub_partitioned();
m_is_clone_of= clone_arg;
m_clone_mem_root= clone_mem_root_arg;
DBUG_VOID_RETURN;
}
/*
Initialize handler object
@ -244,7 +272,6 @@ void ha_partition::init_handler_variables()
m_rec0= 0;
m_curr_key_info[0]= NULL;
m_curr_key_info[1]= NULL;
is_clone= FALSE,
m_part_func_monotonicity_info= NON_MONOTONIC;
auto_increment_lock= FALSE;
auto_increment_safe_stmt_log_lock= FALSE;
@ -252,6 +279,11 @@ void ha_partition::init_handler_variables()
this allows blackhole to work properly
*/
m_no_locks= 0;
m_part_info= NULL;
m_create_handler= FALSE;
m_is_sub_partitioned= 0;
m_is_clone_of= NULL;
m_clone_mem_root= NULL;
#ifdef DONT_HAVE_TO_BE_INITALIZED
m_start_key.flag= 0;
@ -359,7 +391,8 @@ bool ha_partition::initialize_partition(MEM_ROOT *mem_root)
*/
DBUG_RETURN(0);
}
else if (get_from_handler_file(table_share->normalized_path.str, mem_root))
else if (get_from_handler_file(table_share->normalized_path.str,
mem_root, false))
{
my_message(ER_UNKNOWN_ERROR, "Failed to read from the .par file", MYF(0));
DBUG_RETURN(1);
@ -1848,7 +1881,7 @@ uint ha_partition::del_ren_cre_table(const char *from,
DBUG_RETURN(TRUE);
}
if (get_from_handler_file(from, ha_thd()->mem_root))
if (get_from_handler_file(from, ha_thd()->mem_root, false))
DBUG_RETURN(TRUE);
DBUG_ASSERT(m_file_buffer);
DBUG_PRINT("enter", ("from: (%s) to: (%s)", from, to));
@ -2064,18 +2097,16 @@ static uint name_add(char *dest, const char *first_name, const char *sec_name)
}
/*
/**
Create the special .par file
SYNOPSIS
create_handler_file()
name Full path of table name
@param name Full path of table name
RETURN VALUE
>0 Error code
0 Success
@return Operation status
@retval TRUE Error
@retval FALSE Success
DESCRIPTION
@note
Method used to create handler file with names of partitions, their
engine types and the number of partitions.
*/
@ -2139,19 +2170,22 @@ bool ha_partition::create_handler_file(const char *name)
Array of engine types n * 4 bytes where
n = (m_tot_parts + 3)/4
Length of name part in bytes 4 bytes
(Names in filename format)
Name part m * 4 bytes where
m = ((length_name_part + 3)/4)*4
All padding bytes are zeroed
*/
tot_partition_words= (tot_parts + 3) / 4;
tot_name_words= (tot_name_len + 3) / 4;
tot_partition_words= (tot_parts + PAR_WORD_SIZE - 1) / PAR_WORD_SIZE;
tot_name_words= (tot_name_len + PAR_WORD_SIZE - 1) / PAR_WORD_SIZE;
/* 4 static words (tot words, checksum, tot partitions, name length) */
tot_len_words= 4 + tot_partition_words + tot_name_words;
tot_len_byte= 4 * tot_len_words;
tot_len_byte= PAR_WORD_SIZE * tot_len_words;
if (!(file_buffer= (uchar *) my_malloc(tot_len_byte, MYF(MY_ZEROFILL))))
DBUG_RETURN(TRUE);
engine_array= (file_buffer + 12);
name_buffer_ptr= (char*) (file_buffer + ((4 + tot_partition_words) * 4));
engine_array= (file_buffer + PAR_ENGINES_OFFSET);
name_buffer_ptr= (char*) (engine_array + tot_partition_words * PAR_WORD_SIZE
+ PAR_WORD_SIZE);
part_it.rewind();
for (i= 0; i < no_parts; i++)
{
@ -2189,13 +2223,15 @@ bool ha_partition::create_handler_file(const char *name)
}
chksum= 0;
int4store(file_buffer, tot_len_words);
int4store(file_buffer + 8, tot_parts);
int4store(file_buffer + 12 + (tot_partition_words * 4), tot_name_len);
int4store(file_buffer + PAR_NUM_PARTS_OFFSET, tot_parts);
int4store(file_buffer + PAR_ENGINES_OFFSET +
(tot_partition_words * PAR_WORD_SIZE),
tot_name_len);
for (i= 0; i < tot_len_words; i++)
chksum^= uint4korr(file_buffer + 4 * i);
int4store(file_buffer + 4, chksum);
chksum^= uint4korr(file_buffer + PAR_WORD_SIZE * i);
int4store(file_buffer + PAR_CHECKSUM_OFFSET, chksum);
/*
Remove .frm extension and replace with .par
Add .par extension to the file name.
Create and write and close file
to be used at open, delete_table and rename_table
*/
@ -2213,14 +2249,9 @@ bool ha_partition::create_handler_file(const char *name)
DBUG_RETURN(result);
}
/*
/**
Clear handler variables and free some memory
SYNOPSIS
clear_handler_file()
RETURN VALUE
NONE
*/
void ha_partition::clear_handler_file()
@ -2233,16 +2264,15 @@ void ha_partition::clear_handler_file()
m_engine_array= NULL;
}
/*
/**
Create underlying handler objects
SYNOPSIS
create_handlers()
mem_root Allocate memory through this
@param mem_root Allocate memory through this
RETURN VALUE
TRUE Error
FALSE Success
@return Operation status
@retval TRUE Error
@retval FALSE Success
*/
bool ha_partition::create_handlers(MEM_ROOT *mem_root)
@ -2280,6 +2310,7 @@ bool ha_partition::create_handlers(MEM_ROOT *mem_root)
DBUG_RETURN(FALSE);
}
/*
Create underlying handler objects from partition info
@ -2351,100 +2382,164 @@ error_end:
}
/*
Get info about partition engines and their names from the .par file
/**
Read the .par file to get the partitions engines and names
SYNOPSIS
get_from_handler_file()
name Full path of table name
mem_root Allocate memory through this
@param name Name of table file (without extension)
RETURN VALUE
TRUE Error
FALSE Success
@return Operation status
@retval true Failure
@retval false Success
DESCRIPTION
Open handler file to get partition names, engine types and number of
partitions.
@note On success, m_file_buffer is allocated and must be
freed by the caller. m_name_buffer_ptr and m_tot_parts are also set.
*/
bool ha_partition::get_from_handler_file(const char *name, MEM_ROOT *mem_root)
bool ha_partition::read_par_file(const char *name)
{
char buff[FN_REFLEN], *address_tot_name_len;
char buff[FN_REFLEN], *tot_name_len_offset;
File file;
char *file_buffer, *name_buffer_ptr;
handlerton **engine_array;
char *file_buffer;
uint i, len_bytes, len_words, tot_partition_words, tot_name_words, chksum;
DBUG_ENTER("ha_partition::get_from_handler_file");
DBUG_ENTER("ha_partition::read_par_file");
DBUG_PRINT("enter", ("table name: '%s'", name));
if (m_file_buffer)
DBUG_RETURN(FALSE);
DBUG_RETURN(false);
fn_format(buff, name, "", ha_par_ext, MY_APPEND_EXT);
/* Following could be done with my_stat to read in whole file */
if ((file= my_open(buff, O_RDONLY | O_SHARE, MYF(0))) < 0)
DBUG_RETURN(TRUE);
if (my_read(file, (uchar *) & buff[0], 8, MYF(MY_NABP)))
DBUG_RETURN(true);
if (my_read(file, (uchar *) & buff[0], PAR_WORD_SIZE, MYF(MY_NABP)))
goto err1;
len_words= uint4korr(buff);
len_bytes= 4 * len_words;
len_bytes= PAR_WORD_SIZE * len_words;
if (my_seek(file, 0, MY_SEEK_SET, MYF(0)) == MY_FILEPOS_ERROR)
goto err1;
if (!(file_buffer= (char*) my_malloc(len_bytes, MYF(0))))
goto err1;
VOID(my_seek(file, 0, MY_SEEK_SET, MYF(0)));
if (my_read(file, (uchar *) file_buffer, len_bytes, MYF(MY_NABP)))
goto err2;
chksum= 0;
for (i= 0; i < len_words; i++)
chksum ^= uint4korr((file_buffer) + 4 * i);
chksum ^= uint4korr((file_buffer) + PAR_WORD_SIZE * i);
if (chksum)
goto err2;
m_tot_parts= uint4korr((file_buffer) + 8);
m_tot_parts= uint4korr((file_buffer) + PAR_NUM_PARTS_OFFSET);
DBUG_PRINT("info", ("No of parts = %u", m_tot_parts));
tot_partition_words= (m_tot_parts + 3) / 4;
tot_partition_words= (m_tot_parts + PAR_WORD_SIZE - 1) / PAR_WORD_SIZE;
tot_name_len_offset= file_buffer + PAR_ENGINES_OFFSET +
PAR_WORD_SIZE * tot_partition_words;
tot_name_words= (uint4korr(tot_name_len_offset) + PAR_WORD_SIZE - 1) /
PAR_WORD_SIZE;
/*
Verify the total length = tot size word, checksum word, num parts word +
engines array + name length word + name array.
*/
if (len_words != (tot_partition_words + tot_name_words + 4))
goto err2;
VOID(my_close(file, MYF(0)));
m_file_buffer= file_buffer; // Will be freed in clear_handler_file()
m_name_buffer_ptr= tot_name_len_offset + PAR_WORD_SIZE;
DBUG_RETURN(false);
err2:
my_free(file_buffer, MYF(0));
err1:
VOID(my_close(file, MYF(0)));
DBUG_RETURN(true);
}
/**
Setup m_engine_array
@param mem_root MEM_ROOT to use for allocating new handlers
@return Operation status
@retval false Success
@retval true Failure
*/
bool ha_partition::setup_engine_array(MEM_ROOT *mem_root)
{
uint i;
uchar *buff;
handlerton **engine_array;
DBUG_ASSERT(!m_file);
DBUG_ENTER("ha_partition::setup_engine_array");
engine_array= (handlerton **) my_alloca(m_tot_parts * sizeof(handlerton*));
if (!engine_array)
DBUG_RETURN(true);
buff= (uchar *) (m_file_buffer + PAR_ENGINES_OFFSET);
for (i= 0; i < m_tot_parts; i++)
{
engine_array[i]= ha_resolve_by_legacy_type(ha_thd(),
(enum legacy_db_type)
*(uchar *) ((file_buffer) +
12 + i));
*(buff + i));
if (!engine_array[i])
goto err3;
goto err;
}
address_tot_name_len= file_buffer + 12 + 4 * tot_partition_words;
tot_name_words= (uint4korr(address_tot_name_len) + 3) / 4;
if (len_words != (tot_partition_words + tot_name_words + 4))
goto err3;
name_buffer_ptr= file_buffer + 16 + 4 * tot_partition_words;
VOID(my_close(file, MYF(0)));
m_file_buffer= file_buffer; // Will be freed in clear_handler_file()
m_name_buffer_ptr= name_buffer_ptr;
if (!(m_engine_array= (plugin_ref*)
my_malloc(m_tot_parts * sizeof(plugin_ref), MYF(MY_WME))))
goto err3;
goto err;
for (i= 0; i < m_tot_parts; i++)
m_engine_array[i]= ha_lock_engine(NULL, engine_array[i]);
my_afree((gptr) engine_array);
if (!m_file && create_handlers(mem_root))
if (create_handlers(mem_root))
{
clear_handler_file();
DBUG_RETURN(TRUE);
DBUG_RETURN(true);
}
DBUG_RETURN(FALSE);
err3:
DBUG_RETURN(false);
err:
my_afree((gptr) engine_array);
err2:
my_free(file_buffer, MYF(0));
err1:
VOID(my_close(file, MYF(0)));
DBUG_RETURN(TRUE);
DBUG_RETURN(true);
}
/**
Get info about partition engines and their names from the .par file
@param name Full path of table name
@param mem_root Allocate memory through this
@param is_clone If it is a clone, don't create new handlers
@return Operation status
@retval true Error
@retval false Success
@note Open handler file to get partition names, engine types and number of
partitions.
*/
bool ha_partition::get_from_handler_file(const char *name, MEM_ROOT *mem_root,
bool is_clone)
{
DBUG_ENTER("ha_partition::get_from_handler_file");
DBUG_PRINT("enter", ("table name: '%s'", name));
if (m_file_buffer)
DBUG_RETURN(false);
if (read_par_file(name))
DBUG_RETURN(true);
if (!is_clone && setup_engine_array(mem_root))
DBUG_RETURN(true);
DBUG_RETURN(false);
}
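
For reference, a standalone sketch of the .par header layout that read_par_file() validates above. This is not the server code; it assumes the little-endian 4-byte loads that uint4korr performs and mirrors the PAR_* offsets (word size 4, checksum at byte 4, partition count at byte 8, engine array at byte 12):

#include <stdint.h>
#include <stddef.h>

static uint32_t load_word(const unsigned char *p)       /* like uint4korr */
{
  return (uint32_t) p[0] | ((uint32_t) p[1] << 8) |
         ((uint32_t) p[2] << 16) | ((uint32_t) p[3] << 24);
}

struct par_header { uint32_t tot_words, checksum, num_parts; };

static bool parse_par_header(const unsigned char *buf, size_t len,
                             par_header *out)
{
  const size_t word= 4;                                  /* PAR_WORD_SIZE */
  if (len < 4 * word)
    return false;
  out->tot_words= load_word(buf);                        /* byte 0: length in words */
  out->checksum=  load_word(buf + 4);                    /* PAR_CHECKSUM_OFFSET */
  out->num_parts= load_word(buf + 8);                    /* PAR_NUM_PARTS_OFFSET */
  if ((size_t) out->tot_words * word > len)
    return false;
  uint32_t chksum= 0;
  for (uint32_t i= 0; i < out->tot_words; i++)           /* XOR of all words */
    chksum^= load_word(buf + i * word);
  return chksum == 0;                                    /* a valid file XORs to zero */
}
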
@ -2491,13 +2586,13 @@ void ha_data_partition_destroy(void *ha_data)
int ha_partition::open(const char *name, int mode, uint test_if_locked)
{
char *name_buffer_ptr= m_name_buffer_ptr;
int error;
char *name_buffer_ptr;
int error= HA_ERR_INITIALIZATION;
uint alloc_len;
handler **file;
char name_buff[FN_REFLEN];
bool is_not_tmp_table= (table_share->tmp_table == NO_TMP_TABLE);
ulonglong check_table_flags= 0;
ulonglong check_table_flags;
DBUG_ENTER("ha_partition::open");
DBUG_ASSERT(table->s == table_share);
@ -2505,8 +2600,9 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
m_mode= mode;
m_open_test_lock= test_if_locked;
m_part_field_array= m_part_info->full_part_field_array;
if (get_from_handler_file(name, &table->mem_root))
DBUG_RETURN(1);
if (get_from_handler_file(name, &table->mem_root, test(m_is_clone_of)))
DBUG_RETURN(error);
name_buffer_ptr= m_name_buffer_ptr;
m_start_key.length= 0;
m_rec0= table->record[0];
m_rec_length= table_share->reclength;
@ -2516,7 +2612,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
{
if (!(m_ordered_rec_buffer= (uchar*)my_malloc(alloc_len, MYF(MY_WME))))
{
DBUG_RETURN(1);
DBUG_RETURN(error);
}
{
/*
@ -2539,48 +2635,84 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
/* Initialize the bitmap we use to minimize ha_start_bulk_insert calls */
if (bitmap_init(&m_bulk_insert_started, NULL, m_tot_parts + 1, FALSE))
DBUG_RETURN(1);
DBUG_RETURN(error);
bitmap_clear_all(&m_bulk_insert_started);
/* Initialize the bitmap we use to determine what partitions are used */
if (!is_clone)
if (!m_is_clone_of)
{
DBUG_ASSERT(!m_clone_mem_root);
if (bitmap_init(&(m_part_info->used_partitions), NULL, m_tot_parts, TRUE))
{
bitmap_free(&m_bulk_insert_started);
DBUG_RETURN(1);
DBUG_RETURN(error);
}
bitmap_set_all(&(m_part_info->used_partitions));
}
file= m_file;
do
if (m_is_clone_of)
{
create_partition_name(name_buff, name, name_buffer_ptr, NORMAL_PART_NAME,
FALSE);
if ((error= (*file)->ha_open(table, (const char*) name_buff, mode,
test_if_locked)))
goto err_handler;
m_no_locks+= (*file)->lock_count();
name_buffer_ptr+= strlen(name_buffer_ptr) + 1;
uint i;
DBUG_ASSERT(m_clone_mem_root);
/* Allocate an array of handler pointers for the partition handlers. */
alloc_len= (m_tot_parts + 1) * sizeof(handler*);
if (!(m_file= (handler **) alloc_root(m_clone_mem_root, alloc_len)))
goto err_alloc;
memset(m_file, 0, alloc_len);
/*
Populate them by cloning the original partitions. This also opens them.
Note that file->ref is allocated too.
*/
file= m_is_clone_of->m_file;
for (i= 0; i < m_tot_parts; i++)
{
create_partition_name(name_buff, name, name_buffer_ptr, NORMAL_PART_NAME,
FALSE);
if (!(m_file[i]= file[i]->clone(name_buff, m_clone_mem_root)))
{
error= HA_ERR_INITIALIZATION;
file= &m_file[i];
goto err_handler;
}
name_buffer_ptr+= strlen(name_buffer_ptr) + 1;
}
}
else
{
file= m_file;
do
{
create_partition_name(name_buff, name, name_buffer_ptr, NORMAL_PART_NAME,
FALSE);
if ((error= (*file)->ha_open(table, name_buff, mode, test_if_locked)))
goto err_handler;
m_no_locks+= (*file)->lock_count();
name_buffer_ptr+= strlen(name_buffer_ptr) + 1;
} while (*(++file));
}
file= m_file;
ref_length= (*file)->ref_length;
check_table_flags= (((*file)->ha_table_flags() &
~(PARTITION_DISABLED_TABLE_FLAGS)) |
(PARTITION_ENABLED_TABLE_FLAGS));
while (*(++file))
{
/* MyISAM can have smaller ref_length for partitions with MAX_ROWS set */
set_if_bigger(ref_length, ((*file)->ref_length));
/*
Verify that all partitions have the same set of table flags.
Mask all flags that partitioning enables/disables.
*/
if (!check_table_flags)
{
check_table_flags= (((*file)->ha_table_flags() &
~(PARTITION_DISABLED_TABLE_FLAGS)) |
(PARTITION_ENABLED_TABLE_FLAGS));
}
else if (check_table_flags != (((*file)->ha_table_flags() &
~(PARTITION_DISABLED_TABLE_FLAGS)) |
(PARTITION_ENABLED_TABLE_FLAGS)))
if (check_table_flags != (((*file)->ha_table_flags() &
~(PARTITION_DISABLED_TABLE_FLAGS)) |
(PARTITION_ENABLED_TABLE_FLAGS)))
{
error= HA_ERR_INITIALIZATION;
/* set file to the last handler, so all of them are closed */
file = &m_file[m_tot_parts - 1];
goto err_handler;
}
} while (*(++file));
}
key_used_on_scan= m_file[0]->key_used_on_scan;
implicit_emptied= m_file[0]->implicit_emptied;
/*
@ -2589,6 +2721,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
*/
ref_length+= PARTITION_BYTES_IN_POS;
m_ref_length= ref_length;
/*
Release buffer read from .par file. It will not be reused again after
being opened once.
@ -2646,25 +2779,54 @@ err_handler:
DEBUG_SYNC(ha_thd(), "partition_open_error");
while (file-- != m_file)
(*file)->close();
err_alloc:
bitmap_free(&m_bulk_insert_started);
if (!is_clone)
if (!m_is_clone_of)
bitmap_free(&(m_part_info->used_partitions));
DBUG_RETURN(error);
}
handler *ha_partition::clone(MEM_ROOT *mem_root)
/**
Clone the open and locked partitioning handler.
@param mem_root MEM_ROOT to use.
@return Pointer to the successfully created clone or NULL
@details
This function creates a new ha_partition handler as a clone/copy. The
original (this) must already be opened and locked. The clone will use
the original's m_part_info.
It also allocates memory for ref + ref_dup.
In ha_partition::open() it will clone its original handler's partitions,
which will allocate them on the correct MEM_ROOT and also open them.
*/
handler *ha_partition::clone(const char *name, MEM_ROOT *mem_root)
{
handler *new_handler= get_new_handler(table->s, mem_root,
table->s->db_type());
((ha_partition*)new_handler)->m_part_info= m_part_info;
((ha_partition*)new_handler)->is_clone= TRUE;
if (new_handler && !new_handler->ha_open(table,
table->s->normalized_path.str,
table->db_stat,
HA_OPEN_IGNORE_IF_LOCKED))
return new_handler;
return NULL;
ha_partition *new_handler;
DBUG_ENTER("ha_partition::clone");
new_handler= new (mem_root) ha_partition(ht, table_share, m_part_info,
this, mem_root);
/*
Allocate new_handler->ref here because otherwise ha_open will allocate it
on this->table->mem_root and we will not be able to reclaim that memory
when the clone handler object is destroyed.
*/
if (new_handler &&
!(new_handler->ref= (uchar*) alloc_root(mem_root,
ALIGN_SIZE(m_ref_length)*2)))
new_handler= NULL;
if (new_handler &&
new_handler->ha_open(table, name,
table->db_stat, HA_OPEN_IGNORE_IF_LOCKED))
new_handler= NULL;
DBUG_RETURN((handler*) new_handler);
}
@ -2695,7 +2857,7 @@ int ha_partition::close(void)
DBUG_ASSERT(table->s == table_share);
delete_queue(&m_queue);
bitmap_free(&m_bulk_insert_started);
if (!is_clone)
if (!m_is_clone_of)
bitmap_free(&(m_part_info->used_partitions));
file= m_file;
@ -3795,19 +3957,16 @@ end_dont_reset_start_part:
void ha_partition::position(const uchar *record)
{
handler *file= m_file[m_last_part];
uint pad_length;
DBUG_ENTER("ha_partition::position");
file->position(record);
int2store(ref, m_last_part);
memcpy((ref + PARTITION_BYTES_IN_POS), file->ref,
(ref_length - PARTITION_BYTES_IN_POS));
memcpy((ref + PARTITION_BYTES_IN_POS), file->ref, file->ref_length);
pad_length= m_ref_length - PARTITION_BYTES_IN_POS - file->ref_length;
if (pad_length)
memset((ref + PARTITION_BYTES_IN_POS + file->ref_length), 0, pad_length);
#ifdef SUPPORTING_PARTITION_OVER_DIFFERENT_ENGINES
#ifdef HAVE_purify
bzero(ref + PARTITION_BYTES_IN_POS + ref_length,
max_ref_length-ref_length);
#endif /* HAVE_purify */
#endif
DBUG_VOID_RETURN;
}

View File

@ -55,6 +55,16 @@ typedef struct st_ha_data_partition
HA_DUPLICATE_POS | \
HA_CAN_SQL_HANDLER | \
HA_CAN_INSERT_DELAYED)
/* The first 4 bytes of the .par file hold the number of 32-bit words in the file */
#define PAR_WORD_SIZE 4
/* offset to the .par file checksum */
#define PAR_CHECKSUM_OFFSET 4
/* offset to the total number of partitions */
#define PAR_NUM_PARTS_OFFSET 8
/* offset to the engines array */
#define PAR_ENGINES_OFFSET 12
class ha_partition :public handler
{
private:
@ -71,7 +81,7 @@ private:
/* Data for the partition handler */
int m_mode; // Open mode
uint m_open_test_lock; // Open test_if_locked
char *m_file_buffer; // Buffer with names
char *m_file_buffer; // Content of the .par file
char *m_name_buffer_ptr; // Pointer to first partition name
plugin_ref *m_engine_array; // Array of types of the handlers
handler **m_file; // Array of references to handler inst.
@ -133,6 +143,13 @@ private:
bool m_is_sub_partitioned; // Is subpartitioned
bool m_ordered_scan_ongoing;
/*
If set, this object was created with ha_partition::clone and doesn't
"own" the m_part_info structure.
*/
ha_partition *m_is_clone_of;
MEM_ROOT *m_clone_mem_root;
/*
We keep track if all underlying handlers are MyISAM since MyISAM has a
great number of extra flags not needed by other handlers.
@ -169,11 +186,6 @@ private:
PARTITION_SHARE *share; /* Shared lock info */
#endif
/*
TRUE <=> this object was created with ha_partition::clone and doesn't
"own" the m_part_info structure.
*/
bool is_clone;
bool auto_increment_lock; /**< lock reading/updating auto_inc */
/**
Flag to keep the auto_increment lock through out the statement.
@ -186,7 +198,7 @@ private:
/** used for prediction of start_bulk_insert rows */
enum_monotonicity_info m_part_func_monotonicity_info;
public:
handler *clone(MEM_ROOT *mem_root);
handler *clone(const char *name, MEM_ROOT *mem_root);
virtual void set_part_info(partition_info *part_info)
{
m_part_info= part_info;
@ -205,6 +217,10 @@ public:
*/
ha_partition(handlerton *hton, TABLE_SHARE * table);
ha_partition(handlerton *hton, partition_info * part_info);
ha_partition(handlerton *hton, TABLE_SHARE *share,
partition_info *part_info_arg,
ha_partition *clone_arg,
MEM_ROOT *clone_mem_root_arg);
~ha_partition();
/*
A partition handler has no characteristics in itself. It only inherits
@ -275,7 +291,10 @@ private:
And one method to read it in.
*/
bool create_handler_file(const char *name);
bool get_from_handler_file(const char *name, MEM_ROOT *mem_root);
bool setup_engine_array(MEM_ROOT *mem_root);
bool read_par_file(const char *name);
bool get_from_handler_file(const char *name, MEM_ROOT *mem_root,
bool is_clone);
bool new_handlers_from_part_info(MEM_ROOT *mem_root);
bool create_handlers(MEM_ROOT *mem_root);
void clear_handler_file();

View File

@ -2037,22 +2037,29 @@ int ha_delete_table(THD *thd, handlerton *table_type, const char *path,
/****************************************************************************
** General handler functions
****************************************************************************/
handler *handler::clone(MEM_ROOT *mem_root)
handler *handler::clone(const char *name, MEM_ROOT *mem_root)
{
handler *new_handler= get_new_handler(table->s, mem_root, table->s->db_type());
handler *new_handler= get_new_handler(table->s, mem_root, ht);
/*
Allocate handler->ref here because otherwise ha_open will allocate it
on this->table->mem_root and we will not be able to reclaim that memory
when the clone handler object is destroyed.
*/
if (!(new_handler->ref= (uchar*) alloc_root(mem_root, ALIGN_SIZE(ref_length)*2)))
return NULL;
if (new_handler && !new_handler->ha_open(table,
table->s->normalized_path.str,
table->db_stat,
HA_OPEN_IGNORE_IF_LOCKED))
return new_handler;
return NULL;
if (new_handler &&
!(new_handler->ref= (uchar*) alloc_root(mem_root,
ALIGN_SIZE(ref_length)*2)))
new_handler= NULL;
/*
TODO: Implement a more efficient way to have more than one index open for
the same table instance. The ha_open call is not cachable for clone.
*/
if (new_handler && new_handler->ha_open(table,
name,
table->db_stat,
HA_OPEN_IGNORE_IF_LOCKED))
new_handler= NULL;
return new_handler;
}
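
The point of allocating new_handler->ref from the caller's mem_root is lifetime: everything the clone owns goes away when that MEM_ROOT is freed. A toy illustration with hypothetical types (Arena stands in for MEM_ROOT, Cursor for handler; not the server API):

#include <new>
#include <vector>
#include <cstddef>

struct Arena                                   /* stand-in for MEM_ROOT */
{
  std::vector<char*> blocks;
  char *alloc(std::size_t n) { char *p= new char[n]; blocks.push_back(p); return p; }
  ~Arena() { for (std::size_t i= 0; i < blocks.size(); i++) delete[] blocks[i]; }
};

struct Cursor                                  /* stand-in for handler */
{
  char *ref;                                   /* row position buffer */
  Cursor() : ref(0) {}
};

static Cursor *clone_cursor(Arena &arena, std::size_t ref_length)
{
  Cursor *c= new (arena.alloc(sizeof(Cursor))) Cursor();
  c->ref= arena.alloc(ref_length * 2);         /* ref + dup ref, freed with the arena */
  return c;
}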

View File

@ -1165,7 +1165,7 @@ public:
DBUG_ASSERT(locked == FALSE);
/* TODO: DBUG_ASSERT(inited == NONE); */
}
virtual handler *clone(MEM_ROOT *mem_root);
virtual handler *clone(const char *name, MEM_ROOT *mem_root);
/** This is called after create to allow us to set up cached variables */
void init()
{

View File

@ -1,4 +1,4 @@
/* Copyright (C) 2000-2006 MySQL AB
/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@ -183,7 +183,7 @@ char * ip_to_hostname(struct in_addr *in, uint *errors)
&tmp_hostent,buff,sizeof(buff),&tmp_errno)))
{
DBUG_PRINT("error",("gethostbyaddr_r returned %d",tmp_errno));
return 0;
DBUG_RETURN(0);
}
if (!(check=my_gethostbyname_r(hp->h_name,&tmp_hostent2,buff2,sizeof(buff2),
&tmp_errno)))

View File

@ -2122,10 +2122,7 @@ my_decimal *Item_func_round::decimal_op(my_decimal *decimal_value)
if (!(null_value= (args[0]->null_value || args[1]->null_value ||
my_decimal_round(E_DEC_FATAL_ERROR, value, (int) dec,
truncate, decimal_value) > 1)))
{
decimal_value->frac= decimals;
return decimal_value;
}
return 0;
}

View File

@ -648,7 +648,7 @@ bool make_date_time(DATE_TIME_FORMAT *format, MYSQL_TIME *l_time,
system_charset_info);
break;
case 'W':
if (type == MYSQL_TIMESTAMP_TIME)
if (type == MYSQL_TIMESTAMP_TIME || !(l_time->month || l_time->year))
return 1;
weekday= calc_weekday(calc_daynr(l_time->year,l_time->month,
l_time->day),0);
@ -657,7 +657,7 @@ bool make_date_time(DATE_TIME_FORMAT *format, MYSQL_TIME *l_time,
system_charset_info);
break;
case 'a':
if (type == MYSQL_TIMESTAMP_TIME)
if (type == MYSQL_TIMESTAMP_TIME || !(l_time->month || l_time->year))
return 1;
weekday=calc_weekday(calc_daynr(l_time->year,l_time->month,
l_time->day),0);
@ -816,7 +816,7 @@ bool make_date_time(DATE_TIME_FORMAT *format, MYSQL_TIME *l_time,
}
break;
case 'w':
if (type == MYSQL_TIMESTAMP_TIME)
if (type == MYSQL_TIMESTAMP_TIME || !(l_time->month || l_time->year))
return 1;
weekday=calc_weekday(calc_daynr(l_time->year,l_time->month,
l_time->day),1);
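
All three hunks above add the same guard. As a tiny sketch (hypothetical helper name, not the server function), this is the condition under which %W, %a and %w now bail out and make DATE_FORMAT return NULL:

/* A weekday cannot be derived for pure TIME values, or for dates whose year
   and month are both zero (calc_daynr() would return day number 0 there). */
static bool weekday_is_undefined(bool is_time_value, unsigned year, unsigned month)
{
  return is_time_value || !(month || year);
}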

View File

@ -1335,7 +1335,7 @@ int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler)
}
thd= head->in_use;
if (!(file= head->file->clone(thd->mem_root)))
if (!(file= head->file->clone(head->s->normalized_path.str, thd->mem_root)))
{
/*
Manually set the error flag. Note: there seems to be quite a few

View File

@ -1075,9 +1075,10 @@ READ_INFO::READ_INFO(File file_par, uint tot_length, CHARSET_INFO *cs,
String &field_term, String &line_start, String &line_term,
String &enclosed_par, int escape, bool get_it_from_net,
bool is_fifo)
:file(file_par),escape_char(escape)
:file(file_par), buff_length(tot_length), escape_char(escape),
found_end_of_line(false), eof(false), need_end_io_cache(false),
error(false), line_cuted(false), found_null(false), read_charset(cs)
{
read_charset= cs;
field_term_ptr=(char*) field_term.ptr();
field_term_length= field_term.length();
line_term_ptr=(char*) line_term.ptr();
@ -1104,8 +1105,6 @@ READ_INFO::READ_INFO(File file_par, uint tot_length, CHARSET_INFO *cs,
(uchar) enclosed_par[0] : INT_MAX;
field_term_char= field_term_length ? (uchar) field_term_ptr[0] : INT_MAX;
line_term_char= line_term_length ? (uchar) line_term_ptr[0] : INT_MAX;
error=eof=found_end_of_line=found_null=line_cuted=0;
buff_length=tot_length;
/* Set of a stack for unget if long terminators */
@ -1151,7 +1150,7 @@ READ_INFO::READ_INFO(File file_par, uint tot_length, CHARSET_INFO *cs,
READ_INFO::~READ_INFO()
{
if (!error && need_end_io_cache)
if (need_end_io_cache)
::end_io_cache(&cache);
my_free(buffer, MYF(MY_ALLOW_ZERO_PTR));
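
A minimal standalone sketch of the two fixes above, using stand-in types rather than READ_INFO: members move into the constructor's initializer list, and the destructor releases the cache whenever it was set up, no longer skipping cleanup when the error flag is set (the leak from Bug#11765141):

#include <cstdio>

class reader_sketch
{
  std::FILE *cache;                  /* stands in for the IO_CACHE */
  bool need_end_io_cache;
  bool error;
public:
  explicit reader_sketch(std::FILE *f)
    : cache(f), need_end_io_cache(f != 0), error(false)  /* init list, not body */
  {}
  void set_error() { error= true; }  /* an error no longer blocks cleanup */
  ~reader_sketch()
  {
    if (need_end_io_cache)           /* was: if (!error && need_end_io_cache) */
      std::fclose(cache);
  }
};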

View File

@ -2215,7 +2215,7 @@ JOIN::exec()
Item* sort_table_cond= make_cond_for_table(curr_join->tmp_having,
used_tables,
used_tables);
(table_map) 0);
if (sort_table_cond)
{
if (!curr_table->select)
@ -12852,6 +12852,42 @@ static bool test_if_ref(Item_field *left_item,Item *right_item)
return 0; // keep test
}
/**
Extract a condition that can be checked after reading given table
@param cond Condition to analyze
@param tables Tables for which "current field values" are available
@param used_table Table that we're extracting the condition for (may
also include PSEUDO_TABLE_BITS, and may be zero)
@param exclude_expensive_cond Do not push expensive conditions
@retval <>NULL Generated condition
@retval =NULL Already checked, OR error
@details
Extract the condition that can be checked after reading the table
specified in 'used_table', given that current-field values for tables
specified in 'tables' bitmap are available.
If 'used_table' is 0
- extract conditions for all tables in 'tables'.
- extract conditions that are unrelated to any tables
in the same query block/level (i.e. conditions
which have used_tables == 0).
The function assumes that
- Constant parts of the condition have already been checked.
- Conditions that could be checked for tables in 'tables' have already
been checked.
The function takes into account that some parts of the condition are
guaranteed to be true by employed 'ref' access methods (the code that
does this is located at the end, search down for "EQ_FUNC").
@note
Make sure to keep the implementations of make_cond_for_table() and
make_cond_after_sjm() synchronized.
make_cond_for_info_schema() uses similar algorithm as well.
*/
static COND *
make_cond_for_table(COND *cond, table_map tables, table_map used_table)

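A toy sketch of the idea the new comment block describes, with bitmasks for table sets and a hypothetical simplified predicate type (this is not the server's Item tree walk): keep only the AND-parts whose referenced tables are already available.

#include <stdint.h>
#include <cstddef>
#include <vector>

struct pred { uint64_t used_tables; };           /* hypothetical stand-in for Item */

static std::vector<pred> cond_for_tables(const std::vector<pred> &and_parts,
                                         uint64_t available)
{
  std::vector<pred> pushed;
  for (std::size_t i= 0; i < and_parts.size(); i++)
    if ((and_parts[i].used_tables & ~available) == 0)  /* all referenced tables read */
      pushed.push_back(and_parts[i]);                  /* used_tables == 0 always qualifies */
  return pushed;
}
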
View File

@ -142,11 +142,11 @@ int ha_heap::close(void)
DESCRIPTION
Do same as default implementation but use file->s->name instead of
table->s->path. This is needed by Windows where the clone() call sees
'/'-delimited path in table->s->path, while ha_peap::open() was called
'/'-delimited path in table->s->path, while ha_heap::open() was called
with '\'-delimited path.
*/
handler *ha_heap::clone(MEM_ROOT *mem_root)
handler *ha_heap::clone(const char *name, MEM_ROOT *mem_root)
{
handler *new_handler= get_new_handler(table->s, mem_root, table->s->db_type());
if (new_handler && !new_handler->ha_open(table, file->s->name, table->db_stat,

View File

@ -34,7 +34,7 @@ class ha_heap: public handler
public:
ha_heap(handlerton *hton, TABLE_SHARE *table);
~ha_heap() {}
handler *clone(MEM_ROOT *mem_root);
handler *clone(const char *name, MEM_ROOT *mem_root);
const char *table_type() const
{
return (table->in_use->variables.sql_mode & MODE_MYSQL323) ?

View File

@ -552,9 +552,10 @@ ha_myisam::ha_myisam(handlerton *hton, TABLE_SHARE *table_arg)
can_enable_indexes(1)
{}
handler *ha_myisam::clone(MEM_ROOT *mem_root)
handler *ha_myisam::clone(const char *name, MEM_ROOT *mem_root)
{
ha_myisam *new_handler= static_cast <ha_myisam *>(handler::clone(mem_root));
ha_myisam *new_handler= static_cast <ha_myisam *>(handler::clone(name,
mem_root));
if (new_handler)
new_handler->file->state= file->state;
return new_handler;

View File

@ -44,7 +44,7 @@ class ha_myisam: public handler
public:
ha_myisam(handlerton *hton, TABLE_SHARE *table_arg);
~ha_myisam() {}
handler *clone(MEM_ROOT *mem_root);
handler *clone(const char *name, MEM_ROOT *mem_root);
const char *table_type() const { return "MyISAM"; }
const char *index_type(uint key_number);
const char **bas_ext() const;

View File

@ -459,8 +459,7 @@ int ha_myisammrg::open(const char *name, int mode __attribute__((unused)),
problem because all locking is handled by the original MERGE table
from which this was cloned.
*/
if (!(file= myrg_open(table->s->normalized_path.str, table->db_stat,
HA_OPEN_IGNORE_IF_LOCKED)))
if (!(file= myrg_open(name, table->db_stat, HA_OPEN_IGNORE_IF_LOCKED)))
{
DBUG_PRINT("error", ("my_errno %d", my_errno));
DBUG_RETURN(my_errno ? my_errno : -1);
@ -484,7 +483,7 @@ int ha_myisammrg::open(const char *name, int mode __attribute__((unused)),
@return A cloned handler instance.
*/
handler *ha_myisammrg::clone(MEM_ROOT *mem_root)
handler *ha_myisammrg::clone(const char *name, MEM_ROOT *mem_root)
{
MYRG_TABLE *u_table,*newu_table;
ha_myisammrg *new_handler=
@ -505,8 +504,8 @@ handler *ha_myisammrg::clone(MEM_ROOT *mem_root)
return NULL;
}
if (new_handler->ha_open(table, table->s->normalized_path.str, table->db_stat,
HA_OPEN_IGNORE_IF_LOCKED))
if (new_handler->ha_open(table, name, table->db_stat,
HA_OPEN_IGNORE_IF_LOCKED))
{
delete new_handler;
return NULL;

View File

@ -62,7 +62,7 @@ class ha_myisammrg: public handler
int open(const char *name, int mode, uint test_if_locked);
int attach_children(void);
int detach_children(void);
virtual handler *clone(MEM_ROOT *mem_root);
virtual handler *clone(const char *name, MEM_ROOT *mem_root);
int close(void);
int write_row(uchar * buf);
int update_row(const uchar * old_data, uchar * new_data);