Guard against compiling without -fno-exceptions
Allocate bigger default thread stack because of problems with glibc
Fixed bug in UPDATE ... not_null_field=expression_that_returns_null
Fixed bug in replication when using auto_increment and LOAD DATA INFILE

parent 1bc3105da3
commit 8dd439e751
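The NOT NULL fix changes what the server does when a NULL value reaches a NOT NULL column. A minimal SQL sketch of that behaviour, mirroring the new null.test cases further down (error 1048 is ER_BAD_NULL_ERROR, and table t1 is the one those tests create):

    CREATE TABLE t1 (a varchar(16) NOT NULL, b smallint(6) NOT NULL,
                     c datetime NOT NULL, d smallint(6) NOT NULL);
    INSERT INTO t1 SET a = "", d= "2003-01-14 03:54:55";
    UPDATE t1 SET d=1/NULL;                   -- expression yields NULL; d falls back to 0, counted as a cut field
    INSERT INTO t1 (a) values (null);         -- single-row INSERT: now rejected with error 1048
    INSERT INTO t1 (a) values (null),(null);  -- multi-row INSERT: NULLs become the column's implicit default

The single-row INSERTs are exactly the statements the new --error 1048 directives in null.test expect to fail.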
@@ -153,6 +153,13 @@ C_MODE_END
 #undef HAVE_INITGROUPS
 #endif
 
+/* gcc/egcs issues */
+
+#if defined(__GNUC) && defined(__EXCEPTIONS)
+#error "Please add -fno-exceptions to CXXFLAGS and reconfigure/recompile"
+#endif
+
+
 /* Fix a bug in gcc 2.8.0 on IRIX 6.2 */
 #if SIZEOF_LONG == 4 && defined(__LONG_MAX__)
 #undef __LONG_MAX__   /* Is a longlong value in gcc 2.8.0 ??? */
@@ -581,9 +581,13 @@ extern int pthread_dummy(int);
 
 #define THREAD_NAME_SIZE 10
 #if defined(__ia64__)
-#define DEFAULT_THREAD_STACK (128*1024)
+/*
+  MySQL can survive with 32K, but some glibc libraries require > 128K stack
+  To resolve hostnames
+*/
+#define DEFAULT_THREAD_STACK (192*1024L)
 #else
-#define DEFAULT_THREAD_STACK (64*1024)
+#define DEFAULT_THREAD_STACK (192*1024L)
 #endif
 
 struct st_my_thread_var
@@ -73,3 +73,39 @@ b ifnull(t2.b,"this is null")
 NULL	this is null
 NULL	this is null
 drop table t1;
+CREATE TABLE t1 (a varchar(16) NOT NULL, b smallint(6) NOT NULL, c datetime NOT NULL, d smallint(6) NOT NULL);
+INSERT INTO t1 SET a = "", d= "2003-01-14 03:54:55";
+UPDATE t1 SET d=1/NULL;
+UPDATE t1 SET d=NULL;
+INSERT INTO t1 (a) values (null);
+Column 'a' cannot be null
+INSERT INTO t1 (a) values (1/null);
+Column 'a' cannot be null
+INSERT INTO t1 (a) values (null),(null);
+INSERT INTO t1 (b) values (null);
+Column 'b' cannot be null
+INSERT INTO t1 (b) values (1/null);
+Column 'b' cannot be null
+INSERT INTO t1 (b) values (null),(null);
+INSERT INTO t1 (c) values (null);
+Column 'c' cannot be null
+INSERT INTO t1 (c) values (1/null);
+Column 'c' cannot be null
+INSERT INTO t1 (c) values (null),(null);
+INSERT INTO t1 (d) values (null);
+Column 'd' cannot be null
+INSERT INTO t1 (d) values (1/null);
+Column 'd' cannot be null
+INSERT INTO t1 (d) values (null),(null);
+select * from t1;
+a	b	c	d
+	0	0000-00-00 00:00:00	0
+	0	0000-00-00 00:00:00	0
+	0	0000-00-00 00:00:00	0
+	0	0000-00-00 00:00:00	0
+	0	0000-00-00 00:00:00	0
+	0	0000-00-00 00:00:00	0
+	0	0000-00-00 00:00:00	0
+	0	0000-00-00 00:00:00	0
+	0	0000-00-00 00:00:00	0
+drop table t1;
mysql-test/r/rpl_loaddata.result (new file, 13 lines)
@@ -0,0 +1,13 @@
+slave stop;
+drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
+reset master;
+reset slave;
+drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
+slave start;
+create table t1(a int not null auto_increment, b int, primary key(a) );
+load data infile '../../std_data/rpl_loaddata.dat' into table t1;
+select * from t1;
+a	b
+1	10
+2	15
+drop table t1;
mysql-test/std_data/rpl_loaddata.dat (new file, 2 lines)
@@ -0,0 +1,2 @@
+\N	10
+\N	15
@@ -48,3 +48,34 @@ insert into t1 values(10,null);
 select t2.b, ifnull(t2.b,"this is null") from t1 as t2 left join t1 as t3 on
 t2.b=t3.a order by 1;
 drop table t1;
+
+#
+# Test inserting and updating with NULL
+#
+CREATE TABLE t1 (a varchar(16) NOT NULL, b smallint(6) NOT NULL, c datetime NOT NULL, d smallint(6) NOT NULL);
+INSERT INTO t1 SET a = "", d= "2003-01-14 03:54:55";
+UPDATE t1 SET d=1/NULL;
+UPDATE t1 SET d=NULL;
+--error 1048
+INSERT INTO t1 (a) values (null);
+--error 1048
+INSERT INTO t1 (a) values (1/null);
+INSERT INTO t1 (a) values (null),(null);
+--error 1048
+INSERT INTO t1 (b) values (null);
+--error 1048
+INSERT INTO t1 (b) values (1/null);
+INSERT INTO t1 (b) values (null),(null);
+--error 1048
+INSERT INTO t1 (c) values (null);
+--error 1048
+INSERT INTO t1 (c) values (1/null);
+INSERT INTO t1 (c) values (null),(null);
+--error 1048
+INSERT INTO t1 (d) values (null);
+--error 1048
+INSERT INTO t1 (d) values (1/null);
+INSERT INTO t1 (d) values (null),(null);
+select * from t1;
+drop table t1;
+
mysql-test/t/rpl_loaddata.test (new file, 16 lines)
@@ -0,0 +1,16 @@
+# See if replication of a "LOAD DATA in an autoincrement column"
+# Honours autoincrement values
+# i.e. if the master and slave have the same sequence
+source include/master-slave.inc;
+
+create table t1(a int not null auto_increment, b int, primary key(a) );
+load data infile '../../std_data/rpl_loaddata.dat' into table t1;
+save_master_pos;
+connection slave;
+sync_with_master;
+select * from t1;
+connection master;
+drop table t1;
+save_master_pos;
+connection slave;
+sync_with_master;
mysql-test/t/rpl_log-master.opt (new file, 1 line)
@@ -0,0 +1 @@
+--skip-external-locking
@@ -118,6 +118,15 @@ set_field_to_null(Field *field)
     field->reset();
     return 0;
   }
   field->reset();
+  if (current_thd->count_cuted_fields)
+  {
+    current_thd->cuted_fields++;		// Increment error counter
+    return 0;
+  }
+  if (!current_thd->no_errors)
+    my_printf_error(ER_BAD_NULL_ERROR,ER(ER_BAD_NULL_ERROR),MYF(0),
+                    field->field_name);
+  return 1;
 }
 
@@ -357,8 +357,10 @@ read_fixed_length(THD *thd,COPY_INFO &info,TABLE *table,List<Item> &fields,
 {
   List_iterator_fast<Item> it(fields);
   Item_field *sql_field;
+  ulonglong id;
   DBUG_ENTER("read_fixed_length");
 
+  id=0;
   /* No fields can be null in this format. mark all fields as not null */
   while ((sql_field= (Item_field*) it++))
     sql_field->field->set_notnull();
@@ -401,6 +403,14 @@ read_fixed_length(THD *thd,COPY_INFO &info,TABLE *table,List<Item> &fields,
       thd->cuted_fields++;			/* To long row */
     if (write_record(table,&info))
       DBUG_RETURN(1);
+    /*
+      If auto_increment values are used, save the first one
+      for LAST_INSERT_ID() and for the binary/update log.
+      We can't use insert_id() as we don't want to touch the
+      last_insert_id_used flag.
+    */
+    if (!id && thd->insert_id_used)
+      id= thd->last_insert_id;
     if (table->next_number_field)
       table->next_number_field->reset();	// Clear for next record
     if (read_info.next_line())			// Skip to next line
@@ -408,6 +418,8 @@ read_fixed_length(THD *thd,COPY_INFO &info,TABLE *table,List<Item> &fields,
     if (read_info.line_cuted)
       thd->cuted_fields++;			/* To long row */
   }
+  if (id && !read_info.error)
+    thd->insert_id(id);			// For binary/update log
   DBUG_RETURN(test(read_info.error));
 }
 
@@ -421,10 +433,12 @@ read_sep_field(THD *thd,COPY_INFO &info,TABLE *table,
   List_iterator_fast<Item> it(fields);
   Item_field *sql_field;
   uint enclosed_length;
+  ulonglong id;
   DBUG_ENTER("read_sep_field");
 
   enclosed_length=enclosed.length();
 
+  id=0;
   for (;;it.rewind())
   {
     if (thd->killed)
@@ -477,6 +491,14 @@ read_sep_field(THD *thd,COPY_INFO &info,TABLE *table,
     }
     if (write_record(table,&info))
       DBUG_RETURN(1);
+    /*
+      If auto_increment values are used, save the first one
+      for LAST_INSERT_ID() and for the binary/update log.
+      We can't use insert_id() as we don't want to touch the
+      last_insert_id_used flag.
+    */
+    if (!id && thd->insert_id_used)
+      id= thd->last_insert_id;
     if (table->next_number_field)
       table->next_number_field->reset();	// Clear for next record
     if (read_info.next_line())			// Skip to next line
@@ -484,6 +506,8 @@ read_sep_field(THD *thd,COPY_INFO &info,TABLE *table,
     if (read_info.line_cuted)
       thd->cuted_fields++;			/* To long row */
   }
+  if (id && !read_info.error)
+    thd->insert_id(id);			// For binary/update log
   DBUG_RETURN(test(read_info.error));
 }
 