Merge mysql.com:/home/my/mysql-5.1

into  mysql.com:/home/my/mysql-5.1-TDC
This commit is contained in:
monty@mysql.com 2005-11-23 22:58:53 +02:00
commit 4575a662cb
100 changed files with 3648 additions and 2112 deletions

View File

@ -1320,16 +1320,16 @@ start_master()
if [ x$DO_DDD = x1 ]
then
$ECHO "set args $master_args" > $GDB_MASTER_INIT
$ECHO "set args $master_args" > $GDB_MASTER_INIT$1
manager_launch master ddd -display $DISPLAY --debugger \
"gdb -x $GDB_MASTER_INIT" $MASTER_MYSQLD
"gdb -x $GDB_MASTER_INIT$1" $MASTER_MYSQLD
elif [ x$DO_GDB = x1 ]
then
if [ x$MANUAL_GDB = x1 ]
then
$ECHO "set args $master_args" > $GDB_MASTER_INIT
$ECHO "set args $master_args" > $GDB_MASTER_INIT$1
$ECHO "To start gdb for the master , type in another window:"
$ECHO "cd $CWD ; gdb -x $GDB_MASTER_INIT $MASTER_MYSQLD"
$ECHO "cd $CWD ; gdb -x $GDB_MASTER_INIT$1 $MASTER_MYSQLD"
wait_for_master=1500
else
( $ECHO set args $master_args;
@ -1341,9 +1341,9 @@ disa 1
end
r
EOF
fi ) > $GDB_MASTER_INIT
fi ) > $GDB_MASTER_INIT$1
manager_launch master $XTERM -display $DISPLAY \
-title "Master" -e gdb -x $GDB_MASTER_INIT $MASTER_MYSQLD
-title "Master" -e gdb -x $GDB_MASTER_INIT$1 $MASTER_MYSQLD
fi
else
manager_launch master $MASTER_MYSQLD $master_args
@ -1965,10 +1965,10 @@ then
$MYSQLADMIN --no-defaults --socket=$MASTER_MYSOCK -u root -O connect_timeout=5 -O shutdown_timeout=20 shutdown > /dev/null 2>&1
$MYSQLADMIN --no-defaults --socket=$MASTER_MYSOCK1 -u root -O connect_timeout=5 -O shutdown_timeout=20 shutdown > /dev/null 2>&1
$MYSQLADMIN --no-defaults --socket=$SLAVE_MYSOCK -u root -O connect_timeout=5 -O shutdown_timeout=20 shutdown > /dev/null 2>&1
$MYSQLADMIN --no-defaults --host=$hostname --port=$MASTER_MYPORT -u root -O connect_timeout=5 -O shutdown_timeout=20 shutdown > /dev/null 2>&1
$MYSQLADMIN --no-defaults --host=$hostname --port=`expr $MASTER_MYPORT+1` -u root -O connect_timeout=5 -O shutdown_timeout=20 shutdown > /dev/null 2>&1
$MYSQLADMIN --no-defaults --host=$hostname --port=$SLAVE_MYPORT -u root -O connect_timeout=5 -O shutdown_timeout=20 shutdown > /dev/null 2>&1
$MYSQLADMIN --no-defaults --host=$hostname --port=`expr $SLAVE_MYPORT + 1` -u root -O connect_timeout=5 -O shutdown_timeout=20 shutdown > /dev/null 2>&1
$MYSQLADMIN --no-defaults --host=$hostname --port=$MASTER_MYPORT --protocol=tcp -u root -O connect_timeout=5 -O shutdown_timeout=20 shutdown > /dev/null 2>&1
$MYSQLADMIN --no-defaults --host=$hostname --protocol=tcp --port=`expr $MASTER_MYPORT+1` -u root -O connect_timeout=5 -O shutdown_timeout=20 shutdown > /dev/null 2>&1
$MYSQLADMIN --no-defaults --host=$hostname --protocol=tcp --port=$SLAVE_MYPORT -u root -O connect_timeout=5 -O shutdown_timeout=20 shutdown > /dev/null 2>&1
$MYSQLADMIN --no-defaults --host=$hostname --protocol=tcp --port=`expr $SLAVE_MYPORT + 1` -u root -O connect_timeout=5 -O shutdown_timeout=20 shutdown > /dev/null 2>&1
sleep_until_file_deleted 0 $MASTER_MYPID
sleep_until_file_deleted 0 $MASTER_MYPID"1"
sleep_until_file_deleted 0 $SLAVE_MYPID

View File

@ -6,6 +6,37 @@ flush table t1;
check table t1;
Table Op Msg_type Msg_text
test.t1 check status OK
unlock tables;
lock table t1 read;
lock table t1 read;
flush table t1;
select * from t1;
a
1
unlock tables;
select * from t1;
a
1
unlock tables;
lock table t1 write;
lock table t1 read;
flush table t1;
select * from t1;
a
1
unlock tables;
unlock tables;
lock table t1 read;
lock table t1 write;
flush table t1;
select * from t1;
a
1
unlock tables;
unlock tables;
select * from t1;
a
1
drop table t1;
create table t1(table_id char(20) primary key);
create table t2(table_id char(20) primary key);

View File

@ -486,7 +486,6 @@ select s1 from t1 where s1 in (select version from
information_schema.tables) union select version from
information_schema.tables;
s1
0
10
drop table t1;
SHOW CREATE TABLE INFORMATION_SCHEMA.character_sets;

View File

@ -1615,7 +1615,7 @@ t2 CREATE TABLE `t2` (
) ENGINE=InnoDB DEFAULT CHARSET=latin1
drop table t2;
create table t2 (id int(11) not null, id2 int(11) not null, constraint t1_id_fk foreign key (id2,id) references t1 (id)) engine = innodb;
ERROR HY000: Can't create table './test/t2' (errno: 150)
ERROR HY000: Can't create table 'test.t2' (errno: 150)
create table t2 (a int auto_increment primary key, b int, index(b), foreign key (b) references t1(id), unique(b)) engine=innodb;
show create table t2;
Table Create Table
@ -2437,7 +2437,7 @@ a b
20 NULL
drop table t1;
create table t1 (v varchar(65530), key(v));
ERROR HY000: Can't create table './test/t1' (errno: 139)
ERROR HY000: Can't create table 'test.t1' (errno: 139)
create table t1 (v varchar(65536));
Warnings:
Note 1246 Converting column 'v' from VARCHAR to TEXT
@ -2580,19 +2580,19 @@ character set = latin1 engine = innodb;
drop table t1, t2, t3, t4, t5, t6, t7, t8, t9;
create table t1 (col1 varchar(768), index (col1))
character set = latin1 engine = innodb;
ERROR HY000: Can't create table './test/t1.frm' (errno: 139)
ERROR HY000: Can't create table 'test.t1' (errno: 139)
create table t2 (col1 varchar(768) primary key)
character set = latin1 engine = innodb;
ERROR HY000: Can't create table './test/t2.frm' (errno: 139)
ERROR HY000: Can't create table 'test.t2' (errno: 139)
create table t3 (col1 varbinary(768) primary key)
character set = latin1 engine = innodb;
ERROR HY000: Can't create table './test/t3.frm' (errno: 139)
ERROR HY000: Can't create table 'test.t3' (errno: 139)
create table t4 (col1 text, index(col1(768)))
character set = latin1 engine = innodb;
ERROR HY000: Can't create table './test/t4.frm' (errno: 139)
ERROR HY000: Can't create table 'test.t4' (errno: 139)
create table t5 (col1 blob, index(col1(768)))
character set = latin1 engine = innodb;
ERROR HY000: Can't create table './test/t5.frm' (errno: 139)
ERROR HY000: Can't create table 'test.t5' (errno: 139)
CREATE TABLE t1
(
id INT PRIMARY KEY

View File

@ -181,6 +181,9 @@ select * from t4;
ERROR HY000: All tables in the MERGE table are not identically defined
alter table t4 add column c int;
ERROR HY000: All tables in the MERGE table are not identically defined
flush tables;
select * from t4;
ERROR HY000: All tables in the MERGE table are not identically defined
create database mysqltest;
create table mysqltest.t6 (a int not null primary key auto_increment, message char(20));
create table t5 (a int not null, b char(20), key(a)) engine=MERGE UNION=(test.t1,mysqltest.t6);

View File

@ -201,18 +201,18 @@ create table t1 (
pk1 bit(9) not null primary key,
b int
) engine=ndbcluster;
ERROR HY000: Can't create table './test/t1.frm' (errno: 140)
ERROR HY000: Can't create table 'test.t1' (errno: 140)
show warnings;
Level Code Message
Error 1296 Got error 739 'Unsupported primary key length' from NDB
Error 1005 Can't create table './test/t1.frm' (errno: 140)
Error 1005 Can't create table 'test.t1' (errno: 140)
create table t1 (
pk1 int not null primary key,
b bit(9),
key(b)
) engine=ndbcluster;
ERROR HY000: Can't create table './test/t1.frm' (errno: 140)
ERROR HY000: Can't create table 'test.t1' (errno: 140)
show warnings;
Level Code Message
Error 1296 Got error 743 'Unsupported character set in table or index' from NDB
Error 1005 Can't create table './test/t1.frm' (errno: 140)
Error 1005 Can't create table 'test.t1' (errno: 140)

View File

@ -11,11 +11,11 @@ partitions 3
(partition x1 values less than (5) nodegroup 12,
partition x2 values less than (10) nodegroup 13,
partition x3 values less than (20) nodegroup 14);
ERROR HY000: Can't create table './test/t1.frm' (errno: 140)
ERROR HY000: Can't create table 'test.t1' (errno: 140)
show warnings;
Level Code Message
Error 1296 Got error 771 'Given NODEGROUP doesn't exist in this cluster' from NDB
Error 1005 Can't create table './test/t1.frm' (errno: 140)
Error 1005 Can't create table 'test.t1' (errno: 140)
CREATE TABLE t1 (
a int not null,
b int not null,

View File

@ -5,6 +5,8 @@ reset query cache;
flush status;
drop table if exists t1,t2,t3,t4,t11,t21;
drop database if exists mysqltest;
drop table if exists ```a`;
drop view if exists v1;
create table t1 (a int not null);
insert into t1 values (1),(2),(3);
select * from t1;

View File

@ -135,3 +135,14 @@ d c
bar 2
foo 1
drop table t1, t2;
create temporary table t1 (a int);
insert into t1 values (4711);
select * from t1;
a
4711
truncate t1;
insert into t1 values (42);
select * from t1;
a
42
drop table t1;

View File

@ -384,7 +384,7 @@ set sql_quote_show_create=1;
set sql_safe_updates=1;
set sql_select_limit=1;
set sql_warnings=1;
set global table_cache=100;
set global table_open_cache=100;
set storage_engine=myisam;
set global thread_cache_size=100;
set timestamp=1, timestamp=default;
@ -516,11 +516,11 @@ SET GLOBAL MYISAM_DATA_POINTER_SIZE= 7;
SHOW VARIABLES LIKE 'MYISAM_DATA_POINTER_SIZE';
Variable_name Value
myisam_data_pointer_size 7
SET GLOBAL table_cache=-1;
SHOW VARIABLES LIKE 'table_cache';
SET GLOBAL table_open_cache=-1;
SHOW VARIABLES LIKE 'table_open_cache';
Variable_name Value
table_cache 1
SET GLOBAL table_cache=DEFAULT;
table_open_cache 1
SET GLOBAL table_open_cache=DEFAULT;
set character_set_results=NULL;
select ifnull(@@character_set_results,"really null");
ifnull(@@character_set_results,"really null")

View File

@ -9,10 +9,63 @@ drop table if exists t1,t2;
--enable_warnings
create table t1 (a int not null auto_increment primary key);
insert into t1 values(0);
# Test for with read lock + flush
lock table t1 read;
flush table t1;
check table t1;
unlock tables;
# Test for with 2 read lock in different thread + flush
lock table t1 read;
connect (locker,localhost,root,,test);
connection locker;
lock table t1 read;
connection default;
send flush table t1;
connection locker;
--sleep 2
select * from t1;
unlock tables;
connection default;
reap;
select * from t1;
unlock tables;
# Test for with a write lock and a waiting read lock + flush
lock table t1 write;
connection locker;
send lock table t1 read;
connection default;
sleep 2;
flush table t1;
select * from t1;
unlock tables;
connection locker;
reap;
unlock tables;
connection default;
# Test for with a read lock and a waiting write lock + flush
lock table t1 read;
connection locker;
send lock table t1 write;
connection default;
sleep 2;
flush table t1;
select * from t1;
unlock tables;
connection locker;
reap;
unlock tables;
select * from t1;
connection default;
drop table t1;
disconnect locker;
#
# In the following test FLUSH TABLES produces a deadlock

View File

@ -51,6 +51,9 @@ create table t4 (a int not null, b char(10), key(a)) engine=MERGE UNION=(t1,t2);
select * from t4;
--error 1168
alter table t4 add column c int;
flush tables;
--error 1168
select * from t4;
#
# Test tables in different databases

View File

@ -495,6 +495,11 @@ delete t1,t2 from t2,t1 where t1.a<'B' and t2.b=t1.b;
drop table t1,t2;
#
# Test alter table and a concurrent multi update
# (This will force update to reopen tables)
#
create table t1 (a int, b int);
insert into t1 values (1, 2), (2, 3), (3, 4);
create table t2 (a int);
@ -511,6 +516,7 @@ send alter table t1 add column c int default 100 after a;
connect (updater,localhost,root,,test);
connection updater;
sleep 2;
send update t1, v1 set t1.b=t1.a+t1.b+v1.b where t1.a=v1.a;
connection locker;

View File

@ -14,6 +14,10 @@ flush status;
--disable_warnings
drop table if exists t1,t2,t3,t4,t11,t21;
drop database if exists mysqltest;
# Fix possible left overs from other tests
drop table if exists ```a`;
drop view if exists v1;
--enable_warnings
#

View File

@ -115,3 +115,15 @@ select d, c from t1 left join t2 on b = c where a = 3 order by d;
drop table t1, t2;
# End of 4.1 tests
#
# Test truncate with temporary tables
#
create temporary table t1 (a int);
insert into t1 values (4711);
select * from t1;
truncate t1;
insert into t1 values (42);
select * from t1;
drop table t1;

View File

@ -258,7 +258,7 @@ set sql_quote_show_create=1;
set sql_safe_updates=1;
set sql_select_limit=1;
set sql_warnings=1;
set global table_cache=100;
set global table_open_cache=100;
set storage_engine=myisam;
set global thread_cache_size=100;
set timestamp=1, timestamp=default;
@ -390,9 +390,9 @@ SHOW VARIABLES LIKE 'MYISAM_DATA_POINTER_SIZE';
# Bug #6958: negative arguments to integer options wrap around
#
SET GLOBAL table_cache=-1;
SHOW VARIABLES LIKE 'table_cache';
SET GLOBAL table_cache=DEFAULT;
SET GLOBAL table_open_cache=-1;
SHOW VARIABLES LIKE 'table_open_cache';
SET GLOBAL table_open_cache=DEFAULT;
#
# Bugs12363: character_set_results is nullable,

View File

@ -152,3 +152,15 @@
obj:*/libz.so.*
fun:gzflush
}
#
# Warning from my_thread_init because mysqld dies before kill thread exits
#
{
my_thread_init kill thread memory loss second
Memcheck:Leak
fun:calloc
fun:my_thread_init
fun:kill_server_thread
}

View File

@ -109,7 +109,7 @@ static inline void hash_free_elements(HASH *hash)
void hash_free(HASH *hash)
{
DBUG_ENTER("hash_free");
DBUG_PRINT("enter",("hash: 0x%lxd",hash));
DBUG_PRINT("enter",("hash: 0x%lx", hash));
hash_free_elements(hash);
hash->free= 0;

View File

@ -73,7 +73,7 @@ uint dirname_part(my_string to, const char *name)
SYNPOSIS
convert_dirname()
to Store result here
from Original filename
from Original filename. May be == to
from_end Pointer at end of filename (normally end \0)
IMPLEMENTATION
@ -101,6 +101,7 @@ char *convert_dirname(char *to, const char *from, const char *from_end)
#ifdef BACKSLASH_MBTAIL
CHARSET_INFO *fs= fs_character_set();
#endif
DBUG_ENTER("convert_dirname");
/* We use -2 here, because we need space for the last FN_LIBCHAR */
if (!from_end || (from_end - from) > FN_REFLEN-2)
@ -149,5 +150,5 @@ char *convert_dirname(char *to, const char *from, const char *from_end)
*to++=FN_LIBCHAR;
*to=0;
}
return to; /* Pointer to end of dir */
DBUG_RETURN(to); /* Pointer to end of dir */
} /* convert_dirname */

View File

@ -107,16 +107,27 @@ void pack_dirname(my_string to, const char *from)
} /* pack_dirname */
/* remove unwanted chars from dirname */
/* if "/../" removes prev dir; "/~/" removes all before ~ */
/* "//" is same as "/", except on Win32 at start of a file */
/* "/./" is removed */
/* Unpacks home_dir if "~/.." used */
/* Unpacks current dir if if "./.." used */
/*
remove unwanted chars from dirname
SYNOPSIS
cleanup_dirname()
to Store result here
from Dirname to fix. May be same as to
IMPLEMENTATION
"/../" removes prev dir
"/~/" removes all before ~
//" is same as "/", except on Win32 at start of a file
"/./" is removed
Unpacks home_dir if "~/.." used
Unpacks current dir if "./.." used
RETURN
# length of new name
*/
uint cleanup_dirname(register my_string to, const char *from)
/* to may be == from */
{
reg5 uint length;
reg2 my_string pos;

View File

@ -28,9 +28,12 @@ my_bool init_tmpdir(MY_TMPDIR *tmpdir, const char *pathlist)
char *end, *copy;
char buff[FN_REFLEN];
DYNAMIC_ARRAY t_arr;
DBUG_ENTER("init_tmpdir");
DBUG_PRINT("enter", ("pathlist: %s", pathlist ? pathlist : "NULL"));
pthread_mutex_init(&tmpdir->mutex, MY_MUTEX_INIT_FAST);
if (my_init_dynamic_array(&t_arr, sizeof(char*), 1, 5))
return TRUE;
goto err;
if (!pathlist || !pathlist[0])
{
/* Get default temporary directory */
@ -46,12 +49,13 @@ my_bool init_tmpdir(MY_TMPDIR *tmpdir, const char *pathlist)
}
do
{
uint length;
end=strcend(pathlist, DELIM);
convert_dirname(buff, pathlist, end);
if (!(copy=my_strdup(buff, MYF(MY_WME))))
return TRUE;
if (insert_dynamic(&t_arr, (gptr)&copy))
return TRUE;
strmake(buff, pathlist, (uint) (end-pathlist));
length= cleanup_dirname(buff, buff);
if (!(copy= my_strdup_with_length(buff, length, MYF(MY_WME))) ||
insert_dynamic(&t_arr, (gptr) &copy))
DBUG_RETURN(TRUE)
pathlist=end+1;
}
while (*end);
@ -59,12 +63,20 @@ my_bool init_tmpdir(MY_TMPDIR *tmpdir, const char *pathlist)
tmpdir->list=(char **)t_arr.buffer;
tmpdir->max=t_arr.elements-1;
tmpdir->cur=0;
return FALSE;
DBUG_RETURN(FALSE);
err:
delete_dynamic(&t_arr); /* Safe to free */
pthread_mutex_destroy(&tmpdir->mutex);
DBUG_RETURN(TRUE);
}
char *my_tmpdir(MY_TMPDIR *tmpdir)
{
char *dir;
if (!tmpdir->max)
return tmpdir->list[0];
pthread_mutex_lock(&tmpdir->mutex);
dir=tmpdir->list[tmpdir->cur];
tmpdir->cur= (tmpdir->cur == tmpdir->max) ? 0 : tmpdir->cur+1;

View File

@ -396,6 +396,7 @@ char *strdup_root(MEM_ROOT *root,const char *str)
return strmake_root(root, str, (uint) strlen(str));
}
char *strmake_root(MEM_ROOT *root,const char *str, uint len)
{
char *pos;

View File

@ -219,7 +219,7 @@ static handler* example_create_handler(TABLE *table)
}
ha_example::ha_example(TABLE *table_arg)
ha_example::ha_example(TABLE_SHARE *table_arg)
:handler(&example_hton, table_arg)
{}

View File

@ -45,7 +45,7 @@ class ha_example: public handler
EXAMPLE_SHARE *share; /* Shared lock info */
public:
ha_example(TABLE *table_arg);
ha_example(TABLE_SHARE *table_arg);
~ha_example()
{
}

View File

@ -55,7 +55,7 @@ TODO:
pthread_mutex_t tina_mutex;
static HASH tina_open_tables;
static int tina_init= 0;
static handler* tina_create_handler(TABLE *table);
static handler *tina_create_handler(TABLE_SHARE *table);
handlerton tina_hton= {
"CSV",
@ -285,17 +285,17 @@ byte * find_eoln(byte *data, off_t begin, off_t end)
}
static handler* tina_create_handler(TABLE *table)
static handler *tina_create_handler(TABLE_SHARE *table)
{
return new ha_tina(table);
}
ha_tina::ha_tina(TABLE *table_arg)
ha_tina::ha_tina(TABLE_SHARE *table_arg)
:handler(&tina_hton, table_arg),
/*
These definitions are found in hanler.h
These are not probably completely right.
These definitions are found in handler.h
They are not probably completely right.
*/
current_position(0), next_position(0), chain_alloced(0),
chain_size(DEFAULT_CHAIN_LENGTH)
@ -308,6 +308,7 @@ ha_tina::ha_tina(TABLE *table_arg)
/*
Encode a buffer into the quoted format.
*/
int ha_tina::encode_quote(byte *buf)
{
char attribute_buffer[1024];

View File

@ -55,7 +55,7 @@ class ha_tina: public handler
uint32 chain_size;
public:
ha_tina(TABLE *table_arg);
ha_tina(TABLE_SHARE *table_arg);
~ha_tina()
{
if (chain_alloced)

View File

@ -1024,10 +1024,9 @@ bool Field::type_can_have_key_part(enum enum_field_types type)
Field_num::Field_num(char *ptr_arg,uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg, utype unireg_check_arg,
const char *field_name_arg,
struct st_table *table_arg,
uint8 dec_arg, bool zero_arg, bool unsigned_arg)
:Field(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
unireg_check_arg, field_name_arg, table_arg),
unireg_check_arg, field_name_arg),
dec(dec_arg),zerofill(zero_arg),unsigned_flag(unsigned_arg)
{
if (zerofill)
@ -1216,16 +1215,11 @@ String *Field::val_int_as_str(String *val_buffer, my_bool unsigned_val)
}
/* This is used as a table name when the table structure is not set up */
const char *unknown_table_name= 0;
Field::Field(char *ptr_arg,uint32 length_arg,uchar *null_ptr_arg,
uchar null_bit_arg,
utype unireg_check_arg, const char *field_name_arg,
struct st_table *table_arg)
utype unireg_check_arg, const char *field_name_arg)
:ptr(ptr_arg),null_ptr(null_ptr_arg),
table(table_arg),orig_table(table_arg),
table_name(table_arg ? &table_arg->alias : &unknown_table_name),
table(0), orig_table(0), table_name(0),
field_name(field_name_arg),
query_id(0), key_start(0), part_of_key(0), part_of_sortkey(0),
unireg_check(unireg_check_arg),
@ -1282,10 +1276,10 @@ void Field_num::add_zerofill_and_unsigned(String &res) const
void Field::make_field(Send_field *field)
{
if (orig_table->s->table_cache_key && *(orig_table->s->table_cache_key))
if (orig_table->s->db.str && *orig_table->s->db.str)
{
field->org_table_name= orig_table->s->table_name;
field->db_name= orig_table->s->table_cache_key;
field->db_name= orig_table->s->db.str;
field->org_table_name= orig_table->s->table_name.str;
}
else
field->org_table_name= field->db_name= "";
@ -1393,10 +1387,9 @@ my_decimal* Field_num::val_decimal(my_decimal *decimal_value)
Field_str::Field_str(char *ptr_arg,uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg, utype unireg_check_arg,
const char *field_name_arg,
struct st_table *table_arg,CHARSET_INFO *charset)
const char *field_name_arg, CHARSET_INFO *charset)
:Field(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
unireg_check_arg, field_name_arg, table_arg)
unireg_check_arg, field_name_arg)
{
field_charset=charset;
if (charset->state & MY_CS_BINSORT)
@ -1529,7 +1522,7 @@ Field *Field::new_field(MEM_ROOT *root, struct st_table *new_table)
tmp->key_start.init(0);
tmp->part_of_key.init(0);
tmp->part_of_sortkey.init(0);
tmp->unireg_check=Field::NONE;
tmp->unireg_check= Field::NONE;
tmp->flags&= (NOT_NULL_FLAG | BLOB_FLAG | UNSIGNED_FLAG |
ZEROFILL_FLAG | BINARY_FLAG | ENUM_FLAG | SET_FLAG);
tmp->reset_fields();
@ -1651,6 +1644,21 @@ bool Field::needs_quotes(void)
}
/* This is used to generate a field in TABLE from TABLE_SHARE */
Field *Field::clone(MEM_ROOT *root, struct st_table *new_table)
{
Field *tmp;
if ((tmp= (Field*) memdup_root(root,(char*) this,size_of())))
{
tmp->init(new_table);
tmp->move_field_offset((my_ptrdiff_t) (new_table->record[0] -
new_table->s->default_values));
}
return tmp;
}
/****************************************************************************
Field_null, a field that always return NULL
****************************************************************************/
@ -2280,13 +2288,10 @@ Field_new_decimal::Field_new_decimal(char *ptr_arg,
uchar null_bit_arg,
enum utype unireg_check_arg,
const char *field_name_arg,
struct st_table *table_arg,
uint8 dec_arg,bool zero_arg,
bool unsigned_arg)
:Field_num(ptr_arg, len_arg,
null_ptr_arg, null_bit_arg,
unireg_check_arg, field_name_arg, table_arg,
dec_arg, zero_arg, unsigned_arg)
:Field_num(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
unireg_check_arg, field_name_arg, dec_arg, zero_arg, unsigned_arg)
{
precision= my_decimal_length_to_precision(len_arg, dec_arg, unsigned_arg);
DBUG_ASSERT((precision <= DECIMAL_MAX_PRECISION) &&
@ -2298,14 +2303,11 @@ Field_new_decimal::Field_new_decimal(char *ptr_arg,
Field_new_decimal::Field_new_decimal(uint32 len_arg,
bool maybe_null,
const char *name,
struct st_table *t_arg,
uint8 dec_arg,
bool unsigned_arg)
:Field_num((char*) 0, len_arg,
maybe_null ? (uchar*) "": 0, 0,
NONE, name, t_arg,
dec_arg,
0, unsigned_arg)
NONE, name, dec_arg, 0, unsigned_arg)
{
precision= my_decimal_length_to_precision(len_arg, dec_arg, unsigned_arg);
DBUG_ASSERT((precision <= DECIMAL_MAX_PRECISION) &&
@ -4447,19 +4449,18 @@ Field_timestamp::Field_timestamp(char *ptr_arg, uint32 len_arg,
uchar *null_ptr_arg, uchar null_bit_arg,
enum utype unireg_check_arg,
const char *field_name_arg,
struct st_table *table_arg,
TABLE_SHARE *share,
CHARSET_INFO *cs)
:Field_str(ptr_arg, 19, null_ptr_arg, null_bit_arg,
unireg_check_arg, field_name_arg, table_arg, cs)
unireg_check_arg, field_name_arg, cs)
{
/* For 4.0 MYD and 4.0 InnoDB compatibility */
flags|= ZEROFILL_FLAG | UNSIGNED_FLAG;
if (table && !table->timestamp_field &&
unireg_check != NONE)
if (!share->timestamp_field && unireg_check != NONE)
{
/* This timestamp has auto-update */
table->timestamp_field= this;
flags|=TIMESTAMP_FLAG;
share->timestamp_field= this;
flags|= TIMESTAMP_FLAG;
}
}
@ -6217,8 +6218,7 @@ uint Field_string::max_packed_col_length(uint max_length)
Field *Field_string::new_field(MEM_ROOT *root, struct st_table *new_table)
{
Field *new_field;
Field *field;
if (type() != MYSQL_TYPE_VAR_STRING || table == new_table)
return Field::new_field(root, new_table);
@ -6227,19 +6227,23 @@ Field *Field_string::new_field(MEM_ROOT *root, struct st_table *new_table)
This is done to ensure that ALTER TABLE will convert old VARCHAR fields
to now VARCHAR fields.
*/
if (new_field= new Field_varstring(field_length, maybe_null(),
field_name, new_table, charset()))
if ((field= new Field_varstring(field_length, maybe_null(), field_name,
new_table->s, charset())))
{
field->init(new_table);
/*
delayed_insert::get_local_table() needs a ptr copied from old table.
This is what other new_field() methods do too. The above method of
Field_varstring sets ptr to NULL.
*/
new_field->ptr= ptr;
field->ptr= ptr;
field->null_ptr= null_ptr;
field->null_bit= null_bit;
}
return new_field;
return field;
}
/****************************************************************************
VARCHAR type
Data in field->ptr is stored as:
@ -6781,16 +6785,15 @@ uint Field_varstring::is_equal(create_field *new_field)
Field_blob::Field_blob(char *ptr_arg, uchar *null_ptr_arg, uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
struct st_table *table_arg,uint blob_pack_length,
TABLE_SHARE *share, uint blob_pack_length,
CHARSET_INFO *cs)
:Field_longstr(ptr_arg, BLOB_PACK_LENGTH_TO_MAX_LENGH(blob_pack_length),
null_ptr_arg, null_bit_arg, unireg_check_arg, field_name_arg,
table_arg, cs),
null_ptr_arg, null_bit_arg, unireg_check_arg, field_name_arg,
cs),
packlength(blob_pack_length)
{
flags|= BLOB_FLAG;
if (table)
table->s->blob_fields++;
share->blob_fields++;
}
@ -7958,10 +7961,9 @@ uint Field_num::is_equal(create_field *new_field)
Field_bit::Field_bit(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg, uchar *bit_ptr_arg, uchar bit_ofs_arg,
enum utype unireg_check_arg, const char *field_name_arg,
struct st_table *table_arg)
enum utype unireg_check_arg, const char *field_name_arg)
: Field(ptr_arg, len_arg >> 3, null_ptr_arg, null_bit_arg,
unireg_check_arg, field_name_arg, table_arg),
unireg_check_arg, field_name_arg),
bit_ptr(bit_ptr_arg), bit_ofs(bit_ofs_arg), bit_len(len_arg & 7)
{
/*
@ -8228,11 +8230,10 @@ Field_bit_as_char::Field_bit_as_char(char *ptr_arg, uint32 len_arg,
uchar *null_ptr_arg, uchar null_bit_arg,
uchar *bit_ptr_arg, uchar bit_ofs_arg,
enum utype unireg_check_arg,
const char *field_name_arg,
struct st_table *table_arg)
: Field_bit(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, bit_ptr_arg,
bit_ofs_arg, unireg_check_arg, field_name_arg, table_arg),
create_length(len_arg)
const char *field_name_arg)
:Field_bit(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, bit_ptr_arg,
bit_ofs_arg, unireg_check_arg, field_name_arg),
create_length(len_arg)
{
bit_ptr= 0;
bit_ofs= 0;
@ -8426,7 +8427,7 @@ uint pack_length_to_packflag(uint type)
}
Field *make_field(char *ptr, uint32 field_length,
Field *make_field(TABLE_SHARE *share, char *ptr, uint32 field_length,
uchar *null_pos, uchar null_bit,
uint pack_flag,
enum_field_types field_type,
@ -8434,8 +8435,7 @@ Field *make_field(char *ptr, uint32 field_length,
Field::geometry_type geom_type,
Field::utype unireg_check,
TYPELIB *interval,
const char *field_name,
struct st_table *table)
const char *field_name)
{
uchar *bit_ptr;
uchar bit_offset;
@ -8481,13 +8481,14 @@ Field *make_field(char *ptr, uint32 field_length,
field_type == FIELD_TYPE_DECIMAL || // 3.23 or 4.0 string
field_type == MYSQL_TYPE_VAR_STRING)
return new Field_string(ptr,field_length,null_pos,null_bit,
unireg_check, field_name, table,
unireg_check, field_name,
field_charset);
if (field_type == MYSQL_TYPE_VARCHAR)
return new Field_varstring(ptr,field_length,
HA_VARCHAR_PACKLENGTH(field_length),
null_pos,null_bit,
unireg_check, field_name, table,
unireg_check, field_name,
share,
field_charset);
return 0; // Error
}
@ -8499,22 +8500,22 @@ Field *make_field(char *ptr, uint32 field_length,
#ifdef HAVE_SPATIAL
if (f_is_geom(pack_flag))
return new Field_geom(ptr,null_pos,null_bit,
unireg_check, field_name, table,
unireg_check, field_name, share,
pack_length, geom_type);
#endif
if (f_is_blob(pack_flag))
return new Field_blob(ptr,null_pos,null_bit,
unireg_check, field_name, table,
unireg_check, field_name, share,
pack_length, field_charset);
if (interval)
{
if (f_is_enum(pack_flag))
return new Field_enum(ptr,field_length,null_pos,null_bit,
unireg_check, field_name, table,
unireg_check, field_name,
pack_length, interval, field_charset);
else
return new Field_set(ptr,field_length,null_pos,null_bit,
unireg_check, field_name, table,
unireg_check, field_name,
pack_length, interval, field_charset);
}
}
@ -8522,80 +8523,82 @@ Field *make_field(char *ptr, uint32 field_length,
switch (field_type) {
case FIELD_TYPE_DECIMAL:
return new Field_decimal(ptr,field_length,null_pos,null_bit,
unireg_check, field_name, table,
unireg_check, field_name,
f_decimals(pack_flag),
f_is_zerofill(pack_flag) != 0,
f_is_dec(pack_flag) == 0);
case FIELD_TYPE_NEWDECIMAL:
return new Field_new_decimal(ptr,field_length,null_pos,null_bit,
unireg_check, field_name, table,
unireg_check, field_name,
f_decimals(pack_flag),
f_is_zerofill(pack_flag) != 0,
f_is_dec(pack_flag) == 0);
case FIELD_TYPE_FLOAT:
return new Field_float(ptr,field_length,null_pos,null_bit,
unireg_check, field_name, table,
unireg_check, field_name,
f_decimals(pack_flag),
f_is_zerofill(pack_flag) != 0,
f_is_dec(pack_flag)== 0);
case FIELD_TYPE_DOUBLE:
return new Field_double(ptr,field_length,null_pos,null_bit,
unireg_check, field_name, table,
unireg_check, field_name,
f_decimals(pack_flag),
f_is_zerofill(pack_flag) != 0,
f_is_dec(pack_flag)== 0);
case FIELD_TYPE_TINY:
return new Field_tiny(ptr,field_length,null_pos,null_bit,
unireg_check, field_name, table,
unireg_check, field_name,
f_is_zerofill(pack_flag) != 0,
f_is_dec(pack_flag) == 0);
case FIELD_TYPE_SHORT:
return new Field_short(ptr,field_length,null_pos,null_bit,
unireg_check, field_name, table,
unireg_check, field_name,
f_is_zerofill(pack_flag) != 0,
f_is_dec(pack_flag) == 0);
case FIELD_TYPE_INT24:
return new Field_medium(ptr,field_length,null_pos,null_bit,
unireg_check, field_name, table,
unireg_check, field_name,
f_is_zerofill(pack_flag) != 0,
f_is_dec(pack_flag) == 0);
case FIELD_TYPE_LONG:
return new Field_long(ptr,field_length,null_pos,null_bit,
unireg_check, field_name, table,
unireg_check, field_name,
f_is_zerofill(pack_flag) != 0,
f_is_dec(pack_flag) == 0);
case FIELD_TYPE_LONGLONG:
return new Field_longlong(ptr,field_length,null_pos,null_bit,
unireg_check, field_name, table,
unireg_check, field_name,
f_is_zerofill(pack_flag) != 0,
f_is_dec(pack_flag) == 0);
case FIELD_TYPE_TIMESTAMP:
return new Field_timestamp(ptr,field_length, null_pos, null_bit,
unireg_check, field_name, table,
unireg_check, field_name, share,
field_charset);
case FIELD_TYPE_YEAR:
return new Field_year(ptr,field_length,null_pos,null_bit,
unireg_check, field_name, table);
unireg_check, field_name);
case FIELD_TYPE_DATE:
return new Field_date(ptr,null_pos,null_bit,
unireg_check, field_name, table, field_charset);
unireg_check, field_name, field_charset);
case FIELD_TYPE_NEWDATE:
return new Field_newdate(ptr,null_pos,null_bit,
unireg_check, field_name, table, field_charset);
unireg_check, field_name, field_charset);
case FIELD_TYPE_TIME:
return new Field_time(ptr,null_pos,null_bit,
unireg_check, field_name, table, field_charset);
unireg_check, field_name, field_charset);
case FIELD_TYPE_DATETIME:
return new Field_datetime(ptr,null_pos,null_bit,
unireg_check, field_name, table, field_charset);
unireg_check, field_name, field_charset);
case FIELD_TYPE_NULL:
return new Field_null(ptr,field_length,unireg_check,field_name,table, field_charset);
return new Field_null(ptr, field_length, unireg_check, field_name,
field_charset);
case FIELD_TYPE_BIT:
return f_bit_as_char(pack_flag) ?
new Field_bit_as_char(ptr, field_length, null_pos, null_bit, bit_ptr,
bit_offset, unireg_check, field_name, table) :
new Field_bit(ptr, field_length, null_pos, null_bit, bit_ptr,
bit_offset, unireg_check, field_name, table);
new Field_bit_as_char(ptr, field_length, null_pos, null_bit,
bit_ptr, bit_offset, unireg_check, field_name) :
new Field_bit(ptr, field_length, null_pos, null_bit, bit_ptr,
bit_offset, unireg_check, field_name);
default: // Impossible (Wrong version)
break;
}
@ -8677,14 +8680,15 @@ create_field::create_field(Field *old_field,Field *orig_field)
char buff[MAX_FIELD_WIDTH],*pos;
String tmp(buff,sizeof(buff), charset), *res;
my_ptrdiff_t diff;
bool is_null;
/* Get the value from default_values */
diff= (my_ptrdiff_t) (orig_field->table->s->default_values-
orig_field->table->record[0]);
orig_field->move_field(diff); // Points now at default_values
bool is_null=orig_field->is_real_null();
orig_field->move_field_offset(diff); // Points now at default_values
is_null= orig_field->is_real_null();
res= orig_field->val_str(&tmp);
orig_field->move_field(-diff); // Back to record[0]
orig_field->move_field_offset(-diff); // Back to record[0]
if (!is_null)
{
pos= (char*) sql_strmake(res->ptr(), res->length());

View File

@ -96,8 +96,7 @@ public:
uchar null_bit; // Bit used to test null bit
Field(char *ptr_arg,uint32 length_arg,uchar *null_ptr_arg,uchar null_bit_arg,
utype unireg_check_arg, const char *field_name_arg,
struct st_table *table_arg);
utype unireg_check_arg, const char *field_name_arg);
virtual ~Field() {}
/* Store functions returns 1 on overflow and -1 on fatal error */
virtual int store(const char *to,uint length,CHARSET_INFO *cs)=0;
@ -215,12 +214,13 @@ public:
virtual Field *new_key_field(MEM_ROOT *root, struct st_table *new_table,
char *new_ptr, uchar *new_null_ptr,
uint new_null_bit);
Field *clone(MEM_ROOT *mem_root, struct st_table *new_table);
inline void move_field(char *ptr_arg,uchar *null_ptr_arg,uchar null_bit_arg)
{
ptr=ptr_arg; null_ptr=null_ptr_arg; null_bit=null_bit_arg;
}
inline void move_field(char *ptr_arg) { ptr=ptr_arg; }
inline void move_field(my_ptrdiff_t ptr_diff)
virtual void move_field_offset(my_ptrdiff_t ptr_diff)
{
ptr=ADD_TO_PTR(ptr,ptr_diff,char*);
if (null_ptr)
@ -315,6 +315,12 @@ public:
return (op_result == E_DEC_OVERFLOW);
}
int warn_if_overflow(int op_result);
void init(TABLE *table_arg)
{
orig_table= table= table_arg;
table_name= &table_arg->alias;
}
/* maximum possible display length */
virtual uint32 max_length()= 0;
@ -347,7 +353,6 @@ public:
Field_num(char *ptr_arg,uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg, utype unireg_check_arg,
const char *field_name_arg,
struct st_table *table_arg,
uint8 dec_arg, bool zero_arg, bool unsigned_arg);
Item_result result_type () const { return REAL_RESULT; }
void prepend_zeros(String *value);
@ -369,8 +374,7 @@ protected:
public:
Field_str(char *ptr_arg,uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg, utype unireg_check_arg,
const char *field_name_arg,
struct st_table *table_arg, CHARSET_INFO *charset);
const char *field_name_arg, CHARSET_INFO *charset);
Item_result result_type () const { return STRING_RESULT; }
uint decimals() const { return NOT_FIXED_DEC; }
int store(double nr);
@ -395,10 +399,9 @@ class Field_longstr :public Field_str
public:
Field_longstr(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg, utype unireg_check_arg,
const char *field_name_arg,
struct st_table *table_arg,CHARSET_INFO *charset)
const char *field_name_arg, CHARSET_INFO *charset)
:Field_str(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, unireg_check_arg,
field_name_arg, table_arg, charset)
field_name_arg, charset)
{}
int store_decimal(const my_decimal *d);
@ -407,17 +410,13 @@ public:
/* base class for float and double and decimal (old one) */
class Field_real :public Field_num {
public:
Field_real(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg, utype unireg_check_arg,
const char *field_name_arg,
struct st_table *table_arg,
uint8 dec_arg, bool zero_arg, bool unsigned_arg)
:Field_num(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, unireg_check_arg,
field_name_arg, table_arg, dec_arg, zero_arg, unsigned_arg)
field_name_arg, dec_arg, zero_arg, unsigned_arg)
{}
int store_decimal(const my_decimal *);
my_decimal *val_decimal(my_decimal *);
};
@ -428,10 +427,9 @@ public:
Field_decimal(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
struct st_table *table_arg,
uint8 dec_arg,bool zero_arg,bool unsigned_arg)
:Field_real(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
unireg_check_arg, field_name_arg, table_arg,
unireg_check_arg, field_name_arg,
dec_arg, zero_arg, unsigned_arg)
{}
enum_field_types type() const { return FIELD_TYPE_DECIMAL;}
@ -468,11 +466,9 @@ public:
Field_new_decimal(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
struct st_table *table_arg,
uint8 dec_arg, bool zero_arg, bool unsigned_arg);
Field_new_decimal(uint32 len_arg, bool maybe_null_arg,
const char *field_name_arg,
struct st_table *table_arg, uint8 dec_arg,
const char *field_name_arg, uint8 dec_arg,
bool unsigned_arg);
enum_field_types type() const { return FIELD_TYPE_NEWDECIMAL;}
enum ha_base_keytype key_type() const { return HA_KEYTYPE_BINARY; }
@ -503,10 +499,9 @@ public:
Field_tiny(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
struct st_table *table_arg,
bool zero_arg, bool unsigned_arg)
:Field_num(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
unireg_check_arg, field_name_arg, table_arg,
unireg_check_arg, field_name_arg,
0, zero_arg,unsigned_arg)
{}
enum Item_result result_type () const { return INT_RESULT; }
@ -534,16 +529,15 @@ public:
Field_short(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
struct st_table *table_arg,
bool zero_arg, bool unsigned_arg)
:Field_num(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
unireg_check_arg, field_name_arg, table_arg,
unireg_check_arg, field_name_arg,
0, zero_arg,unsigned_arg)
{}
Field_short(uint32 len_arg,bool maybe_null_arg, const char *field_name_arg,
struct st_table *table_arg,bool unsigned_arg)
bool unsigned_arg)
:Field_num((char*) 0, len_arg, maybe_null_arg ? (uchar*) "": 0,0,
NONE, field_name_arg, table_arg,0,0,unsigned_arg)
NONE, field_name_arg, 0, 0, unsigned_arg)
{}
enum Item_result result_type () const { return INT_RESULT; }
enum_field_types type() const { return FIELD_TYPE_SHORT;}
@ -570,10 +564,9 @@ public:
Field_medium(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
struct st_table *table_arg,
bool zero_arg, bool unsigned_arg)
:Field_num(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
unireg_check_arg, field_name_arg, table_arg,
unireg_check_arg, field_name_arg,
0, zero_arg,unsigned_arg)
{}
enum Item_result result_type () const { return INT_RESULT; }
@ -601,16 +594,15 @@ public:
Field_long(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
struct st_table *table_arg,
bool zero_arg, bool unsigned_arg)
:Field_num(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
unireg_check_arg, field_name_arg, table_arg,
unireg_check_arg, field_name_arg,
0, zero_arg,unsigned_arg)
{}
Field_long(uint32 len_arg,bool maybe_null_arg, const char *field_name_arg,
struct st_table *table_arg,bool unsigned_arg)
bool unsigned_arg)
:Field_num((char*) 0, len_arg, maybe_null_arg ? (uchar*) "": 0,0,
NONE, field_name_arg, table_arg,0,0,unsigned_arg)
NONE, field_name_arg,0,0,unsigned_arg)
{}
enum Item_result result_type () const { return INT_RESULT; }
enum_field_types type() const { return FIELD_TYPE_LONG;}
@ -638,17 +630,16 @@ public:
Field_longlong(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
struct st_table *table_arg,
bool zero_arg, bool unsigned_arg)
:Field_num(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
unireg_check_arg, field_name_arg, table_arg,
unireg_check_arg, field_name_arg,
0, zero_arg,unsigned_arg)
{}
Field_longlong(uint32 len_arg,bool maybe_null_arg,
const char *field_name_arg,
struct st_table *table_arg, bool unsigned_arg)
bool unsigned_arg)
:Field_num((char*) 0, len_arg, maybe_null_arg ? (uchar*) "": 0,0,
NONE, field_name_arg, table_arg,0,0,unsigned_arg)
NONE, field_name_arg,0,0,unsigned_arg)
{}
enum Item_result result_type () const { return INT_RESULT; }
enum_field_types type() const { return FIELD_TYPE_LONGLONG;}
@ -677,16 +668,15 @@ public:
Field_float(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
struct st_table *table_arg,
uint8 dec_arg,bool zero_arg,bool unsigned_arg)
:Field_real(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
unireg_check_arg, field_name_arg, table_arg,
unireg_check_arg, field_name_arg,
dec_arg, zero_arg, unsigned_arg)
{}
Field_float(uint32 len_arg, bool maybe_null_arg, const char *field_name_arg,
struct st_table *table_arg, uint8 dec_arg)
uint8 dec_arg)
:Field_real((char*) 0, len_arg, maybe_null_arg ? (uchar*) "": 0, (uint) 0,
NONE, field_name_arg, table_arg, dec_arg, 0, 0)
NONE, field_name_arg, dec_arg, 0, 0)
{}
enum_field_types type() const { return FIELD_TYPE_FLOAT;}
enum ha_base_keytype key_type() const { return HA_KEYTYPE_FLOAT; }
@ -711,16 +701,15 @@ public:
Field_double(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
struct st_table *table_arg,
uint8 dec_arg,bool zero_arg,bool unsigned_arg)
:Field_real(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
unireg_check_arg, field_name_arg, table_arg,
unireg_check_arg, field_name_arg,
dec_arg, zero_arg, unsigned_arg)
{}
Field_double(uint32 len_arg, bool maybe_null_arg, const char *field_name_arg,
struct st_table *table_arg, uint8 dec_arg)
uint8 dec_arg)
:Field_real((char*) 0, len_arg, maybe_null_arg ? (uchar*) "": 0, (uint) 0,
NONE, field_name_arg, table_arg, dec_arg, 0, 0)
NONE, field_name_arg, dec_arg, 0, 0)
{}
enum_field_types type() const { return FIELD_TYPE_DOUBLE;}
enum ha_base_keytype key_type() const { return HA_KEYTYPE_DOUBLE; }
@ -747,9 +736,9 @@ class Field_null :public Field_str {
public:
Field_null(char *ptr_arg, uint32 len_arg,
enum utype unireg_check_arg, const char *field_name_arg,
struct st_table *table_arg, CHARSET_INFO *cs)
CHARSET_INFO *cs)
:Field_str(ptr_arg, len_arg, null, 1,
unireg_check_arg, field_name_arg, table_arg, cs)
unireg_check_arg, field_name_arg, cs)
{}
enum_field_types type() const { return FIELD_TYPE_NULL;}
int store(const char *to, uint length, CHARSET_INFO *cs)
@ -777,8 +766,7 @@ public:
Field_timestamp(char *ptr_arg, uint32 len_arg,
uchar *null_ptr_arg, uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
struct st_table *table_arg,
CHARSET_INFO *cs);
TABLE_SHARE *share, CHARSET_INFO *cs);
enum_field_types type() const { return FIELD_TYPE_TIMESTAMP;}
enum ha_base_keytype key_type() const { return HA_KEYTYPE_ULONG_INT; }
enum Item_result cmp_type () const { return INT_RESULT; }
@ -828,10 +816,9 @@ class Field_year :public Field_tiny {
public:
Field_year(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
struct st_table *table_arg)
enum utype unireg_check_arg, const char *field_name_arg)
:Field_tiny(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
unireg_check_arg, field_name_arg, table_arg, 1, 1)
unireg_check_arg, field_name_arg, 1, 1)
{}
enum_field_types type() const { return FIELD_TYPE_YEAR;}
int store(const char *to,uint length,CHARSET_INFO *charset);
@ -850,14 +837,14 @@ class Field_date :public Field_str {
public:
Field_date(char *ptr_arg, uchar *null_ptr_arg, uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
struct st_table *table_arg, CHARSET_INFO *cs)
CHARSET_INFO *cs)
:Field_str(ptr_arg, 10, null_ptr_arg, null_bit_arg,
unireg_check_arg, field_name_arg, table_arg, cs)
unireg_check_arg, field_name_arg, cs)
{}
Field_date(bool maybe_null_arg, const char *field_name_arg,
struct st_table *table_arg, CHARSET_INFO *cs)
CHARSET_INFO *cs)
:Field_str((char*) 0,10, maybe_null_arg ? (uchar*) "": 0,0,
NONE, field_name_arg, table_arg, cs) {}
NONE, field_name_arg, cs) {}
enum_field_types type() const { return FIELD_TYPE_DATE;}
enum ha_base_keytype key_type() const { return HA_KEYTYPE_ULONG_INT; }
enum Item_result cmp_type () const { return INT_RESULT; }
@ -877,13 +864,14 @@ public:
bool zero_pack() const { return 1; }
};
class Field_newdate :public Field_str {
public:
Field_newdate(char *ptr_arg, uchar *null_ptr_arg, uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
struct st_table *table_arg, CHARSET_INFO *cs)
CHARSET_INFO *cs)
:Field_str(ptr_arg, 10, null_ptr_arg, null_bit_arg,
unireg_check_arg, field_name_arg, table_arg, cs)
unireg_check_arg, field_name_arg, cs)
{}
enum_field_types type() const { return FIELD_TYPE_DATE;}
enum_field_types real_type() const { return FIELD_TYPE_NEWDATE; }
@ -913,14 +901,14 @@ class Field_time :public Field_str {
public:
Field_time(char *ptr_arg, uchar *null_ptr_arg, uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
struct st_table *table_arg, CHARSET_INFO *cs)
CHARSET_INFO *cs)
:Field_str(ptr_arg, 8, null_ptr_arg, null_bit_arg,
unireg_check_arg, field_name_arg, table_arg, cs)
unireg_check_arg, field_name_arg, cs)
{}
Field_time(bool maybe_null_arg, const char *field_name_arg,
struct st_table *table_arg, CHARSET_INFO *cs)
CHARSET_INFO *cs)
:Field_str((char*) 0,8, maybe_null_arg ? (uchar*) "": 0,0,
NONE, field_name_arg, table_arg, cs) {}
NONE, field_name_arg, cs) {}
enum_field_types type() const { return FIELD_TYPE_TIME;}
enum ha_base_keytype key_type() const { return HA_KEYTYPE_INT24; }
enum Item_result cmp_type () const { return INT_RESULT; }
@ -948,14 +936,14 @@ class Field_datetime :public Field_str {
public:
Field_datetime(char *ptr_arg, uchar *null_ptr_arg, uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
struct st_table *table_arg, CHARSET_INFO *cs)
CHARSET_INFO *cs)
:Field_str(ptr_arg, 19, null_ptr_arg, null_bit_arg,
unireg_check_arg, field_name_arg, table_arg, cs)
unireg_check_arg, field_name_arg, cs)
{}
Field_datetime(bool maybe_null_arg, const char *field_name_arg,
struct st_table *table_arg, CHARSET_INFO *cs)
CHARSET_INFO *cs)
:Field_str((char*) 0,19, maybe_null_arg ? (uchar*) "": 0,0,
NONE, field_name_arg, table_arg, cs) {}
NONE, field_name_arg, cs) {}
enum_field_types type() const { return FIELD_TYPE_DATETIME;}
#ifdef HAVE_LONG_LONG
enum ha_base_keytype key_type() const { return HA_KEYTYPE_ULONGLONG; }
@ -987,13 +975,13 @@ public:
Field_string(char *ptr_arg, uint32 len_arg,uchar *null_ptr_arg,
uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
struct st_table *table_arg, CHARSET_INFO *cs)
CHARSET_INFO *cs)
:Field_longstr(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
unireg_check_arg, field_name_arg, table_arg, cs) {};
unireg_check_arg, field_name_arg, cs) {};
Field_string(uint32 len_arg,bool maybe_null_arg, const char *field_name_arg,
struct st_table *table_arg, CHARSET_INFO *cs)
CHARSET_INFO *cs)
:Field_longstr((char*) 0, len_arg, maybe_null_arg ? (uchar*) "": 0, 0,
NONE, field_name_arg, table_arg, cs) {};
NONE, field_name_arg, cs) {};
enum_field_types type() const
{
@ -1038,26 +1026,23 @@ public:
uint32 length_bytes;
Field_varstring(char *ptr_arg,
uint32 len_arg, uint length_bytes_arg,
uchar *null_ptr_arg,
uchar null_bit_arg,
uchar *null_ptr_arg, uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
struct st_table *table_arg, CHARSET_INFO *cs)
TABLE_SHARE *share, CHARSET_INFO *cs)
:Field_longstr(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
unireg_check_arg, field_name_arg, table_arg, cs),
unireg_check_arg, field_name_arg, cs),
length_bytes(length_bytes_arg)
{
if (table)
table->s->varchar_fields++;
share->varchar_fields++;
}
Field_varstring(uint32 len_arg,bool maybe_null_arg,
const char *field_name_arg,
struct st_table *table_arg, CHARSET_INFO *cs)
TABLE_SHARE *share, CHARSET_INFO *cs)
:Field_longstr((char*) 0,len_arg, maybe_null_arg ? (uchar*) "": 0, 0,
NONE, field_name_arg, table_arg, cs),
NONE, field_name_arg, cs),
length_bytes(len_arg < 256 ? 1 :2)
{
if (table)
table->s->varchar_fields++;
share->varchar_fields++;
}
enum_field_types type() const { return MYSQL_TYPE_VARCHAR; }
@ -1119,12 +1104,11 @@ protected:
public:
Field_blob(char *ptr_arg, uchar *null_ptr_arg, uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
struct st_table *table_arg,uint blob_pack_length,
CHARSET_INFO *cs);
TABLE_SHARE *share, uint blob_pack_length, CHARSET_INFO *cs);
Field_blob(uint32 len_arg,bool maybe_null_arg, const char *field_name_arg,
struct st_table *table_arg, CHARSET_INFO *cs)
:Field_longstr((char*) 0,len_arg, maybe_null_arg ? (uchar*) "": 0, 0,
NONE, field_name_arg, table_arg, cs),
CHARSET_INFO *cs)
:Field_longstr((char*) 0, len_arg, maybe_null_arg ? (uchar*) "": 0, 0,
NONE, field_name_arg, cs),
packlength(4)
{
flags|= BLOB_FLAG;
@ -1221,15 +1205,14 @@ public:
Field_geom(char *ptr_arg, uchar *null_ptr_arg, uint null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
struct st_table *table_arg,uint blob_pack_length,
TABLE_SHARE *share, uint blob_pack_length,
enum geometry_type geom_type_arg)
:Field_blob(ptr_arg, null_ptr_arg, null_bit_arg, unireg_check_arg,
field_name_arg, table_arg, blob_pack_length,&my_charset_bin)
field_name_arg, share, blob_pack_length, &my_charset_bin)
{ geom_type= geom_type_arg; }
Field_geom(uint32 len_arg,bool maybe_null_arg, const char *field_name_arg,
struct st_table *table_arg, enum geometry_type geom_type_arg)
:Field_blob(len_arg, maybe_null_arg, field_name_arg,
table_arg, &my_charset_bin)
TABLE_SHARE *share, enum geometry_type geom_type_arg)
:Field_blob(len_arg, maybe_null_arg, field_name_arg, &my_charset_bin)
{ geom_type= geom_type_arg; }
enum ha_base_keytype key_type() const { return HA_KEYTYPE_VARBINARY2; }
enum_field_types type() const { return FIELD_TYPE_GEOMETRY; }
@ -1250,13 +1233,13 @@ protected:
public:
TYPELIB *typelib;
Field_enum(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
struct st_table *table_arg,uint packlength_arg,
TYPELIB *typelib_arg,
CHARSET_INFO *charset_arg)
uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
uint packlength_arg,
TYPELIB *typelib_arg,
CHARSET_INFO *charset_arg)
:Field_str(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
unireg_check_arg, field_name_arg, table_arg, charset_arg),
unireg_check_arg, field_name_arg, charset_arg),
packlength(packlength_arg),typelib(typelib_arg)
{
flags|=ENUM_FLAG;
@ -1293,12 +1276,12 @@ public:
Field_set(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
struct st_table *table_arg,uint32 packlength_arg,
uint32 packlength_arg,
TYPELIB *typelib_arg, CHARSET_INFO *charset_arg)
:Field_enum(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
unireg_check_arg, field_name_arg,
table_arg, packlength_arg,
typelib_arg,charset_arg)
packlength_arg,
typelib_arg,charset_arg)
{
flags=(flags & ~ENUM_FLAG) | SET_FLAG;
}
@ -1320,8 +1303,7 @@ public:
uint bit_len; // number of 'uneven' high bits
Field_bit(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg, uchar *bit_ptr_arg, uchar bit_ofs_arg,
enum utype unireg_check_arg, const char *field_name_arg,
struct st_table *table_arg);
enum utype unireg_check_arg, const char *field_name_arg);
enum_field_types type() const { return FIELD_TYPE_BIT; }
enum ha_base_keytype key_type() const { return HA_KEYTYPE_BIT; }
uint32 key_length() const { return (uint32) field_length + (bit_len > 0); }
@ -1363,6 +1345,11 @@ public:
bit_ptr= bit_ptr_arg;
bit_ofs= bit_ofs_arg;
}
void move_field_offset(my_ptrdiff_t ptr_diff)
{
Field::move_field_offset(ptr_diff);
bit_ptr= ADD_TO_PTR(bit_ptr, ptr_diff, uchar*);
}
};
@ -1371,8 +1358,7 @@ public:
uchar create_length;
Field_bit_as_char(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg, uchar *bit_ptr_arg, uchar bit_ofs_arg,
enum utype unireg_check_arg, const char *field_name_arg,
struct st_table *table_arg);
enum utype unireg_check_arg, const char *field_name_arg);
enum ha_base_keytype key_type() const { return HA_KEYTYPE_BINARY; }
uint32 max_length() { return (uint32) create_length; }
uint size_of() const { return sizeof(*this); }
@ -1467,14 +1453,13 @@ public:
};
Field *make_field(char *ptr, uint32 field_length,
Field *make_field(TABLE_SHARE *share, char *ptr, uint32 field_length,
uchar *null_pos, uchar null_bit,
uint pack_flag, enum_field_types field_type,
CHARSET_INFO *cs,
Field::geometry_type geom_type,
Field::utype unireg_check,
TYPELIB *interval, const char *field_name,
struct st_table *table);
TYPELIB *interval, const char *field_name);
uint pack_length_to_packflag(uint type);
enum_field_types get_blob_type_from_length(ulong length);
uint32 calc_pack_length(enum_field_types type,uint32 length);

View File

@ -135,7 +135,7 @@ static HASH archive_open_tables;
#define ARCHIVE_CHECK_HEADER 254 // The number we use to determine corruption
/* Static declarations for handerton */
static handler *archive_create_handler(TABLE *table);
static handler *archive_create_handler(TABLE_SHARE *table);
/* dummy handlerton - only to have something to return from archive_db_init */
@ -172,7 +172,7 @@ handlerton archive_hton = {
HTON_NO_FLAGS
};
static handler *archive_create_handler(TABLE *table)
static handler *archive_create_handler(TABLE_SHARE *table)
{
return new ha_archive(table);
}
@ -242,7 +242,7 @@ int archive_db_end(ha_panic_function type)
return 0;
}
ha_archive::ha_archive(TABLE *table_arg)
ha_archive::ha_archive(TABLE_SHARE *table_arg)
:handler(&archive_hton, table_arg), delayed_insert(0), bulk_insert(0)
{
/* Set our original buffer from pre-allocated memory */

View File

@ -58,7 +58,7 @@ class ha_archive: public handler
bool bulk_insert; /* If we are performing a bulk insert */
public:
ha_archive(TABLE *table_arg);
ha_archive(TABLE_SHARE *table_arg);
~ha_archive()
{
}

View File

@ -114,7 +114,7 @@ static void berkeley_noticecall(DB_ENV *db_env, db_notices notice);
static int berkeley_close_connection(THD *thd);
static int berkeley_commit(THD *thd, bool all);
static int berkeley_rollback(THD *thd, bool all);
static handler *berkeley_create_handler(TABLE *table);
static handler *berkeley_create_handler(TABLE_SHARE *table);
handlerton berkeley_hton = {
"BerkeleyDB",
@ -149,7 +149,7 @@ handlerton berkeley_hton = {
HTON_CLOSE_CURSORS_AT_COMMIT | HTON_FLUSH_AFTER_RENAME
};
handler *berkeley_create_handler(TABLE *table)
handler *berkeley_create_handler(TABLE_SHARE *table)
{
return new ha_berkeley(table);
}
@ -425,7 +425,7 @@ void berkeley_cleanup_log_files(void)
** Berkeley DB tables
*****************************************************************************/
ha_berkeley::ha_berkeley(TABLE *table_arg)
ha_berkeley::ha_berkeley(TABLE_SHARE *table_arg)
:handler(&berkeley_hton, table_arg), alloc_ptr(0), rec_buff(0), file(0),
int_table_flags(HA_REC_NOT_IN_SEQ | HA_FAST_KEY_READ |
HA_NULL_IN_KEY | HA_CAN_INDEX_BLOBS | HA_NOT_EXACT_COUNT |
@ -452,13 +452,14 @@ ulong ha_berkeley::index_flags(uint idx, uint part, bool all_parts) const
| HA_READ_RANGE);
for (uint i= all_parts ? 0 : part ; i <= part ; i++)
{
if (table->key_info[idx].key_part[i].field->type() == FIELD_TYPE_BLOB)
KEY_PART_INFO *key_part= table_share->key_info[idx].key_part+i;
if (key_part->field->type() == FIELD_TYPE_BLOB)
{
/* We can't use BLOBS to shortcut sorts */
flags&= ~(HA_READ_ORDER | HA_KEYREAD_ONLY | HA_READ_RANGE);
break;
}
switch (table->key_info[idx].key_part[i].field->key_type()) {
switch (key_part->field->key_type()) {
case HA_KEYTYPE_TEXT:
case HA_KEYTYPE_VARTEXT1:
case HA_KEYTYPE_VARTEXT2:
@ -466,8 +467,7 @@ ulong ha_berkeley::index_flags(uint idx, uint part, bool all_parts) const
As BDB stores only one copy of equal strings, we can't use key read
on these. Binary collations do support key read though.
*/
if (!(table->key_info[idx].key_part[i].field->charset()->state
& MY_CS_BINSORT))
if (!(key_part->field->charset()->state & MY_CS_BINSORT))
flags&= ~HA_KEYREAD_ONLY;
break;
default: // Keep compiler happy
@ -596,7 +596,6 @@ int ha_berkeley::open(const char *name, int mode, uint test_if_locked)
uint open_mode=(mode == O_RDONLY ? DB_RDONLY : 0) | DB_THREAD;
uint max_key_length;
int error;
TABLE_SHARE *table_share= table->s;
DBUG_ENTER("ha_berkeley::open");
/* Open primary key */
@ -618,7 +617,7 @@ int ha_berkeley::open(const char *name, int mode, uint test_if_locked)
&key_buff2, max_key_length,
&primary_key_buff,
(hidden_primary_key ? 0 :
table->key_info[table_share->primary_key].key_length),
table_share->key_info[table_share->primary_key].key_length),
NullS)))
DBUG_RETURN(1); /* purecov: inspected */
if (!(rec_buff= (byte*) my_malloc((alloced_rec_buff_length=
@ -775,9 +774,9 @@ bool ha_berkeley::fix_rec_buff_for_blob(ulong length)
ulong ha_berkeley::max_row_length(const byte *buf)
{
ulong length= table->s->reclength + table->s->fields*2;
ulong length= table_share->reclength + table_share->fields*2;
uint *ptr, *end;
for (ptr= table->s->blob_field, end=ptr + table->s->blob_fields ;
for (ptr= table_share->blob_field, end=ptr + table_share->blob_fields ;
ptr != end ;
ptr++)
{
@ -804,25 +803,26 @@ int ha_berkeley::pack_row(DBT *row, const byte *record, bool new_row)
if (share->fixed_length_row)
{
row->data=(void*) record;
row->size= table->s->reclength+hidden_primary_key;
row->size= table_share->reclength+hidden_primary_key;
if (hidden_primary_key)
{
if (new_row)
get_auto_primary_key(current_ident);
memcpy_fixed((char*) record+table->s->reclength, (char*) current_ident,
memcpy_fixed((char*) record+table_share->reclength,
(char*) current_ident,
BDB_HIDDEN_PRIMARY_KEY_LENGTH);
}
return 0;
}
if (table->s->blob_fields)
if (table_share->blob_fields)
{
if (fix_rec_buff_for_blob(max_row_length(record)))
return HA_ERR_OUT_OF_MEM; /* purecov: inspected */
}
/* Copy null bits */
memcpy(rec_buff, record, table->s->null_bytes);
ptr= rec_buff + table->s->null_bytes;
memcpy(rec_buff, record, table_share->null_bytes);
ptr= rec_buff + table_share->null_bytes;
for (Field **field=table->field ; *field ; field++)
ptr=(byte*) (*field)->pack((char*) ptr,
@ -845,13 +845,13 @@ int ha_berkeley::pack_row(DBT *row, const byte *record, bool new_row)
void ha_berkeley::unpack_row(char *record, DBT *row)
{
if (share->fixed_length_row)
memcpy(record,(char*) row->data,table->s->reclength+hidden_primary_key);
memcpy(record,(char*) row->data,table_share->reclength+hidden_primary_key);
else
{
/* Copy null bits */
const char *ptr= (const char*) row->data;
memcpy(record, ptr, table->s->null_bytes);
ptr+= table->s->null_bytes;
memcpy(record, ptr, table_share->null_bytes);
ptr+= table_share->null_bytes;
for (Field **field=table->field ; *field ; field++)
ptr= (*field)->unpack(record + (*field)->offset(), ptr);
}
@ -997,7 +997,7 @@ int ha_berkeley::write_row(byte * record)
DBUG_RETURN(error); /* purecov: inspected */
table->insert_or_update= 1; // For handling of VARCHAR
if (table->s->keys + test(hidden_primary_key) == 1)
if (table_share->keys + test(hidden_primary_key) == 1)
{
error=file->put(file, transaction, create_key(&prim_key, primary_key,
key_buff, record),
@ -1016,7 +1016,7 @@ int ha_berkeley::write_row(byte * record)
&row, key_type[primary_key])))
{
changed_keys.set_bit(primary_key);
for (uint keynr=0 ; keynr < table->s->keys ; keynr++)
for (uint keynr=0 ; keynr < table_share->keys ; keynr++)
{
if (keynr == primary_key)
continue;
@ -1044,7 +1044,7 @@ int ha_berkeley::write_row(byte * record)
{
new_error = 0;
for (uint keynr=0;
keynr < table->s->keys+test(hidden_primary_key);
keynr < table_share->keys+test(hidden_primary_key);
keynr++)
{
if (changed_keys.is_set(keynr))
@ -1187,7 +1187,7 @@ int ha_berkeley::restore_keys(DB_TXN *trans, key_map *changed_keys,
that one just put back the old value. */
if (!changed_keys->is_clear_all())
{
for (keynr=0 ; keynr < table->s->keys+test(hidden_primary_key) ; keynr++)
for (keynr=0 ; keynr < table_share->keys+test(hidden_primary_key) ; keynr++)
{
if (changed_keys->is_set(keynr))
{
@ -1252,7 +1252,7 @@ int ha_berkeley::update_row(const byte * old_row, byte * new_row)
using_ignore)))
{
// Update all other keys
for (uint keynr=0 ; keynr < table->s->keys ; keynr++)
for (uint keynr=0 ; keynr < table_share->keys ; keynr++)
{
if (keynr == primary_key)
continue;
@ -1364,7 +1364,7 @@ int ha_berkeley::remove_keys(DB_TXN *trans, const byte *record,
{
int result = 0;
for (uint keynr=0;
keynr < table->s->keys+test(hidden_primary_key);
keynr < table_share->keys+test(hidden_primary_key);
keynr++)
{
if (keys->is_set(keynr))
@ -1385,7 +1385,7 @@ int ha_berkeley::delete_row(const byte * record)
{
int error;
DBT row, prim_key;
key_map keys= table->s->keys_in_use;
key_map keys= table_share->keys_in_use;
DBUG_ENTER("delete_row");
statistic_increment(table->in_use->status_var.ha_delete_count,&LOCK_status);
@ -1421,7 +1421,8 @@ int ha_berkeley::index_init(uint keynr, bool sorted)
{
int error;
DBUG_ENTER("ha_berkeley::index_init");
DBUG_PRINT("enter",("table: '%s' key: %d", table->s->table_name, keynr));
DBUG_PRINT("enter",("table: '%s' key: %d", table_share->table_name.str,
keynr));
/*
Under some very rare conditions (like full joins) we may already have
@ -1448,7 +1449,7 @@ int ha_berkeley::index_end()
DBUG_ENTER("ha_berkely::index_end");
if (cursor)
{
DBUG_PRINT("enter",("table: '%s'", table->s->table_name));
DBUG_PRINT("enter",("table: '%s'", table_share->table_name.str));
error=cursor->c_close(cursor);
cursor=0;
}
@ -1803,14 +1804,14 @@ void ha_berkeley::info(uint flag)
if ((flag & HA_STATUS_CONST) || version != share->version)
{
version=share->version;
for (uint i=0 ; i < table->s->keys ; i++)
for (uint i=0 ; i < table_share->keys ; i++)
{
table->key_info[i].rec_per_key[table->key_info[i].key_parts-1]=
share->rec_per_key[i];
}
}
/* Don't return key if we got an error for the internal primary key */
if (flag & HA_STATUS_ERRKEY && last_dup_key < table->s->keys)
if (flag & HA_STATUS_ERRKEY && last_dup_key < table_share->keys)
errkey= last_dup_key;
DBUG_VOID_RETURN;
}
@ -2059,7 +2060,7 @@ int ha_berkeley::create(const char *name, register TABLE *form,
if ((error= create_sub_table(name_buff,"main",DB_BTREE,0)))
DBUG_RETURN(error); /* purecov: inspected */
primary_key= table->s->primary_key;
primary_key= form->s->primary_key;
/* Create the keys */
for (uint i=0; i < form->s->keys; i++)
{
@ -2067,7 +2068,7 @@ int ha_berkeley::create(const char *name, register TABLE *form,
{
sprintf(part,"key%02d",index++);
if ((error= create_sub_table(name_buff, part, DB_BTREE,
(table->key_info[i].flags & HA_NOSAME) ? 0 :
(form->key_info[i].flags & HA_NOSAME) ? 0 :
DB_DUP)))
DBUG_RETURN(error); /* purecov: inspected */
}
@ -2083,7 +2084,7 @@ int ha_berkeley::create(const char *name, register TABLE *form,
"status", DB_BTREE, DB_CREATE, 0))))
{
char rec_buff[4+MAX_KEY*4];
uint length= 4+ table->s->keys*4;
uint length= 4+ form->s->keys*4;
bzero(rec_buff, length);
error= write_status(status_block, rec_buff, length);
status_block->close(status_block,0);
@ -2203,9 +2204,9 @@ ulonglong ha_berkeley::get_auto_increment()
(void) ha_berkeley::extra(HA_EXTRA_KEYREAD);
/* Set 'active_index' */
ha_berkeley::index_init(table->s->next_number_index, 0);
ha_berkeley::index_init(table_share->next_number_index, 0);
if (!table->s->next_number_key_offset)
if (!table_share->next_number_key_offset)
{ // Autoincrement at key-start
error=ha_berkeley::index_last(table->record[1]);
}
@ -2218,7 +2219,7 @@ ulonglong ha_berkeley::get_auto_increment()
/* Reading next available number for a sub key */
ha_berkeley::create_key(&last_key, active_index,
key_buff, table->record[0],
table->s->next_number_key_offset);
table_share->next_number_key_offset);
/* Store for compare */
memcpy(old_key.data=key_buff2, key_buff, (old_key.size=last_key.size));
old_key.app_private=(void*) key_info;
@ -2248,7 +2249,7 @@ ulonglong ha_berkeley::get_auto_increment()
}
if (!error)
nr= (ulonglong)
table->next_number_field->val_int_offset(table->s->rec_buff_length)+1;
table->next_number_field->val_int_offset(table_share->rec_buff_length)+1;
ha_berkeley::index_end();
(void) ha_berkeley::extra(HA_EXTRA_NO_KEYREAD);
return nr;
@ -2332,7 +2333,7 @@ int ha_berkeley::analyze(THD* thd, HA_CHECK_OPT* check_opt)
free(txn_stat_ptr);
}
for (i=0 ; i < table->s->keys ; i++)
for (i=0 ; i < table_share->keys ; i++)
{
if (stat)
{
@ -2407,7 +2408,7 @@ int ha_berkeley::check(THD* thd, HA_CHECK_OPT* check_opt)
(hidden_primary_key ? berkeley_cmp_hidden_key :
berkeley_cmp_packed_key));
tmp_file->app_private= (void*) (table->key_info+table->primary_key);
fn_format(name_buff,share->table_name,"", ha_berkeley_ext, 2 | 4);
fn_format(name_buff,share->table_name.str,"", ha_berkeley_ext, 2 | 4);
if ((error=tmp_file->verify(tmp_file, name_buff, NullS, (FILE*) 0,
hidden_primary_key ? 0 : DB_NOORDERCHK)))
{
@ -2481,7 +2482,7 @@ static BDB_SHARE *get_share(const char *table_name, TABLE *table)
share->rec_per_key = rec_per_key;
share->table_name = tmp_name;
share->table_name_length=length;
strmov(share->table_name,table_name);
strmov(share->table_name, table_name);
share->key_file = key_file;
share->key_type = key_type;
if (my_hash_insert(&bdb_open_tables, (byte*) share))
@ -2553,7 +2554,7 @@ void ha_berkeley::get_status()
char name_buff[FN_REFLEN];
uint open_mode= (((table->db_stat & HA_READ_ONLY) ? DB_RDONLY : 0)
| DB_THREAD);
fn_format(name_buff, share->table_name,"", ha_berkeley_ext, 2 | 4);
fn_format(name_buff, share->table_name, "", ha_berkeley_ext, 2 | 4);
if (!db_create(&share->status_block, db_env, 0))
{
if (share->status_block->open(share->status_block, NULL, name_buff,
@ -2567,7 +2568,7 @@ void ha_berkeley::get_status()
if (!(share->status & STATUS_ROW_COUNT_INIT) && share->status_block)
{
share->org_rows= share->rows=
table->s->max_rows ? table->s->max_rows : HA_BERKELEY_MAX_ROWS;
table_share->max_rows ? table_share->max_rows : HA_BERKELEY_MAX_ROWS;
if (!share->status_block->cursor(share->status_block, 0, &cursor, 0))
{
DBT row;
@ -2582,7 +2583,7 @@ void ha_berkeley::get_status()
uint i;
uchar *pos=(uchar*) row.data;
share->org_rows=share->rows=uint4korr(pos); pos+=4;
for (i=0 ; i < table->s->keys ; i++)
for (i=0 ; i < table_share->keys ; i++)
{
share->rec_per_key[i]=uint4korr(pos);
pos+=4;
@ -2634,8 +2635,8 @@ static void update_status(BDB_SHARE *share, TABLE *table)
goto end; /* purecov: inspected */
share->status_block->set_flags(share->status_block,0); /* purecov: inspected */
if (share->status_block->open(share->status_block, NULL,
fn_format(name_buff,share->table_name,"",
ha_berkeley_ext,2 | 4),
fn_format(name_buff,share->table_name,
"", ha_berkeley_ext,2 | 4),
"status", DB_BTREE,
DB_THREAD | DB_CREATE, my_umask)) /* purecov: inspected */
goto end; /* purecov: inspected */
@ -2647,7 +2648,7 @@ static void update_status(BDB_SHARE *share, TABLE *table)
{
int4store(pos,share->rec_per_key[i]); pos+=4;
}
DBUG_PRINT("info",("updating status for %s",share->table_name));
DBUG_PRINT("info",("updating status for %s", share->table_name));
(void) write_status(share->status_block, rec_buff,
(uint) (pos-rec_buff));
share->status&= ~STATUS_BDB_ANALYZE;
@ -2677,7 +2678,7 @@ int ha_berkeley::cmp_ref(const byte *ref1, const byte *ref2)
int result;
Field *field;
KEY *key_info=table->key_info+table->s->primary_key;
KEY *key_info=table->key_info+table_share->primary_key;
KEY_PART_INFO *key_part=key_info->key_part;
KEY_PART_INFO *end=key_part+key_info->key_parts;

View File

@ -84,7 +84,7 @@ class ha_berkeley: public handler
DBT *get_pos(DBT *to, byte *pos);
public:
ha_berkeley(TABLE *table_arg);
ha_berkeley(TABLE_SHARE *table_arg);
~ha_berkeley() {}
const char *table_type() const { return "BerkeleyDB"; }
ulong index_flags(uint idx, uint part, bool all_parts) const;

View File

@ -24,7 +24,7 @@
/* Static declarations for handlerton */
static handler *blackhole_create_handler(TABLE *table);
static handler *blackhole_create_handler(TABLE_SHARE *table);
/* Blackhole storage engine handlerton */
@ -63,7 +63,7 @@ handlerton blackhole_hton= {
};
static handler *blackhole_create_handler(TABLE *table)
static handler *blackhole_create_handler(TABLE_SHARE *table)
{
return new ha_blackhole(table);
}
@ -73,7 +73,7 @@ static handler *blackhole_create_handler(TABLE *table)
** BLACKHOLE tables
*****************************************************************************/
ha_blackhole::ha_blackhole(TABLE *table_arg)
ha_blackhole::ha_blackhole(TABLE_SHARE *table_arg)
:handler(&blackhole_hton, table_arg)
{}
@ -112,13 +112,12 @@ int ha_blackhole::create(const char *name, TABLE *table_arg,
const char *ha_blackhole::index_type(uint key_number)
{
DBUG_ENTER("ha_blackhole::index_type");
DBUG_RETURN((table->key_info[key_number].flags & HA_FULLTEXT) ?
DBUG_RETURN((table_share->key_info[key_number].flags & HA_FULLTEXT) ?
"FULLTEXT" :
(table->key_info[key_number].flags & HA_SPATIAL) ?
(table_share->key_info[key_number].flags & HA_SPATIAL) ?
"SPATIAL" :
(table->key_info[key_number].algorithm == HA_KEY_ALG_RTREE) ?
"RTREE" :
"BTREE");
(table_share->key_info[key_number].algorithm ==
HA_KEY_ALG_RTREE) ? "RTREE" : "BTREE");
}
int ha_blackhole::write_row(byte * buf)

View File

@ -28,7 +28,7 @@ class ha_blackhole: public handler
THR_LOCK thr_lock;
public:
ha_blackhole(TABLE *table_arg);
ha_blackhole(TABLE_SHARE *table_arg);
~ha_blackhole()
{
}
@ -49,7 +49,7 @@ public:
}
ulong index_flags(uint inx, uint part, bool all_parts) const
{
return ((table->key_info[inx].algorithm == HA_KEY_ALG_FULLTEXT) ?
return ((table_share->key_info[inx].algorithm == HA_KEY_ALG_FULLTEXT) ?
0 : HA_READ_NEXT | HA_READ_PREV | HA_READ_RANGE |
HA_READ_ORDER | HA_KEYREAD_ONLY);
}

View File

@ -355,15 +355,12 @@
#include "m_string.h"
/* Variables for federated share methods */
static HASH federated_open_tables; // Hash used to track open
// tables
pthread_mutex_t federated_mutex; // This is the mutex we use to
// init the hash
static int federated_init= FALSE; // Variable for checking the
// init state of hash
static HASH federated_open_tables; // To track open tables
pthread_mutex_t federated_mutex; // To init the hash
static int federated_init= FALSE; // Checking the state of hash
/* Static declaration for handerton */
static handler *federated_create_handler(TABLE *table);
static handler *federated_create_handler(TABLE_SHARE *table);
static int federated_commit(THD *thd, bool all);
static int federated_rollback(THD *thd, bool all);
@ -403,13 +400,13 @@ handlerton federated_hton= {
};
static handler *federated_create_handler(TABLE *table)
static handler *federated_create_handler(TABLE_SHARE *table)
{
return new ha_federated(table);
}
/* Function we use in the creation of our hash to get key. */
/* Function we use in the creation of our hash to get key */
static byte *federated_get_key(FEDERATED_SHARE *share, uint *length,
my_bool not_used __attribute__ ((unused)))
@ -437,14 +434,12 @@ bool federated_db_init()
goto error;
if (hash_init(&federated_open_tables, system_charset_info, 32, 0, 0,
(hash_get_key) federated_get_key, 0, 0))
{
VOID(pthread_mutex_destroy(&federated_mutex));
}
else
{
federated_init= TRUE;
DBUG_RETURN(FALSE);
}
VOID(pthread_mutex_destroy(&federated_mutex));
error:
have_federated_db= SHOW_OPTION_DISABLED; // If we couldn't use handler
DBUG_RETURN(TRUE);
@ -456,7 +451,6 @@ error:
SYNOPSIS
federated_db_end()
void
RETURN
FALSE OK
@ -473,6 +467,7 @@ int federated_db_end(ha_panic_function type)
return 0;
}
/*
Check (in create) whether the tables exists, and that it can be connected to
@ -605,12 +600,12 @@ static int parse_url_error(FEDERATED_SHARE *share, TABLE *table, int error_num)
SYNOPSIS
parse_url()
share pointer to FEDERATED share
table pointer to current TABLE class
table_create_flag determines what error to throw
share pointer to FEDERATED share
table pointer to current TABLE class
table_create_flag determines what error to throw
DESCRIPTION
populates the share with information about the connection
Populates the share with information about the connection
to the foreign database that will serve as the data source.
This string must be specified (currently) in the "comment" field,
listed in the CREATE TABLE statement.
@ -629,7 +624,7 @@ static int parse_url_error(FEDERATED_SHARE *share, TABLE *table, int error_num)
***IMPORTANT***
Currently, only "mysql://" is supported.
'password' and 'port' are both optional.
'password' and 'port' are both optional.
RETURN VALUE
0 success
@ -739,8 +734,8 @@ static int parse_url(FEDERATED_SHARE *share, TABLE *table,
}
DBUG_PRINT("info",
("scheme %s username %s password %s \
hostname %s port %d database %s tablename %s",
("scheme: %s username: %s password: %s \
hostname: %s port: %d database: %s tablename: %s",
share->scheme, share->username, share->password,
share->hostname, share->port, share->database,
share->table_name));
@ -756,7 +751,7 @@ error:
** FEDERATED tables
*****************************************************************************/
ha_federated::ha_federated(TABLE *table_arg)
ha_federated::ha_federated(TABLE_SHARE *table_arg)
:handler(&federated_hton, table_arg),
mysql(0), stored_result(0), scan_flag(0),
ref_length(sizeof(MYSQL_ROW_OFFSET)), current_position(0)
@ -770,8 +765,8 @@ ha_federated::ha_federated(TABLE *table_arg)
SYNOPSIS
convert_row_to_internal_format()
record Byte pointer to record
row MySQL result set row from fetchrow()
record Byte pointer to record
row MySQL result set row from fetchrow()
DESCRIPTION
This method simply iterates through a row returned via fetchrow with
@ -782,7 +777,7 @@ ha_federated::ha_federated(TABLE *table_arg)
RETURN VALUE
0 After fields have had field values stored from record
*/
*/
uint ha_federated::convert_row_to_internal_format(byte *record, MYSQL_ROW row)
{
@ -793,24 +788,23 @@ uint ha_federated::convert_row_to_internal_format(byte *record, MYSQL_ROW row)
lengths= mysql_fetch_lengths(stored_result);
memset(record, 0, table->s->null_bytes);
for (field= table->field; *field; field++)
for (field= table->field; *field; field++, row++, lengths++)
{
/*
index variable to move us through the row at the
same iterative step as the field
*/
int x= field - table->field;
my_ptrdiff_t old_ptr;
old_ptr= (my_ptrdiff_t) (record - table->record[0]);
(*field)->move_field(old_ptr);
if (!row[x])
(*field)->move_field_offset(old_ptr);
if (!*row)
(*field)->set_null();
else
{
(*field)->set_notnull();
(*field)->store(row[x], lengths[x], &my_charset_bin);
(*field)->store(*row, *lengths, &my_charset_bin);
}
(*field)->move_field(-old_ptr);
(*field)->move_field_offset(-old_ptr);
}
DBUG_RETURN(0);
@ -1215,8 +1209,8 @@ bool ha_federated::create_where_from_key(String *to,
DBUG_RETURN(1);
}
else
/* LIKE */
{
/* LIKE */
if (emit_key_part_name(&tmp, key_part) ||
tmp.append(FEDERATED_LIKE) ||
emit_key_part_element(&tmp, key_part, needs_quotes, 1, ptr,
@ -1328,16 +1322,16 @@ static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table)
/*
In order to use this string, we must first zero it's length,
or it will contain garbage
*/
*/
query.length(0);
pthread_mutex_lock(&federated_mutex);
tmp_table_name= (char *)table->s->table_name;
tmp_table_name_length= (uint) strlen(tmp_table_name);
tmp_table_name= table->s->table_name.str;
tmp_table_name_length= table->s->table_name.length;
if (!(share= (FEDERATED_SHARE *) hash_search(&federated_open_tables,
(byte*) table_name,
strlen(table_name))))
tmp_table_name_length)))
{
query.set_charset(system_charset_info);
query.append(FEDERATED_SELECT);
@ -1348,7 +1342,7 @@ static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table)
query.append(FEDERATED_BTICK);
query.append(FEDERATED_COMMA);
}
query.length(query.length()- strlen(FEDERATED_COMMA));
query.length(query.length()- FEDERATED_COMMA_LEN);
query.append(FEDERATED_FROM);
query.append(FEDERATED_BTICK);
@ -1372,7 +1366,6 @@ static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table)
share->select_query= select_query;
strmov(share->select_query, query.ptr());
share->use_count= 0;
share->table_name_length= strlen(share->table_name);
DBUG_PRINT("info",
("share->select_query %s", share->select_query));
@ -1486,8 +1479,8 @@ int ha_federated::open(const char *name, int mode, uint test_if_locked)
}
/*
Since we do not support transactions at this version, we can let the client
API silently reconnect. For future versions, we will need more logic to deal
with transactions
API silently reconnect. For future versions, we will need more logic to
deal with transactions
*/
mysql->reconnect= 1;
@ -1563,6 +1556,7 @@ inline uint field_in_record_is_null(TABLE *table,
DBUG_RETURN(0);
}
/*
write_row() inserts a row. No extra() hint is given currently if a bulk load
is happeneding. buf() is a byte array of data. You can use the field
@ -1819,15 +1813,15 @@ int ha_federated::update_row(const byte *old_data, byte *new_data)
update_string.append(FEDERATED_BTICK);
update_string.append(FEDERATED_SET);
/*
In this loop, we want to match column names to values being inserted
(while building INSERT statement).
/*
In this loop, we want to match column names to values being inserted
(while building INSERT statement).
Iterate through table->field (new data) and share->old_filed (old_data)
using the same index to created an SQL UPDATE statement, new data is
used to create SET field=value and old data is used to create WHERE
field=oldvalue
*/
Iterate through table->field (new data) and share->old_filed (old_data)
using the same index to created an SQL UPDATE statement, new data is
used to create SET field=value and old data is used to create WHERE
field=oldvalue
*/
for (Field **field= table->field; *field; field++)
{
@ -2048,7 +2042,7 @@ int ha_federated::index_read_idx(byte *buf, uint index, const byte *key,
This basically says that the record in table->record[0] is legal,
and that it is ok to use this record, for whatever reason, such
as with a join (without it, joins will not work)
*/
*/
table->status= 0;
retval= rnd_next(buf);
@ -2070,7 +2064,7 @@ int ha_federated::index_init(uint keynr, bool sorted)
{
DBUG_ENTER("ha_federated::index_init");
DBUG_PRINT("info",
("table: '%s' key: %d", table->s->table_name, keynr));
("table: '%s' key: %d", table->s->table_name.str, keynr));
active_index= keynr;
DBUG_RETURN(0);
}
@ -2251,6 +2245,7 @@ int ha_federated::rnd_end()
DBUG_RETURN(retval);
}
int ha_federated::index_end(void)
{
DBUG_ENTER("ha_federated::index_end");
@ -2258,6 +2253,7 @@ int ha_federated::index_end(void)
DBUG_RETURN(0);
}
/*
This is called for each row of the table scan. When you run out of records
you should return HA_ERR_END_OF_FILE. Fill buff up with the row information.

View File

@ -173,7 +173,7 @@ private:
int stash_remote_error();
public:
ha_federated(TABLE *table_arg);
ha_federated(TABLE_SHARE *table_arg);
~ha_federated() {}
/* The name that will be used for display purposes */
const char *table_type() const { return "FEDERATED"; }
@ -232,8 +232,7 @@ public:
*/
double scan_time()
{
DBUG_PRINT("info",
("records %d", records));
DBUG_PRINT("info", ("records %lu", (ulong) records));
return (double)(records*1000);
}
/*

View File

@ -24,7 +24,7 @@
#include "ha_heap.h"
static handler *heap_create_handler(TABLE *table);
static handler *heap_create_handler(TABLE_SHARE *table);
handlerton heap_hton= {
"MEMORY",
@ -59,7 +59,7 @@ handlerton heap_hton= {
HTON_CAN_RECREATE
};
static handler *heap_create_handler(TABLE *table)
static handler *heap_create_handler(TABLE_SHARE *table)
{
return new ha_heap(table);
}
@ -69,7 +69,7 @@ static handler *heap_create_handler(TABLE *table)
** HEAP tables
*****************************************************************************/
ha_heap::ha_heap(TABLE *table_arg)
ha_heap::ha_heap(TABLE_SHARE *table_arg)
:handler(&heap_hton, table_arg), file(0), records_changed(0),
key_stats_ok(0)
{}
@ -490,8 +490,7 @@ THR_LOCK_DATA **ha_heap::store_lock(THD *thd,
int ha_heap::delete_table(const char *name)
{
char buff[FN_REFLEN];
int error= heap_delete_table(fn_format(buff,name,"","",
MY_REPLACE_EXT|MY_UNPACK_FILENAME));
int error= heap_delete_table(name);
return error == ENOENT ? 0 : error;
}
@ -537,7 +536,6 @@ int ha_heap::create(const char *name, TABLE *table_arg,
ha_rows max_rows;
HP_KEYDEF *keydef;
HA_KEYSEG *seg;
char buff[FN_REFLEN];
int error;
TABLE_SHARE *share= table_arg->s;
bool found_real_auto_increment= 0;
@ -618,7 +616,7 @@ int ha_heap::create(const char *name, TABLE *table_arg,
}
}
mem_per_row+= MY_ALIGN(share->reclength + 1, sizeof(char*));
max_rows = (ha_rows) (table->in_use->variables.max_heap_table_size /
max_rows = (ha_rows) (table_arg->in_use->variables.max_heap_table_size /
mem_per_row);
if (table_arg->found_next_number_field)
{
@ -633,8 +631,7 @@ int ha_heap::create(const char *name, TABLE *table_arg,
hp_create_info.max_table_size=current_thd->variables.max_heap_table_size;
hp_create_info.with_auto_increment= found_real_auto_increment;
max_rows = (ha_rows) (hp_create_info.max_table_size / mem_per_row);
error= heap_create(fn_format(buff,name,"","",
MY_REPLACE_EXT|MY_UNPACK_FILENAME),
error= heap_create(name,
keys, keydef, share->reclength,
(ulong) ((share->max_rows < max_rows &&
share->max_rows) ?

View File

@ -31,7 +31,7 @@ class ha_heap: public handler
uint records_changed;
bool key_stats_ok;
public:
ha_heap(TABLE *table);
ha_heap(TABLE_SHARE *table);
~ha_heap() {}
const char *table_type() const
{
@ -40,7 +40,7 @@ public:
}
const char *index_type(uint inx)
{
return ((table->key_info[inx].algorithm == HA_KEY_ALG_BTREE) ?
return ((table_share->key_info[inx].algorithm == HA_KEY_ALG_BTREE) ?
"BTREE" : "HASH");
}
/* Rows also use a fixed-size format */
@ -54,7 +54,7 @@ public:
}
ulong index_flags(uint inx, uint part, bool all_parts) const
{
return ((table->key_info[inx].algorithm == HA_KEY_ALG_BTREE) ?
return ((table_share->key_info[inx].algorithm == HA_KEY_ALG_BTREE) ?
HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER | HA_READ_RANGE :
HA_ONLY_WHOLE_INDEX);
}

View File

@ -205,7 +205,7 @@ static int innobase_rollback(THD* thd, bool all);
static int innobase_rollback_to_savepoint(THD* thd, void *savepoint);
static int innobase_savepoint(THD* thd, void *savepoint);
static int innobase_release_savepoint(THD* thd, void *savepoint);
static handler *innobase_create_handler(TABLE *table);
static handler *innobase_create_handler(TABLE_SHARE *table);
handlerton innobase_hton = {
"InnoDB",
@ -245,7 +245,7 @@ handlerton innobase_hton = {
};
static handler *innobase_create_handler(TABLE *table)
static handler *innobase_create_handler(TABLE_SHARE *table)
{
return new ha_innobase(table);
}
@ -826,7 +826,7 @@ check_trx_exists(
/*************************************************************************
Construct ha_innobase handler. */
ha_innobase::ha_innobase(TABLE *table_arg)
ha_innobase::ha_innobase(TABLE_SHARE *table_arg)
:handler(&innobase_hton, table_arg),
int_table_flags(HA_REC_NOT_IN_SEQ |
HA_NULL_IN_KEY |
@ -4820,8 +4820,8 @@ ha_innobase::create(
/* Look for a primary key */
primary_key_no= (table->s->primary_key != MAX_KEY ?
(int) table->s->primary_key :
primary_key_no= (form->s->primary_key != MAX_KEY ?
(int) form->s->primary_key :
-1);
/* Our function row_get_mysql_key_number_for_index assumes

View File

@ -81,7 +81,7 @@ class ha_innobase: public handler
/* Init values for the class: */
public:
ha_innobase(TABLE *table_arg);
ha_innobase(TABLE_SHARE *table_arg);
~ha_innobase() {}
/*
Get the row type from the storage engine. If this method returns

View File

@ -50,7 +50,7 @@ TYPELIB myisam_stats_method_typelib= {
** MyISAM tables
*****************************************************************************/
static handler *myisam_create_handler(TABLE *table);
static handler *myisam_create_handler(TABLE_SHARE *table);
/* MyISAM handlerton */
@ -92,7 +92,7 @@ handlerton myisam_hton= {
};
static handler *myisam_create_handler(TABLE *table)
static handler *myisam_create_handler(TABLE_SHARE *table)
{
return new ha_myisam(table);
}
@ -178,7 +178,7 @@ void mi_check_print_warning(MI_CHECK *param, const char *fmt,...)
}
ha_myisam::ha_myisam(TABLE *table_arg)
ha_myisam::ha_myisam(TABLE_SHARE *table_arg)
:handler(&myisam_hton, table_arg), file(0),
int_table_flags(HA_NULL_IN_KEY | HA_CAN_FULLTEXT | HA_CAN_SQL_HANDLER |
HA_DUPP_POS | HA_CAN_INDEX_BLOBS | HA_AUTO_PART_KEY |
@ -358,7 +358,7 @@ int ha_myisam::check(THD* thd, HA_CHECK_OPT* check_opt)
myisamchk_init(&param);
param.thd = thd;
param.op_name = "check";
param.db_name= table->s->db;
param.db_name= table->s->db.str;
param.table_name= table->alias;
param.testflag = check_opt->flags | T_CHECK | T_SILENT;
param.stats_method= (enum_mi_stats_method)thd->variables.myisam_stats_method;
@ -446,7 +446,7 @@ int ha_myisam::analyze(THD *thd, HA_CHECK_OPT* check_opt)
myisamchk_init(&param);
param.thd = thd;
param.op_name= "analyze";
param.db_name= table->s->db;
param.db_name= table->s->db.str;
param.table_name= table->alias;
param.testflag= (T_FAST | T_CHECK | T_SILENT | T_STATISTICS |
T_DONT_CHECK_CHECKSUM);
@ -474,7 +474,7 @@ int ha_myisam::restore(THD* thd, HA_CHECK_OPT *check_opt)
HA_CHECK_OPT tmp_check_opt;
char *backup_dir= thd->lex->backup_dir;
char src_path[FN_REFLEN], dst_path[FN_REFLEN];
const char *table_name= table->s->table_name;
const char *table_name= table->s->table_name.str;
int error;
const char* errmsg;
DBUG_ENTER("restore");
@ -483,8 +483,8 @@ int ha_myisam::restore(THD* thd, HA_CHECK_OPT *check_opt)
MI_NAME_DEXT))
DBUG_RETURN(HA_ADMIN_INVALID);
if (my_copy(src_path, fn_format(dst_path, table->s->path, "",
MI_NAME_DEXT, 4), MYF(MY_WME)))
strxmov(dst_path, table->s->normalized_path.str, MI_NAME_DEXT, NullS);
if (my_copy(src_path, dst_path, MYF(MY_WME)))
{
error= HA_ADMIN_FAILED;
errmsg= "Failed in my_copy (Error %d)";
@ -501,8 +501,8 @@ int ha_myisam::restore(THD* thd, HA_CHECK_OPT *check_opt)
myisamchk_init(&param);
param.thd= thd;
param.op_name= "restore";
param.db_name= table->s->db;
param.table_name= table->s->table_name;
param.db_name= table->s->db.str;
param.table_name= table->s->table_name.str;
param.testflag= 0;
mi_check_print_error(&param, errmsg, my_errno);
DBUG_RETURN(error);
@ -514,7 +514,7 @@ int ha_myisam::backup(THD* thd, HA_CHECK_OPT *check_opt)
{
char *backup_dir= thd->lex->backup_dir;
char src_path[FN_REFLEN], dst_path[FN_REFLEN];
const char *table_name= table->s->table_name;
const char *table_name= table->s->table_name.str;
int error;
const char *errmsg;
DBUG_ENTER("ha_myisam::backup");
@ -527,9 +527,8 @@ int ha_myisam::backup(THD* thd, HA_CHECK_OPT *check_opt)
goto err;
}
if (my_copy(fn_format(src_path, table->s->path, "", reg_ext,
MY_UNPACK_FILENAME),
dst_path,
strxmov(src_path, table->s->normalized_path.str, reg_ext, NullS);
if (my_copy(src_path, dst_path,
MYF(MY_WME | MY_HOLD_ORIGINAL_MODES | MY_DONT_OVERWRITE_FILE)))
{
error = HA_ADMIN_FAILED;
@ -546,9 +545,8 @@ int ha_myisam::backup(THD* thd, HA_CHECK_OPT *check_opt)
goto err;
}
if (my_copy(fn_format(src_path, table->s->path, "", MI_NAME_DEXT,
MY_UNPACK_FILENAME),
dst_path,
strxmov(src_path, table->s->normalized_path.str, MI_NAME_DEXT, NullS);
if (my_copy(src_path, dst_path,
MYF(MY_WME | MY_HOLD_ORIGINAL_MODES | MY_DONT_OVERWRITE_FILE)))
{
errmsg = "Failed copying .MYD file (errno: %d)";
@ -563,8 +561,8 @@ int ha_myisam::backup(THD* thd, HA_CHECK_OPT *check_opt)
myisamchk_init(&param);
param.thd= thd;
param.op_name= "backup";
param.db_name= table->s->db;
param.table_name= table->s->table_name;
param.db_name= table->s->db.str;
param.table_name= table->s->table_name.str;
param.testflag = 0;
mi_check_print_error(&param,errmsg, my_errno);
DBUG_RETURN(error);
@ -655,7 +653,7 @@ int ha_myisam::repair(THD *thd, MI_CHECK &param, bool optimize)
ha_rows rows= file->state->records;
DBUG_ENTER("ha_myisam::repair");
param.db_name= table->s->db;
param.db_name= table->s->db.str;
param.table_name= table->alias;
param.tmpfile_createflag = O_RDWR | O_TRUNC;
param.using_global_keycache = 1;
@ -826,8 +824,8 @@ int ha_myisam::assign_to_keycache(THD* thd, HA_CHECK_OPT *check_opt)
myisamchk_init(&param);
param.thd= thd;
param.op_name= "assign_to_keycache";
param.db_name= table->s->db;
param.table_name= table->s->table_name;
param.db_name= table->s->db.str;
param.table_name= table->s->table_name.str;
param.testflag= 0;
mi_check_print_error(&param, errmsg);
}
@ -894,8 +892,8 @@ int ha_myisam::preload_keys(THD* thd, HA_CHECK_OPT *check_opt)
myisamchk_init(&param);
param.thd= thd;
param.op_name= "preload_keys";
param.db_name= table->s->db;
param.table_name= table->s->table_name;
param.db_name= table->s->db.str;
param.table_name= table->s->table_name.str;
param.testflag= 0;
mi_check_print_error(&param, errmsg);
DBUG_RETURN(error);
@ -1149,8 +1147,8 @@ bool ha_myisam::check_and_repair(THD *thd)
old_query= thd->query;
old_query_length= thd->query_length;
pthread_mutex_lock(&LOCK_thread_count);
thd->query= (char*) table->s->table_name;
thd->query_length= (uint32) strlen(table->s->table_name);
thd->query= table->s->table_name.str;
thd->query_length= table->s->table_name.length;
pthread_mutex_unlock(&LOCK_thread_count);
if ((marked_crashed= mi_is_crashed(file)) || check(thd, &check_opt))
@ -1337,6 +1335,10 @@ void ha_myisam::info(uint flag)
ref_length= info.reflength;
share->db_options_in_use= info.options;
block_size= myisam_block_size;
/* Update share */
if (share->tmp_table == NO_TMP_TABLE)
pthread_mutex_lock(&share->mutex);
share->keys_in_use.set_prefix(share->keys);
share->keys_in_use.intersect_extended(info.key_map);
share->keys_for_keyread.intersect(share->keys_in_use);
@ -1345,6 +1347,9 @@ void ha_myisam::info(uint flag)
memcpy((char*) table->key_info[0].rec_per_key,
(char*) info.rec_per_key,
sizeof(table->key_info[0].rec_per_key)*share->key_parts);
if (share->tmp_table == NO_TMP_TABLE)
pthread_mutex_unlock(&share->mutex);
raid_type= info.raid_type;
raid_chunks= info.raid_chunks;
raid_chunksize= info.raid_chunksize;
@ -1353,7 +1358,7 @@ void ha_myisam::info(uint flag)
Set data_file_name and index_file_name to point at the symlink value
if table is symlinked (Ie; Real name is not same as generated name)
*/
data_file_name=index_file_name=0;
data_file_name= index_file_name= 0;
fn_format(name_buff, file->filename, "", MI_NAME_DEXT, 2);
if (strcmp(name_buff, info.data_file_name))
data_file_name=info.data_file_name;
@ -1448,7 +1453,7 @@ int ha_myisam::create(const char *name, register TABLE *table_arg,
MI_KEYDEF *keydef;
MI_COLUMNDEF *recinfo,*recinfo_pos;
HA_KEYSEG *keyseg;
TABLE_SHARE *share= table->s;
TABLE_SHARE *share= table_arg->s;
uint options= share->db_options_in_use;
DBUG_ENTER("ha_myisam::create");

View File

@ -43,7 +43,7 @@ class ha_myisam: public handler
int repair(THD *thd, MI_CHECK &param, bool optimize);
public:
ha_myisam(TABLE *table_arg);
ha_myisam(TABLE_SHARE *table_arg);
~ha_myisam() {}
const char *table_type() const { return "MyISAM"; }
const char *index_type(uint key_number);
@ -51,7 +51,7 @@ class ha_myisam: public handler
ulong table_flags() const { return int_table_flags; }
ulong index_flags(uint inx, uint part, bool all_parts) const
{
return ((table->key_info[inx].algorithm == HA_KEY_ALG_FULLTEXT) ?
return ((table_share->key_info[inx].algorithm == HA_KEY_ALG_FULLTEXT) ?
0 : HA_READ_NEXT | HA_READ_PREV | HA_READ_RANGE |
HA_READ_ORDER | HA_KEYREAD_ONLY);
}

View File

@ -32,7 +32,7 @@
** MyISAM MERGE tables
*****************************************************************************/
static handler *myisammrg_create_handler(TABLE *table);
static handler *myisammrg_create_handler(TABLE_SHARE *table);
/* MyISAM MERGE handlerton */
@ -69,13 +69,13 @@ handlerton myisammrg_hton= {
HTON_CAN_RECREATE
};
static handler *myisammrg_create_handler(TABLE *table)
static handler *myisammrg_create_handler(TABLE_SHARE *table)
{
return new ha_myisammrg(table);
}
ha_myisammrg::ha_myisammrg(TABLE *table_arg)
ha_myisammrg::ha_myisammrg(TABLE_SHARE *table_arg)
:handler(&myisammrg_hton, table_arg), file(0)
{}
@ -302,7 +302,6 @@ void ha_myisammrg::info(uint flag)
errkey = info.errkey;
table->s->keys_in_use.set_prefix(table->s->keys);
table->s->db_options_in_use= info.options;
table->s->is_view= 1;
mean_rec_length= info.reclength;
block_size=0;
update_time=0;
@ -456,9 +455,9 @@ int ha_myisammrg::create(const char *name, register TABLE *form,
for (pos= table_names; tables; tables= tables->next_local)
{
const char *table_name;
TABLE **tbl= 0;
TABLE *tbl= 0;
if (create_info->options & HA_LEX_CREATE_TMP_TABLE)
tbl= find_temporary_table(thd, tables->db, tables->table_name);
tbl= find_temporary_table(thd, tables);
if (!tbl)
{
/*
@ -487,7 +486,7 @@ int ha_myisammrg::create(const char *name, register TABLE *form,
DBUG_RETURN(HA_ERR_OUT_OF_MEM);
}
else
table_name= (*tbl)->s->path;
table_name= tbl->s->path.str;
*pos++= table_name;
}
*pos=0;
@ -503,6 +502,7 @@ void ha_myisammrg::append_create_info(String *packet)
const char *current_db;
uint db_length;
THD *thd= current_thd;
MYRG_TABLE *open_table, *first;
if (file->merge_insert_method != MERGE_INSERT_DISABLED)
{
@ -510,10 +510,9 @@ void ha_myisammrg::append_create_info(String *packet)
packet->append(get_type(&merge_insert_method,file->merge_insert_method-1));
}
packet->append(STRING_WITH_LEN(" UNION=("));
MYRG_TABLE *open_table,*first;
current_db= table->s->db;
db_length= (uint) strlen(current_db);
current_db= table->s->db.str;
db_length= table->s->db.length;
for (first=open_table=file->open_tables ;
open_table != file->end_table ;

View File

@ -28,7 +28,7 @@ class ha_myisammrg: public handler
MYRG_INFO *file;
public:
ha_myisammrg(TABLE *table_arg);
ha_myisammrg(TABLE_SHARE *table_arg);
~ha_myisammrg() {}
const char *table_type() const { return "MRG_MyISAM"; }
const char **bas_ext() const;
@ -37,11 +37,12 @@ class ha_myisammrg: public handler
{
return (HA_REC_NOT_IN_SEQ | HA_AUTO_PART_KEY | HA_READ_RND_SAME |
HA_NULL_IN_KEY | HA_CAN_INDEX_BLOBS | HA_FILE_BASED |
HA_CAN_INSERT_DELAYED | HA_ANY_INDEX_MAY_BE_UNIQUE);
HA_CAN_INSERT_DELAYED | HA_ANY_INDEX_MAY_BE_UNIQUE |
HA_NO_COPY_ON_ALTER);
}
ulong index_flags(uint inx, uint part, bool all_parts) const
{
return ((table->key_info[inx].algorithm == HA_KEY_ALG_FULLTEXT) ?
return ((table_share->key_info[inx].algorithm == HA_KEY_ALG_FULLTEXT) ?
0 : HA_READ_NEXT | HA_READ_PREV | HA_READ_RANGE |
HA_READ_ORDER | HA_KEYREAD_ONLY);
}

View File

@ -56,7 +56,7 @@ static const char share_prefix[]= "./";
static int ndbcluster_close_connection(THD *thd);
static int ndbcluster_commit(THD *thd, bool all);
static int ndbcluster_rollback(THD *thd, bool all);
static handler* ndbcluster_create_handler(TABLE *table);
static handler* ndbcluster_create_handler(TABLE_SHARE *table);
handlerton ndbcluster_hton = {
"ndbcluster",
@ -91,7 +91,7 @@ handlerton ndbcluster_hton = {
HTON_NO_FLAGS
};
static handler *ndbcluster_create_handler(TABLE *table)
static handler *ndbcluster_create_handler(TABLE_SHARE *table)
{
return new ha_ndbcluster(table);
}
@ -985,6 +985,10 @@ bool ha_ndbcluster::uses_blob_value()
IMPLEMENTATION
- check that frm-file on disk is equal to frm-file
of table accessed in NDB
RETURN
0 ok
-2 Meta data has changed; Re-read data and try again
*/
static int cmp_frm(const NDBTAB *ndbtab, const void *pack_data,
@ -1007,7 +1011,6 @@ int ha_ndbcluster::get_metadata(const char *path)
const NDBTAB *tab;
int error;
bool invalidating_ndb_table= FALSE;
DBUG_ENTER("get_metadata");
DBUG_PRINT("enter", ("m_tabname: %s, path: %s", m_tabname, path));
@ -1053,7 +1056,7 @@ int ha_ndbcluster::get_metadata(const char *path)
memcmp(pack_data, tab->getFrmData(), pack_length)));
DBUG_DUMP("pack_data", (char*)pack_data, pack_length);
DBUG_DUMP("frm", (char*)tab->getFrmData(), tab->getFrmLength());
error= 3;
error= HA_ERR_TABLE_DEF_CHANGED;
invalidating_ndb_table= FALSE;
}
}
@ -1129,7 +1132,7 @@ int ha_ndbcluster::build_index_list(Ndb *ndb, TABLE *tab, enum ILBP phase)
m_index[i].type= idx_type;
if (idx_type == UNIQUE_ORDERED_INDEX || idx_type == UNIQUE_INDEX)
{
strxnmov(unique_index_name, FN_LEN, index_name, unique_suffix, NullS);
strxnmov(unique_index_name, FN_LEN-1, index_name, unique_suffix, NullS);
DBUG_PRINT("info", ("Created unique index name \'%s\' for index %d",
unique_index_name, i));
}
@ -1163,7 +1166,7 @@ int ha_ndbcluster::build_index_list(Ndb *ndb, TABLE *tab, enum ILBP phase)
if (error)
{
DBUG_PRINT("error", ("Failed to create index %u", i));
drop_table();
intern_drop_table();
break;
}
}
@ -1215,11 +1218,12 @@ int ha_ndbcluster::build_index_list(Ndb *ndb, TABLE *tab, enum ILBP phase)
*/
NDB_INDEX_TYPE ha_ndbcluster::get_index_type_from_table(uint inx) const
{
bool is_hash_index= (table->key_info[inx].algorithm == HA_KEY_ALG_HASH);
if (inx == table->s->primary_key)
bool is_hash_index= (table_share->key_info[inx].algorithm ==
HA_KEY_ALG_HASH);
if (inx == table_share->primary_key)
return is_hash_index ? PRIMARY_KEY_INDEX : PRIMARY_KEY_ORDERED_INDEX;
return ((table->key_info[inx].flags & HA_NOSAME) ?
return ((table_share->key_info[inx].flags & HA_NOSAME) ?
(is_hash_index ? UNIQUE_INDEX : UNIQUE_ORDERED_INDEX) :
ORDERED_INDEX);
}
@ -1338,7 +1342,7 @@ inline ulong ha_ndbcluster::index_flags(uint idx_no, uint part,
bool all_parts) const
{
DBUG_ENTER("ha_ndbcluster::index_flags");
DBUG_PRINT("info", ("idx_no: %d", idx_no));
DBUG_PRINT("enter", ("idx_no: %u", idx_no));
DBUG_ASSERT(get_index_type_from_table(idx_no) < index_flags_size);
DBUG_RETURN(index_type_flags[get_index_type_from_table(idx_no)] |
HA_KEY_SCAN_NOT_ROR);
@ -3934,13 +3938,14 @@ int ha_ndbcluster::create(const char *name,
char name2[FN_HEADLEN];
bool create_from_engine= test(info->table_options &
HA_OPTION_CREATE_FROM_ENGINE);
DBUG_ENTER("ha_ndbcluster::create");
DBUG_PRINT("enter", ("name: %s", name));
fn_format(name2, name, "", "",2); // Remove the .frm extension
set_dbname(name2);
set_tabname(name2);
table= form;
if (create_from_engine)
{
/*
@ -4058,7 +4063,7 @@ int ha_ndbcluster::create(const char *name,
}
// Check partition info
partition_info *part_info= form->s->part_info;
partition_info *part_info= form->part_info;
if (part_info)
{
int error;
@ -4255,7 +4260,7 @@ ha_ndbcluster::delete_table(ha_ndbcluster *h, Ndb *ndb,
int res;
if (h)
{
res= h->drop_table();
res= h->intern_drop_table();
}
else
{
@ -4292,12 +4297,12 @@ int ha_ndbcluster::delete_table(const char *name)
Drop table in NDB Cluster
*/
int ha_ndbcluster::drop_table()
int ha_ndbcluster::intern_drop_table()
{
Ndb *ndb= get_ndb();
NdbDictionary::Dictionary *dict= ndb->getDictionary();
DBUG_ENTER("drop_table");
DBUG_ENTER("intern_drop_table");
DBUG_PRINT("enter", ("Deleting %s", m_tabname));
release_metadata();
if (dict->dropTable(m_tabname))
@ -4358,7 +4363,7 @@ ulonglong ha_ndbcluster::get_auto_increment()
HA_CAN_GEOMETRY | \
HA_CAN_BIT_FIELD
ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
ha_ndbcluster::ha_ndbcluster(TABLE_SHARE *table_arg):
handler(&ndbcluster_hton, table_arg),
m_active_trans(NULL),
m_active_cursor(NULL),
@ -4455,18 +4460,24 @@ ha_ndbcluster::~ha_ndbcluster()
Open a table for further use
- fetch metadata for this table from NDB
- check that table exists
RETURN
0 ok
< 0 Table has changed
*/
int ha_ndbcluster::open(const char *name, int mode, uint test_if_locked)
{
int res;
KEY *key;
DBUG_ENTER("open");
DBUG_PRINT("enter", ("this: %d name: %s mode: %d test_if_locked: %d",
this, name, mode, test_if_locked));
DBUG_ENTER("ha_ndbcluster::open");
DBUG_PRINT("enter", ("name: %s mode: %d test_if_locked: %d",
name, mode, test_if_locked));
// Setup ref_length to make room for the whole
// primary key to be written in the ref variable
/*
Setup ref_length to make room for the whole
primary key to be written in the ref variable
*/
if (table->s->primary_key != MAX_KEY)
{
@ -4492,9 +4503,9 @@ int ha_ndbcluster::open(const char *name, int mode, uint test_if_locked)
if (!res)
info(HA_STATUS_VARIABLE | HA_STATUS_CONST);
if (table->s->part_info)
if (table->part_info)
{
m_part_info= table->s->part_info;
m_part_info= table->part_info;
if (!(m_part_info->part_type == HASH_PARTITION &&
m_part_info->list_of_part_fields &&
!is_sub_partitioned(m_part_info)))
@ -4729,12 +4740,13 @@ int ndbcluster_drop_database_impl(const char *path)
}
// Drop any tables belonging to database
char full_path[FN_REFLEN];
char *tmp= strxnmov(full_path, FN_REFLEN, share_prefix, dbname, "/", NullS);
char *tmp= strxnmov(full_path, FN_REFLEN-1, share_prefix, dbname, "/",
NullS);
ndb->setDatabaseName(dbname);
List_iterator_fast<char> it(drop_list);
while ((tabname=it++))
{
strxnmov(tmp, FN_REFLEN - (tmp - full_path), tabname, NullS);
strxnmov(tmp, FN_REFLEN - (tmp - full_path)-1, tabname, NullS);
if (ha_ndbcluster::delete_table(0, ndb, full_path, dbname, tabname))
{
const NdbError err= dict->getNdbError();
@ -4801,7 +4813,7 @@ static int ndbcluster_find_all_files(THD *thd)
if (ndbtab->getFrmLength() == 0)
continue;
strxnmov(key, FN_LEN, mysql_data_home, "/",
strxnmov(key, FN_LEN-1, mysql_data_home, "/",
elmt.database, "/", elmt.name, NullS);
const void *data= 0, *pack_data= 0;
uint length, pack_length;
@ -4917,7 +4929,7 @@ int ndbcluster_find_files(THD *thd,const char *db,const char *path,
}
// File is not in NDB, check for .ndb file with this name
(void)strxnmov(name, FN_REFLEN,
(void)strxnmov(name, FN_REFLEN-1,
mysql_data_home,"/",db,"/",file_name,ha_ndb_ext,NullS);
DBUG_PRINT("info", ("Check access for %s", name));
if (access(name, F_OK))
@ -4947,7 +4959,7 @@ int ndbcluster_find_files(THD *thd,const char *db,const char *path,
file_name= hash_element(&ndb_tables, i);
if (!hash_search(&ok_tables, file_name, strlen(file_name)))
{
strxnmov(name, sizeof(name),
strxnmov(name, sizeof(name)-1,
mysql_data_home, "/", db, "/", file_name, reg_ext, NullS);
if (access(name, F_OK))
{
@ -4993,7 +5005,7 @@ int ndbcluster_find_files(THD *thd,const char *db,const char *path,
files->push_back(thd->strdup(file_name));
}
pthread_mutex_unlock(&LOCK_open);
pthread_mutex_unlock(&LOCK_open);
hash_free(&ok_tables);
hash_free(&ndb_tables);
@ -5175,11 +5187,13 @@ int ndbcluster_end(ha_panic_function type)
void ndbcluster_print_error(int error, const NdbOperation *error_op)
{
DBUG_ENTER("ndbcluster_print_error");
TABLE tab;
TABLE_SHARE share;
const char *tab_name= (error_op) ? error_op->getTableName() : "";
tab.alias= (char *) tab_name;
ha_ndbcluster error_handler(&tab);
tab.file= &error_handler;
share.db.str= (char*) "";
share.db.length= 0;
share.table_name.str= (char *) tab_name;
share.table_name.length= strlen(tab_name);
ha_ndbcluster error_handler(&share);
error_handler.print_error(error, MYF(0));
DBUG_VOID_RETURN;
}
@ -5431,11 +5445,11 @@ uint8 ha_ndbcluster::table_cache_type()
uint ndb_get_commitcount(THD *thd, char *dbname, char *tabname,
Uint64 *commit_count)
{
DBUG_ENTER("ndb_get_commitcount");
char name[FN_REFLEN];
NDB_SHARE *share;
(void)strxnmov(name, FN_REFLEN, share_prefix, dbname, "/", tabname, NullS);
DBUG_ENTER("ndb_get_commitcount");
(void)strxnmov(name, FN_REFLEN-1, share_prefix, dbname, "/", tabname, NullS);
DBUG_PRINT("enter", ("name: %s", name));
pthread_mutex_lock(&ndbcluster_mutex);
if (!(share=(NDB_SHARE*) hash_search(&ndbcluster_open_tables,
@ -6071,7 +6085,7 @@ int ha_ndbcluster::write_ndb_file()
DBUG_ENTER("write_ndb_file");
DBUG_PRINT("enter", ("db: %s, name: %s", m_dbname, m_tabname));
(void)strxnmov(path, FN_REFLEN,
(void)strxnmov(path, FN_REFLEN-1,
mysql_data_home,"/",m_dbname,"/",m_tabname,ha_ndb_ext,NullS);
if ((file=my_create(path, CREATE_MODE,O_RDWR | O_TRUNC,MYF(MY_WME))) >= 0)
@ -7073,9 +7087,9 @@ void ndb_serialize_cond(const Item *item, void *arg)
}
else
{
DBUG_PRINT("info", ("Was not expecting field from table %s(%s)",
context->table->s->table_name,
field->table->s->table_name));
DBUG_PRINT("info", ("Was not expecting field from table %s (%s)",
context->table->s->table_name.str,
field->table->s->table_name.str));
context->supported= FALSE;
}
break;

View File

@ -478,7 +478,7 @@ class Thd_ndb
class ha_ndbcluster: public handler
{
public:
ha_ndbcluster(TABLE *table);
ha_ndbcluster(TABLE_SHARE *table);
~ha_ndbcluster();
int open(const char *name, int mode, uint test_if_locked);
@ -620,7 +620,7 @@ private:
const char *path,
const char *db,
const char *table_name);
int drop_table();
int intern_drop_table();
int create_index(const char *name, KEY *key_info, bool unique);
int create_ordered_index(const char *name, KEY *key_info);
int create_unique_index(const char *name, KEY *key_info);

View File

@ -66,7 +66,7 @@ static PARTITION_SHARE *get_share(const char *table_name, TABLE * table);
MODULE create/delete handler object
****************************************************************************/
static handler* partition_create_handler(TABLE *table);
static handler *partition_create_handler(TABLE_SHARE *share);
handlerton partition_hton = {
"partition",
@ -101,31 +101,25 @@ handlerton partition_hton = {
HTON_NOT_USER_SELECTABLE
};
static handler* partition_create_handler(TABLE *table)
static handler *partition_create_handler(TABLE_SHARE *share)
{
return new ha_partition(table);
return new ha_partition(share);
}
ha_partition::ha_partition(TABLE *table)
:handler(&partition_hton, table), m_part_info(NULL), m_create_handler(FALSE),
ha_partition::ha_partition(TABLE_SHARE *share)
:handler(&partition_hton, share), m_part_info(NULL), m_create_handler(FALSE),
m_is_sub_partitioned(0)
{
DBUG_ENTER("ha_partition::ha_partition(table)");
init_handler_variables();
if (table)
{
if (table->s->part_info)
{
m_part_info= table->s->part_info;
m_is_sub_partitioned= is_sub_partitioned(m_part_info);
}
}
DBUG_VOID_RETURN;
}
ha_partition::ha_partition(partition_info *part_info)
:handler(&partition_hton, NULL), m_part_info(part_info), m_create_handler(TRUE),
:handler(&partition_hton, NULL), m_part_info(part_info),
m_create_handler(TRUE),
m_is_sub_partitioned(is_sub_partitioned(m_part_info))
{
@ -230,64 +224,64 @@ ha_partition::~ha_partition()
int ha_partition::ha_initialise()
{
handler **file_array, *file;
DBUG_ENTER("ha_partition::set_up_constants");
DBUG_ENTER("ha_partition::ha_initialise");
if (m_part_info)
if (m_create_handler)
{
m_tot_parts= get_tot_partitions(m_part_info);
DBUG_ASSERT(m_tot_parts > 0);
if (m_create_handler)
if (new_handlers_from_part_info())
DBUG_RETURN(1);
}
else if (!table_share || !table_share->normalized_path.str)
{
/*
Called with dummy table share (delete, rename and alter table)
Don't need to set-up table flags other than
HA_FILE_BASED here
*/
m_table_flags|= HA_FILE_BASED | HA_REC_NOT_IN_SEQ;
DBUG_RETURN(0);
}
else if (get_from_handler_file(table_share->normalized_path.str))
{
my_error(ER_OUTOFMEMORY, MYF(0), 129); //Temporary fix TODO print_error
DBUG_RETURN(1);
}
/*
We create all underlying table handlers here. We do it in this special
method to be able to report allocation errors.
Set up table_flags, low_byte_first, primary_key_is_clustered and
has_transactions since they are called often in all kinds of places,
other parameters are calculated on demand.
HA_FILE_BASED is always set for partition handler since we use a
special file for handling names of partitions, engine types.
HA_CAN_GEOMETRY, HA_CAN_FULLTEXT, HA_CAN_SQL_HANDLER,
HA_CAN_INSERT_DELAYED is disabled until further investigated.
*/
m_table_flags= m_file[0]->table_flags();
m_low_byte_first= m_file[0]->low_byte_first();
m_has_transactions= TRUE;
m_pkey_is_clustered= TRUE;
file_array= m_file;
do
{
file= *file_array;
if (m_low_byte_first != file->low_byte_first())
{
if (new_handlers_from_part_info())
DBUG_RETURN(1);
}
else if (get_from_handler_file(table->s->path))
{
my_error(ER_OUTOFMEMORY, MYF(0), 129); //Temporary fix TODO print_error
// Cannot have handlers with different endian
my_error(ER_MIX_HANDLER_ERROR, MYF(0));
DBUG_RETURN(1);
}
/*
We create all underlying table handlers here. We only do it if we have
access to the partition info. We do it in this special method to be
able to report allocation errors.
*/
/*
Set up table_flags, low_byte_first, primary_key_is_clustered and
has_transactions since they are called often in all kinds of places,
other parameters are calculated on demand.
HA_FILE_BASED is always set for partition handler since we use a
special file for handling names of partitions, engine types.
HA_CAN_GEOMETRY, HA_CAN_FULLTEXT, HA_CAN_SQL_HANDLER,
HA_CAN_INSERT_DELAYED is disabled until further investigated.
*/
m_table_flags= m_file[0]->table_flags();
m_low_byte_first= m_file[0]->low_byte_first();
m_has_transactions= TRUE;
m_pkey_is_clustered= TRUE;
file_array= m_file;
do
{
file= *file_array;
if (m_low_byte_first != file->low_byte_first())
{
// Cannot have handlers with different endian
my_error(ER_MIX_HANDLER_ERROR, MYF(0));
DBUG_RETURN(1);
}
if (!file->has_transactions())
m_has_transactions= FALSE;
if (!file->primary_key_is_clustered())
m_pkey_is_clustered= FALSE;
m_table_flags&= file->table_flags();
} while (*(++file_array));
m_table_flags&= ~(HA_CAN_GEOMETRY & HA_CAN_FULLTEXT &
HA_CAN_SQL_HANDLER & HA_CAN_INSERT_DELAYED);
/*
TODO RONM:
Make sure that the tree works without partition defined, compiles
and goes through mysql-test-run.
*/
}
if (!file->has_transactions())
m_has_transactions= FALSE;
if (!file->primary_key_is_clustered())
m_pkey_is_clustered= FALSE;
m_table_flags&= file->table_flags();
} while (*(++file_array));
m_table_flags&= ~(HA_CAN_GEOMETRY & HA_CAN_FULLTEXT &
HA_CAN_SQL_HANDLER & HA_CAN_INSERT_DELAYED);
m_table_flags|= HA_FILE_BASED | HA_REC_NOT_IN_SEQ;
DBUG_RETURN(0);
}
@ -720,7 +714,7 @@ bool ha_partition::create_handlers()
bzero(m_file, alloc_len);
for (i= 0; i < m_tot_parts; i++)
{
if (!(m_file[i]= get_new_handler(table, current_thd->mem_root,
if (!(m_file[i]= get_new_handler(table_share, current_thd->mem_root,
(enum db_type) m_engine_array[i])))
DBUG_RETURN(TRUE);
DBUG_PRINT("info", ("engine_type: %u", m_engine_array[i]));
@ -764,7 +758,7 @@ bool ha_partition::new_handlers_from_part_info()
do
{
part_elem= part_it++;
if (!(m_file[i]= get_new_handler(table, thd->mem_root,
if (!(m_file[i]= get_new_handler(table_share, thd->mem_root,
part_elem->engine_type)))
goto error;
DBUG_PRINT("info", ("engine_type: %u", (uint) part_elem->engine_type));
@ -772,7 +766,7 @@ bool ha_partition::new_handlers_from_part_info()
{
for (j= 0; j < m_part_info->no_subparts; j++)
{
if (!(m_file[i]= get_new_handler(table, thd->mem_root,
if (!(m_file[i]= get_new_handler(table_share, thd->mem_root,
part_elem->engine_type)))
goto error;
DBUG_PRINT("info", ("engine_type: %u", (uint) part_elem->engine_type));
@ -913,7 +907,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
do
{
create_partition_name(name_buff, name, name_buffer_ptr);
if ((error= (*file)->ha_open((const char*) name_buff, mode,
if ((error= (*file)->ha_open(table, (const char*) name_buff, mode,
test_if_locked)))
goto err_handler;
m_no_locks+= (*file)->lock_count();

View File

@ -122,6 +122,11 @@ private:
PARTITION_SHARE *share; /* Shared lock info */
public:
void set_part_info(partition_info *part_info)
{
m_part_info= part_info;
m_is_sub_partitioned= is_sub_partitioned(part_info);
}
/*
-------------------------------------------------------------------------
MODULE create/delete handler object
@ -133,7 +138,7 @@ public:
partition handler.
-------------------------------------------------------------------------
*/
ha_partition(TABLE * table);
ha_partition(TABLE_SHARE * table);
ha_partition(partition_info * part_info);
~ha_partition();
/*

View File

@ -189,7 +189,8 @@ enum db_type ha_checktype(THD *thd, enum db_type database_type,
} /* ha_checktype */
handler *get_new_handler(TABLE *table, MEM_ROOT *alloc, enum db_type db_type)
handler *get_new_handler(TABLE_SHARE *share, MEM_ROOT *alloc,
enum db_type db_type)
{
handler *file= NULL;
handlerton **types;
@ -205,7 +206,7 @@ handler *get_new_handler(TABLE *table, MEM_ROOT *alloc, enum db_type db_type)
if (db_type == (*types)->db_type && (*types)->create)
{
file= ((*types)->state == SHOW_OPTION_YES) ?
(*types)->create(table) : NULL;
(*types)->create(share) : NULL;
break;
}
}
@ -216,7 +217,7 @@ handler *get_new_handler(TABLE *table, MEM_ROOT *alloc, enum db_type db_type)
enum db_type def=(enum db_type) current_thd->variables.table_type;
/* Try first with 'default table type' */
if (db_type != def)
return get_new_handler(table, alloc, def);
return get_new_handler(share, alloc, def);
}
if (file)
{
@ -1047,7 +1048,8 @@ int ha_rollback_to_savepoint(THD *thd, SAVEPOINT *sv)
my_error(ER_ERROR_DURING_ROLLBACK, MYF(0), err);
error=1;
}
statistic_increment(thd->status_var.ha_savepoint_rollback_count,&LOCK_status);
statistic_increment(thd->status_var.ha_savepoint_rollback_count,
&LOCK_status);
trans->no_2pc|=(*ht)->prepare == 0;
}
/*
@ -1177,7 +1179,7 @@ bool ha_flush_logs(enum db_type db_type)
*/
int ha_delete_table(THD *thd, enum db_type table_type, const char *path,
const char *alias, bool generate_warning)
const char *db, const char *alias, bool generate_warning)
{
handler *file;
char tmp_path[FN_REFLEN];
@ -1192,7 +1194,7 @@ int ha_delete_table(THD *thd, enum db_type table_type, const char *path,
/* DB_TYPE_UNKNOWN is used in ALTER TABLE when renaming only .frm files */
if (table_type == DB_TYPE_UNKNOWN ||
! (file=get_new_handler(&dummy_table, thd->mem_root, table_type)))
! (file=get_new_handler(&dummy_share, thd->mem_root, table_type)))
DBUG_RETURN(ENOENT);
if (lower_case_table_names == 2 && !(file->table_flags() & HA_FILE_BASED))
@ -1225,7 +1227,12 @@ int ha_delete_table(THD *thd, enum db_type table_type, const char *path,
thd->net.last_error[0]= 0;
/* Fill up strucutures that print_error may need */
dummy_table.s->path= path;
dummy_share.path.str= (char*) path;
dummy_share.path.length= strlen(path);
dummy_share.db.str= (char*) db;
dummy_share.db.length= strlen(db);
dummy_share.table_name.str= (char*) alias;
dummy_share.table_name.length= strlen(alias);
dummy_table.alias= alias;
file->print_error(error, 0);
@ -1247,16 +1254,26 @@ int ha_delete_table(THD *thd, enum db_type table_type, const char *path,
** General handler functions
****************************************************************************/
/* Open database-handler. Try O_RDONLY if can't open as O_RDWR */
/* Don't wait for locks if not HA_OPEN_WAIT_IF_LOCKED is set */
/*
Open database-handler.
int handler::ha_open(const char *name, int mode, int test_if_locked)
IMPLEMENTATION
Try O_RDONLY if cannot open as O_RDWR
Don't wait for locks if not HA_OPEN_WAIT_IF_LOCKED is set
*/
int handler::ha_open(TABLE *table_arg, const char *name, int mode,
int test_if_locked)
{
int error;
DBUG_ENTER("handler::ha_open");
DBUG_PRINT("enter",("name: %s db_type: %d db_stat: %d mode: %d lock_test: %d",
name, table->s->db_type, table->db_stat, mode,
test_if_locked));
DBUG_PRINT("enter",
("name: %s db_type: %d db_stat: %d mode: %d lock_test: %d",
name, table_share->db_type, table_arg->db_stat, mode,
test_if_locked));
table= table_arg;
DBUG_ASSERT(table->s == table_share);
if ((error=open(name,mode,test_if_locked)))
{
@ -1269,7 +1286,7 @@ int handler::ha_open(const char *name, int mode, int test_if_locked)
}
if (error)
{
my_errno=error; /* Safeguard */
my_errno= error; /* Safeguard */
DBUG_PRINT("error",("error: %d errno: %d",error,errno));
}
else
@ -1287,74 +1304,51 @@ int handler::ha_open(const char *name, int mode, int test_if_locked)
}
else
dupp_ref=ref+ALIGN_SIZE(ref_length);
if (ha_allocate_read_write_set(table->s->fields))
error= 1;
}
DBUG_RETURN(error);
}
int handler::ha_initialise()
{
DBUG_ENTER("ha_initialise");
if (table && table->s->fields &&
ha_allocate_read_write_set(table->s->fields))
{
DBUG_RETURN(TRUE);
}
DBUG_RETURN(FALSE);
}
/*
Initalize bit maps for used fields
Called from open_table_from_share()
*/
int handler::ha_allocate_read_write_set(ulong no_fields)
{
uint bitmap_size= 4*(((no_fields+1)+31)/32);
uint32 *read_buf, *write_buf;
#ifndef DEBUG_OFF
my_bool r;
#endif
DBUG_ENTER("ha_allocate_read_write_set");
DBUG_PRINT("enter", ("no_fields = %d", no_fields));
if (table)
if (!multi_alloc_root(&table->mem_root,
&read_set, sizeof(MY_BITMAP),
&write_set, sizeof(MY_BITMAP),
&read_buf, bitmap_size,
&write_buf, bitmap_size,
NullS))
{
if (table->read_set == NULL)
{
read_set= (MY_BITMAP*)sql_alloc(sizeof(MY_BITMAP));
write_set= (MY_BITMAP*)sql_alloc(sizeof(MY_BITMAP));
read_buf= (uint32*)sql_alloc(bitmap_size);
write_buf= (uint32*)sql_alloc(bitmap_size);
if (!read_set || !write_set || !read_buf || !write_buf)
{
ha_deallocate_read_write_set();
DBUG_RETURN(TRUE);
}
#ifndef DEBUG_OFF
r =
#endif
bitmap_init(read_set, read_buf, no_fields+1, FALSE);
DBUG_ASSERT(!r /*bitmap_init(read_set...)*/);
#ifndef DEBUG_OFF
r =
#endif
bitmap_init(write_set, write_buf, no_fields+1, FALSE);
DBUG_ASSERT(!r /*bitmap_init(write_set...)*/);
table->read_set= read_set;
table->write_set= write_set;
ha_clear_all_set();
}
else
{
read_set= table->read_set;
write_set= table->write_set;
}
DBUG_RETURN(TRUE);
}
bitmap_init(read_set, read_buf, no_fields+1, FALSE);
bitmap_init(write_set, write_buf, no_fields+1, FALSE);
table->read_set= read_set;
table->write_set= write_set;
ha_clear_all_set();
DBUG_RETURN(FALSE);
}
void handler::ha_deallocate_read_write_set()
{
DBUG_ENTER("ha_deallocate_read_write_set");
read_set=write_set=0;
DBUG_VOID_RETURN;
}
void handler::ha_clear_all_set()
{
DBUG_ENTER("ha_clear_all_set");
@ -1396,6 +1390,7 @@ void handler::ha_set_primary_key_in_read_set()
}
/*
Read first row (only) from a table
This is never called for InnoDB or BDB tables, as these table types
@ -1407,7 +1402,8 @@ int handler::read_first_row(byte * buf, uint primary_key)
register int error;
DBUG_ENTER("handler::read_first_row");
statistic_increment(current_thd->status_var.ha_read_first_count,&LOCK_status);
statistic_increment(table->in_use->status_var.ha_read_first_count,
&LOCK_status);
/*
If there is very few deleted rows in the table, find the first row by
@ -1673,9 +1669,10 @@ void handler::print_error(int error, myf errflag)
uint key_nr=get_dup_key(error);
if ((int) key_nr >= 0)
{
/* Write the dupplicated key in the error message */
/* Write the duplicated key in the error message */
char key[MAX_KEY_LENGTH];
String str(key,sizeof(key),system_charset_info);
/* Table is opened and defined at this point */
key_unpack(&str,table,(uint) key_nr);
uint max_length=MYSQL_ERRMSG_SIZE-(uint) strlen(ER(ER_DUP_ENTRY));
if (str.length() >= max_length)
@ -1762,20 +1759,9 @@ void handler::print_error(int error, myf errflag)
textno=ER_TABLE_DEF_CHANGED;
break;
case HA_ERR_NO_SUCH_TABLE:
{
/*
We have to use path to find database name instead of using
table->table_cache_key because if the table didn't exist, then
table_cache_key was not set up
*/
char *db;
char buff[FN_REFLEN];
uint length= dirname_part(buff,table->s->path);
buff[length-1]=0;
db=buff+dirname_length(buff);
my_error(ER_NO_SUCH_TABLE, MYF(0), db, table->alias);
my_error(ER_NO_SUCH_TABLE, MYF(0), table_share->db.str,
table_share->table_name.str);
break;
}
default:
{
/* The error was "unknown" to this function.
@ -1796,7 +1782,7 @@ void handler::print_error(int error, myf errflag)
DBUG_VOID_RETURN;
}
}
my_error(textno, errflag, table->alias, error);
my_error(textno, errflag, table_share->table_name.str, error);
DBUG_VOID_RETURN;
}
@ -1941,23 +1927,37 @@ int handler::index_next_same(byte *buf, const byte *key, uint keylen)
/*
Initiates table-file and calls apropriate database-creator
Returns 1 if something got wrong
NOTES
We must have a write lock on LOCK_open to be sure no other thread
interfers with table
RETURN
0 ok
1 error
*/
int ha_create_table(const char *name, HA_CREATE_INFO *create_info,
int ha_create_table(THD *thd, const char *path,
const char *db, const char *table_name,
HA_CREATE_INFO *create_info,
bool update_create_info)
{
int error;
int error= 1;
TABLE table;
char name_buff[FN_REFLEN];
const char *name;
TABLE_SHARE share;
DBUG_ENTER("ha_create_table");
init_tmp_table_share(&share, db, 0, table_name, path);
if (open_table_def(thd, &share, 0) ||
open_table_from_share(thd, &share, "", 0, (uint) READ_ALL, 0, &table))
goto err;
if (openfrm(current_thd, name,"",0,(uint) READ_ALL, 0, &table))
DBUG_RETURN(1);
if (update_create_info)
{
update_create_info_from_table(create_info, &table);
}
name= share.path.str;
if (lower_case_table_names == 2 &&
!(table.file->table_flags() & HA_FILE_BASED))
{
@ -1967,27 +1967,32 @@ int ha_create_table(const char *name, HA_CREATE_INFO *create_info,
name= name_buff;
}
error=table.file->create(name,&table,create_info);
VOID(closefrm(&table));
error= table.file->create(name, &table, create_info);
VOID(closefrm(&table, 0));
if (error)
my_error(ER_CANT_CREATE_TABLE, MYF(ME_BELL+ME_WAITTANG), name,error);
{
strxmov(name_buff, db, ".", table_name, NullS);
my_error(ER_CANT_CREATE_TABLE, MYF(ME_BELL+ME_WAITTANG), name_buff, error);
}
err:
free_table_share(&share);
DBUG_RETURN(error != 0);
}
/*
Try to discover table from engine and
if found, write the frm file to disk.
Try to discover table from engine
NOTES
If found, write the frm file to disk.
RETURN VALUES:
-1 : Table did not exists
0 : Table created ok
> 0 : Error, table existed but could not be created
-1 Table did not exists
0 Table created ok
> 0 Error, table existed but could not be created
*/
int ha_create_table_from_engine(THD* thd,
const char *db,
const char *name)
int ha_create_table_from_engine(THD* thd, const char *db, const char *name)
{
int error;
const void *frmblob;
@ -1995,6 +2000,7 @@ int ha_create_table_from_engine(THD* thd,
char path[FN_REFLEN];
HA_CREATE_INFO create_info;
TABLE table;
TABLE_SHARE share;
DBUG_ENTER("ha_create_table_from_engine");
DBUG_PRINT("enter", ("name '%s'.'%s'", db, name));
@ -2010,15 +2016,23 @@ int ha_create_table_from_engine(THD* thd,
frmblob and frmlen are set, write the frm to disk
*/
(void)strxnmov(path,FN_REFLEN,mysql_data_home,"/",db,"/",name,NullS);
(void)strxnmov(path,FN_REFLEN-1,mysql_data_home,"/",db,"/",name,NullS);
// Save the frm file
error= writefrm(path, frmblob, frmlen);
my_free((char*) frmblob, MYF(0));
if (error)
DBUG_RETURN(2);
if (openfrm(thd, path,"",0,(uint) READ_ALL, 0, &table))
init_tmp_table_share(&share, db, 0, name, path);
if (open_table_def(thd, &share, 0))
{
DBUG_RETURN(3);
}
if (open_table_from_share(thd, &share, "" ,0, 0, 0, &table))
{
free_table_share(&share);
DBUG_RETURN(3);
}
update_create_info_from_table(&create_info, &table);
create_info.table_options|= HA_OPTION_CREATE_FROM_ENGINE;
@ -2030,7 +2044,7 @@ int ha_create_table_from_engine(THD* thd,
my_casedn_str(files_charset_info, path);
}
error=table.file->create(path,&table,&create_info);
VOID(closefrm(&table));
VOID(closefrm(&table, 1));
DBUG_RETURN(error != 0);
}
@ -2489,7 +2503,7 @@ TYPELIB *ha_known_exts(void)
{
if ((*types)->state == SHOW_OPTION_YES)
{
handler *file= get_new_handler(0, mem_root,
handler *file= get_new_handler((TABLE_SHARE*) 0, mem_root,
(enum db_type) (*types)->db_type);
for (ext= file->bas_ext(); *ext; ext++)
{

View File

@ -86,6 +86,7 @@
#define HA_CAN_BIT_FIELD (1 << 28) /* supports bit fields */
#define HA_NEED_READ_RANGE_BUFFER (1 << 29) /* for read_multi_range */
#define HA_ANY_INDEX_MAY_BE_UNIQUE (1 << 30)
#define HA_NO_COPY_ON_ALTER (1 << 31)
/* Flags for partition handlers */
#define HA_CAN_PARTITION (1 << 0) /* Partition support */
@ -311,6 +312,7 @@ typedef struct xid_t XID;
struct st_table;
typedef struct st_table TABLE;
typedef struct st_table_share TABLE_SHARE;
struct st_foreign_key_info;
typedef struct st_foreign_key_info FOREIGN_KEY_INFO;
typedef bool (stat_print_fn)(THD *thd, const char *type, const char *file,
@ -411,7 +413,7 @@ typedef struct
void *(*create_cursor_read_view)();
void (*set_cursor_read_view)(void *);
void (*close_cursor_read_view)(void *);
handler *(*create)(TABLE *table);
handler *(*create)(TABLE_SHARE *table);
void (*drop_database)(char* path);
int (*panic)(enum ha_panic_function flag);
int (*release_temporary_latches)(THD *thd);
@ -739,8 +741,9 @@ void get_full_part_id_from_key(const TABLE *table, byte *buf,
KEY *key_info,
const key_range *key_spec,
part_id_range *part_spec);
bool mysql_unpack_partition(THD *thd, uchar *part_buf, uint part_info_len,
TABLE* table, enum db_type default_db_type);
bool mysql_unpack_partition(THD *thd, const uchar *part_buf,
uint part_info_len, TABLE *table,
enum db_type default_db_type);
#endif
@ -765,7 +768,8 @@ class handler :public Sql_alloc
friend class ha_partition;
#endif
protected:
struct st_table *table; /* The table definition */
struct st_table_share *table_share; /* The table definition */
struct st_table *table; /* The current open table */
virtual int index_init(uint idx, bool sorted) { active_index=idx; return 0; }
virtual int index_end() { active_index=MAX_KEY; return 0; }
@ -826,8 +830,8 @@ public:
MY_BITMAP *read_set;
MY_BITMAP *write_set;
handler(const handlerton *ht_arg, TABLE *table_arg) :table(table_arg),
ht(ht_arg),
handler(const handlerton *ht_arg, TABLE_SHARE *share_arg)
:table_share(share_arg), ht(ht_arg),
ref(0), data_file_length(0), max_data_file_length(0), index_file_length(0),
delete_length(0), auto_increment_value(0),
records(0), deleted(0), mean_rec_length(0),
@ -839,16 +843,19 @@ public:
{}
virtual ~handler(void)
{
ha_deallocate_read_write_set();
/* TODO: DBUG_ASSERT(inited == NONE); */
}
virtual int ha_initialise();
int ha_open(const char *name, int mode, int test_if_locked);
int ha_open(TABLE *table, const char *name, int mode, int test_if_locked);
bool update_auto_increment();
virtual void print_error(int error, myf errflag);
virtual bool get_error_message(int error, String *buf);
uint get_dup_key(int error);
void change_table_ptr(TABLE *table_arg) { table=table_arg; }
void change_table_ptr(TABLE *table_arg, TABLE_SHARE *share)
{
table= table_arg;
table_share= share;
}
virtual double scan_time()
{ return ulonglong2double(data_file_length) / IO_SIZE + 2; }
virtual double read_time(uint index, uint ranges, ha_rows rows)
@ -1034,7 +1041,6 @@ public:
}
void ha_set_primary_key_in_read_set();
int ha_allocate_read_write_set(ulong no_fields);
void ha_deallocate_read_write_set();
void ha_clear_all_set();
uint get_index(void) const { return active_index; }
virtual int open(const char *name, int mode, uint test_if_locked)=0;
@ -1408,7 +1414,8 @@ extern ulong total_ha, total_ha_2pc;
/* lookups */
enum db_type ha_resolve_by_name(const char *name, uint namelen);
const char *ha_get_storage_engine(enum db_type db_type);
handler *get_new_handler(TABLE *table, MEM_ROOT *alloc, enum db_type db_type);
handler *get_new_handler(TABLE_SHARE *share, MEM_ROOT *alloc,
enum db_type db_type);
enum db_type ha_checktype(THD *thd, enum db_type database_type,
bool no_substitute, bool report_error);
bool ha_check_storage_engine_flag(enum db_type db_type, uint32 flag);
@ -1422,10 +1429,12 @@ void ha_close_connection(THD* thd);
my_bool ha_storage_engine_is_enabled(enum db_type database_type);
bool ha_flush_logs(enum db_type db_type=DB_TYPE_DEFAULT);
void ha_drop_database(char* path);
int ha_create_table(const char *name, HA_CREATE_INFO *create_info,
int ha_create_table(THD *thd, const char *path,
const char *db, const char *table_name,
HA_CREATE_INFO *create_info,
bool update_create_info);
int ha_delete_table(THD *thd, enum db_type db_type, const char *path,
const char *alias, bool generate_warning);
const char *db, const char *alias, bool generate_warning);
/* statistics and info */
bool ha_show_status(THD *thd, enum db_type db_type, enum ha_stat_type stat);

View File

@ -1406,20 +1406,21 @@ bool agg_item_charsets(DTCollation &coll, const char *fname,
Item_field::Item_field(Field *f)
:Item_ident(0, NullS, *f->table_name, f->field_name),
item_equal(0), no_const_subst(0),
item_equal(0), no_const_subst(0),
have_privileges(0), any_privileges(0)
{
set_field(f);
/*
field_name and talbe_name should not point to garbage
field_name and table_name should not point to garbage
if this item is to be reused
*/
orig_table_name= orig_field_name= "";
}
Item_field::Item_field(THD *thd, Name_resolution_context *context_arg,
Field *f)
:Item_ident(context_arg, f->table->s->db, *f->table_name, f->field_name),
:Item_ident(context_arg, f->table->s->db.str, *f->table_name, f->field_name),
item_equal(0), no_const_subst(0),
have_privileges(0), any_privileges(0)
{
@ -1486,7 +1487,7 @@ void Item_field::set_field(Field *field_par)
max_length= field_par->max_length();
table_name= *field_par->table_name;
field_name= field_par->field_name;
db_name= field_par->table->s->db;
db_name= field_par->table->s->db.str;
alias_name_used= field_par->table->alias_name_used;
unsigned_flag=test(field_par->flags & UNSIGNED_FLAG);
collation.set(field_par->charset(), DERIVATION_IMPLICIT);
@ -3729,15 +3730,20 @@ enum_field_types Item::field_type() const
Field *Item::make_string_field(TABLE *table)
{
Field *field;
DBUG_ASSERT(collation.collation);
if (max_length/collation.collation->mbmaxlen > CONVERT_IF_BIGGER_TO_BLOB)
return new Field_blob(max_length, maybe_null, name, table,
field= new Field_blob(max_length, maybe_null, name,
collation.collation);
if (max_length > 0)
return new Field_varstring(max_length, maybe_null, name, table,
else if (max_length > 0)
field= new Field_varstring(max_length, maybe_null, name, table->s,
collation.collation);
return new Field_string(max_length, maybe_null, name, table,
collation.collation);
else
field= new Field_string(max_length, maybe_null, name,
collation.collation);
if (field)
field->init(table);
return field;
}
@ -3745,73 +3751,95 @@ Field *Item::make_string_field(TABLE *table)
Create a field based on field_type of argument
For now, this is only used to create a field for
IFNULL(x,something)
IFNULL(x,something) and time functions
RETURN
0 error
# Created field
*/
Field *Item::tmp_table_field_from_field_type(TABLE *table)
Field *Item::tmp_table_field_from_field_type(TABLE *table, bool fixed_length)
{
/*
The field functions defines a field to be not null if null_ptr is not 0
*/
uchar *null_ptr= maybe_null ? (uchar*) "" : 0;
Field *field;
switch (field_type()) {
case MYSQL_TYPE_DECIMAL:
case MYSQL_TYPE_NEWDECIMAL:
return new Field_new_decimal((char*) 0, max_length, null_ptr, 0,
Field::NONE, name, table, decimals, 0,
field= new Field_new_decimal((char*) 0, max_length, null_ptr, 0,
Field::NONE, name, decimals, 0,
unsigned_flag);
break;
case MYSQL_TYPE_TINY:
return new Field_tiny((char*) 0, max_length, null_ptr, 0, Field::NONE,
name, table, 0, unsigned_flag);
field= new Field_tiny((char*) 0, max_length, null_ptr, 0, Field::NONE,
name, 0, unsigned_flag);
break;
case MYSQL_TYPE_SHORT:
return new Field_short((char*) 0, max_length, null_ptr, 0, Field::NONE,
name, table, 0, unsigned_flag);
field= new Field_short((char*) 0, max_length, null_ptr, 0, Field::NONE,
name, 0, unsigned_flag);
break;
case MYSQL_TYPE_LONG:
return new Field_long((char*) 0, max_length, null_ptr, 0, Field::NONE,
name, table, 0, unsigned_flag);
field= new Field_long((char*) 0, max_length, null_ptr, 0, Field::NONE,
name, 0, unsigned_flag);
break;
#ifdef HAVE_LONG_LONG
case MYSQL_TYPE_LONGLONG:
return new Field_longlong((char*) 0, max_length, null_ptr, 0, Field::NONE,
name, table, 0, unsigned_flag);
field= new Field_longlong((char*) 0, max_length, null_ptr, 0, Field::NONE,
name, 0, unsigned_flag);
break;
#endif
case MYSQL_TYPE_FLOAT:
return new Field_float((char*) 0, max_length, null_ptr, 0, Field::NONE,
name, table, decimals, 0, unsigned_flag);
field= new Field_float((char*) 0, max_length, null_ptr, 0, Field::NONE,
name, decimals, 0, unsigned_flag);
break;
case MYSQL_TYPE_DOUBLE:
return new Field_double((char*) 0, max_length, null_ptr, 0, Field::NONE,
name, table, decimals, 0, unsigned_flag);
field= new Field_double((char*) 0, max_length, null_ptr, 0, Field::NONE,
name, decimals, 0, unsigned_flag);
break;
case MYSQL_TYPE_NULL:
return new Field_null((char*) 0, max_length, Field::NONE,
name, table, &my_charset_bin);
field= new Field_null((char*) 0, max_length, Field::NONE,
name, &my_charset_bin);
break;
case MYSQL_TYPE_INT24:
return new Field_medium((char*) 0, max_length, null_ptr, 0, Field::NONE,
name, table, 0, unsigned_flag);
field= new Field_medium((char*) 0, max_length, null_ptr, 0, Field::NONE,
name, 0, unsigned_flag);
break;
case MYSQL_TYPE_NEWDATE:
case MYSQL_TYPE_DATE:
return new Field_date(maybe_null, name, table, &my_charset_bin);
field= new Field_date(maybe_null, name, &my_charset_bin);
break;
case MYSQL_TYPE_TIME:
return new Field_time(maybe_null, name, table, &my_charset_bin);
field= new Field_time(maybe_null, name, &my_charset_bin);
break;
case MYSQL_TYPE_TIMESTAMP:
case MYSQL_TYPE_DATETIME:
return new Field_datetime(maybe_null, name, table, &my_charset_bin);
field= new Field_datetime(maybe_null, name, &my_charset_bin);
break;
case MYSQL_TYPE_YEAR:
return new Field_year((char*) 0, max_length, null_ptr, 0, Field::NONE,
name, table);
field= new Field_year((char*) 0, max_length, null_ptr, 0, Field::NONE,
name);
break;
case MYSQL_TYPE_BIT:
return new Field_bit_as_char(NULL, max_length, null_ptr, 0, NULL, 0,
Field::NONE, name, table);
field= new Field_bit_as_char(NULL, max_length, null_ptr, 0, NULL, 0,
Field::NONE, name);
break;
default:
/* This case should never be chosen */
DBUG_ASSERT(0);
/* If something goes awfully wrong, it's better to get a string than die */
case MYSQL_TYPE_STRING:
if (fixed_length && max_length < CONVERT_IF_BIGGER_TO_BLOB)
{
field= new Field_string(max_length, maybe_null, name,
collation.collation);
break;
}
/* Fall through to make_string_field() */
case MYSQL_TYPE_ENUM:
case MYSQL_TYPE_SET:
case MYSQL_TYPE_STRING:
case MYSQL_TYPE_VAR_STRING:
case MYSQL_TYPE_VARCHAR:
return make_string_field(table);
@ -3820,10 +3848,12 @@ Field *Item::tmp_table_field_from_field_type(TABLE *table)
case MYSQL_TYPE_LONG_BLOB:
case MYSQL_TYPE_BLOB:
case MYSQL_TYPE_GEOMETRY:
return new Field_blob(max_length, maybe_null, name, table,
collation.collation);
field= new Field_blob(max_length, maybe_null, name, collation.collation);
break; // Blob handled outside of case
}
if (field)
field->init(table);
return field;
}
@ -5034,8 +5064,9 @@ bool Item_default_value::fix_fields(THD *thd, Item **items)
if (!(def_field= (Field*) sql_alloc(field_arg->field->size_of())))
goto error;
memcpy(def_field, field_arg->field, field_arg->field->size_of());
def_field->move_field(def_field->table->s->default_values -
def_field->table->record[0]);
def_field->move_field_offset((my_ptrdiff_t)
(def_field->table->s->default_values -
def_field->table->record[0]));
set_field(def_field);
return FALSE;
@ -5130,16 +5161,22 @@ bool Item_insert_value::fix_fields(THD *thd, Item **items)
if (!def_field)
return TRUE;
memcpy(def_field, field_arg->field, field_arg->field->size_of());
def_field->move_field(def_field->table->insert_values -
def_field->table->record[0]);
def_field->move_field_offset((my_ptrdiff_t)
(def_field->table->insert_values -
def_field->table->record[0]));
set_field(def_field);
}
else
{
Field *tmp_field= field_arg->field;
/* charset doesn't matter here, it's to avoid sigsegv only */
set_field(new Field_null(0, 0, Field::NONE, tmp_field->field_name,
tmp_field->table, &my_charset_bin));
tmp_field= new Field_null(0, 0, Field::NONE, field_arg->field->field_name,
&my_charset_bin);
if (tmp_field)
{
tmp_field->init(field_arg->field->table);
set_field(tmp_field);
}
}
return FALSE;
}
@ -5919,24 +5956,31 @@ Field *Item_type_holder::make_field_by_type(TABLE *table)
The field functions defines a field to be not null if null_ptr is not 0
*/
uchar *null_ptr= maybe_null ? (uchar*) "" : 0;
switch (fld_type)
{
Field *field;
switch (fld_type) {
case MYSQL_TYPE_ENUM:
DBUG_ASSERT(enum_set_typelib);
return new Field_enum((char *) 0, max_length, null_ptr, 0,
field= new Field_enum((char *) 0, max_length, null_ptr, 0,
Field::NONE, name,
table, get_enum_pack_length(enum_set_typelib->count),
get_enum_pack_length(enum_set_typelib->count),
enum_set_typelib, collation.collation);
if (field)
field->init(table);
return field;
case MYSQL_TYPE_SET:
DBUG_ASSERT(enum_set_typelib);
return new Field_set((char *) 0, max_length, null_ptr, 0,
field= new Field_set((char *) 0, max_length, null_ptr, 0,
Field::NONE, name,
table, get_set_pack_length(enum_set_typelib->count),
get_set_pack_length(enum_set_typelib->count),
enum_set_typelib, collation.collation);
if (field)
field->init(table);
return field;
default:
break;
}
return tmp_table_field_from_field_type(table);
return tmp_table_field_from_field_type(table, 0);
}

View File

@ -689,7 +689,7 @@ public:
// used in row subselects to get value of elements
virtual void bring_value() {}
Field *tmp_table_field_from_field_type(TABLE *table);
Field *tmp_table_field_from_field_type(TABLE *table, bool fixed_length);
virtual Item_field *filed_for_view_update() { return 0; }
virtual Item *neg_transformer(THD *thd) { return NULL; }

View File

@ -1228,7 +1228,7 @@ enum_field_types Item_func_ifnull::field_type() const
Field *Item_func_ifnull::tmp_table_field(TABLE *table)
{
return tmp_table_field_from_field_type(table);
return tmp_table_field_from_field_type(table, 0);
}
double

View File

@ -462,7 +462,6 @@ Item *create_func_cast(Item *a, Cast_target cast_type, int len, int dec,
CHARSET_INFO *cs)
{
Item *res;
LINT_INIT(res);
switch (cast_type) {
case ITEM_CAST_BINARY: res= new Item_func_binary(a); break;
@ -478,6 +477,10 @@ Item *create_func_cast(Item *a, Cast_target cast_type, int len, int dec,
res= new Item_char_typecast(a, len, cs ? cs :
current_thd->variables.collation_connection);
break;
default:
DBUG_ASSERT(0);
res= 0;
break;
}
return res;
}

View File

@ -362,41 +362,43 @@ bool Item_func::eq(const Item *item, bool binary_cmp) const
}
Field *Item_func::tmp_table_field(TABLE *t_arg)
Field *Item_func::tmp_table_field(TABLE *table)
{
Field *res;
LINT_INIT(res);
Field *field;
LINT_INIT(field);
switch (result_type()) {
case INT_RESULT:
if (max_length > 11)
res= new Field_longlong(max_length, maybe_null, name, t_arg,
unsigned_flag);
field= new Field_longlong(max_length, maybe_null, name, unsigned_flag);
else
res= new Field_long(max_length, maybe_null, name, t_arg,
unsigned_flag);
field= new Field_long(max_length, maybe_null, name, unsigned_flag);
break;
case REAL_RESULT:
res= new Field_double(max_length, maybe_null, name, t_arg, decimals);
field= new Field_double(max_length, maybe_null, name, decimals);
break;
case STRING_RESULT:
res= make_string_field(t_arg);
return make_string_field(table);
break;
case DECIMAL_RESULT:
res= new Field_new_decimal(my_decimal_precision_to_length(decimal_precision(),
decimals,
unsigned_flag),
maybe_null, name, t_arg, decimals, unsigned_flag);
field= new Field_new_decimal(my_decimal_precision_to_length(decimal_precision(),
decimals,
unsigned_flag),
maybe_null, name, decimals, unsigned_flag);
break;
case ROW_RESULT:
default:
// This case should never be chosen
DBUG_ASSERT(0);
field= 0;
break;
}
return res;
if (field)
field->init(table);
return field;
}
my_decimal *Item_func::val_decimal(my_decimal *decimal_value)
{
DBUG_ASSERT(fixed);
@ -4637,7 +4639,8 @@ Item_func_sp::Item_func_sp(Name_resolution_context *context_arg, sp_name *name)
{
maybe_null= 1;
m_name->init_qname(current_thd);
dummy_table= (TABLE*) sql_calloc(sizeof(TABLE));
dummy_table= (TABLE*) sql_calloc(sizeof(TABLE)+ sizeof(TABLE_SHARE));
dummy_table->s= (TABLE_SHARE*) (dummy_table+1);
}
@ -4648,9 +4651,11 @@ Item_func_sp::Item_func_sp(Name_resolution_context *context_arg,
{
maybe_null= 1;
m_name->init_qname(current_thd);
dummy_table= (TABLE*) sql_calloc(sizeof(TABLE));
dummy_table= (TABLE*) sql_calloc(sizeof(TABLE)+ sizeof(TABLE_SHARE));
dummy_table->s= (TABLE_SHARE*) (dummy_table+1);
}
void
Item_func_sp::cleanup()
{
@ -4705,16 +4710,15 @@ Item_func_sp::sp_result_field(void) const
DBUG_RETURN(0);
}
}
if (!dummy_table->s)
if (!dummy_table->alias)
{
char *empty_name= (char *) "";
TABLE_SHARE *share;
dummy_table->s= share= &dummy_table->share_not_to_be_used;
dummy_table->alias = empty_name;
dummy_table->maybe_null = maybe_null;
dummy_table->alias= empty_name;
dummy_table->maybe_null= maybe_null;
dummy_table->in_use= current_thd;
share->table_cache_key = empty_name;
share->table_name = empty_name;
dummy_table->s->table_cache_key.str = empty_name;
dummy_table->s->table_name.str= empty_name;
dummy_table->s->db.str= empty_name;
}
field= m_sp->make_field(max_length, name, dummy_table);
DBUG_RETURN(field);

View File

@ -133,6 +133,7 @@ Item_subselect::select_transformer(JOIN *join)
bool Item_subselect::fix_fields(THD *thd_param, Item **ref)
{
char const *save_where= thd_param->where;
uint8 uncacheable;
bool res;
DBUG_ASSERT(fixed == 0);
@ -178,15 +179,17 @@ bool Item_subselect::fix_fields(THD *thd_param, Item **ref)
fix_length_and_dec();
}
else
return 1;
uint8 uncacheable= engine->uncacheable();
if (uncacheable)
goto err;
if ((uncacheable= engine->uncacheable()))
{
const_item_cache= 0;
if (uncacheable & UNCACHEABLE_RAND)
used_tables_cache|= RAND_TABLE_BIT;
}
fixed= 1;
err:
thd->where= save_where;
return res;
}
@ -1797,7 +1800,7 @@ void subselect_uniquesubquery_engine::print(String *str)
str->append(STRING_WITH_LEN("<primary_index_lookup>("));
tab->ref.items[0]->print(str);
str->append(STRING_WITH_LEN(" in "));
str->append(tab->table->s->table_name);
str->append(tab->table->s->table_name.str, tab->table->s->table_name.length);
KEY *key_info= tab->table->key_info+ tab->ref.key;
str->append(STRING_WITH_LEN(" on "));
str->append(key_info->name);
@ -1815,7 +1818,7 @@ void subselect_indexsubquery_engine::print(String *str)
str->append(STRING_WITH_LEN("<index_lookup>("));
tab->ref.items[0]->print(str);
str->append(STRING_WITH_LEN(" in "));
str->append(tab->table->s->table_name);
str->append(tab->table->s->table_name.str, tab->table->s->table_name.length);
KEY *key_info= tab->table->key_info+ tab->ref.key;
str->append(STRING_WITH_LEN(" on "));
str->append(key_info->name);

View File

@ -143,26 +143,33 @@ bool Item_sum::walk (Item_processor processor, byte *argument)
Field *Item_sum::create_tmp_field(bool group, TABLE *table,
uint convert_blob_length)
{
Field *field;
switch (result_type()) {
case REAL_RESULT:
return new Field_double(max_length,maybe_null,name,table,decimals);
field= new Field_double(max_length, maybe_null, name, decimals);
break;
case INT_RESULT:
return new Field_longlong(max_length,maybe_null,name,table,unsigned_flag);
field= new Field_longlong(max_length, maybe_null, name, unsigned_flag);
break;
case STRING_RESULT:
if (max_length > 255 && convert_blob_length)
return new Field_varstring(convert_blob_length, maybe_null,
name, table,
collation.collation);
return make_string_field(table);
if (max_length <= 255 || !convert_blob_length)
return make_string_field(table);
field= new Field_varstring(convert_blob_length, maybe_null,
name, table->s, collation.collation);
break;
case DECIMAL_RESULT:
return new Field_new_decimal(max_length, maybe_null, name, table,
field= new Field_new_decimal(max_length, maybe_null, name,
decimals, unsigned_flag);
break;
case ROW_RESULT:
default:
// This case should never be choosen
DBUG_ASSERT(0);
return 0;
}
if (field)
field->init(table);
return field;
}
@ -312,9 +319,10 @@ Item_sum_hybrid::fix_fields(THD *thd, Item **ref)
Field *Item_sum_hybrid::create_tmp_field(bool group, TABLE *table,
uint convert_blob_length)
{
Field *field;
if (args[0]->type() == Item::FIELD_ITEM)
{
Field *field= ((Item_field*) args[0])->field;
field= ((Item_field*) args[0])->field;
if ((field= create_tmp_field_from_field(current_thd, field, name, table,
NULL, convert_blob_length)))
@ -328,16 +336,21 @@ Field *Item_sum_hybrid::create_tmp_field(bool group, TABLE *table,
*/
switch (args[0]->field_type()) {
case MYSQL_TYPE_DATE:
return new Field_date(maybe_null, name, table, collation.collation);
field= new Field_date(maybe_null, name, collation.collation);
break;
case MYSQL_TYPE_TIME:
return new Field_time(maybe_null, name, table, collation.collation);
field= new Field_time(maybe_null, name, collation.collation);
break;
case MYSQL_TYPE_TIMESTAMP:
case MYSQL_TYPE_DATETIME:
return new Field_datetime(maybe_null, name, table, collation.collation);
default:
field= new Field_datetime(maybe_null, name, collation.collation);
break;
default:
return Item_sum::create_tmp_field(group, table, convert_blob_length);
}
return Item_sum::create_tmp_field(group, table, convert_blob_length);
if (field)
field->init(table);
return field;
}
@ -839,6 +852,7 @@ Item *Item_sum_avg::copy_or_same(THD* thd)
Field *Item_sum_avg::create_tmp_field(bool group, TABLE *table,
uint convert_blob_len)
{
Field *field;
if (group)
{
/*
@ -846,14 +860,18 @@ Field *Item_sum_avg::create_tmp_field(bool group, TABLE *table,
The easyest way is to do this is to store both value in a string
and unpack on access.
*/
return new Field_string(((hybrid_type == DECIMAL_RESULT) ?
field= new Field_string(((hybrid_type == DECIMAL_RESULT) ?
dec_bin_size : sizeof(double)) + sizeof(longlong),
0, name, table, &my_charset_bin);
0, name, &my_charset_bin);
}
if (hybrid_type == DECIMAL_RESULT)
return new Field_new_decimal(max_length, maybe_null, name, table,
else if (hybrid_type == DECIMAL_RESULT)
field= new Field_new_decimal(max_length, maybe_null, name,
decimals, unsigned_flag);
return new Field_double(max_length, maybe_null, name, table, decimals);
else
field= new Field_double(max_length, maybe_null, name, decimals);
if (field)
field->init(table);
return field;
}
@ -1018,6 +1036,7 @@ Item *Item_sum_variance::copy_or_same(THD* thd)
Field *Item_sum_variance::create_tmp_field(bool group, TABLE *table,
uint convert_blob_len)
{
Field *field;
if (group)
{
/*
@ -1025,15 +1044,19 @@ Field *Item_sum_variance::create_tmp_field(bool group, TABLE *table,
The easyest way is to do this is to store both value in a string
and unpack on access.
*/
return new Field_string(((hybrid_type == DECIMAL_RESULT) ?
field= new Field_string(((hybrid_type == DECIMAL_RESULT) ?
dec_bin_size0 + dec_bin_size1 :
sizeof(double)*2) + sizeof(longlong),
0, name, table, &my_charset_bin);
0, name, &my_charset_bin);
}
if (hybrid_type == DECIMAL_RESULT)
return new Field_new_decimal(max_length, maybe_null, name, table,
else if (hybrid_type == DECIMAL_RESULT)
field= new Field_new_decimal(max_length, maybe_null, name,
decimals, unsigned_flag);
return new Field_double(max_length, maybe_null,name,table,decimals);
else
field= new Field_double(max_length, maybe_null, name, decimals);
if (field)
field->init(table);
return field;
}

View File

@ -3002,18 +3002,6 @@ get_date_time_result_type(const char *format, uint length)
}
Field *Item_func_str_to_date::tmp_table_field(TABLE *t_arg)
{
if (cached_field_type == MYSQL_TYPE_TIME)
return (new Field_time(maybe_null, name, t_arg, &my_charset_bin));
if (cached_field_type == MYSQL_TYPE_DATE)
return (new Field_date(maybe_null, name, t_arg, &my_charset_bin));
if (cached_field_type == MYSQL_TYPE_DATETIME)
return (new Field_datetime(maybe_null, name, t_arg, &my_charset_bin));
return (new Field_string(max_length, maybe_null, name, t_arg, &my_charset_bin));
}
void Item_func_str_to_date::fix_length_and_dec()
{
char format_buff[64];

View File

@ -340,10 +340,10 @@ public:
max_length=MAX_DATE_WIDTH*MY_CHARSET_BIN_MB_MAXLEN;
}
int save_in_field(Field *to, bool no_conversions);
Field *tmp_table_field(TABLE *t_arg)
Field *tmp_table_field(TABLE *table)
{
return (new Field_date(maybe_null, name, t_arg, &my_charset_bin));
}
return tmp_table_field_from_field_type(table, 0);
}
};
@ -355,9 +355,9 @@ public:
Item_date_func(Item *a,Item *b) :Item_str_func(a,b) {}
Item_date_func(Item *a,Item *b, Item *c) :Item_str_func(a,b,c) {}
enum_field_types field_type() const { return MYSQL_TYPE_DATETIME; }
Field *tmp_table_field(TABLE *t_arg)
Field *tmp_table_field(TABLE *table)
{
return (new Field_datetime(maybe_null, name, t_arg, &my_charset_bin));
return tmp_table_field_from_field_type(table, 0);
}
};
@ -378,9 +378,9 @@ public:
longlong val_int() { DBUG_ASSERT(fixed == 1); return value; }
String *val_str(String *str);
void fix_length_and_dec();
Field *tmp_table_field(TABLE *t_arg)
Field *tmp_table_field(TABLE *table)
{
return (new Field_time(maybe_null, name, t_arg, &my_charset_bin));
return tmp_table_field_from_field_type(table, 0);
}
/*
Abstract method that defines which time zone is used for conversion.
@ -618,9 +618,9 @@ public:
}
enum_field_types field_type() const { return MYSQL_TYPE_TIME; }
const char *func_name() const { return "sec_to_time"; }
Field *tmp_table_field(TABLE *t_arg)
Field *tmp_table_field(TABLE *table)
{
return (new Field_time(maybe_null, name, t_arg, &my_charset_bin));
return tmp_table_field_from_field_type(table, 0);
}
};
@ -741,9 +741,9 @@ public:
bool get_date(TIME *ltime, uint fuzzy_date);
const char *cast_type() const { return "date"; }
enum_field_types field_type() const { return MYSQL_TYPE_DATE; }
Field *tmp_table_field(TABLE *t_arg)
Field *tmp_table_field(TABLE *table)
{
return (new Field_date(maybe_null, name, t_arg, &my_charset_bin));
return tmp_table_field_from_field_type(table, 0);
}
void fix_length_and_dec()
{
@ -763,9 +763,9 @@ public:
bool get_time(TIME *ltime);
const char *cast_type() const { return "time"; }
enum_field_types field_type() const { return MYSQL_TYPE_TIME; }
Field *tmp_table_field(TABLE *t_arg)
Field *tmp_table_field(TABLE *table)
{
return (new Field_time(maybe_null, name, t_arg, &my_charset_bin));
return tmp_table_field_from_field_type(table, 0);
}
};
@ -778,9 +778,9 @@ public:
String *val_str(String *str);
const char *cast_type() const { return "datetime"; }
enum_field_types field_type() const { return MYSQL_TYPE_DATETIME; }
Field *tmp_table_field(TABLE *t_arg)
Field *tmp_table_field(TABLE *table)
{
return (new Field_datetime(maybe_null, name, t_arg, &my_charset_bin));
return tmp_table_field_from_field_type(table, 0);
}
};
@ -796,9 +796,9 @@ public:
decimals=0;
max_length=MAX_DATE_WIDTH*MY_CHARSET_BIN_MB_MAXLEN;
}
Field *tmp_table_field(TABLE *t_arg)
Field *tmp_table_field(TABLE *table)
{
return (new Field_date(maybe_null, name, t_arg, &my_charset_bin));
return tmp_table_field_from_field_type(table, 0);
}
};
@ -816,18 +816,9 @@ public:
enum_field_types field_type() const { return cached_field_type; }
void fix_length_and_dec();
/*
TODO:
Change this when we support
microseconds in TIME/DATETIME
*/
Field *tmp_table_field(TABLE *t_arg)
Field *tmp_table_field(TABLE *table)
{
if (cached_field_type == MYSQL_TYPE_TIME)
return (new Field_time(maybe_null, name, t_arg, &my_charset_bin));
else if (cached_field_type == MYSQL_TYPE_DATETIME)
return (new Field_datetime(maybe_null, name, t_arg, &my_charset_bin));
return (new Field_string(max_length, maybe_null, name, t_arg, &my_charset_bin));
return tmp_table_field_from_field_type(table, 0);
}
void print(String *str);
const char *func_name() const { return "add_time"; }
@ -847,9 +838,9 @@ public:
max_length=MAX_TIME_WIDTH*MY_CHARSET_BIN_MB_MAXLEN;
maybe_null= 1;
}
Field *tmp_table_field(TABLE *t_arg)
Field *tmp_table_field(TABLE *table)
{
return (new Field_time(maybe_null, name, t_arg, &my_charset_bin));
return tmp_table_field_from_field_type(table, 0);
}
};
@ -866,9 +857,9 @@ public:
decimals=0;
max_length=MAX_TIME_WIDTH*MY_CHARSET_BIN_MB_MAXLEN;
}
Field *tmp_table_field(TABLE *t_arg)
Field *tmp_table_field(TABLE *table)
{
return (new Field_time(maybe_null, name, t_arg, &my_charset_bin));
return tmp_table_field_from_field_type(table, 0);
}
};
@ -942,7 +933,10 @@ public:
const char *func_name() const { return "str_to_date"; }
enum_field_types field_type() const { return cached_field_type; }
void fix_length_and_dec();
Field *tmp_table_field(TABLE *t_arg);
Field *tmp_table_field(TABLE *table)
{
return tmp_table_field_from_field_type(table, 1);
}
};

View File

@ -25,5 +25,8 @@
Field *Item_sum_unique_users::create_tmp_field(bool group, TABLE *table,
uint convert_blob_length)
{
return new Field_long(9,maybe_null,name,table,1);
Field *field= new Field_long(9, maybe_null, name, 1);
if (field)
field->init(table);
return field;
}

View File

@ -28,7 +28,7 @@
** Used when calculating key for NEXT_NUMBER
*/
int find_ref_key(TABLE *table,Field *field, uint *key_length)
int find_ref_key(KEY *key, uint key_count, Field *field, uint *key_length)
{
reg2 int i;
reg3 KEY *key_info;
@ -38,8 +38,8 @@ int find_ref_key(TABLE *table,Field *field, uint *key_length)
/* Test if some key starts as fieldpos */
for (i= 0, key_info= table->key_info ;
i < (int) table->s->keys ;
for (i= 0, key_info= key ;
i < (int) key_count ;
i++, key_info++)
{
if (key_info->key_part[0].offset == fieldpos)
@ -50,8 +50,8 @@ int find_ref_key(TABLE *table,Field *field, uint *key_length)
}
/* Test if some key contains fieldpos */
for (i= 0, key_info= table->key_info ;
i < (int) table->s->keys ;
for (i= 0, key_info= key;
i < (int) key_count ;
i++, key_info++)
{
uint j;

View File

@ -357,12 +357,15 @@ void mysql_lock_abort(THD *thd, TABLE *table)
{
MYSQL_LOCK *locked;
TABLE *write_lock_used;
DBUG_ENTER("mysql_lock_abort");
if ((locked = get_lock_data(thd,&table,1,1,&write_lock_used)))
{
for (uint i=0; i < locked->lock_count; i++)
thr_abort_locks(locked->locks[i]->lock);
my_free((gptr) locked,MYF(0));
}
DBUG_VOID_RETURN;
}
@ -482,8 +485,8 @@ static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count,
table_ptr[i]->reginfo.lock_type >= TL_WRITE_ALLOW_WRITE &&
count != 1)
{
my_error(ER_WRONG_LOCK_OF_SYSTEM_TABLE, MYF(0), table_ptr[i]->s->db,
table_ptr[i]->s->table_name);
my_error(ER_WRONG_LOCK_OF_SYSTEM_TABLE, MYF(0), table_ptr[i]->s->db.str,
table_ptr[i]->s->table_name.str);
DBUG_RETURN(0);
}
}
@ -610,32 +613,35 @@ int lock_table_name(THD *thd, TABLE_LIST *table_list)
DBUG_ENTER("lock_table_name");
DBUG_PRINT("enter",("db: %s name: %s", db, table_list->table_name));
safe_mutex_assert_owner(&LOCK_open);
key_length=(uint) (strmov(strmov(key,db)+1,table_list->table_name)
-key)+ 1;
key_length= create_table_def_key(thd, key, table_list, 0);
/* Only insert the table if we haven't insert it already */
for (table=(TABLE*) hash_search(&open_cache,(byte*) key,key_length) ;
table ;
table = (TABLE*) hash_next(&open_cache,(byte*) key,key_length))
{
if (table->in_use == thd)
{
DBUG_PRINT("info", ("Table is in use"));
table->s->version= 0; // Ensure no one can use this
table->locked_by_name= 1;
DBUG_RETURN(0);
}
}
/*
Create a table entry with the right key and with an old refresh version
Note that we must use my_malloc() here as this is freed by the table
cache
*/
if (!(table= (TABLE*) my_malloc(sizeof(*table)+key_length,
MYF(MY_WME | MY_ZEROFILL))))
if (!(table= (TABLE*) my_malloc(sizeof(*table)+ sizeof(TABLE_SHARE)+
key_length, MYF(MY_WME | MY_ZEROFILL))))
DBUG_RETURN(-1);
table->s= &table->share_not_to_be_used;
memcpy((table->s->table_cache_key= (char*) (table+1)), key, key_length);
table->s->db= table->s->table_cache_key;
table->s->key_length=key_length;
table->in_use=thd;
table->s= (TABLE_SHARE*) (table+1);
memcpy((table->s->table_cache_key.str= (char*) (table->s+1)), key,
key_length);
table->s->table_cache_key.length= key_length;
table->s->tmp_table= INTERNAL_TMP_TABLE; // for intern_close_table
table->in_use= thd;
table->locked_by_name=1;
table_list->table=table;
@ -665,8 +671,17 @@ static bool locked_named_table(THD *thd, TABLE_LIST *table_list)
{
for (; table_list ; table_list=table_list->next_local)
{
if (table_list->table && table_is_used(table_list->table,0))
return 1;
TABLE *table= table_list->table;
if (table)
{
TABLE *save_next= table->next;
bool result;
table->next= 0;
result= table_is_used(table_list->table, 0);
table->next= save_next;
if (result)
return 1;
}
}
return 0; // All tables are locked
}
@ -676,6 +691,7 @@ bool wait_for_locked_table_names(THD *thd, TABLE_LIST *table_list)
{
bool result=0;
DBUG_ENTER("wait_for_locked_table_names");
safe_mutex_assert_owner(&LOCK_open);
while (locked_named_table(thd,table_list))
@ -685,7 +701,7 @@ bool wait_for_locked_table_names(THD *thd, TABLE_LIST *table_list)
result=1;
break;
}
wait_for_refresh(thd);
wait_for_condition(thd, &LOCK_open, &COND_refresh);
pthread_mutex_lock(&LOCK_open);
}
DBUG_RETURN(result);
@ -1037,5 +1053,3 @@ bool make_global_read_lock_block_commit(THD *thd)
thd->exit_cond(old_message); // this unlocks LOCK_global_read_lock
DBUG_RETURN(error);
}

View File

@ -601,8 +601,8 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
int mysql_rm_table_part2_with_lock(THD *thd, TABLE_LIST *tables,
bool if_exists, bool drop_temporary,
bool log_query);
int quick_rm_table(enum db_type base,const char *db,
const char *table_name);
bool quick_rm_table(enum db_type base,const char *db,
const char *table_name);
void close_cached_table(THD *thd, TABLE *table);
bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list);
bool mysql_change_db(THD *thd,const char *name,bool no_access_check);
@ -633,7 +633,10 @@ bool check_dup(const char *db, const char *name, TABLE_LIST *tables);
bool table_cache_init(void);
void table_cache_free(void);
uint cached_tables(void);
bool table_def_init(void);
void table_def_free(void);
uint cached_open_tables(void);
uint cached_table_definitions(void);
void kill_mysql(void);
void close_connection(THD *thd, uint errcode, bool lock);
bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables,
@ -780,15 +783,18 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
bool reset_auto_increment);
bool mysql_truncate(THD *thd, TABLE_LIST *table_list, bool dont_send_ok);
bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create);
uint create_table_def_key(THD *thd, byte *key, TABLE_LIST *table_list,
bool tmp_table);
TABLE_SHARE *get_table_share(THD *thd, TABLE_LIST *table_list, byte *key,
uint key_length, uint db_flags, int *error);
void release_table_share(TABLE_SHARE *share, enum release_type type);
TABLE_SHARE *get_cached_table_share(const char *db, const char *table_name);
TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type update);
TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT* mem,
bool *refresh, uint flags);
bool reopen_name_locked_table(THD* thd, TABLE_LIST* table);
TABLE *find_locked_table(THD *thd, const char *db,const char *table_name);
bool reopen_table(TABLE *table,bool locked);
bool reopen_tables(THD *thd,bool get_locks,bool in_refresh);
void close_old_data_files(THD *thd, TABLE *table, bool abort_locks,
bool send_refresh);
bool close_data_tables(THD *thd,const char *db, const char *table_name);
bool wait_for_tables(THD *thd);
bool table_is_used(TABLE *table, bool wait_for_name_lock);
@ -985,7 +991,8 @@ int setup_conds(THD *thd, TABLE_LIST *tables, TABLE_LIST *leaves,
COND **conds);
int setup_ftfuncs(SELECT_LEX* select);
int init_ftfuncs(THD *thd, SELECT_LEX* select, bool no_order);
void wait_for_refresh(THD *thd);
void wait_for_condition(THD *thd, pthread_mutex_t *mutex,
pthread_cond_t *cond);
int open_tables(THD *thd, TABLE_LIST **tables, uint *counter, uint flags);
int simple_open_n_lock_tables(THD *thd,TABLE_LIST *tables);
bool open_and_lock_tables(THD *thd,TABLE_LIST *tables);
@ -1004,9 +1011,12 @@ TABLE_LIST *find_table_in_list(TABLE_LIST *table,
const char *db_name,
const char *table_name);
TABLE_LIST *unique_table(TABLE_LIST *table, TABLE_LIST *table_list);
TABLE **find_temporary_table(THD *thd, const char *db, const char *table_name);
bool close_temporary_table(THD *thd, const char *db, const char *table_name);
void close_temporary(TABLE *table, bool delete_table);
TABLE *find_temporary_table(THD *thd, const char *db, const char *table_name);
TABLE *find_temporary_table(THD *thd, TABLE_LIST *table_list);
bool close_temporary_table(THD *thd, TABLE_LIST *table_list);
void close_temporary_table(THD *thd, TABLE *table, bool free_share,
bool delete_table);
void close_temporary(TABLE *table, bool free_share, bool delete_table);
bool rename_temporary_table(THD* thd, TABLE *table, const char *new_db,
const char *table_name);
void remove_db_from_cache(const char *db);
@ -1085,7 +1095,7 @@ void print_plan(JOIN* join, double read_time, double record_count,
#endif
void mysql_print_status();
/* key.cc */
int find_ref_key(TABLE *form,Field *field, uint *offset);
int find_ref_key(KEY *key, uint key_count, Field *field, uint *key_length);
void key_copy(byte *to_key, byte *from_record, KEY *key_info, uint key_length);
void key_restore(byte *to_record, byte *from_key, KEY *key_info,
uint key_length);
@ -1175,7 +1185,7 @@ extern ulong delayed_rows_in_use,delayed_insert_errors;
extern ulong slave_open_temp_tables;
extern ulong query_cache_size, query_cache_min_res_unit;
extern ulong slow_launch_threads, slow_launch_time;
extern ulong table_cache_size;
extern ulong table_cache_size, table_def_size;
extern ulong max_connections,max_connect_errors, connect_timeout;
extern ulong slave_net_timeout, slave_trans_retries;
extern uint max_user_connections;
@ -1370,23 +1380,36 @@ void unlock_table_names(THD *thd, TABLE_LIST *table_list,
void unireg_init(ulong options);
void unireg_end(void);
bool mysql_create_frm(THD *thd, my_string file_name,
bool mysql_create_frm(THD *thd, const char *file_name,
const char *db, const char *table,
HA_CREATE_INFO *create_info,
List<create_field> &create_field,
uint key_count,KEY *key_info,handler *db_type);
int rea_create_table(THD *thd, my_string file_name,
const char *db, const char *table,
int rea_create_table(THD *thd, const char *path,
const char *db, const char *table_name,
HA_CREATE_INFO *create_info,
List<create_field> &create_field,
uint key_count,KEY *key_info, handler *file);
List<create_field> &create_field,
uint key_count,KEY *key_info,
handler *file);
int format_number(uint inputflag,uint max_length,my_string pos,uint length,
my_string *errpos);
/* table.cc */
TABLE_SHARE *alloc_table_share(TABLE_LIST *table_list, byte *key,
uint key_length);
void init_tmp_table_share(TABLE_SHARE *share, const char *key, uint key_length,
const char *table_name, const char *path);
void free_table_share(TABLE_SHARE *share);
int open_table_def(THD *thd, TABLE_SHARE *share, uint db_flags);
void open_table_error(TABLE_SHARE *share, int error, int db_errno, int errarg);
int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias,
uint db_stat, uint prgflag, uint ha_open_flags,
TABLE *outparam);
int openfrm(THD *thd, const char *name,const char *alias,uint filestat,
uint prgflag, uint ha_open_flags, TABLE *outparam);
int readfrm(const char *name, const void** data, uint* length);
int writefrm(const char* name, const void* data, uint len);
int closefrm(TABLE *table);
int closefrm(TABLE *table, bool free_share);
int read_string(File file, gptr *to, uint length);
void free_blobs(TABLE *table);
int set_zone(int nr,int min_zone,int max_zone);
@ -1445,8 +1468,8 @@ ulong make_new_entry(File file,uchar *fileinfo,TYPELIB *formnames,
const char *newname);
ulong next_io_size(ulong pos);
void append_unescaped(String *res, const char *pos, uint length);
int create_frm(THD *thd, char *name, const char *db, const char *table,
uint reclength,uchar *fileinfo,
int create_frm(THD *thd, const char *name, const char *db, const char *table,
uint reclength, uchar *fileinfo,
HA_CREATE_INFO *create_info, uint keys);
void update_create_info_from_table(HA_CREATE_INFO *info, TABLE *form);
int rename_file_ext(const char * from,const char * to,const char * ext);

View File

@ -447,7 +447,8 @@ uint tc_heuristic_recover= 0;
uint volatile thread_count, thread_running;
ulonglong thd_startup_options;
ulong back_log, connect_timeout, concurrency, server_id;
ulong table_cache_size, thread_stack, what_to_log;
ulong table_cache_size, table_def_size;
ulong thread_stack, what_to_log;
ulong query_buff_size, slow_launch_time, slave_open_temp_tables;
ulong open_files_limit, max_binlog_size, max_relay_log_size;
ulong slave_net_timeout, slave_trans_retries;
@ -1113,6 +1114,7 @@ void clean_up(bool print_message)
#endif
query_cache_destroy();
table_cache_free();
table_def_free();
hostname_cache_free();
item_user_lock_free();
lex_free(); /* Free some memory */
@ -1411,7 +1413,7 @@ static void network_init(void)
struct sockaddr_un UNIXaddr;
#endif
int arg=1;
DBUG_ENTER("server_init");
DBUG_ENTER("network_init");
set_ports();
@ -2775,7 +2777,7 @@ static int init_thread_environment()
{
(void) pthread_mutex_init(&LOCK_mysql_create_db,MY_MUTEX_INIT_SLOW);
(void) pthread_mutex_init(&LOCK_Acl,MY_MUTEX_INIT_SLOW);
(void) pthread_mutex_init(&LOCK_open,MY_MUTEX_INIT_FAST);
(void) pthread_mutex_init(&LOCK_open, NULL);
(void) pthread_mutex_init(&LOCK_thread_count,MY_MUTEX_INIT_FAST);
(void) pthread_mutex_init(&LOCK_mapped_file,MY_MUTEX_INIT_SLOW);
(void) pthread_mutex_init(&LOCK_status,MY_MUTEX_INIT_FAST);
@ -2937,7 +2939,11 @@ static void init_ssl()
static int init_server_components()
{
DBUG_ENTER("init_server_components");
if (table_cache_init() || hostname_cache_init())
/*
We need to call each of these following functions to ensure that
all things are initialized so that unireg_abort() doesn't fail
*/
if (table_cache_init() | table_def_init() | hostname_cache_init())
unireg_abort(1);
query_cache_result_size_limit(query_cache_limit);
@ -3379,9 +3385,7 @@ int main(int argc, char **argv)
*/
check_data_home(mysql_real_data_home);
if (my_setwd(mysql_real_data_home,MYF(MY_WME)))
{
unireg_abort(1); /* purecov: inspected */
}
mysql_data_home= mysql_data_home_buff;
mysql_data_home[0]=FN_CURLIB; // all paths are relative from here
mysql_data_home[1]=0;
@ -3396,7 +3400,6 @@ int main(int argc, char **argv)
set_user(mysqld_user, user_info);
}
if (opt_bin_log && !server_id)
{
server_id= !master_host ? 1 : 2;
@ -3418,7 +3421,7 @@ we force server id to 2, but this MySQL server will not act as a slave.");
}
if (init_server_components())
exit(1);
unireg_abort(1);
network_init();
@ -3594,8 +3597,8 @@ static char *add_quoted_string(char *to, const char *from, char *to_end)
uint length= (uint) (to_end-to);
if (!strchr(from, ' '))
return strnmov(to, from, length);
return strxnmov(to, length, "\"", from, "\"", NullS);
return strmake(to, from, length-1);
return strxnmov(to, length-1, "\"", from, "\"", NullS);
}
@ -4563,7 +4566,7 @@ enum options_mysqld
OPT_RELAY_LOG_PURGE,
OPT_SLAVE_NET_TIMEOUT, OPT_SLAVE_COMPRESSED_PROTOCOL, OPT_SLOW_LAUNCH_TIME,
OPT_SLAVE_TRANS_RETRIES, OPT_READONLY, OPT_DEBUGGING,
OPT_SORT_BUFFER, OPT_TABLE_CACHE,
OPT_SORT_BUFFER, OPT_TABLE_OPEN_CACHE, OPT_TABLE_DEF_CACHE,
OPT_THREAD_CONCURRENCY, OPT_THREAD_CACHE_SIZE,
OPT_TMP_TABLE_SIZE, OPT_THREAD_STACK,
OPT_WAIT_TIMEOUT, OPT_MYISAM_REPAIR_THREADS,
@ -5952,13 +5955,21 @@ The minimum value for this variable is 4096.",
(gptr*) &global_system_variables.sync_replication_timeout,
0, GET_ULONG, REQUIRED_ARG, 10, 0, ~0L, 0, 1, 0},
#endif /* HAVE_REPLICATION */
{"table_cache", OPT_TABLE_CACHE,
"The number of open tables for all threads.", (gptr*) &table_cache_size,
(gptr*) &table_cache_size, 0, GET_ULONG, REQUIRED_ARG, 64, 1, 512*1024L,
0, 1, 0},
{"table_lock_wait_timeout", OPT_TABLE_LOCK_WAIT_TIMEOUT, "Timeout in "
"seconds to wait for a table level lock before returning an error. Used"
" only if the connection has active cursors.",
{"table_cache", OPT_TABLE_OPEN_CACHE,
"Deprecated; use --table_open_cache instead.",
(gptr*) &table_cache_size, (gptr*) &table_cache_size, 0, GET_ULONG,
REQUIRED_ARG, 64, 1, 512*1024L, 0, 1, 0},
{"table_definition_cache", OPT_TABLE_DEF_CACHE,
"The number of cached table definitions.",
(gptr*) &table_def_size, (gptr*) &table_def_size,
0, GET_ULONG, REQUIRED_ARG, 128, 1, 512*1024L, 0, 1, 0},
{"table_open_cache", OPT_TABLE_OPEN_CACHE,
"The number of cached open tables.",
(gptr*) &table_cache_size, (gptr*) &table_cache_size,
0, GET_ULONG, REQUIRED_ARG, 64, 1, 512*1024L, 0, 1, 0},
{"table_lock_wait_timeout", OPT_TABLE_LOCK_WAIT_TIMEOUT,
"Timeout in seconds to wait for a table level lock before returning an "
"error. Used only if the connection has active cursors.",
(gptr*) &table_lock_wait_timeout, (gptr*) &table_lock_wait_timeout,
0, GET_ULONG, REQUIRED_ARG, 50, 1, 1024 * 1024 * 1024, 0, 1, 0},
{"thread_cache_size", OPT_THREAD_CACHE_SIZE,
@ -6158,7 +6169,8 @@ struct show_var_st status_vars[]= {
{"Not_flushed_delayed_rows", (char*) &delayed_rows_in_use, SHOW_LONG_CONST},
{"Open_files", (char*) &my_file_opened, SHOW_LONG_CONST},
{"Open_streams", (char*) &my_stream_opened, SHOW_LONG_CONST},
{"Open_tables", (char*) 0, SHOW_OPENTABLES},
{"Open_table_definitions", (char*) 0, SHOW_TABLE_DEFINITIONS},
{"Open_tables", (char*) 0, SHOW_OPEN_TABLES},
{"Opened_tables", (char*) offsetof(STATUS_VAR, opened_tables), SHOW_LONG_STATUS},
#ifdef HAVE_QUERY_CACHE
{"Qcache_free_blocks", (char*) &query_cache.free_memory_blocks, SHOW_LONG_CONST},
@ -6989,6 +7001,8 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
case OPT_MYISAM_STATS_METHOD:
{
ulong method_conv;
LINT_INIT(method_conv);
myisam_stats_method_str= argument;
int method;
if ((method=find_type(argument, &myisam_stats_method_typelib, 2)) <= 0)

View File

@ -910,6 +910,7 @@ int QUICK_ROR_INTERSECT_SELECT::init()
int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler)
{
handler *save_file= file;
THD *thd;
DBUG_ENTER("QUICK_RANGE_SELECT::init_ror_merged_scan");
if (reuse_handler)
@ -931,11 +932,12 @@ int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler)
DBUG_RETURN(0);
}
THD *thd= current_thd;
if (!(file= get_new_handler(head, thd->mem_root, head->s->db_type)))
thd= head->in_use;
if (!(file= get_new_handler(head->s, thd->mem_root, head->s->db_type)))
goto failure;
DBUG_PRINT("info", ("Allocated new handler %p", file));
if (file->ha_open(head->s->path, head->db_stat, HA_OPEN_IGNORE_IF_LOCKED))
if (file->ha_open(head, head->s->normalized_path.str, head->db_stat,
HA_OPEN_IGNORE_IF_LOCKED))
{
/* Caller will free the memory */
goto failure;
@ -6202,6 +6204,14 @@ int QUICK_RANGE_SELECT::reset()
multi_range_buff->buffer= mrange_buff;
multi_range_buff->buffer_end= mrange_buff + mrange_bufsiz;
multi_range_buff->end_of_used_area= mrange_buff;
#ifdef HAVE_purify
/*
We need this until ndb will use the buffer efficiently
(Now ndb stores complete row in here, instead of only the used fields
which gives us valgrind warnings in compare_record[])
*/
bzero((char*) mrange_buff, mrange_bufsiz);
#endif
}
DBUG_RETURN(0);
}

View File

@ -355,11 +355,11 @@ my_bool rename_in_schema_file(const char *schema, const char *old_name,
{
char old_path[FN_REFLEN], new_path[FN_REFLEN], arc_path[FN_REFLEN];
strxnmov(old_path, FN_REFLEN, mysql_data_home, "/", schema, "/",
strxnmov(old_path, FN_REFLEN-1, mysql_data_home, "/", schema, "/",
old_name, reg_ext, NullS);
(void) unpack_filename(old_path, old_path);
strxnmov(new_path, FN_REFLEN, mysql_data_home, "/", schema, "/",
strxnmov(new_path, FN_REFLEN-1, mysql_data_home, "/", schema, "/",
new_name, reg_ext, NullS);
(void) unpack_filename(new_path, new_path);
@ -367,7 +367,7 @@ my_bool rename_in_schema_file(const char *schema, const char *old_name,
return 1;
/* check if arc_dir exists */
strxnmov(arc_path, FN_REFLEN, mysql_data_home, "/", schema, "/arc", NullS);
strxnmov(arc_path, FN_REFLEN-1, mysql_data_home, "/", schema, "/arc", NullS);
(void) unpack_filename(arc_path, arc_path);
if (revision > 0 && !access(arc_path, F_OK))
@ -414,7 +414,7 @@ sql_parse_prepare(const LEX_STRING *file_name, MEM_ROOT *mem_root,
char *end, *sign;
File_parser *parser;
File file;
DBUG_ENTER("sql__parse_prepare");
DBUG_ENTER("sql_parse_prepare");
if (!my_stat(file_name->str, &stat_info, MYF(MY_WME)))
{

View File

@ -421,7 +421,9 @@ sys_var_thd_ulong sys_sync_replication_timeout(
&SV::sync_replication_timeout);
#endif
sys_var_bool_ptr sys_sync_frm("sync_frm", &opt_sync_frm);
sys_var_long_ptr sys_table_cache_size("table_cache",
sys_var_long_ptr sys_table_def_size("table_definition_cache",
&table_def_size);
sys_var_long_ptr sys_table_cache_size("table_open_cache",
&table_cache_size);
sys_var_long_ptr sys_table_lock_wait_timeout("table_lock_wait_timeout",
&table_lock_wait_timeout);
@ -877,7 +879,8 @@ struct show_var_st init_vars[]= {
#ifdef HAVE_TZNAME
{"system_time_zone", system_time_zone, SHOW_CHAR},
#endif
{"table_cache", (char*) &table_cache_size, SHOW_LONG},
{"table_definition_cache", (char*) &table_def_size, SHOW_LONG},
{"table_open_cache", (char*) &table_cache_size, SHOW_LONG},
{"table_lock_wait_timeout", (char*) &table_lock_wait_timeout, SHOW_LONG },
{sys_table_type.name, (char*) &sys_table_type, SHOW_SYS},
{sys_thread_cache_size.name,(char*) &sys_thread_cache_size, SHOW_SYS},

View File

@ -513,7 +513,7 @@ void st_relay_log_info::close_temporary_tables()
Don't ask for disk deletion. For now, anyway they will be deleted when
slave restarts, but it is a better intention to not delete them.
*/
close_temporary(table, 0);
close_temporary(table, 1, 0);
}
save_temporary_tables= 0;
slave_open_temp_tables= 0;
@ -1296,7 +1296,7 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db,
error=file->repair(thd,&check_opt) != 0;
thd->net.vio = save_vio;
if (error)
my_error(ER_INDEX_REBUILD, MYF(0), tables.table->s->table_name);
my_error(ER_INDEX_REBUILD, MYF(0), tables.table->s->table_name.str);
err:
close_thread_tables(thd);

View File

@ -14,7 +14,6 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#include "mysql_priv.h"
#include "sp.h"
#include "sp_head.h"
@ -463,10 +462,12 @@ static void
sp_returns_type(THD *thd, String &result, sp_head *sp)
{
TABLE table;
TABLE_SHARE share;
Field *field;
bzero(&table, sizeof(table));
bzero((char*) &table, sizeof(table));
bzero((char*) &share, sizeof(share));
table.in_use= thd;
table.s = &table.share_not_to_be_used;
table.s = &share;
field= sp->make_field(0, 0, &table);
field->sql_type(result);
delete field;

View File

@ -243,6 +243,10 @@ sp_eval_func_item(THD *thd, Item **it_addr, enum enum_field_types type,
Item *old_item_next, *old_free_list, **p_free_list;
DBUG_PRINT("info", ("type: %d", type));
LINT_INIT(old_item_next);
LINT_INIT(old_free_list);
LINT_INIT(p_free_list);
if (!it)
DBUG_RETURN(NULL);
@ -518,7 +522,7 @@ void
sp_head::init_strings(THD *thd, LEX *lex, sp_name *name)
{
DBUG_ENTER("sp_head::init_strings");
uchar *endp; /* Used to trim the end */
const uchar *endp; /* Used to trim the end */
/* During parsing, we must use thd->mem_root */
MEM_ROOT *root= thd->mem_root;
@ -711,12 +715,14 @@ sp_head::make_field(uint max_length, const char *name, TABLE *dummy)
Field *field;
DBUG_ENTER("sp_head::make_field");
field= ::make_field((char *)0,
!m_returns_len ? max_length : m_returns_len,
(uchar *)"", 0, m_returns_pack, m_returns, m_returns_cs,
m_geom_returns, Field::NONE,
m_returns_typelib,
name ? name : (const char *)m_name.str, dummy);
field= ::make_field(dummy->s, (char *)0,
!m_returns_len ? max_length : m_returns_len,
(uchar *)"", 0, m_returns_pack, m_returns, m_returns_cs,
m_geom_returns, Field::NONE,
m_returns_typelib,
name ? name : (const char *)m_name.str);
if (field)
field->init(dummy);
DBUG_RETURN(field);
}

View File

@ -129,7 +129,7 @@ public:
TYPELIB *m_returns_typelib; // For FUNCTIONs only
uint m_returns_len; // For FUNCTIONs only
uint m_returns_pack; // For FUNCTIONs only
uchar *m_tmp_query; // Temporary pointer to sub query string
const uchar *m_tmp_query; // Temporary pointer to sub query string
uint m_old_cmq; // Old CLIENT_MULTI_QUERIES value
st_sp_chistics *m_chistics;
ulong m_sql_mode; // For SHOW CREATE and execution
@ -178,7 +178,7 @@ public:
*/
HASH m_sroutines;
// Pointers set during parsing
uchar *m_param_begin, *m_param_end, *m_body_begin;
const uchar *m_param_begin, *m_param_end, *m_body_begin;
/*
Security context for stored routine which should be run under

View File

@ -2219,10 +2219,10 @@ void free_grant_table(GRANT_TABLE *grant_table)
/* Search after a matching grant. Prefer exact grants before not exact ones */
static GRANT_NAME *name_hash_search(HASH *name_hash,
const char *host,const char* ip,
const char *db,
const char *user, const char *tname,
bool exact)
const char *host,const char* ip,
const char *db,
const char *user, const char *tname,
bool exact)
{
char helping [NAME_LEN*2+USERNAME_LENGTH+3];
uint len;
@ -4678,7 +4678,7 @@ static int handle_grant_table(TABLE_LIST *tables, uint table_no, bool drop,
by the searched record, if it exists.
*/
DBUG_PRINT("info",("read table: '%s' search: '%s'@'%s'",
table->s->table_name, user_str, host_str));
table->s->table_name.str, user_str, host_str));
host_field->store(host_str, user_from->host.length, system_charset_info);
user_field->store(user_str, user_from->user.length, system_charset_info);
@ -4721,7 +4721,7 @@ static int handle_grant_table(TABLE_LIST *tables, uint table_no, bool drop,
{
#ifdef EXTRA_DEBUG
DBUG_PRINT("info",("scan table: '%s' search: '%s'@'%s'",
table->s->table_name, user_str, host_str));
table->s->table_name.str, user_str, host_str));
#endif
while ((error= table->file->rnd_next(table->record[0])) !=
HA_ERR_END_OF_FILE)

File diff suppressed because it is too large Load Diff

View File

@ -850,7 +850,7 @@ sql mode: 0x%lx, sort len: %lu, conncat len: %lu",
if (thd->db_length)
{
memcpy(thd->query+thd->query_length+1, thd->db, thd->db_length);
DBUG_PRINT("qcache", ("database : %s length %u",
DBUG_PRINT("qcache", ("database: %s length: %u",
thd->db, thd->db_length));
}
else
@ -1006,7 +1006,7 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length)
if (thd->db_length)
{
memcpy(sql+query_length+1, thd->db, thd->db_length);
DBUG_PRINT("qcache", ("database: '%s' length %u",
DBUG_PRINT("qcache", ("database: '%s' length: %u",
thd->db, thd->db_length));
}
else
@ -1103,9 +1103,9 @@ sql mode: 0x%lx, sort len: %lu, conncat len: %lu",
*/
for (tmptable= thd->temporary_tables; tmptable ; tmptable= tmptable->next)
{
if (tmptable->s->key_length - TMP_TABLE_KEY_EXTRA ==
if (tmptable->s->table_cache_key.length - TMP_TABLE_KEY_EXTRA ==
table->key_length() &&
!memcmp(tmptable->s->table_cache_key, table->data(),
!memcmp(tmptable->s->table_cache_key.str, table->data(),
table->key_length()))
{
DBUG_PRINT("qcache",
@ -1268,7 +1268,7 @@ void Query_cache::invalidate(CHANGED_TABLE_LIST *tables_used)
for (; tables_used; tables_used= tables_used->next)
{
invalidate_table((byte*) tables_used->key, tables_used->key_length);
DBUG_PRINT("qcache", (" db %s, table %s", tables_used->key,
DBUG_PRINT("qcache", ("db: %s table: %s", tables_used->key,
tables_used->key+
strlen(tables_used->key)+1));
}
@ -2135,7 +2135,8 @@ void Query_cache::invalidate_table(TABLE_LIST *table_list)
void Query_cache::invalidate_table(TABLE *table)
{
invalidate_table((byte*) table->s->table_cache_key, table->s->key_length);
invalidate_table((byte*) table->s->table_cache_key.str,
table->s->table_cache_key.length);
}
void Query_cache::invalidate_table(byte * key, uint32 key_length)
@ -2196,7 +2197,7 @@ Query_cache::register_tables_from_list(TABLE_LIST *tables_used,
{
char key[MAX_DBKEY_LENGTH];
uint key_length;
DBUG_PRINT("qcache", ("view %s, db %s",
DBUG_PRINT("qcache", ("view: %s db: %s",
tables_used->view_name.str,
tables_used->view_db.str));
key_length= (uint) (strmov(strmov(key, tables_used->view_db.str) + 1,
@ -2216,14 +2217,15 @@ Query_cache::register_tables_from_list(TABLE_LIST *tables_used,
else
{
DBUG_PRINT("qcache",
("table %s, db %s, openinfo at 0x%lx, keylen %u, key at 0x%lx",
tables_used->table->s->table_name,
tables_used->table->s->table_cache_key,
("table: %s db: %s openinfo: 0x%lx keylen: %u key: 0x%lx",
tables_used->table->s->table_name.str,
tables_used->table->s->table_cache_key.str,
(ulong) tables_used->table,
tables_used->table->s->key_length,
(ulong) tables_used->table->s->table_cache_key));
if (!insert_table(tables_used->table->s->key_length,
tables_used->table->s->table_cache_key, block_table,
tables_used->table->s->table_cache_key.length,
(ulong) tables_used->table->s->table_cache_key.str));
if (!insert_table(tables_used->table->s->table_cache_key.length,
tables_used->table->s->table_cache_key.str,
block_table,
tables_used->db_length,
tables_used->table->file->table_cache_type(),
tables_used->callback_func,
@ -2823,16 +2825,16 @@ static TABLE_COUNTER_TYPE process_and_count_tables(TABLE_LIST *tables_used,
table_count++;
if (tables_used->view)
{
DBUG_PRINT("qcache", ("view %s, db %s",
DBUG_PRINT("qcache", ("view: %s db: %s",
tables_used->view_name.str,
tables_used->view_db.str));
*tables_type|= HA_CACHE_TBL_NONTRANSACT;
}
else
{
DBUG_PRINT("qcache", ("table %s, db %s, type %u",
tables_used->table->s->table_name,
tables_used->table->s->table_cache_key,
DBUG_PRINT("qcache", ("table: %s db: %s type: %u",
tables_used->table->s->table_name.str,
tables_used->table->s->db.str,
tables_used->table->s->db_type));
if (tables_used->derived)
{
@ -2850,12 +2852,12 @@ static TABLE_COUNTER_TYPE process_and_count_tables(TABLE_LIST *tables_used,
(*tables_type & HA_CACHE_TBL_NOCACHE) ||
(tables_used->db_length == 5 &&
my_strnncoll(table_alias_charset,
(uchar*)tables_used->table->s->table_cache_key, 6,
(uchar*)tables_used->table->s->table_cache_key.str, 6,
(uchar*)"mysql",6) == 0))
{
DBUG_PRINT("qcache",
("select not cacheable: temporary, system or \
other non-cacheable table(s)"));
("select not cacheable: temporary, system or "
"other non-cacheable table(s)"));
DBUG_RETURN(0);
}
if (tables_used->table->s->db_type == DB_TYPE_MRG_MYISAM)
@ -2937,11 +2939,13 @@ my_bool Query_cache::ask_handler_allowance(THD *thd,
for (; tables_used; tables_used= tables_used->next_global)
{
TABLE *table;
handler *handler;
if (!(table= tables_used->table))
continue;
handler *handler= table->file;
if (!handler->register_query_cache_table(thd, table->s->table_cache_key,
table->s->key_length,
handler= table->file;
if (!handler->register_query_cache_table(thd,
table->s->table_cache_key.str,
table->s->table_cache_key.length,
&tables_used->callback_func,
&tables_used->engine_data))
{

View File

@ -658,7 +658,8 @@ void THD::add_changed_table(TABLE *table)
DBUG_ASSERT((options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) &&
table->file->has_transactions());
add_changed_table(table->s->table_cache_key, table->s->key_length);
add_changed_table(table->s->table_cache_key.str,
table->s->table_cache_key.length);
DBUG_VOID_RETURN;
}
@ -1053,7 +1054,8 @@ static File create_file(THD *thd, char *path, sql_exchange *exchange,
if (!dirname_length(exchange->file_name))
{
strxnmov(path, FN_REFLEN, mysql_real_data_home, thd->db ? thd->db : "", NullS);
strxnmov(path, FN_REFLEN-1, mysql_real_data_home, thd->db ? thd->db : "",
NullS);
(void) fn_format(path, exchange->file_name, path, "", option);
}
else

View File

@ -628,6 +628,7 @@ typedef struct system_status_var
ulong net_big_packet_count;
ulong opened_tables;
ulong opened_shares;
ulong select_full_join_count;
ulong select_full_range_join_count;
ulong select_range_count;

View File

@ -272,7 +272,7 @@ static bool write_db_opt(THD *thd, const char *path, HA_CREATE_INFO *create)
if ((file=my_create(path, CREATE_MODE,O_RDWR | O_TRUNC,MYF(MY_WME))) >= 0)
{
ulong length;
length= (ulong) (strxnmov(buf, sizeof(buf), "default-character-set=",
length= (ulong) (strxnmov(buf, sizeof(buf)-1, "default-character-set=",
create->default_table_charset->csname,
"\ndefault-collation=",
create->default_table_charset->name,

View File

@ -814,29 +814,31 @@ bool mysql_truncate(THD *thd, TABLE_LIST *table_list, bool dont_send_ok)
{
HA_CREATE_INFO create_info;
char path[FN_REFLEN];
TABLE **table_ptr;
TABLE *table;
bool error;
DBUG_ENTER("mysql_truncate");
bzero((char*) &create_info,sizeof(create_info));
/* If it is a temporary table, close and regenerate it */
if (!dont_send_ok && (table_ptr=find_temporary_table(thd,table_list->db,
table_list->table_name)))
if (!dont_send_ok && (table= find_temporary_table(thd, table_list)))
{
TABLE *table= *table_ptr;
table->file->info(HA_STATUS_AUTO | HA_STATUS_NO_LOCK);
db_type table_type= table->s->db_type;
TABLE_SHARE *share= table->s;
if (!ha_check_storage_engine_flag(table_type, HTON_CAN_RECREATE))
goto trunc_by_del;
strmov(path, table->s->path);
*table_ptr= table->next; // Unlink table from list
close_temporary(table,0);
*fn_ext(path)=0; // Remove the .frm extension
ha_create_table(path, &create_info,1);
table->file->info(HA_STATUS_AUTO | HA_STATUS_NO_LOCK);
close_temporary_table(thd, table, 0, 0); // Don't free share
ha_create_table(thd, share->normalized_path.str,
share->db.str, share->table_name.str, &create_info, 1);
// We don't need to call invalidate() because this table is not in cache
if ((error= (int) !(open_temporary_table(thd, path, table_list->db,
table_list->table_name, 1))))
if ((error= (int) !(open_temporary_table(thd, share->path.str,
share->db.str,
share->table_name.str, 1))))
(void) rm_temporary_table(table_type, path);
free_table_share(share);
my_free((char*) table,MYF(0));
/*
If we return here we will not have logged the truncation to the bin log
and we will not send_ok() to the client.
@ -866,7 +868,8 @@ bool mysql_truncate(THD *thd, TABLE_LIST *table_list, bool dont_send_ok)
}
*fn_ext(path)=0; // Remove the .frm extension
error= ha_create_table(path,&create_info,1);
error= ha_create_table(thd, path, table_list->db, table_list->table_name,
&create_info, 1);
query_cache_invalidate3(thd, table_list, 0);
end:
@ -895,7 +898,7 @@ end:
}
DBUG_RETURN(error);
trunc_by_del:
trunc_by_del:
/* Probably InnoDB table */
ulong save_options= thd->options;
table_list->lock_type= TL_WRITE;

View File

@ -179,8 +179,8 @@ exit:
}
orig_table_list->derived_result= derived_result;
orig_table_list->table= table;
orig_table_list->table_name= (char*) table->s->table_name;
orig_table_list->table_name_length= strlen((char*)table->s->table_name);
orig_table_list->table_name= table->s->table_name.str;
orig_table_list->table_name_length= table->s->table_name.length;
table->derived_select_number= first_select->select_number;
table->s->tmp_table= TMP_TABLE;
#ifndef NO_EMBEDDED_ACCESS_CHECKS

View File

@ -424,7 +424,8 @@ bool mysql_ha_read(THD *thd, TABLE_LIST *tables,
#if MYSQL_VERSION_ID < 40100
char buff[MAX_DBKEY_LENGTH];
if (*tables->db)
strxnmov(buff, sizeof(buff), tables->db, ".", tables->table_name, NullS);
strxnmov(buff, sizeof(buff)-1, tables->db, ".", tables->table_name,
NullS);
else
strncpy(buff, tables->alias, sizeof(buff));
my_error(ER_UNKNOWN_TABLE, MYF(0), buff, "HANDLER");
@ -656,14 +657,15 @@ int mysql_ha_flush(THD *thd, TABLE_LIST *tables, uint mode_flags,
while (*table_ptr)
{
if ((!*tmp_tables->db ||
!my_strcasecmp(&my_charset_latin1, (*table_ptr)->s->db,
!my_strcasecmp(&my_charset_latin1, (*table_ptr)->s->db.str,
tmp_tables->db)) &&
! my_strcasecmp(&my_charset_latin1, (*table_ptr)->s->table_name,
! my_strcasecmp(&my_charset_latin1,
(*table_ptr)->s->table_name.str,
tmp_tables->table_name))
{
DBUG_PRINT("info",("*table_ptr '%s'.'%s' as '%s'",
(*table_ptr)->s->db,
(*table_ptr)->s->table_name,
(*table_ptr)->s->db.str,
(*table_ptr)->s->table_name.str,
(*table_ptr)->alias));
/* The first time it is required, lock for close_thread_table(). */
if (! did_lock && ! is_locked)
@ -733,7 +735,7 @@ static int mysql_ha_flush_table(THD *thd, TABLE **table_ptr, uint mode_flags)
TABLE *table= *table_ptr;
DBUG_ENTER("mysql_ha_flush_table");
DBUG_PRINT("enter",("'%s'.'%s' as '%s' flags: 0x%02x",
table->s->db, table->s->table_name,
table->s->db.str, table->s->table_name.str,
table->alias, mode_flags));
if ((hash_tables= (TABLE_LIST*) hash_search(&thd->handler_tables_hash,

View File

@ -96,7 +96,7 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list,
Field_iterator_table fields;
fields.set_table(table);
if (check_grant_all_columns(thd, INSERT_ACL, &table->grant,
table->s->db, table->s->table_name,
table->s->db.str, table->s->table_name.str,
&fields))
return -1;
}
@ -1357,8 +1357,8 @@ delayed_insert *find_handler(THD *thd, TABLE_LIST *table_list)
delayed_insert *tmp;
while ((tmp=it++))
{
if (!strcmp(tmp->thd.db,table_list->db) &&
!strcmp(table_list->table_name,tmp->table->s->table_name))
if (!strcmp(tmp->thd.db, table_list->db) &&
!strcmp(table_list->table_name, tmp->table->s->table_name.str))
{
tmp->lock();
break;
@ -1511,6 +1511,7 @@ TABLE *delayed_insert::get_local_table(THD* client_thd)
my_ptrdiff_t adjust_ptrs;
Field **field,**org_field, *found_next_number_field;
TABLE *copy;
TABLE_SHARE *share= table->s;
/* First request insert thread to get a lock */
status=1;
@ -1536,19 +1537,16 @@ TABLE *delayed_insert::get_local_table(THD* client_thd)
client_thd->proc_info="allocating local table";
copy= (TABLE*) client_thd->alloc(sizeof(*copy)+
(table->s->fields+1)*sizeof(Field**)+
table->s->reclength);
(share->fields+1)*sizeof(Field**)+
share->reclength);
if (!copy)
goto error;
*copy= *table;
copy->s= &copy->share_not_to_be_used;
// No name hashing
bzero((char*) &copy->s->name_hash,sizeof(copy->s->name_hash));
/* We don't need to change the file handler here */
/* We don't need to change the file handler here */
field=copy->field=(Field**) (copy+1);
copy->record[0]=(byte*) (field+table->s->fields+1);
memcpy((char*) copy->record[0],(char*) table->record[0],table->s->reclength);
copy->record[0]=(byte*) (field+share->fields+1);
memcpy((char*) copy->record[0],(char*) table->record[0],share->reclength);
/* Make a copy of all fields */
@ -1560,7 +1558,7 @@ TABLE *delayed_insert::get_local_table(THD* client_thd)
if (!(*field= (*org_field)->new_field(client_thd->mem_root,copy)))
return 0;
(*field)->orig_table= copy; // Remove connection
(*field)->move_field(adjust_ptrs); // Point at copy->record[0]
(*field)->move_field_offset(adjust_ptrs); // Point at copy->record[0]
if (*org_field == found_next_number_field)
(*field)->table->found_next_number_field= *field;
}
@ -1571,13 +1569,11 @@ TABLE *delayed_insert::get_local_table(THD* client_thd)
{
/* Restore offset as this may have been reset in handle_inserts */
copy->timestamp_field=
(Field_timestamp*) copy->field[table->s->timestamp_field_offset];
(Field_timestamp*) copy->field[share->timestamp_field_offset];
copy->timestamp_field->unireg_check= table->timestamp_field->unireg_check;
copy->timestamp_field_type= copy->timestamp_field->get_auto_set_type();
}
/* _rowid is not used with delayed insert */
copy->rowid_field=0;
/* Adjust in_use for pointing to client thread */
copy->in_use= client_thd;
@ -1595,8 +1591,9 @@ TABLE *delayed_insert::get_local_table(THD* client_thd)
/* Put a question in queue */
static int write_delayed(THD *thd,TABLE *table,enum_duplicates duplic, bool ignore,
char *query, uint query_length, bool log_on)
static int write_delayed(THD *thd,TABLE *table,enum_duplicates duplic,
bool ignore, char *query, uint query_length,
bool log_on)
{
delayed_row *row=0;
delayed_insert *di=thd->di;
@ -1958,7 +1955,7 @@ bool delayed_insert::handle_inserts(void)
if (thr_upgrade_write_delay_lock(*thd.lock->locks))
{
/* This can only happen if thread is killed by shutdown */
sql_print_error(ER(ER_DELAYED_CANT_CHANGE_LOCK),table->s->table_name);
sql_print_error(ER(ER_DELAYED_CANT_CHANGE_LOCK),table->s->table_name.str);
goto err;
}
@ -2051,7 +2048,8 @@ bool delayed_insert::handle_inserts(void)
if (thr_reschedule_write_lock(*thd.lock->locks))
{
/* This should never happen */
sql_print_error(ER(ER_DELAYED_CANT_CHANGE_LOCK),table->s->table_name);
sql_print_error(ER(ER_DELAYED_CANT_CHANGE_LOCK),
table->s->table_name.str);
}
if (!using_bin_log)
table->file->extra(HA_EXTRA_WRITE_CACHE);
@ -2590,6 +2588,7 @@ void select_create::abort()
if (!table->s->tmp_table)
{
ulong version= table->s->version;
table->s->version= 0;
hash_delete(&open_cache,(byte*) table);
if (!create_info->table_existed)
quick_rm_table(table_type, create_table->db, create_table->table_name);
@ -2598,8 +2597,8 @@ void select_create::abort()
VOID(pthread_cond_broadcast(&COND_refresh));
}
else if (!create_info->table_existed)
close_temporary_table(thd, create_table->db, create_table->table_name);
table=0;
close_temporary_table(thd, table, 1, 1);
table=0; // Safety
}
VOID(pthread_mutex_unlock(&LOCK_open));
}

View File

@ -110,7 +110,7 @@ void lex_free(void)
(We already do too much here)
*/
void lex_start(THD *thd, uchar *buf,uint length)
void lex_start(THD *thd, const uchar *buf, uint length)
{
LEX *lex= thd->lex;
DBUG_ENTER("lex_start");
@ -196,9 +196,9 @@ void lex_end(LEX *lex)
static int find_keyword(LEX *lex, uint len, bool function)
{
uchar *tok=lex->tok_start;
const uchar *tok=lex->tok_start;
SYMBOL *symbol = get_hash_symbol((const char *)tok,len,function);
SYMBOL *symbol= get_hash_symbol((const char *)tok,len,function);
if (symbol)
{
lex->yylval->symbol.symbol=symbol;
@ -256,15 +256,16 @@ static LEX_STRING get_token(LEX *lex,uint length)
static LEX_STRING get_quoted_token(LEX *lex,uint length, char quote)
{
LEX_STRING tmp;
byte *from, *to, *end;
const uchar *from, *end;
uchar *to;
yyUnget(); // ptr points now after last token char
tmp.length=lex->yytoklen=length;
tmp.str=(char*) lex->thd->alloc(tmp.length+1);
for (from= (byte*) lex->tok_start, to= (byte*) tmp.str, end= to+length ;
for (from= lex->tok_start, to= (uchar*) tmp.str, end= to+length ;
to != end ;
)
{
if ((*to++= *from++) == quote)
if ((*to++= *from++) == (uchar) quote)
from++; // Skip double quotes
}
*to= 0; // End null for safety
@ -284,7 +285,6 @@ static char *get_text(LEX *lex)
CHARSET_INFO *cs= lex->thd->charset();
sep= yyGetLast(); // String should end with this
//lex->tok_start=lex->ptr-1; // Remember '
while (lex->ptr != lex->end_of_query)
{
c = yyGet();
@ -328,7 +328,8 @@ static char *get_text(LEX *lex)
yyUnget();
/* Found end. Unescape and return string */
uchar *str,*end,*start;
const uchar *str, *end;
uchar *start;
str=lex->tok_start+1;
end=lex->ptr-1;
@ -612,7 +613,7 @@ int yylex(void *arg, void *yythd)
break;
}
case MY_LEX_IDENT:
uchar *start;
const uchar *start;
#if defined(USE_MB) && defined(USE_MB_IDENT)
if (use_mb(cs))
{

View File

@ -459,7 +459,7 @@ public:
void set_limit(st_select_lex *values);
void set_thd(THD *thd_arg) { thd= thd_arg; }
friend void lex_start(THD *thd, uchar *buf, uint length);
friend void lex_start(THD *thd, const uchar *buf, uint length);
friend int subselect_union_engine::exec();
List<Item> *get_unit_column_types();
@ -626,7 +626,7 @@ public:
void cut_subtree() { slave= 0; }
bool test_limit();
friend void lex_start(THD *thd, uchar *buf, uint length);
friend void lex_start(THD *thd, const uchar *buf, uint length);
st_select_lex() {}
void make_empty_select()
{
@ -722,11 +722,11 @@ typedef struct st_lex
SELECT_LEX *current_select;
/* list of all SELECT_LEX */
SELECT_LEX *all_selects_list;
uchar *buf; /* The beginning of string, used by SPs */
uchar *ptr,*tok_start,*tok_end,*end_of_query;
const uchar *buf; /* The beginning of string, used by SPs */
const uchar *ptr,*tok_start,*tok_end,*end_of_query;
/* The values of tok_start/tok_end as they were one call of yylex before */
uchar *tok_start_prev, *tok_end_prev;
const uchar *tok_start_prev, *tok_end_prev;
char *length,*dec,*change,*name;
char *help_arg;
@ -931,7 +931,7 @@ typedef struct st_lex
Pointers to part of LOAD DATA statement that should be rewritten
during replication ("LOCAL 'filename' REPLACE INTO" part).
*/
uchar *fname_start, *fname_end;
const uchar *fname_start, *fname_end;
bool escape_used;
@ -1058,7 +1058,7 @@ struct st_lex_local: public st_lex
extern void lex_init(void);
extern void lex_free(void);
extern void lex_start(THD *thd, uchar *buf,uint length);
extern void lex_start(THD *thd, const uchar *buf, uint length);
extern void lex_end(LEX *lex);
extern int yylex(void *arg, void *yythd);

View File

@ -286,7 +286,7 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
#endif
if (!dirname_length(ex->file_name))
{
strxnmov(name, FN_REFLEN, mysql_real_data_home, tdb, NullS);
strxnmov(name, FN_REFLEN-1, mysql_real_data_home, tdb, NullS);
(void) fn_format(name, ex->file_name, name, "",
MY_RELATIVE_PATH | MY_UNPACK_FILENAME);
}

View File

@ -1978,7 +1978,8 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
uptime,
(int) thread_count, (ulong) thd->query_id,
(ulong) thd->status_var.long_query_count,
thd->status_var.opened_tables, refresh_version, cached_tables(),
thd->status_var.opened_tables, refresh_version,
cached_open_tables(),
(uptime ? (ulonglong2double(thd->query_id) / (double) uptime) :
(double) 0));
#ifdef SAFEMALLOC
@ -6202,12 +6203,16 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd,
if (!table)
DBUG_RETURN(0); // End of memory
alias_str= alias ? alias->str : table->table.str;
if (check_table_name(table->table.str,table->table.length) ||
table->db.str && check_db_name(table->db.str))
if (check_table_name(table->table.str,table->table.length))
{
my_error(ER_WRONG_TABLE_NAME, MYF(0), table->table.str);
DBUG_RETURN(0);
}
if (table->db.str && check_db_name(table->db.str))
{
my_error(ER_WRONG_DB_NAME, MYF(0), table->db.str);
DBUG_RETURN(0);
}
if (!alias) /* Alias is case sensitive */
{

View File

@ -39,6 +39,7 @@
#include "md5.h"
#ifdef WITH_PARTITION_STORAGE_ENGINE
#include <ha_partition.h>
/*
Partition related functions declarations and some static constants;
*/
@ -901,7 +902,7 @@ static bool set_up_field_array(TABLE *table,
{
Field **ptr, *field, **field_array;
uint no_fields= 0, size_field_array, i= 0;
partition_info *part_info= table->s->part_info;
partition_info *part_info= table->part_info;
int result= FALSE;
DBUG_ENTER("set_up_field_array");
@ -1276,7 +1277,7 @@ static bool check_primary_key(TABLE *table)
if (primary_key < MAX_KEY)
{
set_indicator_in_key_fields(table->key_info+primary_key);
check_fields_in_PF(table->s->part_info->full_part_field_array,
check_fields_in_PF(table->part_info->full_part_field_array,
&all_fields, &some_fields);
clear_indicator_in_key_fields(table->key_info+primary_key);
if (unlikely(!all_fields))
@ -1314,7 +1315,7 @@ static bool check_unique_keys(TABLE *table)
if (table->key_info[i].flags & HA_NOSAME) //Unique index
{
set_indicator_in_key_fields(table->key_info+i);
check_fields_in_PF(table->s->part_info->full_part_field_array,
check_fields_in_PF(table->part_info->full_part_field_array,
&all_fields, &some_fields);
clear_indicator_in_key_fields(table->key_info+i);
if (unlikely(!all_fields))
@ -1596,26 +1597,32 @@ static uint32 get_part_id_from_linear_hash(longlong hash_value, uint mask,
}
/*
This function is called as part of opening the table by opening the .frm
file. It is a part of CREATE TABLE to do this so it is quite permissible
that errors due to erroneus syntax isn't found until we come here.
If the user has used a non-existing field in the table is one such example
of an error that is not discovered until here.
fix partition functions
SYNOPSIS
fix_partition_func()
thd The thread object
name The name of the partitioned table
table TABLE object for which partition fields are set-up
RETURN VALUE
TRUE
FALSE
DESCRIPTION
The name parameter contains the full table name and is used to get the
database name of the table which is used to set-up a correct
TABLE_LIST object for use in fix_fields.
NOTES
This function is called as part of opening the table by opening the .frm
file. It is a part of CREATE TABLE to do this so it is quite permissible
that errors due to erroneus syntax isn't found until we come here.
If the user has used a non-existing field in the table is one such example
of an error that is not discovered until here.
*/
bool fix_partition_func(THD *thd, const char* name, TABLE *table)
bool fix_partition_func(THD *thd, const char *name, TABLE *table)
{
bool result= TRUE;
uint dir_length, home_dir_length;
@ -1623,19 +1630,19 @@ bool fix_partition_func(THD *thd, const char* name, TABLE *table)
TABLE_SHARE *share= table->s;
char db_name_string[FN_REFLEN];
char* db_name;
partition_info *part_info= share->part_info;
partition_info *part_info= table->part_info;
ulong save_set_query_id= thd->set_query_id;
DBUG_ENTER("fix_partition_func");
thd->set_query_id= 0;
/*
Set-up the TABLE_LIST object to be a list with a single table
Set the object to zero to create NULL pointers and set alias
and real name to table name and get database name from file name.
Set-up the TABLE_LIST object to be a list with a single table
Set the object to zero to create NULL pointers and set alias
and real name to table name and get database name from file name.
*/
bzero((void*)&tables, sizeof(TABLE_LIST));
tables.alias= tables.table_name= (char*)share->table_name;
tables.alias= tables.table_name= (char*) share->table_name.str;
tables.table= table;
tables.next_local= 0;
tables.next_name_resolution_table= 0;
@ -1650,8 +1657,8 @@ bool fix_partition_func(THD *thd, const char* name, TABLE *table)
{
DBUG_ASSERT(part_info->subpart_type == HASH_PARTITION);
/*
Subpartition is defined. We need to verify that subpartitioning
function is correct.
Subpartition is defined. We need to verify that subpartitioning
function is correct.
*/
if (part_info->linear_hash_ind)
set_linear_hash_mask(part_info, part_info->no_subparts);
@ -1664,7 +1671,8 @@ bool fix_partition_func(THD *thd, const char* name, TABLE *table)
else
{
if (unlikely(fix_fields_part_func(thd, &tables,
part_info->subpart_expr, part_info, TRUE)))
part_info->subpart_expr, part_info,
TRUE)))
goto end;
if (unlikely(part_info->subpart_expr->result_type() != INT_RESULT))
{
@ -1676,8 +1684,8 @@ bool fix_partition_func(THD *thd, const char* name, TABLE *table)
}
DBUG_ASSERT(part_info->part_type != NOT_A_PARTITION);
/*
Partition is defined. We need to verify that partitioning
function is correct.
Partition is defined. We need to verify that partitioning
function is correct.
*/
if (part_info->part_type == HASH_PARTITION)
{
@ -1999,6 +2007,7 @@ char *generate_partition_syntax(partition_info *part_info,
File fptr;
char *buf= NULL; //Return buffer
const char *file_name;
sprintf(path, "%s_%lx_%lx", "part_syntax", current_pid,
current_thd->thread_id);
fn_format(path,path,mysql_tmpdir,".psy", MY_REPLACE_EXT);
@ -2160,7 +2169,7 @@ close_file:
bool partition_key_modified(TABLE *table, List<Item> &fields)
{
List_iterator_fast<Item> f(fields);
partition_info *part_info= table->s->part_info;
partition_info *part_info= table->part_info;
Item_field *item_field;
DBUG_ENTER("partition_key_modified");
if (!part_info)
@ -2770,7 +2779,7 @@ static uint32 get_sub_part_id_from_key(const TABLE *table,byte *buf,
const key_range *key_spec)
{
byte *rec0= table->record[0];
partition_info *part_info= table->s->part_info;
partition_info *part_info= table->part_info;
uint32 part_id;
DBUG_ENTER("get_sub_part_id_from_key");
@ -2809,7 +2818,7 @@ bool get_part_id_from_key(const TABLE *table, byte *buf, KEY *key_info,
{
bool result;
byte *rec0= table->record[0];
partition_info *part_info= table->s->part_info;
partition_info *part_info= table->part_info;
DBUG_ENTER("get_part_id_from_key");
key_restore(buf, (byte*)key_spec->key, key_info, key_spec->length);
@ -2849,7 +2858,7 @@ void get_full_part_id_from_key(const TABLE *table, byte *buf,
part_id_range *part_spec)
{
bool result;
partition_info *part_info= table->s->part_info;
partition_info *part_info= table->part_info;
byte *rec0= table->record[0];
DBUG_ENTER("get_full_part_id_from_key");
@ -2894,7 +2903,7 @@ void get_full_part_id_from_key(const TABLE *table, byte *buf,
void get_partition_set(const TABLE *table, byte *buf, const uint index,
const key_range *key_spec, part_id_range *part_spec)
{
partition_info *part_info= table->s->part_info;
partition_info *part_info= table->part_info;
uint no_parts= get_tot_partitions(part_info), i, part_id;
uint sub_part= no_parts;
uint32 part_part= no_parts;
@ -3081,14 +3090,16 @@ void get_partition_set(const TABLE *table, byte *buf, const uint index,
possible to retrace this given an item tree.
*/
bool mysql_unpack_partition(THD *thd, uchar *part_buf, uint part_info_len,
TABLE* table, enum db_type default_db_type)
bool mysql_unpack_partition(THD *thd, const uchar *part_buf,
uint part_info_len, TABLE* table,
enum db_type default_db_type)
{
Item *thd_free_list= thd->free_list;
bool result= TRUE;
partition_info *part_info;
LEX *old_lex= thd->lex, lex;
DBUG_ENTER("mysql_unpack_partition");
thd->lex= &lex;
lex_start(thd, part_buf, part_info_len);
/*
@ -3116,7 +3127,8 @@ bool mysql_unpack_partition(THD *thd, uchar *part_buf, uint part_info_len,
goto end;
}
part_info= lex.part_info;
table->s->part_info= part_info;
table->part_info= part_info;
((ha_partition*)table->file)->set_part_info(part_info);
if (part_info->default_engine_type == DB_TYPE_UNKNOWN)
part_info->default_engine_type= default_db_type;
else
@ -3138,9 +3150,9 @@ bool mysql_unpack_partition(THD *thd, uchar *part_buf, uint part_info_len,
uint part_func_len= part_info->part_func_len;
uint subpart_func_len= part_info->subpart_func_len;
char *part_func_string, *subpart_func_string= NULL;
if (!((part_func_string= sql_alloc(part_func_len))) ||
if (!((part_func_string= thd->alloc(part_func_len))) ||
(subpart_func_len &&
!((subpart_func_string= sql_alloc(subpart_func_len)))))
!((subpart_func_string= thd->alloc(subpart_func_len)))))
{
my_error(ER_OUTOFMEMORY, MYF(0), part_func_len);
free_items(thd->free_list);
@ -3187,7 +3199,7 @@ void set_field_ptr(Field **ptr, const byte *new_buf,
do
{
(*ptr)->move_field(diff);
(*ptr)->move_field_offset(diff);
} while (*(++ptr));
DBUG_VOID_RETURN;
}
@ -3221,7 +3233,7 @@ void set_key_field_ptr(KEY *key_info, const byte *new_buf,
do
{
key_part->field->move_field(diff);
key_part->field->move_field_offset(diff);
key_part++;
} while (++i < key_parts);
DBUG_VOID_RETURN;

View File

@ -5063,7 +5063,7 @@ static void add_not_null_conds(JOIN *join)
SYNOPSIS
add_found_match_trig_cond()
tab the first inner table for most nested outer join
cond the predicate to be guarded
cond the predicate to be guarded (must be set)
root_tab the first inner table to stop
DESCRIPTION
@ -5081,12 +5081,11 @@ static COND*
add_found_match_trig_cond(JOIN_TAB *tab, COND *cond, JOIN_TAB *root_tab)
{
COND *tmp;
if (tab == root_tab || !cond)
DBUG_ASSERT(cond != 0);
if (tab == root_tab)
return cond;
if ((tmp= add_found_match_trig_cond(tab->first_upper, cond, root_tab)))
{
tmp= new Item_func_trig_cond(tmp, &tab->found);
}
if (tmp)
{
tmp->quick_fix_field();
@ -5245,6 +5244,10 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
for (uint i=join->const_tables ; i < join->tables ; i++)
{
JOIN_TAB *tab=join->join_tab+i;
/*
first_inner is the X in queries like:
SELECT * FROM t1 LEFT OUTER JOIN (t2 JOIN t3) ON X
*/
JOIN_TAB *first_inner_tab= tab->first_inner;
table_map current_map= tab->table->map;
bool use_quick_range=0;
@ -5295,15 +5298,15 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
*/
DBUG_PRINT("info", ("Item_int"));
tmp= new Item_int((longlong) 1,1); // Always true
DBUG_PRINT("info", ("Item_int 0x%lx", (ulong)tmp));
}
}
if (tmp || !cond)
{
DBUG_EXECUTE("where",print_where(tmp,tab->table->alias););
SQL_SELECT *sel=tab->select=(SQL_SELECT*)
thd->memdup((gptr) select, sizeof(SQL_SELECT));
SQL_SELECT *sel= tab->select= ((SQL_SELECT*)
thd->memdup((gptr) select,
sizeof(*select)));
if (!sel)
DBUG_RETURN(1); // End of memory
/*
@ -7994,7 +7997,7 @@ const_expression_in_where(COND *cond, Item *comp_item, Item **const_item)
new_created field
*/
Field* create_tmp_field_from_field(THD *thd, Field* org_field,
Field *create_tmp_field_from_field(THD *thd, Field *org_field,
const char *name, TABLE *table,
Item_field *item, uint convert_blob_length)
{
@ -8003,12 +8006,14 @@ Field* create_tmp_field_from_field(THD *thd, Field* org_field,
if (convert_blob_length && (org_field->flags & BLOB_FLAG))
new_field= new Field_varstring(convert_blob_length,
org_field->maybe_null(),
org_field->field_name, table,
org_field->field_name, table->s,
org_field->charset());
else
new_field= org_field->new_field(thd->mem_root, table);
if (new_field)
{
new_field->init(table);
new_field->orig_table= org_field->orig_table;
if (item)
item->result_field= new_field;
else
@ -8051,18 +8056,18 @@ static Field *create_tmp_field_from_item(THD *thd, Item *item, TABLE *table,
Item ***copy_func, bool modify_item,
uint convert_blob_length)
{
bool maybe_null=item->maybe_null;
bool maybe_null= item->maybe_null;
Field *new_field;
LINT_INIT(new_field);
switch (item->result_type()) {
case REAL_RESULT:
new_field=new Field_double(item->max_length, maybe_null,
item->name, table, item->decimals);
new_field= new Field_double(item->max_length, maybe_null,
item->name, item->decimals);
break;
case INT_RESULT:
new_field=new Field_longlong(item->max_length, maybe_null,
item->name, table, item->unsigned_flag);
new_field= new Field_longlong(item->max_length, maybe_null,
item->name, item->unsigned_flag);
break;
case STRING_RESULT:
DBUG_ASSERT(item->collation.collation);
@ -8074,26 +8079,29 @@ static Field *create_tmp_field_from_item(THD *thd, Item *item, TABLE *table,
*/
if ((type= item->field_type()) == MYSQL_TYPE_DATETIME ||
type == MYSQL_TYPE_TIME || type == MYSQL_TYPE_DATE)
new_field= item->tmp_table_field_from_field_type(table);
new_field= item->tmp_table_field_from_field_type(table, 1);
else if (item->max_length/item->collation.collation->mbmaxlen > 255 &&
convert_blob_length)
new_field= new Field_varstring(convert_blob_length, maybe_null,
item->name, table,
item->name, table->s,
item->collation.collation);
else
new_field= item->make_string_field(table);
break;
case DECIMAL_RESULT:
new_field= new Field_new_decimal(item->max_length, maybe_null, item->name,
table, item->decimals, item->unsigned_flag);
item->decimals, item->unsigned_flag);
break;
case ROW_RESULT:
default:
// This case should never be choosen
DBUG_ASSERT(0);
new_field= 0; // to satisfy compiler (uninitialized variable)
new_field= 0;
break;
}
if (new_field)
new_field->init(table);
if (copy_func && item->is_result_field())
*((*copy_func)++) = item; // Save for copy_funcs
if (modify_item)
@ -8120,14 +8128,20 @@ Field *create_tmp_field_for_schema(THD *thd, Item *item, TABLE *table)
{
if (item->field_type() == MYSQL_TYPE_VARCHAR)
{
Field *field;
if (item->max_length > MAX_FIELD_VARCHARLENGTH /
item->collation.collation->mbmaxlen)
return new Field_blob(item->max_length, item->maybe_null,
item->name, table, item->collation.collation);
return new Field_varstring(item->max_length, item->maybe_null, item->name,
table, item->collation.collation);
field= new Field_blob(item->max_length, item->maybe_null,
item->name, item->collation.collation);
else
field= new Field_varstring(item->max_length, item->maybe_null,
item->name,
table->s, item->collation.collation);
if (field)
field->init(table);
return field;
}
return item->tmp_table_field_from_field_type(table);
return item->tmp_table_field_from_field_type(table, 0);
}
@ -8178,11 +8192,13 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type,
item= item->real_item();
type= Item::FIELD_ITEM;
}
switch (type) {
case Item::SUM_FUNC_ITEM:
{
Item_sum *item_sum=(Item_sum*) item;
Field *result= item_sum->create_tmp_field(group, table, convert_blob_length);
Field *result= item_sum->create_tmp_field(group, table,
convert_blob_length);
if (!result)
thd->fatal_error();
return result;
@ -8293,6 +8309,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
{
MEM_ROOT *mem_root_save, own_root;
TABLE *table;
TABLE_SHARE *share;
uint i,field_count,null_count,null_pack_length;
uint hidden_null_count, hidden_null_pack_length, hidden_field_count;
uint blob_count,group_null_items, string_count;
@ -8361,6 +8378,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
if (!multi_alloc_root(&own_root,
&table, sizeof(*table),
&share, sizeof(*share),
&reg_field, sizeof(Field*) * (field_count+1),
&blob_field, sizeof(uint)*(field_count+1),
&from_field, sizeof(Field*)*field_count,
@ -8409,20 +8427,17 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
table->used_keys.init();
table->keys_in_use_for_query.init();
table->s= &table->share_not_to_be_used;
table->s->blob_field= blob_field;
table->s->table_name= table->s->path= tmpname;
table->s->db= "";
table->s->blob_ptr_size= mi_portable_sizeof_char_ptr;
table->s->tmp_table= TMP_TABLE;
table->s->db_low_byte_first=1; // True for HEAP and MyISAM
table->s->table_charset= param->table_charset;
table->s->primary_key= MAX_KEY; //Indicate no primary key
table->s->keys_for_keyread.init();
table->s->keys_in_use.init();
table->s= share;
init_tmp_table_share(share, "", 0, tmpname, tmpname);
share->blob_field= blob_field;
share->blob_ptr_size= mi_portable_sizeof_char_ptr;
share->db_low_byte_first=1; // True for HEAP and MyISAM
share->table_charset= param->table_charset;
share->primary_key= MAX_KEY; // Indicate no primary key
share->keys_for_keyread.init();
share->keys_in_use.init();
/* For easier error reporting */
table->s->table_cache_key= (char*) (table->s->db= "");
share->table_cache_key= share->db;
/* Calculate which type of fields we will store in the temporary table */
@ -8566,15 +8581,15 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
DBUG_ASSERT(field_count >= (uint) (reg_field - table->field));
field_count= (uint) (reg_field - table->field);
*blob_field= 0; // End marker
table->s->fields= field_count;
share->fields= field_count;
/* If result table is small; use a heap */
if (blob_count || using_unique_constraint ||
(select_options & (OPTION_BIG_TABLES | SELECT_SMALL_RESULT)) ==
OPTION_BIG_TABLES || (select_options & TMP_TABLE_FORCE_MYISAM))
{
table->file= get_new_handler(table, &table->mem_root,
table->s->db_type= DB_TYPE_MYISAM);
table->file= get_new_handler(share, &table->mem_root,
share->db_type= DB_TYPE_MYISAM);
if (group &&
(param->group_parts > table->file->max_key_parts() ||
param->group_length > table->file->max_key_length()))
@ -8582,18 +8597,16 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
}
else
{
table->file= get_new_handler(table, &table->mem_root,
table->s->db_type= DB_TYPE_HEAP);
}
if (table->s->fields)
{
table->file->ha_set_all_bits_in_read_set();
table->file->ha_set_all_bits_in_write_set();
table->file= get_new_handler(share, &table->mem_root,
share->db_type= DB_TYPE_HEAP);
}
if (!table->file)
goto err;
if (!using_unique_constraint)
reclength+= group_null_items; // null flag is stored separately
table->s->blob_fields= blob_count;
share->blob_fields= blob_count;
if (blob_count == 0)
{
/* We need to ensure that first byte is not 0 for the delete link */
@ -8615,15 +8628,15 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
string_total_length / string_count >= AVG_STRING_LENGTH_TO_PACK_ROWS))
use_packed_rows= 1;
table->s->reclength= reclength;
share->reclength= reclength;
{
uint alloc_length=ALIGN_SIZE(reclength+MI_UNIQUE_HASH_LENGTH+1);
table->s->rec_buff_length= alloc_length;
share->rec_buff_length= alloc_length;
if (!(table->record[0]= (byte*)
alloc_root(&table->mem_root, alloc_length*3)))
goto err;
table->record[1]= table->record[0]+alloc_length;
table->s->default_values= table->record[1]+alloc_length;
share->default_values= table->record[1]+alloc_length;
}
copy_func[0]=0; // End marker
@ -8639,8 +8652,8 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
bfill(null_flags,null_pack_length,255); // Set null fields
table->null_flags= (uchar*) table->record[0];
table->s->null_fields= null_count+ hidden_null_count;
table->s->null_bytes= null_pack_length;
share->null_fields= null_count+ hidden_null_count;
share->null_bytes= null_pack_length;
}
null_count= (blob_count == 0) ? 1 : 0;
hidden_field_count=param->hidden_field_count;
@ -8713,13 +8726,13 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
store_record(table,s->default_values); // Make empty default record
if (thd->variables.tmp_table_size == ~(ulong) 0) // No limit
table->s->max_rows= ~(ha_rows) 0;
share->max_rows= ~(ha_rows) 0;
else
table->s->max_rows= (((table->s->db_type == DB_TYPE_HEAP) ?
share->max_rows= (((share->db_type == DB_TYPE_HEAP) ?
min(thd->variables.tmp_table_size,
thd->variables.max_heap_table_size) :
thd->variables.tmp_table_size)/ table->s->reclength);
set_if_bigger(table->s->max_rows,1); // For dummy start options
thd->variables.tmp_table_size)/ share->reclength);
set_if_bigger(share->max_rows,1); // For dummy start options
keyinfo= param->keyinfo;
if (group)
@ -8727,8 +8740,8 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
DBUG_PRINT("info",("Creating group key in temporary table"));
table->group=group; /* Table is grouped by key */
param->group_buff=group_buff;
table->s->keys=1;
table->s->uniques= test(using_unique_constraint);
share->keys=1;
share->uniques= test(using_unique_constraint);
table->key_info=keyinfo;
keyinfo->key_part=key_part_info;
keyinfo->flags=HA_NOSAME;
@ -8796,14 +8809,14 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
null_pack_length-=hidden_null_pack_length;
keyinfo->key_parts= ((field_count-param->hidden_field_count)+
test(null_pack_length));
set_if_smaller(table->s->max_rows, rows_limit);
set_if_smaller(share->max_rows, rows_limit);
param->end_write_records= rows_limit;
table->distinct= 1;
table->s->keys= 1;
share->keys= 1;
if (blob_count)
{
using_unique_constraint=1;
table->s->uniques= 1;
share->uniques= 1;
}
if (!(key_part_info= (KEY_PART_INFO*)
alloc_root(&table->mem_root,
@ -8822,12 +8835,15 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
key_part_info->null_bit=0;
key_part_info->offset=hidden_null_pack_length;
key_part_info->length=null_pack_length;
key_part_info->field=new Field_string((char*) table->record[0],
(uint32) key_part_info->length,
(uchar*) 0,
(uint) 0,
Field::NONE,
NullS, table, &my_charset_bin);
key_part_info->field= new Field_string((char*) table->record[0],
(uint32) key_part_info->length,
(uchar*) 0,
(uint) 0,
Field::NONE,
NullS, &my_charset_bin);
if (!key_part_info->field)
goto err;
key_part_info->field->init(table);
key_part_info->key_type=FIELDFLAG_BINARY;
key_part_info->type= HA_KEYTYPE_BINARY;
key_part_info++;
@ -8851,8 +8867,8 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
}
if (thd->is_fatal_error) // If end of memory
goto err; /* purecov: inspected */
table->s->db_record_offset= 1;
if (table->s->db_type == DB_TYPE_MYISAM)
share->db_record_offset= 1;
if (share->db_type == DB_TYPE_MYISAM)
{
if (create_myisam_tmp_table(table,param,select_options))
goto err;
@ -8860,6 +8876,8 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
if (open_tmp_table(table))
goto err;
table->file->ha_set_all_bits_in_read_set();
table->file->ha_set_all_bits_in_write_set();
thd->mem_root= mem_root_save;
DBUG_RETURN(table);
@ -8884,7 +8902,7 @@ err:
field_list list of column definitions
DESCRIPTION
The created table doesn't have a table handler assotiated with
The created table doesn't have a table handler associated with
it, has no keys, no group/distinct, no copy_funcs array.
The sole purpose of this TABLE object is to use the power of Field
class to read/write data to/from table->record[0]. Then one can store
@ -8904,55 +8922,56 @@ TABLE *create_virtual_tmp_table(THD *thd, List<create_field> &field_list)
uint record_length= 0;
uint null_count= 0; /* number of columns which may be null */
uint null_pack_length; /* NULL representation array length */
TABLE_SHARE *s;
TABLE_SHARE *share;
/* Create the table and list of all fields */
TABLE *table= (TABLE*) thd->calloc(sizeof(*table));
TABLE *table= (TABLE*) thd->calloc(sizeof(*table)+sizeof(*share));
field= (Field**) thd->alloc((field_count + 1) * sizeof(Field*));
if (!table || !field)
return 0;
table->field= field;
table->s= s= &table->share_not_to_be_used;
s->fields= field_count;
table->s= share= (TABLE_SHARE*) (table+1);
share->fields= field_count;
/* Create all fields and calculate the total length of record */
List_iterator_fast<create_field> it(field_list);
while ((cdef= it++))
{
*field= make_field(0, cdef->length,
*field= make_field(share, 0, cdef->length,
(uchar*) (f_maybe_null(cdef->pack_flag) ? "" : 0),
f_maybe_null(cdef->pack_flag) ? 1 : 0,
cdef->pack_flag, cdef->sql_type, cdef->charset,
cdef->geom_type, cdef->unireg_check,
cdef->interval, cdef->field_name, table);
cdef->interval, cdef->field_name);
if (!*field)
goto error;
record_length+= (**field).pack_length();
if (! ((**field).flags & NOT_NULL_FLAG))
++null_count;
++field;
(*field)->init(table);
record_length+= (*field)->pack_length();
if (! ((*field)->flags & NOT_NULL_FLAG))
null_count++;
field++;
}
*field= NULL; /* mark the end of the list */
null_pack_length= (null_count + 7)/8;
s->reclength= record_length + null_pack_length;
s->rec_buff_length= ALIGN_SIZE(s->reclength + 1);
table->record[0]= (byte*) thd->alloc(s->rec_buff_length);
share->reclength= record_length + null_pack_length;
share->rec_buff_length= ALIGN_SIZE(share->reclength + 1);
table->record[0]= (byte*) thd->alloc(share->rec_buff_length);
if (!table->record[0])
goto error;
if (null_pack_length)
{
table->null_flags= (uchar*) table->record[0];
s->null_fields= null_count;
s->null_bytes= null_pack_length;
share->null_fields= null_count;
share->null_bytes= null_pack_length;
}
table->in_use= thd; /* field->reset() may access table->in_use */
{
/* Set up field pointers */
byte *null_pos= table->record[0];
byte *field_pos= null_pos + s->null_bytes;
byte *field_pos= null_pos + share->null_bytes;
uint null_bit= 1;
for (field= table->field; *field; ++field)
@ -8986,7 +9005,7 @@ error:
static bool open_tmp_table(TABLE *table)
{
int error;
if ((error=table->file->ha_open(table->s->table_name,O_RDWR,
if ((error=table->file->ha_open(table, table->s->table_name.str,O_RDWR,
HA_OPEN_TMP_TABLE)))
{
table->file->print_error(error,MYF(0)); /* purecov: inspected */
@ -9005,9 +9024,10 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param,
MI_KEYDEF keydef;
MI_UNIQUEDEF uniquedef;
KEY *keyinfo=param->keyinfo;
TABLE_SHARE *share= table->s;
DBUG_ENTER("create_myisam_tmp_table");
if (table->s->keys)
if (share->keys)
{ // Get keys for ni_create
bool using_unique_constraint=0;
HA_KEYSEG *seg= (HA_KEYSEG*) alloc_root(&table->mem_root,
@ -9018,11 +9038,11 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param,
bzero(seg, sizeof(*seg) * keyinfo->key_parts);
if (keyinfo->key_length >= table->file->max_key_length() ||
keyinfo->key_parts > table->file->max_key_parts() ||
table->s->uniques)
share->uniques)
{
/* Can't create a key; Make a unique constraint instead of a key */
table->s->keys= 0;
table->s->uniques= 1;
share->keys= 0;
share->uniques= 1;
using_unique_constraint=1;
bzero((char*) &uniquedef,sizeof(uniquedef));
uniquedef.keysegs=keyinfo->key_parts;
@ -9034,7 +9054,7 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param,
param->recinfo->type= FIELD_CHECK;
param->recinfo->length=MI_UNIQUE_HASH_LENGTH;
param->recinfo++;
table->s->reclength+=MI_UNIQUE_HASH_LENGTH;
share->reclength+=MI_UNIQUE_HASH_LENGTH;
}
else
{
@ -9056,7 +9076,7 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param,
seg->type=
((keyinfo->key_part[i].key_type & FIELDFLAG_BINARY) ?
HA_KEYTYPE_VARBINARY2 : HA_KEYTYPE_VARTEXT2);
seg->bit_start= (uint8)(field->pack_length() - table->s->blob_ptr_size);
seg->bit_start= (uint8)(field->pack_length() - share->blob_ptr_size);
seg->flag= HA_BLOB_PART;
seg->length=0; // Whole blob in unique constraint
}
@ -9089,10 +9109,10 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param,
OPTION_BIG_TABLES)
create_info.data_file_length= ~(ulonglong) 0;
if ((error=mi_create(table->s->table_name,table->s->keys,&keydef,
if ((error=mi_create(share->table_name.str, share->keys, &keydef,
(uint) (param->recinfo-param->start_recinfo),
param->start_recinfo,
table->s->uniques, &uniquedef,
share->uniques, &uniquedef,
&create_info,
HA_CREATE_TMP_TABLE)))
{
@ -9102,7 +9122,7 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param,
}
statistic_increment(table->in_use->status_var.created_tmp_disk_tables,
&LOCK_status);
table->s->db_record_offset= 1;
share->db_record_offset= 1;
DBUG_RETURN(0);
err:
DBUG_RETURN(1);
@ -9123,9 +9143,9 @@ free_tmp_table(THD *thd, TABLE *entry)
if (entry->file)
{
if (entry->db_stat)
entry->file->drop_table(entry->s->table_name);
entry->file->drop_table(entry->s->table_name.str);
else
entry->file->delete_table(entry->s->table_name);
entry->file->delete_table(entry->s->table_name.str);
delete entry->file;
}
@ -9150,6 +9170,7 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
int error, bool ignore_last_dupp_key_error)
{
TABLE new_table;
TABLE_SHARE share;
const char *save_proc_info;
int write_err;
DBUG_ENTER("create_myisam_from_heap");
@ -9160,16 +9181,17 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
DBUG_RETURN(1);
}
new_table= *table;
new_table.s= &new_table.share_not_to_be_used;
share= *table->s;
new_table.s= &share;
new_table.s->db_type= DB_TYPE_MYISAM;
if (!(new_table.file= get_new_handler(&new_table, &new_table.mem_root,
if (!(new_table.file= get_new_handler(&share, &new_table.mem_root,
DB_TYPE_MYISAM)))
DBUG_RETURN(1); // End of memory
save_proc_info=thd->proc_info;
thd->proc_info="converting HEAP to MyISAM";
if (create_myisam_tmp_table(&new_table,param,
if (create_myisam_tmp_table(&new_table, param,
thd->lex->select_lex.options | thd->options))
goto err2;
if (open_tmp_table(&new_table))
@ -9218,12 +9240,13 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
/* remove heap table and change to use myisam table */
(void) table->file->ha_rnd_end();
(void) table->file->close();
(void) table->file->delete_table(table->s->table_name);
(void) table->file->delete_table(table->s->table_name.str);
delete table->file;
table->file=0;
new_table.s= table->s; // Keep old share
*table= new_table;
table->s= &table->share_not_to_be_used;
table->file->change_table_ptr(table);
*table->s= share;
table->file->change_table_ptr(table, table->s);
if (save_proc_info)
thd->proc_info= (!strcmp(save_proc_info,"Copying to tmp table") ?
"Copying to tmp table on disk" : save_proc_info);
@ -9235,7 +9258,7 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
(void) table->file->ha_rnd_end();
(void) new_table.file->close();
err1:
new_table.file->delete_table(new_table.s->table_name);
new_table.file->delete_table(new_table.s->table_name.str);
err2:
delete new_table.file;
thd->proc_info=save_proc_info;
@ -9473,7 +9496,7 @@ sub_select_cache(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)
given the selected plan prescribes to nest retrievals of the
joined tables in the following order: t1,t2,t3.
A pushed down predicate are attached to the table which it pushed to,
at the field select_cond.
at the field join_tab->select_cond.
When executing a nested loop of level k the function runs through
the rows of 'join_tab' and for each row checks the pushed condition
attached to the table.
@ -9512,7 +9535,7 @@ sub_select_cache(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)
is complemented by nulls for t2 and t3. Then the pushed down predicates
are checked for the composed row almost in the same way as it had
been done for the first row with a match. The only difference is
the predicates from on expressions are not checked.
the predicates from on expressions are not checked.
IMPLEMENTATION
The function forms output rows for a current partial join of k
@ -9521,7 +9544,7 @@ sub_select_cache(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)
join_tab it calls sub_select that builds all possible matching
tails from the result set.
To be able check predicates conditionally items of the class
Item_func_trig_cond are employed.
Item_func_trig_cond are employed.
An object of this class is constructed from an item of class COND
and a pointer to a guarding boolean variable.
When the value of the guard variable is true the value of the object

View File

@ -440,10 +440,11 @@ class store_key :public Sql_alloc
{
if (field_arg->type() == FIELD_TYPE_BLOB)
{
/* Key segments are always packed with a 2 byte length prefix */
to_field=new Field_varstring(ptr, length, 2, (uchar*) null, 1,
Field::NONE, field_arg->field_name,
field_arg->table, field_arg->charset());
/* Key segments are always packed with a 2 byte length prefix */
to_field= new Field_varstring(ptr, length, 2, (uchar*) null, 1,
Field::NONE, field_arg->field_name,
field_arg->table->s, field_arg->charset());
to_field->init(field_arg->table);
}
else
to_field=field_arg->new_key_field(thd->mem_root, field_arg->table,

View File

@ -607,7 +607,7 @@ mysqld_dump_create_info(THD *thd, TABLE_LIST *table_list, int fd)
Protocol *protocol= thd->protocol;
String *packet= protocol->storage_packet();
DBUG_ENTER("mysqld_dump_create_info");
DBUG_PRINT("enter",("table: %s",table_list->table->s->table_name));
DBUG_PRINT("enter",("table: %s",table_list->table->s->table_name.str));
protocol->prepare_for_resend();
if (store_create_info(thd, table_list, packet))
@ -787,7 +787,7 @@ store_create_info(THD *thd, TABLE_LIST *table_list, String *packet)
(MODE_NO_FIELD_OPTIONS | MODE_MYSQL323 |
MODE_MYSQL40)) != 0;
DBUG_ENTER("store_create_info");
DBUG_PRINT("enter",("table: %s", table->s->table_name));
DBUG_PRINT("enter",("table: %s", table->s->table_name.str));
restore_record(table, s->default_values); // Get empty record
@ -799,7 +799,7 @@ store_create_info(THD *thd, TABLE_LIST *table_list, String *packet)
alias= table_list->schema_table->table_name;
else
alias= (lower_case_table_names == 2 ? table->alias :
share->table_name);
share->table_name.str);
append_identifier(thd, packet, alias, strlen(alias));
packet->append(STRING_WITH_LEN(" (\n"));
@ -1005,9 +1005,9 @@ store_create_info(THD *thd, TABLE_LIST *table_list, String *packet)
else
packet->append(STRING_WITH_LEN(" ENGINE="));
#ifdef WITH_PARTITION_STORAGE_ENGINE
if (table->s->part_info)
packet->append(ha_get_storage_engine(
table->s->part_info->default_engine_type));
if (table->part_info)
packet->append(ha_get_storage_engine(table->part_info->
default_engine_type));
else
packet->append(file->table_type());
#else
@ -1091,10 +1091,10 @@ store_create_info(THD *thd, TABLE_LIST *table_list, String *packet)
*/
uint part_syntax_len;
char *part_syntax;
if (table->s->part_info &&
((part_syntax= generate_partition_syntax(table->s->part_info,
&part_syntax_len,
FALSE,FALSE))))
if (table->part_info &&
((part_syntax= generate_partition_syntax(table->part_info,
&part_syntax_len,
FALSE,FALSE))))
{
packet->append(part_syntax, part_syntax_len);
my_free(part_syntax, MYF(0));
@ -1524,8 +1524,11 @@ static bool show_status_array(THD *thd, const char *wild,
break;
}
#endif /* HAVE_REPLICATION */
case SHOW_OPENTABLES:
end= int10_to_str((long) cached_tables(), buff, 10);
case SHOW_OPEN_TABLES:
end= int10_to_str((long) cached_open_tables(), buff, 10);
break;
case SHOW_TABLE_DEFINITIONS:
end= int10_to_str((long) cached_table_definitions(), buff, 10);
break;
case SHOW_CHAR_PTR:
{
@ -3749,8 +3752,8 @@ int mysql_schema_table(THD *thd, LEX *lex, TABLE_LIST *table_list)
table->alias_name_used= my_strcasecmp(table_alias_charset,
table_list->schema_table_name,
table_list->alias);
table_list->table_name= (char*) table->s->table_name;
table_list->table_name_length= strlen(table->s->table_name);
table_list->table_name= table->s->table_name.str;
table_list->table_name_length= table->s->table_name.length;
table_list->table= table;
table->next= thd->derived_tables;
thd->derived_tables= table;

View File

@ -279,9 +279,22 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
String wrong_tables;
int error;
bool some_tables_deleted=0, tmp_table_deleted=0, foreign_key_error=0;
DBUG_ENTER("mysql_rm_table_part2");
/*
If we have the table in the definition cache, we don't have to check the
.frm file to find if the table is a normal table (not view) and what
engine to use.
*/
for (table= tables; table; table= table->next_local)
{
TABLE_SHARE *share;
table->db_type= DB_TYPE_UNKNOWN;
if ((share= get_cached_table_share(table->db, table->table_name)))
table->db_type= share->db_type;
}
if (lock_table_names(thd, tables))
DBUG_RETURN(1);
@ -291,16 +304,17 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
for (table= tables; table; table= table->next_local)
{
char *db=table->db;
db_type table_type= DB_TYPE_UNKNOWN;
db_type table_type;
mysql_ha_flush(thd, table, MYSQL_HA_CLOSE_FINAL, TRUE);
if (!close_temporary_table(thd, db, table->table_name))
if (!close_temporary_table(thd, table))
{
tmp_table_deleted=1;
continue; // removed temporary table
}
error=0;
table_type= table->db_type;
if (!drop_temporary)
{
abort_locked_tables(thd, db, table->table_name);
@ -314,14 +328,15 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
DBUG_RETURN(-1);
}
alias= (lower_case_table_names == 2) ? table->alias : table->table_name;
/* remove form file and isam files */
/* remove .frm file and engine files */
build_table_path(path, sizeof(path), db, alias, reg_ext);
}
if (drop_temporary ||
(access(path,F_OK) &&
ha_create_table_from_engine(thd,db,alias)) ||
(!drop_view &&
mysql_frm_type(thd, path, &table_type) != FRMTYPE_TABLE))
if (table_type == DB_TYPE_UNKNOWN &&
(drop_temporary ||
(access(path, F_OK) &&
ha_create_table_from_engine(thd, db, alias)) ||
(!drop_view &&
mysql_frm_type(thd, path, &table_type) != FRMTYPE_TABLE)))
{
// Table was not found on disk and table can't be created from engine
if (if_exists)
@ -337,7 +352,7 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
if (table_type == DB_TYPE_UNKNOWN)
mysql_frm_type(thd, path, &table_type);
*(end=fn_ext(path))=0; // Remove extension for delete
error= ha_delete_table(thd, table_type, path, table->table_name,
error= ha_delete_table(thd, table_type, path, db, table->table_name,
!dont_log_query);
if ((error == ENOENT || error == HA_ERR_NO_SUCH_TABLE) &&
(if_exists || table_type == DB_TYPE_UNKNOWN))
@ -398,16 +413,19 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
}
int quick_rm_table(enum db_type base,const char *db,
bool quick_rm_table(enum db_type base,const char *db,
const char *table_name)
{
char path[FN_REFLEN];
int error=0;
bool error= 0;
DBUG_ENTER("quick_rm_table");
build_table_path(path, sizeof(path), db, table_name, reg_ext);
if (my_delete(path,MYF(0)))
error=1; /* purecov: inspected */
error= 1; /* purecov: inspected */
*fn_ext(path)= 0; // Remove reg_ext
return ha_delete_table(current_thd, base, path, table_name, 0) || error;
DBUG_RETURN(ha_delete_table(current_thd, base, path, db, table_name, 0) ||
error);
}
/*
@ -1613,7 +1631,8 @@ bool mysql_create_table(THD *thd,const char *db, const char *table_name,
if (create_info->row_type == ROW_TYPE_DYNAMIC)
db_options|=HA_OPTION_PACK_RECORD;
alias= table_case_name(create_info, table_name);
if (!(file=get_new_handler((TABLE*) 0, thd->mem_root, create_info->db_type)))
if (!(file=get_new_handler((TABLE_SHARE*) 0, thd->mem_root,
create_info->db_type)))
{
my_error(ER_OUTOFMEMORY, MYF(0), 128);//128 bytes invented
DBUG_RETURN(TRUE);
@ -1718,8 +1737,8 @@ bool mysql_create_table(THD *thd,const char *db, const char *table_name,
build_table_path(path, sizeof(path), db, alias, reg_ext);
/* Check if table already exists */
if ((create_info->options & HA_LEX_CREATE_TMP_TABLE)
&& find_temporary_table(thd,db,table_name))
if ((create_info->options & HA_LEX_CREATE_TMP_TABLE) &&
find_temporary_table(thd, db, table_name))
{
if (create_info->options & HA_LEX_CREATE_IF_NOT_EXISTS)
{
@ -1745,6 +1764,7 @@ bool mysql_create_table(THD *thd,const char *db, const char *table_name,
my_error(ER_TABLE_EXISTS_ERROR,MYF(0),table_name);
goto unlock_and_end;
}
DBUG_ASSERT(get_cached_table_share(db, alias) == 0);
}
/*
@ -1778,13 +1798,14 @@ bool mysql_create_table(THD *thd,const char *db, const char *table_name,
create_info->data_file_name= create_info->index_file_name= 0;
create_info->table_options=db_options;
if (rea_create_table(thd, path, db, table_name,
create_info, fields, key_count,
key_info_buffer, file))
if (rea_create_table(thd, path, db, table_name, create_info, fields,
key_count, key_info_buffer, file))
goto unlock_and_end;
if (create_info->options & HA_LEX_CREATE_TMP_TABLE)
{
/* Open table and put in temporary table list */
*fn_ext(path)= 0;
if (!(open_temporary_table(thd, path, db, table_name, 1)))
{
(void) rm_temporary_table(create_info->db_type, path);
@ -1869,6 +1890,7 @@ TABLE *create_table_from_items(THD *thd, HA_CREATE_INFO *create_info,
MYSQL_LOCK **lock)
{
TABLE tmp_table; // Used during 'create_field()'
TABLE_SHARE share;
TABLE *table= 0;
uint select_field_count= items->elements;
/* Add selected items to field list */
@ -1880,7 +1902,9 @@ TABLE *create_table_from_items(THD *thd, HA_CREATE_INFO *create_info,
tmp_table.alias= 0;
tmp_table.timestamp_field= 0;
tmp_table.s= &tmp_table.share_not_to_be_used;
tmp_table.s= &share;
init_tmp_table_share(&share, "", 0, "", "");
tmp_table.s->db_create_options=0;
tmp_table.s->blob_ptr_size= portable_sizeof_char_ptr;
tmp_table.s->db_low_byte_first= test(create_info->db_type == DB_TYPE_MYISAM ||
@ -1971,11 +1995,13 @@ mysql_rename_table(enum db_type base,
char from[FN_REFLEN], to[FN_REFLEN], lc_from[FN_REFLEN], lc_to[FN_REFLEN];
char *from_base= from, *to_base= to;
char tmp_name[NAME_LEN+1];
handler *file= (base == DB_TYPE_UNKNOWN ? 0 :
get_new_handler((TABLE*) 0, thd->mem_root, base));
handler *file;
int error=0;
DBUG_ENTER("mysql_rename_table");
file= (base == DB_TYPE_UNKNOWN ? 0 :
get_new_handler((TABLE_SHARE*) 0, thd->mem_root, base));
build_table_path(from, sizeof(from), old_db, old_name, "");
build_table_path(to, sizeof(to), new_db, new_name, "");
@ -2036,17 +2062,19 @@ mysql_rename_table(enum db_type base,
static void wait_while_table_is_used(THD *thd,TABLE *table,
enum ha_extra_function function)
{
DBUG_PRINT("enter",("table: %s", table->s->table_name));
DBUG_ENTER("wait_while_table_is_used");
safe_mutex_assert_owner(&LOCK_open);
DBUG_PRINT("enter", ("table: '%s' share: 0x%lx db_stat: %u version: %u",
table->s->table_name.str, (ulong) table->s,
table->db_stat, table->s->version));
VOID(table->file->extra(function));
/* Mark all tables that are in use as 'old' */
mysql_lock_abort(thd, table); // end threads waiting on lock
/* Wait until all there are no other threads that has this table open */
remove_table_from_cache(thd, table->s->db,
table->s->table_name, RTFC_WAIT_OTHER_THREAD_FLAG);
remove_table_from_cache(thd, table->s->db.str,
table->s->table_name.str,
RTFC_WAIT_OTHER_THREAD_FLAG);
DBUG_VOID_RETURN;
}
@ -2168,11 +2196,15 @@ static int prepare_for_restore(THD* thd, TABLE_LIST* table,
}
static int prepare_for_repair(THD* thd, TABLE_LIST *table_list,
static int prepare_for_repair(THD *thd, TABLE_LIST *table_list,
HA_CHECK_OPT *check_opt)
{
int error= 0;
TABLE tmp_table, *table;
TABLE_SHARE *share;
char from[FN_REFLEN],tmp[FN_REFLEN+32];
const char **ext;
MY_STAT stat_info;
DBUG_ENTER("prepare_for_repair");
if (!(check_opt->sql_flags & TT_USEFRM))
@ -2180,12 +2212,26 @@ static int prepare_for_repair(THD* thd, TABLE_LIST *table_list,
if (!(table= table_list->table)) /* if open_ltable failed */
{
char name[FN_REFLEN];
build_table_path(name, sizeof(name), table_list->db,
table_list->table_name, "");
if (openfrm(thd, name, "", 0, 0, 0, &tmp_table))
char key[MAX_DBKEY_LENGTH];
uint key_length;
key_length= create_table_def_key(thd, key, table_list, 0);
pthread_mutex_lock(&LOCK_open);
if (!(share= (get_table_share(thd, table_list, key, key_length, 0,
&error))))
{
pthread_mutex_unlock(&LOCK_open);
DBUG_RETURN(0); // Can't open frm file
}
if (open_table_from_share(thd, share, "", 0, 0, 0, &tmp_table))
{
release_table_share(share, RELEASE_NORMAL);
pthread_mutex_unlock(&LOCK_open);
DBUG_RETURN(0); // Out of memory
}
table= &tmp_table;
pthread_mutex_unlock(&LOCK_open);
}
/*
@ -2198,18 +2244,16 @@ static int prepare_for_repair(THD* thd, TABLE_LIST *table_list,
- Run a normal repair using the new index file and the old data file
*/
char from[FN_REFLEN],tmp[FN_REFLEN+32];
const char **ext= table->file->bas_ext();
MY_STAT stat_info;
/*
Check if this is a table type that stores index and data separately,
like ISAM or MyISAM
*/
ext= table->file->bas_ext();
if (!ext[0] || !ext[1])
goto end; // No data file
strxmov(from, table->s->path, ext[1], NullS); // Name of data file
// Name of data file
strxmov(from, table->s->normalized_path.str, ext[1], NullS);
if (!my_stat(from, &stat_info, MYF(0)))
goto end; // Can't use USE_FRM flag
@ -2273,7 +2317,11 @@ static int prepare_for_repair(THD* thd, TABLE_LIST *table_list,
end:
if (table == &tmp_table)
closefrm(table); // Free allocated memory
{
pthread_mutex_lock(&LOCK_open);
closefrm(table, 1); // Free allocated memory
pthread_mutex_unlock(&LOCK_open);
}
DBUG_RETURN(error);
}
@ -2440,8 +2488,8 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
const char *old_message=thd->enter_cond(&COND_refresh, &LOCK_open,
"Waiting to get writelock");
mysql_lock_abort(thd,table->table);
remove_table_from_cache(thd, table->table->s->db,
table->table->s->table_name,
remove_table_from_cache(thd, table->table->s->db.str,
table->table->s->table_name.str,
RTFC_WAIT_OTHER_THREAD_FLAG |
RTFC_CHECK_KILLED_FLAG);
thd->exit_cond(old_message);
@ -2609,8 +2657,8 @@ send_result_message:
else if (open_for_modify)
{
pthread_mutex_lock(&LOCK_open);
remove_table_from_cache(thd, table->table->s->db,
table->table->s->table_name, RTFC_NO_FLAG);
remove_table_from_cache(thd, table->table->s->db.str,
table->table->s->table_name.str, RTFC_NO_FLAG);
pthread_mutex_unlock(&LOCK_open);
/* Something may be modified, that's why we have to invalidate cache */
query_cache_invalidate3(thd, table->table, 0);
@ -2788,7 +2836,7 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table,
HA_CREATE_INFO *create_info,
Table_ident *table_ident)
{
TABLE **tmp_table;
TABLE *tmp_table;
char src_path[FN_REFLEN], dst_path[FN_REFLEN];
char *db= table->db;
char *table_name= table->table_name;
@ -2826,13 +2874,13 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table,
goto err;
if ((tmp_table= find_temporary_table(thd, src_db, src_table)))
strxmov(src_path, (*tmp_table)->s->path, reg_ext, NullS);
strxmov(src_path, tmp_table->s->path.str, reg_ext, NullS);
else
{
strxmov(src_path, mysql_data_home, "/", src_db, "/", src_table,
reg_ext, NullS);
/* Resolve symlinks (for windows) */
fn_format(src_path, src_path, "", "", MYF(MY_UNPACK_FILENAME));
unpack_filename(src_path, src_path);
if (lower_case_table_names)
my_casedn_str(files_charset_info, src_path);
if (access(src_path, F_OK))
@ -2872,7 +2920,7 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table,
{
strxmov(dst_path, mysql_data_home, "/", db, "/", table_name,
reg_ext, NullS);
fn_format(dst_path, dst_path, "", "", MYF(MY_UNPACK_FILENAME));
unpack_filename(dst_path, dst_path);
if (!access(dst_path, F_OK))
goto table_exists;
}
@ -2894,8 +2942,8 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table,
creation, instead create the table directly (for both normal
and temporary tables).
*/
*fn_ext(dst_path)= 0;
err= ha_create_table(dst_path, create_info, 1);
*fn_ext(dst_path)= 0; // Remove .frm
err= ha_create_table(thd, dst_path, db, table_name, create_info, 1);
if (create_info->options & HA_LEX_CREATE_TMP_TABLE)
{
@ -3472,7 +3520,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
}
else
{
if (table->s->tmp_table)
if (table->s->tmp_table != NO_TMP_TABLE)
{
if (find_temporary_table(thd,new_db,new_name_buff))
{
@ -3483,7 +3531,8 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
else
{
char dir_buff[FN_REFLEN];
strxnmov(dir_buff, FN_REFLEN, mysql_real_data_home, new_db, NullS);
strxnmov(dir_buff, sizeof(dir_buff)-1,
mysql_real_data_home, new_db, NullS);
if (!access(fn_format(new_name_buff,new_name_buff,dir_buff,reg_ext,0),
F_OK))
{
@ -3516,7 +3565,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
ALTER_DROP_PARTITION + ALTER_COALESCE_PARTITION +
ALTER_REORGANISE_PARTITION))
{
partition_info *tab_part_info= table->s->part_info;
partition_info *tab_part_info= table->part_info;
if (!tab_part_info)
{
my_error(ER_PARTITION_MGMT_ON_NONPARTITIONED, MYF(0));
@ -3892,11 +3941,11 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
There was no partitioning before and no partitioning defined.
Obviously no work needed.
*/
if (table->s->part_info)
if (table->part_info)
{
if (!thd->lex->part_info &&
create_info->db_type == old_db_type)
thd->lex->part_info= table->s->part_info;
thd->lex->part_info= table->part_info;
}
if (thd->lex->part_info)
{
@ -3904,7 +3953,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
Need to cater for engine types that can handle partition without
using the partition handler.
*/
if (thd->lex->part_info != table->s->part_info)
if (thd->lex->part_info != table->part_info)
partition_changed= TRUE;
if (create_info->db_type != DB_TYPE_PARTITION_DB)
thd->lex->part_info->default_engine_type= create_info->db_type;
@ -3946,6 +3995,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
else
{
*fn_ext(new_name)=0;
table->s->version= 0; // Force removal of table def
close_cached_table(thd, table);
if (mysql_rename_table(old_db_type,db,table_name,new_db,new_alias))
error= -1;
@ -4320,7 +4370,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
*/
uint old_lock_type;
partition_info *part_info= table->s->part_info;
partition_info *part_info= table->part_info;
char path[FN_REFLEN+1];
uint db_options= 0, key_count, syntax_len;
KEY *key_info_buffer;
@ -4389,9 +4439,18 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
DBUG_RETURN(TRUE);
}
thd->proc_info="end";
write_bin_log(thd, FALSE);
send_ok(thd);
DBUG_RETURN(FALSE);
query_cache_invalidate3(thd, table_list, 0);
error= ha_commit_stmt(thd);
if (ha_commit(thd))
error= 1;
if (!error)
{
close_thread_tables(thd);
write_bin_log(thd, FALSE);
send_ok(thd);
DBUG_RETURN(FALSE);
}
DBUG_RETURN(error);
}
}
#endif
@ -4460,15 +4519,17 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
bzero((void*) &tbl, sizeof(tbl));
tbl.db= new_db;
tbl.table_name= tbl.alias= tmp_name;
/* Table is in thd->temporary_tables */
new_table= open_table(thd, &tbl, thd->mem_root, (bool*) 0,
MYSQL_LOCK_IGNORE_FLUSH);
}
else
{
char path[FN_REFLEN];
my_snprintf(path, sizeof(path), "%s/%s/%s", mysql_data_home,
new_db, tmp_name);
fn_format(path,path,"","",4);
/* table is a normal table: Create temporary table in same directory */
strxnmov(path, sizeof(path)-1, mysql_data_home, "/",new_db, "/",
tmp_name, NullS);
unpack_filename(path, path);
new_table=open_temporary_table(thd, path, new_db, tmp_name,0);
}
if (!new_table)
@ -4484,7 +4545,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
thd->proc_info="copy to tmp table";
next_insert_id=thd->next_insert_id; // Remember for logging
copied=deleted=0;
if (new_table && !new_table->s->is_view)
if (new_table && !(new_table->file->table_flags() & HA_NO_COPY_ON_ALTER))
{
new_table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
new_table->next_number_field=new_table->found_next_number_field;
@ -4495,7 +4556,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
thd->last_insert_id=next_insert_id; // Needed for correct log
thd->count_cuted_fields= CHECK_FIELD_IGNORE;
if (table->s->tmp_table)
if (table->s->tmp_table != NO_TMP_TABLE)
{
/* We changed a temporary table */
if (error)
@ -4504,7 +4565,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
The following function call will free the new_table pointer,
in close_temporary_table(), so we can safely directly jump to err
*/
close_temporary_table(thd,new_db,tmp_name);
close_temporary_table(thd, new_table, 1, 1);
goto err;
}
/* Close lock if this is a transactional table */
@ -4514,11 +4575,11 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
thd->lock=0;
}
/* Remove link to old table and rename the new one */
close_temporary_table(thd, table->s->db, table_name);
close_temporary_table(thd, table, 1, 1);
/* Should pass the 'new_name' as we store table name in the cache */
if (rename_temporary_table(thd, new_table, new_db, new_name))
{ // Fatal error
close_temporary_table(thd,new_db,tmp_name);
close_temporary_table(thd, new_table, 1, 1);
my_free((gptr) new_table,MYF(0));
goto err;
}
@ -4528,7 +4589,8 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
if (new_table)
{
intern_close_table(new_table); /* close temporary table */
/* close temporary table that will be the new table */
intern_close_table(new_table);
my_free((gptr) new_table,MYF(0));
}
VOID(pthread_mutex_lock(&LOCK_open));
@ -4571,6 +4633,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
close the original table at before doing the rename
*/
table_name=thd->strdup(table_name); // must be saved
table->s->version= 0; // Force removal of table def
close_cached_table(thd, table);
table=0; // Marker that table is closed
}
@ -4603,18 +4666,24 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
closing the locked table.
*/
if (table)
{
table->s->version= 0; // Force removal of table def
close_cached_table(thd,table);
}
VOID(pthread_mutex_unlock(&LOCK_open));
goto err;
}
if (thd->lock || new_name != table_name) // True if WIN32
{
/*
Not table locking or alter table with rename
free locks and remove old table
Not table locking or alter table with rename.
Free locks and remove old table
*/
if (table)
{
table->s->version= 0; // Force removal of table def
close_cached_table(thd,table);
}
VOID(quick_rm_table(old_db_type,db,old_name));
}
else
@ -4637,7 +4706,10 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
reopen_tables(thd,1,0))
{ // This shouldn't happen
if (table)
{
table->s->version= 0; // Force removal of table def
close_cached_table(thd,table); // Remove lock for table
}
VOID(pthread_mutex_unlock(&LOCK_open));
goto err;
}
@ -4781,8 +4853,8 @@ copy_data_between_tables(TABLE *from,TABLE *to,
MYF(MY_FAE | MY_ZEROFILL));
bzero((char*) &tables,sizeof(tables));
tables.table= from;
tables.alias= tables.table_name= (char*) from->s->table_name;
tables.db= (char*) from->s->db;
tables.alias= tables.table_name= from->s->table_name.str;
tables.db= from->s->db.str;
error=1;
if (thd->lex->select_lex.setup_ref_array(thd, order_num) ||

View File

@ -79,7 +79,7 @@ void print_cached_tables(void)
{
TABLE *entry=(TABLE*) hash_element(&open_cache,idx);
printf("%-14.14s %-32s%6ld%8ld%10ld%6d %s\n",
entry->s->db, entry->s->table_name, entry->s->version,
entry->s->db.str, entry->s->table_name.str, entry->s->version,
entry->in_use ? entry->in_use->thread_id : 0L,
entry->in_use ? entry->in_use->dbug_thread_id : 0L,
entry->db_stat ? 1 : 0, entry->in_use ? lock_descriptions[(int)entry->reginfo.lock_type] : "Not in use");
@ -261,7 +261,7 @@ print_plan(JOIN* join, double read_time, double record_count,
pos = join->positions[i];
table= pos.table->table;
if (table)
fputs(table->s->table_name, DBUG_FILE);
fputs(table->s->table_name.str, DBUG_FILE);
fputc(' ', DBUG_FILE);
}
fputc('\n', DBUG_FILE);
@ -278,7 +278,7 @@ print_plan(JOIN* join, double read_time, double record_count,
pos= join->best_positions[i];
table= pos.table->table;
if (table)
fputs(table->s->table_name, DBUG_FILE);
fputs(table->s->table_name.str, DBUG_FILE);
fputc(' ', DBUG_FILE);
}
}
@ -289,7 +289,7 @@ print_plan(JOIN* join, double read_time, double record_count,
for (plan_nodes= join->best_ref ; *plan_nodes ; plan_nodes++)
{
join_table= (*plan_nodes);
fputs(join_table->table->s->table_name, DBUG_FILE);
fputs(join_table->table->s->table_name.str, DBUG_FILE);
fprintf(DBUG_FILE, "(%lu,%lu,%lu)",
(ulong) join_table->found_records,
(ulong) join_table->records,
@ -336,8 +336,8 @@ static void push_locks_into_array(DYNAMIC_ARRAY *ar, THR_LOCK_DATA *data,
{
TABLE_LOCK_INFO table_lock_info;
table_lock_info.thread_id= table->in_use->thread_id;
memcpy(table_lock_info.table_name, table->s->table_cache_key,
table->s->key_length);
memcpy(table_lock_info.table_name, table->s->table_cache_key.str,
table->s->table_cache_key.length);
table_lock_info.table_name[strlen(table_lock_info.table_name)]='.';
table_lock_info.waiting=wait;
table_lock_info.lock_text=text;
@ -484,7 +484,7 @@ Open tables: %10lu\n\
Open files: %10lu\n\
Open streams: %10lu\n",
tmp.opened_tables,
(ulong) cached_tables(),
(ulong) cached_open_tables(),
(ulong) my_file_opened,
(ulong) my_stream_opened);

View File

@ -188,7 +188,7 @@ bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create)
}
/* We do not allow creation of triggers on temporary tables. */
if (create && find_temporary_table(thd, tables->db, tables->table_name))
if (create && find_temporary_table(thd, tables))
{
my_error(ER_TRG_ON_VIEW_OR_TEMP_TABLE, MYF(0), tables->alias);
DBUG_RETURN(TRUE);
@ -307,7 +307,7 @@ bool Table_triggers_list::create_trigger(THD *thd, TABLE_LIST *tables,
/* Trigger must be in the same schema as target table. */
if (my_strcasecmp(table_alias_charset, table->s->db,
if (my_strcasecmp(table_alias_charset, table->s->db.str,
lex->spname->m_db.str ? lex->spname->m_db.str :
thd->db))
{
@ -377,17 +377,17 @@ bool Table_triggers_list::create_trigger(THD *thd, TABLE_LIST *tables,
sql_create_definition_file() files handles renaming and backup of older
versions
*/
strxnmov(dir_buff, FN_REFLEN, mysql_data_home, "/", tables->db, "/", NullS);
strxnmov(dir_buff, FN_REFLEN-1, mysql_data_home, "/", tables->db, "/", NullS);
dir.length= unpack_filename(dir_buff, dir_buff);
dir.str= dir_buff;
file.length= strxnmov(file_buff, FN_REFLEN, tables->table_name,
file.length= strxnmov(file_buff, FN_REFLEN-1, tables->table_name,
triggers_file_ext, NullS) - file_buff;
file.str= file_buff;
trigname_file.length= strxnmov(trigname_buff, FN_REFLEN,
trigname_file.length= strxnmov(trigname_buff, FN_REFLEN-1,
lex->spname->m_name.str,
trigname_file_ext, NullS) - trigname_buff;
trigname_file.str= trigname_buff;
strxnmov(trigname_path, FN_REFLEN, dir_buff, trigname_buff, NullS);
strxnmov(trigname_path, FN_REFLEN-1, dir_buff, trigname_buff, NullS);
/* Use the filesystem to enforce trigger namespace constraints. */
if (!access(trigname_path, F_OK))
@ -474,7 +474,7 @@ err_with_cleanup:
static bool rm_trigger_file(char *path, char *db, char *table_name)
{
strxnmov(path, FN_REFLEN, mysql_data_home, "/", db, "/", table_name,
strxnmov(path, FN_REFLEN-1, mysql_data_home, "/", db, "/", table_name,
triggers_file_ext, NullS);
unpack_filename(path, path);
return my_delete(path, MYF(MY_WME));
@ -498,7 +498,7 @@ static bool rm_trigger_file(char *path, char *db, char *table_name)
static bool rm_trigname_file(char *path, char *db, char *trigger_name)
{
strxnmov(path, FN_REFLEN, mysql_data_home, "/", db, "/", trigger_name,
strxnmov(path, FN_REFLEN-1, mysql_data_home, "/", db, "/", trigger_name,
trigname_file_ext, NullS);
unpack_filename(path, path);
return my_delete(path, MYF(MY_WME));
@ -561,11 +561,11 @@ bool Table_triggers_list::drop_trigger(THD *thd, TABLE_LIST *tables)
char dir_buff[FN_REFLEN], file_buff[FN_REFLEN];
LEX_STRING dir, file;
strxnmov(dir_buff, FN_REFLEN, mysql_data_home, "/", tables->db,
strxnmov(dir_buff, FN_REFLEN-1, mysql_data_home, "/", tables->db,
"/", NullS);
dir.length= unpack_filename(dir_buff, dir_buff);
dir.str= dir_buff;
file.length= strxnmov(file_buff, FN_REFLEN, tables->table_name,
file.length= strxnmov(file_buff, FN_REFLEN-1, tables->table_name,
triggers_file_ext, NullS) - file_buff;
file.str= file_buff;
@ -628,7 +628,7 @@ bool Table_triggers_list::prepare_record1_accessors(TABLE *table)
*/
if (!(*old_fld= (*fld)->new_field(&table->mem_root, table)))
return 1;
(*old_fld)->move_field((my_ptrdiff_t)(table->record[1] -
(*old_fld)->move_field_offset((my_ptrdiff_t)(table->record[1] -
table->record[0]));
}
*old_fld= 0;
@ -683,7 +683,7 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db,
DBUG_ENTER("Table_triggers_list::check_n_load");
strxnmov(path_buff, FN_REFLEN, mysql_data_home, "/", db, "/", table_name,
strxnmov(path_buff, FN_REFLEN-1, mysql_data_home, "/", db, "/", table_name,
triggers_file_ext, NullS);
path.length= unpack_filename(path_buff, path_buff);
path.str= path_buff;
@ -771,7 +771,7 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db,
sizeof(LEX_STRING))))
DBUG_RETURN(1); // EOM
trg_definer->str= "";
trg_definer->str= (char*) "";
trg_definer->length= 0;
while (it++)
@ -866,7 +866,7 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db,
schema.
*/
lex.sphead->set_definer("", 0);
lex.sphead->set_definer((char*) "", 0);
/*
Triggers without definer information are executed under the
@ -1018,7 +1018,7 @@ static TABLE_LIST *add_table_for_trigger(THD *thd, sp_name *trig)
struct st_trigname trigname;
DBUG_ENTER("add_table_for_trigger");
strxnmov(path_buff, FN_REFLEN, mysql_data_home, "/", db, "/",
strxnmov(path_buff, FN_REFLEN-1, mysql_data_home, "/", db, "/",
trig->m_name.str, trigname_file_ext, NullS);
path.length= unpack_filename(path_buff, path_buff);
path.str= path_buff;
@ -1166,10 +1166,10 @@ bool Table_triggers_list::process_triggers(THD *thd, trg_event_type event,
{
TABLE_LIST table_list;
bzero((char *) &table_list, sizeof (table_list));
table_list.db= (char *) table->s->db;
table_list.db_length= strlen(table_list.db);
table_list.table_name= (char *) table->s->table_name;
table_list.table_name_length= strlen(table_list.table_name);
table_list.db= (char *) table->s->db.str;
table_list.db_length= table->s->db.length;
table_list.table_name= table->s->table_name.str;
table_list.table_name_length= table->s->table_name.length;
table_list.alias= (char *) table->alias;
table_list.table= table;

View File

@ -1183,7 +1183,8 @@ multi_update::initialize_tables(JOIN *join)
/* ok to be on stack as this is not referenced outside of this func */
Field_string offset(table->file->ref_length, 0, "offset",
table, &my_charset_bin);
&my_charset_bin);
offset.init(table);
if (!(ifield= new Item_field(((Field *) &offset))))
DBUG_RETURN(1);
ifield->maybe_null= 0;

View File

@ -582,7 +582,7 @@ static int mysql_register_view(THD *thd, TABLE_LIST *view,
dir.length= strlen(dir_buff);
file.str= file_buff;
file.length= (strxnmov(file_buff, FN_REFLEN, view->table_name, reg_ext,
file.length= (strxnmov(file_buff, FN_REFLEN-1, view->table_name, reg_ext,
NullS) - file_buff);
/* init timestamp */
if (!view->timestamp.str)
@ -1167,15 +1167,16 @@ err:
bool mysql_drop_view(THD *thd, TABLE_LIST *views, enum_drop_mode drop_mode)
{
DBUG_ENTER("mysql_drop_view");
char path[FN_REFLEN];
TABLE_LIST *view;
bool type= 0;
db_type not_used;
DBUG_ENTER("mysql_drop_view");
for (view= views; view; view= view->next_local)
{
strxnmov(path, FN_REFLEN, mysql_data_home, "/", view->db, "/",
TABLE_SHARE *share;
bool type;
strxnmov(path, FN_REFLEN-1, mysql_data_home, "/", view->db, "/",
view->table_name, reg_ext, NullS);
(void) unpack_filename(path, path);
VOID(pthread_mutex_lock(&LOCK_open));
@ -1200,6 +1201,20 @@ bool mysql_drop_view(THD *thd, TABLE_LIST *views, enum_drop_mode drop_mode)
}
if (my_delete(path, MYF(MY_WME)))
goto err;
/*
For a view, there is only one table_share object which should never
be used outside of LOCK_open
*/
if ((share= get_cached_table_share(view->db, view->table_name)))
{
DBUG_ASSERT(share->ref_count == 0);
pthread_mutex_lock(&share->mutex);
share->ref_count++;
share->version= 0;
pthread_mutex_unlock(&share->mutex);
release_table_share(share, RELEASE_WAIT_FOR_DROP);
}
query_cache_invalidate3(thd, view, 0);
sp_cache_invalidate();
VOID(pthread_mutex_unlock(&LOCK_open));
@ -1478,7 +1493,7 @@ mysql_rename_view(THD *thd,
DBUG_ENTER("mysql_rename_view");
strxnmov(view_path, FN_REFLEN, mysql_data_home, "/", view->db, "/",
strxnmov(view_path, FN_REFLEN-1, mysql_data_home, "/", view->db, "/",
view->table_name, reg_ext, NullS);
(void) unpack_filename(view_path, view_path);
@ -1512,7 +1527,8 @@ mysql_rename_view(THD *thd,
view_def.revision - 1, num_view_backups))
goto err;
strxnmov(dir_buff, FN_REFLEN, mysql_data_home, "/", view->db, "/", NullS);
strxnmov(dir_buff, FN_REFLEN-1, mysql_data_home, "/", view->db, "/",
NullS);
(void) unpack_filename(dir_buff, dir_buff);
pathstr.str= (char*)dir_buff;

View File

@ -174,8 +174,8 @@ enum SHOW_TYPE
{
SHOW_UNDEF,
SHOW_LONG, SHOW_LONGLONG, SHOW_INT, SHOW_CHAR, SHOW_CHAR_PTR,
SHOW_DOUBLE_STATUS,
SHOW_BOOL, SHOW_MY_BOOL, SHOW_OPENTABLES, SHOW_STARTTIME, SHOW_QUESTION,
SHOW_DOUBLE_STATUS, SHOW_BOOL, SHOW_MY_BOOL,
SHOW_OPEN_TABLES, SHOW_TABLE_DEFINITIONS, SHOW_STARTTIME, SHOW_QUESTION,
SHOW_LONG_CONST, SHOW_INT_CONST, SHOW_HAVE, SHOW_SYS, SHOW_HA_ROWS,
SHOW_VARS,
#ifdef HAVE_OPENSSL

File diff suppressed because it is too large Load Diff

View File

@ -56,8 +56,11 @@ typedef struct st_grant_info
ulong orig_want_privilege;
} GRANT_INFO;
enum tmp_table_type {NO_TMP_TABLE=0, TMP_TABLE=1, TRANSACTIONAL_TMP_TABLE=2,
SYSTEM_TMP_TABLE=3};
enum tmp_table_type
{
NO_TMP_TABLE, TMP_TABLE, TRANSACTIONAL_TMP_TABLE,
INTERNAL_TMP_TABLE, SYSTEM_TMP_TABLE
};
enum frm_type_enum
{
@ -66,6 +69,8 @@ enum frm_type_enum
FRMTYPE_VIEW
};
enum release_type { RELEASE_NORMAL, RELEASE_WAIT_FOR_DROP };
typedef struct st_filesort_info
{
IO_CACHE *io_cache; /* If sorted through filebyte */
@ -107,52 +112,55 @@ class Table_triggers_list;
typedef struct st_table_share
{
#ifdef WITH_PARTITION_STORAGE_ENGINE
partition_info *part_info; /* Partition related information */
#endif
/* hash of field names (contains pointers to elements of field array) */
HASH name_hash; /* hash of field names */
MEM_ROOT mem_root;
TYPELIB keynames; /* Pointers to keynames */
TYPELIB fieldnames; /* Pointer to fieldnames */
TYPELIB *intervals; /* pointer to interval info */
#ifdef NOT_YET
pthread_mutex_t mutex; /* For locking the share */
pthread_cond_t cond; /* To signal that share is ready */
struct st_table_share *next, /* Link to unused shares */
**prev;
#ifdef NOT_YET
struct st_table *open_tables; /* link to open tables */
struct st_table *used_next, /* Link to used tables */
**used_prev;
#endif
/* The following is copied to each TABLE on OPEN */
Field **field;
Field **found_next_number_field;
Field *timestamp_field; /* Used only during open */
KEY *key_info; /* data of keys in database */
#endif
uint *blob_field; /* Index to blobs in Field arrray*/
byte *default_values; /* row with default values */
char *comment; /* Comment about table */
CHARSET_INFO *table_charset; /* Default charset of string fields */
/* A pair "database_name\0table_name\0", widely used as simply a db name */
char *table_cache_key;
const char *db; /* Pointer to db */
const char *table_name; /* Table name (for open) */
const char *path; /* Path to .frm file (from datadir) */
LEX_STRING table_cache_key;
LEX_STRING db; /* Pointer to db */
LEX_STRING table_name; /* Table name (for open) */
LEX_STRING path; /* Path to .frm file (from datadir) */
LEX_STRING normalized_path; /* unpack_filename(path) */
LEX_STRING connect_string;
key_map keys_in_use; /* Keys in use for table */
key_map keys_for_keyread;
ha_rows min_rows, max_rows; /* create information */
ulong avg_row_length; /* create information */
ulong raid_chunksize;
ulong version, flush_version, mysql_version;
ulong timestamp_offset; /* Set to offset+1 of record */
ulong reclength; /* Recordlength */
ha_rows min_rows, max_rows; /* create information */
enum db_type db_type; /* table_type for handler */
enum row_type row_type; /* How rows are stored */
enum tmp_table_type tmp_table;
uint ref_count; /* How many TABLE objects uses this */
uint open_count; /* Number of tables in open list */
uint blob_ptr_size; /* 4 or 8 */
uint null_bytes, last_null_bit_pos;
uint key_length; /* Length of table_cache_key */
uint fields; /* Number of fields */
uint rec_buff_length; /* Size of table->record[] buffer */
uint keys, key_parts;
@ -160,31 +168,40 @@ typedef struct st_table_share
uint uniques; /* Number of UNIQUE index */
uint null_fields; /* number of null fields */
uint blob_fields; /* number of blob fields */
uint timestamp_field_offset; /* Field number for timestamp field */
uint varchar_fields; /* number of varchar fields */
uint db_create_options; /* Create options from database */
uint db_options_in_use; /* Options in use */
uint db_record_offset; /* if HA_REC_IN_SEQ */
uint raid_type, raid_chunks;
uint open_count; /* Number of tables in open list */
uint rowid_field_offset; /* Field_nr +1 to rowid field */
/* Index of auto-updated TIMESTAMP field in field array */
uint primary_key;
uint timestamp_field_offset;
uint next_number_index;
uint next_number_key_offset;
uchar frm_version;
my_bool system; /* Set if system record */
my_bool crypted; /* If .frm file is crypted */
my_bool db_low_byte_first; /* Portable row format */
my_bool crashed;
my_bool is_view;
my_bool name_lock, replace_with_name_lock;
uint error, open_errno, errarg; /* error from open_table_def() */
uchar frm_version;
bool null_field_first;
bool system; /* Set if system table (one record) */
bool crypted; /* If .frm file is crypted */
bool db_low_byte_first; /* Portable row format */
bool crashed;
bool is_view;
bool name_lock, replace_with_name_lock;
bool waiting_on_cond; /* Protection against free */
/*
TRUE if this is a system table like 'mysql.proc', which we want to be
able to open and lock even when we already have some tables open and
locked. To avoid deadlocks we have to put certain restrictions on
locking of this table for writing. FALSE - otherwise.
*/
my_bool system_table;
bool system_table;
#ifdef WITH_PARTITION_STORAGE_ENGINE
const uchar *partition_info;
uint partition_info_len;
enum db_type default_part_db_type;
#endif
} TABLE_SHARE;
@ -195,8 +212,8 @@ struct st_table {
handler *file;
#ifdef NOT_YET
struct st_table *used_next, **used_prev; /* Link to used tables */
struct st_table *open_next, **open_prev; /* Link to open tables */
#endif
struct st_table *open_next, **open_prev; /* Link to open tables */
struct st_table *next, *prev;
THD *in_use; /* Which thread uses this */
@ -207,9 +224,8 @@ struct st_table {
key_map quick_keys, used_keys, keys_in_use_for_query;
KEY *key_info; /* data of keys in database */
Field *next_number_field, /* Set if next_number is activated */
*found_next_number_field, /* Set on open */
*rowid_field;
Field *next_number_field; /* Set if next_number is activated */
Field *found_next_number_field; /* Set on open */
Field_timestamp *timestamp_field;
/* Table's triggers, 0 if there are no of them */
@ -279,7 +295,9 @@ struct st_table {
MEM_ROOT mem_root;
GRANT_INFO grant;
FILESORT_INFO sort;
TABLE_SHARE share_not_to_be_used; /* To be deleted when true shares */
#ifdef WITH_PARTITION_STORAGE_ENGINE
partition_info *part_info; /* Partition related information */
#endif
bool fill_item_list(List<Item> *item_list) const;
void reset_item_list(List<Item> *item_list) const;
@ -624,6 +642,7 @@ typedef struct st_table_list
bool where_processed;
/* FRMTYPE_ERROR if any type is acceptable */
enum frm_type_enum required_type;
enum db_type db_type; /* table_type for handler */
char timestamp_buffer[20]; /* buffer for timestamp (19+1) */
/*
This TABLE_LIST object is just placeholder for prelocking, it will be

View File

@ -55,7 +55,7 @@ static bool make_empty_rec(THD *thd, int file, enum db_type table_type,
SYNOPSIS
mysql_create_frm()
thd Thread handler
file_name Name of file (including database and .frm)
file_name Path for file (including database and .frm)
db Name of database
table Name of table
create_info create info parameters
@ -69,7 +69,7 @@ static bool make_empty_rec(THD *thd, int file, enum db_type table_type,
1 error
*/
bool mysql_create_frm(THD *thd, my_string file_name,
bool mysql_create_frm(THD *thd, const char *file_name,
const char *db, const char *table,
HA_CREATE_INFO *create_info,
List<create_field> &create_fields,
@ -286,37 +286,45 @@ err3:
SYNOPSIS
rea_create_table()
thd Thread handler
file_name Name of file (including database and .frm)
db Name of database
table Name of table
path Name of file (including database and .frm)
db Data base name
table_name Table name
create_info create info parameters
create_fields Fields to create
keys number of keys to create
key_info Keys to create
file Handler to use.
file Handler to use
RETURN
0 ok
1 error
*/
int rea_create_table(THD *thd, my_string file_name,
const char *db, const char *table,
HA_CREATE_INFO *create_info,
List<create_field> &create_fields,
uint keys, KEY *key_info, handler *file)
int rea_create_table(THD *thd, const char *path,
const char *db, const char *table_name,
HA_CREATE_INFO *create_info,
List<create_field> &create_fields,
uint keys, KEY *key_info, handler *file)
{
char *ext;
DBUG_ENTER("rea_create_table");
if (mysql_create_frm(thd, file_name, db, table, create_info,
if (mysql_create_frm(thd, path, db, table_name, create_info,
create_fields, keys, key_info, file))
DBUG_RETURN(1);
if (file->create_handler_files(file_name))
if (file->create_handler_files(path))
goto err_handler;
if (!create_info->frm_only && ha_create_table(file_name,create_info,0))
*(ext= fn_ext(path))= 0; // Remove .frm
if (!create_info->frm_only && ha_create_table(thd, path, db, table_name,
create_info,0))
{
*ext= FN_EXTCHAR; // Add extension back
goto err_handler;
}
DBUG_RETURN(0);
err_handler:
my_delete(file_name, MYF(0));
my_delete(path, MYF(0));
DBUG_RETURN(1);
} /* rea_create_table */
@ -738,18 +746,20 @@ static bool make_empty_rec(THD *thd, File file,enum db_type table_type,
ulong data_offset,
handler *handler)
{
int error;
int error= 0;
Field::utype type;
uint null_count;
uchar *buff,*null_pos;
TABLE table;
TABLE_SHARE share;
create_field *field;
enum_check_fields old_count_cuted_fields= thd->count_cuted_fields;
DBUG_ENTER("make_empty_rec");
/* We need a table to generate columns for default values */
bzero((char*) &table,sizeof(table));
table.s= &table.share_not_to_be_used;
bzero((char*) &table, sizeof(table));
bzero((char*) &share, sizeof(share));
table.s= &share;
if (!(buff=(uchar*) my_malloc((uint) reclength,MYF(MY_WME | MY_ZEROFILL))))
{
@ -775,21 +785,24 @@ static bool make_empty_rec(THD *thd, File file,enum db_type table_type,
/*
regfield don't have to be deleted as it's allocated with sql_alloc()
*/
Field *regfield=make_field((char*) buff+field->offset + data_offset,
field->length,
null_pos + null_count / 8,
null_count & 7,
field->pack_flag,
field->sql_type,
field->charset,
field->geom_type,
field->unireg_check,
field->interval,
field->field_name,
&table);
Field *regfield= make_field(&share,
(char*) buff+field->offset + data_offset,
field->length,
null_pos + null_count / 8,
null_count & 7,
field->pack_flag,
field->sql_type,
field->charset,
field->geom_type,
field->unireg_check,
field->interval,
field->field_name);
if (!regfield)
goto err; // End of memory
/* save_in_field() will access regfield->table->in_use */
regfield->init(&table);
if (!(field->flags & NOT_NULL_FLAG))
{
*regfield->null_ptr|= regfield->null_bit;

View File

@ -154,13 +154,13 @@
#define DONT_GIVE_ERROR 256 /* Don't do frm_error on openfrm */
#define READ_SCREENS 1024 /* Read screens, info and helpfile */
#define DELAYED_OPEN 4096 /* Open table later */
#define NO_ERR_ON_NEW_FRM 8192 /* stop error sending on new format */
#define OPEN_VIEW 8192 /* Allow open on view */
#define SC_INFO_LENGTH 4 /* Form format constant */
#define TE_INFO_LENGTH 3
#define MTYP_NOEMPTY_BIT 128
#define FRM_VER_TRUE_VARCHAR (FRM_VER+4)
#define FRM_VER_TRUE_VARCHAR (FRM_VER+4) /* 10 */
/*
Minimum length pattern before Turbo Boyer-Moore is used
for SELECT "text" LIKE "%pattern%", excluding the two

View File

@ -22,19 +22,17 @@
strxnmov(dst, len, src1, ..., srcn, NullS)
moves the first len characters of the concatenation of src1,...,srcn
to dst. If there aren't that many characters, a NUL character will
be added to the end of dst to terminate it properly. This gives the
same effect as calling strxcpy(buff, src1, ..., srcn, NullS) with a
large enough buffer, and then calling strnmov(dst, buff, len).
to dst and add a closing NUL character.
It is just like strnmov except that it concatenates multiple sources.
Beware: the last argument should be the null character pointer.
Take VERY great care not to omit it! Also be careful to use NullS
and NOT to use 0, as on some machines 0 is not the same size as a
character pointer, or not the same bit pattern as NullS.
Note: strxnmov is like strnmov in that it moves up to len
characters; dst will be padded on the right with one NUL characters if
needed.
NOTE
strxnmov is like strnmov in that it moves up to len
characters; dst will be padded on the right with one '\0' character.
if total-string-length >= length then dst[length] will be set to \0
*/
#include <my_global.h>
@ -58,8 +56,8 @@ char *strxnmov(char *dst,uint len, const char *src, ...)
dst--;
src = va_arg(pvar, char *);
}
*dst=0;
end:
*dst=0;
va_end(pvar);
return dst;
}