Merge mysql-5.5-innodb -> mysql-5.5

Vasil Dimov 2011-01-08 17:00:48 +02:00
commit 1d3fd9d931
59 changed files with 1864 additions and 781 deletions

View File

@ -604,6 +604,9 @@
#cmakedefine SO_EXT "@CMAKE_SHARED_MODULE_SUFFIX@"
#define MYSQL_MAJOR_VERSION @MAJOR_VERSION@
#define MYSQL_MINOR_VERSION @MINOR_VERSION@
#define PACKAGE "mysql"
#define PACKAGE_BUGREPORT ""
#define PACKAGE_NAME "MySQL Server"

View File

@ -22,7 +22,6 @@
class THD;
uint filename_to_tablename(const char *from, char *to, uint to_length);
int get_quote_char_for_identifier(THD *thd, const char *name, uint length);
bool schema_table_store_record(THD *thd, TABLE *table);
void localtime_to_TIME(MYSQL_TIME *to, struct tm *from);

View File

@ -0,0 +1,5 @@
perl mysql-test-run.pl --timer --force --parallel=auto --comment=n_mix --vardir=var-n_mix --mysqld=--binlog-format=mixed --experimental=collections/default.experimental --skip-ndb --skip-test-list=collections/disabled-per-push.list
perl mysql-test-run.pl --timer --force --parallel=auto --comment=ps_row --vardir=var-ps_row --ps-protocol --mysqld=--binlog-format=row --experimental=collections/default.experimental --skip-ndb --skip-test-list=collections/disabled-per-push.list
perl mysql-test-run.pl --timer --force --parallel=auto --comment=embedded --vardir=var-emebbed --embedded --experimental=collections/default.experimental --skip-ndb
perl mysql-test-run.pl --timer --force --parallel=auto --comment=rpl_binlog_row --vardir=var-rpl_binlog_row --suite=rpl,binlog --mysqld=--binlog-format=row --experimental=collections/default.experimental --skip-ndb --skip-test-list=collections/disabled-per-push.list
perl mysql-test-run.pl --timer --force --parallel=auto --comment=funcs_1 --vardir=var-funcs_1 --suite=funcs_1 --experimental=collections/default.experimental --skip-ndb

View File

@ -0,0 +1,5 @@
perl mysql-test-run.pl --timer --force --parallel=auto --comment=n_mix --vardir=var-n_mix --mysqld=--binlog-format=mixed --experimental=collections/default.experimental --skip-ndb --skip-test-list=collections/disabled-per-push.list
perl mysql-test-run.pl --timer --force --parallel=auto --comment=ps_row --vardir=var-ps_row --ps-protocol --mysqld=--binlog-format=row --experimental=collections/default.experimental --skip-ndb --skip-test-list=collections/disabled-per-push.list
perl mysql-test-run.pl --timer --force --parallel=auto --comment=embedded --vardir=var-emebbed --embedded --experimental=collections/default.experimental --skip-ndb
perl mysql-test-run.pl --timer --force --parallel=auto --comment=rpl_binlog_row --vardir=var-rpl_binlog_row --suite=rpl,binlog --mysqld=--binlog-format=row --experimental=collections/default.experimental --skip-ndb --skip-test-list=collections/disabled-per-push.list
perl mysql-test-run.pl --timer --force --parallel=auto --comment=funcs_1 --vardir=var-funcs_1 --suite=funcs_1 --experimental=collections/default.experimental --skip-ndb

View File

@ -0,0 +1,5 @@
perl mysql-test-run.pl --timer --force --parallel=auto --experimental=collections/default.experimental --comment=n_mix --vardir=var-n_mix --mysqld=--binlog-format=mixed --suite=main,binlog,innodb,rpl,sys_vars,perfschema --skip-test-list=collections/disabled-per-push.list
perl mysql-test-run.pl --timer --force --parallel=auto --experimental=collections/default.experimental --comment=ps_row --vardir=var-ps_row --ps-protocol --mysqld=--binlog-format=row --suite=main,binlog,innodb,rpl,sys_vars,perfschema --skip-test-list=collections/disabled-per-push.list
perl mysql-test-run.pl --timer --force --parallel=auto --experimental=collections/default.experimental --comment=embedded --vardir=var-emebbed --embedded --suite=main,binlog,innodb,rpl,sys_vars,perfschema
perl mysql-test-run.pl --timer --force --parallel=auto --experimental=collections/default.experimental --comment=rpl_binlog_row --vardir=var-rpl_binlog_row --mysqld=--binlog-format=row --suite=rpl,binlog --skip-test-list=collections/disabled-per-push.list
perl mysql-test-run.pl --timer --force --parallel=auto --experimental=collections/default.experimental --comment=funcs_1 --vardir=var-funcs_1 --suite=funcs_1

View File

@ -5,3 +5,111 @@
CREATE DATABASE XY;
USE XY;
DROP DATABASE XY;
USE TEST;
#
# Bug55222 Mysqldump table names case bug in REFERENCES clause
# InnoDB did not handle lower_case_table_names=2 for
# foreign_table_names and referenced_table_names.
#
SHOW VARIABLES LIKE 'lower_case_table_names';
Variable_name Value
lower_case_table_names 2
DROP TABLE IF EXISTS `Table2`;
DROP TABLE IF EXISTS `Table1`;
CREATE TABLE `Table1`(c1 INT PRIMARY KEY) ENGINE=InnoDB;
CREATE TABLE `Table2`(c1 INT PRIMARY KEY, c2 INT) ENGINE=InnoDB;
ALTER TABLE `Table2` ADD CONSTRAINT fk1 FOREIGN KEY(c2) REFERENCES `Table1`(c1);
SHOW CREATE TABLE `Table2`;
Table Table2
Create Table CREATE TABLE `Table2` (
`c1` int(11) NOT NULL,
`c2` int(11) DEFAULT NULL,
PRIMARY KEY (`c1`),
KEY `fk1` (`c2`),
CONSTRAINT `fk1` FOREIGN KEY (`c2`) REFERENCES `Table1` (`c1`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
SELECT * FROM INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS;
CONSTRAINT_CATALOG def
CONSTRAINT_SCHEMA test
CONSTRAINT_NAME fk1
UNIQUE_CONSTRAINT_CATALOG def
UNIQUE_CONSTRAINT_SCHEMA test
UNIQUE_CONSTRAINT_NAME PRIMARY
MATCH_OPTION NONE
UPDATE_RULE RESTRICT
DELETE_RULE RESTRICT
TABLE_NAME Table2
REFERENCED_TABLE_NAME Table1
DROP TABLE `Table2`;
DROP TABLE `Table1`;
DROP TABLE IF EXISTS Product_Order;
DROP TABLE IF EXISTS Product;
DROP TABLE IF EXISTS Customer;
CREATE TABLE Product (Category INT NOT NULL, Id INT NOT NULL,
Price DECIMAL, PRIMARY KEY(Category, Id)) ENGINE=InnoDB;
CREATE TABLE Customer (Id INT NOT NULL, PRIMARY KEY (Id)) ENGINE=InnoDB;
CREATE TABLE Product_Order (No INT NOT NULL AUTO_INCREMENT,
Product_Category INT NOT NULL,
Product_Id INT NOT NULL,
Customer_Id INT NOT NULL,
PRIMARY KEY(No),
INDEX (Product_Category, Product_Id),
FOREIGN KEY (Product_Category, Product_Id)
REFERENCES Product(Category, Id) ON UPDATE CASCADE ON DELETE RESTRICT,
INDEX (Customer_Id),
FOREIGN KEY (Customer_Id)
REFERENCES Customer(Id)
) ENGINE=INNODB;
SHOW CREATE TABLE Product_Order;
Table Product_Order
Create Table CREATE TABLE `Product_Order` (
`No` int(11) NOT NULL AUTO_INCREMENT,
`Product_Category` int(11) NOT NULL,
`Product_Id` int(11) NOT NULL,
`Customer_Id` int(11) NOT NULL,
PRIMARY KEY (`No`),
KEY `Product_Category` (`Product_Category`,`Product_Id`),
KEY `Customer_Id` (`Customer_Id`),
CONSTRAINT `product_order_ibfk_1` FOREIGN KEY (`Product_Category`, `Product_Id`) REFERENCES `Product` (`Category`, `Id`) ON UPDATE CASCADE,
CONSTRAINT `product_order_ibfk_2` FOREIGN KEY (`Customer_Id`) REFERENCES `Customer` (`Id`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
SHOW CREATE TABLE Product;
Table Product
Create Table CREATE TABLE `Product` (
`Category` int(11) NOT NULL,
`Id` int(11) NOT NULL,
`Price` decimal(10,0) DEFAULT NULL,
PRIMARY KEY (`Category`,`Id`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
SHOW CREATE TABLE Customer;
Table Customer
Create Table CREATE TABLE `Customer` (
`Id` int(11) NOT NULL,
PRIMARY KEY (`Id`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
SELECT * FROM INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS;
CONSTRAINT_CATALOG def
CONSTRAINT_SCHEMA test
CONSTRAINT_NAME product_order_ibfk_1
UNIQUE_CONSTRAINT_CATALOG def
UNIQUE_CONSTRAINT_SCHEMA test
UNIQUE_CONSTRAINT_NAME PRIMARY
MATCH_OPTION NONE
UPDATE_RULE CASCADE
DELETE_RULE RESTRICT
TABLE_NAME Product_Order
REFERENCED_TABLE_NAME Product
CONSTRAINT_CATALOG def
CONSTRAINT_SCHEMA test
CONSTRAINT_NAME product_order_ibfk_2
UNIQUE_CONSTRAINT_CATALOG def
UNIQUE_CONSTRAINT_SCHEMA test
UNIQUE_CONSTRAINT_NAME PRIMARY
MATCH_OPTION NONE
UPDATE_RULE RESTRICT
DELETE_RULE RESTRICT
TABLE_NAME Product_Order
REFERENCED_TABLE_NAME Customer
DROP TABLE Product_Order;
DROP TABLE Product;
DROP TABLE Customer;

View File

@ -0,0 +1,26 @@
drop table if exists t1;
SET @@SESSION.AUTO_INCREMENT_INCREMENT=1, @@SESSION.AUTO_INCREMENT_OFFSET=1;
CREATE TABLE t1 (c1 INT PRIMARY KEY AUTO_INCREMENT) ENGINE=InnoDB;
INSERT INTO t1 VALUES (null);
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`c1` int(11) NOT NULL AUTO_INCREMENT,
PRIMARY KEY (`c1`)
) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=latin1
DELETE FROM t1;
OPTIMIZE TABLE t1;
Table Op Msg_type Msg_text
test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
test.t1 optimize status OK
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`c1` int(11) NOT NULL AUTO_INCREMENT,
PRIMARY KEY (`c1`)
) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=latin1
INSERT INTO t1 VALUES(null);
SELECT * FROM t1;
c1
2
DROP TABLE t1;

View File

@ -1,5 +1,8 @@
drop table if exists t1,t2,t3,t4;
drop database if exists mysqltest;
CREATE TABLE bug58912 (a BLOB, b TEXT, PRIMARY KEY(a(1))) ENGINE=InnoDB;
INSERT INTO bug58912 VALUES(REPEAT('a',8000),REPEAT('b',8000));
UPDATE bug58912 SET a=REPEAT('a',7999);
create table t1 (id int unsigned not null auto_increment, code tinyint unsigned not null, name char(20) not null, primary key (id), key (code), unique (name)) engine=innodb;
insert into t1 (code, name) values (1, 'Tim'), (1, 'Monty'), (2, 'David'), (2, 'Erik'), (3, 'Sasha'), (3, 'Jeremy'), (4, 'Matt');
select id, code, name from t1 order by id;
@ -1670,10 +1673,10 @@ variable_value - @innodb_rows_deleted_orig
71
SELECT variable_value - @innodb_rows_inserted_orig FROM information_schema.global_status WHERE LOWER(variable_name) = 'innodb_rows_inserted';
variable_value - @innodb_rows_inserted_orig
1065
1066
SELECT variable_value - @innodb_rows_updated_orig FROM information_schema.global_status WHERE LOWER(variable_name) = 'innodb_rows_updated';
variable_value - @innodb_rows_updated_orig
865
866
SELECT variable_value - @innodb_row_lock_waits_orig FROM information_schema.global_status WHERE LOWER(variable_name) = 'innodb_row_lock_waits';
variable_value - @innodb_row_lock_waits_orig
0
@ -3173,3 +3176,4 @@ Variable_name Value
Handler_update 1
Variable_name Value
Handler_delete 1
DROP TABLE bug58912;

View File

@ -1,16 +1,16 @@
CREATE TABLE product (category INT NOT NULL, id INT NOT NULL,
CREATE TABLE product (category INT NOT NULL, id INT NOT NULL,
price DECIMAL, PRIMARY KEY(category, id)) ENGINE=INNODB;
CREATE TABLE customer (id INT NOT NULL, PRIMARY KEY (id)) ENGINE=INNODB;
CREATE TABLE product_order (no INT NOT NULL AUTO_INCREMENT,
product_category INT NOT NULL,
product_id INT NOT NULL,
customer_id INT NOT NULL,
PRIMARY KEY(no),
INDEX (product_category, product_id),
FOREIGN KEY (product_category, product_id)
REFERENCES product(category, id) ON UPDATE CASCADE ON DELETE RESTRICT,
INDEX (customer_id),
FOREIGN KEY (customer_id)
CREATE TABLE product_order (no INT NOT NULL AUTO_INCREMENT,
product_category INT NOT NULL,
product_id INT NOT NULL,
customer_id INT NOT NULL,
PRIMARY KEY(no),
INDEX (product_category, product_id),
FOREIGN KEY (product_category, product_id)
REFERENCES product(category, id) ON UPDATE CASCADE ON DELETE RESTRICT,
INDEX (customer_id),
FOREIGN KEY (customer_id)
REFERENCES customer(id)
) ENGINE=INNODB;
SELECT * FROM INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS;

View File

@ -0,0 +1,26 @@
-- source include/have_innodb.inc
# embedded server ignores 'delayed', so skip this
-- source include/not_embedded.inc
--disable_warnings
drop table if exists t1;
--enable_warnings
#
# Bug #18274 InnoDB auto_increment field reset on OPTIMIZE TABLE
SET @@SESSION.AUTO_INCREMENT_INCREMENT=1, @@SESSION.AUTO_INCREMENT_OFFSET=1;
CREATE TABLE t1 (c1 INT PRIMARY KEY AUTO_INCREMENT) ENGINE=InnoDB;
INSERT INTO t1 VALUES (null);
SHOW CREATE TABLE t1;
DELETE FROM t1;
OPTIMIZE TABLE t1;
SHOW CREATE TABLE t1;
INSERT INTO t1 VALUES(null);
SELECT * FROM t1;
DROP TABLE t1;
#
# restore environment to the state it was before this test execution
#
-- disable_query_log

View File

@ -49,6 +49,15 @@ drop table if exists t1,t2,t3,t4;
drop database if exists mysqltest;
--enable_warnings
# Bug#58912 InnoDB unnecessarily avoids update-in-place on column prefixes
CREATE TABLE bug58912 (a BLOB, b TEXT, PRIMARY KEY(a(1))) ENGINE=InnoDB;
INSERT INTO bug58912 VALUES(REPEAT('a',8000),REPEAT('b',8000));
UPDATE bug58912 SET a=REPEAT('a',7999);
# The above statements used to trigger a failure during purge when
# Bug#55284 was fixed while Bug#58912 was not. Defer the DROP TABLE,
# so that purge gets a chance to run (and a double free of the
# off-page column can be detected, if one is to occur.)
#
# Small basic test with ignore
#
@ -2548,6 +2557,9 @@ SET GLOBAL innodb_thread_concurrency = @innodb_thread_concurrency_orig;
-- enable_query_log
# Clean up after the Bug#55284/Bug#58912 test case.
DROP TABLE bug58912;
#######################################################################
# #
# Please, DO NOT TOUCH this file as well as the innodb.result file. #

View File

@ -12,6 +12,7 @@ create table C(id int not null auto_increment primary key, f1 int not null, fore
insert into A values(1), (2);
--disable_query_log
begin;
let $i=257;
while ($i)
{
@ -24,6 +25,7 @@ while ($i)
insert into C(f1) values(2);
dec $i;
}
commit;
--enable_query_log
# Following Deletes should not report error

View File

@ -3,19 +3,19 @@
#
-- source include/have_innodb.inc
CREATE TABLE product (category INT NOT NULL, id INT NOT NULL,
CREATE TABLE product (category INT NOT NULL, id INT NOT NULL,
price DECIMAL, PRIMARY KEY(category, id)) ENGINE=INNODB;
CREATE TABLE customer (id INT NOT NULL, PRIMARY KEY (id)) ENGINE=INNODB;
CREATE TABLE product_order (no INT NOT NULL AUTO_INCREMENT,
product_category INT NOT NULL,
product_id INT NOT NULL,
customer_id INT NOT NULL,
PRIMARY KEY(no),
INDEX (product_category, product_id),
FOREIGN KEY (product_category, product_id)
REFERENCES product(category, id) ON UPDATE CASCADE ON DELETE RESTRICT,
INDEX (customer_id),
FOREIGN KEY (customer_id)
CREATE TABLE product_order (no INT NOT NULL AUTO_INCREMENT,
product_category INT NOT NULL,
product_id INT NOT NULL,
customer_id INT NOT NULL,
PRIMARY KEY(no),
INDEX (product_category, product_id),
FOREIGN KEY (product_category, product_id)
REFERENCES product(category, id) ON UPDATE CASCADE ON DELETE RESTRICT,
INDEX (customer_id),
FOREIGN KEY (customer_id)
REFERENCES customer(id)
) ENGINE=INNODB;

View File

@ -134,7 +134,7 @@ Table Create Table
t1 CREATE TABLE `t1` (
`c1` int(11) NOT NULL AUTO_INCREMENT,
PRIMARY KEY (`c1`)
) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=latin1
) ENGINE=InnoDB AUTO_INCREMENT=102 DEFAULT CHARSET=latin1
DROP TABLE t1;
CREATE TABLE t1
(a INT NULL AUTO_INCREMENT,
@ -440,7 +440,7 @@ Table Create Table
t1 CREATE TABLE `t1` (
`c1` int(11) NOT NULL AUTO_INCREMENT,
PRIMARY KEY (`c1`)
) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=latin1
) ENGINE=InnoDB AUTO_INCREMENT=102 DEFAULT CHARSET=latin1
/*!50100 PARTITION BY HASH (c1)
PARTITIONS 2 */
DROP TABLE t1;

View File

@ -53,4 +53,56 @@ eval SELECT * FROM XY.T_$tcs LIMIT 1;
--enable_query_log
--enable_result_log
DROP DATABASE XY;
USE TEST;
--echo #
--echo # Bug55222 Mysqldump table names case bug in REFERENCES clause
--echo # InnoDB did not handle lower_case_table_names=2 for
--echo # foreign_table_names and referenced_table_names.
--echo #
SHOW VARIABLES LIKE 'lower_case_table_names';
--disable_warnings
DROP TABLE IF EXISTS `Table2`;
DROP TABLE IF EXISTS `Table1`;
--enable_warnings
CREATE TABLE `Table1`(c1 INT PRIMARY KEY) ENGINE=InnoDB;
CREATE TABLE `Table2`(c1 INT PRIMARY KEY, c2 INT) ENGINE=InnoDB;
ALTER TABLE `Table2` ADD CONSTRAINT fk1 FOREIGN KEY(c2) REFERENCES `Table1`(c1);
query_vertical SHOW CREATE TABLE `Table2`;
query_vertical SELECT * FROM INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS;
DROP TABLE `Table2`;
DROP TABLE `Table1`;
--disable_warnings
DROP TABLE IF EXISTS Product_Order;
DROP TABLE IF EXISTS Product;
DROP TABLE IF EXISTS Customer;
--enable_warnings
CREATE TABLE Product (Category INT NOT NULL, Id INT NOT NULL,
Price DECIMAL, PRIMARY KEY(Category, Id)) ENGINE=InnoDB;
CREATE TABLE Customer (Id INT NOT NULL, PRIMARY KEY (Id)) ENGINE=InnoDB;
CREATE TABLE Product_Order (No INT NOT NULL AUTO_INCREMENT,
Product_Category INT NOT NULL,
Product_Id INT NOT NULL,
Customer_Id INT NOT NULL,
PRIMARY KEY(No),
INDEX (Product_Category, Product_Id),
FOREIGN KEY (Product_Category, Product_Id)
REFERENCES Product(Category, Id) ON UPDATE CASCADE ON DELETE RESTRICT,
INDEX (Customer_Id),
FOREIGN KEY (Customer_Id)
REFERENCES Customer(Id)
) ENGINE=INNODB;
query_vertical SHOW CREATE TABLE Product_Order;
query_vertical SHOW CREATE TABLE Product;
query_vertical SHOW CREATE TABLE Customer;
query_vertical SELECT * FROM INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS;
DROP TABLE Product_Order;
DROP TABLE Product;
DROP TABLE Customer;

View File

@ -371,7 +371,11 @@ uint explain_filename(THD* thd,
Table name length.
*/
uint filename_to_tablename(const char *from, char *to, uint to_length)
uint filename_to_tablename(const char *from, char *to, uint to_length
#ifndef DBUG_OFF
, bool stay_quiet
#endif /* DBUG_OFF */
)
{
uint errors;
size_t res;
@ -391,7 +395,13 @@ uint filename_to_tablename(const char *from, char *to, uint to_length)
{
res= (strxnmov(to, to_length, MYSQL50_TABLE_NAME_PREFIX, from, NullS) -
to);
sql_print_error("Invalid (old?) table or database name '%s'", from);
#ifndef DBUG_OFF
if (!stay_quiet) {
#endif /* DBUG_OFF */
sql_print_error("Invalid (old?) table or database name '%s'", from);
#ifndef DBUG_OFF
}
#endif /* DBUG_OFF */
/*
TODO: add a stored procedure for fix table and database names,
and mention its name in error log.

View File

@ -123,7 +123,11 @@ enum enum_explain_filename_mode
#define NO_FRM_RENAME (1 << 2)
#define FRM_ONLY (1 << 3)
uint filename_to_tablename(const char *from, char *to, uint to_length);
uint filename_to_tablename(const char *from, char *to, uint to_length
#ifndef DBUG_OFF
, bool stay_quiet = false
#endif /* DBUG_OFF */
);
uint tablename_to_filename(const char *from, char *to, uint to_length);
uint check_n_cut_mysql50_prefix(const char *from, char *to, uint to_length);
bool check_mysql50_prefix(const char *name);
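
The new stay_quiet argument above exists only when DBUG_OFF is not defined, so release builds keep the original three-argument filename_to_tablename() signature while debug-only callers (such as the tablename-encoding self-test in ha_innodb.cc further down in this diff) can suppress the error-log message. A minimal standalone sketch of the same pattern, with hypothetical names and assuming a debug build where DBUG_OFF is undefined:

#include <cstdio>

/* Hypothetical stand-in for sql_print_error(). */
static void log_error(const char* name)
{
        std::fprintf(stderr, "Invalid (old?) table or database name '%s'\n", name);
}

/* The extra argument is compiled in only for debug builds; its default value
keeps every existing call site unchanged. */
static unsigned int decode_name(const char* from, char* to, unsigned int to_length
#ifndef DBUG_OFF
                                , bool stay_quiet = false
#endif
                                )
{
#ifndef DBUG_OFF
        if (!stay_quiet)
#endif
                log_error(from);
        (void) to; (void) to_length;    /* real conversion omitted in this sketch */
        return 0;
}

int main()
{
        char buf[64];
        decode_name("#mysql50#t@1", buf, sizeof buf);       /* logs the error */
#ifndef DBUG_OFF
        decode_name("#mysql50#t@1", buf, sizeof buf, true); /* debug build: silent */
#endif
        return 0;
}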

View File

@ -40,10 +40,9 @@ IF(UNIX)
ENDIF()
ENDIF()
# Enable InnoDB's UNIV_DEBUG if MySQL's WITH_DEBUG is defined
IF(WITH_DEBUG)
ADD_DEFINITIONS("-DUNIV_DEBUG")
ENDIF()
# Enable InnoDB's UNIV_DEBUG for debug builds
SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -DUNIV_DEBUG")
SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DUNIV_DEBUG")
IF(NOT MSVC)
# either define HAVE_IB_GCC_ATOMIC_BUILTINS or not
@ -190,7 +189,7 @@ IF(SIZEOF_PTHREAD_T)
ENDIF()
IF(MSVC)
ADD_DEFINITIONS(-DHAVE_WINDOWS_ATOMICS -DHAVE_IB_PAUSE_INSTRUCTION)
ADD_DEFINITIONS(-DHAVE_WINDOWS_ATOMICS)
ENDIF()

View File

@ -664,7 +664,7 @@ btr_page_get_father_node_ptr_func(
" to fix the\n"
"InnoDB: corruption. If the crash happens at "
"the database startup, see\n"
"InnoDB: " REFMAN "forcing-recovery.html about\n"
"InnoDB: " REFMAN "forcing-innodb-recovery.html about\n"
"InnoDB: forcing recovery. "
"Then dump + drop + reimport.\n", stderr);

View File

@ -1874,7 +1874,8 @@ btr_cur_update_in_place(
NOT call it if index is secondary */
if (!dict_index_is_clust(index)
|| row_upd_changes_ord_field_binary(NULL, index, update)) {
|| row_upd_changes_ord_field_binary(NULL, NULL,
index, update)) {
/* Remove possible hash index pointer to this record */
btr_search_update_hash_on_delete(cursor);
@ -2626,27 +2627,24 @@ ulint
btr_cur_del_mark_set_clust_rec(
/*===========================*/
ulint flags, /*!< in: undo logging and locking flags */
btr_cur_t* cursor, /*!< in: cursor */
buf_block_t* block, /*!< in/out: buffer block of the record */
rec_t* rec, /*!< in/out: record */
dict_index_t* index, /*!< in: clustered index of the record */
const ulint* offsets,/*!< in: rec_get_offsets(rec) */
ibool val, /*!< in: value to set */
que_thr_t* thr, /*!< in: query thread */
mtr_t* mtr) /*!< in: mtr */
{
dict_index_t* index;
buf_block_t* block;
roll_ptr_t roll_ptr;
ulint err;
rec_t* rec;
page_zip_des_t* page_zip;
trx_t* trx;
mem_heap_t* heap = NULL;
ulint offsets_[REC_OFFS_NORMAL_SIZE];
ulint* offsets = offsets_;
rec_offs_init(offsets_);
rec = btr_cur_get_rec(cursor);
index = cursor->index;
ut_ad(dict_index_is_clust(index));
ut_ad(rec_offs_validate(rec, index, offsets));
ut_ad(!!page_rec_is_comp(rec) == dict_table_is_comp(index->table));
offsets = rec_get_offsets(rec, index, offsets, ULINT_UNDEFINED, &heap);
ut_ad(buf_block_get_frame(block) == page_align(rec));
ut_ad(page_is_leaf(page_align(rec)));
#ifdef UNIV_DEBUG
if (btr_cur_print_record_ops && thr) {
@ -2658,13 +2656,12 @@ btr_cur_del_mark_set_clust_rec(
ut_ad(dict_index_is_clust(index));
ut_ad(!rec_get_deleted_flag(rec, rec_offs_comp(offsets)));
err = lock_clust_rec_modify_check_and_lock(flags,
btr_cur_get_block(cursor),
err = lock_clust_rec_modify_check_and_lock(flags, block,
rec, index, offsets, thr);
if (err != DB_SUCCESS) {
goto func_exit;
return(err);
}
err = trx_undo_report_row_operation(flags, TRX_UNDO_MODIFY_OP, thr,
@ -2672,11 +2669,9 @@ btr_cur_del_mark_set_clust_rec(
&roll_ptr);
if (err != DB_SUCCESS) {
goto func_exit;
return(err);
}
block = btr_cur_get_block(cursor);
if (block->is_hashed) {
rw_lock_x_lock(&btr_search_latch);
}
@ -2699,10 +2694,6 @@ btr_cur_del_mark_set_clust_rec(
btr_cur_del_mark_set_clust_rec_log(flags, rec, index, val, trx,
roll_ptr, mtr);
func_exit:
if (UNIV_LIKELY_NULL(heap)) {
mem_heap_free(heap);
}
return(err);
}
@ -3754,108 +3745,36 @@ btr_cur_set_ownership_of_extern_field(
}
/*******************************************************************//**
Marks not updated extern fields as not-owned by this record. The ownership
is transferred to the updated record which is inserted elsewhere in the
Marks non-updated off-page fields as disowned by this record. The ownership
must be transferred to the updated record which is inserted elsewhere in the
index tree. In purge only the owner of externally stored field is allowed
to free the field.
@return TRUE if BLOB ownership was transferred */
to free the field. */
UNIV_INTERN
ibool
btr_cur_mark_extern_inherited_fields(
/*=================================*/
void
btr_cur_disown_inherited_fields(
/*============================*/
page_zip_des_t* page_zip,/*!< in/out: compressed page whose uncompressed
part will be updated, or NULL */
rec_t* rec, /*!< in/out: record in a clustered index */
dict_index_t* index, /*!< in: index of the page */
const ulint* offsets,/*!< in: array returned by rec_get_offsets() */
const upd_t* update, /*!< in: update vector */
mtr_t* mtr) /*!< in: mtr, or NULL if not logged */
mtr_t* mtr) /*!< in/out: mini-transaction */
{
ulint n;
ulint j;
ulint i;
ibool change_ownership = FALSE;
ut_ad(rec_offs_validate(rec, NULL, offsets));
ut_ad(rec_offs_validate(rec, index, offsets));
ut_ad(!rec_offs_comp(offsets) || !rec_get_node_ptr_flag(rec));
ut_ad(rec_offs_any_extern(offsets));
ut_ad(mtr);
if (!rec_offs_any_extern(offsets)) {
return(FALSE);
}
n = rec_offs_n_fields(offsets);
for (i = 0; i < n; i++) {
if (rec_offs_nth_extern(offsets, i)) {
/* Check it is not in updated fields */
if (update) {
for (j = 0; j < upd_get_n_fields(update);
j++) {
if (upd_get_nth_field(update, j)
->field_no == i) {
goto updated;
}
}
}
for (i = 0; i < rec_offs_n_fields(offsets); i++) {
if (rec_offs_nth_extern(offsets, i)
&& !upd_get_field_by_field_no(update, i)) {
btr_cur_set_ownership_of_extern_field(
page_zip, rec, index, offsets, i, FALSE, mtr);
change_ownership = TRUE;
updated:
;
}
}
return(change_ownership);
}
/*******************************************************************//**
The complement of the previous function: in an update entry may inherit
some externally stored fields from a record. We must mark them as inherited
in entry, so that they are not freed in a rollback. */
UNIV_INTERN
void
btr_cur_mark_dtuple_inherited_extern(
/*=================================*/
dtuple_t* entry, /*!< in/out: updated entry to be
inserted to clustered index */
const upd_t* update) /*!< in: update vector */
{
ulint i;
for (i = 0; i < dtuple_get_n_fields(entry); i++) {
dfield_t* dfield = dtuple_get_nth_field(entry, i);
byte* data;
ulint len;
ulint j;
if (!dfield_is_ext(dfield)) {
continue;
}
/* Check if it is in updated fields */
for (j = 0; j < upd_get_n_fields(update); j++) {
if (upd_get_nth_field(update, j)->field_no == i) {
goto is_updated;
}
}
data = dfield_get_data(dfield);
len = dfield_get_len(dfield);
data[len - BTR_EXTERN_FIELD_REF_SIZE + BTR_EXTERN_LEN]
|= BTR_EXTERN_INHERITED_FLAG;
is_updated:
;
}
}
/*******************************************************************//**
@ -3893,29 +3812,6 @@ btr_cur_unmark_extern_fields(
}
}
/*******************************************************************//**
Marks all extern fields in a dtuple as owned by the record. */
UNIV_INTERN
void
btr_cur_unmark_dtuple_extern_fields(
/*================================*/
dtuple_t* entry) /*!< in/out: clustered index entry */
{
ulint i;
for (i = 0; i < dtuple_get_n_fields(entry); i++) {
dfield_t* dfield = dtuple_get_nth_field(entry, i);
if (dfield_is_ext(dfield)) {
byte* data = dfield_get_data(dfield);
ulint len = dfield_get_len(dfield);
data[len - BTR_EXTERN_FIELD_REF_SIZE + BTR_EXTERN_LEN]
&= ~BTR_EXTERN_OWNER_FLAG;
}
}
}
/*******************************************************************//**
Flags the data tuple fields that are marked as extern storage in the
update vector. We use this function to remember which fields we must

View File

@ -536,7 +536,7 @@ buf_page_is_corrupted(
"you may have copied the InnoDB\n"
"InnoDB: tablespace but not the InnoDB "
"log files. See\n"
"InnoDB: " REFMAN "forcing-recovery.html\n"
"InnoDB: " REFMAN "forcing-innodb-recovery.html\n"
"InnoDB: for more information.\n",
(ulong) mach_read_from_4(read_buf
+ FIL_PAGE_OFFSET),
@ -2745,7 +2745,8 @@ buf_page_get_gen(
ut_ad(zip_size == fil_space_get_zip_size(space));
ut_ad(ut_is_2pow(zip_size));
#ifndef UNIV_LOG_DEBUG
ut_ad(!ibuf_inside() || ibuf_page(space, zip_size, offset, NULL));
ut_ad(!ibuf_inside() || ibuf_page_low(space, zip_size, offset,
FALSE, file, line, NULL));
#endif
buf_pool->stat.n_page_gets++;
fold = buf_page_address_fold(space, offset);
@ -4038,7 +4039,7 @@ corrupt:
"InnoDB: TABLE to scan your"
" table for corruption.\n"
"InnoDB: See also "
REFMAN "forcing-recovery.html\n"
REFMAN "forcing-innodb-recovery.html\n"
"InnoDB: about forcing recovery.\n", stderr);
if (srv_force_recovery < SRV_FORCE_IGNORE_CORRUPT) {
@ -4793,23 +4794,203 @@ buf_get_modified_ratio_pct(void)
return(ratio);
}
/*******************************************************************//**
Aggregates one buffer pool instance's stats into the total buffer pool stats */
static
void
buf_stats_aggregate_pool_info(
/*==========================*/
buf_pool_info_t* total_info, /*!< in/out: the buffer pool
info to store aggregated
result */
const buf_pool_info_t* pool_info) /*!< in: individual buffer pool
stats info */
{
ut_a(total_info && pool_info);
/* Nothing to copy if total_info is the same as pool_info */
if (total_info == pool_info) {
return;
}
total_info->pool_size += pool_info->pool_size;
total_info->lru_len += pool_info->lru_len;
total_info->old_lru_len += pool_info->old_lru_len;
total_info->free_list_len += pool_info->free_list_len;
total_info->flush_list_len += pool_info->flush_list_len;
total_info->n_pend_unzip += pool_info->n_pend_unzip;
total_info->n_pend_reads += pool_info->n_pend_reads;
total_info->n_pending_flush_lru += pool_info->n_pending_flush_lru;
total_info->n_pending_flush_list += pool_info->n_pending_flush_list;
total_info->n_pending_flush_single_page +=
pool_info->n_pending_flush_single_page;
total_info->n_pages_made_young += pool_info->n_pages_made_young;
total_info->n_pages_not_made_young += pool_info->n_pages_not_made_young;
total_info->n_pages_read += pool_info->n_pages_read;
total_info->n_pages_created += pool_info->n_pages_created;
total_info->n_pages_written += pool_info->n_pages_written;
total_info->n_page_gets += pool_info->n_page_gets;
total_info->n_ra_pages_read += pool_info->n_ra_pages_read;
total_info->n_ra_pages_evicted += pool_info->n_ra_pages_evicted;
total_info->page_made_young_rate += pool_info->page_made_young_rate;
total_info->page_not_made_young_rate +=
pool_info->page_not_made_young_rate;
total_info->pages_read_rate += pool_info->pages_read_rate;
total_info->pages_created_rate += pool_info->pages_created_rate;
total_info->pages_written_rate += pool_info->pages_written_rate;
total_info->n_page_get_delta += pool_info->n_page_get_delta;
total_info->page_read_delta += pool_info->page_read_delta;
total_info->young_making_delta += pool_info->young_making_delta;
total_info->not_young_making_delta += pool_info->not_young_making_delta;
total_info->pages_readahead_rate += pool_info->pages_readahead_rate;
total_info->pages_evicted_rate += pool_info->pages_evicted_rate;
total_info->unzip_lru_len += pool_info->unzip_lru_len;
total_info->io_sum += pool_info->io_sum;
total_info->io_cur += pool_info->io_cur;
total_info->unzip_sum += pool_info->unzip_sum;
total_info->unzip_cur += pool_info->unzip_cur;
}
/*******************************************************************//**
Collect buffer pool stats information for a buffer pool. Also
record aggregated stats if there is more than one buffer pool
in the server */
static
void
buf_stats_get_pool_info(
/*====================*/
buf_pool_t* buf_pool, /*!< in: buffer pool */
ulint pool_id, /*!< in: buffer pool ID */
buf_pool_info_t* all_pool_info) /*!< in/out: buffer pool info
to fill */
{
buf_pool_info_t* pool_info;
time_t current_time;
double time_elapsed;
/* Find appropriate pool_info to store stats for this buffer pool */
pool_info = &all_pool_info[pool_id];
buf_pool_mutex_enter(buf_pool);
buf_flush_list_mutex_enter(buf_pool);
pool_info->pool_unique_id = pool_id;
pool_info->pool_size = buf_pool->curr_size;
pool_info->lru_len = UT_LIST_GET_LEN(buf_pool->LRU);
pool_info->old_lru_len = buf_pool->LRU_old_len;
pool_info->free_list_len = UT_LIST_GET_LEN(buf_pool->free);
pool_info->flush_list_len = UT_LIST_GET_LEN(buf_pool->flush_list);
pool_info->n_pend_unzip = UT_LIST_GET_LEN(buf_pool->unzip_LRU);
pool_info->n_pend_reads = buf_pool->n_pend_reads;
pool_info->n_pending_flush_lru =
(buf_pool->n_flush[BUF_FLUSH_LRU]
+ buf_pool->init_flush[BUF_FLUSH_LRU]);
pool_info->n_pending_flush_list =
(buf_pool->n_flush[BUF_FLUSH_LIST]
+ buf_pool->init_flush[BUF_FLUSH_LIST]);
pool_info->n_pending_flush_single_page =
buf_pool->n_flush[BUF_FLUSH_SINGLE_PAGE];
buf_flush_list_mutex_exit(buf_pool);
current_time = time(NULL);
time_elapsed = 0.001 + difftime(current_time,
buf_pool->last_printout_time);
pool_info->n_pages_made_young = buf_pool->stat.n_pages_made_young;
pool_info->n_pages_not_made_young =
buf_pool->stat.n_pages_not_made_young;
pool_info->n_pages_read = buf_pool->stat.n_pages_read;
pool_info->n_pages_created = buf_pool->stat.n_pages_created;
pool_info->n_pages_written = buf_pool->stat.n_pages_written;
pool_info->n_page_gets = buf_pool->stat.n_page_gets;
pool_info->n_ra_pages_read = buf_pool->stat.n_ra_pages_read;
pool_info->n_ra_pages_evicted = buf_pool->stat.n_ra_pages_evicted;
pool_info->page_made_young_rate =
(buf_pool->stat.n_pages_made_young
- buf_pool->old_stat.n_pages_made_young) / time_elapsed;
pool_info->page_not_made_young_rate =
(buf_pool->stat.n_pages_not_made_young
- buf_pool->old_stat.n_pages_not_made_young) / time_elapsed;
pool_info->pages_read_rate =
(buf_pool->stat.n_pages_read
- buf_pool->old_stat.n_pages_read) / time_elapsed;
pool_info->pages_created_rate =
(buf_pool->stat.n_pages_created
- buf_pool->old_stat.n_pages_created) / time_elapsed;
pool_info->pages_written_rate =
(buf_pool->stat.n_pages_written
- buf_pool->old_stat.n_pages_written) / time_elapsed;
pool_info->n_page_get_delta = buf_pool->stat.n_page_gets
- buf_pool->old_stat.n_page_gets;
if (pool_info->n_page_get_delta) {
pool_info->page_read_delta = buf_pool->stat.n_pages_read
- buf_pool->old_stat.n_pages_read;
pool_info->young_making_delta =
buf_pool->stat.n_pages_made_young
- buf_pool->old_stat.n_pages_made_young;
pool_info->not_young_making_delta =
buf_pool->stat.n_pages_not_made_young
- buf_pool->old_stat.n_pages_not_made_young;
}
pool_info->pages_readahead_rate =
(buf_pool->stat.n_ra_pages_read
- buf_pool->old_stat.n_ra_pages_read) / time_elapsed;
pool_info->pages_evicted_rate =
(buf_pool->stat.n_ra_pages_evicted
- buf_pool->old_stat.n_ra_pages_evicted) / time_elapsed;
pool_info->unzip_lru_len = UT_LIST_GET_LEN(buf_pool->unzip_LRU);
pool_info->io_sum = buf_LRU_stat_sum.io;
pool_info->io_cur = buf_LRU_stat_cur.io;
pool_info->unzip_sum = buf_LRU_stat_sum.unzip;
pool_info->unzip_cur = buf_LRU_stat_cur.unzip;
buf_refresh_io_stats(buf_pool);
buf_pool_mutex_exit(buf_pool);
}
/*********************************************************************//**
Prints info of the buffer i/o. */
UNIV_INTERN
void
buf_print_io_instance(
/*==================*/
buf_pool_t* buf_pool, /*!< in: buffer pool instance */
buf_pool_info_t*pool_info, /*!< in: buffer pool info */
FILE* file) /*!< in/out: buffer where to print */
{
time_t current_time;
double time_elapsed;
ulint n_gets_diff;
ut_ad(buf_pool);
buf_pool_mutex_enter(buf_pool);
buf_flush_list_mutex_enter(buf_pool);
ut_ad(pool_info);
fprintf(file,
"Buffer pool size %lu\n"
@ -4819,70 +5000,42 @@ buf_print_io_instance(
"Modified db pages %lu\n"
"Pending reads %lu\n"
"Pending writes: LRU %lu, flush list %lu, single page %lu\n",
(ulong) buf_pool->curr_size,
(ulong) UT_LIST_GET_LEN(buf_pool->free),
(ulong) UT_LIST_GET_LEN(buf_pool->LRU),
(ulong) buf_pool->LRU_old_len,
(ulong) UT_LIST_GET_LEN(buf_pool->flush_list),
(ulong) buf_pool->n_pend_reads,
(ulong) buf_pool->n_flush[BUF_FLUSH_LRU]
+ buf_pool->init_flush[BUF_FLUSH_LRU],
(ulong) buf_pool->n_flush[BUF_FLUSH_LIST]
+ buf_pool->init_flush[BUF_FLUSH_LIST],
(ulong) buf_pool->n_flush[BUF_FLUSH_SINGLE_PAGE]);
buf_flush_list_mutex_exit(buf_pool);
current_time = time(NULL);
time_elapsed = 0.001 + difftime(current_time,
buf_pool->last_printout_time);
pool_info->pool_size,
pool_info->free_list_len,
pool_info->lru_len,
pool_info->old_lru_len,
pool_info->flush_list_len,
pool_info->n_pend_reads,
pool_info->n_pending_flush_lru,
pool_info->n_pending_flush_list,
pool_info->n_pending_flush_single_page);
fprintf(file,
"Pages made young %lu, not young %lu\n"
"%.2f youngs/s, %.2f non-youngs/s\n"
"Pages read %lu, created %lu, written %lu\n"
"%.2f reads/s, %.2f creates/s, %.2f writes/s\n",
(ulong) buf_pool->stat.n_pages_made_young,
(ulong) buf_pool->stat.n_pages_not_made_young,
(buf_pool->stat.n_pages_made_young
- buf_pool->old_stat.n_pages_made_young)
/ time_elapsed,
(buf_pool->stat.n_pages_not_made_young
- buf_pool->old_stat.n_pages_not_made_young)
/ time_elapsed,
(ulong) buf_pool->stat.n_pages_read,
(ulong) buf_pool->stat.n_pages_created,
(ulong) buf_pool->stat.n_pages_written,
(buf_pool->stat.n_pages_read
- buf_pool->old_stat.n_pages_read)
/ time_elapsed,
(buf_pool->stat.n_pages_created
- buf_pool->old_stat.n_pages_created)
/ time_elapsed,
(buf_pool->stat.n_pages_written
- buf_pool->old_stat.n_pages_written)
/ time_elapsed);
pool_info->n_pages_made_young,
pool_info->n_pages_not_made_young,
pool_info->page_made_young_rate,
pool_info->page_not_made_young_rate,
pool_info->n_pages_read,
pool_info->n_pages_created,
pool_info->n_pages_written,
pool_info->pages_read_rate,
pool_info->pages_created_rate,
pool_info->pages_written_rate);
n_gets_diff = buf_pool->stat.n_page_gets
- buf_pool->old_stat.n_page_gets;
if (n_gets_diff) {
if (pool_info->n_page_get_delta) {
fprintf(file,
"Buffer pool hit rate %lu / 1000,"
" young-making rate %lu / 1000 not %lu / 1000\n",
(ulong)
(1000 - ((1000 * (buf_pool->stat.n_pages_read
- buf_pool->old_stat.n_pages_read))
/ (buf_pool->stat.n_page_gets
- buf_pool->old_stat.n_page_gets))),
(ulong)
(1000 * (buf_pool->stat.n_pages_made_young
- buf_pool->old_stat.n_pages_made_young)
/ n_gets_diff),
(ulong)
(1000 * (buf_pool->stat.n_pages_not_made_young
- buf_pool->old_stat.n_pages_not_made_young)
/ n_gets_diff));
(ulong) (1000 - (1000 * pool_info->page_read_delta
/ pool_info->n_page_get_delta)),
(ulong) (1000 * pool_info->young_making_delta
/ pool_info->n_page_get_delta),
(ulong) (1000 * pool_info->not_young_making_delta
/ pool_info->n_page_get_delta));
} else {
fputs("No buffer pool page gets since the last printout\n",
file);
@ -4891,25 +5044,17 @@ buf_print_io_instance(
/* Statistics about read ahead algorithm */
fprintf(file, "Pages read ahead %.2f/s,"
" evicted without access %.2f/s\n",
(buf_pool->stat.n_ra_pages_read
- buf_pool->old_stat.n_ra_pages_read)
/ time_elapsed,
(buf_pool->stat.n_ra_pages_evicted
- buf_pool->old_stat.n_ra_pages_evicted)
/ time_elapsed);
pool_info->pages_readahead_rate,
pool_info->pages_evicted_rate);
/* Print some values to help us with visualizing what is
happening with LRU eviction. */
fprintf(file,
"LRU len: %lu, unzip_LRU len: %lu\n"
"I/O sum[%lu]:cur[%lu], unzip sum[%lu]:cur[%lu]\n",
UT_LIST_GET_LEN(buf_pool->LRU),
UT_LIST_GET_LEN(buf_pool->unzip_LRU),
buf_LRU_stat_sum.io, buf_LRU_stat_cur.io,
buf_LRU_stat_sum.unzip, buf_LRU_stat_cur.unzip);
buf_refresh_io_stats(buf_pool);
buf_pool_mutex_exit(buf_pool);
pool_info->lru_len, pool_info->unzip_lru_len,
pool_info->io_sum, pool_info->io_cur,
pool_info->unzip_sum, pool_info->unzip_cur);
}
/*********************************************************************//**
@ -4920,14 +5065,58 @@ buf_print_io(
/*=========*/
FILE* file) /*!< in/out: buffer where to print */
{
ulint i;
ulint i;
buf_pool_info_t* pool_info;
buf_pool_info_t* pool_info_total;
/* If srv_buf_pool_instances is greater than 1, allocate
one extra buf_pool_info_t, the last one stores
aggregated/total values from all pools */
if (srv_buf_pool_instances > 1) {
pool_info = (buf_pool_info_t*) mem_zalloc((
srv_buf_pool_instances + 1) * sizeof *pool_info);
pool_info_total = &pool_info[srv_buf_pool_instances];
} else {
ut_a(srv_buf_pool_instances == 1);
pool_info_total = pool_info = (buf_pool_info_t*) mem_zalloc(
sizeof *pool_info);
}
for (i = 0; i < srv_buf_pool_instances; i++) {
buf_pool_t* buf_pool;
buf_pool = buf_pool_from_array(i);
buf_print_io_instance(buf_pool, file);
/* Fetch individual buffer pool info and calculate
aggregated stats along the way */
buf_stats_get_pool_info(buf_pool, i, pool_info);
/* If we have more than one buffer pool, store
the aggregated stats */
if (srv_buf_pool_instances > 1) {
buf_stats_aggregate_pool_info(pool_info_total,
&pool_info[i]);
}
}
/* Print the aggregate buffer pool info */
buf_print_io_instance(pool_info_total, file);
/* If there is more than one buffer pool, print each individual pool
info */
if (srv_buf_pool_instances > 1) {
fputs("----------------------\n"
"INDIVIDUAL BUFFER POOL INFO\n"
"----------------------\n", file);
for (i = 0; i < srv_buf_pool_instances; i++) {
fprintf(file, "---BUFFER POOL %lu\n", i);
buf_print_io_instance(&pool_info[i], file);
}
}
mem_free(pool_info);
}
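
For readers skimming the hunk above: buf_print_io() now gathers a buf_pool_info_t snapshot for each buffer pool instance, sums the snapshots into a trailing "total" element when more than one instance is configured, and prints the total before the per-instance details. A simplified, self-contained sketch of that aggregate-then-print flow (hypothetical struct and made-up numbers, not the InnoDB API):

#include <cstdio>
#include <vector>

/* Stripped-down stand-in for buf_pool_info_t; only two counters are kept. */
struct pool_info {
        unsigned long pool_size;
        unsigned long n_pages_read;
};

int main()
{
        const unsigned long n_instances = 3;            /* cf. srv_buf_pool_instances */
        std::vector<pool_info> info(n_instances + 1);   /* last slot = aggregate, zeroed */

        for (unsigned long i = 0; i < n_instances; i++) {
                /* "Collect" per-instance stats. */
                info[i].pool_size    = 1024 * (i + 1);
                info[i].n_pages_read = 10 * (i + 1);

                /* Aggregate into the trailing slot, counter by counter, as
                buf_stats_aggregate_pool_info() does. */
                info[n_instances].pool_size    += info[i].pool_size;
                info[n_instances].n_pages_read += info[i].n_pages_read;
        }

        /* Print the aggregate first, then each instance, mirroring buf_print_io(). */
        std::printf("TOTAL: size %lu, pages read %lu\n",
                    info[n_instances].pool_size, info[n_instances].n_pages_read);
        for (unsigned long i = 0; i < n_instances; i++) {
                std::printf("---BUFFER POOL %lu: size %lu, pages read %lu\n",
                            i, info[i].pool_size, info[i].n_pages_read);
        }
        return 0;
}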
/**********************************************************************//**

View File

@ -88,6 +88,34 @@ ibool
buf_flush_validate_low(
/*===================*/
buf_pool_t* buf_pool); /*!< in: Buffer pool instance */
/******************************************************************//**
Validates the flush list some of the time.
@return TRUE if ok or the check was skipped */
static
ibool
buf_flush_validate_skip(
/*====================*/
buf_pool_t* buf_pool) /*!< in: Buffer pool instance */
{
/** Try buf_flush_validate_low() every this many times */
# define BUF_FLUSH_VALIDATE_SKIP 23
/** The buf_flush_validate_low() call skip counter.
Use a signed type because of the race condition below. */
static int buf_flush_validate_count = BUF_FLUSH_VALIDATE_SKIP;
/* There is a race condition below, but it does not matter,
because this call is only for heuristic purposes. We want to
reduce the call frequency of the costly buf_flush_validate_low()
check in debug builds. */
if (--buf_flush_validate_count > 0) {
return(TRUE);
}
buf_flush_validate_count = BUF_FLUSH_VALIDATE_SKIP;
return(buf_flush_validate_low(buf_pool));
}
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
/******************************************************************//**
@ -293,7 +321,7 @@ buf_flush_insert_into_flush_list(
}
#endif /* UNIV_DEBUG_VALGRIND */
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
ut_a(buf_flush_validate_low(buf_pool));
ut_a(buf_flush_validate_skip(buf_pool));
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
buf_flush_list_mutex_exit(buf_pool);
@ -515,7 +543,7 @@ buf_flush_remove(
bpage->oldest_modification = 0;
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
ut_a(buf_flush_validate_low(buf_pool));
ut_a(buf_flush_validate_skip(buf_pool));
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
buf_flush_list_mutex_exit(buf_pool);
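
Both buf_flush_validate_skip() above and the analogous fil_validate_skip() added later in this commit use the same trick: a static countdown so that an expensive debug-only consistency check runs only every Nth call, tolerating a harmless data race on the counter. A minimal sketch of the pattern in isolation (hypothetical names, not InnoDB code):

#include <cstdio>

/* Stand-in for an expensive invariant check such as buf_flush_validate_low(). */
static bool expensive_validate() { std::puts("full validation"); return true; }

/* Run the expensive check only every VALIDATE_SKIP-th call. A signed type is
used on purpose: unsynchronized concurrent decrements may momentarily drive
the counter below zero, which is harmless for a heuristic like this. */
static bool validate_skip()
{
        static const int VALIDATE_SKIP = 23;
        static int countdown = VALIDATE_SKIP;

        if (--countdown > 0) {
                return true;            /* check skipped this time */
        }
        countdown = VALIDATE_SKIP;
        return expensive_validate();
}

int main()
{
        for (int i = 0; i < 50; i++) {
                validate_skip();        /* prints "full validation" roughly twice */
        }
        return 0;
}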

View File

@ -2076,6 +2076,7 @@ buf_LRU_stat_update(void)
buf_LRU_stat_t* item;
buf_pool_t* buf_pool;
ibool evict_started = FALSE;
buf_LRU_stat_t cur_stat;
/* If we haven't started eviction yet then don't update stats. */
for (i = 0; i < srv_buf_pool_instances; i++) {
@ -2097,12 +2098,19 @@ buf_LRU_stat_update(void)
buf_LRU_stat_arr_ind++;
buf_LRU_stat_arr_ind %= BUF_LRU_STAT_N_INTERVAL;
/* Add the current value and subtract the obsolete entry. */
buf_LRU_stat_sum.io += buf_LRU_stat_cur.io - item->io;
buf_LRU_stat_sum.unzip += buf_LRU_stat_cur.unzip - item->unzip;
/* Add the current value and subtract the obsolete entry.
Since buf_LRU_stat_cur is not protected by any mutex,
it can be changing between adding to buf_LRU_stat_sum
and copying to item. Assign it to a local variable to make
sure the same value is used for both buf_LRU_stat_sum
and item. */
cur_stat = buf_LRU_stat_cur;
buf_LRU_stat_sum.io += cur_stat.io - item->io;
buf_LRU_stat_sum.unzip += cur_stat.unzip - item->unzip;
/* Put current entry in the array. */
memcpy(item, &buf_LRU_stat_cur, sizeof *item);
memcpy(item, &cur_stat, sizeof *item);
func_exit:
/* Clear the current entry. */
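
The change above fixes a subtle inconsistency: buf_LRU_stat_cur is updated by other threads without a mutex, so reading it once for the running sum and again for the memcpy() into the ring buffer could observe two different values. Copying it into a local first makes both uses see the same snapshot. A standalone sketch of the idea (hypothetical names; the snapshot copy itself is still a benign race, as in the original):

#include <cstdio>
#include <cstring>

struct lru_stat { unsigned long io; unsigned long unzip; };

static lru_stat stat_cur;        /* updated concurrently, not mutex-protected */
static lru_stat stat_sum;        /* running sum over the sliding window */
static lru_stat window[4];       /* ring buffer of past intervals */
static unsigned  window_ind;

static void stat_update()
{
        lru_stat* item = &window[window_ind];
        window_ind = (window_ind + 1) % 4;

        /* Take one snapshot and use it for BOTH the sum and the ring buffer,
        so the two cannot disagree even if stat_cur changes in between. */
        lru_stat cur = stat_cur;

        stat_sum.io    += cur.io    - item->io;
        stat_sum.unzip += cur.unzip - item->unzip;
        std::memcpy(item, &cur, sizeof *item);

        std::memset(&stat_cur, 0, sizeof stat_cur);   /* clear the current entry */
}

int main()
{
        stat_cur.io = 7; stat_cur.unzip = 3;
        stat_update();
        std::printf("sum io=%lu unzip=%lu\n", stat_sum.io, stat_sum.unzip);
        return 0;
}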

View File

@ -52,8 +52,9 @@ UNIV_INTERN dict_index_t* dict_ind_compact;
#include "que0que.h"
#include "rem0cmp.h"
#include "row0merge.h"
#include "srv0srv.h" /* srv_lower_case_table_names */
#include "m_ctype.h" /* my_isspace() */
#include "ha_prototypes.h" /* innobase_strcasecmp() */
#include "ha_prototypes.h" /* innobase_strcasecmp(), innobase_casedn_str()*/
#include <ctype.h>
@ -74,6 +75,7 @@ UNIV_INTERN rw_lock_t dict_operation_lock;
#ifdef UNIV_PFS_RWLOCK
UNIV_INTERN mysql_pfs_key_t dict_operation_lock_key;
UNIV_INTERN mysql_pfs_key_t index_tree_rw_lock_key;
UNIV_INTERN mysql_pfs_key_t dict_table_stats_latch_key;
#endif /* UNIV_PFS_RWLOCK */
#ifdef UNIV_PFS_MUTEX
@ -714,7 +716,7 @@ dict_init(void)
&dict_foreign_err_mutex, SYNC_ANY_LATCH);
for (i = 0; i < DICT_TABLE_STATS_LATCHES_SIZE; i++) {
rw_lock_create(PFS_NOT_INSTRUMENTED,
rw_lock_create(dict_table_stats_latch_key,
&dict_table_stats_latches[i], SYNC_INDEX_TREE);
}
}
@ -1080,13 +1082,13 @@ dict_table_rename_in_cache(
/* Allocate a longer name buffer;
TODO: store buf len to save memory */
foreign->foreign_table_name
= mem_heap_alloc(foreign->heap,
ut_strlen(table->name) + 1);
foreign->foreign_table_name = mem_heap_strdup(
foreign->heap, table->name);
dict_mem_foreign_table_name_lookup_set(foreign, TRUE);
} else {
strcpy(foreign->foreign_table_name, table->name);
dict_mem_foreign_table_name_lookup_set(foreign, FALSE);
}
strcpy(foreign->foreign_table_name, table->name);
if (strchr(foreign->id, '/')) {
ulint db_len;
char* old_id;
@ -1152,12 +1154,14 @@ dict_table_rename_in_cache(
/* Allocate a longer name buffer;
TODO: store buf len to save memory */
foreign->referenced_table_name = mem_heap_alloc(
foreign->heap, strlen(table->name) + 1);
foreign->referenced_table_name = mem_heap_strdup(
foreign->heap, table->name);
dict_mem_referenced_table_name_lookup_set(foreign, TRUE);
} else {
/* Use the same buffer */
strcpy(foreign->referenced_table_name, table->name);
dict_mem_referenced_table_name_lookup_set(foreign, FALSE);
}
strcpy(foreign->referenced_table_name, table->name);
foreign = UT_LIST_GET_NEXT(referenced_list, foreign);
}
@ -2583,10 +2587,10 @@ dict_foreign_add_to_cache(
ut_ad(mutex_own(&(dict_sys->mutex)));
for_table = dict_table_check_if_in_cache_low(
foreign->foreign_table_name);
foreign->foreign_table_name_lookup);
ref_table = dict_table_check_if_in_cache_low(
foreign->referenced_table_name);
foreign->referenced_table_name_lookup);
ut_a(for_table || ref_table);
if (for_table) {
@ -2703,7 +2707,7 @@ dict_scan_to(
quote = '\0';
} else if (quote) {
/* Within quotes: do nothing. */
} else if (*ptr == '`' || *ptr == '"') {
} else if (*ptr == '`' || *ptr == '"' || *ptr == '\'') {
/* Starting quote: remember the quote character. */
quote = *ptr;
} else {
@ -3015,19 +3019,25 @@ dict_scan_table_name(
memcpy(ref, database_name, database_name_len);
ref[database_name_len] = '/';
memcpy(ref + database_name_len + 1, table_name, table_name_len + 1);
#ifndef __WIN__
if (srv_lower_case_table_names) {
#endif /* !__WIN__ */
/* The table name is always put to lower case on Windows. */
/* Values; 0 = Store and compare as given; case sensitive
1 = Store and compare in lower; case insensitive
2 = Store as given, compare in lower; case semi-sensitive */
if (srv_lower_case_table_names == 2) {
innobase_casedn_str(ref);
#ifndef __WIN__
*table = dict_table_get_low(ref);
memcpy(ref, database_name, database_name_len);
ref[database_name_len] = '/';
memcpy(ref + database_name_len + 1, table_name, table_name_len + 1);
} else {
if (srv_lower_case_table_names == 1) {
innobase_casedn_str(ref);
}
*table = dict_table_get_low(ref);
}
#endif /* !__WIN__ */
*success = TRUE;
*ref_name = ref;
*table = dict_table_get_low(ref);
return(ptr);
}
@ -3516,8 +3526,10 @@ col_loop1:
}
foreign->foreign_table = table;
foreign->foreign_table_name = mem_heap_strdup(foreign->heap,
table->name);
foreign->foreign_table_name = mem_heap_strdup(
foreign->heap, table->name);
dict_mem_foreign_table_name_lookup_set(foreign, TRUE);
foreign->foreign_index = index;
foreign->n_fields = (unsigned int) i;
foreign->foreign_col_names = mem_heap_alloc(foreign->heap,
@ -3774,8 +3786,9 @@ try_find_index:
foreign->referenced_index = index;
foreign->referenced_table = referenced_table;
foreign->referenced_table_name
= mem_heap_strdup(foreign->heap, referenced_table_name);
foreign->referenced_table_name = mem_heap_strdup(
foreign->heap, referenced_table_name);
dict_mem_referenced_table_name_lookup_set(foreign, TRUE);
foreign->referenced_col_names = mem_heap_alloc(foreign->heap,
i * sizeof(void*));
@ -4586,8 +4599,8 @@ dict_print_info_on_foreign_key_in_create_format(
fputs(") REFERENCES ", file);
if (dict_tables_have_same_db(foreign->foreign_table_name,
foreign->referenced_table_name)) {
if (dict_tables_have_same_db(foreign->foreign_table_name_lookup,
foreign->referenced_table_name_lookup)) {
/* Do not print the database name of the referenced table */
ut_print_name(file, trx, TRUE,
dict_remove_db_name(

View File

@ -40,6 +40,7 @@ Created 4/24/1996 Heikki Tuuri
#include "rem0cmp.h"
#include "srv0start.h"
#include "srv0srv.h"
#include "ha_prototypes.h" /* innobase_casedn_str() */
/** Following are six InnoDB system tables */
@ -435,10 +436,12 @@ dict_process_sys_fields_rec(
return(err_msg);
}
#ifdef FOREIGN_NOT_USED
/********************************************************************//**
This function parses a SYS_FOREIGN record and populates a dict_foreign_t
structure with the information from the record. For detailed information
about SYS_FOREIGN fields, please refer to dict_load_foreign() function
about SYS_FOREIGN fields, please refer to dict_load_foreign() function.
@return error message, or NULL on success */
UNIV_INTERN
const char*
@ -466,6 +469,11 @@ dict_process_sys_foreign_rec(
err_len:
return("incorrect column length in SYS_FOREIGN");
}
/* This receives a dict_foreign_t* that points to a stack variable.
So mem_heap_free(foreign->heap) is not used as elsewhere.
Since the heap used here is freed elsewhere, foreign->heap
is not assigned. */
foreign->id = mem_heap_strdupl(heap, (const char*) field, len);
rec_get_nth_field_offs_old(rec, 1/*DB_TRX_ID*/, &len);
@ -477,6 +485,9 @@ err_len:
goto err_len;
}
/* The _lookup versions of the referenced and foreign table names
are not assigned since they are not used in this dict_foreign_t */
field = rec_get_nth_field_old(rec, 3/*FOR_NAME*/, &len);
if (UNIV_UNLIKELY(len < 1 || len == UNIV_SQL_NULL)) {
goto err_len;
@ -502,6 +513,9 @@ err_len:
return(NULL);
}
#endif /* FOREIGN_NOT_USED */
#ifdef FOREIGN_NOT_USED
/********************************************************************//**
This function parses a SYS_FOREIGN_COLS record, extracts the necessary
information from the record and returns it to the caller.
@ -565,6 +579,8 @@ err_len:
return(NULL);
}
#endif /* FOREIGN_NOT_USED */
/********************************************************************//**
Determine the flags of a table described in SYS_TABLES.
@return compressed page size in kilobytes; or 0 if the tablespace is
@ -2057,12 +2073,15 @@ dict_load_foreign(
foreign->id = mem_heap_strdup(foreign->heap, id);
field = rec_get_nth_field_old(rec, 3, &len);
foreign->foreign_table_name = mem_heap_strdupl(
foreign->heap, (char*) field, len);
dict_mem_foreign_table_name_lookup_set(foreign, TRUE);
field = rec_get_nth_field_old(rec, 4, &len);
foreign->referenced_table_name = mem_heap_strdupl(
foreign->heap, (char*) field, len);
dict_mem_referenced_table_name_lookup_set(foreign, TRUE);
btr_pcur_close(&pcur);
mtr_commit(&mtr);
@ -2070,7 +2089,7 @@ dict_load_foreign(
dict_load_foreign_cols(id, foreign);
ref_table = dict_table_check_if_in_cache_low(
foreign->referenced_table_name);
foreign->referenced_table_name_lookup);
/* We could possibly wind up in a deep recursive calls if
we call dict_table_get_low() again here if there
@ -2103,7 +2122,7 @@ dict_load_foreign(
have to load it so that we are able to make type comparisons
in the next function call. */
for_table = dict_table_get_low(foreign->foreign_table_name);
for_table = dict_table_get_low(foreign->foreign_table_name_lookup);
if (for_table && ref_table && check_recursive) {
/* This is to record the longest chain of ancestors

View File

@ -33,6 +33,8 @@ Created 1/8/1996 Heikki Tuuri
#include "data0type.h"
#include "mach0data.h"
#include "dict0dict.h"
#include "srv0srv.h" /* srv_lower_case_table_names */
#include "ha_prototypes.h" /* innobase_casedn_str()*/
#ifndef UNIV_HOTBACKUP
# include "lock0lock.h"
#endif /* !UNIV_HOTBACKUP */
@ -287,6 +289,60 @@ dict_mem_foreign_create(void)
return(foreign);
}
/**********************************************************************//**
Sets the foreign_table_name_lookup pointer based on the value of
srv_lower_case_table_names. If that is 0 or 1, foreign_table_name_lookup
will point to foreign_table_name. If 2, then another string is allocated
from the heap and set to lower case. */
UNIV_INTERN
void
dict_mem_foreign_table_name_lookup_set(
/*===================================*/
dict_foreign_t* foreign, /*!< in/out: foreign struct */
ibool do_alloc) /*!< in: is an alloc needed */
{
if (srv_lower_case_table_names == 2) {
if (do_alloc) {
foreign->foreign_table_name_lookup = mem_heap_alloc(
foreign->heap,
strlen(foreign->foreign_table_name) + 1);
}
strcpy(foreign->foreign_table_name_lookup,
foreign->foreign_table_name);
innobase_casedn_str(foreign->foreign_table_name_lookup);
} else {
foreign->foreign_table_name_lookup
= foreign->foreign_table_name;
}
}
/**********************************************************************//**
Sets the referenced_table_name_lookup pointer based on the value of
srv_lower_case_table_names. If that is 0 or 1,
referenced_table_name_lookup will point to referenced_table_name. If 2,
then another string is allocated from the heap and set to lower case. */
UNIV_INTERN
void
dict_mem_referenced_table_name_lookup_set(
/*======================================*/
dict_foreign_t* foreign, /*!< in/out: foreign struct */
ibool do_alloc) /*!< in: is an alloc needed */
{
if (srv_lower_case_table_names == 2) {
if (do_alloc) {
foreign->referenced_table_name_lookup = mem_heap_alloc(
foreign->heap,
strlen(foreign->referenced_table_name) + 1);
}
strcpy(foreign->referenced_table_name_lookup,
foreign->referenced_table_name);
innobase_casedn_str(foreign->referenced_table_name_lookup);
} else {
foreign->referenced_table_name_lookup
= foreign->referenced_table_name;
}
}
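
The two helpers above carry the core of the Bug#55222 fix: when lower_case_table_names is 2, InnoDB keeps the foreign key's table names in their original case for display, but also stores a lower-cased copy (the *_name_lookup pointers) that is used for dictionary cache lookups and comparisons. A simplified sketch of how a lookup name would be derived for the three lower_case_table_names modes (hypothetical helper, not the InnoDB function):

#include <cctype>
#include <cstdio>
#include <string>

/* 0 = store and compare as given (case sensitive)
   1 = store and compare in lower case (case insensitive)
   2 = store as given, compare in lower case (case "semi-sensitive") */
static std::string lookup_name(const std::string& stored_name,
                               int lower_case_table_names)
{
        if (lower_case_table_names == 0) {
                return stored_name;              /* compare exactly as stored */
        }
        std::string lower = stored_name;
        for (char& c : lower) {
                c = static_cast<char>(std::tolower(static_cast<unsigned char>(c)));
        }
        /* Mode 1 already stores names in lower case, so this is a no-op there;
        mode 2 is where the separate lower-cased lookup copy matters. */
        return lower;
}

int main()
{
        std::printf("%s\n", lookup_name("test/Table1", 2).c_str());  /* test/table1 */
        return 0;
}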
/**********************************************************************//**
Adds a field definition to an index. NOTE: does not take a copy
of the column name if the field is a column. The memory occupied

View File

@ -299,6 +299,34 @@ struct fil_system_struct {
initialized. */
static fil_system_t* fil_system = NULL;
#ifdef UNIV_DEBUG
/** Try fil_validate() every this many times */
# define FIL_VALIDATE_SKIP 17
/******************************************************************//**
Checks the consistency of the tablespace cache some of the time.
@return TRUE if ok or the check was skipped */
static
ibool
fil_validate_skip(void)
/*===================*/
{
/** The fil_validate() call skip counter. Use a signed type
because of the race condition below. */
static int fil_validate_count = FIL_VALIDATE_SKIP;
/* There is a race condition below, but it does not matter,
because this call is only for heuristic purposes. We want to
reduce the call frequency of the costly fil_validate() check
in debug builds. */
if (--fil_validate_count > 0) {
return(TRUE);
}
fil_validate_count = FIL_VALIDATE_SKIP;
return(fil_validate());
}
#endif /* UNIV_DEBUG */
/********************************************************************//**
NOTE: you must call fil_mutex_enter_and_prepare_for_io() first!
@ -4307,7 +4335,7 @@ fil_io(
#if (1 << UNIV_PAGE_SIZE_SHIFT) != UNIV_PAGE_SIZE
# error "(1 << UNIV_PAGE_SIZE_SHIFT) != UNIV_PAGE_SIZE"
#endif
ut_ad(fil_validate());
ut_ad(fil_validate_skip());
#ifndef UNIV_HOTBACKUP
# ifndef UNIV_LOG_DEBUG
/* ibuf bitmap pages must be read in the sync aio mode: */
@ -4466,7 +4494,7 @@ fil_io(
mutex_exit(&fil_system->mutex);
ut_ad(fil_validate());
ut_ad(fil_validate_skip());
}
return(DB_SUCCESS);
@ -4490,7 +4518,7 @@ fil_aio_wait(
void* message;
ulint type;
ut_ad(fil_validate());
ut_ad(fil_validate_skip());
if (srv_use_native_aio) {
srv_set_io_thread_op_info(segment, "native aio handle");
@ -4521,7 +4549,7 @@ fil_aio_wait(
mutex_exit(&fil_system->mutex);
ut_ad(fil_validate());
ut_ad(fil_validate_skip());
/* Do the i/o handling */
/* IMPORTANT: since i/o handling for reads will read also the insert

View File

@ -3321,7 +3321,7 @@ fseg_free_page_low(
"InnoDB: database!\n", (ulong) page);
crash:
fputs("InnoDB: Please refer to\n"
"InnoDB: " REFMAN "forcing-recovery.html\n"
"InnoDB: " REFMAN "forcing-innodb-recovery.html\n"
"InnoDB: about forcing recovery.\n", stderr);
ut_error;
}

View File

@ -285,7 +285,8 @@ static PSI_rwlock_info all_innodb_rwlocks[] = {
{&checkpoint_lock_key, "checkpoint_lock", 0},
{&trx_i_s_cache_lock_key, "trx_i_s_cache_lock", 0},
{&trx_purge_latch_key, "trx_purge_latch", 0},
{&index_tree_rw_lock_key, "index_tree_rw_lock", 0}
{&index_tree_rw_lock_key, "index_tree_rw_lock", 0},
{&dict_table_stats_latch_key, "dict_table_stats", 0}
};
# endif /* UNIV_PFS_RWLOCK */
@ -2164,13 +2165,13 @@ innobase_init(
ut_a(DATA_MYSQL_TRUE_VARCHAR == (ulint)MYSQL_TYPE_VARCHAR);
#ifdef UNIV_DEBUG
#ifndef DBUG_OFF
static const char test_filename[] = "-@";
char test_tablename[sizeof test_filename
+ sizeof srv_mysql50_table_name_prefix];
if ((sizeof test_tablename) - 1
!= filename_to_tablename(test_filename, test_tablename,
sizeof test_tablename)
sizeof test_tablename, true)
|| strncmp(test_tablename,
srv_mysql50_table_name_prefix,
sizeof srv_mysql50_table_name_prefix)
@ -2180,7 +2181,7 @@ innobase_init(
sql_print_error("tablename encoding has been changed");
goto error;
}
#endif /* UNIV_DEBUG */
#endif /* DBUG_OFF */
/* Check that values don't overflow on 32-bit systems. */
if (sizeof(ulint) == 4) {
@ -3639,6 +3640,7 @@ ha_innobase::open(
UT_NOT_USED(test_if_locked);
thd = ha_thd();
srv_lower_case_table_names = lower_case_table_names;
/* Under some cases MySQL seems to call this function while
holding btr_search_latch. This breaks the latching order as
@ -6498,10 +6500,11 @@ create_clustered_index_when_no_primary(
/*****************************************************************//**
Return a display name for the row format
@return row format name */
const char *get_row_format_name(
/*============================*/
enum row_type row_format) /*!< in: Row Format */
UNIV_INTERN
const char*
get_row_format_name(
/*================*/
enum row_type row_format) /*!< in: Row Format */
{
switch (row_format) {
case ROW_TYPE_COMPACT:
@ -6516,12 +6519,38 @@ enum row_type row_format) /*!< in: Row Format */
return("DEFAULT");
case ROW_TYPE_FIXED:
return("FIXED");
default:
case ROW_TYPE_PAGE:
case ROW_TYPE_NOT_USED:
break;
}
return("NOT USED");
}
/** If file-per-table is missing, issue warning and set ret false */
#define CHECK_ERROR_ROW_TYPE_NEEDS_FILE_PER_TABLE \
if (!srv_file_per_table) { \
push_warning_printf( \
thd, MYSQL_ERROR::WARN_LEVEL_WARN, \
ER_ILLEGAL_HA_CREATE_OPTION, \
"InnoDB: ROW_FORMAT=%s requires" \
" innodb_file_per_table.", \
get_row_format_name(row_format)); \
ret = FALSE; \
}
/** If file-format is Antelope, issue warning and set ret false */
#define CHECK_ERROR_ROW_TYPE_NEEDS_GT_ANTELOPE \
if (srv_file_format < DICT_TF_FORMAT_ZIP) { \
push_warning_printf( \
thd, MYSQL_ERROR::WARN_LEVEL_WARN, \
ER_ILLEGAL_HA_CREATE_OPTION, \
"InnoDB: ROW_FORMAT=%s requires" \
" innodb_file_format > Antelope.", \
get_row_format_name(row_format)); \
ret = FALSE; \
}
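
The two macros above centralize the dependency that the switch statements further down apply to ROW_FORMAT=COMPRESSED and ROW_FORMAT=DYNAMIC: both require innodb_file_per_table and a file format newer than Antelope. A minimal condensation of that rule, with hypothetical names rather than the server's option variables:

/* Hypothetical condensation of the checks above; not the InnoDB code. */
enum row_fmt { FMT_REDUNDANT, FMT_COMPACT, FMT_DYNAMIC, FMT_COMPRESSED };

int
row_fmt_settings_ok(enum row_fmt fmt, int file_per_table, int barracuda)
{
        if (fmt == FMT_DYNAMIC || fmt == FMT_COMPRESSED) {
                /* both server settings must be enabled; otherwise the
                macros above push a warning and clear "ret" */
                return(file_per_table && barracuda);
        }
        return(1);
}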
/*****************************************************************//**
Validates the create options. We may build on this function
in future. For now, it checks two specifiers:
@ -6539,7 +6568,7 @@ create_options_are_valid(
{
ibool kbs_specified = FALSE;
ibool ret = TRUE;
enum row_type row_type = form->s->row_type;
enum row_type row_format = form->s->row_type;
ut_ad(thd != NULL);
@ -6548,23 +6577,6 @@ create_options_are_valid(
return(TRUE);
}
/* Check for a valid Innodb ROW_FORMAT specifier. For example,
ROW_TYPE_FIXED can be sent to Innodb */
switch (row_type) {
case ROW_TYPE_COMPACT:
case ROW_TYPE_COMPRESSED:
case ROW_TYPE_DYNAMIC:
case ROW_TYPE_REDUNDANT:
case ROW_TYPE_DEFAULT:
break;
default:
push_warning(
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: invalid ROW_FORMAT specifier.");
ret = FALSE;
}
ut_ad(form != NULL);
ut_ad(create_info != NULL);
@ -6577,7 +6589,23 @@ create_options_are_valid(
case 4:
case 8:
case 16:
/* Valid value. */
/* Valid KEY_BLOCK_SIZE, check its dependencies. */
if (!srv_file_per_table) {
push_warning(
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: KEY_BLOCK_SIZE requires"
" innodb_file_per_table.");
ret = FALSE;
}
if (srv_file_format < DICT_TF_FORMAT_ZIP) {
push_warning(
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: KEY_BLOCK_SIZE requires"
" innodb_file_format > Antelope.");
ret = FALSE;
}
break;
default:
push_warning_printf(
@ -6587,72 +6615,43 @@ create_options_are_valid(
" Valid values are [1, 2, 4, 8, 16]",
create_info->key_block_size);
ret = FALSE;
break;
}
}
/* If KEY_BLOCK_SIZE was specified, check for its
dependencies. */
if (kbs_specified && !srv_file_per_table) {
push_warning(
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: KEY_BLOCK_SIZE requires"
" innodb_file_per_table.");
ret = FALSE;
}
if (kbs_specified && srv_file_format < DICT_TF_FORMAT_ZIP) {
push_warning(
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: KEY_BLOCK_SIZE requires"
" innodb_file_format > Antelope.");
ret = FALSE;
}
switch (row_type) {
/* Check for a valid Innodb ROW_FORMAT specifier and
other incompatibilities. */
switch (row_format) {
case ROW_TYPE_COMPRESSED:
case ROW_TYPE_DYNAMIC:
/* These two ROW_FORMATs require srv_file_per_table
and srv_file_format > Antelope */
if (!srv_file_per_table) {
push_warning_printf(
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: ROW_FORMAT=%s requires"
" innodb_file_per_table.",
get_row_format_name(row_type));
ret = FALSE;
}
if (srv_file_format < DICT_TF_FORMAT_ZIP) {
push_warning_printf(
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: ROW_FORMAT=%s requires"
" innodb_file_format > Antelope.",
get_row_format_name(row_type));
ret = FALSE;
}
default:
CHECK_ERROR_ROW_TYPE_NEEDS_FILE_PER_TABLE;
CHECK_ERROR_ROW_TYPE_NEEDS_GT_ANTELOPE;
break;
}
switch (row_type) {
case ROW_TYPE_REDUNDANT:
case ROW_TYPE_COMPACT:
case ROW_TYPE_DYNAMIC:
/* KEY_BLOCK_SIZE is only allowed with Compressed or Default */
CHECK_ERROR_ROW_TYPE_NEEDS_FILE_PER_TABLE;
CHECK_ERROR_ROW_TYPE_NEEDS_GT_ANTELOPE;
/* fall through since dynamic also shuns KBS */
case ROW_TYPE_COMPACT:
case ROW_TYPE_REDUNDANT:
if (kbs_specified) {
push_warning_printf(
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: cannot specify ROW_FORMAT = %s"
" with KEY_BLOCK_SIZE.",
get_row_format_name(row_type));
ret = FALSE;
get_row_format_name(row_format));
ret = FALSE;
}
default:
break;
case ROW_TYPE_DEFAULT:
break;
case ROW_TYPE_FIXED:
case ROW_TYPE_PAGE:
case ROW_TYPE_NOT_USED:
push_warning(
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: invalid ROW_FORMAT specifier.");
ret = FALSE;
break;
}
@ -6703,7 +6702,7 @@ ha_innobase::create(
const ulint file_format = srv_file_format;
const char* stmt;
size_t stmt_len;
enum row_type row_type;
enum row_type row_format;
DBUG_ENTER("ha_innobase::create");
@ -6753,11 +6752,7 @@ ha_innobase::create(
trx = innobase_trx_allocate(thd);
if (lower_case_table_names) {
srv_lower_case_table_names = TRUE;
} else {
srv_lower_case_table_names = FALSE;
}
srv_lower_case_table_names = lower_case_table_names;
strcpy(name2, name);
@ -6821,20 +6816,19 @@ ha_innobase::create(
push_warning_printf(
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: ignoring"
" KEY_BLOCK_SIZE=%lu.",
"InnoDB: ignoring KEY_BLOCK_SIZE=%lu.",
create_info->key_block_size);
}
}
row_type = form->s->row_type;
row_format = form->s->row_type;
if (flags) {
/* if ROW_FORMAT is set to default,
automatically change it to COMPRESSED.*/
if (row_type == ROW_TYPE_DEFAULT) {
row_type = ROW_TYPE_COMPRESSED;
} else if (row_type != ROW_TYPE_COMPRESSED) {
if (row_format == ROW_TYPE_DEFAULT) {
row_format = ROW_TYPE_COMPRESSED;
} else if (row_format != ROW_TYPE_COMPRESSED) {
/* ROW_FORMAT other than COMPRESSED
ignores KEY_BLOCK_SIZE. It does not
make sense to reject conflicting
@ -6851,7 +6845,7 @@ ha_innobase::create(
}
} else {
/* flags == 0 means no KEY_BLOCK_SIZE.*/
if (row_type == ROW_TYPE_COMPRESSED) {
if (row_format == ROW_TYPE_COMPRESSED) {
/* ROW_FORMAT=COMPRESSED without
KEY_BLOCK_SIZE implies half the
maximum KEY_BLOCK_SIZE. */
@ -6866,7 +6860,7 @@ ha_innobase::create(
}
}
switch (row_type) {
switch (row_format) {
case ROW_TYPE_REDUNDANT:
break;
case ROW_TYPE_COMPRESSED:
@ -6877,25 +6871,25 @@ ha_innobase::create(
ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: ROW_FORMAT=%s requires"
" innodb_file_per_table.",
get_row_format_name(row_type));
get_row_format_name(row_format));
} else if (file_format < DICT_TF_FORMAT_ZIP) {
push_warning_printf(
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: ROW_FORMAT=%s requires"
" innodb_file_format > Antelope.",
get_row_format_name(row_type));
get_row_format_name(row_format));
} else {
flags |= DICT_TF_COMPACT
| (DICT_TF_FORMAT_ZIP
<< DICT_TF_FORMAT_SHIFT);
| (DICT_TF_FORMAT_ZIP
<< DICT_TF_FORMAT_SHIFT);
break;
}
/* fall through */
case ROW_TYPE_NOT_USED:
case ROW_TYPE_FIXED:
default:
case ROW_TYPE_PAGE:
push_warning(
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
@ -7036,23 +7030,25 @@ ha_innobase::create(
setup at this stage and so we use thd. */
/* We need to copy the AUTOINC value from the old table if
this is an ALTER TABLE or CREATE INDEX because CREATE INDEX
does a table copy too. */
this is an ALTER|OPTIMIZE TABLE or CREATE INDEX because CREATE INDEX
does a table copy too. If the query was one of:
CREATE TABLE ...AUTO_INCREMENT = x; or
ALTER TABLE...AUTO_INCREMENT = x; or
OPTIMIZE TABLE t; or
CREATE INDEX x on t(...);
Find out a table definition from the dictionary and get
the current value of the auto increment field. Set a new
value to the auto increment field if the value is greater
than the maximum value in the column. */
if (((create_info->used_fields & HA_CREATE_USED_AUTO)
|| thd_sql_command(thd) == SQLCOM_ALTER_TABLE
|| thd_sql_command(thd) == SQLCOM_OPTIMIZE
|| thd_sql_command(thd) == SQLCOM_CREATE_INDEX)
&& create_info->auto_increment_value > 0) {
/* Query was one of :
CREATE TABLE ...AUTO_INCREMENT = x; or
ALTER TABLE...AUTO_INCREMENT = x; or
CREATE INDEX x on t(...);
Find out a table definition from the dictionary and get
the current value of the auto increment field. Set a new
value to the auto increment field if the value is greater
than the maximum value in the column. */
auto_inc_value = create_info->auto_increment_value;
dict_table_autoinc_lock(innobase_table);
@ -7181,11 +7177,7 @@ ha_innobase::delete_table(
trx = innobase_trx_allocate(thd);
if (lower_case_table_names) {
srv_lower_case_table_names = TRUE;
} else {
srv_lower_case_table_names = FALSE;
}
srv_lower_case_table_names = lower_case_table_names;
name_len = strlen(name);
@ -7308,11 +7300,7 @@ innobase_rename_table(
char* norm_to;
char* norm_from;
if (lower_case_table_names) {
srv_lower_case_table_names = TRUE;
} else {
srv_lower_case_table_names = FALSE;
}
srv_lower_case_table_names = lower_case_table_names;
// Magic number 64 arbitrary
norm_to = (char*) my_malloc(strlen(to) + 64, MYF(0));
@ -7849,14 +7837,14 @@ ha_innobase::info_low(
are asked by MySQL to avoid locking. Another reason to
avoid the call is that it uses quite a lot of CPU.
See Bug#38185. */
if (flag & HA_STATUS_NO_LOCK ||
!(flag & HA_STATUS_VARIABLE_EXTRA)) {
if (flag & HA_STATUS_NO_LOCK
|| !(flag & HA_STATUS_VARIABLE_EXTRA)) {
/* We do not update delete_length if no
locking is requested so the "old" value can
remain. delete_length is initialized to 0 in
the ha_statistics' constructor. Also we only
need delete_length to be set when
HA_STATUS_VARIABLE_EXTRA is set */
need delete_length to be set when
HA_STATUS_VARIABLE_EXTRA is set */
} else if (UNIV_UNLIKELY
(srv_force_recovery >= SRV_FORCE_NO_IBUF_MERGE)) {
/* Avoid accessing the tablespace if

View File

@ -495,8 +495,16 @@ fill_innodb_trx_from_cache(
row->trx_mysql_thread_id));
/* trx_query */
OK(field_store_string(fields[IDX_TRX_QUERY],
row->trx_query));
if (row->trx_query) {
/* store will do appropriate character set
conversion check */
fields[IDX_TRX_QUERY]->store(
row->trx_query, strlen(row->trx_query),
row->trx_query_cs);
fields[IDX_TRX_QUERY]->set_notnull();
} else {
fields[IDX_TRX_QUERY]->set_null();
}
/* trx_operation_state */
OK(field_store_string(fields[IDX_TRX_OPERATION_STATE],

View File

@ -656,22 +656,49 @@ ibuf_parse_bitmap_init(
return(ptr);
}
#ifndef UNIV_HOTBACKUP
# ifdef UNIV_DEBUG
/** Gets the desired bits for a given page from a bitmap page.
@param page in: bitmap page
@param offset in: page whose bits to get
@param zs in: compressed page size in bytes; 0 for uncompressed pages
@param bit in: IBUF_BITMAP_FREE, IBUF_BITMAP_BUFFERED, ...
@param mtr in: mini-transaction holding an x-latch on the bitmap page
@return value of bits */
# define ibuf_bitmap_page_get_bits(page, offset, zs, bit, mtr) \
ibuf_bitmap_page_get_bits_low(page, offset, zs, \
MTR_MEMO_PAGE_X_FIX, mtr, bit)
# else /* UNIV_DEBUG */
/** Gets the desired bits for a given page from a bitmap page.
@param page in: bitmap page
@param offset in: page whose bits to get
@param zs in: compressed page size in bytes; 0 for uncompressed pages
@param bit in: IBUF_BITMAP_FREE, IBUF_BITMAP_BUFFERED, ...
@param mtr in: mini-transaction holding an x-latch on the bitmap page
@return value of bits */
# define ibuf_bitmap_page_get_bits(page, offset, zs, bit, mtr) \
ibuf_bitmap_page_get_bits_low(page, offset, zs, bit)
# endif /* UNIV_DEBUG */
/********************************************************************//**
Gets the desired bits for a given page from a bitmap page.
@return value of bits */
UNIV_INLINE
ulint
ibuf_bitmap_page_get_bits(
/*======================*/
ibuf_bitmap_page_get_bits_low(
/*==========================*/
const page_t* page, /*!< in: bitmap page */
ulint page_no,/*!< in: page whose bits to get */
ulint zip_size,/*!< in: compressed page size in bytes;
0 for uncompressed pages */
ulint bit, /*!< in: IBUF_BITMAP_FREE,
#ifdef UNIV_DEBUG
ulint latch_type,
/*!< in: MTR_MEMO_PAGE_X_FIX,
MTR_MEMO_BUF_FIX, ... */
mtr_t* mtr, /*!< in: mini-transaction holding latch_type
on the bitmap page */
#endif /* UNIV_DEBUG */
ulint bit) /*!< in: IBUF_BITMAP_FREE,
IBUF_BITMAP_BUFFERED, ... */
mtr_t* mtr __attribute__((unused)))
/*!< in: mtr containing an
x-latch to the bitmap page */
{
ulint byte_offset;
ulint bit_offset;
@ -683,7 +710,7 @@ ibuf_bitmap_page_get_bits(
# error "IBUF_BITS_PER_PAGE % 2 != 0"
#endif
ut_ad(ut_is_2pow(zip_size));
ut_ad(mtr_memo_contains_page(mtr, page, MTR_MEMO_PAGE_X_FIX));
ut_ad(mtr_memo_contains_page(mtr, page, latch_type));
if (!zip_size) {
bit_offset = (page_no % UNIV_PAGE_SIZE) * IBUF_BITS_PER_PAGE
@ -1109,21 +1136,29 @@ Must not be called when recv_no_ibuf_operations==TRUE.
@return TRUE if level 2 or level 3 page */
UNIV_INTERN
ibool
ibuf_page(
/*======*/
ulint space, /*!< in: space id */
ulint zip_size,/*!< in: compressed page size in bytes, or 0 */
ulint page_no,/*!< in: page number */
mtr_t* mtr) /*!< in: mtr which will contain an x-latch to the
bitmap page if the page is not one of the fixed
address ibuf pages, or NULL, in which case a new
transaction is created. */
ibuf_page_low(
/*==========*/
ulint space, /*!< in: space id */
ulint zip_size,/*!< in: compressed page size in bytes, or 0 */
ulint page_no,/*!< in: page number */
#ifdef UNIV_DEBUG
ibool x_latch,/*!< in: FALSE if relaxed check
(avoid latching the bitmap page) */
#endif /* UNIV_DEBUG */
const char* file, /*!< in: file name */
ulint line, /*!< in: line where called */
mtr_t* mtr) /*!< in: mtr which will contain an
x-latch to the bitmap page if the page
is not one of the fixed address ibuf
pages, or NULL, in which case a new
transaction is created. */
{
ibool ret;
mtr_t local_mtr;
page_t* bitmap_page;
ut_ad(!recv_no_ibuf_operations);
ut_ad(x_latch || mtr == NULL);
if (ibuf_fixed_addr_page(space, zip_size, page_no)) {
@ -1135,12 +1170,55 @@ ibuf_page(
ut_ad(fil_space_get_type(IBUF_SPACE_ID) == FIL_TABLESPACE);
#ifdef UNIV_DEBUG
if (!x_latch) {
mtr_start(&local_mtr);
/* Get the bitmap page without a page latch, so that
we will not be violating the latching order when
another bitmap page has already been latched by this
thread. The page will be buffer-fixed, and thus it
cannot be removed or relocated while we are looking at
it. The contents of the page could change, but the
IBUF_BITMAP_IBUF bit that we are interested in should
not be modified by any other thread. Nobody should be
calling ibuf_add_free_page() or ibuf_remove_free_page()
while the page is linked to the insert buffer b-tree. */
bitmap_page = buf_block_get_frame(
buf_page_get_gen(
space, zip_size,
ibuf_bitmap_page_no_calc(zip_size, page_no),
RW_NO_LATCH, NULL, BUF_GET_NO_LATCH,
file, line, &local_mtr));
# ifdef UNIV_SYNC_DEBUG
/* This is for tracking Bug #58212. This check and message can
be removed once it has been established that our assumptions
about this condition are correct. The bug occurred only once
and has not been reproduced since. */
void* latch = sync_thread_levels_contains(SYNC_IBUF_BITMAP);
if (latch) {
fprintf(stderr, "Bug#58212 UNIV_SYNC_DEBUG"
" levels %p (%u,%u)\n",
latch, (unsigned) space, (unsigned) page_no);
}
# endif /* UNIV_SYNC_DEBUG */
ret = ibuf_bitmap_page_get_bits_low(
bitmap_page, page_no, zip_size,
MTR_MEMO_BUF_FIX, &local_mtr, IBUF_BITMAP_IBUF);
mtr_commit(&local_mtr);
return(ret);
}
#endif /* UNIV_DEBUG */
if (mtr == NULL) {
mtr = &local_mtr;
mtr_start(mtr);
}
bitmap_page = ibuf_bitmap_get_map_page(space, page_no, zip_size, mtr);
bitmap_page = ibuf_bitmap_get_map_page_func(space, page_no, zip_size,
file, line, mtr);
ret = ibuf_bitmap_page_get_bits(bitmap_page, page_no, zip_size,
IBUF_BITMAP_IBUF, mtr);

View File

@ -333,10 +333,14 @@ ulint
btr_cur_del_mark_set_clust_rec(
/*===========================*/
ulint flags, /*!< in: undo logging and locking flags */
btr_cur_t* cursor, /*!< in: cursor */
buf_block_t* block, /*!< in/out: buffer block of the record */
rec_t* rec, /*!< in/out: record */
dict_index_t* index, /*!< in: clustered index of the record */
const ulint* offsets,/*!< in: rec_get_offsets(rec) */
ibool val, /*!< in: value to set */
que_thr_t* thr, /*!< in: query thread */
mtr_t* mtr); /*!< in: mtr */
mtr_t* mtr) /*!< in: mtr */
__attribute__((nonnull));
/***********************************************************//**
Sets a secondary index record delete mark to TRUE or FALSE.
@return DB_SUCCESS, DB_LOCK_WAIT, or error number */
@ -469,40 +473,22 @@ btr_estimate_number_of_different_key_vals(
/*======================================*/
dict_index_t* index); /*!< in: index */
/*******************************************************************//**
Marks not updated extern fields as not-owned by this record. The ownership
is transferred to the updated record which is inserted elsewhere in the
Marks non-updated off-page fields as disowned by this record. The ownership
must be transferred to the updated record which is inserted elsewhere in the
index tree. In purge only the owner of externally stored field is allowed
to free the field.
@return TRUE if BLOB ownership was transferred */
to free the field. */
UNIV_INTERN
ibool
btr_cur_mark_extern_inherited_fields(
/*=================================*/
void
btr_cur_disown_inherited_fields(
/*============================*/
page_zip_des_t* page_zip,/*!< in/out: compressed page whose uncompressed
part will be updated, or NULL */
rec_t* rec, /*!< in/out: record in a clustered index */
dict_index_t* index, /*!< in: index of the page */
const ulint* offsets,/*!< in: array returned by rec_get_offsets() */
const upd_t* update, /*!< in: update vector */
mtr_t* mtr); /*!< in: mtr, or NULL if not logged */
/*******************************************************************//**
The complement of the previous function: in an update entry may inherit
some externally stored fields from a record. We must mark them as inherited
in entry, so that they are not freed in a rollback. */
UNIV_INTERN
void
btr_cur_mark_dtuple_inherited_extern(
/*=================================*/
dtuple_t* entry, /*!< in/out: updated entry to be
inserted to clustered index */
const upd_t* update); /*!< in: update vector */
/*******************************************************************//**
Marks all extern fields in a dtuple as owned by the record. */
UNIV_INTERN
void
btr_cur_unmark_dtuple_extern_fields(
/*================================*/
dtuple_t* entry); /*!< in/out: clustered index entry */
mtr_t* mtr) /*!< in/out: mini-transaction */
__attribute__((nonnull(2,3,4,5,6)));
/*******************************************************************//**
Stores the fields in big_rec_vec to the tablespace and puts pointers to
them in rec. The extern flags in rec will have to be set beforehand.

View File

@ -116,6 +116,76 @@ enum buf_page_state {
before putting to the free list */
};
/** This structure defines information we will fetch from each buffer pool. It
will be used to print table IO stats */
struct buf_pool_info_struct{
/* General buffer pool info */
ulint pool_unique_id; /*!< Buffer Pool ID */
ulint pool_size; /*!< Buffer Pool size in pages */
ulint lru_len; /*!< Length of buf_pool->LRU */
ulint old_lru_len; /*!< buf_pool->LRU_old_len */
ulint free_list_len; /*!< Length of buf_pool->free list */
ulint flush_list_len; /*!< Length of buf_pool->flush_list */
ulint n_pend_unzip; /*!< buf_pool->n_pend_unzip, pages
pending decompress */
ulint n_pend_reads; /*!< buf_pool->n_pend_reads, pages
pending read */
ulint n_pending_flush_lru; /*!< Pages pending flush in LRU */
ulint n_pending_flush_list; /*!< Pages pending flush in FLUSH
LIST */
ulint n_pending_flush_single_page;/*!< Pages pending flush in
BUF_FLUSH_SINGLE_PAGE list */
ulint n_pages_made_young; /*!< number of pages made young */
ulint n_pages_not_made_young; /*!< number of pages not made young */
ulint n_pages_read; /*!< buf_pool->n_pages_read */
ulint n_pages_created; /*!< buf_pool->n_pages_created */
ulint n_pages_written; /*!< buf_pool->n_pages_written */
ulint n_page_gets; /*!< buf_pool->n_page_gets */
ulint n_ra_pages_read; /*!< buf_pool->n_ra_pages_read, number
of pages readahead */
ulint n_ra_pages_evicted; /*!< buf_pool->n_ra_pages_evicted,
number of readahead pages evicted
without access */
ulint n_page_get_delta; /*!< num of buffer pool page gets since
last printout */
/* Buffer pool access stats */
double page_made_young_rate; /*!< page made young rate in pages
per second */
double page_not_made_young_rate;/*!< page not made young rate
in pages per second */
double pages_read_rate; /*!< num of pages read per second */
double pages_created_rate; /*!< num of pages created per second */
double pages_written_rate; /*!< num of pages written per second */
ulint page_read_delta; /*!< num of pages read since last
printout */
ulint young_making_delta; /*!< num of pages made young since
last printout */
ulint not_young_making_delta; /*!< num of pages not made young since
last printout */
/* Statistics about read ahead algorithm. */
double pages_readahead_rate; /*!< readahead rate in pages per
second */
double pages_evicted_rate; /*!< rate of readahead page evicted
without access, in pages per second */
/* Stats about LRU eviction */
ulint unzip_lru_len; /*!< length of buf_pool->unzip_LRU
list */
/* Counters for LRU policy */
ulint io_sum; /*!< buf_LRU_stat_sum.io */
ulint io_cur; /*!< buf_LRU_stat_cur.io, num of IO
for current interval */
ulint unzip_sum; /*!< buf_LRU_stat_sum.unzip */
ulint unzip_cur; /*!< buf_LRU_stat_cur.unzip, num
pages decompressed in current
interval */
};
typedef struct buf_pool_info_struct buf_pool_info_t;
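
The *_rate members pair with the *_delta counters: a rate is presumably the counter delta divided by the seconds elapsed since the last printout. A minimal sketch of that arithmetic with hypothetical names, not the actual status-printing code:

/* hypothetical rate computation; counter snapshots and the time of the
last printout would be kept by the caller */
double
pages_per_second(unsigned long counter_now, unsigned long counter_prev,
                 double seconds_since_last_printout)
{
        if (seconds_since_last_printout <= 0.0) {
                return(0.0);
        }
        return((double) (counter_now - counter_prev)
               / seconds_since_last_printout);
}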
#ifndef UNIV_HOTBACKUP
/********************************************************************//**
Acquire mutex on all buffer pool instances */

View File

@ -154,14 +154,19 @@ dfield_dup(
dfield_t* field, /*!< in/out: data field */
mem_heap_t* heap); /*!< in: memory heap where allocated */
/*********************************************************************//**
Tests if data length and content is equal for two dfields.
@return TRUE if equal */
Tests if two data fields are equal.
If len==0, tests the data length and content for equality.
If len>0, tests the first len bytes of the content for equality.
@return TRUE if both fields are NULL or if they are equal */
UNIV_INLINE
ibool
dfield_datas_are_binary_equal(
/*==========================*/
const dfield_t* field1, /*!< in: field */
const dfield_t* field2);/*!< in: field */
const dfield_t* field2, /*!< in: field */
ulint len) /*!< in: maximum prefix to compare,
or 0 to compare the whole field length */
__attribute__((nonnull, warn_unused_result));
/*********************************************************************//**
Tests if dfield data length and content is equal to the given.
@return TRUE if equal */

View File

@ -229,20 +229,30 @@ dfield_dup(
}
/*********************************************************************//**
Tests if data length and content is equal for two dfields.
@return TRUE if equal */
Tests if two data fields are equal.
If len==0, tests the data length and content for equality.
If len>0, tests the first len bytes of the content for equality.
@return TRUE if both fields are NULL or if they are equal */
UNIV_INLINE
ibool
dfield_datas_are_binary_equal(
/*==========================*/
const dfield_t* field1, /*!< in: field */
const dfield_t* field2) /*!< in: field */
const dfield_t* field2, /*!< in: field */
ulint len) /*!< in: maximum prefix to compare,
or 0 to compare the whole field length */
{
ulint len;
ulint len2 = len;
len = field1->len;
if (field1->len == UNIV_SQL_NULL || len == 0 || field1->len < len) {
len = field1->len;
}
return(len == field2->len
if (field2->len == UNIV_SQL_NULL || len2 == 0 || field2->len < len2) {
len2 = field2->len;
}
return(len == len2
&& (len == UNIV_SQL_NULL
|| !memcmp(field1->data, field2->data, len)));
}

View File

@ -238,6 +238,26 @@ dict_foreign_t*
dict_mem_foreign_create(void);
/*=========================*/
/**********************************************************************//**
Sets the foreign_table_name_lookup pointer based on the value of
srv_lower_case_table_names. */
UNIV_INTERN
void
dict_mem_foreign_table_name_lookup_set(
/*===================================*/
dict_foreign_t* foreign, /*!< in/out: foreign struct */
ibool do_alloc); /*!< in: is an alloc needed */
/**********************************************************************//**
Sets the reference_table_name_lookup pointer based on the value of
srv_lower_case_table_names. */
UNIV_INTERN
void
dict_mem_referenced_table_name_lookup_set(
/*======================================*/
dict_foreign_t* foreign, /*!< in/out: foreign struct */
ibool do_alloc); /*!< in: is an alloc needed */
/** Data structure for a column in a table */
struct dict_col_struct{
/*----------------------*/
@ -393,10 +413,14 @@ struct dict_foreign_struct{
unsigned type:6; /*!< 0 or DICT_FOREIGN_ON_DELETE_CASCADE
or DICT_FOREIGN_ON_DELETE_SET_NULL */
char* foreign_table_name;/*!< foreign table name */
char* foreign_table_name_lookup;
/*!< foreign table name used for dict lookup */
dict_table_t* foreign_table; /*!< table where the foreign key is */
const char** foreign_col_names;/*!< names of the columns in the
foreign key */
char* referenced_table_name;/*!< referenced table name */
char* referenced_table_name_lookup;
/*!< referenced table name for dict lookup*/
dict_table_t* referenced_table;/*!< table where the referenced key
is */
const char** referenced_col_names;/*!< names of the referenced

View File

@ -244,15 +244,44 @@ Must not be called when recv_no_ibuf_operations==TRUE.
@return TRUE if level 2 or level 3 page */
UNIV_INTERN
ibool
ibuf_page(
/*======*/
ulint space, /*!< in: space id */
ulint zip_size,/*!< in: compressed page size in bytes, or 0 */
ulint page_no,/*!< in: page number */
mtr_t* mtr); /*!< in: mtr which will contain an x-latch to the
bitmap page if the page is not one of the fixed
address ibuf pages, or NULL, in which case a new
transaction is created. */
ibuf_page_low(
/*==========*/
ulint space, /*!< in: space id */
ulint zip_size,/*!< in: compressed page size in bytes, or 0 */
ulint page_no,/*!< in: page number */
#ifdef UNIV_DEBUG
ibool x_latch,/*!< in: FALSE if relaxed check
(avoid latching the bitmap page) */
#endif /* UNIV_DEBUG */
const char* file, /*!< in: file name */
ulint line, /*!< in: line where called */
mtr_t* mtr) /*!< in: mtr which will contain an
x-latch to the bitmap page if the page
is not one of the fixed address ibuf
pages, or NULL, in which case a new
transaction is created. */
__attribute__((warn_unused_result));
#ifdef UNIV_DEBUG
/** Checks if a page is a level 2 or 3 page in the ibuf hierarchy of
pages. Must not be called when recv_no_ibuf_operations==TRUE.
@param space tablespace identifier
@param zip_size compressed page size in bytes, or 0
@param page_no page number
@param mtr mini-transaction or NULL
@return TRUE if level 2 or level 3 page */
# define ibuf_page(space, zip_size, page_no, mtr) \
ibuf_page_low(space, zip_size, page_no, TRUE, __FILE__, __LINE__, mtr)
#else /* UNIV_DEBUG */
/** Checks if a page is a level 2 or 3 page in the ibuf hierarchy of
pages. Must not be called when recv_no_ibuf_operations==TRUE.
@param space tablespace identifier
@param zip_size compressed page size in bytes, or 0
@param page_no page number
@param mtr mini-transaction or NULL
@return TRUE if level 2 or level 3 page */
# define ibuf_page(space, zip_size, page_no, mtr) \
ibuf_page_low(space, zip_size, page_no, __FILE__, __LINE__, mtr)
#endif /* UNIV_DEBUG */
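
The ibuf_page() wrapper follows a pattern used throughout these hunks: the _low function takes extra debug-only parameters plus the caller's __FILE__/__LINE__, and a macro keeps every call site unchanged. A reduced sketch of the pattern with hypothetical names:

/* hypothetical illustration of the wrapper pattern; not the ibuf code */
#ifdef MY_DEBUG
int my_check_low(int page_no, int strict, const char* file, unsigned line);
# define my_check(page_no) \
        my_check_low(page_no, /* strict */ 1, __FILE__, __LINE__)
#else
int my_check_low(int page_no, const char* file, unsigned line);
# define my_check(page_no) \
        my_check_low(page_no, __FILE__, __LINE__)
#endif
/* call sites simply write my_check(n); the caller's file and line are
forwarded, as ibuf_page_low() forwards them to buf_page_get_gen() */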
/***********************************************************************//**
Frees excess pages from the ibuf free list. This function is called when an OS
thread calls fsp services to allocate a new file segment, or a new page to a

View File

@ -350,27 +350,27 @@ mem_heap_get_top(
ulint n) /*!< in: size of the topmost element */
{
mem_block_t* block;
void* buf;
byte* buf;
ut_ad(mem_heap_check(heap));
block = UT_LIST_GET_LAST(heap->base);
buf = (byte*)block + mem_block_get_free(block) - MEM_SPACE_NEEDED(n);
buf = (byte*) block + mem_block_get_free(block) - MEM_SPACE_NEEDED(n);
#ifdef UNIV_MEM_DEBUG
ut_ad(mem_block_get_start(block) <=(ulint)((byte*)buf - (byte*)block));
ut_ad(mem_block_get_start(block) <= (ulint) (buf - (byte*) block));
/* In the debug version, advance buf to point at the storage which
was given to the caller in the allocation*/
buf = (byte*)buf + MEM_FIELD_HEADER_SIZE;
buf += MEM_FIELD_HEADER_SIZE;
/* Check that the field lengths agree */
ut_ad(n == (ulint)mem_field_header_get_len(buf));
ut_ad(n == mem_field_header_get_len(buf));
#endif
return(buf);
return((void*) buf);
}
/*****************************************************************//**

View File

@ -160,7 +160,7 @@ mtr_memo_contains(
while (offset > 0) {
offset -= sizeof(mtr_memo_slot_t);
slot = dyn_array_get_element(memo, offset);
slot = (mtr_memo_slot_t*) dyn_array_get_element(memo, offset);
if ((object == slot->object) && (type == slot->type)) {

View File

@ -373,6 +373,7 @@ typedef HANDLE os_file_dir_t; /*!< directory stream */
typedef DIR* os_file_dir_t; /*!< directory stream */
#endif
#ifdef __WIN__
/***********************************************************************//**
Gets the operating system version. Currently works only on Windows.
@return OS_WIN95, OS_WIN31, OS_WINNT, OS_WIN2000, OS_WINXP, OS_WINVISTA,
@ -381,6 +382,7 @@ UNIV_INTERN
ulint
os_get_os_version(void);
/*===================*/
#endif /* __WIN__ */
#ifndef UNIV_HOTBACKUP
/****************************************************************//**
Creates the seek mutexes used in positioned reads and writes. */

View File

@ -286,10 +286,13 @@ row_upd_changes_ord_field_binary(
row and the data values in update are not
known when this function is called, e.g., at
compile time */
const row_ext_t*ext, /*!< NULL, or prefixes of the externally
stored columns in the old row */
dict_index_t* index, /*!< in: index of the record */
const upd_t* update);/*!< in: update vector for the row; NOTE: the
const upd_t* update) /*!< in: update vector for the row; NOTE: the
field numbers in this MUST be clustered index
positions! */
__attribute__((nonnull(3,4), warn_unused_result));
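
Here nonnull(3,4) lets the compiler warn when a literal NULL is passed for the index or update arguments (row and ext may legitimately be NULL), and warn_unused_result flags callers that ignore the return value. A self-contained example of the same annotations on an unrelated, hypothetical function:

/* hypothetical function; GCC/Clang warn when s or out is a literal NULL,
or when the return value is ignored */
int
parse_length(const char* s, unsigned long* out)
        __attribute__((nonnull(1, 2), warn_unused_result));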
/***********************************************************//**
Checks if an update vector changes an ordering field of an index record.
This function is fast if the update vector is short or the number of ordering
@ -462,11 +465,16 @@ struct upd_node_struct{
#define UPD_NODE_INSERT_CLUSTERED 3 /* clustered index record should be
inserted, old record is already delete
marked */
#define UPD_NODE_UPDATE_ALL_SEC 4 /* an ordering field of the clustered
#define UPD_NODE_INSERT_BLOB 4 /* clustered index record should be
inserted, old record is already
delete-marked; non-updated BLOBs
should be inherited by the new record
and disowned by the old record */
#define UPD_NODE_UPDATE_ALL_SEC 5 /* an ordering field of the clustered
index record was changed, or this is
a delete operation: should update
all the secondary index records */
#define UPD_NODE_UPDATE_SOME_SEC 5 /* secondary index entries should be
#define UPD_NODE_UPDATE_SOME_SEC 6 /* secondary index entries should be
looked at and updated if an ordering
field changed */

View File

@ -71,8 +71,8 @@ at a time */
#define SRV_AUTO_EXTEND_INCREMENT \
(srv_auto_extend_increment * ((1024 * 1024) / UNIV_PAGE_SIZE))
/* This is set to TRUE if the MySQL user has set it in MySQL */
extern ibool srv_lower_case_table_names;
/* This is set to the MySQL server value for this variable. */
extern uint srv_lower_case_table_names;
/* Mutex for locking srv_monitor_file */
extern mutex_t srv_monitor_file_mutex;
@ -476,6 +476,14 @@ enum srv_thread_type
srv_get_thread_type(void);
/*=====================*/
/*********************************************************************//**
Check whether thread type has reserved a slot.
@return slot number or ULINT_UNDEFINED if not found */
UNIV_INTERN
ulint
srv_thread_has_reserved_slot(
/*=========================*/
enum srv_thread_type type); /*!< in: thread type to check */
/*********************************************************************//**
Sets the info describing an i/o thread current state. */
UNIV_INTERN
void

View File

@ -122,6 +122,7 @@ extern mysql_pfs_key_t checkpoint_lock_key;
extern mysql_pfs_key_t trx_i_s_cache_lock_key;
extern mysql_pfs_key_t trx_purge_latch_key;
extern mysql_pfs_key_t index_tree_rw_lock_key;
extern mysql_pfs_key_t dict_table_stats_latch_key;
#endif /* UNIV_PFS_RWLOCK */

View File

@ -140,6 +140,9 @@ struct i_s_trx_row_struct {
ulint trx_mysql_thread_id; /*!< thd_get_thread_id() */
const char* trx_query; /*!< MySQL statement being
executed in the transaction */
struct charset_info_st* trx_query_cs;
/*!< charset encode the MySQL
statement */
const char* trx_operation_state; /*!< trx_struct::op_info */
ulint trx_tables_in_use;/*!< n_mysql_tables_in_use in
trx_struct */

View File

@ -44,9 +44,14 @@ Created 1/20/1994 Heikki Tuuri
#include "hb_univ.i"
#endif /* UNIV_HOTBACKUP */
/* aux macros to convert M into "123" (string) if M is defined like
#define M 123 */
#define _IB_TO_STR(s) #s
#define IB_TO_STR(s) _IB_TO_STR(s)
#define INNODB_VERSION_MAJOR 1
#define INNODB_VERSION_MINOR 1
#define INNODB_VERSION_BUGFIX 4
#define INNODB_VERSION_BUGFIX 5
/* The following is the InnoDB version as shown in
SELECT plugin_version FROM information_schema.plugins;
@ -57,16 +62,14 @@ component, i.e. we show M.N.P as M.N */
#define INNODB_VERSION_SHORT \
(INNODB_VERSION_MAJOR << 8 | INNODB_VERSION_MINOR)
/* auxiliary macros to help creating the version as string */
#define __INNODB_VERSION(a, b, c) (#a "." #b "." #c)
#define _INNODB_VERSION(a, b, c) __INNODB_VERSION(a, b, c)
#define INNODB_VERSION_STR \
_INNODB_VERSION(INNODB_VERSION_MAJOR, \
INNODB_VERSION_MINOR, \
INNODB_VERSION_BUGFIX)
IB_TO_STR(INNODB_VERSION_MAJOR) "." \
IB_TO_STR(INNODB_VERSION_MINOR) "." \
IB_TO_STR(INNODB_VERSION_BUGFIX)
#define REFMAN "http://dev.mysql.com/doc/refman/5.1/en/"
#define REFMAN "http://dev.mysql.com/doc/refman/" \
IB_TO_STR(MYSQL_MAJOR_VERSION) "." \
IB_TO_STR(MYSQL_MINOR_VERSION) "/en/"
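
IB_TO_STR relies on two-level macro expansion: the inner _IB_TO_STR stringizes its argument, and the extra level forces the argument to be macro-expanded first, so a numeric macro becomes its value as a string and the adjacent string literals then concatenate. A small sketch of the effect with hypothetical macro names:

#define STR_(x) #x
#define STR(x)  STR_(x)         /* expand x first, then stringize it */

#define MAJOR   5
#define MINOR   5

/* "5" "." "5" "/en/" concatenates to "5.5/en/" at compile time;
STR_(MAJOR) alone would yield the literal string "MAJOR" instead */
static const char url_suffix[] = STR(MAJOR) "." STR(MINOR) "/en/";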
#ifdef MYSQL_DYNAMIC_PLUGIN
/* In the dynamic plugin, redefine some externally visible symbols
@ -250,7 +253,7 @@ easy way to get it to work. See http://bugs.mysql.com/bug.php?id=52263. */
# define UNIV_INTERN
#endif
#if (!defined(UNIV_DEBUG) && !defined(UNIV_MUST_NOT_INLINE))
#ifndef UNIV_MUST_NOT_INLINE
/* Definition for inline version */
#ifdef __WIN__

View File

@ -55,24 +55,24 @@ Created 1/20/1994 Heikki Tuuri
typedef time_t ib_time_t;
#ifndef UNIV_HOTBACKUP
#if defined(HAVE_IB_PAUSE_INSTRUCTION)
# ifdef WIN32
/* In the Win32 API, the x86 PAUSE instruction is executed by calling
the YieldProcessor macro defined in WinNT.h. It is a CPU architecture-
independent way by using YieldProcessor.*/
# define UT_RELAX_CPU() YieldProcessor()
# else
/* According to the gcc info page, asm volatile means that the
instruction has important side-effects and must not be removed.
Also asm volatile may trigger a memory barrier (spilling all registers
to memory). */
# define UT_RELAX_CPU() __asm__ __volatile__ ("pause")
# endif
#if defined(HAVE_PAUSE_INSTRUCTION)
/* According to the gcc info page, asm volatile means that the
instruction has important side-effects and must not be removed.
Also asm volatile may trigger a memory barrier (spilling all registers
to memory). */
# define UT_RELAX_CPU() __asm__ __volatile__ ("pause")
#elif defined(HAVE_FAKE_PAUSE_INSTRUCTION)
# define UT_RELAX_CPU() __asm__ __volatile__ ("rep; nop")
#elif defined(HAVE_ATOMIC_BUILTINS)
# define UT_RELAX_CPU() do { \
volatile lint volatile_var; \
os_compare_and_swap_lint(&volatile_var, 0, 1); \
} while (0)
#elif defined(HAVE_WINDOWS_ATOMICS)
/* In the Win32 API, the x86 PAUSE instruction is executed by calling
the YieldProcessor macro defined in WinNT.h. It is a CPU architecture-
independent way by using YieldProcessor. */
# define UT_RELAX_CPU() YieldProcessor()
#else
# define UT_RELAX_CPU() ((void)0) /* avoid warning for an empty statement */
#endif
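
UT_RELAX_CPU() is meant to sit inside busy-wait loops: the x86 PAUSE (or its rep; nop encoding) hints to the core that it is spinning, which saves power and can reduce the penalty when the wait ends; the hunks above also show the Windows YieldProcessor and atomic-builtin fallbacks. A minimal sketch of how such a hint is typically used; the loop below is illustrative, not the InnoDB spin loop:

#if defined(__x86_64__) || defined(__i386__)
# define RELAX_CPU() __asm__ __volatile__("pause")
#else
# define RELAX_CPU() ((void) 0)         /* no-op on other architectures */
#endif

/* spin briefly on a flag, relaxing the CPU on every iteration, before
the caller falls back to yielding or sleeping */
int
spin_for_flag(volatile int* flag, unsigned max_rounds)
{
        unsigned i;

        for (i = 0; i < max_rounds; i++) {
                if (*flag) {
                        return(1);      /* flag observed set */
                }
                RELAX_CPU();
        }
        return(0);                      /* caller should now block */
}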

View File

@ -2199,7 +2199,7 @@ recv_report_corrupt_log(
"InnoDB: far enough in recovery! Please run CHECK TABLE\n"
"InnoDB: on your InnoDB tables to check that they are ok!\n"
"InnoDB: If mysqld crashes after this recovery, look at\n"
"InnoDB: " REFMAN "forcing-recovery.html\n"
"InnoDB: " REFMAN "forcing-innodb-recovery.html\n"
"InnoDB: about forcing recovery.\n", stderr);
fflush(stderr);

View File

@ -302,6 +302,36 @@ UNIV_INTERN ulint os_n_pending_writes = 0;
/** Number of pending read operations */
UNIV_INTERN ulint os_n_pending_reads = 0;
#ifdef UNIV_DEBUG
/**********************************************************************//**
Validates the consistency of the aio system some of the time.
@return TRUE if ok or the check was skipped */
UNIV_INTERN
ibool
os_aio_validate_skip(void)
/*======================*/
{
/** Try os_aio_validate() every this many times */
# define OS_AIO_VALIDATE_SKIP 13
/** The os_aio_validate() call skip counter.
Use a signed type because of the race condition below. */
static int os_aio_validate_count = OS_AIO_VALIDATE_SKIP;
/* There is a race condition below, but it does not matter,
because this call is only for heuristic purposes. We want to
reduce the call frequency of the costly os_aio_validate()
check in debug builds. */
if (--os_aio_validate_count > 0) {
return(TRUE);
}
os_aio_validate_count = OS_AIO_VALIDATE_SKIP;
return(os_aio_validate());
}
#endif /* UNIV_DEBUG */
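
Both the counter and os_aio_validate() itself vanish in release builds, because every caller in the hunks below invokes the helper through ut_ad(), InnoDB's debug-only assertion. A reduced sketch of that kind of assertion macro, with hypothetical names rather than the ut_ad definition:

#include <assert.h>

#ifdef MY_DEBUG
# define my_ad(expr)    assert(expr)
#else
# define my_ad(expr)    ((void) 0)      /* argument is not evaluated at all */
#endif

int sampled_validate(void);     /* stand-in for os_aio_validate_skip() */

void
do_some_io(void)
{
        /* one decrement per call in debug builds, nothing in release builds */
        my_ad(sampled_validate());
}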
#ifdef __WIN__
/***********************************************************************//**
Gets the operating system version. Currently works only on Windows.
@return OS_WIN95, OS_WIN31, OS_WINNT, OS_WIN2000, OS_WINXP, OS_WINVISTA,
@ -311,7 +341,6 @@ ulint
os_get_os_version(void)
/*===================*/
{
#ifdef __WIN__
OSVERSIONINFO os_info;
os_info.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
@ -340,12 +369,8 @@ os_get_os_version(void)
ut_error;
return(0);
}
#else
ut_error;
return(0);
#endif
}
#endif /* __WIN__ */
/***********************************************************************//**
Retrieves the last error number if an error occurs in a file io function.
@ -4008,7 +4033,7 @@ os_aio_func(
ut_ad(n > 0);
ut_ad(n % OS_FILE_LOG_BLOCK_SIZE == 0);
ut_ad(offset % OS_FILE_LOG_BLOCK_SIZE == 0);
ut_ad(os_aio_validate());
ut_ad(os_aio_validate_skip());
#ifdef WIN_ASYNC_IO
ut_ad((n & 0xFFFFFFFFUL) == n);
#endif
@ -4210,7 +4235,7 @@ os_aio_windows_handle(
/* NOTE! We only access constant fields in os_aio_array. Therefore
we do not have to acquire the protecting mutex yet */
ut_ad(os_aio_validate());
ut_ad(os_aio_validate_skip());
ut_ad(segment < array->n_segments);
n = array->n_slots / array->n_segments;
@ -4630,7 +4655,7 @@ restart:
srv_set_io_thread_op_info(global_segment,
"looking for i/o requests (a)");
ut_ad(os_aio_validate());
ut_ad(os_aio_validate_skip());
ut_ad(segment < array->n_segments);
n = array->n_slots / array->n_segments;

View File

@ -1525,7 +1525,7 @@ row_ins_check_foreign_constraints(
if (foreign->foreign_index == index) {
if (foreign->referenced_table == NULL) {
dict_table_get(foreign->referenced_table_name,
dict_table_get(foreign->referenced_table_name_lookup,
FALSE);
}

View File

@ -632,7 +632,7 @@ handle_new_error:
"InnoDB: If the mysqld server crashes"
" after the startup or when\n"
"InnoDB: you dump the tables, look at\n"
"InnoDB: " REFMAN "forcing-recovery.html"
"InnoDB: " REFMAN "forcing-innodb-recovery.html"
" for help.\n", stderr);
break;
case DB_FOREIGN_EXCEED_MAX_CASCADE:
@ -3163,7 +3163,7 @@ check_next_foreign:
if (foreign && trx->check_foreigns
&& !(drop_db && dict_tables_have_same_db(
name, foreign->foreign_table_name))) {
name, foreign->foreign_table_name_lookup))) {
FILE* ef = dict_foreign_err_file;
/* We only allow dropping a referenced table if

View File

@ -515,7 +515,7 @@ row_purge_upd_exist_or_extern(
while (node->index != NULL) {
index = node->index;
if (row_upd_changes_ord_field_binary(NULL, node->index,
if (row_upd_changes_ord_field_binary(NULL, NULL, node->index,
node->update)) {
/* Build the older version of the index entry */
entry = row_build_index_entry(node->row, NULL,

View File

@ -693,19 +693,18 @@ row_undo_mod_upd_exist_sec(
while (node->index != NULL) {
index = node->index;
if (row_upd_changes_ord_field_binary(node->row, node->index,
node->update)) {
if (row_upd_changes_ord_field_binary(
node->row, node->ext, node->index, node->update)) {
/* Build the newest version of the index entry */
entry = row_build_index_entry(node->row, node->ext,
index, heap);
if (UNIV_UNLIKELY(!entry)) {
/* The server must have crashed in
row_upd_clust_rec_by_insert(), in
row_ins_index_entry_low() before
btr_store_big_rec_extern_fields()
has written the externally stored columns
(BLOBs) of the new clustered index entry. */
row_upd_clust_rec_by_insert() before
the updated externally stored columns (BLOBs)
of the new clustered index entry were
written. */
/* The table must be in DYNAMIC or COMPRESSED
format. REDUNDANT and COMPACT formats

View File

@ -238,7 +238,7 @@ row_upd_check_references_constraints(
foreign->n_fields))) {
if (foreign->foreign_table == NULL) {
dict_table_get(foreign->foreign_table_name,
dict_table_get(foreign->foreign_table_name_lookup,
FALSE);
}
@ -1198,20 +1198,21 @@ row_upd_changes_ord_field_binary(
row and the data values in update are not
known when this function is called, e.g., at
compile time */
const row_ext_t*ext, /*!< NULL, or prefixes of the externally
stored columns in the old row */
dict_index_t* index, /*!< in: index of the record */
const upd_t* update) /*!< in: update vector for the row; NOTE: the
field numbers in this MUST be clustered index
positions! */
{
ulint n_unique;
ulint n_upd_fields;
ulint i, j;
dict_index_t* clust_index;
ulint n_unique;
ulint i;
const dict_index_t* clust_index;
ut_ad(update && index);
ut_ad(update);
ut_ad(index);
n_unique = dict_index_get_n_unique(index);
n_upd_fields = upd_get_n_fields(update);
clust_index = dict_table_get_first_index(index->table);
@ -1219,33 +1220,72 @@ row_upd_changes_ord_field_binary(
const dict_field_t* ind_field;
const dict_col_t* col;
ulint col_pos;
ulint col_no;
const upd_field_t* upd_field;
const dfield_t* dfield;
dfield_t dfield_ext;
ulint dfield_len;
const byte* buf;
ind_field = dict_index_get_nth_field(index, i);
col = dict_field_get_col(ind_field);
col_pos = dict_col_get_clust_pos(col, clust_index);
col_no = dict_col_get_no(col);
for (j = 0; j < n_upd_fields; j++) {
upd_field = upd_get_field_by_field_no(
update, dict_col_get_clust_pos(col, clust_index));
const upd_field_t* upd_field
= upd_get_nth_field(update, j);
if (upd_field == NULL) {
continue;
}
/* Note that if the index field is a column prefix
then it may be that row does not contain an externally
stored part of the column value, and we cannot compare
the datas */
if (row == NULL) {
ut_ad(ext == NULL);
return(TRUE);
}
if (col_pos == upd_field->field_no
&& (row == NULL
|| ind_field->prefix_len > 0
|| !dfield_datas_are_binary_equal(
dtuple_get_nth_field(row, col_no),
&(upd_field->new_val)))) {
dfield = dtuple_get_nth_field(row, col_no);
return(TRUE);
/* This treatment of column prefix indexes is loosely
based on row_build_index_entry(). */
if (UNIV_LIKELY(ind_field->prefix_len == 0)
|| dfield_is_null(dfield)) {
/* do nothing special */
} else if (UNIV_LIKELY_NULL(ext)) {
/* See if the column is stored externally. */
buf = row_ext_lookup(ext, col_no, &dfield_len);
ut_ad(col->ord_part);
if (UNIV_LIKELY_NULL(buf)) {
if (UNIV_UNLIKELY(buf == field_ref_zero)) {
/* This should never happen, but
we try to fail safe here. */
ut_ad(0);
return(TRUE);
}
goto copy_dfield;
}
} else if (dfield_is_ext(dfield)) {
dfield_len = dfield_get_len(dfield);
ut_a(dfield_len > BTR_EXTERN_FIELD_REF_SIZE);
dfield_len -= BTR_EXTERN_FIELD_REF_SIZE;
ut_a(dict_index_is_clust(index)
|| ind_field->prefix_len <= dfield_len);
buf = dfield_get_data(dfield);
copy_dfield:
ut_a(dfield_len > 0);
dfield_copy(&dfield_ext, dfield);
dfield_set_data(&dfield_ext, buf, dfield_len);
dfield = &dfield_ext;
}
if (!dfield_datas_are_binary_equal(
dfield, &upd_field->new_val,
ind_field->prefix_len)) {
return(TRUE);
}
}
@ -1329,7 +1369,7 @@ row_upd_changes_first_fields_binary(
if (col_pos == upd_field->field_no
&& !dfield_datas_are_binary_equal(
dtuple_get_nth_field(entry, i),
&(upd_field->new_val))) {
&upd_field->new_val, 0)) {
return(TRUE);
}
@ -1596,14 +1636,99 @@ row_upd_sec_step(
ut_ad(!dict_index_is_clust(node->index));
if (node->state == UPD_NODE_UPDATE_ALL_SEC
|| row_upd_changes_ord_field_binary(node->row, node->index,
node->update)) {
|| row_upd_changes_ord_field_binary(node->row, node->ext,
node->index, node->update)) {
return(row_upd_sec_index_entry(node, thr));
}
return(DB_SUCCESS);
}
#ifdef UNIV_DEBUG
# define row_upd_clust_rec_by_insert_inherit(rec,offsets,entry,update) \
row_upd_clust_rec_by_insert_inherit_func(rec,offsets,entry,update)
#else /* UNIV_DEBUG */
# define row_upd_clust_rec_by_insert_inherit(rec,offsets,entry,update) \
row_upd_clust_rec_by_insert_inherit_func(entry,update)
#endif /* UNIV_DEBUG */
/*******************************************************************//**
Mark non-updated off-page columns inherited when the primary key is
updated. We must mark them as inherited in entry, so that they are not
freed in a rollback. A limited version of this function used to be
called btr_cur_mark_dtuple_inherited_extern().
@return TRUE if any columns were inherited */
static __attribute__((warn_unused_result))
ibool
row_upd_clust_rec_by_insert_inherit_func(
/*=====================================*/
#ifdef UNIV_DEBUG
const rec_t* rec, /*!< in: old record, or NULL */
const ulint* offsets,/*!< in: rec_get_offsets(rec), or NULL */
#endif /* UNIV_DEBUG */
dtuple_t* entry, /*!< in/out: updated entry to be
inserted into the clustered index */
const upd_t* update) /*!< in: update vector */
{
ibool inherit = FALSE;
ulint i;
ut_ad(!rec == !offsets);
ut_ad(!rec || rec_offs_any_extern(offsets));
for (i = 0; i < dtuple_get_n_fields(entry); i++) {
dfield_t* dfield = dtuple_get_nth_field(entry, i);
byte* data;
ulint len;
ut_ad(!offsets
|| !rec_offs_nth_extern(offsets, i)
== !dfield_is_ext(dfield)
|| upd_get_field_by_field_no(update, i));
if (!dfield_is_ext(dfield)
|| upd_get_field_by_field_no(update, i)) {
continue;
}
#ifdef UNIV_DEBUG
if (UNIV_LIKELY(rec != NULL)) {
const byte* rec_data
= rec_get_nth_field(rec, offsets, i, &len);
ut_ad(len == dfield_get_len(dfield));
ut_ad(len != UNIV_SQL_NULL);
ut_ad(len >= BTR_EXTERN_FIELD_REF_SIZE);
rec_data += len - BTR_EXTERN_FIELD_REF_SIZE;
/* The pointer must not be zero. */
ut_ad(memcmp(rec_data, field_ref_zero,
BTR_EXTERN_FIELD_REF_SIZE));
/* The BLOB must be owned. */
ut_ad(!(rec_data[BTR_EXTERN_LEN]
& BTR_EXTERN_OWNER_FLAG));
}
#endif /* UNIV_DEBUG */
len = dfield_get_len(dfield);
ut_a(len != UNIV_SQL_NULL);
ut_a(len >= BTR_EXTERN_FIELD_REF_SIZE);
data = dfield_get_data(dfield);
data += len - BTR_EXTERN_FIELD_REF_SIZE;
/* The pointer must not be zero. */
ut_a(memcmp(data, field_ref_zero, BTR_EXTERN_FIELD_REF_SIZE));
/* The BLOB must be owned. */
ut_a(!(data[BTR_EXTERN_LEN] & BTR_EXTERN_OWNER_FLAG));
data[BTR_EXTERN_LEN] |= BTR_EXTERN_INHERITED_FLAG;
/* The BTR_EXTERN_INHERITED_FLAG only matters in
rollback. Purge will always free the extern fields of
a delete-marked row. */
inherit = TRUE;
}
return(inherit);
}
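
The inherit/disown protocol works by toggling a flag bit inside the off-page field reference stored at the end of the column value: the function above sets the "inherited" bit in the new entry so that a rollback of the insert will not free BLOBs that still belong to the delete-marked old record. A schematic sketch of flipping such a flag, with made-up offsets and flag values rather than the real BTR_EXTERN_* constants:

/* hypothetical layout: REF_SIZE trailing bytes form the off-page
reference, and one bit of the byte at LEN_OFFSET records ownership */
#define REF_SIZE        20      /* made-up value for the sketch */
#define LEN_OFFSET      12      /* made-up offset within the reference */
#define INHERITED_FLAG  0x40    /* made-up flag bit */

void
mark_inherited(unsigned char* col_data, unsigned long col_len)
{
        unsigned char* ref = col_data + col_len - REF_SIZE;

        ref[LEN_OFFSET] |= INHERITED_FLAG;      /* survives a rollback */
}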
/***********************************************************//**
Marks the clustered index record deleted and inserts the updated version
of the record to the index. This function should be used when the ordering
@ -1622,14 +1747,16 @@ row_upd_clust_rec_by_insert(
a foreign key constraint */
mtr_t* mtr) /*!< in/out: mtr; gets committed here */
{
mem_heap_t* heap = NULL;
mem_heap_t* heap;
btr_pcur_t* pcur;
btr_cur_t* btr_cur;
trx_t* trx;
dict_table_t* table;
dtuple_t* entry;
ulint err;
ibool change_ownership = FALSE;
ibool change_ownership = FALSE;
rec_t* rec;
ulint* offsets = NULL;
ut_ad(node);
ut_ad(dict_index_is_clust(index));
@ -1639,59 +1766,7 @@ row_upd_clust_rec_by_insert(
pcur = node->pcur;
btr_cur = btr_pcur_get_btr_cur(pcur);
if (node->state != UPD_NODE_INSERT_CLUSTERED) {
rec_t* rec;
dict_index_t* index;
ulint offsets_[REC_OFFS_NORMAL_SIZE];
ulint* offsets;
rec_offs_init(offsets_);
err = btr_cur_del_mark_set_clust_rec(BTR_NO_LOCKING_FLAG,
btr_cur, TRUE, thr, mtr);
if (err != DB_SUCCESS) {
mtr_commit(mtr);
return(err);
}
/* Mark as not-owned the externally stored fields which the new
row inherits from the delete marked record: purge should not
free those externally stored fields even if the delete marked
record is removed from the index tree, or updated. */
rec = btr_cur_get_rec(btr_cur);
index = dict_table_get_first_index(table);
offsets = rec_get_offsets(rec, index, offsets_,
ULINT_UNDEFINED, &heap);
change_ownership = btr_cur_mark_extern_inherited_fields(
btr_cur_get_page_zip(btr_cur),
rec, index, offsets, node->update, mtr);
if (referenced) {
/* NOTE that the following call loses
the position of pcur ! */
err = row_upd_check_references_constraints(
node, pcur, table, index, offsets, thr, mtr);
if (err != DB_SUCCESS) {
mtr_commit(mtr);
if (UNIV_LIKELY_NULL(heap)) {
mem_heap_free(heap);
}
return(err);
}
}
}
mtr_commit(mtr);
if (!heap) {
heap = mem_heap_create(500);
}
node->state = UPD_NODE_INSERT_CLUSTERED;
heap = mem_heap_create(1000);
entry = row_build_index_entry(node->upd_row, node->upd_ext,
index, heap);
@ -1699,23 +1774,106 @@ row_upd_clust_rec_by_insert(
row_upd_index_entry_sys_field(entry, index, DATA_TRX_ID, trx->id);
if (change_ownership) {
/* If we return from a lock wait, for example, we may have
extern fields marked as not-owned in entry (marked in the
if-branch above). We must unmark them, take the ownership
back. */
switch (node->state) {
default:
ut_error;
case UPD_NODE_INSERT_BLOB:
/* A lock wait occurred in row_ins_index_entry() in
the previous invocation of this function. Mark the
off-page columns in the entry inherited. */
btr_cur_unmark_dtuple_extern_fields(entry);
change_ownership = row_upd_clust_rec_by_insert_inherit(
NULL, NULL, entry, node->update);
ut_a(change_ownership);
/* fall through */
case UPD_NODE_INSERT_CLUSTERED:
/* A lock wait occurred in row_ins_index_entry() in
the previous invocation of this function. */
break;
case UPD_NODE_UPDATE_CLUSTERED:
/* This is the first invocation of the function where
we update the primary key. Delete-mark the old record
in the clustered index and prepare to insert a new entry. */
rec = btr_cur_get_rec(btr_cur);
offsets = rec_get_offsets(rec, index, NULL,
ULINT_UNDEFINED, &heap);
ut_ad(page_rec_is_user_rec(rec));
/* We must mark non-updated extern fields in entry as
inherited, so that a possible rollback will not free them. */
err = btr_cur_del_mark_set_clust_rec(
BTR_NO_LOCKING_FLAG, btr_cur_get_block(btr_cur),
rec, index, offsets, TRUE, thr, mtr);
if (err != DB_SUCCESS) {
err_exit:
mtr_commit(mtr);
mem_heap_free(heap);
return(err);
}
btr_cur_mark_dtuple_inherited_extern(entry, node->update);
/* If the new row inherits externally stored
fields (off-page columns a.k.a. BLOBs) from the
delete-marked old record, mark them disowned by the
old record and owned by the new entry. */
if (rec_offs_any_extern(offsets)) {
change_ownership = row_upd_clust_rec_by_insert_inherit(
rec, offsets, entry, node->update);
if (change_ownership) {
btr_pcur_store_position(pcur, mtr);
}
}
if (referenced) {
/* NOTE that the following call loses
the position of pcur ! */
err = row_upd_check_references_constraints(
node, pcur, table, index, offsets, thr, mtr);
if (err != DB_SUCCESS) {
goto err_exit;
}
}
}
mtr_commit(mtr);
err = row_ins_index_entry(index, entry,
node->upd_ext ? node->upd_ext->n_ext : 0,
TRUE, thr);
node->state = change_ownership
? UPD_NODE_INSERT_BLOB
: UPD_NODE_INSERT_CLUSTERED;
if (err == DB_SUCCESS && change_ownership) {
/* Mark the non-updated fields disowned by the old record. */
/* NOTE: this transaction has an x-lock on the record
and therefore other transactions cannot modify the
record when we have no latch on the page. In addition,
we assume that other query threads of the same
transaction do not modify the record in the meantime.
Therefore we can assert that the restoration of the
cursor succeeds. */
mtr_start(mtr);
if (!btr_pcur_restore_position(BTR_MODIFY_LEAF, pcur, mtr)) {
ut_error;
}
rec = btr_cur_get_rec(btr_cur);
offsets = rec_get_offsets(rec, index, offsets,
ULINT_UNDEFINED, &heap);
ut_ad(page_rec_is_user_rec(rec));
btr_cur_disown_inherited_fields(
btr_cur_get_page_zip(btr_cur),
rec, index, offsets, node->update, mtr);
mtr_commit(mtr);
}
mem_heap_free(heap);
return(err);
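
The rewritten function is re-entrant across lock waits: node->state records that the delete-mark phase already ran (UPD_NODE_INSERT_CLUSTERED) and whether the rebuilt entry must again be marked as inheriting the old record's off-page columns (UPD_NODE_INSERT_BLOB). A stripped-down sketch of that resumable-step idea, not the InnoDB control flow:

/* hypothetical resumable operation: each invocation may be interrupted
by a lock wait; "state" records which phases have already completed */
enum upd_state { UPD_FIRST_CALL, UPD_RETRY_INSERT, UPD_RETRY_INSERT_BLOB };

struct upd_op {
        enum upd_state  state;
        int             owns_old_blobs; /* ownership must be transferred */
};

int
upd_step(struct upd_op* op)
{
        switch (op->state) {
        case UPD_FIRST_CALL:
                /* phase 1: delete-mark the old record (done exactly once) */
                break;
        case UPD_RETRY_INSERT_BLOB:
                /* re-mark the rebuilt entry as inheriting the old BLOBs */
                /* fall through */
        case UPD_RETRY_INSERT:
                break;  /* phase 1 already done on an earlier call */
        }

        op->state = op->owns_old_blobs
                ? UPD_RETRY_INSERT_BLOB
                : UPD_RETRY_INSERT;

        /* phase 2: insert the new entry; on a lock wait the caller simply
        calls upd_step() again and phase 1 is not repeated */
        return(0);
}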
@ -1860,8 +2018,9 @@ row_upd_del_mark_clust_rec(
/* Mark the clustered index record deleted; we do not have to check
locks, because we assume that we have an x-lock on the record */
err = btr_cur_del_mark_set_clust_rec(BTR_NO_LOCKING_FLAG,
btr_cur, TRUE, thr, mtr);
err = btr_cur_del_mark_set_clust_rec(
BTR_NO_LOCKING_FLAG, btr_cur_get_block(btr_cur),
btr_cur_get_rec(btr_cur), index, offsets, TRUE, thr, mtr);
if (err == DB_SUCCESS && referenced) {
/* NOTE that the following call loses the position of pcur ! */
@ -2006,7 +2165,8 @@ exit_func:
row_upd_store_row(node);
if (row_upd_changes_ord_field_binary(node->row, index, node->update)) {
if (row_upd_changes_ord_field_binary(node->row, node->ext, index,
node->update)) {
/* Update causes an ordering field (ordering fields within
the B-tree) of the clustered index record to change: perform
@ -2076,7 +2236,8 @@ row_upd(
}
if (node->state == UPD_NODE_UPDATE_CLUSTERED
|| node->state == UPD_NODE_INSERT_CLUSTERED) {
|| node->state == UPD_NODE_INSERT_CLUSTERED
|| node->state == UPD_NODE_INSERT_BLOB) {
log_free_check();
err = row_upd_clust_step(node, thr);

View File

@ -87,9 +87,11 @@ Created 10/8/1995 Heikki Tuuri
#include "mysql/plugin.h"
#include "mysql/service_thd_wait.h"
/* This is set to TRUE if the MySQL user has set it in MySQL; currently
affects only FOREIGN KEY definition parsing */
UNIV_INTERN ibool srv_lower_case_table_names = FALSE;
/* This is set to the MySQL server value for this variable. It is only
needed for FOREIGN KEY definition parsing since FOREIGN KEY names are not
stored in the server metadata. The server stores and enforces it for
regular database and table names.*/
UNIV_INTERN uint srv_lower_case_table_names = 0;
/* The following counter is incremented whenever there is some user activity
in the server */
@ -831,6 +833,7 @@ srv_table_reserve_slot(
ut_a(type > 0);
ut_a(type <= SRV_MASTER);
ut_ad(mutex_own(&kernel_mutex));
i = 0;
slot = srv_table_get_nth_slot(i);
@ -979,6 +982,37 @@ srv_get_thread_type(void)
return(type);
}
/*********************************************************************//**
Check whether thread type has reserved a slot. Return the first slot that
is found. This works because we currently have only 1 thread of each type.
@return slot number or ULINT_UNDEFINED if not found*/
UNIV_INTERN
ulint
srv_thread_has_reserved_slot(
/*=========================*/
enum srv_thread_type type) /*!< in: thread type to check */
{
ulint i;
ulint slot_no = ULINT_UNDEFINED;
mutex_enter(&kernel_mutex);
for (i = 0; i < OS_THREAD_MAX_N; i++) {
srv_slot_t* slot;
slot = srv_table_get_nth_slot(i);
if (slot->in_use && slot->type == type) {
slot_no = i;
break;
}
}
mutex_exit(&kernel_mutex);
return(slot_no);
}
/*********************************************************************//**
Initializes the server. */
UNIV_INTERN
@ -2627,10 +2661,10 @@ srv_master_thread(
srv_main_thread_process_no = os_proc_get_number();
srv_main_thread_id = os_thread_pf(os_thread_get_curr_id());
srv_table_reserve_slot(SRV_MASTER);
mutex_enter(&kernel_mutex);
srv_table_reserve_slot(SRV_MASTER);
srv_n_threads_active[SRV_MASTER]++;
mutex_exit(&kernel_mutex);

View File

@ -1026,26 +1026,35 @@ innobase_start_or_create_for_mysql(void)
on Mac OS X 10.3 or later. */
struct utsname utsname;
if (uname(&utsname)) {
fputs("InnoDB: cannot determine Mac OS X version!\n", stderr);
ut_print_timestamp(stderr);
fputs(" InnoDB: cannot determine Mac OS X version!\n", stderr);
} else {
srv_have_fullfsync = strcmp(utsname.release, "7.") >= 0;
}
if (!srv_have_fullfsync) {
fputs("InnoDB: On Mac OS X, fsync() may be"
" broken on internal drives,\n"
"InnoDB: making transactions unsafe!\n", stderr);
ut_print_timestamp(stderr);
fputs(" InnoDB: On Mac OS X, fsync() may be "
"broken on internal drives,\n", stderr);
ut_print_timestamp(stderr);
fputs(" InnoDB: making transactions unsafe!\n", stderr);
}
# endif /* F_FULLFSYNC */
#endif /* HAVE_DARWIN_THREADS */
if (sizeof(ulint) != sizeof(void*)) {
ut_print_timestamp(stderr);
fprintf(stderr,
"InnoDB: Error: size of InnoDB's ulint is %lu,"
" but size of void* is %lu.\n"
"InnoDB: The sizes should be the same"
" so that on a 64-bit platform you can\n"
"InnoDB: allocate more than 4 GB of memory.",
(ulong)sizeof(ulint), (ulong)sizeof(void*));
" InnoDB: Error: size of InnoDB's ulint is %lu, "
"but size of void*\n", (ulong) sizeof(ulint));
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: is %lu. The sizes should be the same "
"so that on a 64-bit\n",
(ulong) sizeof(void*));
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: platforms you can allocate more than 4 GB "
"of memory.\n");
}
/* System tables are created in tablespace 0. Thus, we must
@ -1054,53 +1063,68 @@ innobase_start_or_create_for_mysql(void)
innodb_file_per_table) until this function has returned. */
srv_file_per_table = FALSE;
#ifdef UNIV_DEBUG
ut_print_timestamp(stderr);
fprintf(stderr,
"InnoDB: !!!!!!!! UNIV_DEBUG switched on !!!!!!!!!\n");
" InnoDB: !!!!!!!! UNIV_DEBUG switched on !!!!!!!!!\n");
#endif
#ifdef UNIV_IBUF_DEBUG
ut_print_timestamp(stderr);
fprintf(stderr,
"InnoDB: !!!!!!!! UNIV_IBUF_DEBUG switched on !!!!!!!!!\n"
" InnoDB: !!!!!!!! UNIV_IBUF_DEBUG switched on !!!!!!!!!\n");
# ifdef UNIV_IBUF_COUNT_DEBUG
"InnoDB: !!!!!!!! UNIV_IBUF_COUNT_DEBUG switched on !!!!!!!!!\n"
"InnoDB: Crash recovery will fail with UNIV_IBUF_COUNT_DEBUG\n"
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: !!!!!!!! UNIV_IBUF_COUNT_DEBUG switched on "
"!!!!!!!!!\n");
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Crash recovery will fail with UNIV_IBUF_COUNT_DEBUG\n");
# endif
);
#endif
#ifdef UNIV_SYNC_DEBUG
ut_print_timestamp(stderr);
fprintf(stderr,
"InnoDB: !!!!!!!! UNIV_SYNC_DEBUG switched on !!!!!!!!!\n");
" InnoDB: !!!!!!!! UNIV_SYNC_DEBUG switched on !!!!!!!!!\n");
#endif
#ifdef UNIV_SEARCH_DEBUG
ut_print_timestamp(stderr);
fprintf(stderr,
"InnoDB: !!!!!!!! UNIV_SEARCH_DEBUG switched on !!!!!!!!!\n");
" InnoDB: !!!!!!!! UNIV_SEARCH_DEBUG switched on !!!!!!!!!\n");
#endif
#ifdef UNIV_LOG_LSN_DEBUG
ut_print_timestamp(stderr);
fprintf(stderr,
"InnoDB: !!!!!!!! UNIV_LOG_LSN_DEBUG switched on !!!!!!!!!\n");
" InnoDB: !!!!!!!! UNIV_LOG_LSN_DEBUG switched on !!!!!!!!!\n");
#endif /* UNIV_LOG_LSN_DEBUG */
#ifdef UNIV_MEM_DEBUG
ut_print_timestamp(stderr);
fprintf(stderr,
"InnoDB: !!!!!!!! UNIV_MEM_DEBUG switched on !!!!!!!!!\n");
" InnoDB: !!!!!!!! UNIV_MEM_DEBUG switched on !!!!!!!!!\n");
#endif
if (UNIV_LIKELY(srv_use_sys_malloc)) {
ut_print_timestamp(stderr);
fprintf(stderr,
"InnoDB: The InnoDB memory heap is disabled\n");
" InnoDB: The InnoDB memory heap is disabled\n");
}
fputs("InnoDB: " IB_ATOMICS_STARTUP_MSG
"\nInnoDB: Compressed tables use zlib " ZLIB_VERSION
ut_print_timestamp(stderr);
fputs(" InnoDB: " IB_ATOMICS_STARTUP_MSG "\n", stderr);
ut_print_timestamp(stderr);
fputs(" InnoDB: Compressed tables use zlib " ZLIB_VERSION
#ifdef UNIV_ZIP_DEBUG
" with validation"
#endif /* UNIV_ZIP_DEBUG */
#ifdef UNIV_ZIP_COPY
" and extra copying"
#endif /* UNIV_ZIP_COPY */
"\n" , stderr);
#ifdef UNIV_ZIP_COPY
ut_print_timestamp(stderr);
fputs(" InnoDB: and extra copying\n", stderr);
#endif /* UNIV_ZIP_COPY */
/* Since InnoDB does not currently clean up all its internal data
structures in MySQL Embedded Server Library server_end(), we
@ -1108,13 +1132,17 @@ innobase_start_or_create_for_mysql(void)
second time during the process lifetime. */
if (srv_start_has_been_called) {
fprintf(stderr,
"InnoDB: Error: startup called second time"
" during the process lifetime.\n"
"InnoDB: In the MySQL Embedded Server Library"
" you cannot call server_init()\n"
"InnoDB: more than once during"
" the process lifetime.\n");
ut_print_timestamp(stderr);
fprintf(stderr, " InnoDB: Error: startup called second time "
"during the process\n");
ut_print_timestamp(stderr);
fprintf(stderr, " InnoDB: lifetime. In the MySQL Embedded "
"Server Library you\n");
ut_print_timestamp(stderr);
fprintf(stderr, " InnoDB: cannot call server_init() more "
"than once during the\n");
ut_print_timestamp(stderr);
fprintf(stderr, " InnoDB: process lifetime.\n");
}
srv_start_has_been_called = TRUE;
@ -1158,7 +1186,7 @@ innobase_start_or_create_for_mysql(void)
if (srv_use_native_aio) {
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Using Linux native AIO\n");
" InnoDB: Using Linux native AIO\n");
}
#else
/* Currently native AIO is supported only on windows and linux
@ -1203,8 +1231,9 @@ innobase_start_or_create_for_mysql(void)
srv_win_file_flush_method = SRV_WIN_IO_UNBUFFERED;
#endif
} else {
ut_print_timestamp(stderr);
fprintf(stderr,
"InnoDB: Unrecognized value %s for"
" InnoDB: Unrecognized value %s for"
" innodb_flush_method\n",
srv_file_flush_method_str);
return(DB_ERROR);
@ -1285,7 +1314,8 @@ innobase_start_or_create_for_mysql(void)
we'll emit a message telling the user that this parameter
is now deprecated. */
if (srv_n_file_io_threads != 4) {
fprintf(stderr, "InnoDB: Warning:"
ut_print_timestamp(stderr);
fprintf(stderr, " InnoDB: Warning:"
" innodb_file_io_threads is deprecated."
" Please use innodb_read_io_threads and"
" innodb_write_io_threads instead\n");
@ -1316,7 +1346,7 @@ innobase_start_or_create_for_mysql(void)
/* Print time to initialize the buffer pool */
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Initializing buffer pool, size =");
" InnoDB: Initializing buffer pool, size =");
if (srv_buf_pool_size >= 1024 * 1024 * 1024) {
fprintf(stderr,
@ -1332,11 +1362,12 @@ innobase_start_or_create_for_mysql(void)
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Completed initialization of buffer pool\n");
" InnoDB: Completed initialization of buffer pool\n");
if (err != DB_SUCCESS) {
ut_print_timestamp(stderr);
fprintf(stderr,
"InnoDB: Fatal error: cannot allocate the memory"
" InnoDB: Fatal error: cannot allocate memory"
" for the buffer pool\n");
return(DB_ERROR);
@ -1348,7 +1379,8 @@ innobase_start_or_create_for_mysql(void)
if (srv_buf_pool_size <= 5 * 1024 * 1024) {
fprintf(stderr, "InnoDB: Warning: Small buffer pool size "
ut_print_timestamp(stderr);
fprintf(stderr, " InnoDB: Warning: Small buffer pool size "
"(%luM), the flst_validate() debug function "
"can cause a deadlock if the buffer pool fills up.\n",
srv_buf_pool_size / 1024 / 1024);
@ -1370,18 +1402,19 @@ innobase_start_or_create_for_mysql(void)
#ifdef UNIV_LOG_ARCHIVE
if (0 != ut_strcmp(srv_log_group_home_dirs[0], srv_arch_dir)) {
fprintf(stderr,
"InnoDB: Error: you must set the log group"
" home dir in my.cnf the\n"
"InnoDB: same as log arch dir.\n");
ut_print_timestamp(stderr);
fprintf(stderr, " InnoDB: Error: you must set the log group home dir in my.cnf\n");
ut_print_timestamp(stderr);
fprintf(stderr, " InnoDB: the same as log arch dir.\n");
return(DB_ERROR);
}
#endif /* UNIV_LOG_ARCHIVE */
if (srv_n_log_files * srv_log_file_size >= 262144) {
ut_print_timestamp(stderr);
fprintf(stderr,
"InnoDB: Error: combined size of log files"
" InnoDB: Error: combined size of log files"
" must be < 4 GB\n");
return(DB_ERROR);
@ -1392,10 +1425,13 @@ innobase_start_or_create_for_mysql(void)
for (i = 0; i < srv_n_data_files; i++) {
#ifndef __WIN__
if (sizeof(off_t) < 5 && srv_data_file_sizes[i] >= 262144) {
ut_print_timestamp(stderr);
fprintf(stderr,
"InnoDB: Error: file size must be < 4 GB"
" with this MySQL binary\n"
"InnoDB: and operating system combination,"
" InnoDB: Error: file size must be < 4 GB"
" with this MySQL binary\n");
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: and operating system combination,"
" in some OS's < 2 GB\n");
return(DB_ERROR);
@ -1405,8 +1441,9 @@ innobase_start_or_create_for_mysql(void)
}
if (sum_of_new_sizes < 10485760 / UNIV_PAGE_SIZE) {
ut_print_timestamp(stderr);
fprintf(stderr,
"InnoDB: Error: tablespace size must be"
" InnoDB: Error: tablespace size must be"
" at least 10 MB\n");
return(DB_ERROR);
@ -1419,19 +1456,32 @@ innobase_start_or_create_for_mysql(void)
&min_flushed_lsn, &max_flushed_lsn,
&sum_of_new_sizes);
if (err != DB_SUCCESS) {
ut_print_timestamp(stderr);
fprintf(stderr,
"InnoDB: Could not open or create data files.\n"
"InnoDB: If you tried to add new data files,"
" and it failed here,\n"
"InnoDB: you should now edit innodb_data_file_path"
" in my.cnf back\n"
"InnoDB: to what it was, and remove the"
" new ibdata files InnoDB created\n"
"InnoDB: in this failed attempt. InnoDB only wrote"
" those files full of\n"
"InnoDB: zeros, but did not yet use them in any way."
" But be careful: do not\n"
"InnoDB: remove old data files"
" InnoDB: Could not open or create data files.\n");
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: If you tried to add new data files,"
" and it failed here,\n");
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: you should now edit innodb_data_file_path"
" in my.cnf back\n");
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: to what it was, and remove the"
" new ibdata files InnoDB created\n");
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: in this failed attempt. InnoDB only wrote"
" those files full of\n");
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: zeros, but did not yet use them in any way."
" But be careful: do not\n");
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: remove old data files"
" which contain your precious data!\n");
return((int) err);
@ -1457,18 +1507,29 @@ innobase_start_or_create_for_mysql(void)
}
if ((log_opened && create_new_db)
|| (log_opened && log_created)) {
ut_print_timestamp(stderr);
fprintf(stderr,
"InnoDB: Error: all log files must be"
" created at the same time.\n"
"InnoDB: All log files must be"
" created also in database creation.\n"
"InnoDB: If you want bigger or smaller"
" log files, shut down the\n"
"InnoDB: database and make sure there"
" were no errors in shutdown.\n"
"InnoDB: Then delete the existing log files."
" Edit the .cnf file\n"
"InnoDB: and start the database again.\n");
" InnoDB: Error: all log files must be"
" created at the same time.\n");
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: All log files must be"
" created also in database creation.\n");
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: If you want bigger or smaller"
" log files, shut down the\n");
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: database and make sure there"
" were no errors in shutdown.\n");
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Then delete the existing log files."
" Edit the .cnf file\n");
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: and start the database again.\n");
return(DB_ERROR);
}
@ -1489,27 +1550,41 @@ innobase_start_or_create_for_mysql(void)
|| max_arch_log_no != min_arch_log_no
#endif /* UNIV_LOG_ARCHIVE */
) {
ut_print_timestamp(stderr);
fprintf(stderr,
"InnoDB: Cannot initialize created"
" log files because\n"
"InnoDB: data files were not in sync"
" with each other\n"
"InnoDB: or the data files are corrupt.\n");
" InnoDB: Cannot initialize created"
" log files because\n");
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: data files were not in sync"
" with each other\n");
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: or the data files are corrupt.\n");
return(DB_ERROR);
}
if (max_flushed_lsn < (ib_uint64_t) 1000) {
ut_print_timestamp(stderr);
fprintf(stderr,
"InnoDB: Cannot initialize created"
" log files because\n"
"InnoDB: data files are corrupt,"
" or new data files were\n"
"InnoDB: created when the database"
" was started previous\n"
"InnoDB: time but the database"
" was not shut down\n"
"InnoDB: normally after that.\n");
" InnoDB: Cannot initialize created"
" log files because\n");
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: data files are corrupt,"
" or new data files were\n");
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: created when the database"
" was started previous\n");
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: time but the database"
" was not shut down\n");
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: normally after that.\n");
return(DB_ERROR);
}
@ -1548,8 +1623,9 @@ innobase_start_or_create_for_mysql(void)
#ifdef UNIV_LOG_ARCHIVE
} else if (srv_archive_recovery) {
ut_print_timestamp(stderr);
fprintf(stderr,
"InnoDB: Starting archive"
" InnoDB: Starting archive"
" recovery from a backup...\n");
err = recv_recovery_from_archive_start(
min_flushed_lsn, srv_archive_recovery_limit_lsn,
@ -1757,6 +1833,24 @@ innobase_start_or_create_for_mysql(void)
os_thread_create(&srv_purge_thread, NULL, NULL);
}
/* Wait for the purge and master thread to start up. */
while (srv_shutdown_state == SRV_SHUTDOWN_NONE) {
if (srv_thread_has_reserved_slot(SRV_MASTER) == ULINT_UNDEFINED
|| (srv_n_purge_threads == 1
&& srv_thread_has_reserved_slot(SRV_WORKER)
== ULINT_UNDEFINED)) {
ut_print_timestamp(stderr);
fprintf(stderr, " InnoDB: "
"Waiting for the background threads to "
"start\n");
os_thread_sleep(1000000);
} else {
break;
}
}
#ifdef UNIV_DEBUG
/* buf_debug_prints = TRUE; */
#endif /* UNIV_DEBUG */
@ -1771,11 +1865,14 @@ innobase_start_or_create_for_mysql(void)
if (!srv_auto_extend_last_data_file
&& sum_of_data_file_sizes != tablespace_size_in_header) {
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Error: tablespace size"
" stored in header is %lu pages, but\n",
(ulong) tablespace_size_in_header);
ut_print_timestamp(stderr);
fprintf(stderr,
"InnoDB: Error: tablespace size"
" stored in header is %lu pages, but\n"
"InnoDB: the sum of data file sizes is %lu pages\n",
(ulong) tablespace_size_in_header,
(ulong) sum_of_data_file_sizes);
if (srv_force_recovery == 0
@ -1783,16 +1880,25 @@ innobase_start_or_create_for_mysql(void)
/* This is a fatal error, the tail of a tablespace is
missing */
ut_print_timestamp(stderr);
fprintf(stderr,
"InnoDB: Cannot start InnoDB."
" The tail of the system tablespace is\n"
"InnoDB: missing. Have you edited"
" innodb_data_file_path in my.cnf in an\n"
"InnoDB: inappropriate way, removing"
" ibdata files from there?\n"
"InnoDB: You can set innodb_force_recovery=1"
" in my.cnf to force\n"
"InnoDB: a startup if you are trying"
" InnoDB: Cannot start InnoDB."
" The tail of the system tablespace is\n");
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: missing. Have you edited"
" innodb_data_file_path in my.cnf in an\n");
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: inappropriate way, removing"
" ibdata files from there?\n");
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: You can set innodb_force_recovery=1"
" in my.cnf to force\n");
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: a startup if you are trying"
" to recover a badly corrupt database.\n");
return(DB_ERROR);
@ -1802,26 +1908,38 @@ innobase_start_or_create_for_mysql(void)
if (srv_auto_extend_last_data_file
&& sum_of_data_file_sizes < tablespace_size_in_header) {
ut_print_timestamp(stderr);
fprintf(stderr,
"InnoDB: Error: tablespace size stored in header"
" is %lu pages, but\n"
"InnoDB: the sum of data file sizes"
" InnoDB: Error: tablespace size stored in header"
" is %lu pages, but\n",
(ulong) tablespace_size_in_header);
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: the sum of data file sizes"
" is only %lu pages\n",
(ulong) tablespace_size_in_header,
(ulong) sum_of_data_file_sizes);
if (srv_force_recovery == 0) {
ut_print_timestamp(stderr);
fprintf(stderr,
"InnoDB: Cannot start InnoDB. The tail of"
" the system tablespace is\n"
"InnoDB: missing. Have you edited"
" innodb_data_file_path in my.cnf in an\n"
"InnoDB: inappropriate way, removing"
" ibdata files from there?\n"
"InnoDB: You can set innodb_force_recovery=1"
" in my.cnf to force\n"
"InnoDB: a startup if you are trying to"
" InnoDB: Cannot start InnoDB. The tail of"
" the system tablespace is\n");
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: missing. Have you edited"
" innodb_data_file_path in my.cnf in an\n");
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: inappropriate way, removing"
" ibdata files from there?\n");
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: You can set innodb_force_recovery=1"
" in my.cnf to force\n");
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: a startup if you are trying to"
" recover a badly corrupt database.\n");
return(DB_ERROR);
@ -1832,10 +1950,13 @@ innobase_start_or_create_for_mysql(void)
os_fast_mutex_init(&srv_os_test_mutex);
if (0 != os_fast_mutex_trylock(&srv_os_test_mutex)) {
ut_print_timestamp(stderr);
fprintf(stderr,
"InnoDB: Error: pthread_mutex_trylock returns"
" an unexpected value on\n"
"InnoDB: success! Cannot continue.\n");
" InnoDB: Error: pthread_mutex_trylock returns"
" an unexpected value on\n");
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: success! Cannot continue.\n");
exit(1);
}
@ -1850,14 +1971,15 @@ innobase_start_or_create_for_mysql(void)
if (srv_print_verbose_log) {
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: %s started; "
" InnoDB: %s started; "
"log sequence number %llu\n",
INNODB_VERSION_STR, srv_start_lsn);
}
if (srv_force_recovery > 0) {
ut_print_timestamp(stderr);
fprintf(stderr,
"InnoDB: !!! innodb_force_recovery"
" InnoDB: !!! innodb_force_recovery"
" is set to %lu !!!\n",
(ulong) srv_force_recovery);
}
@ -1878,12 +2000,17 @@ innobase_start_or_create_for_mysql(void)
4.1.1. It is essential that the insert buffer is emptied
here! */
ut_print_timestamp(stderr);
fprintf(stderr,
"InnoDB: You are upgrading to an"
" InnoDB version which allows multiple\n"
"InnoDB: tablespaces. Wait that purge"
" and insert buffer merge run to\n"
"InnoDB: completion...\n");
" InnoDB: You are upgrading to an"
" InnoDB version which allows multiple\n");
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: tablespaces. Wait that purge"
" and insert buffer merge run to\n");
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: completion...\n");
for (;;) {
os_thread_sleep(1000000);
@ -1895,21 +2022,29 @@ innobase_start_or_create_for_mysql(void)
break;
}
}
ut_print_timestamp(stderr);
fprintf(stderr,
"InnoDB: Full purge and insert buffer merge"
" InnoDB: Full purge and insert buffer merge"
" completed.\n");
trx_sys_mark_upgraded_to_multiple_tablespaces();
ut_print_timestamp(stderr);
fprintf(stderr,
"InnoDB: You have now successfully upgraded"
" to the multiple tablespaces\n"
"InnoDB: format. You should NOT DOWNGRADE"
" to an earlier version of\n"
"InnoDB: InnoDB! But if you absolutely need to"
" downgrade, see\n"
"InnoDB: " REFMAN "multiple-tablespaces.html\n"
"InnoDB: for instructions.\n");
" InnoDB: You have now successfully upgraded"
" to the multiple tablespaces\n");
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: format. You should NOT DOWNGRADE"
" to an earlier version of\n");
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: InnoDB! But if you absolutely need to"
" downgrade, see\n");
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: " REFMAN "multiple-tablespaces.html\n"
" InnoDB: for instructions.\n");
}
if (srv_force_recovery == 0) {

View File

@ -590,9 +590,6 @@ sync_array_deadlock_step(
ulint depth) /*!< in: recursion depth */
{
sync_cell_t* new;
ibool ret;
depth++;
if (pass != 0) {
/* If pass != 0, then we do not know which threads are
@ -604,7 +601,7 @@ sync_array_deadlock_step(
new = sync_array_find_thread(arr, thread);
if (new == start) {
if (UNIV_UNLIKELY(new == start)) {
/* Stop running of other threads */
ut_dbg_stop_threads = TRUE;
@ -616,11 +613,7 @@ sync_array_deadlock_step(
return(TRUE);
} else if (new) {
ret = sync_array_detect_deadlock(arr, start, new, depth);
if (ret) {
return(TRUE);
}
return(sync_array_detect_deadlock(arr, start, new, depth + 1));
}
return(FALSE);
}
@ -930,12 +923,25 @@ sync_array_print_long_waits(void)
ulint fatal_timeout = srv_fatal_semaphore_wait_threshold;
ibool fatal = FALSE;
#ifdef UNIV_DEBUG_VALGRIND
/* Increase the timeouts if running under valgrind because it executes
extremely slowly. UNIV_DEBUG_VALGRIND does not necessarily mean that
we are running under valgrind but we have no better way to tell.
See Bug#58432 innodb.innodb_bug56143 fails under valgrind
for an example */
# define SYNC_ARRAY_TIMEOUT 2400
fatal_timeout *= 10;
#else
# define SYNC_ARRAY_TIMEOUT 240
#endif
for (i = 0; i < sync_primary_wait_array->n_cells; i++) {
cell = sync_array_get_nth_cell(sync_primary_wait_array, i);
if (cell->wait_object != NULL && cell->waiting
&& difftime(time(NULL), cell->reservation_time) > 240) {
&& difftime(time(NULL), cell->reservation_time)
> SYNC_ARRAY_TIMEOUT) {
fputs("InnoDB: Warning: a long semaphore wait:\n",
stderr);
sync_array_cell_print(stderr, cell);
@ -977,6 +983,8 @@ sync_array_print_long_waits(void)
" to the standard error stream\n");
}
#undef SYNC_ARRAY_TIMEOUT
return(fatal);
}

View File

@ -442,7 +442,7 @@ i_s_locks_row_validate(
/* record lock */
ut_ad(!strcmp("RECORD", row->lock_type));
ut_ad(row->lock_index != NULL);
ut_ad(row->lock_data != NULL);
/* row->lock_data == NULL if buf_page_try_get() == NULL */
ut_ad(row->lock_page != ULINT_UNDEFINED);
ut_ad(row->lock_rec != ULINT_UNDEFINED);
}
@ -508,7 +508,6 @@ fill_trx_row(
stmt = innobase_get_stmt(trx->mysql_thd, &stmt_len);
if (stmt != NULL) {
char query[TRX_I_S_TRX_QUERY_MAX_LEN + 1];
if (stmt_len > TRX_I_S_TRX_QUERY_MAX_LEN) {
@ -522,6 +521,8 @@ fill_trx_row(
cache->storage, stmt, stmt_len + 1,
MAX_ALLOWED_FOR_STORAGE(cache));
row->trx_query_cs = innobase_get_charset(trx->mysql_thd);
if (row->trx_query == NULL) {
return(FALSE);

View File

@ -1160,7 +1160,7 @@ trx_sys_file_format_max_check(
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: highest supported file format is %s.\n",
" InnoDB: highest supported file format is %s.\n",
trx_sys_file_format_id_to_name(DICT_TF_FORMAT_MAX));
if (format_id > DICT_TF_FORMAT_MAX) {
@ -1169,7 +1169,7 @@ trx_sys_file_format_max_check(
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: %s: the system tablespace is in a file "
" InnoDB: %s: the system tablespace is in a file "
"format that this version doesn't support - %s\n",
((max_format_id <= DICT_TF_FORMAT_MAX)
? "Error" : "Warning"),

View File

@ -74,7 +74,7 @@ ut_dbg_assertion_failed(
" or crashes, even\n"
"InnoDB: immediately after the mysqld startup, there may be\n"
"InnoDB: corruption in the InnoDB tablespace. Please refer to\n"
"InnoDB: " REFMAN "forcing-recovery.html\n"
"InnoDB: " REFMAN "forcing-innodb-recovery.html\n"
"InnoDB: about forcing recovery.\n", stderr);
#if defined(UNIV_SYNC_DEBUG) || !defined(UT_DBG_USE_ABORT)
ut_dbg_stop_threads = TRUE;