Merge jlindstrom@bk-internal.mysql.com:/home/bk/mysql-5.0
into hundin.mysql.fi:/home/jan/mysql-5.0

sql/ha_innodb.cc: Auto merged
commit c4409a7cbf
@@ -1642,7 +1642,7 @@ btr_cur_optimistic_update(
btr_search_update_hash_on_delete(cursor);
page_cur_delete_rec(page_cursor, index, mtr);
page_cur_delete_rec(page_cursor, index, offsets, mtr);
page_cur_move_to_prev(page_cursor);

@@ -1885,7 +1885,7 @@ btr_cur_pessimistic_update(
btr_search_update_hash_on_delete(cursor);
page_cur_delete_rec(page_cursor, index, mtr);
page_cur_delete_rec(page_cursor, index, offsets, mtr);
page_cur_move_to_prev(page_cursor);

@@ -2401,6 +2401,7 @@ btr_cur_optimistic_delete(
mem_heap_t* heap = NULL;
ulint offsets_[100] = { 100, };
ulint* offsets = offsets_;
ibool no_compress_needed;
ut_ad(mtr_memo_contains(mtr, buf_block_align(btr_cur_get_page(cursor)),
MTR_MEMO_PAGE_X_FIX));

@@ -2414,9 +2415,11 @@ btr_cur_optimistic_delete(
offsets = rec_get_offsets(rec, cursor->index, offsets,
ULINT_UNDEFINED, &heap);
if (!rec_offs_any_extern(offsets)
no_compress_needed = !rec_offs_any_extern(offsets)
&& btr_cur_can_delete_without_compress(
cursor, rec_offs_size(offsets), mtr)) {
cursor, rec_offs_size(offsets), mtr);
if (no_compress_needed) {
lock_update_delete(rec);

@@ -2425,20 +2428,17 @@ btr_cur_optimistic_delete(
max_ins_size = page_get_max_insert_size_after_reorganize(page,
1);
page_cur_delete_rec(btr_cur_get_page_cur(cursor),
cursor->index, mtr);
cursor->index, offsets, mtr);
ibuf_update_free_bits_low(cursor->index, page, max_ins_size,
mtr);
if (heap) {
mem_heap_free(heap);
}
return(TRUE);
}
if (heap) {
mem_heap_free(heap);
}
return(FALSE);
return(no_compress_needed);
}
/*****************************************************************

@@ -2478,6 +2478,7 @@ btr_cur_pessimistic_delete(
ibool success;
ibool ret = FALSE;
mem_heap_t* heap;
ulint* offsets;
page = btr_cur_get_page(cursor);
tree = btr_cur_get_tree(cursor);

@@ -2503,20 +2504,20 @@ btr_cur_pessimistic_delete(
}
}
heap = mem_heap_create(256);
heap = mem_heap_create(1024);
rec = btr_cur_get_rec(cursor);
offsets = rec_get_offsets(rec, cursor->index,
NULL, ULINT_UNDEFINED, &heap);
/* Free externally stored fields if the record is neither
a node pointer nor in two-byte format.
This avoids unnecessary calls to rec_get_offsets(). */
This avoids an unnecessary loop. */
if (cursor->index->table->comp
? !rec_get_node_ptr_flag(rec)
: !rec_get_1byte_offs_flag(rec)) {
btr_rec_free_externally_stored_fields(cursor->index,
rec, rec_get_offsets(rec, cursor->index,
NULL, ULINT_UNDEFINED, &heap),
in_rollback, mtr);
mem_heap_empty(heap);
rec, offsets, in_rollback, mtr);
}
if ((page_get_n_recs(page) < 2)

@@ -2568,7 +2569,8 @@ btr_cur_pessimistic_delete(
btr_search_update_hash_on_delete(cursor);
page_cur_delete_rec(btr_cur_get_page_cur(cursor), cursor->index, mtr);
page_cur_delete_rec(btr_cur_get_page_cur(cursor), cursor->index,
offsets, mtr);
ut_ad(btr_check_node_ptr(tree, page, mtr));
@@ -729,14 +729,17 @@ dict_drop_index_tree(
/***********************************************************************
Truncates the index tree associated with a row in SYS_INDEXES table. */
void
ulint
dict_truncate_index_tree(
/*=====================*/
/* out: new root page number, or
FIL_NULL on failure */
dict_table_t* table, /* in: the table the index belongs to */
rec_t* rec, /* in: record in the clustered index of
SYS_INDEXES table */
mtr_t* mtr) /* in: mtr having the latch
on the record page */
on the record page. The mtr may be
committed and restarted in this call. */
{
ulint root_page_no;
ulint space;

@@ -761,7 +764,10 @@ dict_truncate_index_tree(
if (root_page_no == FIL_NULL) {
/* The tree has been freed. */
return;
ut_print_timestamp(stderr);
fprintf(stderr, " InnoDB: Trying to TRUNCATE"
" a missing index of table %s!\n", table->name);
return(FIL_NULL);
}
ptr = rec_get_nth_field_old(rec,

@@ -775,7 +781,10 @@ dict_truncate_index_tree(
/* It is a single table tablespace and the .ibd file is
missing: do nothing */
return;
ut_print_timestamp(stderr);
fprintf(stderr, " InnoDB: Trying to TRUNCATE"
" a missing .ibd file of table %s!\n", table->name);
return(FIL_NULL);
}
ptr = rec_get_nth_field_old(rec,

@@ -801,6 +810,20 @@ dict_truncate_index_tree(
space, root_page_no, RW_X_LATCH, mtr));
btr_free_root(space, root_page_no, mtr);
/* We will temporarily write FIL_NULL to the PAGE_NO field
in SYS_INDEXES, so that the database will not get into an
inconsistent state in case it crashes between the mtr_commit()
below and the following mtr_commit() call. */
page_rec_write_index_page_no(rec, DICT_SYS_INDEXES_PAGE_NO_FIELD,
FIL_NULL, mtr);
/* We will need to commit the mini-transaction in order to avoid
deadlocks in the btr_create() call, because otherwise we would
be freeing and allocating pages in the same mini-transaction. */
mtr_commit(mtr);
/* mtr_commit() will invalidate rec. */
rec = NULL;
mtr_start(mtr);
/* Find the index corresponding to this SYS_INDEXES record. */
for (index = UT_LIST_GET_FIRST(table->indexes);

@@ -814,11 +837,17 @@ dict_truncate_index_tree(
root_page_no = btr_create(type, space, index_id, comp, mtr);
if (index) {
index->tree->page = root_page_no;
} else {
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Index %lu %lu of table %s is missing\n"
"InnoDB: from the data dictionary during TRUNCATE!\n",
ut_dulint_get_high(index_id),
ut_dulint_get_low(index_id),
table->name);
}
page_rec_write_index_page_no(rec,
DICT_SYS_INDEXES_PAGE_NO_FIELD,
root_page_no, mtr);
return(root_page_no);
}
/*************************************************************************
@@ -56,14 +56,17 @@ dict_create_index_step(
/***********************************************************************
Truncates the index tree associated with a row in SYS_INDEXES table. */
void
ulint
dict_truncate_index_tree(
/*=====================*/
/* out: new root page number, or
FIL_NULL on failure */
dict_table_t* table, /* in: the table the index belongs to */
rec_t* rec, /* in: record in the clustered index of
SYS_INDEXES table */
mtr_t* mtr); /* in: mtr having the latch
on the record page */
on the record page. The mtr may be
committed and restarted in this call. */
/***********************************************************************
Drops the index tree associated with a row in SYS_INDEXES table. */
@@ -182,9 +182,10 @@ next record after the deleted one. */
void
page_cur_delete_rec(
/*================*/
page_cur_t* cursor, /* in: a page cursor */
dict_index_t* index, /* in: record descriptor */
mtr_t* mtr); /* in: mini-transaction handle */
page_cur_t* cursor, /* in: a page cursor */
dict_index_t* index, /* in: record descriptor */
const ulint* offsets,/* in: rec_get_offsets(cursor->rec, index) */
mtr_t* mtr); /* in: mini-transaction handle */
/********************************************************************
Searches the right position for a page cursor. */
UNIV_INLINE
@@ -528,7 +528,7 @@ page_mem_free(
/*==========*/
page_t* page, /* in: index page */
rec_t* rec, /* in: pointer to the (origin of) record */
dict_index_t* index); /* in: record descriptor */
const ulint* offsets);/* in: array returned by rec_get_offsets() */
/**************************************************************
The index page creation function. */
@@ -777,20 +777,31 @@ page_mem_free(
/*==========*/
page_t* page, /* in: index page */
rec_t* rec, /* in: pointer to the (origin of) record */
dict_index_t* index) /* in: record descriptor */
const ulint* offsets)/* in: array returned by rec_get_offsets() */
{
rec_t* free;
ulint garbage;
ut_ad(rec_offs_validate(rec, NULL, offsets));
free = page_header_get_ptr(page, PAGE_FREE);
page_rec_set_next(rec, free);
page_header_set_ptr(page, PAGE_FREE, rec);
#if 0 /* It's better not to destroy the user's data. */
/* Clear the data bytes of the deleted record in order to improve
the compression ratio of the page and to make it easier to read
page dumps in corruption reports. The extra bytes of the record
cannot be cleared, because page_mem_alloc() needs them in order
to determine the size of the deleted record. */
memset(rec, 0, rec_offs_data_size(offsets));
#endif
garbage = page_header_get_field(page, PAGE_GARBAGE);
page_header_set_field(page, PAGE_GARBAGE,
garbage + rec_get_size(rec, index));
garbage + rec_offs_size(offsets));
}
#ifdef UNIV_MATERIALIZE
@@ -435,15 +435,6 @@ rec_offs_size(
/* out: size */
const ulint* offsets);/* in: array returned by rec_get_offsets() */
/**************************************************************
Returns the total size of a physical record. */
ulint
rec_get_size(
/*=========*/
/* out: size */
rec_t* rec, /* in: physical record */
dict_index_t* index); /* in: record descriptor */
/**************************************************************
Returns a pointer to the start of the record. */
UNIV_INLINE
byte*
@@ -1437,7 +1437,7 @@ loop:
/* This page is allocated from the buffer pool and used in the function
below */
page_t* recv_backup_application_page = NULL;
static page_t* recv_backup_application_page = NULL;
/***********************************************************************
Applies log records in the hash table to a backup. */
@@ -1267,9 +1267,18 @@ page_cur_parse_delete_rec(
ut_a(offset <= UNIV_PAGE_SIZE);
if (page) {
page_cur_position(page + offset, &cursor);
mem_heap_t* heap = NULL;
ulint offsets_[100] = { 100, };
rec_t* rec = page + offset;
page_cur_delete_rec(&cursor, index, mtr);
page_cur_position(rec, &cursor);
page_cur_delete_rec(&cursor, index,
rec_get_offsets(rec, index, offsets_,
ULINT_UNDEFINED, &heap), mtr);
if (heap) {
mem_heap_free(heap);
}
}
return(ptr);

@@ -1284,6 +1293,7 @@ page_cur_delete_rec(
/*================*/
page_cur_t* cursor, /* in: a page cursor */
dict_index_t* index, /* in: record descriptor */
const ulint* offsets,/* in: rec_get_offsets(cursor->rec, index) */
mtr_t* mtr) /* in: mini-transaction handle */
{
page_dir_slot_t* cur_dir_slot;

@@ -1300,6 +1310,7 @@ page_cur_delete_rec(
page = page_cur_get_page(cursor);
current_rec = cursor->rec;
ut_ad(rec_offs_validate(current_rec, index, offsets));
/* The record must not be the supremum or infimum record. */
ut_ad(current_rec != page_get_supremum_rec(page));

@@ -1365,7 +1376,7 @@ page_cur_delete_rec(
page_dir_slot_set_n_owned(cur_dir_slot, cur_n_owned - 1);
/* 6. Free the memory occupied by the record */
page_mem_free(page, current_rec, index);
page_mem_free(page, current_rec, offsets);
/* 7. Now we have decremented the number of owned records of the slot.
If the number drops below PAGE_DIR_SLOT_MIN_N_OWNED, we balance the
@@ -416,7 +416,7 @@ page_create(
mem_heap_free(heap);
/* 4. INITIALIZE THE PAGE HEADER */
/* 4. INITIALIZE THE PAGE */
page_header_set_field(page, PAGE_N_DIR_SLOTS, 2);
page_header_set_ptr(page, PAGE_HEAP_TOP, heap_top);

@@ -428,7 +428,9 @@ page_create(
page_header_set_field(page, PAGE_N_DIRECTION, 0);
page_header_set_field(page, PAGE_N_RECS, 0);
page_set_max_trx_id(page, ut_dulint_zero);
memset(heap_top, 0, UNIV_PAGE_SIZE - PAGE_EMPTY_DIR_START
- (heap_top - page));
/* 5. SET POINTERS IN RECORDS AND DIR SLOTS */
/* Set the slots to point to infimum and supremum. */

@@ -829,12 +831,18 @@ page_delete_rec_list_start(
{
page_cur_t cur1;
ulint log_mode;
ulint offsets_[100] = { 100, };
ulint* offsets = offsets_;
mem_heap_t* heap = NULL;
byte type;
page_delete_rec_list_write_log(page, rec, index,
index->table->comp
? MLOG_COMP_LIST_START_DELETE
: MLOG_LIST_START_DELETE,
mtr);
if (index->table->comp) {
type = MLOG_COMP_LIST_START_DELETE;
} else {
type = MLOG_LIST_START_DELETE;
}
page_delete_rec_list_write_log(page, rec, index, type, mtr);
page_cur_set_before_first(page, &cur1);

@@ -850,8 +858,13 @@ page_delete_rec_list_start(
log_mode = mtr_set_log_mode(mtr, MTR_LOG_NONE);
while (page_cur_get_rec(&cur1) != rec) {
offsets = rec_get_offsets(page_cur_get_rec(&cur1), index,
offsets, ULINT_UNDEFINED, &heap);
page_cur_delete_rec(&cur1, index, offsets, mtr);
}
page_cur_delete_rec(&cur1, index, mtr);
if (heap) {
mem_heap_free(heap);
}
/* Restore log mode */
@@ -620,7 +620,7 @@ rec_set_nth_field_extern_bit_new(
mlog_write_ulint(lens + 1, len,
MLOG_1BYTE, mtr);
} else {
lens[1] = len;
lens[1] = (byte) len;
}
return;
}

@@ -658,29 +658,6 @@ rec_set_field_extern_bits(
}
}
/**************************************************************
Returns the total size of a physical record. */
ulint
rec_get_size(
/*=========*/
/* out: size */
rec_t* rec, /* in: physical record */
dict_index_t* index) /* in: record descriptor */
{
mem_heap_t* heap = NULL;
ulint offsets_[100 + REC_OFFS_HEADER_SIZE]
= { 100, };
ulint* offsets = rec_get_offsets(rec, index, offsets_,
ULINT_UNDEFINED, &heap);
ulint size = rec_offs_size(offsets);
if (heap) {
mem_heap_free(heap);
}
return(size);
}
/***************************************************************
Sets an old-style record field to SQL null.
The physical size of the field is not changed. */

@@ -935,13 +912,13 @@ init:
|| dtype_get_mtype(type) == DATA_BLOB);
if (len < 128 || (dtype_get_len(type) < 256
&& dtype_get_mtype(type) != DATA_BLOB)) {
*lens-- = len;
*lens-- = (byte) len;
}
else {
/* the extern bits will be set later */
ut_ad(len < 16384);
*lens-- = len >> 8 | 0x80;
*lens-- = len;
*lens-- = (byte) (len >> 8) | 0x80;
*lens-- = (byte) len;
}
}
copy:
@@ -2615,6 +2615,7 @@ do not allow the TRUNCATE. We also reserve the data dictionary latch. */
rec_t* rec;
const byte* field;
ulint len;
ulint root_page_no;
if (!btr_pcur_is_on_user_rec(&pcur, &mtr)) {
/* The end of SYS_INDEXES has been reached. */

@@ -2633,11 +2634,33 @@ do not allow the TRUNCATE. We also reserve the data dictionary latch. */
if (rec_get_deleted_flag(rec, FALSE)) {
/* The index has been dropped. */
continue;
goto next_rec;
}
dict_truncate_index_tree(table, rec, &mtr);
btr_pcur_store_position(&pcur, &mtr);
/* This call may commit and restart mtr. */
root_page_no = dict_truncate_index_tree(table, rec, &mtr);
btr_pcur_restore_position(BTR_MODIFY_LEAF, &pcur, &mtr);
rec = btr_pcur_get_rec(&pcur);
if (root_page_no != FIL_NULL) {
page_rec_write_index_page_no(rec,
DICT_SYS_INDEXES_PAGE_NO_FIELD,
root_page_no, &mtr);
/* We will need to commit and restart the
mini-transaction in order to avoid deadlocks.
The dict_truncate_index_tree() call has allocated
a page in this mini-transaction, and the rest of
this loop could latch another index page. */
mtr_commit(&mtr);
mtr_start(&mtr);
btr_pcur_restore_position(BTR_MODIFY_LEAF,
&pcur, &mtr);
}
next_rec:
btr_pcur_move_to_next_user_rec(&pcur, &mtr);
}
@@ -1501,7 +1501,7 @@ srv_suspend_mysql_thread(
ut_usectime(&sec, &ms);
finish_time = (ib_longlong)sec * 1000000 + ms;
diff_time = finish_time - start_time;
diff_time = (ulint) (finish_time - start_time);
srv_n_lock_wait_current_count--;
srv_n_lock_wait_time = srv_n_lock_wait_time + diff_time;

@@ -1799,9 +1799,12 @@ srv_export_innodb_status(void)
export_vars.innodb_row_lock_waits= srv_n_lock_wait_count;
export_vars.innodb_row_lock_current_waits= srv_n_lock_wait_current_count;
export_vars.innodb_row_lock_time= srv_n_lock_wait_time / 10000;
export_vars.innodb_row_lock_time_avg=
(srv_n_lock_wait_count > 0) ?
(srv_n_lock_wait_time / 10000 / srv_n_lock_wait_count) : 0;
if (srv_n_lock_wait_count > 0) {
export_vars.innodb_row_lock_time_avg = (ulint)
(srv_n_lock_wait_time / 10000 / srv_n_lock_wait_count);
} else {
export_vars.innodb_row_lock_time_avg = 0;
}
export_vars.innodb_row_lock_time_max= srv_n_lock_max_wait_time / 10000;
export_vars.innodb_rows_read= srv_n_rows_read;
export_vars.innodb_rows_inserted= srv_n_rows_inserted;
@@ -369,11 +369,11 @@ mutex_spin_wait(
{
ulint index; /* index of the reserved wait cell */
ulint i; /* spin round count */
#ifndef UNIV_HOTBACKUP
ib_longlong lstart_time = 0, lfinish_time; /* for timing os_wait */
ulint ltime_diff;
ulint sec;
ulint ms;
#ifndef UNIV_HOTBACKUP
uint timer_started = 0;
#endif /* !UNIV_HOTBACKUP */
ut_ad(mutex);

@@ -535,7 +535,7 @@ finish_timing:
ut_usectime(&sec, &ms);
lfinish_time= (ib_longlong)sec * 1000000 + ms;
ltime_diff= lfinish_time - lstart_time;
ltime_diff= (ulint) (lfinish_time - lstart_time);
mutex->lspent_time += ltime_diff;
if (mutex->lmax_spent_time < ltime_diff)
{
@@ -54,3 +54,19 @@ select collation(a), collation(b), collation(binary 'ccc') from t1 limit 1;
collation(a) collation(b) collation(binary 'ccc')
cp1251_bin binary binary
drop table t1;
create table t1 (
a varchar(16) character set cp1251 collate cp1251_bin not null,
b int(10) default null,
primary key(a)
) charset=cp1251;
insert into t1 (a) values ('air'),
('we'),('g'),('we_toshko'), ('s0urce'),('we_ivo'),('we_iliyan'),
('we_martin'),('vw_grado'),('vw_vasko'),('tn_vili'),('tn_kalina'),
('tn_fakira'),('vw_silvia'),('vw_starshi'),('vw_geo'),('vw_b0x1');
select * from t1 where a like 'we_%';
a b
we_iliyan NULL
we_ivo NULL
we_martin NULL
we_toshko NULL
drop table t1;
@@ -669,12 +669,24 @@ select charset(max(a)), coercibility(max(a)),
charset(min(a)), coercibility(min(a)) from t1;
charset(max(a)) coercibility(max(a)) charset(min(a)) coercibility(min(a))
latin2 2 latin2 2
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` char(1) character set latin2 default NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1
create table t2 select max(a),min(a) from t1;
show create table t2;
Table Create Table
t2 CREATE TABLE `t2` (
`max(a)` varchar(1) character set latin2 default NULL,
`min(a)` varchar(1) character set latin2 default NULL
`max(a)` char(1) character set latin2 default NULL,
`min(a)` char(1) character set latin2 default NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1
drop table t2;
create table t2 select concat(a) from t1;
show create table t2;
Table Create Table
t2 CREATE TABLE `t2` (
`concat(a)` varchar(1) character set latin2 default NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1
drop table t2,t1;
create table t1 (a int);

@@ -757,6 +769,15 @@ one 2
two 2
three 1
drop table t1;
create table t1(a int, b datetime);
insert into t1 values (1, NOW()), (2, NOW());
create table t2 select MAX(b) from t1 group by a;
show create table t2;
Table Create Table
t2 CREATE TABLE `t2` (
`MAX(b)` datetime default NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1
drop table t1, t2;
create table t1(f1 datetime);
insert into t1 values (now());
create table t2 select f2 from (select max(now()) f2 from t1) a;
@@ -129,7 +129,7 @@ FOUND_ROWS()
1
execute stmt1;
FOUND_ROWS()
0
1
deallocate prepare stmt1;
drop table t1;
create table t1

@@ -246,6 +246,31 @@ SELECT FOUND_ROWS();
FOUND_ROWS()
0
DROP TABLE t1;
SELECT 'foo';
foo
foo
SELECT FOUND_ROWS();
FOUND_ROWS()
1
SELECT SQL_CALC_FOUND_ROWS 'foo';
foo
foo
SELECT FOUND_ROWS();
FOUND_ROWS()
1
SELECT SQL_CALC_FOUND_ROWS 'foo' limit 0;
foo
SELECT FOUND_ROWS();
FOUND_ROWS()
1
SELECT FOUND_ROWS();
FOUND_ROWS()
1
SELECT SQL_CALC_FOUND_ROWS 'foo' UNION SELECT 'bar' LIMIT 0;
foo
SELECT FOUND_ROWS();
FOUND_ROWS()
2
CREATE TABLE t1 (a int, b int);
INSERT INTO t1 VALUES (1,2), (1,3), (1,4), (1,5);
SELECT SQL_CALC_FOUND_ROWS DISTINCT 'a' FROM t1 GROUP BY b LIMIT 2;
@@ -78,4 +78,19 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ref b b 21 test.t1.b 6 Using where
SET MAX_SEEKS_FOR_KEY=DEFAULT;
drop table t1;
create table t1 (a int);
insert into t1 values (1),(2),(3),(4),(5);
insert into t1 select * from t1;
insert into t1 select * from t1;
insert into t1 select * from t1;
set local max_join_size=8;
select * from (select * from t1) x;
ERROR 42000: The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET SQL_MAX_JOIN_SIZE=# if the SELECT is okay
set local max_join_size=1;
select * from (select * from t1 a, t1 b) x;
ERROR 42000: The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET SQL_MAX_JOIN_SIZE=# if the SELECT is okay
set local max_join_size=1;
select * from (select 1 union select 2 union select 3) x;
ERROR 42000: The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET SQL_MAX_JOIN_SIZE=# if the SELECT is okay
drop table t1;
SET SQL_SAFE_UPDATES=0,SQL_SELECT_LIMIT=DEFAULT, SQL_MAX_JOIN_SIZE=DEFAULT;
@@ -254,7 +254,7 @@ create table t1 (a int not null);
create table t2 select max(a) from t1;
show columns from t2;
Field Type Null Key Default Extra
max(a) bigint(20) YES NULL
max(a) int(11) YES NULL
drop table t1,t2;
create table t1 (c decimal, d double, f float, r real);
show columns from t1;
@@ -32,3 +32,17 @@ select * from t1 where lower(b)='bbb';
select charset(a), charset(b), charset(binary 'ccc') from t1 limit 1;
select collation(a), collation(b), collation(binary 'ccc') from t1 limit 1;
drop table t1;

# Test for BUG#8560
create table t1 (
a varchar(16) character set cp1251 collate cp1251_bin not null,
b int(10) default null,
primary key(a)
) charset=cp1251;
insert into t1 (a) values ('air'),
('we'),('g'),('we_toshko'), ('s0urce'),('we_ivo'),('we_iliyan'),
('we_martin'),('vw_grado'),('vw_vasko'),('tn_vili'),('tn_kalina'),
('tn_fakira'),('vw_silvia'),('vw_starshi'),('vw_geo'),('vw_b0x1');

select * from t1 where a like 'we_%';
drop table t1;
@@ -395,8 +395,12 @@ create table t1 (a char character set latin2);
insert into t1 values ('a'),('b');
select charset(max(a)), coercibility(max(a)),
charset(min(a)), coercibility(min(a)) from t1;
show create table t1;
create table t2 select max(a),min(a) from t1;
show create table t2;
drop table t2;
create table t2 select concat(a) from t1;
show create table t2;
drop table t2,t1;

#

@@ -479,6 +483,15 @@ INSERT INTO t1 VALUES
select val, count(*) from t1 group by val;
drop table t1;

#
# Bug #5615: type of aggregate function column wrong when using group by
#
create table t1(a int, b datetime);
insert into t1 values (1, NOW()), (2, NOW());
create table t2 select MAX(b) from t1 group by a;
show create table t2;
drop table t1, t2;

#
# Bug 7833: Wrong datatype of aggregate column is returned
@@ -167,6 +167,21 @@ SELECT SQL_CALC_FOUND_ROWS * FROM t1 WHERE a = 0 GROUP BY a HAVING a > 10;
SELECT FOUND_ROWS();
DROP TABLE t1;

#
# Bug #6089: queries which don't use any tables
#
SELECT 'foo';
SELECT FOUND_ROWS();
SELECT SQL_CALC_FOUND_ROWS 'foo';
SELECT FOUND_ROWS();
SELECT SQL_CALC_FOUND_ROWS 'foo' limit 0;
SELECT FOUND_ROWS();
SELECT FOUND_ROWS();

SELECT SQL_CALC_FOUND_ROWS 'foo' UNION SELECT 'bar' LIMIT 0;
SELECT FOUND_ROWS();

#
# Bug #7945: group by + distinct with constant expression + limit
#
@@ -66,4 +66,24 @@ SET MAX_SEEKS_FOR_KEY=DEFAULT;

drop table t1;

# BUG#8726
create table t1 (a int);
insert into t1 values (1),(2),(3),(4),(5);
insert into t1 select * from t1;
insert into t1 select * from t1;
insert into t1 select * from t1;

set local max_join_size=8;
--error 1104
select * from (select * from t1) x;

set local max_join_size=1;
--error 1104
select * from (select * from t1 a, t1 b) x;

set local max_join_size=1;
--error 1104
select * from (select 1 union select 2 union select 3) x;
drop table t1;

SET SQL_SAFE_UPDATES=0,SQL_SELECT_LIMIT=DEFAULT, SQL_MAX_JOIN_SIZE=DEFAULT;
@@ -85,9 +85,7 @@ int my_msync(int fd, void *addr, size_t len, int flags)

#endif

#ifdef _WINDOWS
#pragma message "no mmap!"
#else
#ifndef __WIN__
#warning "no mmap!"
#endif
@@ -74,8 +74,8 @@ testReadPerf_SOURCES = testReadPerf.cpp
testLcp_SOURCES = testLcp.cpp
testPartitioning_SOURCES = testPartitioning.cpp
testBitfield_SOURCES = testBitfield.cpp
DbCreate_SOURCES= bench/mainPopulate.cpp bench/dbPopulate.cpp bench/userInterface.cpp
DbAsyncGenerator_SOURCES= bench/mainAsyncGenerator.cpp bench/asyncGenerator.cpp bench/ndb_async2.cpp
DbCreate_SOURCES = bench/mainPopulate.cpp bench/dbPopulate.cpp bench/userInterface.cpp bench/dbPopulate.h bench/userInterface.h bench/testData.h bench/testDefinitions.h bench/ndb_schema.hpp bench/ndb_error.hpp
DbAsyncGenerator_SOURCES = bench/mainAsyncGenerator.cpp bench/asyncGenerator.cpp bench/ndb_async2.cpp bench/dbGenerator.h bench/macros.h bench/userInterface.h bench/testData.h bench/testDefinitions.h bench/ndb_schema.hpp bench/ndb_error.hpp

INCLUDES_LOC = -I$(top_srcdir)/ndb/include/kernel
223 sql/ha_innodb.cc
@@ -1,4 +1,4 @@
/* Copyright (C) 2000 MySQL AB & Innobase Oy
/* Copyright (C) 2000-2005 MySQL AB & Innobase Oy
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by

@@ -336,14 +336,18 @@ innobase_release_temporary_latches(
/*===============================*/
THD *thd)
{
trx_t* trx;
if (!innodb_inited) {
return;
}
trx_t *trx= (trx_t*) thd->ha_data[innobase_hton.slot];
if (trx)
innobase_release_stat_resources(trx);
trx = (trx_t*) thd->ha_data[innobase_hton.slot];
if (trx) {
innobase_release_stat_resources(trx);
}
}
/************************************************************************

@@ -743,14 +747,15 @@ transaction internally. */
static
void
register_trans(
/*============*/
/*===========*/
THD* thd) /* in: thd to use the handle */
{
/* register the start of the statement */
/* Register the start of the statement */
trans_register_ha(thd, FALSE, &innobase_hton);
if (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) {
/* no autocommit mode, register for a transaction */
/* No autocommit mode, register for a transaction */
trans_register_ha(thd, TRUE, &innobase_hton);
}
}

@@ -1051,7 +1056,7 @@ ha_innobase::init_table_handle_for_HANDLER(void)
/*************************************************************************
Opens an InnoDB database. */
handlerton *
handlerton*
innobase_init(void)
/*===============*/
/* out: TRUE if error */

@@ -1220,7 +1225,7 @@ innobase_init(void)
srv_print_verbose_log = mysqld_embedded ? 0 : 1;
/* Store the default charset-collation number of this MySQL
/* Store the default charset-collation number of this MySQL
installation */
data_mysql_default_charset_coll = (ulint)default_charset_info->number;
@@ -1346,14 +1351,16 @@ innobase_commit_low(
return;
}
/* The following will be enabled later when we put the 4.1 functionality back
to 5.0. */
#ifdef DISABLE_HAVE_REPLICATION
if (current_thd->slave_thread) {
/* Update the replication position info inside InnoDB */
trx->mysql_master_log_file_name
= active_mi->rli.group_master_log_name;
trx->mysql_master_log_pos= ((ib_longlong)
active_mi->rli.future_group_master_log_pos);
trx->mysql_master_log_pos = ((ib_longlong)
active_mi->rli.future_group_master_log_pos);
}
#endif /* HAVE_REPLICATION */

@@ -1456,7 +1463,8 @@ innobase_commit(
"InnoDB: but trx->conc_state != TRX_NOT_STARTED\n");
}
if (all || (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))) {
if (all
|| (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))) {
/* We were instructed to commit the whole transaction, or
this is an SQL statement end and autocommit is on */

@@ -1489,10 +1497,9 @@ innobase_commit(
DBUG_RETURN(0);
}
/*
don't delete it - it may be re-enabled later
as an optimization for the most common case InnoDB+binlog
*/
/* The following defined-out code will be enabled later when we put the
MySQL-4.1 functionality back to 5.0. This is needed to get InnoDB Hot Backup
to work. */
#if 0
/*********************************************************************
This is called when MySQL writes the binlog entry for the current

@@ -1627,7 +1634,8 @@ innobase_rollback(
row_unlock_table_autoinc_for_mysql(trx);
}
if (all || (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))) {
if (all
|| (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))) {
error = trx_rollback_for_mysql(trx);
trx->active_trans = 0;
@@ -1686,6 +1694,7 @@ innobase_rollback_to_savepoint(
ib_longlong mysql_binlog_cache_pos;
int error = 0;
trx_t* trx;
char name[64];
DBUG_ENTER("innobase_rollback_to_savepoint");

@@ -1698,8 +1707,8 @@ innobase_rollback_to_savepoint(
innobase_release_stat_resources(trx);
/* TODO: use provided savepoint data area to store savepoint data */
char name[64];
longlong2str((ulonglong)savepoint,name,36);
longlong2str((ulonglong)savepoint, name, 36);
error = trx_rollback_to_savepoint_for_mysql(trx, name,
&mysql_binlog_cache_pos);

@@ -1708,26 +1717,27 @@ innobase_rollback_to_savepoint(
/*********************************************************************
Release transaction savepoint name. */
static int
static
int
innobase_release_savepoint(
/*===========================*/
/*=======================*/
/* out: 0 if success, HA_ERR_NO_SAVEPOINT if
no savepoint with the given name */
THD* thd, /* in: handle to the MySQL thread of the user
whose transaction should be rolled back */
void *savepoint) /* in: savepoint data */
void* savepoint) /* in: savepoint data */
{
int error = 0;
trx_t* trx;
char name[64];
DBUG_ENTER("innobase_release_savepoint");
trx = check_trx_exists(thd);
/* TODO: use provided savepoint data area to store savepoint data */
char name[64];
longlong2str((ulonglong)savepoint,name,36);
longlong2str((ulonglong)savepoint, name, 36);
error = trx_release_savepoint_for_mysql(trx, name);

@@ -1736,13 +1746,13 @@ innobase_release_savepoint(
/*********************************************************************
Sets a transaction savepoint. */
static int
static
int
innobase_savepoint(
/*===============*/
/* out: always 0, that is, always succeeds */
THD* thd, /* in: handle to the MySQL thread */
void *savepoint) /* in: savepoint data */
void* savepoint) /* in: savepoint data */
{
int error = 0;
trx_t* trx;
@@ -1911,7 +1921,8 @@ ha_innobase::open(
fields when packed actually became 1 byte longer, when we also
stored the string length as the first byte. */
upd_and_key_val_buff_len = table->s->reclength + table->s->max_key_length
upd_and_key_val_buff_len =
table->s->reclength + table->s->max_key_length
+ MAX_REF_PARTS * 3;
if (!(mysql_byte*) my_multi_malloc(MYF(MY_WME),
&upd_buff, upd_and_key_val_buff_len,

@@ -1963,7 +1974,8 @@ ha_innobase::open(
innobase_prebuilt = row_create_prebuilt(ib_table);
((row_prebuilt_t*)innobase_prebuilt)->mysql_row_len = table->s->reclength;
((row_prebuilt_t*)innobase_prebuilt)->mysql_row_len =
table->s->reclength;
/* Looks like MySQL-3.23 sometimes has primary key number != 0 */

@@ -1985,13 +1997,11 @@ ha_innobase::open(
((row_prebuilt_t*)innobase_prebuilt)
->clust_index_was_generated = FALSE;
/*
MySQL allocates the buffer for ref. key_info->key_length
includes space for all key columns + one byte for each column
that may be NULL. ref_length must be as exact as possible to
save space, because all row reference buffers are allocated
based on ref_length.
*/
/* MySQL allocates the buffer for ref. key_info->key_length
includes space for all key columns + one byte for each column
that may be NULL. ref_length must be as exact as possible to
save space, because all row reference buffers are allocated
based on ref_length. */
ref_length = table->key_info[primary_key].key_length;
} else {

@@ -2013,15 +2023,13 @@ ha_innobase::open(
ref_length = DATA_ROW_ID_LEN;
/*
If we automatically created the clustered index, then
MySQL does not know about it, and MySQL must NOT be aware
of the index used on scan, to make it avoid checking if we
update the column of the index. That is why we assert below
that key_used_on_scan is the undefined value MAX_KEY.
The column is the row id in the automatical generation case,
and it will never be updated anyway.
*/
/* If we automatically created the clustered index, then
MySQL does not know about it, and MySQL must NOT be aware
of the index used on scan, to make it avoid checking if we
update the column of the index. That is why we assert below
that key_used_on_scan is the undefined value MAX_KEY.
The column is the row id in the automatical generation case,
and it will never be updated anyway. */
if (key_used_on_scan != MAX_KEY) {
fprintf(stderr,
@@ -2611,7 +2619,8 @@ ha_innobase::write_row(
"InnoDB: Dump of 200 bytes around transaction.all: ",
stderr);
ut_print_buf(stderr,
((byte*)(&(current_thd->ha_data[innobase_hton.slot]))) - 100, 200);
((byte*)(&(current_thd->ha_data[innobase_hton.slot]))) - 100,
200);
putc('\n', stderr);
ut_error;
}

@@ -2646,7 +2655,7 @@ ha_innobase::write_row(
src_table = lock_get_src_table(
prebuilt->trx, prebuilt->table, &mode);
if (!src_table) {
no_commit:
no_commit:
/* Unknown situation: do not commit */
/*
ut_print_timestamp(stderr);

@@ -2669,6 +2678,7 @@ ha_innobase::write_row(
} else {
/* Ensure that there are no other table locks than
LOCK_IX and LOCK_AUTO_INC on the destination table. */
if (!lock_is_table_exclusive(prebuilt->table,
prebuilt->trx)) {
goto no_commit;

@@ -2746,11 +2756,11 @@ ha_innobase::write_row(
if (error == DB_SUCCESS && auto_inc_used) {
/* Fetch the value that was set in the autoincrement field */
/* Fetch the value that was set in the autoincrement field */
auto_inc = table->next_number_field->val_int();
auto_inc = table->next_number_field->val_int();
if (auto_inc != 0) {
if (auto_inc != 0) {
/* This call will calculate the max of the current
value and the value supplied by the user and
update the counter accordingly */

@@ -2762,15 +2772,15 @@ ha_innobase::write_row(
The lock is released at each SQL statement's
end. */
error = row_lock_table_autoinc_for_mysql(prebuilt);
error = row_lock_table_autoinc_for_mysql(prebuilt);
if (error != DB_SUCCESS) {
error = convert_error_code_to_mysql(error, user_thd);
goto func_exit;
}
dict_table_autoinc_update(prebuilt->table, auto_inc);
}
if (error != DB_SUCCESS) {
error = convert_error_code_to_mysql(error,
user_thd);
goto func_exit;
}
dict_table_autoinc_update(prebuilt->table, auto_inc);
}
}
innodb_srv_conc_exit_innodb(prebuilt->trx);

@@ -2785,7 +2795,6 @@ func_exit:
DBUG_RETURN(error);
}
/******************************************************************
Converts field data for storage in an InnoDB update vector. */
inline
@@ -4511,10 +4520,10 @@ ha_innobase::records_in_range(
dict_index_t* index;
mysql_byte* key_val_buff2 = (mysql_byte*) my_malloc(
table->s->reclength
+ table->s->max_key_length + 100,
+ table->s->max_key_length + 100,
MYF(MY_WME));
ulint buff2_len = table->s->reclength
+ table->s->max_key_length + 100;
+ table->s->max_key_length + 100;
dtuple_t* range_start;
dtuple_t* range_end;
ib_longlong n_rows;

@@ -4671,21 +4680,27 @@ ha_innobase::read_time(
ha_rows total_rows;
double time_for_scan;
if (index != table->s->primary_key)
return handler::read_time(index, ranges, rows); // Not clustered
if (index != table->s->primary_key) {
/* Not clustered */
return(handler::read_time(index, ranges, rows));
}
if (rows <= 2)
return (double) rows;
if (rows <= 2) {
return((double) rows);
}
/* Assume that the read time is proportional to the scan time for all
rows + at most one seek per range. */
time_for_scan = scan_time();
if ((total_rows = estimate_rows_upper_bound()) < rows)
return time_for_scan;
if ((total_rows = estimate_rows_upper_bound()) < rows) {
return (ranges + (double) rows / (double) total_rows * time_for_scan);
return(time_for_scan);
}
return(ranges + (double) rows / (double) total_rows * time_for_scan);
}
/*************************************************************************
@@ -5103,7 +5118,7 @@ ha_innobase::get_foreign_key_list(THD *thd, List<FOREIGN_KEY_INFO> *f_key_list)
tmp_buff, i, 1);
tmp_buff+= i + 1;
f_key_info.referenced_table= make_lex_string(thd, 0,
tmp_buff, strlen(tmp_buff), 1);
tmp_buff, strlen(tmp_buff), 1);
for (i= 0;;)
{

@@ -5621,7 +5636,6 @@ innodb_export_status(void)
srv_export_innodb_status();
}

/****************************************************************************
Implements the SHOW INNODB STATUS command. Sends the output of the InnoDB
Monitor to the client. */

@@ -5633,6 +5647,8 @@ innodb_show_status(
{
Protocol *protocol= thd->protocol;
trx_t* trx;
long flen;
char* str;
DBUG_ENTER("innodb_show_status");

@@ -5649,14 +5665,13 @@ innodb_show_status(
/* We let the InnoDB Monitor to output at most 64000 bytes of text. */
long flen;
char* str;
mutex_enter_noninline(&srv_monitor_file_mutex);
rewind(srv_monitor_file);
srv_printf_innodb_monitor(srv_monitor_file);
flen = ftell(srv_monitor_file);
os_file_set_eof(srv_monitor_file);
if (flen < 0) {
flen = 0;
} else if (flen > 64000 - 1) {

@@ -5666,10 +5681,10 @@ innodb_show_status(
/* allocate buffer for the string, and
read the contents of the temporary file */
if (!(str = my_malloc(flen + 1, MYF(0))))
{
mutex_exit_noninline(&srv_monitor_file_mutex);
DBUG_RETURN(TRUE);
if (!(str = my_malloc(flen + 1, MYF(0)))) {
mutex_exit_noninline(&srv_monitor_file_mutex);
DBUG_RETURN(TRUE);
}
rewind(srv_monitor_file);

@@ -5683,7 +5698,6 @@ innodb_show_status(
if (protocol->send_fields(&field_list, Protocol::SEND_NUM_ROWS |
Protocol::SEND_EOF)) {
my_free(str, MYF(0));
DBUG_RETURN(TRUE);

@@ -5693,10 +5707,12 @@ innodb_show_status(
protocol->store(str, flen, system_charset_info);
my_free(str, MYF(0));
if (protocol->write())
DBUG_RETURN(TRUE);
if (protocol->write()) {
DBUG_RETURN(TRUE);
}
send_eof(thd);
DBUG_RETURN(FALSE);
}
@@ -6090,7 +6106,7 @@ ha_innobase::get_auto_increment()
if (error) {
return(~(ulonglong) 0);
return(~(ulonglong) 0);
}
return((ulonglong) nr);

@@ -6112,7 +6128,8 @@ ha_innobase::cmp_ref(
/* Do type-aware comparison of Primary Key members. PK members
are always NOT NULL, so no checks for NULL are performed */
KEY_PART_INFO *key_part= table->key_info[table->s->primary_key].key_part;
KEY_PART_INFO *key_part=
table->key_info[table->s->primary_key].key_part;
KEY_PART_INFO *key_part_end=
key_part + table->key_info[table->s->primary_key].key_parts;
for (; key_part != key_part_end; ++key_part) {

@@ -6257,19 +6274,21 @@ innobase_query_is_update(void)
thd = (THD *)innobase_current_thd();
if ( thd->lex->sql_command == SQLCOM_REPLACE ||
thd->lex->sql_command == SQLCOM_REPLACE_SELECT ||
( thd->lex->sql_command == SQLCOM_LOAD &&
thd->lex->duplicates == DUP_REPLACE )) {
return true;
if (thd->lex->sql_command == SQLCOM_REPLACE ||
thd->lex->sql_command == SQLCOM_REPLACE_SELECT ||
(thd->lex->sql_command == SQLCOM_LOAD &&
thd->lex->duplicates == DUP_REPLACE)) {
return(1);
}
if ( thd->lex->sql_command == SQLCOM_INSERT &&
thd->lex->duplicates == DUP_UPDATE ) {
return true;
if (thd->lex->sql_command == SQLCOM_INSERT &&
thd->lex->duplicates == DUP_UPDATE) {
return(1);
}
return false;
return(0);
}
}

@@ -6305,11 +6324,20 @@ innobase_xa_prepare(
"InnoDB: but trx->conc_state != TRX_NOT_STARTED\n");
}
if (all || (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))) {
if (all
|| (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))) {
/* We were instructed to prepare the whole transaction, or
this is an SQL statement end and autocommit is on */
/* If there is no active InnoDB transaction,
trx_prepare_for_mysql() will (temporarily) start one */
if (trx->active_trans == 0) {
trx->active_trans = 1;
}
error = trx_prepare_for_mysql(trx);
} else {
/* We just mark the SQL statement ended and do not do a

@@ -6348,10 +6376,11 @@ innobase_xa_recover(
uint len) /* in: number of slots in xid_list */
{
if (len == 0 || xid_list == NULL) {
return 0;
return(0);
}
return (trx_recover_for_mysql(xid_list, len));
return(trx_recover_for_mysql(xid_list, len));
}
/***********************************************************************

@@ -6362,7 +6391,7 @@ int
innobase_commit_by_xid(
/*===================*/
/* out: 0 or error number */
XID* xid) /* in: X/Open XA Transaction Identification */
XID* xid) /* in: X/Open XA transaction identification */
{
trx_t* trx;

@@ -6385,7 +6414,7 @@ int
innobase_rollback_by_xid(
/*=====================*/
/* out: 0 or error number */
XID *xid) /* in : X/Open XA Transaction Idenfification */
XID *xid) /* in: X/Open XA transaction idenfification */
{
trx_t* trx;
@@ -22,6 +22,7 @@
#endif
#include "mysql_priv.h"
#include "sql_select.h"
Item_sum::Item_sum(List<Item> &list)
:arg_count(list.elements)

@@ -303,6 +304,21 @@ Item_sum_hybrid::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref)
return FALSE;
}
Field *Item_sum_hybrid::create_tmp_field(bool group, TABLE *table,
uint convert_blob_length)
{
if (args[0]->type() == Item::FIELD_ITEM)
{
Field *field= ((Item_field*) args[0])->field;
if ((field= create_tmp_field_from_field(current_thd, field, this, table,
0, convert_blob_length)))
field->flags&= ~NOT_NULL_FLAG;
return field;
}
return Item_sum::create_tmp_field(group, table, convert_blob_length);
}

/***********************************************************************
** reset and add of sum_func

@@ -2075,8 +2091,6 @@ my_decimal *Item_variance_field::val_decimal(my_decimal *dec_buf)
** COUNT(DISTINCT ...)
****************************************************************************/
#include "sql_select.h"
int simple_str_key_cmp(void* arg, byte* key1, byte* key2)
{
Item_sum_count_distinct* item = (Item_sum_count_distinct*)arg;
@@ -94,7 +94,6 @@ public:
Item *get_tmp_table_item(THD *thd);
virtual Field *create_tmp_field(bool group, TABLE *table,
uint convert_blob_length);
bool walk (Item_processor processor, byte *argument);
};

@@ -525,6 +524,8 @@ protected:
void cleanup();
bool any_value() { return was_values; }
void no_rows_in_result();
Field *create_tmp_field(bool group, TABLE *table,
uint convert_blob_length);
};
@@ -4268,7 +4268,7 @@ struct my_option my_long_options[] =
(gptr*) &abort_slave_event_count, (gptr*) &abort_slave_event_count,
0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
#endif /* HAVE_REPLICATION */
{"ansi", 'a', "Use ANSI SQL syntax instead of MySQL syntax.", 0, 0, 0,
{"ansi", 'a', "Use ANSI SQL syntax instead of MySQL syntax. This mode will also set transaction isolation level 'serializable'.", 0, 0, 0,
GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
{"auto-increment-increment", OPT_AUTO_INCREMENT,
"Auto-increment columns are incremented by this",
@@ -183,6 +183,7 @@ THD::THD()
lock=locked_tables=0;
used_tables=0;
cuted_fields= sent_row_count= 0L;
limit_found_rows= 0;
statement_id_counter= 0UL;
// Must be reset to handle error with THD's created for init of mysqld
lex->current_select= 0;
@@ -664,7 +664,7 @@ JOIN::optimize()
!(select_options & SELECT_DESCRIBE))
{ /* purecov: inspected */
my_message(ER_TOO_BIG_SELECT, ER(ER_TOO_BIG_SELECT), MYF(0));
error= 1; /* purecov: inspected */
error= -1;
DBUG_RETURN(1);
}
if (const_tables && !thd->locked_tables &&

@@ -1194,7 +1194,9 @@ JOIN::exec()
else
error=(int) result->send_eof();
}
thd->limit_found_rows= thd->examined_row_count= 0;
/* Single select (without union and limit) always returns 1 row */
thd->limit_found_rows= 1;
thd->examined_row_count= 0;
DBUG_VOID_RETURN;
}
thd->limit_found_rows= thd->examined_row_count= 0;

@@ -7591,10 +7593,10 @@ const_expression_in_where(COND *cond, Item *comp_item, Item **const_item)
new_created field
*/
static Field* create_tmp_field_from_field(THD *thd, Field* org_field,
Item *item, TABLE *table,
bool modify_item,
uint convert_blob_length)
Field* create_tmp_field_from_field(THD *thd, Field* org_field,
Item *item, TABLE *table,
bool modify_item,
uint convert_blob_length)
{
Field *new_field;
@@ -399,6 +399,10 @@ void copy_funcs(Item **func_ptr);
bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
int error, bool ignore_last_dupp_error);
uint find_shortest_key(TABLE *table, const key_map *usable_keys);
Field* create_tmp_field_from_field(THD *thd, Field* org_field,
Item *item, TABLE *table,
bool modify_item,
uint convert_blob_length);

/* functions from opt_sum.cc */
bool simple_pred(Item_func *func_item, Item **args, bool *inv_order);
@@ -67,6 +67,13 @@ static uchar bin_char_array[] =
};

static my_bool
my_coll_init_8bit_bin(CHARSET_INFO *cs,
void *(*alloc)(uint) __attribute__((unused)))
{
cs->max_sort_char=255;
return FALSE;
}

static int my_strnncoll_binary(CHARSET_INFO * cs __attribute__((unused)),
const uchar *s, uint slen,

@@ -443,7 +450,7 @@ skip:
MY_COLLATION_HANDLER my_collation_8bit_bin_handler =
{
NULL, /* init */
my_coll_init_8bit_bin,
my_strnncoll_8bit_bin,
my_strnncollsp_8bit_bin,
my_strnxfrm_8bit_bin,