Reduce the number of dict_table_page_size() calls

commit 67e3d1ee93
parent f648145717
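The recurring change in the hunks below: wherever the caller already holds a buf_block_t for the page, the page size cached in block->page.size is read directly instead of being re-derived from the table definition via dict_table_page_size(). A minimal sketch of the pattern follows, using simplified hypothetical stand-in types rather than the real InnoDB definitions:

#include <cassert>
#include <cstdint>
#include <cstdio>

/* Hypothetical, simplified stand-ins for the InnoDB types; they only
   illustrate the refactoring pattern, not the real definitions. */
struct page_size_t {
	uint32_t physical;
	bool equals_to(const page_size_t& other) const {
		return physical == other.physical;
	}
};

struct dict_table_t {
	uint32_t flags;		/* page size encoded in the table flags */
};

struct buf_block_t {
	struct {
		page_size_t size;	/* cached when the page was read in */
	} page;
};

/* Old style: decode the page size from the table flags on every call
   (illustrative decoding only). */
static page_size_t table_page_size_sketch(const dict_table_t* table) {
	return page_size_t{ 1024u << (table->flags & 0xF) };
}

/* New style: a caller that already holds the block simply reads the
   cached size; a debug assertion keeps the two sources in sync. */
static uint32_t physical_size(const buf_block_t* block,
			      const dict_table_t* table) {
	assert(block->page.size.equals_to(table_page_size_sketch(table)));
	return block->page.size.physical;
}

int main() {
	dict_table_t table{4};			/* 1024 << 4 = 16384 */
	buf_block_t  block{{ table_page_size_sketch(&table) }};
	std::printf("physical page size: %u\n",
		    (unsigned) physical_size(&block, &table));
	return 0;
}

The assert mirrors the ut_ad(... .equals_to(...)) checks that the diff adds to keep the cached size and the table-derived size demonstrably in sync.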
@@ -591,7 +591,7 @@ inconsistent:
 	} else {
 		col->def_val.data = btr_copy_externally_stored_field(
 			&col->def_val.len, data,
-			dict_table_page_size(index->table),
+			cur.page_cur.block->page.size,
 			len, index->table->heap);
 	}
 }
@@ -3686,9 +3686,9 @@ btr_cur_pessimistic_insert(
 			index->first_user_field())));
 
 	if (page_zip_rec_needs_ext(rec_get_converted_size(index, entry, n_ext),
-				   dict_table_is_comp(index->table),
+				   index->table->not_redundant(),
 				   dtuple_get_n_fields(entry),
-				   dict_table_page_size(index->table))
+				   btr_cur_get_block(cursor)->page.size)
 	    || UNIV_UNLIKELY(entry->is_alter_metadata())) {
 		/* The record is so big that we have to store some fields
 		externally on separate database pages */
@@ -4558,7 +4558,7 @@ any_extern:
 
 	if (page_zip_rec_needs_ext(new_rec_size, page_is_comp(page),
 				   dict_index_get_n_fields(index),
-				   dict_table_page_size(index->table))) {
+				   block->page.size)) {
 		goto any_extern;
 	}
 
@@ -7526,8 +7526,8 @@ btr_store_big_rec_extern_fields(
 	ut_ad(buf_block_get_frame(rec_block) == page_align(rec));
 	ut_a(dict_index_is_clust(index));
 
-	ut_a(dict_table_page_size(index->table)
-	     .equals_to(rec_block->page.size));
+	ut_ad(dict_table_page_size(index->table)
+	      .equals_to(rec_block->page.size));
 
 	btr_blob_log_check_t redo_log(pcur, btr_mtr, offsets, &rec_block,
 				      &rec, op);
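Note on the hunk above: the check that the table-derived page size matches the block's cached size is downgraded from ut_a to ut_ad, so (as far as I can tell from the ut0dbg conventions) it is evaluated only in UNIV_DEBUG builds and disappears from release builds. A rough sketch of that always-on vs. debug-only split, with stand-in macros rather than the real ones:

#include <cstdio>
#include <cstdlib>

/* Stand-in macros that mimic the split between an always-on assertion
   (ut_a) and a debug-only one (ut_ad); not the real ut0dbg.h code. */
#define SKETCH_UT_A(expr)						\
	do {								\
		if (!(expr)) {						\
			std::fprintf(stderr, "assert failed: %s\n", #expr); \
			std::abort();					\
		}							\
	} while (0)

#ifdef UNIV_DEBUG
# define SKETCH_UT_AD(expr) SKETCH_UT_A(expr)	/* checked in debug builds */
#else
# define SKETCH_UT_AD(expr) ((void) 0)		/* compiled out otherwise */
#endif

int main() {
	const unsigned long physical = 16384;
	SKETCH_UT_A(physical > 0);		/* always evaluated */
	SKETCH_UT_AD(physical % 1024 == 0);	/* evaluated only with UNIV_DEBUG */
	return 0;
}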
@@ -7572,15 +7572,13 @@ btr_store_big_rec_extern_fields(
 	}
 #endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */
 
-	const page_size_t page_size(dict_table_page_size(index->table));
-
 	/* Space available in compressed page to carry blob data */
-	const ulint payload_size_zip = page_size.physical()
+	const ulint payload_size_zip = rec_block->page.size.physical()
 		- FIL_PAGE_DATA;
 
 	/* Space available in uncompressed page to carry blob data */
-	const ulint payload_size = page_size.physical()
-		- FIL_PAGE_DATA - BTR_BLOB_HDR_SIZE - FIL_PAGE_DATA_END;
+	const ulint payload_size = payload_size_zip
+		- (BTR_BLOB_HDR_SIZE + FIL_PAGE_DATA_END);
 
 	/* We have to create a file segment to the tablespace
 	for each field and put the pointer to the field in rec */
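The rewritten payload_size above is the same quantity as before: with payload_size_zip = physical - FIL_PAGE_DATA, subtracting (BTR_BLOB_HDR_SIZE + FIL_PAGE_DATA_END) from it yields physical - FIL_PAGE_DATA - BTR_BLOB_HDR_SIZE - FIL_PAGE_DATA_END; the shared FIL_PAGE_DATA term is simply reused. A standalone check with illustrative constants (the real values come from the InnoDB headers and are assumed here):

#include <cassert>

int main() {
	/* Illustrative values only; the real constants live in the
	   InnoDB headers and may differ. */
	const unsigned long physical          = 16384;
	const unsigned long FIL_PAGE_DATA     = 38;
	const unsigned long FIL_PAGE_DATA_END = 8;
	const unsigned long BTR_BLOB_HDR_SIZE = 8;

	/* Old expression, as removed by the hunk above. */
	const unsigned long payload_old = physical
		- FIL_PAGE_DATA - BTR_BLOB_HDR_SIZE - FIL_PAGE_DATA_END;

	/* New expressions: the compressed-page capacity is computed once
	   and the uncompressed capacity is derived from it. */
	const unsigned long payload_size_zip = physical - FIL_PAGE_DATA;
	const unsigned long payload_new = payload_size_zip
		- (BTR_BLOB_HDR_SIZE + FIL_PAGE_DATA_END);

	assert(payload_old == payload_new);	/* same value, fewer repeated terms */
	return 0;
}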
@@ -657,7 +657,7 @@ dtuple_convert_big_rec(
 
 	while (page_zip_rec_needs_ext(rec_get_converted_size(index, entry,
 							      *n_ext),
-				      dict_table_is_comp(index->table),
+				      index->table->not_redundant(),
 				      dict_index_get_n_fields(index),
 				      dict_table_page_size(index->table))) {
 		longest_i = 0;
@@ -3282,7 +3282,7 @@ fts_fetch_doc_from_rec(
 		doc->text.f_str =
 			btr_rec_copy_externally_stored_field(
 				clust_rec, offsets,
-				dict_table_page_size(table),
+				btr_pcur_get_block(pcur)->page.size,
 				clust_pos, &doc->text.f_len,
 				static_cast<mem_heap_t*>(
 					doc->self_heap->arg));
@@ -746,14 +746,15 @@ rtr_adjust_upper_level(
 	prev_page_no = btr_page_get_prev(page, mtr);
 	next_page_no = btr_page_get_next(page, mtr);
 	space = block->page.id.space();
-	const page_size_t& page_size = dict_table_page_size(index->table);
+	ut_ad(block->page.size.equals_to(dict_table_page_size(index->table)));
 
 	/* Update page links of the level */
 	if (prev_page_no != FIL_NULL) {
 		page_id_t prev_page_id(space, prev_page_no);
 
 		buf_block_t* prev_block = btr_block_get(
-			prev_page_id, page_size, RW_X_LATCH, index, mtr);
+			prev_page_id, block->page.size, RW_X_LATCH,
+			index, mtr);
 #ifdef UNIV_BTR_DEBUG
 		ut_a(page_is_comp(prev_block->frame) == page_is_comp(page));
 		ut_a(btr_page_get_next(prev_block->frame, mtr)
@@ -769,7 +770,8 @@ rtr_adjust_upper_level(
 		page_id_t next_page_id(space, next_page_no);
 
 		buf_block_t* next_block = btr_block_get(
-			next_page_id, page_size, RW_X_LATCH, index, mtr);
+			next_page_id, block->page.size, RW_X_LATCH,
+			index, mtr);
 #ifdef UNIV_BTR_DEBUG
 		ut_a(page_is_comp(next_block->frame) == page_is_comp(page));
 		ut_a(btr_page_get_prev(next_block->frame, mtr)
@@ -3326,7 +3326,7 @@ row_sel_get_clust_rec_for_mysql(
 		and is it not unsafe to use RW_NO_LATCH here? */
 		buf_block_t* block = buf_page_get_gen(
 			btr_pcur_get_block(prebuilt->pcur)->page.id,
-			dict_table_page_size(sec_index->table),
+			btr_pcur_get_block(prebuilt->pcur)->page.size,
 			RW_NO_LATCH, NULL, BUF_GET,
 			__FILE__, __LINE__, mtr, &err);
 		mem_heap_t* heap = mem_heap_create(256);
@@ -1335,6 +1335,8 @@ store_len:
 				table, col);
 
 			ut_a(prefix_len < sizeof ext_buf);
+			const page_size_t& page_size
+				= dict_table_page_size(table);
 
 			/* If there is a spatial index on it,
 			log its MBR */
@@ -1343,9 +1345,7 @@ store_len:
 					col->mtype));
 
 				trx_undo_get_mbr_from_ext(
-					mbr,
-					dict_table_page_size(
-						table),
+					mbr, page_size,
 					field, &flen);
 			}
 
@@ -1354,7 +1354,7 @@ store_len:
 				flen < REC_ANTELOPE_MAX_INDEX_COL_LEN
 				&& !ignore_prefix
 				? ext_buf : NULL, prefix_len,
-				dict_table_page_size(table),
+				page_size,
 				&field, &flen,
 				spatial_status);
 		} else {
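The three store_len: hunks take a slightly different route: instead of reading a block's cached size, the repeated dict_table_page_size(table) calls are hoisted into a single local page_size reference that the later call sites reuse. A minimal sketch of that hoisting pattern, with hypothetical helper names:

#include <cstdint>
#include <cstdio>

/* Hypothetical stand-ins used only to illustrate hoisting a repeated
   lookup into a local const reference. */
struct page_size_t { uint32_t physical; };

struct table_t {
	page_size_t size;
	const page_size_t& page_size() const { return size; }
};

static void use_once(const page_size_t& ps)  { std::printf("%u\n", (unsigned) ps.physical); }
static void use_again(const page_size_t& ps) { std::printf("%u\n", (unsigned) ps.physical); }

int main() {
	table_t table{ {16384} };

	/* Before: each consumer re-fetches the size.
	   use_once(table.page_size());
	   use_again(table.page_size()); */

	/* After: fetch once, reuse the reference. */
	const page_size_t& page_size = table.page_size();
	use_once(page_size);
	use_again(page_size);
	return 0;
}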