Partial fix for Bug#11764622 57480: MEMORY LEAK WHEN HAVING 256+ TABLES
Port vasil.dimov@oracle.com-20111205083046-jtgi1emlvtfnjatt from mysql-trunk
commit cb80ad09da
parent a4fa485f5f
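What the patch does, in outline: ha_innobase::open() used to my_multi_malloc() an upd_buff/key_val_buff pair (upd_and_key_val_buff_len bytes each) for every opened handler, whether or not the table was ever updated. The patch drops that eager allocation, allocates a single upd_buf lazily on the first ::update_row() call and frees it in ::close(), and replaces the heap key-conversion buffers with the fixed-size srch_key_val1/srch_key_val2 members. Below is a minimal standalone C++ sketch of that lazy-allocation pattern, not the handler code itself; only upd_buf/upd_buf_size mirror the patch, and the class name, size constants and plain malloc/free (instead of my_malloc/my_free) are illustrative assumptions.

#include <cstddef>
#include <cstdlib>

/* Minimal sketch of the lazy-allocation pattern applied by this patch.
   Only upd_buf/upd_buf_size mirror the real code; everything else is
   illustrative. */
class handler_sketch {
public:
	handler_sketch() : upd_buf(NULL), upd_buf_size(0) {}

	/* open(): no per-table buffer is allocated any more, so tables
	   that are opened but never updated do not pin this memory. */
	int open()
	{
		upd_buf = NULL;
		upd_buf_size = 0;
		return 0;
	}

	/* update_row(): allocate the buffer on first use and keep it
	   for later updates through the same handler. */
	int update_row(size_t reclength, size_t max_key_length)
	{
		if (upd_buf == NULL) {
			/* analogous to reclength + max_key_length
			   + MAX_REF_PARTS * 3 in the patch */
			upd_buf_size = reclength + max_key_length + 3 * 16;
			upd_buf = static_cast<unsigned char*>(
				malloc(upd_buf_size));
			if (upd_buf == NULL) {
				upd_buf_size = 0;
				return -1;	/* out of memory */
			}
		}
		/* ... pack the changed fields into upd_buf ... */
		return 0;
	}

	/* close(): free the buffer only if it was ever allocated. */
	void close()
	{
		if (upd_buf != NULL) {
			free(upd_buf);
			upd_buf = NULL;
			upd_buf_size = 0;
		}
	}

private:
	unsigned char*	upd_buf;	/* lazily allocated update buffer */
	size_t		upd_buf_size;	/* 0 while unallocated */
};

int main()
{
	handler_sketch h;
	h.open();
	h.update_row(256, 64);	/* first update: buffer allocated here */
	h.update_row(256, 64);	/* later updates reuse it */
	h.close();		/* freed exactly once */
	return 0;
}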
@@ -3737,22 +3737,9 @@ ha_innobase::open(
 		DBUG_RETURN(1);
 	}
 
-	/* Create buffers for packing the fields of a record. Why
-	table->reclength did not work here? Obviously, because char
-	fields when packed actually became 1 byte longer, when we also
-	stored the string length as the first byte. */
-
-	upd_and_key_val_buff_len =
-			table->s->reclength + table->s->max_key_length
-					+ MAX_REF_PARTS * 3;
-	if (!(uchar*) my_multi_malloc(MYF(MY_WME),
-			&upd_buff, upd_and_key_val_buff_len,
-			&key_val_buff, upd_and_key_val_buff_len,
-			NullS)) {
-		free_share(share);
-
-		DBUG_RETURN(1);
-	}
+	/* Will be allocated if it is needed in ::update_row() */
+	upd_buf = NULL;
+	upd_buf_size = 0;
 
 	/* We look for pattern #P# to see if the table is partitioned
 	MySQL table. The retry logic for partitioned tables is a
@@ -3793,7 +3780,6 @@ retry:
 			"how you can resolve the problem.\n",
 			norm_name);
 		free_share(share);
-		my_free(upd_buff);
 		my_errno = ENOENT;
 
 		DBUG_RETURN(HA_ERR_NO_SUCH_TABLE);
@@ -3809,7 +3795,6 @@ retry:
 			"how you can resolve the problem.\n",
 			norm_name);
 		free_share(share);
-		my_free(upd_buff);
 		my_errno = ENOENT;
 
 		dict_table_decrement_handle_count(ib_table, FALSE);
@@ -4006,7 +3991,13 @@ ha_innobase::close(void)
 
 	row_prebuilt_free(prebuilt, FALSE);
 
-	my_free(upd_buff);
+	if (upd_buf != NULL) {
+		ut_ad(upd_buf_size != 0);
+		my_free(upd_buf);
+		upd_buf = NULL;
+		upd_buf_size = 0;
+	}
+
 	free_share(share);
 
 	/* Tell InnoDB server that there might be work for
@@ -5299,6 +5290,23 @@ ha_innobase::update_row(
 
 	ut_a(prebuilt->trx == trx);
 
+	if (upd_buf == NULL) {
+		ut_ad(upd_buf_size == 0);
+
+		/* Create a buffer for packing the fields of a record. Why
+		table->reclength did not work here? Obviously, because char
+		fields when packed actually became 1 byte longer, when we also
+		stored the string length as the first byte. */
+
+		upd_buf_size = table->s->reclength + table->s->max_key_length
+			+ MAX_REF_PARTS * 3;
+		upd_buf = (uchar*) my_malloc(upd_buf_size, MYF(MY_WME));
+		if (upd_buf == NULL) {
+			upd_buf_size = 0;
+			DBUG_RETURN(HA_ERR_OUT_OF_MEM);
+		}
+	}
+
 	ha_statistic_increment(&SSV::ha_update_count);
 
 	if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
@@ -5311,11 +5319,10 @@ ha_innobase::update_row(
 	}
 
 	/* Build an update vector from the modified fields in the rows
-	(uses upd_buff of the handle) */
+	(uses upd_buf of the handle) */
 
 	calc_row_difference(uvect, (uchar*) old_row, new_row, table,
-			upd_buff, (ulint)upd_and_key_val_buff_len,
-			prebuilt, user_thd);
+			    upd_buf, upd_buf_size, prebuilt, user_thd);
 
 	/* This is not a delete */
 	prebuilt->upd_node->is_delete = FALSE;
@@ -5692,8 +5699,7 @@ ha_innobase::index_read(
 
 		row_sel_convert_mysql_key_to_innobase(
 			prebuilt->search_tuple,
-			(byte*) key_val_buff,
-			(ulint)upd_and_key_val_buff_len,
+			srch_key_val1, sizeof(srch_key_val1),
 			index,
 			(byte*) key_ptr,
 			(ulint) key_len,
@@ -7511,12 +7517,6 @@ ha_innobase::records_in_range(
 {
 	KEY*		key;
 	dict_index_t*	index;
-	uchar*		key_val_buff2 = (uchar*) my_malloc(
-					table->s->reclength
-					+ table->s->max_key_length + 100,
-					MYF(MY_FAE));
-	ulint		buff2_len = table->s->reclength
-					+ table->s->max_key_length + 100;
 	dtuple_t*	range_start;
 	dtuple_t*	range_end;
 	ib_int64_t	n_rows;
@@ -7568,8 +7568,8 @@ ha_innobase::records_in_range(
 	dict_index_copy_types(range_end, index, key->key_parts);
 
 	row_sel_convert_mysql_key_to_innobase(
-				range_start, (byte*) key_val_buff,
-				(ulint)upd_and_key_val_buff_len,
+				range_start,
+				srch_key_val1, sizeof(srch_key_val1),
 				index,
 				(byte*) (min_key ? min_key->key :
 					 (const uchar*) 0),
@@ -7580,8 +7580,9 @@ ha_innobase::records_in_range(
 				: range_start->n_fields == 0);
 
 	row_sel_convert_mysql_key_to_innobase(
-				range_end, (byte*) key_val_buff2,
-				buff2_len, index,
+				range_end,
+				srch_key_val2, sizeof(srch_key_val2),
+				index,
 				(byte*) (max_key ? max_key->key :
 					 (const uchar*) 0),
 				(ulint) (max_key ? max_key->length : 0),
@@ -7608,7 +7609,6 @@ ha_innobase::records_in_range(
 	mem_heap_free(heap);
 
 func_exit:
-	my_free(key_val_buff2);
 
 	prebuilt->trx->op_info = (char*)"";
 
@@ -78,13 +78,14 @@ class ha_innobase: public handler
 	INNOBASE_SHARE*	share;		/*!< information for MySQL
 					table locking */
 
-	uchar*		upd_buff;	/*!< buffer used in updates */
-	uchar*		key_val_buff;	/*!< buffer used in converting
+	uchar*		upd_buf;	/*!< buffer used in updates */
+	ulint		upd_buf_size;	/*!< the size of upd_buf in bytes */
+	uchar		srch_key_val1[REC_VERSION_56_MAX_INDEX_COL_LEN + 2];
+	uchar		srch_key_val2[REC_VERSION_56_MAX_INDEX_COL_LEN + 2];
+					/*!< buffers used in converting
 					search key values from MySQL format
-					to Innodb format */
-	ulong		upd_and_key_val_buff_len;
-					/* the length of each of the previous
-					two buffers */
+					to InnoDB format. "+ 2" for the two
+					bytes where the length is stored */
 	Table_flags	int_table_flags;
 	uint		primary_key;
 	ulong		start_of_scan;	/*!< this is set to 1 when we are
@@ -128,7 +128,12 @@ row_sel_convert_mysql_key_to_innobase(
 					in the tuple is already according
 					to index! */
 	byte*		buf,		/*!< in: buffer to use in field
-					conversions */
+					conversions; NOTE that dtuple->data
+					may end up pointing inside buf so
+					do not discard that buffer while
+					the tuple is being used. See
+					row_mysql_store_col_in_innobase_format()
+					in the case of DATA_INT */
 	ulint		buf_len,	/*!< in: buffer length */
 	dict_index_t*	index,		/*!< in: index of the key value */
 	const byte*	key_ptr,	/*!< in: MySQL key value */
@@ -2301,7 +2301,12 @@ row_sel_convert_mysql_key_to_innobase(
 					in the tuple is already according
 					to index! */
 	byte*		buf,		/*!< in: buffer to use in field
-					conversions */
+					conversions; NOTE that dtuple->data
+					may end up pointing inside buf so
+					do not discard that buffer while
+					the tuple is being used. See
+					row_mysql_store_col_in_innobase_format()
+					in the case of DATA_INT */
 	ulint		buf_len,	/*!< in: buffer length */
 	dict_index_t*	index,		/*!< in: index of the key value */
 	const byte*	key_ptr,	/*!< in: MySQL key value */
@@ -2433,6 +2438,7 @@ row_sel_convert_mysql_key_to_innobase(
 		/* Storing may use at most data_len bytes of buf */
 
 		if (UNIV_LIKELY(!is_null)) {
+			ut_a(buf + data_len <= original_buf + buf_len);
 			row_mysql_store_col_in_innobase_format(
 				dfield, buf,
 				FALSE, /* MySQL key value format col */
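A note on the buf comment added to row_sel_convert_mysql_key_to_innobase(): after conversion the tuple's fields may point straight into the caller-supplied buffer rather than owning a copy, which is why that buffer (now srch_key_val1/srch_key_val2, or upd_buf) has to outlive the tuple. The following small self-contained C++ illustration of that aliasing uses hypothetical names throughout and is not the InnoDB conversion code.

#include <cstdio>
#include <cstring>

/* A field that, like a converted dfield, may alias the caller's
   conversion buffer instead of holding its own copy of the data. */
struct field_ref {
	const unsigned char*	data;	/* may point into the buffer */
	size_t			len;
};

/* Stand-in for the conversion step: write into buf and let the field
   point at it; no copy is kept inside the "tuple". */
static void convert_into(field_ref* f, unsigned char* buf, size_t buf_len,
			 const char* key, size_t key_len)
{
	size_t n = key_len < buf_len ? key_len : buf_len;
	memcpy(buf, key, n);
	f->data = buf;
	f->len = n;
}

int main()
{
	unsigned char srch_key_val[64];	/* fixed-size buffer, as in the patch */
	field_ref f;

	convert_into(&f, srch_key_val, sizeof(srch_key_val), "abc", 3);

	/* f.data aliases srch_key_val: freeing or reusing the buffer
	   before this read would leave the field dangling, which is what
	   the added NOTE in the parameter comment warns against. */
	printf("%.*s\n", (int) f.len, (const char*) f.data);
	return 0;
}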