5.6.33-79.0

This commit is contained in:
Sergei Golubchik 2016-10-25 17:01:37 +02:00
parent 93ab3093cb
commit d7dc03a267
38 changed files with 2892 additions and 85 deletions

View File

@ -4492,7 +4492,9 @@ corrupt:
recv_recover_page(TRUE, (buf_block_t*) bpage);
}
if (uncompressed && !recv_no_ibuf_operations) {
if (uncompressed && !recv_no_ibuf_operations
&& fil_page_get_type(frame) == FIL_PAGE_INDEX
&& page_is_leaf(frame)) {
buf_block_t* block;
ibool update_ibuf_bitmap;

View File

@ -521,7 +521,7 @@ buf_dblwr_process()
if (buf_page_is_corrupted(true, read_buf, zip_size)) {
fprintf(stderr,
"InnoDB: Warning: database page"
"InnoDB: Database page"
" corruption or a failed\n"
"InnoDB: file read of"
" space %lu page %lu.\n"

View File

@ -2568,6 +2568,11 @@ page_cleaner_sleep_if_needed(
ulint next_loop_time) /*!< in: time when next loop iteration
should start */
{
/* No sleep if we are cleaning the buffer pool during the shutdown
with everything else finished */
if (srv_shutdown_state == SRV_SHUTDOWN_FLUSH_PHASE)
return;
ulint cur_time = ut_time_ms();
if (next_loop_time > cur_time) {

View File

@ -272,6 +272,10 @@ dict_boot(void)
ut_ad(DICT_NUM_FIELDS__SYS_FOREIGN_FOR_NAME == 2);
ut_ad(DICT_NUM_COLS__SYS_FOREIGN_COLS == 4);
ut_ad(DICT_NUM_FIELDS__SYS_FOREIGN_COLS == 6);
ut_ad(DICT_NUM_COLS__SYS_ZIP_DICT == 3);
ut_ad(DICT_NUM_FIELDS__SYS_ZIP_DICT == 5);
ut_ad(DICT_NUM_COLS__SYS_ZIP_DICT_COLS == 3);
ut_ad(DICT_NUM_FIELDS__SYS_ZIP_DICT_COLS == 5);
mtr_start(&mtr);

View File

@ -38,6 +38,7 @@ Created 1/8/1996 Heikki Tuuri
#include "que0que.h"
#include "row0ins.h"
#include "row0mysql.h"
#include "row0sel.h"
#include "pars0pars.h"
#include "trx0roll.h"
#include "usr0sess.h"
@ -1790,6 +1791,135 @@ dict_create_or_check_sys_tablespace(void)
return(err);
}
/** Creates the zip_dict system tables (SYS_ZIP_DICT and SYS_ZIP_DICT_COLS)
inside InnoDB at server bootstrap or server start if they are not found or
are not of the right form.
@return DB_SUCCESS or error code */
UNIV_INTERN
dberr_t
dict_create_or_check_sys_zip_dict(void)
{
	trx_t*		trx;
	my_bool		srv_file_per_table_backup;
	dberr_t		err;
	dberr_t		sys_zip_dict_err;
	dberr_t		sys_zip_dict_cols_err;

	ut_a(srv_get_active_thread_type() == SRV_NONE);

	/* Note: The master thread has not been started at this point. */

	/* Probe for both tables first; only rebuild what is missing or
	corrupted. */
	sys_zip_dict_err = dict_check_if_system_table_exists(
		"SYS_ZIP_DICT", DICT_NUM_FIELDS__SYS_ZIP_DICT + 1, 2);
	sys_zip_dict_cols_err = dict_check_if_system_table_exists(
		"SYS_ZIP_DICT_COLS", DICT_NUM_FIELDS__SYS_ZIP_DICT_COLS + 1,
		1);

	if (sys_zip_dict_err == DB_SUCCESS &&
		sys_zip_dict_cols_err == DB_SUCCESS)
		return (DB_SUCCESS);

	trx = trx_allocate_for_mysql();

	trx_set_dict_operation(trx, TRX_DICT_OP_TABLE);

	trx->op_info = "creating zip_dict and zip_dict_cols sys tables";

	row_mysql_lock_data_dictionary(trx);

	/* Check which incomplete table definition to drop. */

	if (sys_zip_dict_err == DB_CORRUPTION) {
		ib_logf(IB_LOG_LEVEL_WARN,
			"Dropping incompletely created "
			"SYS_ZIP_DICT table.");
		row_drop_table_for_mysql("SYS_ZIP_DICT", trx, TRUE);
	}
	if (sys_zip_dict_cols_err == DB_CORRUPTION) {
		ib_logf(IB_LOG_LEVEL_WARN,
			"Dropping incompletely created "
			"SYS_ZIP_DICT_COLS table.");
		row_drop_table_for_mysql("SYS_ZIP_DICT_COLS", trx, TRUE);
	}

	ib_logf(IB_LOG_LEVEL_INFO,
		"Creating zip_dict and zip_dict_cols system tables.");

	/* We always want SYSTEM tables to be created inside the system
	tablespace. */
	srv_file_per_table_backup = srv_file_per_table;
	srv_file_per_table = 0;

	err = que_eval_sql(
		NULL,
		"PROCEDURE CREATE_SYS_ZIP_DICT_PROC () IS\n"
		"BEGIN\n"
		"CREATE TABLE SYS_ZIP_DICT(\n"
		" ID INT UNSIGNED NOT NULL,\n"
		" NAME CHAR("
			STRINGIFY_ARG(ZIP_DICT_MAX_NAME_LENGTH)
		") NOT NULL,\n"
		" DATA BLOB NOT NULL\n"
		");\n"
		"CREATE UNIQUE CLUSTERED INDEX SYS_ZIP_DICT_ID"
		" ON SYS_ZIP_DICT (ID);\n"
		"CREATE UNIQUE INDEX SYS_ZIP_DICT_NAME"
		" ON SYS_ZIP_DICT (NAME);\n"
		"CREATE TABLE SYS_ZIP_DICT_COLS(\n"
		" TABLE_ID INT UNSIGNED NOT NULL,\n"
		" COLUMN_POS INT UNSIGNED NOT NULL,\n"
		" DICT_ID INT UNSIGNED NOT NULL\n"
		");\n"
		"CREATE UNIQUE CLUSTERED INDEX SYS_ZIP_DICT_COLS_COMPOSITE"
		" ON SYS_ZIP_DICT_COLS (TABLE_ID, COLUMN_POS);\n"
		"END;\n",
		FALSE, trx);

	if (err != DB_SUCCESS) {
		/* Bug fix: the original concatenation was missing a space,
		producing "...SYS_ZIP_DICT_COLShas failed..." in the log. */
		ib_logf(IB_LOG_LEVEL_ERROR,
			"Creation of SYS_ZIP_DICT and SYS_ZIP_DICT_COLS "
			"has failed with error %lu. Tablespace is full. "
			"Dropping incompletely created tables.",
			(ulong) err);

		ut_a(err == DB_OUT_OF_FILE_SPACE
			|| err == DB_TOO_MANY_CONCURRENT_TRXS);

		row_drop_table_for_mysql("SYS_ZIP_DICT", trx, TRUE);
		row_drop_table_for_mysql("SYS_ZIP_DICT_COLS", trx, TRUE);

		if (err == DB_OUT_OF_FILE_SPACE) {
			err = DB_MUST_GET_MORE_FILE_SPACE;
		}
	}

	trx_commit_for_mysql(trx);

	row_mysql_unlock_data_dictionary(trx);

	trx_free_for_mysql(trx);

	srv_file_per_table = srv_file_per_table_backup;

	if (err == DB_SUCCESS) {
		ib_logf(IB_LOG_LEVEL_INFO,
			"zip_dict and zip_dict_cols system tables created.");
	}

	/* Note: The master thread has not been started at this point. */
	/* Confirm and move to the non-LRU part of the table LRU list. */

	sys_zip_dict_err = dict_check_if_system_table_exists(
		"SYS_ZIP_DICT", DICT_NUM_FIELDS__SYS_ZIP_DICT + 1, 2);
	ut_a(sys_zip_dict_err == DB_SUCCESS);
	sys_zip_dict_cols_err = dict_check_if_system_table_exists(
		"SYS_ZIP_DICT_COLS",
		DICT_NUM_FIELDS__SYS_ZIP_DICT_COLS + 1, 1);
	ut_a(sys_zip_dict_cols_err == DB_SUCCESS);

	return(err);
}
/********************************************************************//**
Add a single tablespace definition to the data dictionary tables in the
database.
@ -1843,3 +1973,456 @@ dict_create_add_tablespace_to_dictionary(
return(error);
}
/** Add a single compression dictionary definition to the SYS_ZIP_DICT
InnoDB system table.
The new row's ID is assigned as (current maximum ID + 1); the caller must
hold the data dictionary latch so that concurrent inserts cannot race on
the ID computation.
@return error code or DB_SUCCESS */
UNIV_INTERN
dberr_t
dict_create_add_zip_dict(
const char* name, /*!< in: dict name */
ulint name_len, /*!< in: dict name length */
const char* data, /*!< in: dict data */
ulint data_len, /*!< in: dict data length */
trx_t* trx) /*!< in/out: transaction */
{
ut_ad(name);
ut_ad(data);
/* Bind the name and the (possibly binary) data as SQL literals. */
pars_info_t* info = pars_info_create();
pars_info_add_literal(info, "name", name, name_len,
DATA_VARCHAR, DATA_ENGLISH);
pars_info_add_literal(info, "data", data, data_len,
DATA_BLOB, DATA_BINARY_TYPE | DATA_NOT_NULL);
/* The cursor scans IDs in descending order, so the first fetched row
holds the current maximum; NOTFOUND means the table is empty and the
new dictionary gets ID 1. */
dberr_t error = que_eval_sql(info,
"PROCEDURE P () IS\n"
" max_id INT;\n"
"DECLARE CURSOR cur IS\n"
" SELECT ID FROM SYS_ZIP_DICT\n"
" ORDER BY ID DESC;\n"
"BEGIN\n"
" max_id := 0;\n"
" OPEN cur;\n"
" FETCH cur INTO max_id;\n"
" IF (cur % NOTFOUND) THEN\n"
" max_id := 0;\n"
" END IF;\n"
" CLOSE cur;\n"
" INSERT INTO SYS_ZIP_DICT VALUES"
" (max_id + 1, :name, :data);\n"
"END;\n",
FALSE, trx);
return error;
}
/** Fetch callback that copies the single selected INT column of the
current row into the caller-supplied 32-bit buffer.
@return TRUE if all OK */
static
ibool
dict_create_extract_int_aux(
	void*	row,		/*!< in: sel_node_t* */
	void*	user_arg)	/*!< in: int32 id */
{
	sel_node_t*	sel_row = static_cast<sel_node_t*>(row);
	dfield_t*	val = que_node_get_val(sel_row->select_list);

	/* The select list must consist of exactly one 4-byte integer. */
	ut_a(dtype_get_mtype(dfield_get_type(val)) == DATA_INT);
	ut_a(dfield_get_len(val) == sizeof(ib_uint32_t));

	memcpy(user_arg, dfield_get_data(val), sizeof(ib_uint32_t));

	return(TRUE);
}
/** Add a single compression dictionary reference to the SYS_ZIP_DICT_COLS
InnoDB system table, linking (table id, column position) to a dictionary.
@return error code or DB_SUCCESS */
UNIV_INTERN
dberr_t
dict_create_add_zip_dict_reference(
	ulint	table_id,	/*!< in: table id */
	ulint	column_pos,	/*!< in: column position */
	ulint	dict_id,	/*!< in: dict id */
	trx_t*	trx)		/*!< in/out: transaction */
{
	pars_info_t*	pinfo = pars_info_create();

	/* Bind all three key values as 4-byte integer literals. */
	pars_info_add_int4_literal(pinfo, "table_id", table_id);
	pars_info_add_int4_literal(pinfo, "column_pos", column_pos);
	pars_info_add_int4_literal(pinfo, "dict_id", dict_id);

	const dberr_t	result = que_eval_sql(pinfo,
		"PROCEDURE P () IS\n"
		"BEGIN\n"
		" INSERT INTO SYS_ZIP_DICT_COLS VALUES"
		" (:table_id, :column_pos, :dict_id);\n"
		"END;\n",
		FALSE, trx);

	return result;
}
/** Get a single compression dictionary id for the given
(table id, column pos) pair.
Returns DB_RECORD_NOT_FOUND when no SYS_ZIP_DICT_COLS row matches.
@return error code or DB_SUCCESS */
UNIV_INTERN
dberr_t
dict_create_get_zip_dict_id_by_reference(
ulint table_id, /*!< in: table id */
ulint column_pos, /*!< in: column position */
ulint* dict_id, /*!< out: dict id */
trx_t* trx) /*!< in/out: transaction */
{
ut_ad(dict_id);
pars_info_t* info = pars_info_create();
/* Pre-fill the result buffer with a sentinel; if the fetch callback
never fires (no matching row), the sentinel survives and is mapped to
DB_RECORD_NOT_FOUND below. */
ib_uint32_t dict_id_buf;
mach_write_to_4(reinterpret_cast<byte*>(&dict_id_buf ),
ULINT32_UNDEFINED);
pars_info_add_int4_literal(info, "table_id", table_id);
pars_info_add_int4_literal(info, "column_pos", column_pos);
/* my_func stores the fetched DICT_ID into dict_id_buf. */
pars_info_bind_function(
info, "my_func", dict_create_extract_int_aux, &dict_id_buf);
dberr_t error = que_eval_sql(info,
"PROCEDURE P () IS\n"
"DECLARE FUNCTION my_func;\n"
"DECLARE CURSOR cur IS\n"
" SELECT DICT_ID FROM SYS_ZIP_DICT_COLS\n"
" WHERE TABLE_ID = :table_id AND\n"
" COLUMN_POS = :column_pos;\n"
"BEGIN\n"
" OPEN cur;\n"
" FETCH cur INTO my_func();\n"
" CLOSE cur;\n"
"END;\n",
FALSE, trx);
if (error == DB_SUCCESS) {
ib_uint32_t local_dict_id = mach_read_from_4(
reinterpret_cast<const byte*>(&dict_id_buf));
if (local_dict_id == ULINT32_UNDEFINED)
error = DB_RECORD_NOT_FOUND;
else
*dict_id = local_dict_id;
}
return error;
}
/** Get compression dictionary id for the given name.
Returns DB_RECORD_NOT_FOUND when no SYS_ZIP_DICT row has that name.
@return error code or DB_SUCCESS */
UNIV_INTERN
dberr_t
dict_create_get_zip_dict_id_by_name(
const char* dict_name, /*!< in: dict name */
ulint dict_name_len, /*!< in: dict name length */
ulint* dict_id, /*!< out: dict id */
trx_t* trx) /*!< in/out: transaction */
{
ut_ad(dict_name);
ut_ad(dict_name_len);
ut_ad(dict_id);
pars_info_t* info = pars_info_create();
pars_info_add_literal(info, "dict_name", dict_name, dict_name_len,
DATA_VARCHAR, DATA_ENGLISH);
/* Sentinel-initialize the result buffer; it stays ULINT32_UNDEFINED
when the fetch callback never fires, i.e. the name was not found. */
ib_uint32_t dict_id_buf;
mach_write_to_4(reinterpret_cast<byte*>(&dict_id_buf),
ULINT32_UNDEFINED);
pars_info_bind_function(
info, "my_func", dict_create_extract_int_aux, &dict_id_buf);
dberr_t error = que_eval_sql(info,
"PROCEDURE P () IS\n"
"DECLARE FUNCTION my_func;\n"
"DECLARE CURSOR cur IS\n"
" SELECT ID FROM SYS_ZIP_DICT\n"
" WHERE NAME = :dict_name;\n"
"BEGIN\n"
" OPEN cur;\n"
" FETCH cur INTO my_func();\n"
" CLOSE cur;\n"
"END;\n",
FALSE, trx);
if (error == DB_SUCCESS) {
ib_uint32_t local_dict_id = mach_read_from_4(
reinterpret_cast<const byte*>(&dict_id_buf));
if (local_dict_id == ULINT32_UNDEFINED)
error = DB_RECORD_NOT_FOUND;
else
*dict_id = local_dict_id;
}
return error;
}
/** Auxiliary enum used to indicate zip dict data extraction result code */
enum zip_dict_info_aux_code {
zip_dict_info_success, /*!< success */
zip_dict_info_not_found, /*!< zip dict record not found */
zip_dict_info_oom, /*!< out of memory */
zip_dict_info_corrupted_name, /*!< corrupted zip dict name */
zip_dict_info_corrupted_data /*!< corrupted zip dict data */
};
/** Auxiliary struct used to return zip dict info along with result code.
The name and data buffers are heap-allocated by the fetch callback and
ownership passes to the caller on success. */
struct zip_dict_info_aux {
LEX_STRING name; /*!< zip dict name */
LEX_STRING data; /*!< zip dict data */
int code; /*!< result code (0 - success) */
};
/** Fetch callback, just stores extracted zip_dict data in the external
variable.
Copies the NAME and DATA columns of the fetched SYS_ZIP_DICT row into
freshly allocated buffers in *user_arg; on any failure (NULL column or
out of memory) the partially allocated buffers are released and a
non-success result code is recorded.
@return always returns TRUE */
static
ibool
dict_create_get_zip_dict_info_by_id_aux(
	void*	row,		/*!< in: sel_node_t* */
	void*	user_arg)	/*!< in: pointer to zip_dict_info_aux* */
{
	sel_node_t*		node = static_cast<sel_node_t*>(row);
	zip_dict_info_aux*	result =
		static_cast<zip_dict_info_aux*>(user_arg);

	result->code = zip_dict_info_success;
	result->name.str = 0;
	result->name.length = 0;
	result->data.str = 0;
	result->data.length = 0;

	/* NAME field */
	que_node_t*	exp = node->select_list;
	ut_a(exp != 0);

	dfield_t*	dfield = que_node_get_val(exp);
	dtype_t*	type = dfield_get_type(dfield);
	ut_a(dtype_get_mtype(type) == DATA_VARCHAR);

	ulint	len = dfield_get_len(dfield);
	void*	data = dfield_get_data(dfield);

	if (len == UNIV_SQL_NULL) {
		result->code = zip_dict_info_corrupted_name;
	}
	else {
		/* +1 for the NUL terminator. */
		result->name.str =
			static_cast<char*>(my_malloc(len + 1, MYF(0)));
		if (result->name.str == 0) {
			result->code = zip_dict_info_oom;
		}
		else {
			memcpy(result->name.str, data, len);
			result->name.str[len] = '\0';
			result->name.length = len;
		}
	}

	/* DATA field */
	exp = que_node_get_next(exp);
	ut_a(exp != 0);

	dfield = que_node_get_val(exp);
	type = dfield_get_type(dfield);
	ut_a(dtype_get_mtype(type) == DATA_BLOB);

	len = dfield_get_len(dfield);
	data = dfield_get_data(dfield);

	if (len == UNIV_SQL_NULL) {
		result->code = zip_dict_info_corrupted_data;
	}
	else {
		/* Allocate at least one byte so a zero-length BLOB still
		gets a non-NULL buffer. */
		result->data.str =
			static_cast<char*>(my_malloc(
				len == 0 ? 1 : len, MYF(0)));
		if (result->data.str == 0) {
			result->code = zip_dict_info_oom;
		}
		else {
			memcpy(result->data.str, data, len);
			result->data.length = len;
		}
	}

	ut_ad(que_node_get_next(exp) == 0);

	if (result->code != zip_dict_info_success) {
		/* Bug fix: the conditions were inverted ("== 0"), which
		freed NULL pointers and leaked any buffer that had been
		successfully allocated before the failure. Free only the
		buffers that were actually allocated. */
		if (result->name.str != 0) {
			mem_free(result->name.str);
			result->name.str = 0;
			result->name.length = 0;
		}
		if (result->data.str != 0) {
			mem_free(result->data.str);
			result->data.str = 0;
			result->data.length = 0;
		}
	}

	return TRUE;
}
/** Get compression dictionary info (name and data) for the given id.
Allocates memory for name and data on success.
Must be freed with mem_free().
@return error code or DB_SUCCESS */
UNIV_INTERN
dberr_t
dict_create_get_zip_dict_info_by_id(
ulint dict_id, /*!< in: dict id */
char** name, /*!< out: dict name */
ulint* name_len, /*!< out: dict name length*/
char** data, /*!< out: dict data */
ulint* data_len, /*!< out: dict data length*/
trx_t* trx) /*!< in/out: transaction */
{
ut_ad(name);
ut_ad(data);
/* rec.code stays zip_dict_info_not_found if the fetch callback is
never invoked, i.e. no row matches the id. */
zip_dict_info_aux rec;
rec.code = zip_dict_info_not_found;
pars_info_t* info = pars_info_create();
pars_info_add_int4_literal(info, "id", dict_id);
pars_info_bind_function(
info, "my_func", dict_create_get_zip_dict_info_by_id_aux,
&rec);
dberr_t error = que_eval_sql(info,
"PROCEDURE P () IS\n"
"DECLARE FUNCTION my_func;\n"
"DECLARE CURSOR cur IS\n"
" SELECT NAME, DATA FROM SYS_ZIP_DICT\n"
" WHERE ID = :id;\n"
"BEGIN\n"
" OPEN cur;\n"
" FETCH cur INTO my_func();\n"
" CLOSE cur;\n"
"END;\n",
FALSE, trx);
if (error == DB_SUCCESS) {
/* Map the callback's result code to a dberr_t; ownership of
the name/data buffers transfers to the caller on success. */
switch (rec.code) {
case zip_dict_info_success:
*name = rec.name.str;
*name_len = rec.name.length;
*data = rec.data.str;
*data_len = rec.data.length;
break;
case zip_dict_info_not_found:
error = DB_RECORD_NOT_FOUND;
break;
case zip_dict_info_oom:
error = DB_OUT_OF_MEMORY;
break;
case zip_dict_info_corrupted_name:
case zip_dict_info_corrupted_data:
error = DB_INVALID_NULL;
break;
default:
ut_error;
}
}
return error;
}
/** Remove a single compression dictionary from the data dictionary
tables in the database.
The dictionary is deleted only if no SYS_ZIP_DICT_COLS row still
references it; otherwise DB_ROW_IS_REFERENCED is returned.
@return error code or DB_SUCCESS */
UNIV_INTERN
dberr_t
dict_create_remove_zip_dict(
const char* name, /*!< in: dict name */
ulint name_len, /*!< in: dict name length */
trx_t* trx) /*!< in/out: transaction */
{
ut_ad(name);
pars_info_t* info = pars_info_create();
/* Both buffers are sentinel-initialized; whichever callback does not
fire leaves its buffer at ULINT32_UNDEFINED, which is how "not found"
and "not referenced" are detected after the procedure runs. */
ib_uint32_t dict_id_buf;
mach_write_to_4(reinterpret_cast<byte*>(&dict_id_buf),
ULINT32_UNDEFINED);
ib_uint32_t counter_buf;
mach_write_to_4(reinterpret_cast<byte*>(&counter_buf),
ULINT32_UNDEFINED);
pars_info_add_literal(info, "name", name, name_len,
DATA_VARCHAR, DATA_ENGLISH);
/* :dict_id is bound by pointer to dict_id_buf, so the value stored by
find_dict_func during the first fetch is visible to ref_cur's WHERE
clause. */
pars_info_bind_int4_literal(info, "dict_id", &dict_id_buf);
pars_info_bind_function(info, "find_dict_func",
dict_create_extract_int_aux, &dict_id_buf);
pars_info_bind_function(info, "count_func",
dict_create_extract_int_aux, &counter_buf);
dberr_t error = que_eval_sql(info,
"PROCEDURE P () IS\n"
"DECLARE FUNCTION find_dict_func;\n"
"DECLARE FUNCTION count_func;\n"
"DECLARE CURSOR dict_cur IS\n"
" SELECT ID FROM SYS_ZIP_DICT\n"
" WHERE NAME = :name\n"
" FOR UPDATE;\n"
"DECLARE CURSOR ref_cur IS\n"
" SELECT 1 FROM SYS_ZIP_DICT_COLS\n"
" WHERE DICT_ID = :dict_id;\n"
"BEGIN\n"
" OPEN dict_cur;\n"
" FETCH dict_cur INTO find_dict_func();\n"
" IF NOT (SQL % NOTFOUND) THEN\n"
" OPEN ref_cur;\n"
" FETCH ref_cur INTO count_func();\n"
" IF SQL % NOTFOUND THEN\n"
" DELETE FROM SYS_ZIP_DICT WHERE CURRENT OF dict_cur;\n"
" END IF;\n"
" CLOSE ref_cur;\n"
" END IF;\n"
" CLOSE dict_cur;\n"
"END;\n",
FALSE, trx);
if (error == DB_SUCCESS) {
ib_uint32_t local_dict_id = mach_read_from_4(
reinterpret_cast<const byte*>(&dict_id_buf));
if (local_dict_id == ULINT32_UNDEFINED) {
/* find_dict_func never fired: no dictionary with
this name. */
error = DB_RECORD_NOT_FOUND;
}
else {
/* count_func fired iff at least one column still
references the dictionary. */
ib_uint32_t local_counter = mach_read_from_4(
reinterpret_cast<const byte*>(&counter_buf));
if (local_counter != ULINT32_UNDEFINED)
error = DB_ROW_IS_REFERENCED;
}
}
return error;
}
/** Remove all compression dictionary references for the given table ID from
the data dictionary tables in the database.
@return error code or DB_SUCCESS */
UNIV_INTERN
dberr_t
dict_create_remove_zip_dict_references_for_table(
	ulint	table_id,	/*!< in: table id */
	trx_t*	trx)		/*!< in/out: transaction */
{
	pars_info_t*	pinfo = pars_info_create();

	pars_info_add_int4_literal(pinfo, "table_id", table_id);

	/* Delete every SYS_ZIP_DICT_COLS row belonging to the table. */
	const dberr_t	result = que_eval_sql(pinfo,
		"PROCEDURE P () IS\n"
		"BEGIN\n"
		" DELETE FROM SYS_ZIP_DICT_COLS\n"
		" WHERE TABLE_ID = :table_id;\n"
		"END;\n",
		FALSE, trx);

	return result;
}

View File

@ -6781,3 +6781,161 @@ dict_tf_to_row_format_string(
return(0);
}
#endif /* !UNIV_HOTBACKUP */
/** Insert a record into SYS_ZIP_DICT.
Runs in its own background transaction under an exclusive dictionary
operation lock; commits on success, rolls back on failure.
@retval DB_SUCCESS if OK
@retval dberr_t if the insert failed */
UNIV_INTERN
dberr_t
dict_create_zip_dict(
const char* name, /*!< in: zip_dict name */
ulint name_len, /*!< in: zip_dict name length*/
const char* data, /*!< in: zip_dict data */
ulint data_len) /*!< in: zip_dict data length */
{
dberr_t err = DB_SUCCESS;
trx_t* trx;
ut_ad(name);
ut_ad(data);
/* Lock order: dict_operation_lock (X) first, then the dict mutex. */
rw_lock_x_lock(&dict_operation_lock);
dict_mutex_enter_for_mysql();
trx = trx_allocate_for_background();
trx->op_info = "insert zip_dict";
/* Record that this trx already holds the X latch so InnoDB does not
try to acquire it again. */
trx->dict_operation_lock_mode = RW_X_LATCH;
trx_start_if_not_started(trx);
err = dict_create_add_zip_dict(name, name_len, data, data_len, trx);
if (err == DB_SUCCESS) {
trx_commit_for_mysql(trx);
}
else {
trx->op_info = "rollback of internal trx on zip_dict table";
trx_rollback_to_savepoint(trx, NULL);
ut_a(trx->error_state == DB_SUCCESS);
}
trx->op_info = "";
trx->dict_operation_lock_mode = 0;
trx_free_for_background(trx);
dict_mutex_exit_for_mysql();
rw_lock_x_unlock(&dict_operation_lock);
return err;
}
/** Get single compression dictionary id for the given
(table id, column pos) pair.
Read-only lookup performed in a background transaction under a shared
dictionary operation lock.
@retval DB_SUCCESS if OK
@retval DB_RECORD_NOT_FOUND if not found */
UNIV_INTERN
dberr_t
dict_get_dictionary_id_by_key(
ulint table_id, /*!< in: table id */
ulint column_pos, /*!< in: column position */
ulint* dict_id) /*!< out: zip_dict id */
{
dberr_t err = DB_SUCCESS;
trx_t* trx;
/* S latch suffices: this is a pure read of the dictionary tables. */
rw_lock_s_lock(&dict_operation_lock);
dict_mutex_enter_for_mysql();
trx = trx_allocate_for_background();
trx->op_info = "get zip dict id by composite key";
trx->dict_operation_lock_mode = RW_S_LATCH;
trx_start_if_not_started(trx);
err = dict_create_get_zip_dict_id_by_reference(table_id, column_pos,
dict_id, trx);
trx_commit_for_mysql(trx);
trx->dict_operation_lock_mode = 0;
trx_free_for_background(trx);
dict_mutex_exit_for_mysql();
rw_lock_s_unlock(&dict_operation_lock);
return err;
}
/** Get compression dictionary info (name and data) for the given id.
Allocates memory in name->str and data->str on success.
Must be freed with mem_free().
Read-only lookup performed in a background transaction under a shared
dictionary operation lock.
@retval DB_SUCCESS if OK
@retval DB_RECORD_NOT_FOUND if not found */
UNIV_INTERN
dberr_t
dict_get_dictionary_info_by_id(
ulint dict_id, /*!< in: dict id */
char** name, /*!< out: dictionary name */
ulint* name_len, /*!< out: dictionary name length*/
char** data, /*!< out: dictionary data */
ulint* data_len) /*!< out: dictionary data length*/
{
dberr_t err = DB_SUCCESS;
trx_t* trx;
/* S latch suffices: this is a pure read of the dictionary tables. */
rw_lock_s_lock(&dict_operation_lock);
dict_mutex_enter_for_mysql();
trx = trx_allocate_for_background();
trx->op_info = "get zip dict name and data by id";
trx->dict_operation_lock_mode = RW_S_LATCH;
trx_start_if_not_started(trx);
err = dict_create_get_zip_dict_info_by_id(dict_id, name, name_len,
data, data_len, trx);
trx_commit_for_mysql(trx);
trx->dict_operation_lock_mode = 0;
trx_free_for_background(trx);
dict_mutex_exit_for_mysql();
rw_lock_s_unlock(&dict_operation_lock);
return err;
}
/** Delete a record in SYS_ZIP_DICT with the given name.
Runs in its own background transaction under an exclusive dictionary
operation lock; commits on success, rolls back on failure.
@retval DB_SUCCESS if OK
@retval DB_RECORD_NOT_FOUND if not found
@retval DB_ROW_IS_REFERENCED if in use */
UNIV_INTERN
dberr_t
dict_drop_zip_dict(
const char* name, /*!< in: zip_dict name */
ulint name_len) /*!< in: zip_dict name length*/
{
dberr_t err = DB_SUCCESS;
trx_t* trx;
ut_ad(name);
/* Lock order: dict_operation_lock (X) first, then the dict mutex. */
rw_lock_x_lock(&dict_operation_lock);
dict_mutex_enter_for_mysql();
trx = trx_allocate_for_background();
trx->op_info = "delete zip_dict";
trx->dict_operation_lock_mode = RW_X_LATCH;
trx_start_if_not_started(trx);
err = dict_create_remove_zip_dict(name, name_len, trx);
if (err == DB_SUCCESS) {
trx_commit_for_mysql(trx);
}
else {
trx->op_info = "rollback of internal trx on zip_dict table";
trx_rollback_to_savepoint(trx, NULL);
ut_a(trx->error_state == DB_SUCCESS);
}
trx->op_info = "";
trx->dict_operation_lock_mode = 0;
trx_free_for_background(trx);
dict_mutex_exit_for_mysql();
rw_lock_x_unlock(&dict_operation_lock);
return err;
}

View File

@ -56,7 +56,9 @@ static const char* SYSTEM_TABLE_NAME[] = {
"SYS_FOREIGN",
"SYS_FOREIGN_COLS",
"SYS_TABLESPACES",
"SYS_DATAFILES"
"SYS_DATAFILES",
"SYS_ZIP_DICT",
"SYS_ZIP_DICT_COLS"
};
/* If this flag is TRUE, then we will load the cluster index's (and tables')
@ -728,6 +730,161 @@ err_len:
return(NULL);
}
/** This function parses a SYS_ZIP_DICT record, extracts necessary
information from the record and returns to caller.
Name and data are duplicated into the caller-provided heap; the DATA
column may be stored externally (BLOB pages) and is then copied in full.
@return error message, or NULL on success */
UNIV_INTERN
const char*
dict_process_sys_zip_dict(
mem_heap_t* heap, /*!< in/out: heap memory */
ulint zip_size, /*!< in: nonzero=compressed BLOB page size */
const rec_t* rec, /*!< in: current SYS_ZIP_DICT rec */
ulint* id, /*!< out: dict id */
const char** name, /*!< out: dict name */
const char** data, /*!< out: dict data */
ulint* data_len) /*!< out: dict data length */
{
ulint len;
const byte* field;
/* Initialize the output values */
*id = ULINT_UNDEFINED;
*name = NULL;
*data = NULL;
*data_len = 0;
if (UNIV_UNLIKELY(rec_get_deleted_flag(rec, 0))) {
return("delete-marked record in SYS_ZIP_DICT");
}
if (UNIV_UNLIKELY(
rec_get_n_fields_old(rec)!= DICT_NUM_FIELDS__SYS_ZIP_DICT)) {
return("wrong number of columns in SYS_ZIP_DICT record");
}
/* ID: fixed-width 4-byte unsigned integer. */
field = rec_get_nth_field_old(
rec, DICT_FLD__SYS_ZIP_DICT__ID, &len);
if (UNIV_UNLIKELY(len != DICT_FLD_LEN_SPACE)) {
goto err_len;
}
*id = mach_read_from_4(field);
/* Validate the hidden system columns (DB_TRX_ID, DB_ROLL_PTR); their
values are not needed, only their lengths are sanity-checked. */
rec_get_nth_field_offs_old(
rec, DICT_FLD__SYS_ZIP_DICT__DB_TRX_ID, &len);
if (UNIV_UNLIKELY(len != DATA_TRX_ID_LEN && len != UNIV_SQL_NULL)) {
goto err_len;
}
rec_get_nth_field_offs_old(
rec, DICT_FLD__SYS_ZIP_DICT__DB_ROLL_PTR, &len);
if (UNIV_UNLIKELY(len != DATA_ROLL_PTR_LEN && len != UNIV_SQL_NULL)) {
goto err_len;
}
/* NAME: must be a non-empty, non-NULL string. */
field = rec_get_nth_field_old(
rec, DICT_FLD__SYS_ZIP_DICT__NAME, &len);
if (UNIV_UNLIKELY(len == 0 || len == UNIV_SQL_NULL)) {
goto err_len;
}
*name = mem_heap_strdupl(heap, (char*) field, len);
/* DATA: may be stored externally if large. */
field = rec_get_nth_field_old(
rec, DICT_FLD__SYS_ZIP_DICT__DATA, &len);
if (UNIV_UNLIKELY(len == UNIV_SQL_NULL)) {
goto err_len;
}
if (rec_get_1byte_offs_flag(rec) == 0 &&
rec_2_is_field_extern(rec, DICT_FLD__SYS_ZIP_DICT__DATA)) {
ut_a(len >= BTR_EXTERN_FIELD_REF_SIZE);
/* An all-zero external field reference means the BLOB was
never written (half-created record). */
if (UNIV_UNLIKELY
(!memcmp(field + len - BTR_EXTERN_FIELD_REF_SIZE,
field_ref_zero,
BTR_EXTERN_FIELD_REF_SIZE))) {
goto err_len;
}
*data = reinterpret_cast<char*>(
btr_copy_externally_stored_field(data_len, field,
zip_size, len, heap));
}
else {
*data_len = len;
*data = static_cast<char*>(mem_heap_dup(heap, field, len));
}
return(NULL);
err_len:
return("incorrect column length in SYS_ZIP_DICT");
}
/** This function parses a SYS_ZIP_DICT_COLS record, extracts necessary
information from the record and returns to caller.
All three columns are fixed-width 4-byte unsigned integers.
@return error message, or NULL on success */
UNIV_INTERN
const char*
dict_process_sys_zip_dict_cols(
mem_heap_t* heap, /*!< in/out: heap memory */
const rec_t* rec, /*!< in: current SYS_ZIP_DICT rec */
ulint* table_id, /*!< out: table id */
ulint* column_pos, /*!< out: column position */
ulint* dict_id) /*!< out: dict id */
{
ulint len;
const byte* field;
/* Initialize the output values */
*table_id = ULINT_UNDEFINED;
*column_pos = ULINT_UNDEFINED;
*dict_id = ULINT_UNDEFINED;
if (UNIV_UNLIKELY(rec_get_deleted_flag(rec, 0))) {
return("delete-marked record in SYS_ZIP_DICT_COLS");
}
if (UNIV_UNLIKELY(rec_get_n_fields_old(rec) !=
DICT_NUM_FIELDS__SYS_ZIP_DICT_COLS)) {
return("wrong number of columns in SYS_ZIP_DICT_COLS"
" record");
}
field = rec_get_nth_field_old(
rec, DICT_FLD__SYS_ZIP_DICT_COLS__TABLE_ID, &len);
if (UNIV_UNLIKELY(len != DICT_FLD_LEN_SPACE)) {
err_len:
return("incorrect column length in SYS_ZIP_DICT_COLS");
}
*table_id = mach_read_from_4(field);
field = rec_get_nth_field_old(
rec, DICT_FLD__SYS_ZIP_DICT_COLS__COLUMN_POS, &len);
if (UNIV_UNLIKELY(len != DICT_FLD_LEN_SPACE)) {
goto err_len;
}
*column_pos = mach_read_from_4(field);
/* Validate the hidden system columns (DB_TRX_ID, DB_ROLL_PTR); only
their lengths are sanity-checked. */
rec_get_nth_field_offs_old(
rec, DICT_FLD__SYS_ZIP_DICT_COLS__DB_TRX_ID, &len);
if (UNIV_UNLIKELY(len != DATA_TRX_ID_LEN && len != UNIV_SQL_NULL)) {
goto err_len;
}
rec_get_nth_field_offs_old(
rec, DICT_FLD__SYS_ZIP_DICT_COLS__DB_ROLL_PTR, &len);
if (UNIV_UNLIKELY(len != DATA_ROLL_PTR_LEN && len != UNIV_SQL_NULL)) {
goto err_len;
}
field = rec_get_nth_field_old(
rec, DICT_FLD__SYS_ZIP_DICT_COLS__DICT_ID, &len);
if (UNIV_UNLIKELY(len != DICT_FLD_LEN_SPACE)) {
goto err_len;
}
*dict_id = mach_read_from_4(field);
return(NULL);
}
/********************************************************************//**
Determine the flags of a table as stored in SYS_TABLES.TYPE and N_COLS.
@return ULINT_UNDEFINED if error, else a valid dict_table_t::flags. */

View File

@ -489,6 +489,8 @@ fil_space_get_by_id(
ut_ad(space->magic_n == FIL_SPACE_MAGIC_N),
space->id == id);
/* The system tablespace must always be found */
ut_ad(space || id != 0 || srv_is_being_started);
return(space);
}

View File

@ -108,6 +108,7 @@ UNIV_INTERN mysql_pfs_key_t fts_pll_tokenize_mutex_key;
/** variable to record innodb_fts_internal_tbl_name for information
schema table INNODB_FTS_INSERTED etc. */
UNIV_INTERN char* fts_internal_tbl_name = NULL;
UNIV_INTERN char* fts_internal_tbl_name2 = NULL;
/** InnoDB default stopword list:
There are different versions of stopwords, the stop words listed
@ -6569,6 +6570,36 @@ fts_check_corrupt_index(
return(0);
}
/* Get parent table name if it's a fts aux table
@param[in] aux_table_name aux table name
@param[in] aux_table_len aux table length
@return parent table name (heap-allocated copy, caller must free),
or NULL if the name is not an FTS auxiliary table name or the parent
table cannot be opened */
char*
fts_get_parent_table_name(
const char* aux_table_name,
ulint aux_table_len)
{
fts_aux_table_t aux_table;
char* parent_table_name = NULL;
/* Decode the aux table name to obtain the parent table id. */
if (fts_is_aux_table_name(&aux_table, aux_table_name, aux_table_len)) {
dict_table_t* parent_table;
parent_table = dict_table_open_on_id(
aux_table.parent_id, TRUE, DICT_TABLE_OP_NORMAL);
if (parent_table != NULL) {
/* Copy the name before releasing the table handle. */
parent_table_name = mem_strdupl(
parent_table->name,
strlen(parent_table->name));
dict_table_close(parent_table, TRUE, FALSE);
}
}
return(parent_table_name);
}
/** Check the validity of the parent table.
@param[in] aux_table auxiliary table
@return true if it is a valid table or false if it is not */

View File

@ -1361,6 +1361,29 @@ normalize_table_name_low(
ibool set_lower_case); /* in: TRUE if we want to set
name to lower case */
/** Creates a new compression dictionary. */
static
handler_create_zip_dict_result
innobase_create_zip_dict(
handlerton* hton, /*!< in: innobase handlerton */
THD* thd, /*!< in: handle to the MySQL thread */
const char* name, /*!< in: zip dictionary name */
ulint* name_len,
/*!< in/out: zip dictionary name length */
const char* data, /*!< in: zip dictionary data */
ulint* data_len);
/*!< in/out: zip dictionary data length */
/** Drops a existing compression dictionary. */
static
handler_drop_zip_dict_result
innobase_drop_zip_dict(
handlerton* hton, /*!< in: innobase handlerton */
THD* thd, /*!< in: handle to the MySQL thread */
const char* name, /*!< in: zip dictionary name */
ulint* name_len);
/*!< in/out: zip dictionary name length */
/*************************************************************//**
Checks if buffer pool is big enough to enable backoff algorithm.
InnoDB empty free list algorithm backoff requires free pages
@ -3422,6 +3445,9 @@ innobase_init(
innobase_hton->kill_connection = innobase_kill_connection;
innobase_hton->create_zip_dict = innobase_create_zip_dict;
innobase_hton->drop_zip_dict = innobase_drop_zip_dict;
ut_a(DATA_MYSQL_TRUE_VARCHAR == (ulint)MYSQL_TYPE_VARCHAR);
#ifndef DBUG_OFF
@ -4100,6 +4126,89 @@ innobase_purge_changed_page_bitmaps(
return (my_bool)log_online_purge_changed_page_bitmaps(lsn);
}
/** Creates a new compression dictionary.
Handlerton entry point: validates name/data lengths, then delegates to
dict_create_zip_dict() and maps the dberr_t to a handler result code.
On a too-long name/data the corresponding length argument is clamped to
the maximum so the caller can report it. */
static
handler_create_zip_dict_result
innobase_create_zip_dict(
handlerton* hton, /*!< in: innobase handlerton */
THD* thd, /*!< in: handle to the MySQL thread */
const char* name, /*!< in: zip dictionary name */
ulint* name_len,
/*!< in/out: zip dictionary name length */
const char* data, /*!< in: zip dictionary data */
ulint* data_len)
/*!< in/out: zip dictionary data length */
{
handler_create_zip_dict_result result =
HA_CREATE_ZIP_DICT_UNKNOWN_ERROR;
DBUG_ENTER("innobase_create_zip_dict");
DBUG_ASSERT(hton == innodb_hton_ptr);
if (UNIV_UNLIKELY(high_level_read_only)) {
DBUG_RETURN(HA_CREATE_ZIP_DICT_READ_ONLY);
}
if (UNIV_UNLIKELY(*name_len > ZIP_DICT_MAX_NAME_LENGTH)) {
*name_len = ZIP_DICT_MAX_NAME_LENGTH;
DBUG_RETURN(HA_CREATE_ZIP_DICT_NAME_TOO_LONG);
}
if (UNIV_UNLIKELY(*data_len > ZIP_DICT_MAX_DATA_LENGTH)) {
*data_len = ZIP_DICT_MAX_DATA_LENGTH;
DBUG_RETURN(HA_CREATE_ZIP_DICT_DATA_TOO_LONG);
}
switch (dict_create_zip_dict(name, *name_len, data, *data_len)) {
case DB_SUCCESS:
result = HA_CREATE_ZIP_DICT_OK;
break;
case DB_DUPLICATE_KEY:
result = HA_CREATE_ZIP_DICT_ALREADY_EXISTS;
break;
default:
ut_ad(0);
result = HA_CREATE_ZIP_DICT_UNKNOWN_ERROR;
}
DBUG_RETURN(result);
}
/** Drops an existing compression dictionary.
Handlerton entry point: delegates to dict_drop_zip_dict() and maps the
dberr_t to a handler result code. */
static
handler_drop_zip_dict_result
innobase_drop_zip_dict(
handlerton* hton, /*!< in: innobase handlerton */
THD* thd, /*!< in: handle to the MySQL thread */
const char* name, /*!< in: zip dictionary name */
ulint* name_len)
/*!< in/out: zip dictionary name length */
{
handler_drop_zip_dict_result result = HA_DROP_ZIP_DICT_UNKNOWN_ERROR;
DBUG_ENTER("innobase_drop_zip_dict");
DBUG_ASSERT(hton == innodb_hton_ptr);
if (UNIV_UNLIKELY(high_level_read_only)) {
DBUG_RETURN(HA_DROP_ZIP_DICT_READ_ONLY);
}
switch (dict_drop_zip_dict(name, *name_len)) {
case DB_SUCCESS:
result = HA_DROP_ZIP_DICT_OK;
break;
case DB_RECORD_NOT_FOUND:
result = HA_DROP_ZIP_DICT_DOES_NOT_EXIST;
break;
case DB_ROW_IS_REFERENCED:
result = HA_DROP_ZIP_DICT_IS_REFERENCED;
break;
default:
ut_ad(0);
result = HA_DROP_ZIP_DICT_UNKNOWN_ERROR;
}
DBUG_RETURN(result);
}
/*****************************************************************//**
Check whether this is a fake change transaction.
@return TRUE if a fake change transaction */
@ -5460,6 +5569,86 @@ func_exit:
DBUG_RETURN(ret);
}
/** This function checks if all the compression dictionaries referenced
in table->fields exist in SYS_ZIP_DICT InnoDB system table.
On failure, *err_dict_name is set to the name of the first missing
dictionary. dict_ids entries for columns without a dictionary are set
to ULINT_UNDEFINED.
@return true if all referenced dictionaries exist */
UNIV_INTERN
bool
innobase_check_zip_dicts(
	const TABLE*	table,		/*!< in: table in MySQL data
					dictionary */
	ulint*		dict_ids,	/*!< out: identified zip dict ids
					(at least n_fields long) */
	trx_t*		trx,		/*!< in: transaction */
	const char**	err_dict_name)	/*!< out: the name of the
					zip_dict which does not exist. */
{
	DBUG_ENTER("innobase_check_zip_dicts");

	bool	res = true;
	dberr_t	err = DB_SUCCESS;
	const size_t	n_fields = table->s->fields;

	/* Initialized defensively: it is only dereferenced after the loop
	when err != DB_SUCCESS, which implies at least one iteration ran,
	but the explicit init removes the maybe-uninitialized hazard (and
	compiler warning) for the n_fields == 0 case. */
	Field*	field_ptr = NULL;

	for (size_t field_idx = 0; err == DB_SUCCESS && field_idx < n_fields;
		++field_idx)
	{
		field_ptr = table->field[field_idx];
		if (field_ptr->has_associated_compression_dictionary()) {
			/* A missing dictionary yields
			DB_RECORD_NOT_FOUND and terminates the scan. */
			err = dict_create_get_zip_dict_id_by_name(
				field_ptr->zip_dict_name.str,
				field_ptr->zip_dict_name.length,
				&dict_ids[field_idx],
				trx);
			ut_a(err == DB_SUCCESS || err == DB_RECORD_NOT_FOUND);
		}
		else {
			/* Column has no compression dictionary. */
			dict_ids[field_idx] = ULINT_UNDEFINED;
		}
	}

	if (err != DB_SUCCESS) {
		res = false;
		*err_dict_name = field_ptr->zip_dict_name.str;
	}

	DBUG_RETURN(res);
}
/** This function creates compression dictionary references in
SYS_ZIP_DICT_COLS InnoDB system table for table_id based on info
in table->fields and provided zip dict ids. */
UNIV_INTERN
void
innobase_create_zip_dict_references(
	const TABLE*	table,		/*!< in: table in MySQL data
					dictionary */
	table_id_t	ib_table_id,	/*!< in: table ID in Innodb data
					dictionary */
	ulint*		zip_dict_ids,	/*!< in: zip dict ids
					(at least n_fields long) */
	trx_t*		trx)		/*!< in: transaction */
{
	DBUG_ENTER("innobase_create_zip_dict_references");

	dberr_t		err = DB_SUCCESS;
	const size_t	field_count = table->s->fields;

	/* Insert one SYS_ZIP_DICT_COLS row per column that has a
	dictionary assigned; ULINT_UNDEFINED marks columns without one. */
	for (size_t i = 0; err == DB_SUCCESS && i < field_count; ++i)
	{
		if (zip_dict_ids[i] == ULINT_UNDEFINED) {
			continue;
		}

		err = dict_create_add_zip_dict_reference(ib_table_id,
			table->field[i]->field_index,
			zip_dict_ids[i], trx);
		ut_a(err == DB_SUCCESS);
	}

	DBUG_VOID_RETURN;
}
/*******************************************************************//**
This function uses index translation table to quickly locate the
requested index structure.
@ -6749,7 +6938,12 @@ ha_innobase::store_key_val_for_row(
blob_data = row_mysql_read_blob_ref(&blob_len,
(byte*) (record
+ (ulint) get_field_offset(table, field)),
(ulint) field->pack_length());
(ulint) field->pack_length(),
field->column_format() ==
COLUMN_FORMAT_TYPE_COMPRESSED,
reinterpret_cast<const byte*>(
field->zip_dict_data.str),
field->zip_dict_data.length, prebuilt);
true_len = blob_len;
@ -7004,6 +7198,9 @@ build_template_field(
templ->mbminlen = dict_col_get_mbminlen(col);
templ->mbmaxlen = dict_col_get_mbmaxlen(col);
templ->is_unsigned = col->prtype & DATA_UNSIGNED;
templ->compressed = (field->column_format()
== COLUMN_FORMAT_TYPE_COMPRESSED);
templ->zip_dict_data = field->zip_dict_data;
if (!dict_index_is_clust(index)
&& templ->rec_field_no == ULINT_UNDEFINED) {
@ -7761,8 +7958,11 @@ calc_row_difference(
switch (col_type) {
case DATA_BLOB:
o_ptr = row_mysql_read_blob_ref(&o_len, o_ptr, o_len);
n_ptr = row_mysql_read_blob_ref(&n_len, n_ptr, n_len);
/* Do not compress blob column while comparing*/
o_ptr = row_mysql_read_blob_ref(&o_len, o_ptr, o_len,
false, 0, 0, prebuilt);
n_ptr = row_mysql_read_blob_ref(&n_len, n_ptr, n_len,
false, 0, 0, prebuilt);
break;
@ -7832,7 +8032,13 @@ calc_row_difference(
TRUE,
new_mysql_row_col,
col_pack_len,
dict_table_is_comp(prebuilt->table));
dict_table_is_comp(prebuilt->table),
field->column_format() ==
COLUMN_FORMAT_TYPE_COMPRESSED,
reinterpret_cast<const byte*>(
field->zip_dict_data.str),
field->zip_dict_data.length,
prebuilt);
dfield_copy(&ufield->new_val, &dfield);
} else {
dfield_set_null(&ufield->new_val);
@ -9503,6 +9709,7 @@ create_table_def(
ulint unsigned_type;
ulint binary_type;
ulint long_true_varchar;
ulint compressed;
ulint charset_no;
ulint i;
ulint doc_id_col = 0;
@ -9649,6 +9856,13 @@ create_table_def(
}
}
/* Check if the the field has COMPRESSED attribute */
compressed = 0;
if (field->column_format() ==
COLUMN_FORMAT_TYPE_COMPRESSED) {
compressed = DATA_COMPRESSED;
}
/* First check whether the column to be added has a
system reserved name. */
if (dict_col_name_is_reserved(field->field_name)){
@ -9669,7 +9883,8 @@ err_col:
dtype_form_prtype(
(ulint) field->type()
| nulls_allowed | unsigned_type
| binary_type | long_true_varchar,
| binary_type | long_true_varchar
| compressed,
charset_no),
col_len);
}
@ -10505,6 +10720,10 @@ ha_innobase::create(
const char* stmt;
size_t stmt_len;
mem_heap_t* heap = 0;
ulint* zip_dict_ids = 0;
const char* err_zip_dict_name = 0;
DBUG_ENTER("ha_innobase::create");
DBUG_ASSERT(thd != NULL);
@ -10595,6 +10814,18 @@ ha_innobase::create(
row_mysql_lock_data_dictionary(trx);
heap = mem_heap_create(form->s->fields * sizeof(ulint));
zip_dict_ids = static_cast<ulint*>(
mem_heap_alloc(heap, form->s->fields * sizeof(ulint)));
if (!innobase_check_zip_dicts(form, zip_dict_ids,
trx, &err_zip_dict_name)) {
error = -1;
my_error(ER_COMPRESSION_DICTIONARY_DOES_NOT_EXIST,
MYF(0), err_zip_dict_name);
goto cleanup;
}
error = create_table_def(trx, form, norm_name, temp_path,
remote_path, flags, flags2);
if (error) {
@ -10702,6 +10933,22 @@ ha_innobase::create(
dict_table_get_all_fts_indexes(innobase_table, fts->indexes);
}
/*
Adding compression dictionary <-> compressed table column links
to the SYS_ZIP_DICT_COLS table.
*/
ut_a(zip_dict_ids != 0);
{
dict_table_t* local_table = dict_table_open_on_name(
norm_name, TRUE, FALSE, DICT_ERR_IGNORE_NONE);
ut_a(local_table);
table_id_t table_id = local_table->id;
dict_table_close(local_table, TRUE, FALSE);
innobase_create_zip_dict_references(form,
table_id, zip_dict_ids, trx);
}
stmt = innobase_get_stmt(thd, &stmt_len);
if (stmt) {
@ -10818,6 +11065,9 @@ ha_innobase::create(
trx_free_for_mysql(trx);
if (heap != 0)
mem_heap_free(heap);
DBUG_RETURN(0);
cleanup:
@ -10827,6 +11077,9 @@ cleanup:
trx_free_for_mysql(trx);
if (heap != 0)
mem_heap_free(heap);
DBUG_RETURN(error);
}
@ -11904,6 +12157,14 @@ ha_innobase::info_low(
if (dict_stats_is_persistent_enabled(ib_table)) {
if (is_analyze) {
/* If this table is already queued for
background analyze, remove it from the
queue as we are about to do the same */
dict_mutex_enter_for_mysql();
dict_stats_recalc_pool_del(ib_table);
dict_mutex_exit_for_mysql();
opt = DICT_STATS_RECALC_PERSISTENT;
} else {
/* This is e.g. 'SHOW INDEXES', fetch
@ -13050,6 +13311,11 @@ ha_innobase::extra(
if (prebuilt->blob_heap) {
row_mysql_prebuilt_free_blob_heap(prebuilt);
}
if (prebuilt->compress_heap) {
row_mysql_prebuilt_free_compress_heap(prebuilt);
}
break;
case HA_EXTRA_RESET_STATE:
reset_template();
@ -13101,6 +13367,10 @@ ha_innobase::reset()
row_mysql_prebuilt_free_blob_heap(prebuilt);
}
if (prebuilt->compress_heap) {
row_mysql_prebuilt_free_compress_heap(prebuilt);
}
reset_template();
ds_mrr.reset();
@ -13300,7 +13570,11 @@ ha_innobase::external_lock(
&& lock_type == F_WRLCK)
|| thd_sql_command(thd) == SQLCOM_CREATE_INDEX
|| thd_sql_command(thd) == SQLCOM_DROP_INDEX
|| thd_sql_command(thd) == SQLCOM_DELETE)) {
|| thd_sql_command(thd) == SQLCOM_DELETE
|| thd_sql_command(thd) ==
SQLCOM_CREATE_COMPRESSION_DICTIONARY
|| thd_sql_command(thd) ==
SQLCOM_DROP_COMPRESSION_DICTIONARY)) {
if (thd_sql_command(thd) == SQLCOM_CREATE_TABLE)
{
@ -14062,7 +14336,9 @@ ha_innobase::store_lock(
&& lock_type <= TL_WRITE))
|| sql_command == SQLCOM_CREATE_INDEX
|| sql_command == SQLCOM_DROP_INDEX
|| sql_command == SQLCOM_DELETE)) {
|| sql_command == SQLCOM_DELETE
|| sql_command == SQLCOM_CREATE_COMPRESSION_DICTIONARY
|| sql_command == SQLCOM_DROP_COMPRESSION_DICTIONARY)) {
ib_senderrf(trx->mysql_thd,
IB_LOG_LEVEL_WARN, ER_READ_ONLY_MODE);
@ -15001,6 +15277,82 @@ ha_innobase::check_if_incompatible_data(
return(COMPATIBLE_DATA_YES);
}
/** This function reads zip dict-related info from SYS_ZIP_DICT
and SYS_ZIP_DICT_COLS for all columns marked with
COLUMN_FORMAT_TYPE_COMPRESSED flag and updates
zip_dict_name / zip_dict_data for those which have associated
compression dictionaries.
*/
UNIV_INTERN
void
ha_innobase::update_field_defs_with_zip_dict_info()
{
	DBUG_ENTER("update_field_defs_with_zip_dict_info");
	/* Dictionary lookups below acquire the mutex themselves */
	ut_ad(!mutex_own(&dict_sys->mutex));

	char	norm_name[FN_REFLEN];
	normalize_table_name(norm_name, table_share->normalized_path.str);

	dict_table_t*	ib_table = dict_table_open_on_name(
		norm_name, FALSE, FALSE, DICT_ERR_IGNORE_NONE);

	/* if dict_table_open_on_name() returns NULL, then it means that
	TABLE_SHARE is populated for a table being created and we can
	skip filling zip dict info here */
	if (ib_table == 0)
		DBUG_VOID_RETURN;

	/* Only the table id is needed; release the handle right away */
	table_id_t	ib_table_id = ib_table->id;
	dict_table_close(ib_table, FALSE, FALSE);
	Field*	field;
	for (uint i = 0; i < table_share->fields; ++i) {
		field = table_share->field[i];
		if (field->column_format() ==
			COLUMN_FORMAT_TYPE_COMPRESSED) {
			bool reference_found = false;
			ulint dict_id = 0;
			/* Map (table id, column position) to a dict id
			via SYS_ZIP_DICT_COLS */
			switch (dict_get_dictionary_id_by_key(ib_table_id, i,
				&dict_id)) {
			case DB_SUCCESS:
				reference_found = true;
				break;
			case DB_RECORD_NOT_FOUND:
				reference_found = false;
				break;
			default:
				ut_error;
			}
			if (reference_found) {
				char*	local_name = 0;
				ulint	local_name_len = 0;
				char*	local_data = 0;
				ulint	local_data_len = 0;

				/* Fetch dictionary name and payload from
				SYS_ZIP_DICT; allocates the buffers that the
				Field members below take over */
				if (dict_get_dictionary_info_by_id(dict_id,
					&local_name, &local_name_len,
					&local_data, &local_data_len) !=
					DB_SUCCESS) {
					ut_error;
				}
				else {
					field->zip_dict_name.str =
						local_name;
					field->zip_dict_name.length =
						local_name_len;
					field->zip_dict_data.str =
						local_data;
					field->zip_dict_data.length =
						local_data_len;
				}
			}
			else {
				/* Compressed column with no dictionary
				associated */
				field->zip_dict_name = null_lex_cstr;
				field->zip_dict_data = null_lex_cstr;
			}
		}
	}
	DBUG_VOID_RETURN;
}
/****************************************************************//**
Update the system variable innodb_io_capacity_max using the "saved"
value. This function is registered as a callback with MySQL. */
@ -15555,7 +15907,12 @@ innodb_internal_table_update(
my_free(old);
}
fts_internal_tbl_name = *(char**) var_ptr;
fts_internal_tbl_name2 = *(char**) var_ptr;
if (fts_internal_tbl_name2 == NULL) {
fts_internal_tbl_name = const_cast<char*>("default");
} else {
fts_internal_tbl_name = fts_internal_tbl_name2;
}
}
/****************************************************************//**
@ -17888,7 +18245,7 @@ static MYSQL_SYSVAR_BOOL(disable_sort_file_cache, srv_disable_sort_file_cache,
"Whether to disable OS system file cache for sort I/O",
NULL, NULL, FALSE);
static MYSQL_SYSVAR_STR(ft_aux_table, fts_internal_tbl_name,
static MYSQL_SYSVAR_STR(ft_aux_table, fts_internal_tbl_name2,
PLUGIN_VAR_NOCMDARG,
"FTS internal auxiliary table to be checked",
innodb_internal_table_validate,
@ -18340,6 +18697,19 @@ static MYSQL_SYSVAR_BOOL(locking_fake_changes, srv_fake_changes_locks,
"not take any locks at all.",
NULL, NULL, TRUE);
static MYSQL_SYSVAR_UINT(compressed_columns_zip_level,
srv_compressed_columns_zip_level,
PLUGIN_VAR_RQCMDARG,
"Compression level used for compressed columns. 0 is no compression"
", 1 is fastest and 9 is best compression. Default is 6.",
NULL, NULL, DEFAULT_COMPRESSION_LEVEL, 0, 9, 0);
static MYSQL_SYSVAR_ULONG(compressed_columns_threshold,
srv_compressed_columns_threshold,
PLUGIN_VAR_RQCMDARG,
"Compress column data if its length exceeds this value. Default is 96",
NULL, NULL, 96, 1, ~0UL, 0);
static struct st_mysql_sys_var* innobase_system_variables[]= {
MYSQL_SYSVAR(log_block_size),
MYSQL_SYSVAR(additional_mem_pool_size),
@ -18537,6 +18907,8 @@ static struct st_mysql_sys_var* innobase_system_variables[]= {
MYSQL_SYSVAR(fake_changes),
MYSQL_SYSVAR(locking_fake_changes),
MYSQL_SYSVAR(tmpdir),
MYSQL_SYSVAR(compressed_columns_zip_level),
MYSQL_SYSVAR(compressed_columns_threshold),
NULL
};
@ -18559,6 +18931,8 @@ mysql_declare_plugin(innobase)
i_s_xtradb_read_view,
i_s_xtradb_internal_hash_tables,
i_s_xtradb_rseg,
i_s_xtradb_zip_dict,
i_s_xtradb_zip_dict_cols,
i_s_innodb_trx,
i_s_innodb_locks,
i_s_innodb_lock_waits,

View File

@ -287,6 +287,15 @@ class ha_innobase: public handler
/** @} */
bool check_if_incompatible_data(HA_CREATE_INFO *info,
uint table_changes);
/** This function reads zip dict-related info from SYS_ZIP_DICT
and SYS_ZIP_DICT_COLS for all columns marked with
COLUMN_FORMAT_TYPE_COMPRESSED flag and updates
zip_dict_name / zip_dict_data for those which have associated
compression dictionaries.
*/
virtual void update_field_defs_with_zip_dict_info();
private:
/** Builds a 'template' to the prebuilt struct.
@ -665,3 +674,31 @@ innobase_build_index_translation(
INNOBASE_SHARE* share); /*!< in/out: share structure
where index translation table
will be constructed in. */
/** This function checks if all the compression dictionaries referenced
in table->fields exist in SYS_ZIP_DICT InnoDB system table.
@return true if all referenced dictionaries exist */
UNIV_INTERN
bool
innobase_check_zip_dicts(
const TABLE* table, /*!< in: table in MySQL data
dictionary */
ulint* dict_ids, /*!< out: identified zip dict ids
(at least n_fields long) */
trx_t* trx, /*!< in: transaction */
const char** err_dict_name); /*!< out: the name of the
zip_dict which does not exist. */
/** This function creates compression dictionary references in
SYS_ZIP_DICT_COLS InnoDB system table for table_id based on info
in table->fields and provided zip dict ids. */
UNIV_INTERN
void
innobase_create_zip_dict_references(
const TABLE* table, /*!< in: table in MySQL data
dictionary */
table_id_t ib_table_id, /*!< in: table ID in Innodb data
dictionary */
ulint* zip_dict_ids, /*!< in: zip dict ids
(at least n_fields long) */
trx_t* trx); /*!< in: transaction */

View File

@ -201,7 +201,10 @@ innobase_need_rebuild(
/*==================*/
const Alter_inplace_info* ha_alter_info)
{
if (ha_alter_info->handler_flags
Alter_inplace_info::HA_ALTER_FLAGS alter_inplace_flags =
ha_alter_info->handler_flags & ~(INNOBASE_INPLACE_IGNORE);
if (alter_inplace_flags
== Alter_inplace_info::CHANGE_CREATE_OPTION
&& !(ha_alter_info->create_info->used_fields
& (HA_CREATE_USED_ROW_FORMAT
@ -1069,6 +1072,15 @@ innobase_col_to_mysql(
field->reset();
if (field->type() == MYSQL_TYPE_VARCHAR) {
if (field->column_format() ==
COLUMN_FORMAT_TYPE_COMPRESSED) {
/* Skip compressed varchar column when
reporting an erroneous row
during index creation or table rebuild. */
field->set_null();
break;
}
/* This is a >= 5.0.3 type true VARCHAR. Store the
length of the data to the first byte or the first
two bytes of dest. */
@ -2328,7 +2340,8 @@ innobase_build_col_map_add(
mem_heap_t* heap,
dfield_t* dfield,
const Field* field,
ulint comp)
ulint comp,
row_prebuilt_t* prebuilt)
{
if (field->is_real_null()) {
dfield_set_null(dfield);
@ -2340,7 +2353,10 @@ innobase_build_col_map_add(
byte* buf = static_cast<byte*>(mem_heap_alloc(heap, size));
row_mysql_store_col_in_innobase_format(
dfield, buf, TRUE, field->ptr, size, comp);
dfield, buf, TRUE, field->ptr, size, comp,
field->column_format() == COLUMN_FORMAT_TYPE_COMPRESSED,
reinterpret_cast<const byte*>(field->zip_dict_data.str),
field->zip_dict_data.length, prebuilt);
}
/** Construct the translation table for reordering, dropping or
@ -2365,7 +2381,8 @@ innobase_build_col_map(
const dict_table_t* new_table,
const dict_table_t* old_table,
dtuple_t* add_cols,
mem_heap_t* heap)
mem_heap_t* heap,
row_prebuilt_t* prebuilt)
{
DBUG_ENTER("innobase_build_col_map");
DBUG_ASSERT(altered_table != table);
@ -2404,7 +2421,7 @@ innobase_build_col_map(
innobase_build_col_map_add(
heap, dtuple_get_nth_field(add_cols, i),
altered_table->field[i],
dict_table_is_comp(new_table));
dict_table_is_comp(new_table), prebuilt);
found_col:
i++;
}
@ -2567,7 +2584,8 @@ prepare_inplace_alter_table_dict(
ulint flags2,
ulint fts_doc_id_col,
bool add_fts_doc_id,
bool add_fts_doc_id_idx)
bool add_fts_doc_id_idx,
row_prebuilt_t* prebuilt)
{
bool dict_locked = false;
ulint* add_key_nums; /* MySQL key numbers */
@ -2578,6 +2596,7 @@ prepare_inplace_alter_table_dict(
dberr_t error;
ulint num_fts_index;
ha_innobase_inplace_ctx*ctx;
ulint* zip_dict_ids = 0;
DBUG_ENTER("prepare_inplace_alter_table_dict");
@ -2712,6 +2731,18 @@ prepare_inplace_alter_table_dict(
ctx->new_table->id);
ulint n_cols;
dtuple_t* add_cols;
const char* err_zip_dict_name = 0;
zip_dict_ids = static_cast<ulint*>(
mem_heap_alloc(ctx->heap,
altered_table->s->fields * sizeof(ulint)));
if (!innobase_check_zip_dicts(altered_table, zip_dict_ids,
ctx->trx, &err_zip_dict_name)) {
my_error(ER_COMPRESSION_DICTIONARY_DOES_NOT_EXIST,
MYF(0), err_zip_dict_name);
goto new_clustered_failed;
}
if (innobase_check_foreigns(
ha_alter_info, altered_table, old_table,
@ -2815,6 +2846,12 @@ prepare_inplace_alter_table_dict(
}
}
if (field->column_format() ==
COLUMN_FORMAT_TYPE_COMPRESSED) {
field_type |= DATA_COMPRESSED;
}
if (dict_col_name_is_reserved(field->field_name)) {
dict_mem_table_free(ctx->new_table);
my_error(ER_WRONG_COLUMN_NAME, MYF(0),
@ -2894,7 +2931,7 @@ prepare_inplace_alter_table_dict(
ctx->col_map = innobase_build_col_map(
ha_alter_info, altered_table, old_table,
ctx->new_table, user_table,
add_cols, ctx->heap);
add_cols, ctx->heap, prebuilt);
ctx->add_cols = add_cols;
} else {
DBUG_ASSERT(!innobase_need_rebuild(ha_alter_info));
@ -3072,6 +3109,15 @@ op_ok:
DBUG_ASSERT(error == DB_SUCCESS);
/*
Adding compression dictionary <-> compressed table column links
to the SYS_ZIP_DICT_COLS table.
*/
if (zip_dict_ids != 0) {
innobase_create_zip_dict_references(altered_table,
ctx->trx->table_id, zip_dict_ids, ctx->trx);
}
/* Commit the data dictionary transaction in order to release
the table locks on the system tables. This means that if
MySQL crashes while creating a new primary key inside
@ -3767,7 +3813,7 @@ err_exit:
}
if (!(ha_alter_info->handler_flags & INNOBASE_ALTER_DATA)
|| (ha_alter_info->handler_flags
|| ((ha_alter_info->handler_flags & ~INNOBASE_INPLACE_IGNORE)
== Alter_inplace_info::CHANGE_CREATE_OPTION
&& !innobase_need_rebuild(ha_alter_info))) {
@ -3893,7 +3939,7 @@ found_col:
table_share->table_name.str,
flags, flags2,
fts_doc_col_no, add_fts_doc_id,
add_fts_doc_id_idx));
add_fts_doc_id_idx, prebuilt));
}
/** Alter the table structure in-place with operations
@ -3933,7 +3979,7 @@ ok_exit:
DBUG_RETURN(false);
}
if (ha_alter_info->handler_flags
if ((ha_alter_info->handler_flags & ~INNOBASE_INPLACE_IGNORE)
== Alter_inplace_info::CHANGE_CREATE_OPTION
&& !innobase_need_rebuild(ha_alter_info)) {
goto ok_exit;

View File

@ -4050,6 +4050,8 @@ i_s_fts_config_fill(
DBUG_RETURN(0);
}
DEBUG_SYNC_C("i_s_fts_config_fille_check");
fields = table->field;
/* Prevent DDL to drop fts aux tables. */

View File

@ -32,9 +32,11 @@ this program; if not, write to the Free Software Foundation, Inc.,
#include <read0i_s.h>
#include <trx0i_s.h>
#include "srv0start.h" /* for srv_was_started */
#include <btr0pcur.h> /* btr_pcur_t */
#include <btr0sea.h> /* btr_search_sys */
#include <log0recv.h> /* recv_sys */
#include <fil0fil.h>
#include <dict0crea.h> /* for ZIP_DICT_MAX_* constants */
/* for XTRADB_RSEG table */
#include "trx0trx.h" /* for TRX_QUE_STATE_STR_MAX_LEN */
@ -130,6 +132,28 @@ field_store_string(
return(ret);
}
/** Auxiliary function to store (char*, len) value in MYSQL_TYPE_BLOB
field. A NULL data pointer stores SQL NULL and is treated as success.
@return 0 on success */
static
int
field_store_blob(
	Field*		field,		/*!< in/out: target field for storage */
	const char*	data,		/*!< in: pointer to data, or NULL */
	uint		data_len)	/*!< in: data length */
{
	if (data == NULL) {
		/* No payload: store SQL NULL, report success */
		field->set_null();
		return(0);
	}

	const int store_result
		= field->store(data, data_len, system_charset_info);
	field->set_notnull();
	return(store_result);
}
static
int
@ -603,3 +627,329 @@ UNIV_INTERN struct st_mysql_plugin i_s_xtradb_rseg =
STRUCT_FLD(__reserved1, NULL),
STRUCT_FLD(flags, 0UL),
};
/************************************************************************/

/* Column indexes into I_S.XTRADB_ZIP_DICT rows; the order must match
xtradb_sys_zip_dict_fields_info[] below */
enum zip_dict_field_type
{
	zip_dict_field_id,
	zip_dict_field_name,
	zip_dict_field_zip_dict
};
/* Column definitions of INFORMATION_SCHEMA.XTRADB_ZIP_DICT */
static ST_FIELD_INFO xtradb_sys_zip_dict_fields_info[] =
{
	/* dictionary id: BIGINT UNSIGNED */
	{ STRUCT_FLD(field_name, "id"),
	  STRUCT_FLD(field_length, MY_INT64_NUM_DECIMAL_DIGITS),
	  STRUCT_FLD(field_type, MYSQL_TYPE_LONGLONG),
	  STRUCT_FLD(value, 0),
	  STRUCT_FLD(field_flags, MY_I_S_UNSIGNED),
	  STRUCT_FLD(old_name, ""),
	  STRUCT_FLD(open_method, SKIP_OPEN_TABLE) },

	/* dictionary name, bounded by ZIP_DICT_MAX_NAME_LENGTH */
	{ STRUCT_FLD(field_name, "name"),
	  STRUCT_FLD(field_length, ZIP_DICT_MAX_NAME_LENGTH),
	  STRUCT_FLD(field_type, MYSQL_TYPE_STRING),
	  STRUCT_FLD(value, 0),
	  STRUCT_FLD(field_flags, 0),
	  STRUCT_FLD(old_name, ""),
	  STRUCT_FLD(open_method, SKIP_OPEN_TABLE) },

	/* dictionary payload, stored as a BLOB */
	{ STRUCT_FLD(field_name, "zip_dict"),
	  STRUCT_FLD(field_length, ZIP_DICT_MAX_DATA_LENGTH),
	  STRUCT_FLD(field_type, MYSQL_TYPE_BLOB),
	  STRUCT_FLD(value, 0),
	  STRUCT_FLD(field_flags, 0),
	  STRUCT_FLD(old_name, ""),
	  STRUCT_FLD(open_method, SKIP_OPEN_TABLE) },

	END_OF_ST_FIELD_INFO
};
/** Function to fill INFORMATION_SCHEMA.XTRADB_ZIP_DICT with information
collected by scanning SYS_ZIP_DICT table.
@return 0 on success */
static
int
xtradb_i_s_dict_fill_sys_zip_dict(
	THD*		thd,		/*!< in: thread */
	ulint		id,		/*!< in: dict ID */
	const char*	name,		/*!< in: dict name */
	const char*	data,		/*!< in: dict data */
	ulint		data_len,	/*!< in: dict data length */
	TABLE*		table_to_fill)	/*!< in/out: fill this table */
{
	DBUG_ENTER("xtradb_i_s_dict_fill_sys_zip_dict");

	Field**	fields = table_to_fill->field;

	/* OK() bails out of this function on a store failure */
	OK(field_store_ulint(fields[zip_dict_field_id], id));
	OK(field_store_string(fields[zip_dict_field_name], name));
	OK(field_store_blob(fields[zip_dict_field_zip_dict], data,
		data_len));

	OK(schema_table_store_record(thd, table_to_fill));

	DBUG_RETURN(0);
}
/** Function to populate INFORMATION_SCHEMA.XTRADB_ZIP_DICT table.
Loop through each record in SYS_ZIP_DICT, and extract the column
information and fill the INFORMATION_SCHEMA.XTRADB_ZIP_DICT table.
The dictionary mutex and the mini-transaction are released while each
row is pushed to the I_S table, then re-acquired for the next record.
@return 0 on success */
static
int
xtradb_i_s_sys_zip_dict_fill_table(
	THD*		thd,	/*!< in: thread */
	TABLE_LIST*	tables,	/*!< in/out: tables to fill */
	Item*		)	/*!< in: condition (not used) */
{
	btr_pcur_t	pcur;
	const rec_t*	rec;
	mem_heap_t*	heap;
	mtr_t		mtr;

	DBUG_ENTER("xtradb_i_s_sys_zip_dict_fill_table");
	RETURN_IF_INNODB_NOT_STARTED(tables->schema_table_name);

	/* deny access to user without SUPER_ACL privilege */
	if (check_global_access(thd, SUPER_ACL)) {
		DBUG_RETURN(0);
	}

	heap = mem_heap_create(1000);
	mutex_enter(&dict_sys->mutex);
	mtr_start(&mtr);

	rec = dict_startscan_system(&pcur, &mtr, SYS_ZIP_DICT);
	/* compressed page size of the scanned table, passed to
	dict_process_sys_zip_dict() when extracting each row */
	ulint zip_size = dict_table_zip_size(pcur.btr_cur.index->table);

	while (rec) {
		const char*	err_msg;
		ulint		id;
		const char*	name;
		const char*	data;
		ulint		data_len;

		/* Extract necessary information from a SYS_ZIP_DICT row */
		err_msg = dict_process_sys_zip_dict(
			heap, zip_size, rec, &id, &name, &data, &data_len);

		/* Release latches while the row is handed to MySQL */
		mtr_commit(&mtr);
		mutex_exit(&dict_sys->mutex);

		if (!err_msg) {
			xtradb_i_s_dict_fill_sys_zip_dict(
				thd, id, name, data, data_len,
				tables->table);
		} else {
			push_warning_printf(thd,
				Sql_condition::WARN_LEVEL_WARN,
				ER_CANT_FIND_SYSTEM_REC, "%s", err_msg);
		}

		mem_heap_empty(heap);

		/* Get the next record */
		mutex_enter(&dict_sys->mutex);
		mtr_start(&mtr);
		rec = dict_getnext_system(&pcur, &mtr);
	}

	mtr_commit(&mtr);
	mutex_exit(&dict_sys->mutex);
	mem_heap_free(heap);

	DBUG_RETURN(0);
}
static int i_s_xtradb_zip_dict_init(void* p)
{
DBUG_ENTER("i_s_xtradb_zip_dict_init");
ST_SCHEMA_TABLE* schema = static_cast<ST_SCHEMA_TABLE*>(p);
schema->fields_info = xtradb_sys_zip_dict_fields_info;
schema->fill_table = xtradb_i_s_sys_zip_dict_fill_table;
DBUG_RETURN(0);
}
/* Plugin descriptor for INFORMATION_SCHEMA.XTRADB_ZIP_DICT */
UNIV_INTERN struct st_mysql_plugin i_s_xtradb_zip_dict =
{
	STRUCT_FLD(type, MYSQL_INFORMATION_SCHEMA_PLUGIN),
	STRUCT_FLD(info, &i_s_info),
	STRUCT_FLD(name, "XTRADB_ZIP_DICT"),
	STRUCT_FLD(author, PLUGIN_AUTHOR),
	STRUCT_FLD(descr, "InnoDB compression dictionaries information"),
	STRUCT_FLD(license, PLUGIN_LICENSE_GPL),
	STRUCT_FLD(init, i_s_xtradb_zip_dict_init),
	STRUCT_FLD(deinit, i_s_common_deinit),
	STRUCT_FLD(version, INNODB_VERSION_SHORT),
	STRUCT_FLD(status_vars, NULL),
	STRUCT_FLD(system_vars, NULL),
	STRUCT_FLD(__reserved1, NULL),
	STRUCT_FLD(flags, 0UL),
};
/* Column indexes into I_S.XTRADB_ZIP_DICT_COLS rows; the order must
match xtradb_sys_zip_dict_cols_fields_info[] below */
enum zip_dict_cols_field_type
{
	zip_dict_cols_field_table_id,
	zip_dict_cols_field_column_pos,
	zip_dict_cols_field_dict_id
};
/* Column definitions of INFORMATION_SCHEMA.XTRADB_ZIP_DICT_COLS;
all three columns are BIGINT UNSIGNED */
static ST_FIELD_INFO xtradb_sys_zip_dict_cols_fields_info[] =
{
	/* id of the table owning the compressed column */
	{ STRUCT_FLD(field_name, "table_id"),
	  STRUCT_FLD(field_length, MY_INT64_NUM_DECIMAL_DIGITS),
	  STRUCT_FLD(field_type, MYSQL_TYPE_LONGLONG),
	  STRUCT_FLD(value, 0),
	  STRUCT_FLD(field_flags, MY_I_S_UNSIGNED),
	  STRUCT_FLD(old_name, ""),
	  STRUCT_FLD(open_method, SKIP_OPEN_TABLE) },

	/* position of the compressed column inside the table */
	{ STRUCT_FLD(field_name, "column_pos"),
	  STRUCT_FLD(field_length, MY_INT64_NUM_DECIMAL_DIGITS),
	  STRUCT_FLD(field_type, MYSQL_TYPE_LONGLONG),
	  STRUCT_FLD(value, 0),
	  STRUCT_FLD(field_flags, MY_I_S_UNSIGNED),
	  STRUCT_FLD(old_name, ""),
	  STRUCT_FLD(open_method, SKIP_OPEN_TABLE) },

	/* id of the referenced compression dictionary */
	{ STRUCT_FLD(field_name, "dict_id"),
	  STRUCT_FLD(field_length, MY_INT64_NUM_DECIMAL_DIGITS),
	  STRUCT_FLD(field_type, MYSQL_TYPE_LONGLONG),
	  STRUCT_FLD(value, 0),
	  STRUCT_FLD(field_flags, MY_I_S_UNSIGNED),
	  STRUCT_FLD(old_name, ""),
	  STRUCT_FLD(open_method, SKIP_OPEN_TABLE) },

	END_OF_ST_FIELD_INFO
};
/** Function to fill INFORMATION_SCHEMA.XTRADB_ZIP_DICT_COLS with information
collected by scanning SYS_ZIP_DICT_COLS table.
@return 0 on success */
static
int
xtradb_i_s_dict_fill_sys_zip_dict_cols(
	THD*		thd,		/*!< in: thread */
	ulint		table_id,	/*!< in: table ID */
	ulint		column_pos,	/*!< in: column position */
	ulint		dict_id,	/*!< in: dict ID */
	TABLE*		table_to_fill)	/*!< in/out: fill this table */
{
	DBUG_ENTER("xtradb_i_s_dict_fill_sys_zip_dict_cols");

	Field**	fields = table_to_fill->field;

	/* OK() bails out of this function on a store failure */
	OK(field_store_ulint(fields[zip_dict_cols_field_table_id],
		table_id));
	OK(field_store_ulint(fields[zip_dict_cols_field_column_pos],
		column_pos));
	OK(field_store_ulint(fields[zip_dict_cols_field_dict_id],
		dict_id));

	OK(schema_table_store_record(thd, table_to_fill));

	DBUG_RETURN(0);
}
/** Function to populate INFORMATION_SCHEMA.XTRADB_ZIP_DICT_COLS table.
Loop through each record in SYS_ZIP_DICT_COLS, and extract the column
information and fill the INFORMATION_SCHEMA.XTRADB_ZIP_DICT_COLS table.
The dictionary mutex and the mini-transaction are released while each
row is pushed to the I_S table, then re-acquired for the next record.
@return 0 on success */
static
int
xtradb_i_s_sys_zip_dict_cols_fill_table(
	THD*		thd,	/*!< in: thread */
	TABLE_LIST*	tables,	/*!< in/out: tables to fill */
	Item*		)	/*!< in: condition (not used) */
{
	btr_pcur_t	pcur;
	const rec_t*	rec;
	mem_heap_t*	heap;
	mtr_t		mtr;

	DBUG_ENTER("xtradb_i_s_sys_zip_dict_cols_fill_table");
	RETURN_IF_INNODB_NOT_STARTED(tables->schema_table_name);

	/* deny access to user without SUPER_ACL privilege */
	if (check_global_access(thd, SUPER_ACL)) {
		DBUG_RETURN(0);
	}

	heap = mem_heap_create(1000);
	mutex_enter(&dict_sys->mutex);
	mtr_start(&mtr);

	rec = dict_startscan_system(&pcur, &mtr, SYS_ZIP_DICT_COLS);

	while (rec) {
		const char*	err_msg;
		ulint		table_id;
		ulint		column_pos;
		ulint		dict_id;

		/* Extract necessary information from a SYS_ZIP_DICT_COLS
		row */
		err_msg = dict_process_sys_zip_dict_cols(
			heap, rec, &table_id, &column_pos, &dict_id);

		/* Release latches while the row is handed to MySQL */
		mtr_commit(&mtr);
		mutex_exit(&dict_sys->mutex);

		if (!err_msg) {
			xtradb_i_s_dict_fill_sys_zip_dict_cols(
				thd, table_id, column_pos, dict_id,
				tables->table);
		} else {
			push_warning_printf(thd,
				Sql_condition::WARN_LEVEL_WARN,
				ER_CANT_FIND_SYSTEM_REC, "%s", err_msg);
		}

		mem_heap_empty(heap);

		/* Get the next record */
		mutex_enter(&dict_sys->mutex);
		mtr_start(&mtr);
		rec = dict_getnext_system(&pcur, &mtr);
	}

	mtr_commit(&mtr);
	mutex_exit(&dict_sys->mutex);
	mem_heap_free(heap);

	DBUG_RETURN(0);
}
/* Plugin init callback: bind the XTRADB_ZIP_DICT_COLS schema table to
its column definitions and fill function.
@return 0 on success */
static int i_s_xtradb_zip_dict_cols_init(void* p)
{
	DBUG_ENTER("i_s_xtradb_zip_dict_cols_init");
	ST_SCHEMA_TABLE* schema = static_cast<ST_SCHEMA_TABLE*>(p);

	schema->fields_info = xtradb_sys_zip_dict_cols_fields_info;
	schema->fill_table = xtradb_i_s_sys_zip_dict_cols_fill_table;

	DBUG_RETURN(0);
}
/* Plugin descriptor for INFORMATION_SCHEMA.XTRADB_ZIP_DICT_COLS */
UNIV_INTERN struct st_mysql_plugin i_s_xtradb_zip_dict_cols =
{
	STRUCT_FLD(type, MYSQL_INFORMATION_SCHEMA_PLUGIN),
	STRUCT_FLD(info, &i_s_info),
	STRUCT_FLD(name, "XTRADB_ZIP_DICT_COLS"),
	STRUCT_FLD(author, PLUGIN_AUTHOR),
	STRUCT_FLD(descr, "InnoDB compressed columns information"),
	STRUCT_FLD(license, PLUGIN_LICENSE_GPL),
	STRUCT_FLD(init, i_s_xtradb_zip_dict_cols_init),
	STRUCT_FLD(deinit, i_s_common_deinit),
	STRUCT_FLD(version, INNODB_VERSION_SHORT),
	STRUCT_FLD(status_vars, NULL),
	STRUCT_FLD(system_vars, NULL),
	STRUCT_FLD(__reserved1, NULL),
	STRUCT_FLD(flags, 0UL),
};

View File

@ -22,5 +22,7 @@ this program; if not, write to the Free Software Foundation, Inc.,
extern struct st_mysql_plugin i_s_xtradb_read_view;
extern struct st_mysql_plugin i_s_xtradb_internal_hash_tables;
extern struct st_mysql_plugin i_s_xtradb_rseg;
extern struct st_mysql_plugin i_s_xtradb_zip_dict;
extern struct st_mysql_plugin i_s_xtradb_zip_dict_cols;
#endif /* XTRADB_I_S_H */

View File

@ -170,6 +170,9 @@ be less than 256 */
type when the column is true VARCHAR where
MySQL uses 2 bytes to store the data len;
for shorter VARCHARs MySQL uses only 1 byte */
#define DATA_COMPRESSED 16384 /* this is ORed to the precise data
type when the column has COLUMN_FORMAT =
COMPRESSED attribute*/
/*-------------------------------------------*/
/* This many bytes we need to store the type information affecting the
@ -500,6 +503,17 @@ dtype_print(
/*========*/
const dtype_t* type); /*!< in: type */
/**
Calculates the number of extra bytes needed for compression header
depending on precise column type.
@reval 0 if prtype does not include DATA_COMPRESSED flag
@reval ZIP_COLUMN_HEADER_LENGTH if prtype includes DATA_COMPRESSED flag
*/
UNIV_INLINE
ulint
prtype_get_compression_extra(
ulint prtype); /*!< in: precise type */
/* Structure for an SQL data type.
If you add fields to this structure, be sure to initialize them everywhere.
This structure is initialized in the following functions:

View File

@ -26,6 +26,7 @@ Created 1/16/1996 Heikki Tuuri
#include <string.h> /* strlen() */
#include "mach0data.h"
#include "rem0types.h" /* ZIP_COLUMN_HEADER_LENGTH */
#ifndef UNIV_HOTBACKUP
# include "ha_prototypes.h"
@ -709,3 +710,18 @@ dtype_get_sql_null_size(
0, 0));
#endif /* !UNIV_HOTBACKUP */
}
/**
Calculates the number of extra bytes needed for compression header
depending on precise column type.
@reval 0 if prtype does not include DATA_COMPRESSED flag
@reval ZIP_COLUMN_HEADER_LENGTH if prtype includes DATA_COMPRESSED flag
*/
UNIV_INLINE
ulint
prtype_get_compression_extra(
	ulint	prtype)	/*!< in: precise type */
{
	if (prtype & DATA_COMPRESSED) {
		/* Compressed column: reserve room for the header */
		return(ZIP_COLUMN_HEADER_LENGTH);
	}

	return(0);
}

View File

@ -324,6 +324,38 @@ enum dict_fld_sys_datafiles_enum {
DICT_FLD__SYS_DATAFILES__PATH = 3,
DICT_NUM_FIELDS__SYS_DATAFILES = 4
};
/* The columns in SYS_ZIP_DICT */
enum dict_col_sys_zip_dict_enum {
	DICT_COL__SYS_ZIP_DICT__ID		= 0,
	DICT_COL__SYS_ZIP_DICT__NAME		= 1,
	DICT_COL__SYS_ZIP_DICT__DATA		= 2,
	DICT_NUM_COLS__SYS_ZIP_DICT		= 3
};
/* The field numbers in the SYS_ZIP_DICT clustered index */
enum dict_fld_sys_zip_dict_enum {
	DICT_FLD__SYS_ZIP_DICT__ID		= 0,
	DICT_FLD__SYS_ZIP_DICT__DB_TRX_ID	= 1,
	DICT_FLD__SYS_ZIP_DICT__DB_ROLL_PTR	= 2,
	DICT_FLD__SYS_ZIP_DICT__NAME		= 3,
	DICT_FLD__SYS_ZIP_DICT__DATA		= 4,
	DICT_NUM_FIELDS__SYS_ZIP_DICT		= 5
};
/* The columns in SYS_ZIP_DICT_COLS */
enum dict_col_sys_zip_dict_cols_enum {
	DICT_COL__SYS_ZIP_DICT_COLS__TABLE_ID		= 0,
	DICT_COL__SYS_ZIP_DICT_COLS__COLUMN_POS		= 1,
	DICT_COL__SYS_ZIP_DICT_COLS__DICT_ID		= 2,
	DICT_NUM_COLS__SYS_ZIP_DICT_COLS		= 3
};
/* The field numbers in the SYS_ZIP_DICT_COLS clustered index */
enum dict_fld_sys_zip_dict_cols_enum {
	DICT_FLD__SYS_ZIP_DICT_COLS__TABLE_ID		= 0,
	DICT_FLD__SYS_ZIP_DICT_COLS__COLUMN_POS		= 1,
	DICT_FLD__SYS_ZIP_DICT_COLS__DB_TRX_ID		= 2,
	DICT_FLD__SYS_ZIP_DICT_COLS__DB_ROLL_PTR	= 3,
	DICT_FLD__SYS_ZIP_DICT_COLS__DICT_ID		= 4,
	DICT_NUM_FIELDS__SYS_ZIP_DICT_COLS		= 5
};
/* A number of the columns above occur in multiple tables. These are the
lengths of those fields. */

View File

@ -152,6 +152,19 @@ UNIV_INTERN
dberr_t
dict_create_or_check_sys_tablespace(void);
/*=====================================*/
#define ZIP_DICT_MAX_NAME_LENGTH 64
/* Max window size (2^15) minus 262 */
#define ZIP_DICT_MAX_DATA_LENGTH 32506
/** Creates the zip_dict system table inside InnoDB
at server bootstrap or server start if it is not found or is
not of the right form.
@return DB_SUCCESS or error code */
UNIV_INTERN
dberr_t
dict_create_or_check_sys_zip_dict(void);
/********************************************************************//**
Add a single tablespace definition to the data dictionary tables in the
database.
@ -167,6 +180,84 @@ dict_create_add_tablespace_to_dictionary(
trx_t* trx, /*!< in: transaction */
bool commit); /*!< in: if true then commit the
transaction */
/** Add a single compression dictionary definition to the SYS_ZIP_DICT
InnoDB system table.
@return error code or DB_SUCCESS */
UNIV_INTERN
dberr_t
dict_create_add_zip_dict(
const char* name, /*!< in: dict name */
ulint name_len, /*!< in: dict name length */
const char* data, /*!< in: dict data */
ulint data_len, /*!< in: dict data length */
trx_t* trx); /*!< in/out: transaction */
/** Add a single compression dictionary reference to the SYS_ZIP_DICT_COLS
InnoDB system table.
@return error code or DB_SUCCESS */
UNIV_INTERN
dberr_t
dict_create_add_zip_dict_reference(
ulint table_id, /*!< in: table id */
ulint column_pos, /*!< in: column position */
ulint dict_id, /*!< in: dict id */
trx_t* trx); /*!< in/out: transaction */
/** Get a single compression dictionary id for the given
(table id, column pos) pair.
@return error code or DB_SUCCESS */
UNIV_INTERN
dberr_t
dict_create_get_zip_dict_id_by_reference(
ulint table_id, /*!< in: table id */
ulint column_pos, /*!< in: column position */
ulint* dict_id, /*!< out: dict id */
trx_t* trx); /*!< in/out: transaction */
/** Get compression dictionary id for the given name.
@return error code or DB_SUCCESS */
UNIV_INTERN
dberr_t
dict_create_get_zip_dict_id_by_name(
const char* dict_name, /*!< in: dict name */
ulint dict_name_len, /*!< in: dict name length */
ulint* dict_id, /*!< out: dict id */
trx_t* trx); /*!< in/out: transaction */
/** Get compression dictionary info (name and data) for the given id.
Allocates memory for name and data on success.
Must be freed with mem_free().
@return error code or DB_SUCCESS */
UNIV_INTERN
dberr_t
dict_create_get_zip_dict_info_by_id(
ulint dict_id, /*!< in: dict id */
char** name, /*!< out: dict name */
ulint* name_len, /*!< out: dict name length */
char** data, /*!< out: dict data */
ulint* data_len, /*!< out: dict data length */
trx_t* trx); /*!< in/out: transaction */
/** Remove a single compression dictionary from the data dictionary
tables in the database.
@return error code or DB_SUCCESS */
UNIV_INTERN
dberr_t
dict_create_remove_zip_dict(
const char* name, /*!< in: dict name */
ulint name_len, /*!< in: dict name length */
trx_t* trx); /*!< in/out: transaction */
/** Remove all compression dictionary references for the given table ID from
the data dictionary tables in the database.
@return error code or DB_SUCCESS */
UNIV_INTERN
dberr_t
dict_create_remove_zip_dict_references_for_table(
ulint table_id, /*!< in: table id */
trx_t* trx); /*!< in/out: transaction */
/********************************************************************//**
Add a foreign key definition to the data dictionary tables.
@return error code or DB_SUCCESS */

View File

@ -1845,6 +1845,52 @@ dict_table_set_corrupt_by_space(
ulint space_id,
ibool need_mutex);
/** Insert a records into SYS_ZIP_DICT.
@retval DB_SUCCESS if OK
@retval dberr_t if the insert failed */
UNIV_INTERN
dberr_t
dict_create_zip_dict(
const char* name, /*!< in: zip_dict name */
ulint name_len, /*!< in: zip_dict name length*/
const char* data, /*!< in: zip_dict data */
ulint data_len); /*!< in: zip_dict data length */
/** Get single compression dictionary id for the given
(table id, column pos) pair.
@retval DB_SUCCESS if OK
@retval DB_RECORD_NOT_FOUND if not found */
UNIV_INTERN
dberr_t
dict_get_dictionary_id_by_key(
ulint table_id, /*!< in: table id */
ulint column_pos, /*!< in: column position */
ulint* dict_id); /*!< out: zip_dict id */
/** Get compression dictionary info (name and data) for the given id.
Allocates memory in name->str and data->str on success.
Must be freed with mem_free().
@retval DB_SUCCESS if OK
@retval DB_RECORD_NOT_FOUND if not found */
UNIV_INTERN
dberr_t
dict_get_dictionary_info_by_id(
ulint dict_id, /*!< in: table name */
char** name, /*!< out: dictionary name */
ulint* name_len, /*!< out: dictionary name length*/
char** data, /*!< out: dictionary data */
ulint* data_len); /*!< out: dictionary data length*/
/** Delete a record in SYS_ZIP_DICT with the given name.
@retval DB_SUCCESS if OK
@retval DB_RECORD_NOT_FOUND if not found
@retval DB_ROW_IS_REFERENCED if in use */
UNIV_INTERN
dberr_t
dict_drop_zip_dict(
const char* name, /*!< in: zip_dict name */
ulint name_len); /*!< in: zip_dict name length*/
#ifndef UNIV_NONINL
#include "dict0dict.ic"
#endif

View File

@ -44,6 +44,8 @@ enum dict_system_id_t {
SYS_FOREIGN_COLS,
SYS_TABLESPACES,
SYS_DATAFILES,
SYS_ZIP_DICT,
SYS_ZIP_DICT_COLS,
/* This must be last item. Defines the number of system tables. */
SYS_NUM_SYSTEM_TABLES
@ -386,6 +388,33 @@ dict_process_sys_datafiles(
const rec_t* rec, /*!< in: current SYS_DATAFILES rec */
ulint* space, /*!< out: pace id */
const char** path); /*!< out: datafile path */
/** This function parses a SYS_ZIP_DICT record, extracts necessary
information from the record and returns to caller.
@return error message, or NULL on success */
UNIV_INTERN
const char*
dict_process_sys_zip_dict(
mem_heap_t* heap, /*!< in/out: heap memory */
ulint zip_size, /*!< in: nonzero=compressed BLOB page size */
const rec_t* rec, /*!< in: current SYS_ZIP_DICT rec */
ulint* id, /*!< out: dict id */
const char** name, /*!< out: dict name */
const char** data, /*!< out: dict data */
ulint* data_len); /*!< out: dict data length */
/** This function parses a SYS_ZIP_DICT_COLS record, extracts necessary
information from the record and returns to caller.
@return error message, or NULL on success */
UNIV_INTERN
const char*
dict_process_sys_zip_dict_cols(
mem_heap_t* heap, /*!< in/out: heap memory */
const rec_t* rec, /*!< in: current SYS_ZIP_DICT rec */
ulint* table_id, /*!< out: table id */
ulint* column_pos, /*!< out: column position */
ulint* dict_id); /*!< out: dict id */
/********************************************************************//**
Get the filepath for a spaceid from SYS_DATAFILES. This function provides
a temporary heap which is used for the table lookup, but not for the path.

View File

@ -375,6 +375,7 @@ extern bool fts_need_sync;
/** Variable specifying the table that has Fulltext index to display its
content through information schema table */
extern char* fts_internal_tbl_name;
extern char* fts_internal_tbl_name2;
#define fts_que_graph_free(graph) \
do { \
@ -823,6 +824,15 @@ void
fts_drop_orphaned_tables(void);
/*==========================*/
/* Get parent table name if it's a fts aux table
@param[in] aux_table_name aux table name
@param[in] aux_table_len aux table length
@return parent table name, or NULL */
char*
fts_get_parent_table_name(
const char* aux_table_name,
ulint aux_table_len);
/******************************************************************//**
Since we do a horizontal split on the index table, we need to drop
all the split tables.

View File

@ -131,14 +131,27 @@ os_thread_create_func(
os_thread_id_t* thread_id); /*!< out: id of the created
thread, or NULL */
/**
Waits until the specified thread completes and joins it. Its return value is
ignored.
@param thread thread to join */
UNIV_INTERN
void
os_thread_join(
os_thread_t thread);
/*****************************************************************//**
Exits the current thread. */
UNIV_INTERN
void
os_thread_exit(
/*===========*/
void* exit_value) /*!< in: exit value; in Windows this void*
void* exit_value, /*!< in: exit value; in Windows this void*
is cast as a DWORD */
bool detach = true) /*!< in: if true, the thread will be detached
right before exiting. If false, another thread
is responsible for joining this thread. */
UNIV_COLD MY_ATTRIBUTE((noreturn));
/*****************************************************************//**
Returns the thread identifier of current thread.

View File

@ -71,4 +71,7 @@ enum rec_format_enum {
};
typedef enum rec_format_enum rec_format_t;
/** Compressed field header size in bytes */
#define ZIP_COLUMN_HEADER_LENGTH 2
#endif

View File

@ -41,6 +41,9 @@ struct SysIndexCallback;
extern ibool row_rollback_on_timeout;
extern uint srv_compressed_columns_zip_level;
extern ulong srv_compressed_columns_threshold;
struct row_prebuilt_t;
/*******************************************************************//**
@ -51,6 +54,49 @@ row_mysql_prebuilt_free_blob_heap(
/*==============================*/
row_prebuilt_t* prebuilt); /*!< in: prebuilt struct of a
ha_innobase:: table handle */
/** Frees the compress heap in prebuilt when no longer needed. */
UNIV_INTERN
void
row_mysql_prebuilt_free_compress_heap(
row_prebuilt_t* prebuilt); /*!< in: prebuilt struct of a
ha_innobase:: table handle */
/** Uncompress blob/text/varchar column using zlib
@return pointer to the uncompressed data */
const byte*
row_decompress_column(
const byte* data, /*!< in: data in innodb(compressed) format */
ulint *len, /*!< in: data length; out: length of
decompressed data*/
const byte* dict_data,
/*!< in: optional dictionary data used for
decompression */
ulint dict_data_len,
/*!< in: optional dictionary data length */
row_prebuilt_t* prebuilt);
/*!< in: use prebuilt->compress_heap only
here*/
/** Compress blob/text/varchar column using zlib
@return pointer to the compressed data */
byte*
row_compress_column(
const byte* data, /*!< in: data in mysql(uncompressed)
format */
ulint *len, /*!< in: data length; out: length of
compressed data*/
ulint lenlen, /*!< in: bytes used to store the length of
data */
const byte* dict_data,
/*!< in: optional dictionary data used for
compression */
ulint dict_data_len,
/*!< in: optional dictionary data length */
row_prebuilt_t* prebuilt);
/*!< in: use prebuilt->compress_heap only
here*/
/*******************************************************************//**
Stores a >= 5.0.3 format true VARCHAR length to dest, in the MySQL row
format.
@ -89,10 +135,21 @@ row_mysql_store_blob_ref(
to 4 bytes */
const void* data, /*!< in: BLOB data; if the value to store
is SQL NULL this should be NULL pointer */
ulint len); /*!< in: BLOB length; if the value to store
ulint len, /*!< in: BLOB length; if the value to store
is SQL NULL this should be 0; remember
also to set the NULL bit in the MySQL record
header! */
bool need_decompression,
/*!< in: if the data need to be compressed*/
const byte* dict_data,
/*!< in: optional compression dictionary
data */
ulint dict_data_len,
/*!< in: optional compression dictionary data
length */
row_prebuilt_t* prebuilt);
/*<! in: use prebuilt->compress_heap only
here */
/*******************************************************************//**
Reads a reference to a BLOB in the MySQL format.
@return pointer to BLOB data */
@ -103,8 +160,17 @@ row_mysql_read_blob_ref(
ulint* len, /*!< out: BLOB length */
const byte* ref, /*!< in: BLOB reference in the
MySQL format */
ulint col_len); /*!< in: BLOB reference length
ulint col_len, /*!< in: BLOB reference length
(not BLOB length) */
bool need_compression,
/*!< in: if the data need to be
compressed*/
const byte* dict_data, /*!< in: optional compression
dictionary data */
ulint dict_data_len, /*!< in: optional compression
dictionary data length */
row_prebuilt_t* prebuilt); /*!< in: use prebuilt->compress_heap
only here */
/**************************************************************//**
Pad a column with spaces. */
UNIV_INTERN
@ -152,7 +218,16 @@ row_mysql_store_col_in_innobase_format(
necessarily the length of the actual
payload data; if the column is a true
VARCHAR then this is irrelevant */
ulint comp); /*!< in: nonzero=compact format */
ulint comp, /*!< in: nonzero=compact format */
bool need_compression,
/*!< in: if the data need to be
compressed */
const byte* dict_data, /*!< in: optional compression
dictionary data */
ulint dict_data_len, /*!< in: optional compression
dictionary data length */
row_prebuilt_t* prebuilt); /*!< in: use prebuilt->compress_heap
only here */
/****************************************************************//**
Handles user errors and lock waits detected by the database engine.
@return true if it was a lock wait and we should continue running the
@ -643,6 +718,8 @@ struct mysql_row_templ_t {
ulint is_unsigned; /*!< if a column type is an integer
type and this field is != 0, then
it is an unsigned integer type */
bool compressed; /*!< if column format is compressed */
LEX_CSTRING zip_dict_data; /*!< associated compression dictionary */
};
#define MYSQL_FETCH_CACHE_SIZE 8
@ -839,6 +916,8 @@ struct row_prebuilt_t {
in fetch_cache */
mem_heap_t* blob_heap; /*!< in SELECTS BLOB fields are copied
to this heap */
mem_heap_t* compress_heap; /*!< memory heap used to compress
/decompress blob column*/
mem_heap_t* old_vers_heap; /*!< memory heap where a previous
version is built in consistent read */
bool in_fts_query; /*!< Whether we are in a FTS query */

View File

@ -487,6 +487,9 @@ extern ibool srv_priority_boost;
extern ulint srv_truncated_status_writes;
extern ulint srv_available_undo_logs;
extern ulint srv_column_compressed;
extern ulint srv_column_decompressed;
extern ulint srv_mem_pool_size;
extern ulint srv_lock_table_size;
@ -1079,6 +1082,8 @@ struct export_var_t{
ulint innodb_purge_view_trx_id_age; /*!< rw_max_trx_id
- purged view's min trx_id */
#endif /* UNIV_DEBUG */
ulint innodb_column_compressed; /*!< srv_column_compressed */
ulint innodb_column_decompressed; /*!< srv_column_decompressed */
};
/** Thread slot in the thread table. */

View File

@ -47,7 +47,7 @@ Created 1/20/1994 Heikki Tuuri
#define INNODB_VERSION_BUGFIX MYSQL_VERSION_PATCH
#ifndef PERCONA_INNODB_VERSION
#define PERCONA_INNODB_VERSION 78.1
#define PERCONA_INNODB_VERSION 79.0
#endif
/* Enable UNIV_LOG_ARCHIVE in XtraDB */

View File

@ -975,6 +975,7 @@ log_init(void)
log_sys->next_checkpoint_no = 0;
log_sys->last_checkpoint_lsn = log_sys->lsn;
log_sys->next_checkpoint_lsn = log_sys->lsn;
log_sys->n_pending_checkpoint_writes = 0;
@ -1891,6 +1892,7 @@ log_complete_checkpoint(void)
log_sys->next_checkpoint_no++;
ut_ad(log_sys->next_checkpoint_lsn >= log_sys->last_checkpoint_lsn);
log_sys->last_checkpoint_lsn = log_sys->next_checkpoint_lsn;
MONITOR_SET(MONITOR_LSN_CHECKPOINT_AGE,
log_sys->lsn - log_sys->last_checkpoint_lsn);
@ -1978,11 +1980,17 @@ log_group_checkpoint(
ulint i;
ut_ad(!srv_read_only_mode);
ut_ad(srv_shutdown_state != SRV_SHUTDOWN_LAST_PHASE);
ut_ad(mutex_own(&(log_sys->mutex)));
ut_a(LOG_CHECKPOINT_SIZE <= OS_FILE_LOG_BLOCK_SIZE);
buf = group->checkpoint_buf;
#ifdef UNIV_DEBUG
lsn_t old_next_checkpoint_lsn
= mach_read_from_8(buf + LOG_CHECKPOINT_LSN);
ut_ad(old_next_checkpoint_lsn <= log_sys->next_checkpoint_lsn);
#endif /* UNIV_DEBUG */
mach_write_to_8(buf + LOG_CHECKPOINT_NO, log_sys->next_checkpoint_no);
mach_write_to_8(buf + LOG_CHECKPOINT_LSN, log_sys->next_checkpoint_lsn);
@ -2242,6 +2250,7 @@ log_checkpoint(
return(FALSE);
}
ut_ad(oldest_lsn >= log_sys->next_checkpoint_lsn);
log_sys->next_checkpoint_lsn = oldest_lsn;
#ifdef UNIV_DEBUG
@ -3490,13 +3499,15 @@ loop:
before proceeding further. */
srv_shutdown_state = SRV_SHUTDOWN_FLUSH_PHASE;
count = 0;
while (buf_page_cleaner_is_active) {
++count;
os_thread_sleep(100000);
if (srv_print_verbose_log && count > 600) {
while (buf_page_cleaner_is_active || buf_lru_manager_is_active) {
if (srv_print_verbose_log && count == 0) {
ib_logf(IB_LOG_LEVEL_INFO,
"Waiting for page_cleaner to "
"finish flushing of buffer pool");
}
++count;
os_thread_sleep(100000);
if (count > 600) {
count = 0;
}
}
@ -3664,6 +3675,7 @@ loop:
ut_a(freed);
ut_a(lsn == log_sys->lsn);
ut_ad(lsn == log_sys->last_checkpoint_lsn);
if (lsn < srv_start_lsn) {
ib_logf(IB_LOG_LEVEL_ERROR,

View File

@ -441,6 +441,7 @@ log_online_track_missing_on_startup(
current server startup */
{
ut_ad(last_tracked_lsn != tracking_start_lsn);
ut_ad(srv_track_changed_pages);
ib_logf(IB_LOG_LEVEL_WARN, "last tracked LSN in \'%s\' is " LSN_PF
", but the last checkpoint LSN is " LSN_PF ". This might be "
@ -623,6 +624,8 @@ log_online_read_init(void)
compile_time_assert(MODIFIED_PAGE_BLOCK_BITMAP % 8 == 0);
compile_time_assert(MODIFIED_PAGE_BLOCK_BITMAP_LEN % 8 == 0);
ut_ad(srv_track_changed_pages);
log_bmp_sys = static_cast<log_bitmap_struct *>
(ut_malloc(sizeof(*log_bmp_sys)));
log_bmp_sys->read_buf_ptr = static_cast<byte *>
@ -1097,10 +1100,15 @@ log_online_write_bitmap_page(
{
ibool success;
ut_ad(srv_track_changed_pages);
ut_ad(mutex_own(&log_bmp_sys->mutex));
/* Simulate a write error */
DBUG_EXECUTE_IF("bitmap_page_write_error", return FALSE;);
DBUG_EXECUTE_IF("bitmap_page_write_error",
ib_logf(IB_LOG_LEVEL_ERROR,
"simulating bitmap write error in "
"log_online_write_bitmap_page");
return FALSE;);
success = os_file_write(log_bmp_sys->out.name, log_bmp_sys->out.file,
block, log_bmp_sys->out.offset,
@ -1190,7 +1198,9 @@ log_online_write_bitmap(void)
rbt_next(log_bmp_sys->modified_pages, bmp_tree_node);
DBUG_EXECUTE_IF("bitmap_page_2_write_error",
DBUG_SET("+d,bitmap_page_write_error"););
ut_ad(bmp_tree_node); /* 2nd page must exist */
DBUG_SET("+d,bitmap_page_write_error");
DBUG_SET("-d,bitmap_page_2_write_error"););
}
rbt_reset(log_bmp_sys->modified_pages);
@ -1211,15 +1221,11 @@ log_online_follow_redo_log(void)
log_group_t* group;
ibool result;
mutex_enter(&log_bmp_sys->mutex);
if (!srv_track_changed_pages) {
mutex_exit(&log_bmp_sys->mutex);
return FALSE;
}
ut_ad(srv_track_changed_pages);
ut_ad(!srv_read_only_mode);
mutex_enter(&log_bmp_sys->mutex);
/* Grab the LSN of the last checkpoint, we will parse up to it */
mutex_enter(&(log_sys->mutex));
log_bmp_sys->end_lsn = log_sys->last_checkpoint_lsn;
@ -1562,9 +1568,12 @@ log_online_diagnose_bitmap_eof(
/* It's a "Warning" here because it's not a fatal error
for the whole server */
ib_logf(IB_LOG_LEVEL_WARN,
"changed page bitmap file \'%s\' does not "
"contain a complete run at the end.",
bitmap_file->name);
"changed page bitmap file \'%s\', size "
UINT64PF " bytes, does not "
"contain a complete run at the next read "
"offset " UINT64PF,
bitmap_file->name, bitmap_file->size,
bitmap_file->offset);
return FALSE;
}
}

View File

@ -56,7 +56,18 @@ mach_parse_compressed(
*val = flag;
return(ptr + 1);
} else if (flag < 0xC0UL) {
}
/* Workaround GCC bug
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=77673:
the compiler moves mach_read_from_4 right to the beginning of the
function, causing and out-of-bounds read if we are reading a short
integer close to the end of buffer. */
#if defined(__GNUC__) && (__GNUC__ >= 5) && !defined(__clang__)
asm volatile("": : :"memory");
#endif
if (flag < 0xC0UL) {
if (end_ptr < ptr + 2) {
return(NULL);
}

View File

@ -210,14 +210,33 @@ os_thread_create_func(
#endif
}
/**
Waits until the specified thread completes and joins it. Its return value is
ignored.
@param thread thread to join */
UNIV_INTERN
void
os_thread_join(
	os_thread_t	thread)
{
	int	join_err MY_ATTRIBUTE((unused));

	join_err = pthread_join(thread, NULL);

	/* Joining a thread that has already terminated is allowed, in
	which case pthread_join() reports ESRCH. */
	ut_ad(join_err == 0 || join_err == ESRCH);
}
/*****************************************************************//**
Exits the current thread. */
UNIV_INTERN
void
os_thread_exit(
/*===========*/
void* exit_value) /*!< in: exit value; in Windows this void*
void* exit_value, /*!< in: exit value; in Windows this void*
is cast as a DWORD */
bool detach) /*!< in: if true, the thread will be detached
right before exiting. If false, another thread
is responsible for joining this thread. */
{
#ifdef UNIV_DEBUG_THREAD_CREATION
fprintf(stderr, "Thread exits, id %lu\n",
@ -233,7 +252,8 @@ os_thread_exit(
#ifdef __WIN__
ExitThread((DWORD) exit_value);
#else
pthread_detach(pthread_self());
if (detach)
pthread_detach(pthread_self());
pthread_exit(exit_value);
#endif
}

View File

@ -320,7 +320,8 @@ rec_init_offsets_comp_ordinary(
stored in one byte for 0..127. The length
will be encoded in two bytes when it is 128 or
more, or when the field is stored externally. */
if (UNIV_UNLIKELY(col->len > 255)
if (UNIV_UNLIKELY(col->len > 255 -
prtype_get_compression_extra(col->prtype))
|| UNIV_UNLIKELY(col->mtype
== DATA_BLOB)) {
if (len & 0x80) {
@ -841,8 +842,12 @@ rec_get_converted_size_comp_prefix_low(
continue;
}
ut_ad(len <= col->len || col->mtype == DATA_BLOB
|| (col->len == 0 && col->mtype == DATA_VARCHAR));
ut_ad(len <= col->len || col->mtype == DATA_BLOB ||
((col->mtype == DATA_VARCHAR || col->mtype == DATA_BINARY
|| col->mtype == DATA_VARMYSQL)
&& (col->len == 0
|| len <= col->len +
prtype_get_compression_extra(col->prtype))));
fixed_len = field->fixed_len;
if (temp && fixed_len
@ -874,7 +879,9 @@ rec_get_converted_size_comp_prefix_low(
ut_ad(col->len >= 256 || col->mtype == DATA_BLOB);
extra_size += 2;
} else if (len < 128
|| (col->len < 256 && col->mtype != DATA_BLOB)) {
|| (col->len < 256 -
prtype_get_compression_extra(col->prtype)
&& col->mtype != DATA_BLOB)) {
extra_size++;
} else {
/* For variable-length columns, we look up the
@ -1269,12 +1276,16 @@ rec_convert_dtuple_to_rec_comp(
*lens-- = (byte) (len >> 8) | 0xc0;
*lens-- = (byte) len;
} else {
ut_ad(len <= dtype_get_len(type)
ut_ad(len <= dtype_get_len(type) +
prtype_get_compression_extra(
dtype_get_prtype(type))
|| dtype_get_mtype(type) == DATA_BLOB
|| !strcmp(index->name,
FTS_INDEX_TABLE_IND_NAME));
if (len < 128
|| (dtype_get_len(type) < 256
|| (dtype_get_len(type) < 256 -
prtype_get_compression_extra(
dtype_get_prtype(type))
&& dtype_get_mtype(type) != DATA_BLOB)) {
*lens-- = (byte) len;

View File

@ -960,7 +960,7 @@ fts_parallel_merge(
CloseHandle(psort_info->thread_hdl);
#endif /*__WIN__ */
os_thread_exit(NULL);
os_thread_exit(NULL, false);
OS_THREAD_DUMMY_RETURN;
}

View File

@ -613,7 +613,7 @@ row_log_table_delete(
&old_pk_extra_size);
ut_ad(old_pk_extra_size < 0x100);
mrec_size = 4 + old_pk_size;
mrec_size = 6 + old_pk_size;
/* Log enough prefix of the BLOB unless both the
old and new table are in COMPACT or REDUNDANT format,
@ -643,8 +643,8 @@ row_log_table_delete(
*b++ = static_cast<byte>(old_pk_extra_size);
/* Log the size of external prefix we saved */
mach_write_to_2(b, ext_size);
b += 2;
mach_write_to_4(b, ext_size);
b += 4;
rec_convert_dtuple_to_temp(
b + old_pk_extra_size, new_index,
@ -2268,14 +2268,14 @@ row_log_table_apply_op(
break;
case ROW_T_DELETE:
/* 1 (extra_size) + 2 (ext_size) + at least 1 (payload) */
if (mrec + 4 >= mrec_end) {
/* 1 (extra_size) + 4 (ext_size) + at least 1 (payload) */
if (mrec + 6 >= mrec_end) {
return(NULL);
}
extra_size = *mrec++;
ext_size = mach_read_from_2(mrec);
mrec += 2;
ext_size = mach_read_from_4(mrec);
mrec += 4;
ut_ad(mrec < mrec_end);
/* We assume extra_size < 0x100 for the PRIMARY KEY prefix.

View File

@ -523,7 +523,12 @@ row_merge_buf_add(
dfield_set_len(field, len);
}
ut_ad(len <= col->len || col->mtype == DATA_BLOB);
ut_ad(len <= col->len || col->mtype == DATA_BLOB ||
((col->mtype == DATA_VARCHAR || col->mtype == DATA_BINARY
|| col->mtype == DATA_VARMYSQL)
&& (col->len == 0
|| len <= col->len +
prtype_get_compression_extra(col->prtype))));
fixed_len = ifield->fixed_len;
if (fixed_len && !dict_table_is_comp(index->table)
@ -552,7 +557,9 @@ row_merge_buf_add(
} else if (dfield_is_ext(field)) {
extra_size += 2;
} else if (len < 128
|| (col->len < 256 && col->mtype != DATA_BLOB)) {
|| (col->len < 256 -
prtype_get_compression_extra(col->prtype)
&& col->mtype != DATA_BLOB)) {
extra_size++;
} else {
/* For variable-length columns, we look up the
@ -3780,6 +3787,13 @@ wait_again:
" exited when creating FTS"
" index '%s'",
indexes[i]->name);
} else {
for (j = 0; j < FTS_NUM_AUX_INDEX;
j++) {
os_thread_join(merge_info[j]
.thread_hdl);
}
}
} else {
/* This cannot report duplicates; an

View File

@ -65,11 +65,54 @@ Created 9/17/2000 Heikki Tuuri
#include "m_string.h"
#include "my_sys.h"
#include "ha_prototypes.h"
#include "zlib.h"
#include <algorithm>
/** Provide optional 4.x backwards compatibility for 5.0 and above */
UNIV_INTERN ibool row_rollback_on_timeout = FALSE;
/**
Z_NO_COMPRESSION = 0
Z_BEST_SPEED = 1
Z_BEST_COMPRESSION = 9
Z_DEFAULT_COMPRESSION = -1
Compression level to be used by zlib for compressed-blob columns.
Settable by user.
*/
UNIV_INTERN uint srv_compressed_columns_zip_level = DEFAULT_COMPRESSION_LEVEL;
/**
(Z_FILTERED | Z_HUFFMAN_ONLY | Z_RLE | Z_FIXED | Z_DEFAULT_STRATEGY)
The strategy parameter is used to tune the compression algorithm. Use the
value Z_DEFAULT_STRATEGY for normal data, Z_FILTERED for data produced by a
filter (or predictor), Z_HUFFMAN_ONLY to force Huffman encoding only
(no string match), or Z_RLE to limit match distances to one
(run-length encoding). Filtered data consists mostly of small values with a
somewhat random distribution. In this case, the compression algorithm is
tuned to compress them better.
The effect of Z_FILTERED is to force more Huffman coding and less string
matching; it is somewhat intermediate between Z_DEFAULT_STRATEGY and
Z_HUFFMAN_ONLY. Z_RLE is designed to be almost as fast as Z_HUFFMAN_ONLY,
but give better compression for PNG image data. The strategy parameter only
affects the compression ratio but not the correctness of the compressed
output even if it is not set appropriately. Z_FIXED prevents the use of
dynamic Huffman codes, allowing for a simpler decoder for special
applications.
*/
const uint srv_compressed_columns_zlib_strategy = Z_DEFAULT_STRATEGY;
/** Compress the column if the data length exceeds this value. */
UNIV_INTERN ulong srv_compressed_columns_threshold = 96;
/**
Determine if zlib needs to compute adler32 value for the compressed data.
This variables is similar to page_zip_zlib_wrap, but only used by
compressed blob columns.
*/
const bool srv_compressed_columns_zlib_wrap = true;
/**
Determine if zlib will use custom memory allocation functions based on
InnoDB memory heap routines (mem_heap_t*).
*/
const bool srv_compressed_columns_zlib_use_heap = false;
/** Chain node of the list of tables to drop in the background. */
struct row_mysql_drop_t{
char* table_name; /*!< table name */
@ -173,6 +216,17 @@ row_mysql_prebuilt_free_blob_heap(
prebuilt->blob_heap = NULL;
}
/** Frees the compress heap in prebuilt when no longer needed.
The heap is lazily (re)created by the column compress/decompress
routines, so releasing it here only reclaims memory between uses. */
UNIV_INTERN
void
row_mysql_prebuilt_free_compress_heap(
	row_prebuilt_t*	prebuilt)	/*!< in: prebuilt struct of a
					ha_innobase:: table handle */
{
	/* NOTE(review): assumes prebuilt->compress_heap is non-NULL;
	presumably callers check this before calling — TODO confirm. */
	mem_heap_free(prebuilt->compress_heap);
	/* Reset so the next compression/decompression recreates the heap */
	prebuilt->compress_heap = NULL;
}
/*******************************************************************//**
Stores a >= 5.0.3 format true VARCHAR length to dest, in the MySQL row
format.
@ -229,6 +283,425 @@ row_mysql_read_true_varchar(
return(field + 1);
}
/**
Compressed BLOB header format:
---------------------------------------------------------------
| reserved | wrap | algorithm | len-len | compressed | unused |
| [1] | [1] | [5] | [3] | [1] | [5] |
---------------------------------------------------------------
| 0 0 | 1 1 | 2 6 | 7 9 | 10 10 | 11 15 |
---------------------------------------------------------------
* 'reserved' bit is planned to be used in future versions of the BLOB
header. In this version it must always be
'default_zip_column_reserved_value' (0).
* 'wrap' identifies if compression algorithm calculated a checksum
(adler32 in case of zlib) and appended it to the compressed data.
 * 'algorithm' identifies which algorithm was used to compress this BLOB.
Currently, the only value 'default_zip_column_algorithm_value' (0) is
supported.
* 'len-len' field identifies the length of the column length data portion
followed by this header (see below).
* If 'compressed' bit is set to 1, then this header is immediately followed
by 1..8 bytes (depending on the value of 'len-len' bitfield) which
determine original (uncompressed) block size. These 'len-len' bytes are
followed by compressed representation of the original data.
* If 'compressed' bit is set to 0, every other bitfield ('wrap',
 'algorithm' and 'len-len') must be ignored. In this case the header is
immediately followed by uncompressed (original) data.
*/
/**
Currently the only supported value for the 'reserved' field is
false (0).
*/
static const bool default_zip_column_reserved_value = false;
/**
Currently the only supported value for the 'algorithm' field is 0, which
means 'zlib'.
*/
static const uint default_zip_column_algorithm_value = 0;
static const size_t zip_column_prefix_max_length =
ZIP_COLUMN_HEADER_LENGTH + 8;
static const size_t zip_column_header_length = ZIP_COLUMN_HEADER_LENGTH;
/* 'reserved', bit 0 */
static const uint zip_column_reserved = 0;
/* 0000 0000 0000 0001 */
static const uint zip_column_reserved_mask = 0x0001;
/* 'wrap', bit 1 */
static const uint zip_column_wrap = 1;
/* 0000 0000 0000 0010 */
static const uint zip_column_wrap_mask = 0x0002;
/* 'algorithm', bit 2,3,4,5,6 */
static const uint zip_column_algorithm = 2;
/* 0000 0000 0111 1100 */
static const uint zip_column_algorithm_mask = 0x007C;
/* 'len-len', bit 7,8,9 */
static const uint zip_column_data_length = 7;
/* 0000 0011 1000 0000 */
static const uint zip_column_data_length_mask = 0x0380;
/* 'compressed', bit 10 */
static const uint zip_column_compressed = 10;
/* 0000 0100 0000 0000 */
static const uint zip_column_compressed_mask = 0x0400;
/** Writes the 2-byte compressed-column header built from the given
bitfield components (see the header format description above). */
static void
column_set_compress_header(
	byte*	data,
	bool	compressed,
	ulint	lenlen,
	uint	alg,
	bool	wrap,
	bool	reserved)
{
	/* Pack every bitfield into one 16-bit value in a single
	expression, then store it at the start of the column data. */
	const ulint	header
		= (compressed << zip_column_compressed)
		| (lenlen << zip_column_data_length)
		| (alg << zip_column_algorithm)
		| (wrap << zip_column_wrap)
		| (reserved << zip_column_reserved);

	mach_write_to_2(data, header);
}
/** Parse compressed block header into components */
static void
column_get_compress_header(
const byte* data,
bool* compressed,
ulint* lenlen,
uint* alg,
bool* wrap,
bool* reserved
)
{
ulint header = mach_read_from_2(data);
*compressed = ((header & zip_column_compressed_mask) >>
zip_column_compressed);
*lenlen = ((header & zip_column_data_length_mask) >>
zip_column_data_length);
*alg = ((header & zip_column_algorithm_mask) >>
zip_column_algorithm);
*wrap = ((header & zip_column_wrap_mask) >>
zip_column_wrap);
*reserved = ((header & zip_column_reserved_mask) >>
zip_column_reserved);
}
/** Allocate memory for zlib. */
static
void*
column_zip_zalloc(
void* opaque, /*!< in/out: memory heap */
uInt items, /*!< in: number of items to allocate */
uInt size) /*!< in: size of an item in bytes */
{
return(mem_heap_zalloc(static_cast<mem_heap_t*>(opaque),
items * size));
}
/** Deallocate memory for zlib.
Intentionally a no-op: allocations are served from a mem_heap_t
(see column_zip_zalloc), and heap memory is released in bulk when
the heap itself is freed, not per object. */
static
void
column_zip_free(
	void*	opaque MY_ATTRIBUTE((unused)),	/*!< in: memory heap */
	void*	address MY_ATTRIBUTE((unused)))	/*!< in: object to free */
{
}
/** Configure the zlib allocator to use the given memory heap. */
UNIV_INTERN
void
column_zip_set_alloc(
	void*		stream,	/*!< in/out: zlib stream */
	mem_heap_t*	heap)	/*!< in: memory heap to use */
{
	z_stream* const	zs = static_cast<z_stream*>(stream);

	if (!srv_compressed_columns_zlib_use_heap) {
		/* Let zlib fall back to its default malloc/free */
		zs->zalloc = (alloc_func)0;
		zs->zfree = (free_func)0;
		zs->opaque = (voidpf)0;
		return;
	}

	/* Route all zlib allocations through the supplied InnoDB heap */
	zs->zalloc = column_zip_zalloc;
	zs->zfree = column_zip_free;
	zs->opaque = heap;
}
/** Compress blob/text/varchar column using zlib.
On success the returned buffer starts with the 2-byte compressed-column
header (see the format description above), followed by 'lenlen' bytes of
the original length and then the deflate stream.  If the data is too
short, compression is disabled, or compression does not shrink the data,
the original bytes are returned behind a header with the 'compressed'
bit cleared.  The buffer lives in prebuilt->compress_heap.
@return pointer to the compressed data */
byte*
row_compress_column(
	const byte*	data,	/*!< in: data in mysql(uncompressed)
				format */
	ulint		*len,	/*!< in: data length; out: length of
				compressed data*/
	ulint		lenlen,	/*!< in: bytes used to store the length of
				data */
	const byte*	dict_data,
				/*!< in: optional dictionary data used for
				compression */
	ulint		dict_data_len,
				/*!< in: optional dictionary data length */
	row_prebuilt_t*	prebuilt)
				/*!< in: use prebuilt->compress_heap only
				here*/
{
	int		err = 0;
	ulint		comp_len = *len;
	/* Worst case: header + largest length field + uncompressed copy */
	ulint		buf_len = *len + zip_column_prefix_max_length;
	byte*		buf;
	byte*		ptr;
	z_stream	c_stream;
	bool		wrap = srv_compressed_columns_zlib_wrap;

	/* Negative window bits tell zlib to emit a raw deflate stream
	without the zlib wrapper (header and adler32 checksum). */
	int		window_bits = wrap ? MAX_WBITS : -MAX_WBITS;

	/* Lazily create the per-handle heap; it is reused across calls
	and freed by row_mysql_prebuilt_free_compress_heap(). */
	if (!prebuilt->compress_heap) {
		prebuilt->compress_heap =
			mem_heap_create(max(UNIV_PAGE_SIZE, buf_len));
	}

	buf = static_cast<byte*>(mem_heap_zalloc(
			prebuilt->compress_heap,buf_len));

	/* Small values are not worth compressing; also honor an explicit
	"no compression" setting. */
	if (*len < srv_compressed_columns_threshold ||
		srv_compressed_columns_zip_level == Z_NO_COMPRESSION)
		goto do_not_compress;

	/* Compressed payload goes after the header and the length field */
	ptr = buf + zip_column_header_length + lenlen;

	/*init deflate object*/
	c_stream.next_in = const_cast<Bytef*>(data);
	c_stream.avail_in = *len;
	c_stream.next_out = ptr;
	c_stream.avail_out = comp_len;

	column_zip_set_alloc(&c_stream, prebuilt->compress_heap);

	err = deflateInit2(&c_stream, srv_compressed_columns_zip_level,
		Z_DEFLATED, window_bits, MAX_MEM_LEVEL,
		srv_compressed_columns_zlib_strategy);
	ut_a(err == Z_OK);

	/* Prime the compressor with the optional column dictionary */
	if (dict_data != 0 && dict_data_len != 0) {
		err = deflateSetDictionary(&c_stream, dict_data,
			dict_data_len);
		ut_a(err == Z_OK);
	}

	/* Single-shot compression: Z_FINISH with the whole input.
	Z_STREAM_END means everything fit into avail_out; a plain Z_OK
	here means the output buffer was too small, i.e. the data grew. */
	err = deflate(&c_stream, Z_FINISH);
	if (err != Z_STREAM_END) {
		deflateEnd(&c_stream);
		if (err == Z_OK)
			err = Z_BUF_ERROR;
	} else {
		comp_len = c_stream.total_out;
		err = deflateEnd(&c_stream);
	}

	switch (err) {
	case Z_OK:
		break;
	case Z_BUF_ERROR:
		/* data after compress is larger than uncompressed data*/
		break;
	default:
		ib_logf(IB_LOG_LEVEL_ERROR,
			"failed to compress the column, error: %d\n", err);
	}

	/* make sure the compressed data size is smaller than
	uncompressed data */
	if (err == Z_OK &&
		*len > (comp_len + zip_column_header_length + lenlen)) {
		/* The header stores lenlen - 1 because the on-disk
		'len-len' field encodes 1..8 bytes as 0..7. */
		column_set_compress_header(buf, true, lenlen - 1,
			default_zip_column_algorithm_value, wrap,
			default_zip_column_reserved_value);
		ptr = buf + zip_column_header_length;
		/*store the uncompressed data length*/
		switch (lenlen) {
		case 1:
			mach_write_to_1(ptr, *len);
			break;
		case 2:
			mach_write_to_2(ptr, *len);
			break;
		case 3:
			mach_write_to_3(ptr, *len);
			break;
		case 4:
			mach_write_to_4(ptr, *len);
			break;
		default:
			ut_error;
		}

		*len = comp_len + zip_column_header_length + lenlen;
		return buf;
	}

do_not_compress:
	/* Store the data verbatim behind a header whose 'compressed'
	bit is clear; all other header bitfields are then ignored. */
	ptr = buf;
	column_set_compress_header(ptr, false, 0,
		default_zip_column_algorithm_value, false,
		default_zip_column_reserved_value);
	ptr += zip_column_header_length;
	memcpy(ptr, data, *len);
	*len += zip_column_header_length;
	return buf;
}
/** Uncompress blob/text/varchar column using zlib
@return pointer to the uncompressed data */
const byte*
row_decompress_column(
	const byte*	data,	/*!< in: data in innodb(compressed) format */
	ulint*		len,	/*!< in: data length; out: length of
				decompressed data*/
	const byte*	dict_data,
				/*!< in: optional dictionary data used for
				decompression */
	ulint		dict_data_len,
				/*!< in: optional dictionary data length */
	row_prebuilt_t*	prebuilt)
				/*!< in: use prebuilt->compress_heap only
				here*/
{
	bool	is_compressed = false;
	bool	wrap = false;
	bool	reserved = false;
	ulint	lenlen = 0;
	uint	alg = 0;

	ut_ad(*len != ULINT_UNDEFINED);
	ut_ad(*len >= zip_column_header_length);

	/* Parse the fixed-size compression header that precedes the
	column payload. */
	column_get_compress_header(data, &is_compressed, &lenlen, &alg,
				   &wrap, &reserved);

	/* Reject headers written by an incompatible (future) format. */
	if (reserved != default_zip_column_reserved_value) {
		ib_logf(IB_LOG_LEVEL_FATAL,
			"unsupported compressed BLOB header format\n");
	}

	if (alg != default_zip_column_algorithm_value) {
		ib_logf(IB_LOG_LEVEL_FATAL,
			"unsupported 'algorithm' value in the"
			" compressed BLOB header\n");
	}

	ut_a(lenlen < 4);

	data += zip_column_header_length;

	if (!is_compressed) {
		/* Column was stored uncompressed: just strip the header. */
		*len -= zip_column_header_length;
		return data;
	}

	/* The header stores (lenlen - 1); restore the true byte count of
	the uncompressed-length field. */
	lenlen++;

	const ulint	comp_len = *len - zip_column_header_length - lenlen;

	/* Read the stored uncompressed length (1..4 bytes, big-endian). */
	ulint	uncomp_len = 0;
	if (lenlen == 1) {
		uncomp_len = mach_read_from_1(data);
	} else if (lenlen == 2) {
		uncomp_len = mach_read_from_2(data);
	} else if (lenlen == 3) {
		uncomp_len = mach_read_from_3(data);
	} else if (lenlen == 4) {
		uncomp_len = mach_read_from_4(data);
	} else {
		ut_error;
	}

	data += lenlen;

	/* Lazily create the per-statement heap that holds both the zlib
	work area and the output buffer. */
	if (!prebuilt->compress_heap) {
		prebuilt->compress_heap =
			mem_heap_create(max(UNIV_PAGE_SIZE, uncomp_len));
	}

	ulint	buf_len = uncomp_len;
	byte*	buf = static_cast<byte*>(mem_heap_zalloc(
			prebuilt->compress_heap, buf_len));

	z_stream	d_stream;
	d_stream.next_in = const_cast<Bytef*>(data);
	d_stream.avail_in = comp_len;
	d_stream.next_out = buf;
	d_stream.avail_out = buf_len;

	column_zip_set_alloc(&d_stream, prebuilt->compress_heap);

	/* Positive window bits expect a zlib wrapper, negative mean a
	raw deflate stream. */
	const int	window_bits = wrap ? MAX_WBITS : -MAX_WBITS;

	int	err = inflateInit2(&d_stream, window_bits);
	ut_a(err == Z_OK);

	err = inflate(&d_stream, Z_FINISH);
	if (err == Z_NEED_DICT) {
		/* Stream was compressed against a dictionary; it must
		have been supplied by the caller. */
		ut_a(dict_data != 0 && dict_data_len != 0);
		err = inflateSetDictionary(&d_stream, dict_data,
					   dict_data_len);
		ut_a(err == Z_OK);
		err = inflate(&d_stream, Z_FINISH);
	}

	if (err == Z_STREAM_END) {
		buf_len = d_stream.total_out;
		err = inflateEnd(&d_stream);
	} else {
		inflateEnd(&d_stream);
		if (err == Z_BUF_ERROR && d_stream.avail_in == 0)
			err = Z_DATA_ERROR;
	}

	if (err == Z_BUF_ERROR) {
		ib_logf(IB_LOG_LEVEL_FATAL,
			"zlib buf error, this shouldn't happen\n");
	} else if (err != Z_OK) {
		ib_logf(IB_LOG_LEVEL_FATAL,
			"failed to decompress column, error: %d\n", err);
	}

	if (err == Z_OK) {
		/* Cross-check the inflated size against the stored
		uncompressed length. */
		if (buf_len != uncomp_len) {
			ib_logf(IB_LOG_LEVEL_FATAL,
				"failed to decompress blob column, may"
				" be corrupted\n");
		}
		*len = buf_len;
		return buf;
	}

	/* NOTE(review): unreachable in practice — every err != Z_OK path
	above logs at FATAL level; kept to mirror the original control
	flow. */
	*len -= (zip_column_header_length + lenlen);
	return data;
}
/*******************************************************************//**
Stores a reference to a BLOB in the MySQL format. */
UNIV_INTERN
@ -242,10 +715,21 @@ row_mysql_store_blob_ref(
to 4 bytes */
const void* data, /*!< in: BLOB data; if the value to store
is SQL NULL this should be NULL pointer */
ulint len) /*!< in: BLOB length; if the value to store
ulint len, /*!< in: BLOB length; if the value to store
is SQL NULL this should be 0; remember
also to set the NULL bit in the MySQL record
header! */
bool need_decompression,
/*!< in: if the data need to be compressed*/
const byte* dict_data,
/*!< in: optional compression dictionary
data */
ulint dict_data_len,
/*!< in: optional compression dictionary data
length */
row_prebuilt_t* prebuilt)
/*<! in: use prebuilt->compress_heap only
here */
{
/* MySQL might assume the field is set to zero except the length and
the pointer fields */
@ -257,13 +741,28 @@ row_mysql_store_blob_ref(
In 32-bit architectures we only use the first 4 bytes of the pointer
slot. */
ut_a(col_len - 8 > 1 || len < 256);
ut_a(col_len - 8 > 2 || len < 256 * 256);
ut_a(col_len - 8 > 3 || len < 256 * 256 * 256);
ut_a(col_len - 8 > 1 ||
len < 256 +
(need_decompression ? ZIP_COLUMN_HEADER_LENGTH : 0));
ut_a(col_len - 8 > 2 ||
len < 256 * 256 +
(need_decompression ? ZIP_COLUMN_HEADER_LENGTH : 0));
ut_a(col_len - 8 > 3 ||
len < 256 * 256 * 256 +
(need_decompression ? ZIP_COLUMN_HEADER_LENGTH : 0));
const byte *ptr = NULL;
if (need_decompression)
ptr = row_decompress_column((const byte*)data, &len,
dict_data, dict_data_len, prebuilt);
if (ptr)
memcpy(dest + col_len - 8, &ptr, sizeof ptr);
else
memcpy(dest + col_len - 8, &data, sizeof data);
mach_write_to_n_little_endian(dest, col_len - 8, len);
memcpy(dest + col_len - 8, &data, sizeof data);
}
/*******************************************************************//**
@ -276,15 +775,32 @@ row_mysql_read_blob_ref(
ulint* len, /*!< out: BLOB length */
const byte* ref, /*!< in: BLOB reference in the
MySQL format */
ulint col_len) /*!< in: BLOB reference length
ulint col_len, /*!< in: BLOB reference length
(not BLOB length) */
bool need_compression,
/*!< in: if the data need to be
compressed*/
const byte* dict_data, /*!< in: optional compression
dictionary data */
ulint dict_data_len, /*!< in: optional compression
dictionary data length */
row_prebuilt_t* prebuilt) /*!< in: use prebuilt->compress_heap
only here */
{
byte* data;
byte* data = NULL;
byte* ptr = NULL;
*len = mach_read_from_n_little_endian(ref, col_len - 8);
memcpy(&data, ref + col_len - 8, sizeof data);
if (need_compression) {
ptr = row_compress_column(data, len, col_len - 8, dict_data,
dict_data_len, prebuilt);
if (ptr)
data = ptr;
}
return(data);
}
@ -367,7 +883,16 @@ row_mysql_store_col_in_innobase_format(
necessarily the length of the actual
payload data; if the column is a true
VARCHAR then this is irrelevant */
ulint comp) /*!< in: nonzero=compact format */
ulint comp, /*!< in: nonzero=compact format */
bool need_compression,
/*!< in: if the data need to be
compressed*/
const byte* dict_data, /*!< in: optional compression
dictionary data */
ulint dict_data_len, /*!< in: optional compression
dictionary data length */
row_prebuilt_t* prebuilt) /*!< in: use prebuilt->compress_heap
only here */
{
const byte* ptr = mysql_data;
const dtype_t* dtype;
@ -420,8 +945,14 @@ row_mysql_store_col_in_innobase_format(
lenlen = 2;
}
ptr = row_mysql_read_true_varchar(&col_len, mysql_data,
lenlen);
const byte* tmp_ptr = row_mysql_read_true_varchar(
&col_len, mysql_data, lenlen);
if (need_compression)
ptr = row_compress_column(tmp_ptr, &col_len,
lenlen, dict_data, dict_data_len,
prebuilt);
else
ptr = tmp_ptr;
} else {
/* Remove trailing spaces from old style VARCHAR
columns. */
@ -503,7 +1034,9 @@ row_mysql_store_col_in_innobase_format(
}
} else if (type == DATA_BLOB && row_format_col) {
ptr = row_mysql_read_blob_ref(&col_len, mysql_data, col_len);
ptr = row_mysql_read_blob_ref(&col_len, mysql_data, col_len,
need_compression, dict_data, dict_data_len,
prebuilt);
}
dfield_set_data(dfield, ptr, col_len);
@ -561,7 +1094,11 @@ row_mysql_convert_row_to_innobase(
TRUE, /* MySQL row format data */
mysql_rec + templ->mysql_col_offset,
templ->mysql_col_len,
dict_table_is_comp(prebuilt->table));
dict_table_is_comp(prebuilt->table),
templ->compressed,
reinterpret_cast<const byte*>(
templ->zip_dict_data.str),
templ->zip_dict_data.length, prebuilt);
next_column:
;
}
@ -907,6 +1444,10 @@ row_prebuilt_free(
mem_heap_free(prebuilt->blob_heap);
}
if (prebuilt->compress_heap) {
mem_heap_free(prebuilt->compress_heap);
}
if (prebuilt->old_vers_heap) {
mem_heap_free(prebuilt->old_vers_heap);
}
@ -1333,6 +1874,9 @@ row_insert_for_mysql(
return(DB_READ_ONLY);
}
if (UNIV_LIKELY_NULL(prebuilt->compress_heap))
mem_heap_empty(prebuilt->compress_heap);
trx->op_info = "inserting";
row_mysql_delay_if_needed();
@ -2693,6 +3237,10 @@ loop:
return(n_tables + n_tables_dropped);
}
DBUG_EXECUTE_IF("row_drop_tables_in_background_sleep",
os_thread_sleep(5000000);
);
table = dict_table_open_on_name(drop->table_name, FALSE, FALSE,
DICT_ERR_IGNORE_NONE);
@ -2703,6 +3251,16 @@ loop:
goto already_dropped;
}
if (!table->to_be_dropped) {
/* There is a scenario: the old table is dropped
just after it's added into drop list, and new
table with the same name is created, then we try
to drop the new table in background. */
dict_table_close(table, FALSE, FALSE);
goto already_dropped;
}
ut_a(!table->can_be_evicted);
dict_table_close(table, FALSE, FALSE);
@ -2833,6 +3391,12 @@ row_mysql_table_id_reassign(
pars_info_add_ull_literal(info, "old_id", table->id);
pars_info_add_ull_literal(info, "new_id", *new_id);
/* As micro-SQL does not support int4 == int8 comparisons,
old and new IDs are added again under different names as
int4 values*/
pars_info_add_int4_literal(info, "old_id_narrow", table->id);
pars_info_add_int4_literal(info, "new_id_narrow", *new_id);
err = que_eval_sql(
info,
"PROCEDURE RENUMBER_TABLE_PROC () IS\n"
@ -2843,6 +3407,8 @@ row_mysql_table_id_reassign(
" WHERE TABLE_ID = :old_id;\n"
"UPDATE SYS_INDEXES SET TABLE_ID = :new_id\n"
" WHERE TABLE_ID = :old_id;\n"
"UPDATE SYS_ZIP_DICT_COLS SET TABLE_ID = :new_id_narrow\n"
" WHERE TABLE_ID = :old_id_narrow;\n"
"END;\n", FALSE, trx);
return(err);
@ -3609,6 +4175,12 @@ next_rec:
pars_info_add_ull_literal(info, "old_id", table->id);
pars_info_add_ull_literal(info, "new_id", new_id);
/* As micro-SQL does not support int4 == int8 comparisons,
old and new IDs are added again under different names as
int4 values*/
pars_info_add_int4_literal(info, "old_id_narrow", table->id);
pars_info_add_int4_literal(info, "new_id_narrow", new_id);
err = que_eval_sql(info,
"PROCEDURE RENUMBER_TABLE_ID_PROC () IS\n"
"BEGIN\n"
@ -3620,6 +4192,9 @@ next_rec:
"UPDATE SYS_INDEXES"
" SET TABLE_ID = :new_id, SPACE = :new_space\n"
" WHERE TABLE_ID = :old_id;\n"
"UPDATE SYS_ZIP_DICT_COLS\n"
" SET TABLE_ID = :new_id_narrow\n"
" WHERE TABLE_ID = :old_id_narrow;\n"
"END;\n"
, FALSE, trx);
@ -3962,6 +4537,13 @@ row_drop_table_for_mysql(
}
}
DBUG_EXECUTE_IF("row_drop_table_add_to_background",
row_add_table_to_background_drop_list(table->name);
err = DB_SUCCESS;
goto funct_exit;
);
/* TODO: could we replace the counter n_foreign_key_checks_running
with lock checks on the table? Acquire here an exclusive lock on the
table, and rewrite lock0lock.cc and the lock wait in srv0srv.cc so that
@ -4232,6 +4814,19 @@ row_drop_table_for_mysql(
filepath = fil_make_ibd_name(tablename, false);
}
/* Remove all compression dictionary references for the
table */
err = dict_create_remove_zip_dict_references_for_table(
table->id, trx);
if (err != DB_SUCCESS) {
ib_logf(IB_LOG_LEVEL_ERROR, "Error: (%s) not "
"able to remove compression dictionary "
"references for table %s", ut_strerr(err),
tablename);
goto funct_exit;
}
if (dict_table_has_fts_index(table)
|| DICT_TF2_FLAG_IS_SET(table, DICT_TF2_FTS_HAS_DOC_ID)) {
ut_ad(table->n_ref_count == 0);
@ -4578,6 +5173,19 @@ loop:
row_mysql_lock_data_dictionary(trx);
while ((table_name = dict_get_first_table_name_in_db(name))) {
/* Drop parent table if it is a fts aux table, to
avoid accessing dropped fts aux tables in information
scheam when parent table still exists.
Note: Drop parent table will drop fts aux tables. */
char* parent_table_name;
parent_table_name = fts_get_parent_table_name(
table_name, strlen(table_name));
if (parent_table_name != NULL) {
mem_free(table_name);
table_name = parent_table_name;
}
ut_a(memcmp(table_name, name, namelen) == 0);
table = dict_table_open_on_name(

View File

@ -2460,9 +2460,11 @@ row_sel_convert_mysql_key_to_innobase(
if (UNIV_LIKELY(!is_null)) {
buf = row_mysql_store_col_in_innobase_format(
dfield, buf,
FALSE, /* MySQL key value format col */
/* MySQL key value format col */
FALSE,
key_ptr + data_offset, data_len,
dict_table_is_comp(index->table));
dict_table_is_comp(index->table),
false, 0, 0 ,0);
ut_a(buf <= original_buf + buf_len);
}
@ -2555,12 +2557,16 @@ row_sel_store_row_id_to_prebuilt(
#ifdef UNIV_DEBUG
/** Convert a non-SQL-NULL field from Innobase format to MySQL format. */
# define row_sel_field_store_in_mysql_format(dest,templ,idx,field,src,len) \
row_sel_field_store_in_mysql_format_func(dest,templ,idx,field,src,len)
# define row_sel_field_store_in_mysql_format( \
dest,templ,idx,field,src,len,prebuilt) \
row_sel_field_store_in_mysql_format_func \
(dest,templ,idx,field,src,len, prebuilt)
#else /* UNIV_DEBUG */
/** Convert a non-SQL-NULL field from Innobase format to MySQL format. */
# define row_sel_field_store_in_mysql_format(dest,templ,idx,field,src,len) \
row_sel_field_store_in_mysql_format_func(dest,templ,src,len)
# define row_sel_field_store_in_mysql_format( \
dest,templ,idx,field,src,len,prebuilt) \
row_sel_field_store_in_mysql_format_func \
(dest,templ,src,len, prebuilt)
#endif /* UNIV_DEBUG */
/**************************************************************//**
@ -2590,7 +2596,10 @@ row_sel_field_store_in_mysql_format_func(
templ->icp_rec_field_no */
#endif /* UNIV_DEBUG */
const byte* data, /*!< in: data to store */
ulint len) /*!< in: length of the data */
ulint len, /*!< in: length of the data */
row_prebuilt_t* prebuilt)
/*!< in: use prebuilt->compress_heap
only here */
{
byte* ptr;
#ifdef UNIV_DEBUG
@ -2634,6 +2643,15 @@ row_sel_field_store_in_mysql_format_func(
field_end = dest + templ->mysql_col_len;
if (templ->mysql_type == DATA_MYSQL_TRUE_VARCHAR) {
/* If this is a compressed column,
decompress it first */
if (templ->compressed)
data = row_decompress_column(data, &len,
reinterpret_cast<const byte*>(
templ->zip_dict_data.str),
templ->zip_dict_data.length,
prebuilt);
/* This is a >= 5.0.3 type true VARCHAR. Store the
length of the data to the first byte or the first
two bytes of dest. */
@ -2684,7 +2702,11 @@ row_sel_field_store_in_mysql_format_func(
already copied to the buffer in row_sel_store_mysql_rec */
row_mysql_store_blob_ref(dest, templ->mysql_col_len, data,
len);
len, templ->compressed,
reinterpret_cast<const byte*>(
templ->zip_dict_data.str),
templ->zip_dict_data.length,
prebuilt);
break;
case DATA_MYSQL:
@ -2837,7 +2859,7 @@ row_sel_store_mysql_field_func(
row_sel_field_store_in_mysql_format(
mysql_rec + templ->mysql_col_offset,
templ, index, field_no, data, len);
templ, index, field_no, data, len, prebuilt);
if (heap != prebuilt->blob_heap) {
mem_heap_free(heap);
@ -2887,7 +2909,7 @@ row_sel_store_mysql_field_func(
row_sel_field_store_in_mysql_format(
mysql_rec + templ->mysql_col_offset,
templ, index, field_no, data, len);
templ, index, field_no, data, len, prebuilt);
}
ut_ad(len != UNIV_SQL_NULL);
@ -2935,6 +2957,9 @@ row_sel_store_mysql_rec(
prebuilt->blob_heap = NULL;
}
if (UNIV_LIKELY_NULL(prebuilt->compress_heap))
mem_heap_empty(prebuilt->compress_heap);
for (i = 0; i < prebuilt->n_template; i++) {
const mysql_row_templ_t*templ = &prebuilt->mysql_template[i];
const ulint field_no

View File

@ -2714,6 +2714,12 @@ files_checked:
return(err);
}
/* Create the SYS_ZIP_DICT system table */
err = dict_create_or_check_sys_zip_dict();
if (err != DB_SUCCESS) {
return(err);
}
srv_is_being_started = FALSE;
ut_a(trx_purge_state() == PURGE_STATE_INIT);