MDEV-16045: Allocate log_sys statically

There is only one redo log subsystem in InnoDB. Allocate the object
statically, to avoid unnecessary dereferencing of the pointer.

log_t::create(): Renamed from log_sys_init().

log_t::close(): Renamed from log_shutdown().

log_t::checkpoint_buf_ptr: Remove. Allocate log_t::checkpoint_buf
statically.
Author: Marko Mäkelä  2018-04-27 10:06:14 +03:00
parent 715e4f4320
commit d73a898d64
19 changed files with 458 additions and 463 deletions
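
For readers skimming the diffs below, the shape of the change is the usual conversion of a pointer-based singleton into a statically allocated object: extern log_t* log_sys becomes extern log_t log_sys, every log_sys->member becomes log_sys.member, and the free functions log_sys_init()/log_shutdown() become the member functions log_t::create()/log_t::close(), with an m_initialised flag set by create(). A minimal standalone sketch of that pattern follows; the names ending in _sketch, the buffer size, and the main() driver are illustrative assumptions, not code from the patch.

/* Sketch only (assumed names, not from the patch): a subsystem object that is
allocated statically and initialised lazily, mirroring the new log_t interface. */
struct log_t_sketch {
	bool		m_initialised;	/* set by create(), cleared by close() */
	unsigned char*	buf;		/* stands in for the real redo log buffer */

	/* The constructor only marks the object as uninitialised;
	real set-up (allocations, mutex creation) is deferred to create(). */
	log_t_sketch() : m_initialised(false), buf(nullptr) {}

	bool is_initialised() const { return m_initialised; }

	/* Plays the role of the old log_sys_init(). */
	void create()
	{
		buf = new unsigned char[16384];	/* size is illustrative */
		m_initialised = true;
	}

	/* Plays the role of the old log_shutdown(); harmless to call even if
	create() never ran, so shutdown code can call it unconditionally. */
	void close()
	{
		if (!m_initialised) {
			return;
		}
		delete[] buf;
		buf = nullptr;
		m_initialised = false;
	}
};

/* One statically allocated instance replaces the log_t* pointer. */
log_t_sketch log_sys_sketch;

int main()
{
	log_sys_sketch.create();	/* call sites change from -> to . */
	log_sys_sketch.close();
	return 0;
}

The real log_t in log0log.h additionally embeds its mutexes, the log buffer, and an aligned checkpoint_buf[OS_FILE_LOG_BLOCK_SIZE]; innodb_shutdown() now asserts log_sys.is_initialised() || !srv_was_started and then calls log_sys.close() unconditionally, as the srv0start.cc hunks at the end of this page show.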

View File

@@ -2445,7 +2445,7 @@ lsn_t
 xtrabackup_copy_log(copy_logfile copy, lsn_t start_lsn, lsn_t end_lsn)
 {
 lsn_t scanned_lsn = start_lsn;
-const byte* log_block = log_sys->buf;
+const byte* log_block = log_sys.buf;
 bool more_data = false;
 for (ulint scanned_checkpoint = 0;
@@ -2494,7 +2494,7 @@ xtrabackup_copy_log(copy_logfile copy, lsn_t start_lsn, lsn_t end_lsn)
 recv_sys_justify_left_parsing_buf();
-log_sys->log.scanned_lsn = scanned_lsn;
+log_sys.log.scanned_lsn = scanned_lsn;
 end_lsn = copy == COPY_LAST
 ? ut_uint64_align_up(scanned_lsn, OS_FILE_LOG_BLOCK_SIZE)
@@ -2502,10 +2502,10 @@ xtrabackup_copy_log(copy_logfile copy, lsn_t start_lsn, lsn_t end_lsn)
 if (ulint write_size = ulint(end_lsn - start_lsn)) {
 if (srv_encrypt_log) {
-log_crypt(log_sys->buf, start_lsn, write_size);
+log_crypt(log_sys.buf, start_lsn, write_size);
 }
-if (ds_write(dst_log_file, log_sys->buf, write_size)) {
+if (ds_write(dst_log_file, log_sys.buf, write_size)) {
 msg("mariabackup: Error: "
 "write to logfile failed\n");
 return(0);
@@ -2544,7 +2544,7 @@ xtrabackup_copy_logfile(copy_logfile copy)
 lsn_t lsn= start_lsn;
 for(int retries= 0; retries < 100; retries++) {
-if (log_group_read_log_seg(log_sys->buf, &log_sys->log,
+if (log_group_read_log_seg(log_sys.buf, &log_sys.log,
 &lsn, end_lsn)){
 break;
 }
@@ -2565,7 +2565,7 @@ xtrabackup_copy_logfile(copy_logfile copy)
 }
 } while (start_lsn == end_lsn);
-ut_ad(start_lsn == log_sys->log.scanned_lsn);
+ut_ad(start_lsn == log_sys.log.scanned_lsn);
 msg_ts(">> log scanned up to (" LSN_PF ")\n", start_lsn);
@@ -3656,9 +3656,9 @@ xtrabackup_backup_low()
 log_mutex_enter();
 if (recv_find_max_checkpoint(&max_cp_field) == DB_SUCCESS
-&& log_sys->log.format != 0) {
+&& log_sys.log.format != 0) {
 metadata_to_lsn = mach_read_from_8(
-log_sys->checkpoint_buf + LOG_CHECKPOINT_LSN);
+log_sys.checkpoint_buf + LOG_CHECKPOINT_LSN);
 msg("mariabackup: The latest check point"
 " (for incremental): '" LSN_PF "'\n",
 metadata_to_lsn);
@@ -3818,7 +3818,7 @@ fail:
 os_aio_init(srv_n_read_io_threads, srv_n_write_io_threads,
 SRV_MAX_N_PENDING_SYNC_IOS);
-log_sys_init();
+log_sys.create();
 log_init(srv_n_log_files);
 fil_space_t* space = fil_space_create(
 "innodb_redo_log", SRV_LOG_SPACE_FIRST_ID, 0,
@@ -3894,7 +3894,7 @@ log_fail:
 goto fail;
 }
-if (log_sys->log.format == 0) {
+if (log_sys.log.format == 0) {
 old_format:
 msg("mariabackup: Error: cannot process redo log"
 " before MariaDB 10.2.2\n");
@@ -3902,14 +3902,14 @@ old_format:
 goto log_fail;
 }
-ut_ad(!((log_sys->log.format ^ LOG_HEADER_FORMAT_CURRENT)
+ut_ad(!((log_sys.log.format ^ LOG_HEADER_FORMAT_CURRENT)
 & ~LOG_HEADER_FORMAT_ENCRYPTED));
-const byte* buf = log_sys->checkpoint_buf;
+const byte* buf = log_sys.checkpoint_buf;
 reread_log_header:
-checkpoint_lsn_start = log_sys->log.lsn;
-checkpoint_no_start = log_sys->next_checkpoint_no;
+checkpoint_lsn_start = log_sys.log.lsn;
+checkpoint_no_start = log_sys.next_checkpoint_no;
 err = recv_find_max_checkpoint(&max_cp_field);
@@ -3917,14 +3917,14 @@ reread_log_header:
 goto log_fail;
 }
-if (log_sys->log.format == 0) {
+if (log_sys.log.format == 0) {
 goto old_format;
 }
-ut_ad(!((log_sys->log.format ^ LOG_HEADER_FORMAT_CURRENT)
+ut_ad(!((log_sys.log.format ^ LOG_HEADER_FORMAT_CURRENT)
 & ~LOG_HEADER_FORMAT_ENCRYPTED));
-log_group_header_read(&log_sys->log, max_cp_field);
+log_group_header_read(&log_sys.log, max_cp_field);
 if (checkpoint_no_start != mach_read_from_8(buf + LOG_CHECKPOINT_NO)) {
 goto reread_log_header;
@@ -3950,7 +3950,7 @@ reread_log_header:
 /* label it */
 byte MY_ALIGNED(OS_FILE_LOG_BLOCK_SIZE) log_hdr[OS_FILE_LOG_BLOCK_SIZE];
 memset(log_hdr, 0, sizeof log_hdr);
-mach_write_to_4(LOG_HEADER_FORMAT + log_hdr, log_sys->log.format);
+mach_write_to_4(LOG_HEADER_FORMAT + log_hdr, log_sys.log.format);
 mach_write_to_8(LOG_HEADER_START_LSN + log_hdr, checkpoint_lsn_start);
 strcpy(reinterpret_cast<char*>(LOG_HEADER_CREATOR + log_hdr),
 "Backup " MYSQL_SERVER_VERSION);
@@ -4936,7 +4936,7 @@ xtrabackup_prepare_func(char** argv)
 ut_d(sync_check_enable());
 ut_crc32_init();
 recv_sys_init();
-log_sys_init();
+log_sys.create();
 recv_recovery_on = true;
 #ifdef WITH_INNODB_DISALLOW_WRITES
@@ -4970,7 +4970,7 @@ xtrabackup_prepare_func(char** argv)
 os_event_destroy(srv_allow_writes_event);
 #endif
 innodb_free_param();
-log_shutdown();
+log_sys.close();
 sync_check_close();
 if (!ok) goto error_cleanup;
 }

View File

@@ -1,7 +1,7 @@
 --source include/have_innodb.inc
 #
-# MDEV-11705: InnoDB: Failing assertion: (&log_sys->mutex)->is_owned() if server started with innodb-scrub-log
+# MDEV-11705: InnoDB: Failing assertion: (&log_sys.mutex)->is_owned() if server started with innodb-scrub-log
 #
 create table t1(a int not null primary key auto_increment,

View File

@@ -725,7 +725,7 @@ BtrBulk::pageCommit(
 void
 BtrBulk::logFreeCheck()
 {
-if (log_sys->check_flush_or_checkpoint) {
+if (log_sys.check_flush_or_checkpoint) {
 release();
 log_free_check();

View File

@@ -1191,11 +1191,11 @@ buf_madvise_do_dump()
 buf_pool_t* buf_pool;
 buf_chunk_t* chunk;
-/* mirrors allocation in log_sys_init() */
-if (log_sys->buf) {
-ret+= madvise(log_sys->first_in_use
-? log_sys->buf
-: log_sys->buf - srv_log_buffer_size,
+/* mirrors allocation in log_t::create() */
+if (log_sys.buf) {
+ret+= madvise(log_sys.first_in_use
+? log_sys.buf
+: log_sys.buf - srv_log_buffer_size,
 srv_log_buffer_size * 2,
 MADV_DODUMP);
 }

View File

@@ -2439,7 +2439,7 @@ page_cleaner_flush_pages_recommendation(
 cur_lsn = log_get_lsn_nowait();
-/* log_get_lsn_nowait tries to get log_sys->mutex with
+/* log_get_lsn_nowait tries to get log_sys.mutex with
 mutex_enter_nowait, if this does not succeed function
 returns 0, do not use that value to update stats. */
 if (cur_lsn == 0) {

View File

@@ -3122,7 +3122,7 @@ func_exit:
 log_mutex_enter();
 }
-/* log_sys->mutex is above fil_system.mutex in the latching order */
+/* log_sys.mutex is above fil_system.mutex in the latching order */
 ut_ad(log_mutex_own());
 mutex_enter(&fil_system.mutex);
 ut_ad(space->name == old_space_name);
@@ -5120,12 +5120,12 @@ fil_names_dirty(
 {
 ut_ad(log_mutex_own());
 ut_ad(recv_recovery_is_on());
-ut_ad(log_sys->lsn != 0);
+ut_ad(log_sys.lsn != 0);
 ut_ad(space->max_lsn == 0);
 ut_d(fil_space_validate_for_mtr_commit(space));
 UT_LIST_ADD_LAST(fil_system.named_spaces, space);
-space->max_lsn = log_sys->lsn;
+space->max_lsn = log_sys.lsn;
 }
 /** Write MLOG_FILE_NAME records when a non-predefined persistent
@@ -5140,7 +5140,7 @@ fil_names_dirty_and_write(
 {
 ut_ad(log_mutex_own());
 ut_d(fil_space_validate_for_mtr_commit(space));
-ut_ad(space->max_lsn == log_sys->lsn);
+ut_ad(space->max_lsn == log_sys.lsn);
 UT_LIST_ADD_LAST(fil_system.named_spaces, space);
 fil_names_write(space, mtr);
@@ -5177,8 +5177,8 @@ fil_names_clear(
 ut_ad(log_mutex_own());
-if (log_sys->append_on_checkpoint) {
-mtr_write_log(log_sys->append_on_checkpoint);
+if (log_sys.append_on_checkpoint) {
+mtr_write_log(log_sys.append_on_checkpoint);
 do_write = true;
 }

View File

@@ -18562,16 +18562,16 @@ checkpoint_now_set(
 check function */
 {
 if (*(my_bool*) save) {
-while (log_sys->last_checkpoint_lsn
+while (log_sys.last_checkpoint_lsn
 + SIZE_OF_MLOG_CHECKPOINT
-+ (log_sys->append_on_checkpoint != NULL
-? log_sys->append_on_checkpoint->size() : 0)
-< log_sys->lsn) {
++ (log_sys.append_on_checkpoint != NULL
+? log_sys.append_on_checkpoint->size() : 0)
+< log_sys.lsn) {
 log_make_checkpoint_at(LSN_MAX, TRUE);
 fil_flush_file_spaces(FIL_TYPE_LOG);
 }
-dberr_t err = fil_write_flushed_lsn(log_sys->lsn);
+dberr_t err = fil_write_flushed_lsn(log_sys.lsn);
 if (err != DB_SUCCESS) {
 ib::warn() << "Checkpoint set failed " << err;

View File

@@ -82,7 +82,7 @@ struct fil_space_t {
 /*!< LSN of the most recent
 fil_names_write_if_was_clean().
 Reset to 0 by fil_names_clear().
-Protected by log_sys->mutex.
+Protected by log_sys.mutex.
 If and only if this is nonzero, the
 tablespace will be in named_spaces. */
 bool stop_ios;/*!< true if we want to rename the
@@ -286,7 +286,7 @@ struct fil_space_t {
 struct fil_node_t {
 /** tablespace containing this file */
 fil_space_t* space;
-/** file name; protected by fil_system.mutex and log_sys->mutex. */
+/** file name; protected by fil_system.mutex and log_sys.mutex. */
 char* name;
 /** file handle (valid if is_open) */
 pfs_os_file_t handle;
@@ -628,7 +628,7 @@ public:
 for which a MLOG_FILE_NAME
 record has been written since
 the latest redo log checkpoint.
-Protected only by log_sys->mutex. */
+Protected only by log_sys.mutex. */
 UT_LIST_BASE_NODE_T(fil_space_t) rotation_list;
 /*!< list of all file spaces needing
 key rotation.*/
@@ -1326,8 +1326,8 @@ fil_names_write_if_was_clean(
 }
 const bool was_clean = space->max_lsn == 0;
-ut_ad(space->max_lsn <= log_sys->lsn);
-space->max_lsn = log_sys->lsn;
+ut_ad(space->max_lsn <= log_sys.lsn);
+space->max_lsn = log_sys.lsn;
 if (was_clean) {
 fil_names_dirty_and_write(space, mtr);

View File

@@ -56,7 +56,7 @@ step which modifies the database, is started */
 typedef ulint (*log_checksum_func_t)(const byte* log_block);
 /** Pointer to the log checksum calculation function. Protected with
-log_sys->mutex. */
+log_sys.mutex. */
 extern log_checksum_func_t log_checksum_algorithm_ptr;
 /** Append a string to the log.
@@ -136,7 +136,7 @@ log_get_flush_lsn(void);
 /*=============*/
 /****************************************************************
 Gets the log group capacity. It is OK to read the value without
-holding log_sys->mutex because it is constant.
+holding log_sys.mutex because it is constant.
 @return log group capacity */
 UNIV_INLINE
 lsn_t
@@ -150,9 +150,6 @@ UNIV_INLINE
 lsn_t
 log_get_max_modified_age_async(void);
 /*================================*/
-/** Initializes the redo logging subsystem. */
-void
-log_sys_init();
 /** Initialize the redo log.
 @param[in] n_files number of files */
@@ -233,7 +230,7 @@ shutdown. This function also writes all log in log files to the log archive. */
 void
 logs_empty_and_mark_files_at_shutdown(void);
 /*=======================================*/
-/** Read a log group header page to log_sys->checkpoint_buf.
+/** Read a log group header page to log_sys.checkpoint_buf.
 @param[in] group log group
 @param[in] header 0 or LOG_CHEKCPOINT_1 or LOG_CHECKPOINT2 */
 void
@@ -406,9 +403,6 @@ Closes all log groups. */
 void
 log_group_close_all(void);
 /*=====================*/
-/** Shut down the redo log subsystem. */
-void
-log_shutdown();
 /** Whether to generate and require checksums on the redo log pages */
 extern my_bool innodb_log_checksums;
@@ -443,7 +437,7 @@ extern my_bool innodb_log_checksums;
 from this offset in this log block,
 if value not 0 */
 #define LOG_BLOCK_CHECKPOINT_NO 8 /* 4 lower bytes of the value of
-log_sys->next_checkpoint_no when the
+log_sys.next_checkpoint_no when the
 log block was last written to: if the
 block has not yet been written full,
 this value is only updated before a
@@ -544,9 +538,9 @@ typedef ib_mutex_t FlushOrderMutex;
 /** Log group consists of a number of log files, each of the same size; a log
 group is implemented as a space in the sense of the module fil0fil.
-Currently, this is only protected by log_sys->mutex. However, in the case
+Currently, this is only protected by log_sys.mutex. However, in the case
 of log_write_up_to(), we will access some members only with the protection
-of log_sys->write_mutex, which should affect nothing for now. */
+of log_sys.write_mutex, which should affect nothing for now. */
 struct log_group_t{
 /** number of files in the group */
 ulint n_files;
@@ -588,25 +582,22 @@ struct log_group_t{
 /** Redo log buffer */
 struct log_t{
-char pad1[CACHE_LINE_SIZE];
-/*!< Padding to prevent other memory
-update hotspots from residing on the
-same memory cache line */
+MY_ALIGNED(CACHE_LINE_SIZE)
 lsn_t lsn; /*!< log sequence number */
 ulong buf_free; /*!< first free offset within the log
 buffer in use */
-char pad2[CACHE_LINE_SIZE];/*!< Padding */
+MY_ALIGNED(CACHE_LINE_SIZE)
 LogSysMutex mutex; /*!< mutex protecting the log */
-char pad3[CACHE_LINE_SIZE]; /*!< Padding */
+MY_ALIGNED(CACHE_LINE_SIZE)
 LogSysMutex write_mutex; /*!< mutex protecting writing to log
 file and accessing to log_group_t */
-char pad4[CACHE_LINE_SIZE];/*!< Padding */
+MY_ALIGNED(CACHE_LINE_SIZE)
 FlushOrderMutex log_flush_order_mutex;/*!< mutex to serialize access to
 the flush list when we are putting
 dirty blocks in the list. The idea
 behind this mutex is to be able
-to release log_sys->mutex during
+to release log_sys.mutex during
 mtr_commit and still ensure that
 insertions in the flush_list happen
 in the LSN order. */
@@ -636,7 +627,7 @@ struct log_t{
 peeked at by log_free_check(), which
 does not reserve the log mutex */
 /** the redo log */
 log_group_t log;
 /** The fields involved in the log buffer flush @{ */
@@ -707,7 +698,7 @@ struct log_t{
 /*!< extra redo log records to write
 during a checkpoint, or NULL if none.
 The pointer is protected by
-log_sys->mutex, and the data must
+log_sys.mutex, and the data must
 remain constant as long as this
 pointer is not NULL. */
 ulint n_pending_checkpoint_writes;
@@ -717,62 +708,79 @@ struct log_t{
 checkpoint write is running; a thread
 should wait for this without owning
 the log mutex */
-byte* checkpoint_buf_ptr;/* unaligned checkpoint header */
-byte* checkpoint_buf; /*!< checkpoint header is read to this
-buffer */
+/** buffer for checkpoint header */
+MY_ALIGNED(OS_FILE_LOG_BLOCK_SIZE)
+byte checkpoint_buf[OS_FILE_LOG_BLOCK_SIZE];
 /* @} */
-/** @return whether the redo log is encrypted */
-bool is_encrypted() const
-{
-return(log.is_encrypted());
-}
+private:
+bool m_initialised;
+public:
+/**
+Constructor.
+Some members may require late initialisation, thus we just mark object as
+uninitialised. Real initialisation happens in create().
+*/
+log_t(): m_initialised(false) {}
+/** @return whether the redo log is encrypted */
+bool is_encrypted() const { return(log.is_encrypted()); }
+bool is_initialised() { return m_initialised; }
+/** Initialise the redo log subsystem. */
+void create();
+/** Shut down the redo log subsystem. */
+void close();
 };
 /** Redo log system */
-extern log_t* log_sys;
+extern log_t log_sys;
 /** Test if flush order mutex is owned. */
 #define log_flush_order_mutex_own() \
-mutex_own(&log_sys->log_flush_order_mutex)
+mutex_own(&log_sys.log_flush_order_mutex)
 /** Acquire the flush order mutex. */
 #define log_flush_order_mutex_enter() do { \
-mutex_enter(&log_sys->log_flush_order_mutex); \
+mutex_enter(&log_sys.log_flush_order_mutex); \
 } while (0)
 /** Release the flush order mutex. */
 # define log_flush_order_mutex_exit() do { \
-mutex_exit(&log_sys->log_flush_order_mutex); \
+mutex_exit(&log_sys.log_flush_order_mutex); \
 } while (0)
 /** Test if log sys mutex is owned. */
-#define log_mutex_own() mutex_own(&log_sys->mutex)
+#define log_mutex_own() mutex_own(&log_sys.mutex)
 /** Test if log sys write mutex is owned. */
-#define log_write_mutex_own() mutex_own(&log_sys->write_mutex)
+#define log_write_mutex_own() mutex_own(&log_sys.write_mutex)
 /** Acquire the log sys mutex. */
-#define log_mutex_enter() mutex_enter(&log_sys->mutex)
+#define log_mutex_enter() mutex_enter(&log_sys.mutex)
 /** Acquire the log sys write mutex. */
-#define log_write_mutex_enter() mutex_enter(&log_sys->write_mutex)
+#define log_write_mutex_enter() mutex_enter(&log_sys.write_mutex)
 /** Acquire all the log sys mutexes. */
 #define log_mutex_enter_all() do { \
-mutex_enter(&log_sys->write_mutex); \
-mutex_enter(&log_sys->mutex); \
+mutex_enter(&log_sys.write_mutex); \
+mutex_enter(&log_sys.mutex); \
 } while (0)
 /** Release the log sys mutex. */
-#define log_mutex_exit() mutex_exit(&log_sys->mutex)
+#define log_mutex_exit() mutex_exit(&log_sys.mutex)
 /** Release the log sys write mutex.*/
-#define log_write_mutex_exit() mutex_exit(&log_sys->write_mutex)
+#define log_write_mutex_exit() mutex_exit(&log_sys.write_mutex)
 /** Release all the log sys mutexes. */
 #define log_mutex_exit_all() do { \
-mutex_exit(&log_sys->mutex); \
-mutex_exit(&log_sys->write_mutex); \
+mutex_exit(&log_sys.mutex); \
+mutex_exit(&log_sys.write_mutex); \
 } while (0)
 /** Calculate the offset of an lsn within a log group.

View File

@@ -330,15 +330,15 @@ log_reserve_and_write_fast(
 len - SIZE_OF_MLOG_CHECKPOINT]
 ? 0
 : 1
-+ mach_get_compressed_size(log_sys->lsn >> 32)
-+ mach_get_compressed_size(log_sys->lsn & 0xFFFFFFFFUL);
++ mach_get_compressed_size(log_sys.lsn >> 32)
++ mach_get_compressed_size(log_sys.lsn & 0xFFFFFFFFUL);
 #endif /* UNIV_LOG_LSN_DEBUG */
 const ulint data_len = len
 #ifdef UNIV_LOG_LSN_DEBUG
 + lsn_len
 #endif /* UNIV_LOG_LSN_DEBUG */
-+ log_sys->buf_free % OS_FILE_LOG_BLOCK_SIZE;
++ log_sys.buf_free % OS_FILE_LOG_BLOCK_SIZE;
 if (data_len >= OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_TRL_SIZE) {
@@ -348,44 +348,44 @@ log_reserve_and_write_fast(
 return(0);
 }
-*start_lsn = log_sys->lsn;
+*start_lsn = log_sys.lsn;
 #ifdef UNIV_LOG_LSN_DEBUG
 if (lsn_len) {
 /* Write the LSN pseudo-record. */
-byte* b = &log_sys->buf[log_sys->buf_free];
+byte* b = &log_sys.buf[log_sys.buf_free];
 *b++ = MLOG_LSN | (MLOG_SINGLE_REC_FLAG & *(const byte*) str);
 /* Write the LSN in two parts,
 as a pseudo page number and space id. */
-b += mach_write_compressed(b, log_sys->lsn >> 32);
-b += mach_write_compressed(b, log_sys->lsn & 0xFFFFFFFFUL);
-ut_a(b - lsn_len == &log_sys->buf[log_sys->buf_free]);
+b += mach_write_compressed(b, log_sys.lsn >> 32);
+b += mach_write_compressed(b, log_sys.lsn & 0xFFFFFFFFUL);
+ut_a(b - lsn_len == &log_sys.buf[log_sys.buf_free]);
 ::memcpy(b, str, len);
 len += lsn_len;
 } else
 #endif /* UNIV_LOG_LSN_DEBUG */
-memcpy(log_sys->buf + log_sys->buf_free, str, len);
+memcpy(log_sys.buf + log_sys.buf_free, str, len);
 log_block_set_data_len(
 reinterpret_cast<byte*>(ut_align_down(
-log_sys->buf + log_sys->buf_free,
+log_sys.buf + log_sys.buf_free,
 OS_FILE_LOG_BLOCK_SIZE)),
 data_len);
-log_sys->buf_free += ulong(len);
-ut_ad(log_sys->buf_free <= srv_log_buffer_size);
-log_sys->lsn += len;
+log_sys.buf_free += ulong(len);
+ut_ad(log_sys.buf_free <= srv_log_buffer_size);
+log_sys.lsn += len;
 MONITOR_SET(MONITOR_LSN_CHECKPOINT_AGE,
-log_sys->lsn - log_sys->last_checkpoint_lsn);
-return(log_sys->lsn);
+log_sys.lsn - log_sys.last_checkpoint_lsn);
+return(log_sys.lsn);
 }
 /************************************************************//**
@@ -400,7 +400,7 @@ log_get_lsn(void)
 log_mutex_enter();
-lsn = log_sys->lsn;
+lsn = log_sys.lsn;
 log_mutex_exit();
@@ -418,7 +418,7 @@ log_get_flush_lsn(void)
 log_mutex_enter();
-lsn = log_sys->flushed_to_disk_lsn;
+lsn = log_sys.flushed_to_disk_lsn;
 log_mutex_exit();
@@ -435,11 +435,11 @@ log_get_lsn_nowait(void)
 {
 lsn_t lsn=0;
-if (!mutex_enter_nowait(&(log_sys->mutex))) {
-lsn = log_sys->lsn;
-mutex_exit(&(log_sys->mutex));
+if (!mutex_enter_nowait(&(log_sys.mutex))) {
+lsn = log_sys.lsn;
+mutex_exit(&(log_sys.mutex));
 }
 return(lsn);
@@ -447,14 +447,14 @@ log_get_lsn_nowait(void)
 /****************************************************************
 Gets the log group capacity. It is OK to read the value without
-holding log_sys->mutex because it is constant.
+holding log_sys.mutex because it is constant.
 @return log group capacity */
 UNIV_INLINE
 lsn_t
 log_get_capacity(void)
 /*==================*/
 {
-return(log_sys->log_group_capacity);
+return(log_sys.log_group_capacity);
 }
 /****************************************************************
@@ -466,7 +466,7 @@ lsn_t
 log_get_max_modified_age_async(void)
 /*================================*/
 {
-return(log_sys->max_modified_age_async);
+return(log_sys.max_modified_age_async);
 }
 /***********************************************************************//**
@@ -498,7 +498,7 @@ log_free_check(void)
 sync_allowed_latches(latches,
 latches + UT_ARR_SIZE(latches))));
-if (log_sys->check_flush_or_checkpoint) {
+if (log_sys.check_flush_or_checkpoint) {
 log_check_margins();
 }

View File

@@ -331,7 +331,7 @@ extern bool recv_no_ibuf_operations;
 extern bool recv_needed_recovery;
 #ifdef UNIV_DEBUG
 /** TRUE if writing to the redo log (mtr_commit) is forbidden.
-Protected by log_sys->mutex. */
+Protected by log_sys.mutex. */
 extern bool recv_no_log_write;
 #endif /* UNIV_DEBUG */

View File

@@ -80,7 +80,7 @@ struct srv_stats_t
 lsn_ctr_1_t os_log_written;
 /** Number of writes being done to the log files.
-Protected by log_sys->write_mutex. */
+Protected by log_sys.write_mutex. */
 ulint_ctr_1_t os_log_pending_writes;
 /** We increase this counter, when we don't have enough

View File

@@ -219,7 +219,7 @@ bool
 log_crypt_init()
 {
 ut_ad(log_mutex_own());
-ut_ad(log_sys->is_encrypted());
+ut_ad(log_sys.is_encrypted());
 info.key_version = encryption_key_get_latest_version(
 LOG_DEFAULT_ENCRYPTION_KEY);

File diff suppressed because it is too large

View File

@@ -79,7 +79,7 @@ volatile bool recv_recovery_on;
 bool recv_needed_recovery;
 #ifdef UNIV_DEBUG
 /** TRUE if writing to the redo log (mtr_commit) is forbidden.
-Protected by log_sys->mutex. */
+Protected by log_sys.mutex. */
 bool recv_no_log_write = false;
 #endif /* UNIV_DEBUG */
@@ -669,7 +669,7 @@ loop:
 (source_offset % group->file_size));
 }
-log_sys->n_log_ios++;
+log_sys.n_log_ios++;
 MONITOR_INC(MONITOR_LOG_IO);
@@ -760,13 +760,13 @@ recv_synchronize_groups()
 lsn_t start_lsn = ut_uint64_align_down(recovered_lsn,
 OS_FILE_LOG_BLOCK_SIZE);
-log_group_read_log_seg(log_sys->buf, &log_sys->log,
+log_group_read_log_seg(log_sys.buf, &log_sys.log,
 &start_lsn, start_lsn + OS_FILE_LOG_BLOCK_SIZE);
 /* Update the fields in the group struct to correspond to
 recovered_lsn */
-log_group_set_fields(&log_sys->log, recovered_lsn);
+log_group_set_fields(&log_sys.log, recovered_lsn);
 /* Copy the checkpoint info to the log; remember that we have
 incremented checkpoint_no by one, and the info will not be written
@@ -799,10 +799,10 @@ static MY_ATTRIBUTE((warn_unused_result))
 dberr_t
 recv_find_max_checkpoint_0(log_group_t** max_group, ulint* max_field)
 {
-log_group_t* group = &log_sys->log;
+log_group_t* group = &log_sys.log;
 ib_uint64_t max_no = 0;
 ib_uint64_t checkpoint_no;
-byte* buf = log_sys->checkpoint_buf;
+byte* buf = log_sys.checkpoint_buf;
 ut_ad(group->format == 0);
@@ -882,12 +882,12 @@ dberr_t
 recv_log_format_0_recover(lsn_t lsn)
 {
 log_mutex_enter();
-log_group_t* group = &log_sys->log;
+log_group_t* group = &log_sys.log;
 const lsn_t source_offset
 = log_group_calc_lsn_offset(lsn, group);
 log_mutex_exit();
 const ulint page_no = ulint(source_offset >> srv_page_size_shift);
-byte* buf = log_sys->buf;
+byte* buf = log_sys.buf;
 static const char* NO_UPGRADE_RECOVERY_MSG =
 "Upgrade after a crash is not supported."
@@ -919,11 +919,11 @@ recv_log_format_0_recover(lsn_t lsn)
 recv_sys->parse_start_lsn = recv_sys->recovered_lsn
 = recv_sys->scanned_lsn
 = recv_sys->mlog_checkpoint_lsn = lsn;
-log_sys->last_checkpoint_lsn = log_sys->next_checkpoint_lsn
-= log_sys->lsn = log_sys->write_lsn
-= log_sys->current_flush_lsn = log_sys->flushed_to_disk_lsn
+log_sys.last_checkpoint_lsn = log_sys.next_checkpoint_lsn
+= log_sys.lsn = log_sys.write_lsn
+= log_sys.current_flush_lsn = log_sys.flushed_to_disk_lsn
 = lsn;
-log_sys->next_checkpoint_no = 0;
+log_sys.next_checkpoint_no = 0;
 return(DB_SUCCESS);
 }
@@ -939,12 +939,12 @@ recv_find_max_checkpoint(ulint* max_field)
 ulint field;
 byte* buf;
-group = &log_sys->log;
+group = &log_sys.log;
 max_no = 0;
 *max_field = 0;
-buf = log_sys->checkpoint_buf;
+buf = log_sys.checkpoint_buf;
 group->state = LOG_GROUP_CORRUPTED;
@@ -1019,7 +1019,7 @@ recv_find_max_checkpoint(ulint* max_field)
 buf + LOG_CHECKPOINT_LSN);
 group->lsn_offset = mach_read_from_8(
 buf + LOG_CHECKPOINT_OFFSET);
-log_sys->next_checkpoint_no = checkpoint_no;
+log_sys.next_checkpoint_no = checkpoint_no;
 }
 }
@@ -1751,7 +1751,7 @@ recv_recover_page(bool just_read_in, buf_block_t* block)
 while (recv) {
 end_lsn = recv->end_lsn;
-ut_ad(end_lsn <= log_sys->log.scanned_lsn);
+ut_ad(end_lsn <= log_sys.log.scanned_lsn);
 if (recv->len > RECV_DATA_BLOCK_SIZE) {
 /* We have to copy the record body to a separate
@@ -2927,11 +2927,11 @@ recv_group_scan_log_recs(
 OS_FILE_LOG_BLOCK_SIZE);
 end_lsn = start_lsn;
 log_group_read_log_seg(
-log_sys->buf, group, &end_lsn,
+log_sys.buf, group, &end_lsn,
 start_lsn + RECV_SCAN_SIZE);
 } while (end_lsn != start_lsn
 && !recv_scan_log_recs(
-available_mem, &store_to_hash, log_sys->buf,
+available_mem, &store_to_hash, log_sys.buf,
 checkpoint_lsn,
 start_lsn, end_lsn,
 contiguous_lsn, &group->scanned_lsn));
@@ -3157,14 +3157,14 @@ recv_recovery_from_checkpoint_start(lsn_t flush_lsn)
 if (err != DB_SUCCESS) {
-srv_start_lsn = recv_sys->recovered_lsn = log_sys->lsn;
+srv_start_lsn = recv_sys->recovered_lsn = log_sys.lsn;
 log_mutex_exit();
 return(err);
 }
-log_group_header_read(&log_sys->log, max_cp_field);
-buf = log_sys->checkpoint_buf;
+log_group_header_read(&log_sys.log, max_cp_field);
+buf = log_sys.checkpoint_buf;
 checkpoint_lsn = mach_read_from_8(buf + LOG_CHECKPOINT_LSN);
 checkpoint_no = mach_read_from_8(buf + LOG_CHECKPOINT_NO);
@@ -3177,7 +3177,7 @@ recv_recovery_from_checkpoint_start(lsn_t flush_lsn)
 ut_ad(RECV_SCAN_SIZE <= srv_log_buffer_size);
-group = &log_sys->log;
+group = &log_sys.log;
 const lsn_t end_lsn = mach_read_from_8(
 buf + LOG_CHECKPOINT_END_LSN);
@@ -3283,7 +3283,7 @@ recv_recovery_from_checkpoint_start(lsn_t flush_lsn)
 }
 }
-log_sys->lsn = recv_sys->recovered_lsn;
+log_sys.lsn = recv_sys->recovered_lsn;
 if (recv_needed_recovery) {
 bool missing_tablespace = false;
@@ -3378,8 +3378,8 @@ recv_recovery_from_checkpoint_start(lsn_t flush_lsn)
 /* Synchronize the uncorrupted log groups to the most up-to-date log
 group; we also copy checkpoint info to groups */
-log_sys->next_checkpoint_lsn = checkpoint_lsn;
-log_sys->next_checkpoint_no = checkpoint_no + 1;
+log_sys.next_checkpoint_lsn = checkpoint_lsn;
+log_sys.next_checkpoint_no = checkpoint_no + 1;
 recv_synchronize_groups();
@@ -3389,24 +3389,24 @@ recv_recovery_from_checkpoint_start(lsn_t flush_lsn)
 srv_start_lsn = recv_sys->recovered_lsn;
 }
-log_sys->buf_free = ulong(log_sys->lsn % OS_FILE_LOG_BLOCK_SIZE);
-log_sys->buf_next_to_write = log_sys->buf_free;
-log_sys->write_lsn = log_sys->lsn;
-log_sys->last_checkpoint_lsn = checkpoint_lsn;
+log_sys.buf_free = ulong(log_sys.lsn % OS_FILE_LOG_BLOCK_SIZE);
+log_sys.buf_next_to_write = log_sys.buf_free;
+log_sys.write_lsn = log_sys.lsn;
+log_sys.last_checkpoint_lsn = checkpoint_lsn;
 if (!srv_read_only_mode && srv_operation == SRV_OPERATION_NORMAL) {
 /* Write a MLOG_CHECKPOINT marker as the first thing,
 before generating any other redo log. This ensures
 that subsequent crash recovery will be possible even
 if the server were killed soon after this. */
-fil_names_clear(log_sys->last_checkpoint_lsn, true);
+fil_names_clear(log_sys.last_checkpoint_lsn, true);
 }
 MONITOR_SET(MONITOR_LSN_CHECKPOINT_AGE,
-log_sys->lsn - log_sys->last_checkpoint_lsn);
-log_sys->next_checkpoint_no = ++checkpoint_no;
+log_sys.lsn - log_sys.last_checkpoint_lsn);
+log_sys.next_checkpoint_no = ++checkpoint_no;
 mutex_enter(&recv_sys->mutex);
@@ -3512,26 +3512,26 @@ recv_reset_logs(
 {
 ut_ad(log_mutex_own());
-log_sys->lsn = ut_uint64_align_up(lsn, OS_FILE_LOG_BLOCK_SIZE);
-log_sys->log.lsn = log_sys->lsn;
-log_sys->log.lsn_offset = LOG_FILE_HDR_SIZE;
-log_sys->buf_next_to_write = 0;
-log_sys->write_lsn = log_sys->lsn;
-log_sys->next_checkpoint_no = 0;
-log_sys->last_checkpoint_lsn = 0;
-memset(log_sys->buf, 0, srv_log_buffer_size);
-log_block_init(log_sys->buf, log_sys->lsn);
-log_block_set_first_rec_group(log_sys->buf, LOG_BLOCK_HDR_SIZE);
-log_sys->buf_free = LOG_BLOCK_HDR_SIZE;
-log_sys->lsn += LOG_BLOCK_HDR_SIZE;
+log_sys.lsn = ut_uint64_align_up(lsn, OS_FILE_LOG_BLOCK_SIZE);
+log_sys.log.lsn = log_sys.lsn;
+log_sys.log.lsn_offset = LOG_FILE_HDR_SIZE;
+log_sys.buf_next_to_write = 0;
+log_sys.write_lsn = log_sys.lsn;
+log_sys.next_checkpoint_no = 0;
+log_sys.last_checkpoint_lsn = 0;
+memset(log_sys.buf, 0, srv_log_buffer_size);
+log_block_init(log_sys.buf, log_sys.lsn);
+log_block_set_first_rec_group(log_sys.buf, LOG_BLOCK_HDR_SIZE);
+log_sys.buf_free = LOG_BLOCK_HDR_SIZE;
+log_sys.lsn += LOG_BLOCK_HDR_SIZE;
 MONITOR_SET(MONITOR_LSN_CHECKPOINT_AGE,
-(log_sys->lsn - log_sys->last_checkpoint_lsn));
+(log_sys.lsn - log_sys.last_checkpoint_lsn));
 log_mutex_exit();

View File

@@ -481,7 +481,7 @@ mtr_write_log(
 ut_ad(!recv_no_log_write);
 DBUG_PRINT("ib_log",
 (ULINTPF " extra bytes written at " LSN_PF,
-len, log_sys->lsn));
+len, log_sys.lsn));
 log_reserve_and_open(len);
 log->for_each_block(write_log);
@@ -624,7 +624,7 @@ mtr_t::commit_checkpoint(
 if (write_mlog_checkpoint) {
 DBUG_PRINT("ib_log",
 ("MLOG_CHECKPOINT(" LSN_PF ") written at " LSN_PF,
-checkpoint_lsn, log_sys->lsn));
+checkpoint_lsn, log_sys.lsn));
 }
 }
@@ -774,7 +774,7 @@ mtr_t::Command::prepare_write()
 case MTR_LOG_NONE:
 ut_ad(m_impl->m_log.size() == 0);
 log_mutex_enter();
-m_end_lsn = m_start_lsn = log_sys->lsn;
+m_end_lsn = m_start_lsn = log_sys.lsn;
 return(0);
 case MTR_LOG_ALL:
 break;

View File

@@ -144,7 +144,7 @@ public:
 ut_ad(dict_index_is_spatial(m_index));
 DBUG_EXECUTE_IF("row_merge_instrument_log_check_flush",
-log_sys->check_flush_or_checkpoint = true;
+log_sys.check_flush_or_checkpoint = true;
 );
 for (idx_tuple_vec::iterator it = m_dtuple_vec->begin();
@@ -153,7 +153,7 @@ public:
 dtuple = *it;
 ut_ad(dtuple);
-if (log_sys->check_flush_or_checkpoint) {
+if (log_sys.check_flush_or_checkpoint) {
 if (!(*mtr_committed)) {
 btr_pcur_move_to_prev_on_page(pcur);
 btr_pcur_store_position(pcur, scan_mtr);

View File

@@ -2000,11 +2000,11 @@ srv_mon_process_existing_counter(
 break;
 case MONITOR_OVLD_LSN_FLUSHDISK:
-value = (mon_type_t) log_sys->flushed_to_disk_lsn;
+value = (mon_type_t) log_sys.flushed_to_disk_lsn;
 break;
 case MONITOR_OVLD_LSN_CURRENT:
-value = (mon_type_t) log_sys->lsn;
+value = (mon_type_t) log_sys.lsn;
 break;
 case MONITOR_OVLD_BUF_OLDEST_LSN:
@@ -2012,15 +2012,15 @@ srv_mon_process_existing_counter(
 break;
 case MONITOR_OVLD_LSN_CHECKPOINT:
-value = (mon_type_t) log_sys->last_checkpoint_lsn;
+value = (mon_type_t) log_sys.last_checkpoint_lsn;
 break;
 case MONITOR_OVLD_MAX_AGE_ASYNC:
-value = log_sys->max_modified_age_async;
+value = log_sys.max_modified_age_async;
 break;
 case MONITOR_OVLD_MAX_AGE_SYNC:
-value = log_sys->max_modified_age_sync;
+value = log_sys.max_modified_age_sync;
 break;
 #ifdef BTR_CUR_HASH_ADAPT

View File

@@ -496,7 +496,7 @@ create_log_files(
 /* Create a log checkpoint. */
 log_mutex_enter();
-if (log_sys->is_encrypted() && !log_crypt_init()) {
+if (log_sys.is_encrypted() && !log_crypt_init()) {
 return(DB_ERROR);
 }
 ut_d(recv_no_log_write = false);
@@ -1370,14 +1370,14 @@ srv_prepare_to_delete_redo_log_files(
 log_mutex_enter();
-fil_names_clear(log_sys->lsn, false);
-flushed_lsn = log_sys->lsn;
+fil_names_clear(log_sys.lsn, false);
+flushed_lsn = log_sys.lsn;
 {
 ib::info info;
 if (srv_log_file_size == 0
-|| (log_sys->log.format
+|| (log_sys.log.format
 & ~LOG_HEADER_FORMAT_ENCRYPTED)
 != LOG_HEADER_FORMAT_CURRENT) {
 info << "Upgrading redo log: ";
@@ -1385,7 +1385,7 @@ srv_prepare_to_delete_redo_log_files(
 || srv_log_file_size
 != srv_log_file_size_requested) {
 if (srv_encrypt_log
-== (my_bool)log_sys->is_encrypted()) {
+== (my_bool)log_sys.is_encrypted()) {
 info << (srv_encrypt_log
 ? "Resizing encrypted"
 : "Resizing");
@@ -1689,7 +1689,7 @@ dberr_t srv_start(bool create_new_db)
 }
 #endif /* UNIV_DEBUG */
-log_sys_init();
+log_sys.create();
 recv_sys_init();
 lock_sys.create(srv_lock_table_size);
@@ -2204,7 +2204,7 @@ files_checked:
 /* Leave the redo log alone. */
 } else if (srv_log_file_size_requested == srv_log_file_size
 && srv_n_log_files_found == srv_n_log_files
-&& log_sys->log.format
+&& log_sys.log.format
 == (srv_encrypt_log
 ? LOG_HEADER_FORMAT_CURRENT
 | LOG_HEADER_FORMAT_ENCRYPTED
@@ -2674,11 +2674,11 @@ void innodb_shutdown()
 ut_ad(buf_dblwr || !srv_was_started || srv_read_only_mode
 || srv_force_recovery >= SRV_FORCE_NO_TRX_UNDO);
 ut_ad(lock_sys.is_initialised() || !srv_was_started);
+ut_ad(log_sys.is_initialised() || !srv_was_started);
 #ifdef BTR_CUR_HASH_ADAPT
 ut_ad(btr_search_sys || !srv_was_started);
 #endif /* BTR_CUR_HASH_ADAPT */
 ut_ad(ibuf || !srv_was_started);
-ut_ad(log_sys || !srv_was_started);
 if (dict_stats_event) {
 dict_stats_thread_deinit();
@@ -2705,9 +2705,7 @@ void innodb_shutdown()
 if (ibuf) {
 ibuf_close();
 }
-if (log_sys) {
-log_shutdown();
-}
+log_sys.close();
 purge_sys.close();
 trx_sys.close();
 if (buf_dblwr) {