/*****************************************************************************

Copyright (c) 1995, 2019, Oracle and/or its affiliates. All Rights Reserved.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License, version 2.0, as published by the
Free Software Foundation.

This program is also distributed with certain software (including but not
limited to OpenSSL) that is licensed under separate terms, as designated in a
particular file or component or in included license documentation. The authors
of MySQL hereby grant you an additional permission to link the program and
your derivative works with the separately licensed software that they have
included with MySQL.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License, version 2.0,
for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

*****************************************************************************/

/**************************************************/ /**
 @file include/log0log.ic

 Redo log - definition of inlined functions.

 Created 12/9/1995 Heikki Tuuri
 *******************************************************/
#include <cstring>

#include "mach0data.h"
#include "os0file.h"
#include "srv0mon.h"
#include "srv0srv.h"
#include "ut0crc32.h"

#ifdef UNIV_LOG_LSN_DEBUG
#include "mtr0types.h"
#endif /* UNIV_LOG_LSN_DEBUG */
/** @name Log blocks */

/* @{ */
inline bool log_block_get_flush_bit(const byte *log_block) {
|
|
if (LOG_BLOCK_FLUSH_BIT_MASK &
|
|
mach_read_from_4(log_block + LOG_BLOCK_HDR_NO)) {
|
|
return (true);
|
|
}
|
|
|
|
return (false);
|
|
}
|
|
|
|
/** Sets or clears the flush bit in the log block's header number field.
The block number stored there must already be non-zero.
@param[in,out]  log_block  log block
@param[in]      value      true to set the bit, false to clear it */
inline void log_block_set_flush_bit(byte *log_block, bool value) {
  uint32_t field = mach_read_from_4(log_block + LOG_BLOCK_HDR_NO);

  ut_a(field != 0);

  if (value) {
    field |= LOG_BLOCK_FLUSH_BIT_MASK;
  } else {
    field &= ~LOG_BLOCK_FLUSH_BIT_MASK;
  }

  mach_write_to_4(log_block + LOG_BLOCK_HDR_NO, field);
}
inline bool log_block_get_encrypt_bit(const byte *log_block) {
|
|
if (LOG_BLOCK_ENCRYPT_BIT_MASK &
|
|
mach_read_from_2(log_block + LOG_BLOCK_HDR_DATA_LEN)) {
|
|
return (true);
|
|
}
|
|
|
|
return (false);
|
|
}
|
|
|
|
/** Sets or clears the encrypt bit in the log block's data-length field.
@param[in,out]  log_block  log block
@param[in]      val        non-zero to set the bit, zero to clear it */
inline void log_block_set_encrypt_bit(byte *log_block, ibool val) {
  uint32_t field = mach_read_from_2(log_block + LOG_BLOCK_HDR_DATA_LEN);

  if (val) {
    field |= LOG_BLOCK_ENCRYPT_BIT_MASK;
  } else {
    field &= ~LOG_BLOCK_ENCRYPT_BIT_MASK;
  }

  mach_write_to_2(log_block + LOG_BLOCK_HDR_DATA_LEN, field);
}
/** Gets the block number stored in the log block's header, with the flush
bit masked out.
@param[in]  log_block  log block
@return block number from the header */
inline uint32_t log_block_get_hdr_no(const byte *log_block) {
  const uint32_t field = mach_read_from_4(log_block + LOG_BLOCK_HDR_NO);

  return (field & ~LOG_BLOCK_FLUSH_BIT_MASK);
}
/** Stores the block number in the log block's header. The number must be
non-zero, must not collide with the flush bit, and must not exceed
LOG_BLOCK_MAX_NO.
@param[in,out]  log_block  log block
@param[in]      n          block number to store */
inline void log_block_set_hdr_no(byte *log_block, uint32_t n) {
  ut_a(n > 0);
  ut_a(n < LOG_BLOCK_FLUSH_BIT_MASK);
  ut_a(n <= LOG_BLOCK_MAX_NO);

  byte *field = log_block + LOG_BLOCK_HDR_NO;
  mach_write_to_4(field, n);
}
/** Reads the data-length field of the log block.
@param[in]  log_block  log block
@return value of the data-length field */
inline uint32_t log_block_get_data_len(const byte *log_block) {
  const byte *field = log_block + LOG_BLOCK_HDR_DATA_LEN;
  return (mach_read_from_2(field));
}
/** Writes the data-length field of the log block.
@param[in,out]  log_block  log block
@param[in]      len        new value for the data-length field */
inline void log_block_set_data_len(byte *log_block, ulint len) {
  byte *field = log_block + LOG_BLOCK_HDR_DATA_LEN;
  mach_write_to_2(field, len);
}
/** Reads the first-record-group field of the log block.
@param[in]  log_block  log block
@return value of the first-record-group field */
inline uint32_t log_block_get_first_rec_group(const byte *log_block) {
  const byte *field = log_block + LOG_BLOCK_FIRST_REC_GROUP;
  return (mach_read_from_2(field));
}
/** Writes the first-record-group field of the log block.
@param[in,out]  log_block  log block
@param[in]      offset     new value for the first-record-group field */
inline void log_block_set_first_rec_group(byte *log_block, uint32_t offset) {
  byte *field = log_block + LOG_BLOCK_FIRST_REC_GROUP;
  mach_write_to_2(field, offset);
}
/** Reads the checkpoint-number field of the log block (4 bytes).
@param[in]  log_block  log block
@return stored checkpoint number */
inline uint32_t log_block_get_checkpoint_no(const byte *log_block) {
  const byte *field = log_block + LOG_BLOCK_CHECKPOINT_NO;
  return (mach_read_from_4(field));
}
/** Stores the checkpoint number in the log block. The on-disk field is
only 4 bytes wide, so only the least significant 32 bits of the provided
value are kept - the truncation is deliberate.
@param[in,out]  log_block  log block
@param[in]      no         checkpoint number */
inline void log_block_set_checkpoint_no(byte *log_block, uint64_t no) {
  /* static_cast replaces the old C-style cast; same deliberate
  truncation to the low 32 bits. */
  mach_write_to_4(log_block + LOG_BLOCK_CHECKPOINT_NO,
                  static_cast<uint32_t>(no));
}
/** Converts an lsn to the number (1 .. LOG_BLOCK_MAX_NO) of the log block
that contains the lsn. The block index is first truncated to 32 bits
(matching the original C-style cast), then wrapped modulo
LOG_BLOCK_MAX_NO, then shifted to the 1-based range.
@param[in]  lsn  lsn of a byte within the block
@return block number for the block containing lsn */
inline uint32_t log_block_convert_lsn_to_no(lsn_t lsn) {
  const uint32_t block_index =
      static_cast<uint32_t>(lsn / OS_FILE_LOG_BLOCK_SIZE);

  return (block_index % LOG_BLOCK_MAX_NO + 1);
}
/** Calculates the checksum of the log block using the checksum algorithm
currently pointed to by log_checksum_algorithm_ptr (read atomically).
@param[in]  log_block  log block
@return computed checksum */
inline uint32_t log_block_calc_checksum(const byte *log_block) {
  const auto checksum_func = log_checksum_algorithm_ptr.load();

  return (checksum_func(log_block));
}
/** Calculates the checksum of the log block with ut_crc32, covering all
bytes of the block except its trailer.
@param[in]  log_block  log block
@return crc32 checksum of the block */
inline uint32_t log_block_calc_checksum_crc32(const byte *log_block) {
  const size_t len = OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_TRL_SIZE;

  return (ut_crc32(log_block, len));
}
/** Checksum implementation used when log checksums are disabled: ignores
the block contents entirely and returns a constant magic value.
@param[in]  log_block  ignored (name commented out to avoid an
                       unused-parameter warning)
@return LOG_NO_CHECKSUM_MAGIC */
inline uint32_t log_block_calc_checksum_none(const byte * /* log_block */) {
  return (LOG_NO_CHECKSUM_MAGIC);
}
/** Reads the checksum stored in the trailer of the log block (the 4 bytes
at offset OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_CHECKSUM).
@param[in]  log_block  log block
@return stored checksum */
inline uint32_t log_block_get_checksum(const byte *log_block) {
  const byte *field =
      log_block + OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_CHECKSUM;

  return (mach_read_from_4(field));
}
/** Writes the given checksum into the trailer of the log block.
@param[in,out]  log_block  log block
@param[in]      checksum   checksum value to store */
inline void log_block_set_checksum(byte *log_block, uint32_t checksum) {
  byte *field = log_block + OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_CHECKSUM;

  mach_write_to_4(field, checksum);
}
/** Computes the checksum of the log block with the currently selected
algorithm and stores it in the block's trailer.
@param[in,out]  log_block  log block */
inline void log_block_store_checksum(byte *log_block) {
  const uint32_t checksum = log_block_calc_checksum(log_block);

  log_block_set_checksum(log_block, checksum);
}
/* @} */
|
|
|
|
#ifndef UNIV_HOTBACKUP
|
|
|
|
inline bool log_needs_free_check(const log_t &log) {
|
|
const sn_t sn = log.sn.load();
|
|
return (sn > log.free_check_limit_sn.load());
|
|
}
|
|
|
|
inline bool log_needs_free_check() { return (log_needs_free_check(*log_sys)); }
|
|
|
|
/** Call this function before starting a mini-transaction. It will check
for space in the redo log. It assures there is at least
concurrency_safe_free_margin. If the space is not available, this will
wait until it is. Therefore it is important that the caller does not hold
any latch that may be acquired by the page cleaner or log flush process.
This includes any page block or file space latch. */
inline void log_free_check() {
  log_t &log = *log_sys;

#ifdef UNIV_DEBUG
  /* This function may be called while holding some latches. This is OK,
  as long as we are not holding any latches on buffer blocks or file
  spaces. The latches listed below are not held by any thread that frees
  up redo log space, so holding them while waiting here cannot deadlock. */
  static const latch_level_t latches[] = {
      SYNC_NO_ORDER_CHECK, /* used for non-labeled latches */
      SYNC_RSEGS,          /* rsegs->x_lock in trx_rseg_create() */
      SYNC_UNDO_DDL,       /* undo::ddl_mutex */
      SYNC_UNDO_SPACES,    /* undo::spaces::m_latch */
      SYNC_FTS_CACHE,      /* fts_cache_t::lock */
      SYNC_DICT,           /* dict_sys->mutex in commit_try_rebuild() */
      SYNC_DICT_OPERATION, /* X-latch in commit_try_rebuild() */
      SYNC_INDEX_TREE      /* index->lock */
  };

  sync_allowed_latches check(latches,
                             latches + sizeof(latches) / sizeof(*latches));

  /* Debug builds abort if the caller holds any latch that is not on the
  allow-list above. */
  if (sync_check_iterate(check)) {
    ib::error()
        << "log_free_check() was called while holding an un-listed latch.";
    ut_error;
  }
#endif /* UNIV_DEBUG */

  /* We prefer to wait now for the space in the log file, because right
  now we are not holding any latches on dirty pages. */

  if (log_needs_free_check(log)) {
    /* We need to wait, because the concurrency margin could be violated
    if we let all threads go forward after making this check now.

    The waiting procedure is rather unlikely to happen for a proper
    my.cnf. Therefore we extracted the code to a separate function, to
    make the inlined log_free_check() small. */

    log_free_check_wait(log);
  }
}
/** Translates a sequence number (sn), which enumerates only data bytes,
to the corresponding lsn, which also enumerates the bytes of block
headers and trailers.
@param[in]  sn  sequence number to translate
@return lsn addressing the same data byte */
constexpr inline lsn_t log_translate_sn_to_lsn(lsn_t sn) {
  const lsn_t full_blocks = sn / LOG_BLOCK_DATA_SIZE;
  const lsn_t data_offset = sn % LOG_BLOCK_DATA_SIZE;

  return (full_blocks * OS_FILE_LOG_BLOCK_SIZE + data_offset +
          LOG_BLOCK_HDR_SIZE);
}
/** Translates an lsn to the corresponding sequence number (sn), which
enumerates only data bytes (it skips bytes of block headers and
trailers).
@param[in]  lsn  log sequence number to translate
@return sn addressing the same data byte */
inline lsn_t log_translate_lsn_to_sn(lsn_t lsn) {
  /* sn corresponding to the beginning of the block containing lsn. */
  const sn_t block_sn = lsn / OS_FILE_LOG_BLOCK_SIZE * LOG_BLOCK_DATA_SIZE;

  /* Byte offset of lsn within its block; counts the block header too. */
  const uint32_t offset = lsn % OS_FILE_LOG_BLOCK_SIZE;

  if (offset < LOG_BLOCK_HDR_SIZE) {
    /* lsn points inside the block's header. Sn values do not
    enumerate header bytes, so the exact offset is irrelevant -
    return the sn of the beginning of the block's data. */
    return (block_sn);
  }

  if (offset > OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_TRL_SIZE) {
    /* lsn points inside the block's trailer. Sn values do not
    enumerate trailer bytes either, so return the sn of the
    beginning of the next block's data. */
    return (block_sn + LOG_BLOCK_DATA_SIZE);
  }

  /* lsn points at a data byte: add the offset, skipping the bytes of
  the block's header. */
  return (block_sn + offset - LOG_BLOCK_HDR_SIZE);
}
#endif /* !UNIV_HOTBACKUP */
|
|
|
|
/** Validates an lsn: it must not be smaller than LOG_START_LSN and must
address a data byte of a log block - neither the block's header nor its
trailer.
@param[in]  lsn  lsn to validate
@return true iff lsn is valid */
inline bool log_lsn_validate(lsn_t lsn) {
  const uint32_t offset = lsn % OS_FILE_LOG_BLOCK_SIZE;

  if (lsn < LOG_START_LSN) {
    return (false);
  }

  if (offset < LOG_BLOCK_HDR_SIZE) {
    return (false);
  }

  return (offset < OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_TRL_SIZE);
}
#ifndef UNIV_HOTBACKUP
|
|
|
|
/** @return total capacity of log files in bytes. */
|
|
inline uint64_t log_get_file_capacity(const log_t &log) {
|
|
return (log.files_real_capacity);
|
|
}
|
|
|
|
/** Gets the current lsn of the redo log, translated from the current sn.
@param[in]  log  redo log
@return current lsn */
inline lsn_t log_get_lsn(const log_t &log) {
  const sn_t current_sn = log.sn.load();

  return (log_translate_sn_to_lsn(current_sn));
}
/** Gets the lsn of the last checkpoint.
@param[in]  log  redo log
@return log.last_checkpoint_lsn (read atomically) */
inline lsn_t log_get_checkpoint_lsn(const log_t &log) {
  const lsn_t checkpoint_lsn = log.last_checkpoint_lsn.load();

  return (checkpoint_lsn);
}
/** Computes the checkpoint age: the number of lsn units between the last
checkpoint lsn and the current lsn.
@param[in]  log  redo log
@return current lsn minus last checkpoint lsn, or 0 when concurrent
reads/writes were reordered so the current lsn appears not greater than
the checkpoint lsn */
inline lsn_t log_get_checkpoint_age(const log_t &log) {
  const lsn_t checkpoint_lsn = log.last_checkpoint_lsn.load();
  const lsn_t current_lsn = log_get_lsn(log);

  /* This function takes no latch and assumes none is held, so the two
  loads above can race with concurrent writers - the result is already
  outdated when the function returns. If such reordering makes the
  current lsn appear behind the checkpoint lsn, the two values are in
  fact close together, so reporting age 0 is a safe approximation. */
  return (current_lsn > checkpoint_lsn ? current_lsn - checkpoint_lsn : 0);
}
inline void log_buffer_flush_to_disk(bool sync) {
|
|
log_buffer_flush_to_disk(*log_sys, sync);
|
|
}
|
|
|
|
#if defined(UNIV_HOTBACKUP) && defined(UNIV_DEBUG)
/** Print a log file header: its format, pad1 and start_lsn fields, the
creator string, and the checksum stored in the block's trailer.
@param[in]  block  pointer to the log buffer holding the header block */
UNIV_INLINE
void meb_log_print_file_hdr(byte *block) {
  ib::info(ER_IB_MSG_626) << "Log file header:"
                          << " format "
                          << mach_read_from_4(block + LOG_HEADER_FORMAT)
                          << " pad1 "
                          << mach_read_from_4(block + LOG_HEADER_PAD1)
                          << " start_lsn "
                          << mach_read_from_8(block + LOG_HEADER_START_LSN)
                          << " creator '" << block + LOG_HEADER_CREATOR << "'"
                          << " checksum " << log_block_get_checksum(block);
}
#endif /* UNIV_HOTBACKUP && UNIV_DEBUG */
/** Gets the lsn up to which the log buffer is ready to be written: the
tail of the log.recent_written links.
@param[in]  log  redo log
@return log.recent_written.tail() */
inline lsn_t log_buffer_ready_for_write_lsn(const log_t &log) {
  const lsn_t ready_lsn = log.recent_written.tail();

  return (ready_lsn);
}
/** Gets the lsn up to which dirty pages have been added to flush lists:
the tail of the log.recent_closed links.
@param[in]  log  redo log
@return log.recent_closed.tail() */
inline lsn_t log_buffer_dirty_pages_added_up_to_lsn(const log_t &log) {
  const lsn_t added_lsn = log.recent_closed.tail();

  return (added_lsn);
}
/** Gets the maximum lag of the flush order: the capacity of the
log.recent_closed links.
@param[in]  log  redo log
@return log.recent_closed.capacity() */
inline lsn_t log_buffer_flush_order_lag(const log_t &log) {
  const lsn_t lag = log.recent_closed.capacity();

  return (lag);
}
/** Checks whether the interval between write-to-file requests is short
enough to consider such requests frequent.
@param[in]  interval  interval between requests (1000 units == 1ms)
@return true iff the interval is below the 1ms threshold */
inline bool log_write_to_file_requests_are_frequent(uint64_t interval) {
  /* Requests closer together than 1ms count as frequent. */
  constexpr uint64_t max_interval = 1000; /* 1ms */

  return (interval < max_interval);
}
inline bool log_write_to_file_requests_are_frequent(const log_t &log) {
|
|
return (log_write_to_file_requests_are_frequent(
|
|
log.write_to_file_requests_interval.load(std::memory_order_relaxed)));
|
|
}
|
|
|
|
inline bool log_writer_is_active() {
|
|
return (srv_thread_is_active(srv_threads.m_log_writer));
|
|
}
|
|
|
|
inline bool log_write_notifier_is_active() {
|
|
return (srv_thread_is_active(srv_threads.m_log_write_notifier));
|
|
}
|
|
|
|
inline bool log_flusher_is_active() {
|
|
return (srv_thread_is_active(srv_threads.m_log_flusher));
|
|
}
|
|
|
|
inline bool log_flush_notifier_is_active() {
|
|
return (srv_thread_is_active(srv_threads.m_log_flush_notifier));
|
|
}
|
|
|
|
inline bool log_closer_is_active() {
|
|
return (srv_thread_is_active(srv_threads.m_log_closer));
|
|
}
|
|
|
|
inline bool log_checkpointer_is_active() {
|
|
return (srv_thread_is_active(srv_threads.m_log_checkpointer));
|
|
}
|
|
|
|
#endif /* !UNIV_HOTBACKUP */
|