Merge branch 'bb-10.0-vicentiu' into 10.0

Includes Percona XtraDB and TokuDB 5.6.36-82.1
Vicențiu Ciorbaru 2017-08-04 09:32:40 +02:00
commit a346a5613e
127 changed files with 34124 additions and 510 deletions

View File

@ -1,4 +1,4 @@
SET(TOKUDB_VERSION 5.6.36-82.0)
SET(TOKUDB_VERSION 5.6.36-82.1)
# PerconaFT only supports x86-64 and cmake-2.8.9+
IF(CMAKE_VERSION VERSION_LESS "2.8.9")
MESSAGE(STATUS "CMake 2.8.9 or higher is required by TokuDB")

View File

@ -9,6 +9,16 @@ project(TokuDB)
set(CMAKE_SHARED_LIBRARY_LINK_C_FLAGS "")
set(CMAKE_SHARED_LIBRARY_LINK_CXX_FLAGS "")
# detect when we are being built as a subproject
if (DEFINED MYSQL_PROJECT_NAME_DOCSTRING)
add_definitions( -DMYSQL_TOKUDB_ENGINE=1)
if ((CMAKE_BUILD_TYPE MATCHES "Debug") AND
(CMAKE_CXX_FLAGS_DEBUG MATCHES " -DENABLED_DEBUG_SYNC"))
include_directories(${CMAKE_SOURCE_DIR}/include)
include_directories(${CMAKE_SOURCE_DIR}/sql)
endif ()
endif ()
## Versions of gcc >= 4.9.0 require special versions of 'ar' and 'ranlib' for
## link-time optimizations to work properly.
##

View File

@ -428,6 +428,7 @@ static void print_db_env_struct (void) {
"int (*dirtool_attach)(DB_ENV *, DB_TXN *, const char *, const char *)",
"int (*dirtool_detach)(DB_ENV *, DB_TXN *, const char *)",
"int (*dirtool_move)(DB_ENV *, DB_TXN *, const char *, const char *)",
"void (*kill_waiter)(DB_ENV *, void *extra)",
NULL};
sort_and_dump_fields("db_env", true, extra);
@ -548,8 +549,8 @@ static void print_db_txn_struct (void) {
"int (*abort_with_progress)(DB_TXN*, TXN_PROGRESS_POLL_FUNCTION, void*)",
"int (*xa_prepare) (DB_TXN*, TOKU_XA_XID *, uint32_t flags)",
"uint64_t (*id64) (DB_TXN*)",
"void (*set_client_id)(DB_TXN *, uint64_t client_id)",
"uint64_t (*get_client_id)(DB_TXN *)",
"void (*set_client_id)(DB_TXN *, uint64_t client_id, void *client_extra)",
"void (*get_client_id)(DB_TXN *, uint64_t *client_id, void **client_extra)",
"bool (*is_prepared)(DB_TXN *)",
"DB_TXN *(*get_child)(DB_TXN *)",
"uint64_t (*get_start_time)(DB_TXN *)",
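The new kill_waiter env callback and the widened client_id accessors combine into one pattern: tag each transaction with an opaque per-session pointer, then use that same pointer to cancel any lock wait the session owns. A minimal sketch, assuming PerconaFT's generated db.h is on the include path; the session type conn_t and the helper names are hypothetical, only set_client_id, get_client_id, and kill_waiter come from this diff:

#include <db.h>
#include <stdint.h>

struct conn_t { uint64_t id; };  // hypothetical session object

static void attach_session(DB_TXN *txn, conn_t *conn) {
    // tag the txn with the session id and an opaque back-pointer
    txn->set_client_id(txn, conn->id, conn);
}

static void cancel_session_waits(DB_ENV *env, conn_t *conn) {
    // wake any pending lock request tagged with this session; the
    // request completes with DB_LOCK_NOTGRANTED
    env->kill_waiter(env, conn);
}

static uint64_t session_of(DB_TXN *txn) {
    uint64_t id;
    void *extra;
    txn->get_client_id(txn, &id, &extra);
    return id;
}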

View File

@ -123,6 +123,9 @@ ExternalProject_Add(build_snappy
-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
-DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
-DCMAKE_AR=${CMAKE_AR}
-DCMAKE_NM=${CMAKE_NM}
-DCMAKE_RANLIB=${CMAKE_RANLIB}
-DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}
-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
${USE_PROJECT_CMAKE_MODULE_PATH}

View File

@ -464,7 +464,10 @@ int toku_cachetable_openf (CACHEFILE *cfptr, CACHETABLE ct, const char *fname_in
char *
toku_cachefile_fname_in_env (CACHEFILE cf) {
return cf->fname_in_env;
if (cf) {
return cf->fname_in_env;
}
return nullptr;
}
void toku_cachefile_set_fname_in_env(CACHEFILE cf, char *new_fname_in_env) {
@ -2890,6 +2893,10 @@ toku_cachefile_get_cachetable(CACHEFILE cf) {
return cf->cachetable;
}
CACHEFILE toku_pair_get_cachefile(PAIR pair) {
return pair->cachefile;
}
//Only called by ft_end_checkpoint
//Must have access to cf->fd (must be protected)
void toku_cachefile_fsync(CACHEFILE cf) {

View File

@ -297,6 +297,9 @@ void *toku_cachefile_get_userdata(CACHEFILE);
CACHETABLE toku_cachefile_get_cachetable(CACHEFILE cf);
// Effect: Get the cachetable.
CACHEFILE toku_pair_get_cachefile(PAIR);
// Effect: Get the cachefile of the pair
void toku_cachetable_swap_pair_values(PAIR old_pair, PAIR new_pair);
// Effect: Swaps the value_data of old_pair and new_pair.
// Requires: both old_pair and new_pair to be pinned with write locks.

View File

@ -651,8 +651,12 @@ void toku_ftnode_clone_callback(void *value_data,
// set new pair attr if necessary
if (node->height == 0) {
*new_attr = make_ftnode_pair_attr(node);
node->logical_rows_delta = 0;
cloned_node->logical_rows_delta = 0;
for (int i = 0; i < node->n_children; i++) {
if (BP_STATE(node, i) == PT_AVAIL) {
BLB_LRD(node, i) = 0;
BLB_LRD(cloned_node, i) = 0;
}
}
} else {
new_attr->is_valid = false;
}
@ -700,9 +704,26 @@ void toku_ftnode_flush_callback(CACHEFILE UU(cachefile),
if (ftnode->height == 0) {
FT_STATUS_INC(FT_FULL_EVICTIONS_LEAF, 1);
FT_STATUS_INC(FT_FULL_EVICTIONS_LEAF_BYTES, node_size);
if (!ftnode->dirty) {
toku_ft_adjust_logical_row_count(
ft, -ftnode->logical_rows_delta);
// A leaf node (height == 0) is being evicted (!keep_me) and is
// not a checkpoint clone (!is_clone). This leaf node may have
// had messages applied to satisfy a query, but was never
// actually dirtied (!ftnode->dirty && !write_me). **Note that
// if (write_me) would persist the node and clear the dirty
// flag **. This message application may have updated the tree's
// logical row count. Since these message applications are not
// persisted, we need to undo the logical row count adjustments as
// they may occur again in the future if/when the node is
// re-read from disk for another query or change.
if (!ftnode->dirty && !write_me) {
int64_t lrc_delta = 0;
for (int i = 0; i < ftnode->n_children; i++) {
if (BP_STATE(ftnode, i) == PT_AVAIL) {
lrc_delta -= BLB_LRD(ftnode, i);
BLB_LRD(ftnode, i) = 0;
}
}
toku_ft_adjust_logical_row_count(ft, lrc_delta);
}
} else {
FT_STATUS_INC(FT_FULL_EVICTIONS_NONLEAF, 1);
@ -711,6 +732,11 @@ void toku_ftnode_flush_callback(CACHEFILE UU(cachefile),
toku_free(*disk_data);
} else {
if (ftnode->height == 0) {
// No need to adjust logical row counts when flushing a clone
// as they should have been zeroed out anyway when cloned.
// Clones are 'copies' of work already done so doing it again
// (adjusting row counts) would be redundant and lead to
// inaccurate counts.
for (int i = 0; i < ftnode->n_children; i++) {
if (BP_STATE(ftnode, i) == PT_AVAIL) {
BASEMENTNODE bn = BLB(ftnode, i);
@ -718,10 +744,6 @@ void toku_ftnode_flush_callback(CACHEFILE UU(cachefile),
bn->stat64_delta);
}
}
if (!ftnode->dirty) {
toku_ft_adjust_logical_row_count(
ft, -ftnode->logical_rows_delta);
}
}
}
toku_ftnode_free(&ftnode);
@ -748,24 +770,48 @@ toku_ft_status_update_pivot_fetch_reason(ftnode_fetch_extra *bfe)
}
}
int toku_ftnode_fetch_callback (CACHEFILE UU(cachefile), PAIR p, int fd, BLOCKNUM blocknum, uint32_t fullhash,
void **ftnode_pv, void** disk_data, PAIR_ATTR *sizep, int *dirtyp, void *extraargs) {
int toku_ftnode_fetch_callback(CACHEFILE UU(cachefile),
PAIR p,
int fd,
BLOCKNUM blocknum,
uint32_t fullhash,
void **ftnode_pv,
void **disk_data,
PAIR_ATTR *sizep,
int *dirtyp,
void *extraargs) {
assert(extraargs);
assert(*ftnode_pv == NULL);
FTNODE_DISK_DATA* ndd = (FTNODE_DISK_DATA*)disk_data;
assert(*ftnode_pv == nullptr);
FTNODE_DISK_DATA *ndd = (FTNODE_DISK_DATA *)disk_data;
ftnode_fetch_extra *bfe = (ftnode_fetch_extra *)extraargs;
FTNODE *node=(FTNODE*)ftnode_pv;
FTNODE *node = (FTNODE *)ftnode_pv;
// deserialize the node, must pass the bfe in because we cannot
// evaluate what piece of the node is necessary until we get it at
// least partially into memory
int r = toku_deserialize_ftnode_from(fd, blocknum, fullhash, node, ndd, bfe);
int r =
toku_deserialize_ftnode_from(fd, blocknum, fullhash, node, ndd, bfe);
if (r != 0) {
if (r == TOKUDB_BAD_CHECKSUM) {
fprintf(stderr,
"Checksum failure while reading node in file %s.\n",
toku_cachefile_fname_in_env(cachefile));
fprintf(
stderr,
"%s:%d:toku_ftnode_fetch_callback - "
"file[%s], blocknum[%ld], toku_deserialize_ftnode_from "
"failed with a checksum error.\n",
__FILE__,
__LINE__,
toku_cachefile_fname_in_env(cachefile),
blocknum.b);
} else {
fprintf(stderr, "Error deserializing node, errno = %d", r);
fprintf(
stderr,
"%s:%d:toku_ftnode_fetch_callback - "
"file[%s], blocknum[%ld], toku_deserialize_ftnode_from "
"failed with %d.\n",
__FILE__,
__LINE__,
toku_cachefile_fname_in_env(cachefile),
blocknum.b,
r);
}
// make absolutely sure we crash before doing anything else.
abort();
@ -774,7 +820,8 @@ int toku_ftnode_fetch_callback (CACHEFILE UU(cachefile), PAIR p, int fd, BLOCKNU
if (r == 0) {
*sizep = make_ftnode_pair_attr(*node);
(*node)->ct_pair = p;
*dirtyp = (*node)->dirty; // deserialize could mark the node as dirty (presumably for upgrade)
*dirtyp = (*node)->dirty; // deserialize could mark the node as dirty
// (presumably for upgrade)
}
return r;
}
@ -947,6 +994,16 @@ int toku_ftnode_pe_callback(void *ftnode_pv,
basements_to_destroy[num_basements_to_destroy++] = bn;
toku_ft_decrease_stats(&ft->in_memory_stats,
bn->stat64_delta);
// A basement node is being partially evicted.
// This basement node may have had messages applied to it to
// satisfy a query, but was never actually dirtied.
// This message application may have updated the tree's
// logical row count. Since these message applications are
// not being persisted, we need to undo the logical row count
// adjustments as they may occur again in the future if/when
// the node is re-read from disk for another query or change.
toku_ft_adjust_logical_row_count(ft,
-bn->logical_rows_delta);
set_BNULL(node, i);
BP_STATE(node, i) = PT_ON_DISK;
num_partial_evictions++;
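The eviction paths above all enforce one invariant: after an unpersisted node is dropped, the tree-wide logical row count must match what a fresh read of the on-disk state would produce. A self-contained toy model of that invariant, with hypothetical names (tree_lrc standing in for the FT's logical row count, bn_delta for a basement node's BLB_LRD):

#include <cassert>
#include <cstdint>

// tree_lrc models the tree-wide logical row count; bn_delta models one
// basement node's unpersisted delta (BLB_LRD in the real code).
static int64_t tree_lrc = 100;  // rows according to the on-disk state
static int64_t bn_delta = 0;

static void apply_insert_in_memory() {
    tree_lrc += 1;  // count is adjusted eagerly when the message applies
    bn_delta += 1;  // ...and the unpersisted portion is remembered
}

static void evict_without_write() {
    tree_lrc -= bn_delta;  // back out everything that was never persisted
    bn_delta = 0;
}

int main() {
    apply_insert_in_memory();  // in-memory count becomes 101
    evict_without_write();     // node dropped before any write
    assert(tree_lrc == 100);   // matches what a re-read from disk sees
    return 0;
}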

View File

@ -435,7 +435,8 @@ int toku_read_ft_and_store_in_cachefile (FT_HANDLE ft_handle, CACHEFILE cf, LSN
}
int fd = toku_cachefile_get_fd(cf);
int r = toku_deserialize_ft_from(fd, max_acceptable_lsn, &ft);
const char *fn = toku_cachefile_fname_in_env(cf);
int r = toku_deserialize_ft_from(fd, fn, max_acceptable_lsn, &ft);
if (r == TOKUDB_BAD_CHECKSUM) {
fprintf(stderr, "Checksum failure while reading header in file %s.\n", toku_cachefile_fname_in_env(cf));
assert(false); // make absolutely sure we crash before doing anything else

View File

@ -93,6 +93,7 @@ void toku_destroy_ftnode_internals(FTNODE node) {
if (node->height > 0) {
destroy_nonleaf_childinfo(BNC(node,i));
} else {
paranoid_invariant(BLB_LRD(node, i) == 0);
destroy_basement_node(BLB(node, i));
}
} else if (BP_STATE(node,i) == PT_COMPRESSED) {
@ -386,8 +387,7 @@ static void bnc_apply_messages_to_basement_node(
const pivot_bounds &
bounds, // contains pivot key bounds of this basement node
txn_gc_info *gc_info,
bool *msgs_applied,
int64_t* logical_rows_delta) {
bool *msgs_applied) {
int r;
NONLEAF_CHILDINFO bnc = BNC(ancestor, childnum);
@ -395,6 +395,7 @@ static void bnc_apply_messages_to_basement_node(
// apply messages from this buffer
STAT64INFO_S stats_delta = {0, 0};
uint64_t workdone_this_ancestor = 0;
int64_t logical_rows_delta = 0;
uint32_t stale_lbi, stale_ube;
if (!bn->stale_ancestor_messages_applied) {
@ -470,7 +471,7 @@ static void bnc_apply_messages_to_basement_node(
gc_info,
&workdone_this_ancestor,
&stats_delta,
logical_rows_delta);
&logical_rows_delta);
}
} else if (stale_lbi == stale_ube) {
// No stale messages to apply, we just apply fresh messages, and mark
@ -482,7 +483,7 @@ static void bnc_apply_messages_to_basement_node(
.gc_info = gc_info,
.workdone = &workdone_this_ancestor,
.stats_to_update = &stats_delta,
.logical_rows_delta = logical_rows_delta};
.logical_rows_delta = &logical_rows_delta};
if (fresh_ube - fresh_lbi > 0)
*msgs_applied = true;
r = bnc->fresh_message_tree
@ -503,7 +504,7 @@ static void bnc_apply_messages_to_basement_node(
.gc_info = gc_info,
.workdone = &workdone_this_ancestor,
.stats_to_update = &stats_delta,
.logical_rows_delta = logical_rows_delta};
.logical_rows_delta = &logical_rows_delta};
r = bnc->stale_message_tree
.iterate_on_range<struct iterate_do_bn_apply_msg_extra,
@ -521,6 +522,8 @@ static void bnc_apply_messages_to_basement_node(
if (stats_delta.numbytes || stats_delta.numrows) {
toku_ft_update_stats(&t->ft->in_memory_stats, stats_delta);
}
toku_ft_adjust_logical_row_count(t->ft, logical_rows_delta);
bn->logical_rows_delta += logical_rows_delta;
}
static void
@ -534,7 +537,6 @@ apply_ancestors_messages_to_bn(
bool* msgs_applied
)
{
int64_t logical_rows_delta = 0;
BASEMENTNODE curr_bn = BLB(node, childnum);
const pivot_bounds curr_bounds = bounds.next_bounds(node, childnum);
for (ANCESTORS curr_ancestors = ancestors; curr_ancestors; curr_ancestors = curr_ancestors->next) {
@ -547,16 +549,13 @@ apply_ancestors_messages_to_bn(
curr_ancestors->childnum,
curr_bounds,
gc_info,
msgs_applied,
&logical_rows_delta
msgs_applied
);
// We don't want to check this ancestor node again if the
// next time we query it, the msn hasn't changed.
curr_bn->max_msn_applied = curr_ancestors->node->max_msn_applied_to_node_on_disk;
}
}
toku_ft_adjust_logical_row_count(t->ft, logical_rows_delta);
node->logical_rows_delta += logical_rows_delta;
// At this point, we know all the stale messages above this
// basement node have been applied, and any new messages will be
// fresh, so we don't need to look at stale messages for this

View File

@ -175,11 +175,6 @@ struct ftnode {
int height;
int dirty;
uint32_t fullhash;
// current count of rows added or removed as a result of message application
// to this node as a basement, irrelevant for internal nodes, gets reset
// when node is undirtied. Used to back out tree scoped LRC if node is
// evicted but not persisted
int64_t logical_rows_delta;
// for internal nodes, if n_children==fanout+1 then the tree needs to be
// rebalanced. for leaf nodes, represents number of basement nodes
@ -211,6 +206,10 @@ struct ftnode_leaf_basement_node {
unsigned int seqinsert; // number of sequential inserts to this leaf
MSN max_msn_applied; // max message sequence number applied
bool stale_ancestor_messages_applied;
// current count of rows added or removed as a result of message application
// to this basement node, gets reset when node is undirtied.
// Used to back out tree scoped LRC if node is evicted but not persisted
int64_t logical_rows_delta;
STAT64INFO_S stat64_delta; // change in stat64 counters since basement was last written to disk
};
typedef struct ftnode_leaf_basement_node *BASEMENTNODE;
@ -385,6 +384,16 @@ enum reactivity toku_ftnode_get_reactivity(FT ft, FTNODE node);
enum reactivity toku_ftnode_get_nonleaf_reactivity(FTNODE node, unsigned int fanout);
enum reactivity toku_ftnode_get_leaf_reactivity(FTNODE node, uint32_t nodesize);
inline const char* toku_ftnode_get_cachefile_fname_in_env(FTNODE node) {
if (node->ct_pair) {
CACHEFILE cf = toku_pair_get_cachefile(node->ct_pair);
if (cf) {
return toku_cachefile_fname_in_env(cf);
}
}
return nullptr;
}
/**
* Finds the next child for HOT to flush to, given that everything up to
* and including k has been flattened.
@ -577,3 +586,4 @@ static inline void set_BSB(FTNODE node, int i, struct sub_block *sb) {
#define BLB_DATA(node,i) (&(BLB(node,i)->data_buffer))
#define BLB_NBYTESINDATA(node,i) (BLB_DATA(node,i)->get_disk_size())
#define BLB_SEQINSERT(node,i) (BLB(node,i)->seqinsert)
#define BLB_LRD(node, i) (BLB(node,i)->logical_rows_delta)

View File

@ -644,7 +644,29 @@ exit:
// Read ft from file into struct. Read both headers and use one.
// We want the latest acceptable header whose checkpoint_lsn is no later
// than max_acceptable_lsn.
int toku_deserialize_ft_from(int fd, LSN max_acceptable_lsn, FT *ft) {
#define dump_state_of_toku_deserialize_ft_from() \
fprintf(stderr, \
"%s:%d toku_deserialize_ft_from: " \
"filename[%s] " \
"r[%d] max_acceptable_lsn[%lu]" \
"r0[%d] checkpoint_lsn_0[%lu] checkpoint_count_0[%lu] " \
"r1[%d] checkpoint_lsn_1[%lu] checkpoint_count_1[%lu]\n", \
__FILE__, \
__LINE__, \
fn, \
r, \
max_acceptable_lsn.lsn, \
r0, \
checkpoint_lsn_0.lsn, \
checkpoint_count_0, \
r1, \
checkpoint_lsn_1.lsn, \
checkpoint_count_1);
int toku_deserialize_ft_from(int fd,
const char *fn,
LSN max_acceptable_lsn,
FT *ft) {
struct rbuf rb_0;
struct rbuf rb_1;
uint64_t checkpoint_count_0 = 0;
@ -655,7 +677,7 @@ int toku_deserialize_ft_from(int fd, LSN max_acceptable_lsn, FT *ft) {
bool h0_acceptable = false;
bool h1_acceptable = false;
struct rbuf *rb = NULL;
int r0, r1, r;
int r0, r1, r = 0;
toku_off_t header_0_off = 0;
r0 = deserialize_ft_from_fd_into_rbuf(fd,
@ -702,6 +724,10 @@ int toku_deserialize_ft_from(int fd, LSN max_acceptable_lsn, FT *ft) {
// first header, unless it's readable
}
if (r != TOKUDB_DICTIONARY_NO_HEADER) {
dump_state_of_toku_deserialize_ft_from();
}
// it should not be possible for both headers to be later than the
// max_acceptable_lsn
invariant(
@ -713,11 +739,19 @@ int toku_deserialize_ft_from(int fd, LSN max_acceptable_lsn, FT *ft) {
if (h0_acceptable && h1_acceptable) {
if (checkpoint_count_0 > checkpoint_count_1) {
if (!(checkpoint_count_0 == checkpoint_count_1 + 1) ||
!(version_0 >= version_1)) {
dump_state_of_toku_deserialize_ft_from();
}
invariant(checkpoint_count_0 == checkpoint_count_1 + 1);
invariant(version_0 >= version_1);
rb = &rb_0;
version = version_0;
} else {
if (!(checkpoint_count_1 == checkpoint_count_0 + 1) ||
!(version_1 >= version_0)) {
dump_state_of_toku_deserialize_ft_from();
}
invariant(checkpoint_count_1 == checkpoint_count_0 + 1);
invariant(version_1 >= version_0);
rb = &rb_1;
@ -729,6 +763,7 @@ int toku_deserialize_ft_from(int fd, LSN max_acceptable_lsn, FT *ft) {
fprintf(
stderr,
"Header 2 checksum failed, but header 1 ok. Proceeding.\n");
dump_state_of_toku_deserialize_ft_from();
}
rb = &rb_0;
version = version_0;
@ -738,11 +773,15 @@ int toku_deserialize_ft_from(int fd, LSN max_acceptable_lsn, FT *ft) {
fprintf(
stderr,
"Header 1 checksum failed, but header 2 ok. Proceeding.\n");
dump_state_of_toku_deserialize_ft_from();
}
rb = &rb_1;
version = version_1;
}
if (!rb) {
dump_state_of_toku_deserialize_ft_from();
}
paranoid_invariant(rb);
r = deserialize_ft_versioned(fd, rb, ft, version);

View File

@ -42,12 +42,23 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#include "ft/serialize/block_table.h"
size_t toku_serialize_ft_size(struct ft_header *h);
void toku_serialize_ft_to(int fd, struct ft_header *h, block_table *bt, CACHEFILE cf);
void toku_serialize_ft_to_wbuf(struct wbuf *wbuf, struct ft_header *h, DISKOFF translation_location_on_disk, DISKOFF translation_size_on_disk);
void toku_serialize_descriptor_contents_to_fd(int fd, DESCRIPTOR desc, DISKOFF offset);
void toku_serialize_descriptor_contents_to_wbuf(struct wbuf *wb, DESCRIPTOR desc);
int toku_deserialize_ft_from(int fd, LSN max_acceptable_lsn, FT *ft);
void toku_serialize_ft_to(int fd,
struct ft_header *h,
block_table *bt,
CACHEFILE cf);
void toku_serialize_ft_to_wbuf(struct wbuf *wbuf,
struct ft_header *h,
DISKOFF translation_location_on_disk,
DISKOFF translation_size_on_disk);
void toku_serialize_descriptor_contents_to_fd(int fd,
DESCRIPTOR desc,
DISKOFF offset);
void toku_serialize_descriptor_contents_to_wbuf(struct wbuf *wb,
DESCRIPTOR desc);
int toku_deserialize_ft_from(int fd,
const char *fn,
LSN max_acceptable_lsn,
FT *ft);
// TODO rename
int deserialize_ft_from_fd_into_rbuf(int fd,

File diff suppressed because it is too large

View File

@ -46,21 +46,51 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#include "ft/serialize/block_table.h"
unsigned int toku_serialize_ftnode_size(FTNODE node);
int toku_serialize_ftnode_to_memory(FTNODE node, FTNODE_DISK_DATA *ndd,
unsigned int basementnodesize,
enum toku_compression_method compression_method,
bool do_rebalancing, bool in_parallel,
size_t *n_bytes_to_write, size_t *n_uncompressed_bytes,
char **bytes_to_write);
int toku_serialize_ftnode_to(int fd, BLOCKNUM, FTNODE node, FTNODE_DISK_DATA *ndd, bool do_rebalancing, FT ft, bool for_checkpoint);
int toku_serialize_rollback_log_to(int fd, ROLLBACK_LOG_NODE log, SERIALIZED_ROLLBACK_LOG_NODE serialized_log, bool is_serialized,
FT ft, bool for_checkpoint);
void toku_serialize_rollback_log_to_memory_uncompressed(ROLLBACK_LOG_NODE log, SERIALIZED_ROLLBACK_LOG_NODE serialized);
int toku_serialize_ftnode_to_memory(
FTNODE node,
FTNODE_DISK_DATA *ndd,
unsigned int basementnodesize,
enum toku_compression_method compression_method,
bool do_rebalancing,
bool in_parallel,
size_t *n_bytes_to_write,
size_t *n_uncompressed_bytes,
char **bytes_to_write);
int toku_serialize_ftnode_to(int fd,
BLOCKNUM,
FTNODE node,
FTNODE_DISK_DATA *ndd,
bool do_rebalancing,
FT ft,
bool for_checkpoint);
int toku_serialize_rollback_log_to(int fd,
ROLLBACK_LOG_NODE log,
SERIALIZED_ROLLBACK_LOG_NODE serialized_log,
bool is_serialized,
FT ft,
bool for_checkpoint);
void toku_serialize_rollback_log_to_memory_uncompressed(
ROLLBACK_LOG_NODE log,
SERIALIZED_ROLLBACK_LOG_NODE serialized);
int toku_deserialize_rollback_log_from(int fd, BLOCKNUM blocknum, ROLLBACK_LOG_NODE *logp, FT ft);
int toku_deserialize_bp_from_disk(FTNODE node, FTNODE_DISK_DATA ndd, int childnum, int fd, ftnode_fetch_extra *bfe);
int toku_deserialize_bp_from_compressed(FTNODE node, int childnum, ftnode_fetch_extra *bfe);
int toku_deserialize_ftnode_from(int fd, BLOCKNUM off, uint32_t fullhash, FTNODE *node, FTNODE_DISK_DATA *ndd, ftnode_fetch_extra *bfe);
int toku_deserialize_rollback_log_from(int fd,
BLOCKNUM blocknum,
ROLLBACK_LOG_NODE *logp,
FT ft);
int toku_deserialize_bp_from_disk(FTNODE node,
FTNODE_DISK_DATA ndd,
int childnum,
int fd,
ftnode_fetch_extra *bfe);
int toku_deserialize_bp_from_compressed(FTNODE node,
int childnum,
ftnode_fetch_extra *bfe);
int toku_deserialize_ftnode_from(int fd,
BLOCKNUM off,
uint32_t fullhash,
FTNODE *node,
FTNODE_DISK_DATA *ndd,
ftnode_fetch_extra *bfe);
void toku_serialize_set_parallel(bool);
@ -73,9 +103,14 @@ int decompress_from_raw_block_into_rbuf(uint8_t *raw_block, size_t raw_block_siz
// used by verify
int deserialize_ft_versioned(int fd, struct rbuf *rb, FT *ft, uint32_t version);
void read_block_from_fd_into_rbuf(int fd, BLOCKNUM blocknum, FT ft, struct rbuf *rb);
void read_block_from_fd_into_rbuf(int fd,
BLOCKNUM blocknum,
FT ft,
struct rbuf *rb);
int read_compressed_sub_block(struct rbuf *rb, struct sub_block *sb);
int verify_ftnode_sub_block(struct sub_block *sb);
int verify_ftnode_sub_block(struct sub_block *sb,
const char *fname,
BLOCKNUM blocknum);
void just_decompress_sub_block(struct sub_block *sb);
// used by ft-node-deserialize.cc

View File

@ -203,7 +203,7 @@ int toku_rollback_frename(BYTESTRING old_iname,
}
if (toku_stat(new_iname_full.get(), &stat) == -1) {
if (ENOENT == errno)
if (ENOENT == errno || ENAMETOOLONG == errno)
new_exist = false;
else
return 1;

View File

@ -269,6 +269,7 @@ static txn_child_manager tcm;
.state = TOKUTXN_LIVE,
.num_pin = 0,
.client_id = 0,
.client_extra = nullptr,
.start_time = time(NULL),
};
@ -705,12 +706,14 @@ bool toku_txn_has_spilled_rollback(TOKUTXN txn) {
return txn_has_spilled_rollback_logs(txn);
}
uint64_t toku_txn_get_client_id(TOKUTXN txn) {
return txn->client_id;
void toku_txn_get_client_id(TOKUTXN txn, uint64_t *client_id, void **client_extra) {
if (client_id) *client_id = txn->client_id;
if (client_extra) *client_extra = txn->client_extra;
}
void toku_txn_set_client_id(TOKUTXN txn, uint64_t client_id) {
void toku_txn_set_client_id(TOKUTXN txn, uint64_t client_id, void *client_extra) {
txn->client_id = client_id;
txn->client_extra = client_extra;
}
time_t toku_txn_get_start_time(struct tokutxn *txn) {

View File

@ -193,6 +193,7 @@ struct tokutxn {
uint32_t num_pin; // number of threads (all hot indexes) that want this
// txn to not transition to commit or abort
uint64_t client_id;
void *client_extra;
time_t start_time;
};
typedef struct tokutxn *TOKUTXN;
@ -293,8 +294,8 @@ void toku_txn_unpin_live_txn(struct tokutxn *txn);
bool toku_txn_has_spilled_rollback(struct tokutxn *txn);
uint64_t toku_txn_get_client_id(struct tokutxn *txn);
void toku_txn_set_client_id(struct tokutxn *txn, uint64_t client_id);
void toku_txn_get_client_id(struct tokutxn *txn, uint64_t *client_id, void **client_extra);
void toku_txn_set_client_id(struct tokutxn *txn, uint64_t client_id, void *client_extra);
time_t toku_txn_get_start_time(struct tokutxn *txn);

View File

@ -65,6 +65,7 @@ void lock_request::create(void) {
toku_cond_init(&m_wait_cond, nullptr);
m_start_test_callback = nullptr;
m_start_before_pending_test_callback = nullptr;
m_retry_test_callback = nullptr;
}
@ -79,7 +80,7 @@ void lock_request::destroy(void) {
}
// set the lock request parameters. this API allows a lock request to be reused.
void lock_request::set(locktree *lt, TXNID txnid, const DBT *left_key, const DBT *right_key, lock_request::type lock_type, bool big_txn) {
void lock_request::set(locktree *lt, TXNID txnid, const DBT *left_key, const DBT *right_key, lock_request::type lock_type, bool big_txn, void *extra) {
invariant(m_state != state::PENDING);
m_lt = lt;
m_txnid = txnid;
@ -91,6 +92,7 @@ void lock_request::set(locktree *lt, TXNID txnid, const DBT *left_key, const DBT
m_state = state::INITIALIZED;
m_info = lt ? lt->get_lock_request_info() : nullptr;
m_big_txn = big_txn;
m_extra = extra;
}
// get rid of any stored left and right key copies and
@ -173,6 +175,8 @@ int lock_request::start(void) {
m_state = state::PENDING;
m_start_time = toku_current_time_microsec() / 1000;
m_conflicting_txnid = conflicts.get(0);
if (m_start_before_pending_test_callback)
m_start_before_pending_test_callback();
toku_mutex_lock(&m_info->mutex);
insert_into_lock_requests();
if (deadlock_exists(conflicts)) {
@ -180,7 +184,8 @@ int lock_request::start(void) {
r = DB_LOCK_DEADLOCK;
}
toku_mutex_unlock(&m_info->mutex);
if (m_start_test_callback) m_start_test_callback(); // test callback
if (m_start_test_callback)
m_start_test_callback(); // test callback
}
if (r != DB_LOCK_NOTGRANTED) {
@ -203,7 +208,18 @@ int lock_request::wait(uint64_t wait_time_ms, uint64_t killed_time_ms, int (*kil
toku_mutex_lock(&m_info->mutex);
// check again, this time locking out other retry calls
if (m_state == state::PENDING) {
retry();
}
while (m_state == state::PENDING) {
// check if this thread is killed
if (killed_callback && killed_callback()) {
remove_from_lock_requests();
complete(DB_LOCK_NOTGRANTED);
continue;
}
// compute next wait time
uint64_t t_wait;
@ -221,13 +237,13 @@ int lock_request::wait(uint64_t wait_time_ms, uint64_t killed_time_ms, int (*kil
invariant(r == 0 || r == ETIMEDOUT);
t_now = toku_current_time_microsec();
if (m_state == state::PENDING && (t_now >= t_end || (killed_callback && killed_callback()))) {
if (m_state == state::PENDING && (t_now >= t_end)) {
m_info->counters.timeout_count += 1;
// if we're still pending and we timed out, then remove our
// request from the set of lock requests and fail.
remove_from_lock_requests();
// complete sets m_state to COMPLETE, breaking us out of the loop
complete(DB_LOCK_NOTGRANTED);
}
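With the timeout check now separated from the kill check, killed_callback is polled once per wait iteration (roughly every killed_time_ms), and a nonzero return cancels the request immediately instead of waiting out the full timeout. A minimal sketch of such a callback, assuming a per-session kill flag set elsewhere by the server (session_killed is hypothetical):

#include <atomic>

static std::atomic<bool> session_killed{false};  // set by the server on kill

static int my_killed_callback(void) {
    // nonzero => wait() removes the request from the pending set and
    // completes it with DB_LOCK_NOTGRANTED
    return session_killed.load() ? 1 : 0;
}

// usage, as in the unit tests below:
//   int r = request.wait(wait_time_ms, killed_time_ms, my_killed_callback);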
@ -274,13 +290,17 @@ TXNID lock_request::get_conflicting_txnid(void) const {
}
int lock_request::retry(void) {
int r;
invariant(m_state == state::PENDING);
int r;
txnid_set conflicts;
conflicts.create();
if (m_type == type::WRITE) {
r = m_lt->acquire_write_lock(m_txnid, m_left_key, m_right_key, nullptr, m_big_txn);
r = m_lt->acquire_write_lock(
m_txnid, m_left_key, m_right_key, &conflicts, m_big_txn);
} else {
r = m_lt->acquire_read_lock(m_txnid, m_left_key, m_right_key, nullptr, m_big_txn);
r = m_lt->acquire_read_lock(
m_txnid, m_left_key, m_right_key, &conflicts, m_big_txn);
}
// if the acquisition succeeded then remove ourselves from the
@ -288,44 +308,63 @@ int lock_request::retry(void) {
if (r == 0) {
remove_from_lock_requests();
complete(r);
if (m_retry_test_callback) m_retry_test_callback(); // test callback
if (m_retry_test_callback)
m_retry_test_callback(); // test callback
toku_cond_broadcast(&m_wait_cond);
} else {
m_conflicting_txnid = conflicts.get(0);
}
conflicts.destroy();
return r;
}
void lock_request::retry_all_lock_requests(locktree *lt) {
void lock_request::retry_all_lock_requests(
locktree *lt,
void (*after_retry_all_test_callback)(void)) {
lt_lock_request_info *info = lt->get_lock_request_info();
// if a thread reads this bit to be true, then it should go ahead and
// take the locktree mutex and retry lock requests. we use this bit
// to prevent every single thread from waiting on the locktree mutex
// in order to retry requests, especially when no requests actually exist.
//
// it is important to note that this bit only provides an optimization.
// it is not problematic for it to be true when it should be false,
// but it can be problematic for it to be false when it should be true.
// therefore, the lock request code must ensure that when lock requests
// are added to this locktree, the bit is set.
// see lock_request::insert_into_lock_requests()
if (!info->should_retry_lock_requests) {
// if there are no pending lock requests then there is nothing to do.
// the unlocked data race on pending_is_empty is OK since lock requests
// are retried after being added to the pending set.
if (info->pending_is_empty)
return;
// get my retry generation (post increment of retry_want)
unsigned long long my_retry_want = (info->retry_want += 1);
toku_mutex_lock(&info->retry_mutex);
// here is the group retry algorithm.
// get the latest retry_want count and use it as the generation number of
// this retry operation. if this retry generation is > the last retry
// generation, then do the lock retries. otherwise, no lock retries
// are needed.
if ((my_retry_want - 1) == info->retry_done) {
for (;;) {
if (!info->running_retry) {
info->running_retry = true;
info->retry_done = info->retry_want;
toku_mutex_unlock(&info->retry_mutex);
retry_all_lock_requests_info(info);
if (after_retry_all_test_callback)
after_retry_all_test_callback();
toku_mutex_lock(&info->retry_mutex);
info->running_retry = false;
toku_cond_broadcast(&info->retry_cv);
break;
} else {
toku_cond_wait(&info->retry_cv, &info->retry_mutex);
}
}
}
toku_mutex_unlock(&info->retry_mutex);
}
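The generation scheme above coalesces concurrent retries: every caller takes a retry_want ticket, but only the caller whose ticket is exactly one past retry_done runs a pass, and it advances retry_done to cover every ticket issued so far. A self-contained toy of just that coalescing logic (the real code keeps these fields in lt_lock_request_info; running/cv here model running_retry/retry_cv):

#include <atomic>
#include <condition_variable>
#include <functional>
#include <mutex>

struct group_retry {
    std::atomic<unsigned long long> want{0};  // retry_want
    unsigned long long done = 0;              // retry_done
    bool running = false;                     // running_retry
    std::mutex m;                             // retry_mutex
    std::condition_variable cv;               // retry_cv

    void run(const std::function<void()> &retry_pass) {
        unsigned long long my_want = ++want;  // take a ticket
        std::unique_lock<std::mutex> lk(m);
        if (my_want - 1 == done) {            // I am the next runner
            while (running)                   // wait out a pass in flight
                cv.wait(lk);
            running = true;
            done = want.load();               // cover all tickets so far
            lk.unlock();
            retry_pass();                     // retry outside the mutex
            lk.lock();
            running = false;
            cv.notify_all();
        }
        // otherwise a runner whose `done` already covers my_want
        // retries on this caller's behalf
    }
};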
void lock_request::retry_all_lock_requests_info(lt_lock_request_info *info) {
toku_mutex_lock(&info->mutex);
// let other threads know that they need not retry lock requests at this time.
//
// the motivation here is that if a bunch of threads have already released
// their locks in the rangetree, then it's probably okay for only one thread
// to iterate over the list of requests and retry them. otherwise, at high
// thread counts and a large number of pending lock requests, you could
// end up wasting a lot of cycles.
info->should_retry_lock_requests = false;
size_t i = 0;
while (i < info->pending_lock_requests.size()) {
// retry all of the pending lock requests.
for (size_t i = 0; i < info->pending_lock_requests.size();) {
lock_request *request;
int r = info->pending_lock_requests.fetch(i, &request);
invariant_zero(r);
@ -346,6 +385,30 @@ void lock_request::retry_all_lock_requests(locktree *lt) {
toku_mutex_unlock(&info->mutex);
}
void *lock_request::get_extra(void) const {
return m_extra;
}
void lock_request::kill_waiter(void) {
remove_from_lock_requests();
complete(DB_LOCK_NOTGRANTED);
toku_cond_broadcast(&m_wait_cond);
}
void lock_request::kill_waiter(locktree *lt, void *extra) {
lt_lock_request_info *info = lt->get_lock_request_info();
toku_mutex_lock(&info->mutex);
for (size_t i = 0; i < info->pending_lock_requests.size(); i++) {
lock_request *request;
int r = info->pending_lock_requests.fetch(i, &request);
if (r == 0 && request->get_extra() == extra) {
request->kill_waiter();
break;
}
}
toku_mutex_unlock(&info->mutex);
}
// find another lock request by txnid. must hold the mutex.
lock_request *lock_request::find_lock_request(const TXNID &txnid) {
lock_request *request;
@ -360,27 +423,30 @@ lock_request *lock_request::find_lock_request(const TXNID &txnid) {
void lock_request::insert_into_lock_requests(void) {
uint32_t idx;
lock_request *request;
int r = m_info->pending_lock_requests.find_zero<TXNID, find_by_txnid>(m_txnid, &request, &idx);
int r = m_info->pending_lock_requests.find_zero<TXNID, find_by_txnid>(
m_txnid, &request, &idx);
invariant(r == DB_NOTFOUND);
r = m_info->pending_lock_requests.insert_at(this, idx);
invariant_zero(r);
// ensure that this bit is true, now that at least one lock request is in the set
m_info->should_retry_lock_requests = true;
m_info->pending_is_empty = false;
}
// remove this lock request from the locktree's set. must hold the mutex.
void lock_request::remove_from_lock_requests(void) {
uint32_t idx;
lock_request *request;
int r = m_info->pending_lock_requests.find_zero<TXNID, find_by_txnid>(m_txnid, &request, &idx);
int r = m_info->pending_lock_requests.find_zero<TXNID, find_by_txnid>(
m_txnid, &request, &idx);
invariant_zero(r);
invariant(request == this);
r = m_info->pending_lock_requests.delete_at(idx);
invariant_zero(r);
if (m_info->pending_lock_requests.size() == 0)
m_info->pending_is_empty = true;
}
int lock_request::find_by_txnid(lock_request * const &request, const TXNID &txnid) {
int lock_request::find_by_txnid(lock_request *const &request,
const TXNID &txnid) {
TXNID request_txnid = request->m_txnid;
if (request_txnid < txnid) {
return -1;
@ -395,6 +461,10 @@ void lock_request::set_start_test_callback(void (*f)(void)) {
m_start_test_callback = f;
}
void lock_request::set_start_before_pending_test_callback(void (*f)(void)) {
m_start_before_pending_test_callback = f;
}
void lock_request::set_retry_test_callback(void (*f)(void)) {
m_retry_test_callback = f;
}

View File

@ -78,7 +78,7 @@ public:
// effect: Resets the lock request parameters, allowing it to be reused.
// requires: Lock request was already created at some point
void set(locktree *lt, TXNID txnid, const DBT *left_key, const DBT *right_key, type lock_type, bool big_txn);
void set(locktree *lt, TXNID txnid, const DBT *left_key, const DBT *right_key, type lock_type, bool big_txn, void *extra = nullptr);
// effect: Tries to acquire a lock described by this lock request.
// returns: The return code of locktree::acquire_[write,read]_lock()
@ -107,14 +107,24 @@ public:
TXNID get_conflicting_txnid(void) const;
// effect: Retries all of the lock requests for the given locktree.
// Any lock requests successfully restarted is completed and woken up.
// Any lock requests successfully restarted are completed and woken
// up.
// The rest remain pending.
static void retry_all_lock_requests(locktree *lt);
static void retry_all_lock_requests(
locktree *lt,
void (*after_retry_test_callback)(void) = nullptr);
static void retry_all_lock_requests_info(lt_lock_request_info *info);
void set_start_test_callback(void (*f)(void));
void set_start_before_pending_test_callback(void (*f)(void));
void set_retry_test_callback(void (*f)(void));
private:
void *get_extra(void) const;
void kill_waiter(void);
static void kill_waiter(locktree *lt, void *extra);
private:
enum state {
UNINITIALIZED,
INITIALIZED,
@ -152,6 +162,8 @@ private:
// locktree that this lock request is for.
struct lt_lock_request_info *m_info;
void *m_extra;
// effect: tries again to acquire the lock described by this lock request
// returns: 0 if retrying the request succeeded and is now complete
int retry(void);
@ -184,9 +196,10 @@ private:
void copy_keys(void);
static int find_by_txnid(lock_request * const &request, const TXNID &txnid);
static int find_by_txnid(lock_request *const &request, const TXNID &txnid);
void (*m_start_test_callback)(void);
void (*m_start_before_pending_test_callback)(void);
void (*m_retry_test_callback)(void);
friend class lock_request_unit_test;

View File

@ -80,21 +80,24 @@ void locktree::create(locktree_manager *mgr, DICTIONARY_ID dict_id, const compar
m_sto_end_early_count = 0;
m_sto_end_early_time = 0;
m_lock_request_info.pending_lock_requests.create();
ZERO_STRUCT(m_lock_request_info.mutex);
toku_mutex_init(&m_lock_request_info.mutex, nullptr);
m_lock_request_info.should_retry_lock_requests = false;
ZERO_STRUCT(m_lock_request_info.counters);
m_lock_request_info.init();
}
// Threads read the should retry bit without a lock
// for performance. It's ok to read the wrong value.
// - If you think you should but you shouldn't, you waste a little time.
// - If you think you shouldn't but you should, then some other thread
// will come around to do the work of retrying requests instead of you.
TOKU_VALGRIND_HG_DISABLE_CHECKING(
&m_lock_request_info.should_retry_lock_requests,
sizeof(m_lock_request_info.should_retry_lock_requests));
TOKU_DRD_IGNORE_VAR(m_lock_request_info.should_retry_lock_requests);
void lt_lock_request_info::init(void) {
pending_lock_requests.create();
pending_is_empty = true;
ZERO_STRUCT(mutex);
toku_mutex_init(&mutex, nullptr);
retry_want = retry_done = 0;
ZERO_STRUCT(counters);
ZERO_STRUCT(retry_mutex);
toku_mutex_init(&retry_mutex, nullptr);
toku_cond_init(&retry_cv, nullptr);
running_retry = false;
TOKU_VALGRIND_HG_DISABLE_CHECKING(&pending_is_empty,
sizeof(pending_is_empty));
TOKU_DRD_IGNORE_VAR(pending_is_empty);
}
void locktree::destroy(void) {
@ -104,11 +107,18 @@ void locktree::destroy(void) {
m_rangetree->destroy();
toku_free(m_rangetree);
m_sto_buffer.destroy();
m_lock_request_info.pending_lock_requests.destroy();
m_lock_request_info.destroy();
}
void lt_lock_request_info::destroy(void) {
pending_lock_requests.destroy();
toku_mutex_destroy(&mutex);
toku_mutex_destroy(&retry_mutex);
toku_cond_destroy(&retry_cv);
}
void locktree::add_reference(void) {
(void) toku_sync_add_and_fetch(&m_reference_count, 1);
(void)toku_sync_add_and_fetch(&m_reference_count, 1);
}
uint32_t locktree::release_reference(void) {

View File

@ -38,12 +38,14 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#pragma once
#include <db.h>
#include <toku_time.h>
#include <toku_pthread.h>
#include <atomic>
#include <db.h>
#include <toku_pthread.h>
#include <toku_time.h>
#include <ft/ft-ops.h> // just for DICTIONARY_ID..
#include <ft/comparator.h>
#include <ft/ft-ops.h> // just for DICTIONARY_ID..
#include <util/omt.h>
@ -80,20 +82,33 @@ namespace toku {
// Lock request state for some locktree
struct lt_lock_request_info {
omt<lock_request *> pending_lock_requests;
std::atomic_bool pending_is_empty;
toku_mutex_t mutex;
bool should_retry_lock_requests;
lt_counters counters;
std::atomic_ullong retry_want;
unsigned long long retry_done;
toku_mutex_t retry_mutex;
toku_cond_t retry_cv;
bool running_retry;
void init(void);
void destroy(void);
};
// The locktree manager manages a set of locktrees, one for each open dictionary.
// Locktrees are retrieved from the manager. When they are no longer needed, they
// are released by the user.
// The locktree manager manages a set of locktrees, one for each open
// dictionary. Locktrees are retrieved from the manager. When they are no
// longer needed, they are released by the user.
class locktree_manager {
public:
public:
// param: create_cb, called just after a locktree is first created.
// destroy_cb, called just before a locktree is destroyed.
// escalate_cb, called after a locktree is escalated (with extra param)
void create(lt_create_cb create_cb, lt_destroy_cb destroy_cb, lt_escalate_cb escalate_cb, void *extra);
// escalate_cb, called after a locktree is escalated (with extra
// param)
void create(lt_create_cb create_cb,
lt_destroy_cb destroy_cb,
lt_escalate_cb escalate_cb,
void *extra);
void destroy(void);
@ -159,6 +174,8 @@ namespace toku {
// Add time t to the escalator's wait time statistics
void add_escalator_wait_time(uint64_t t);
void kill_waiter(void *extra);
private:
static const uint64_t DEFAULT_MAX_LOCK_MEMORY = 64L * 1024 * 1024;

View File

@ -483,4 +483,17 @@ void locktree_manager::get_status(LTM_STATUS statp) {
*statp = ltm_status;
}
void locktree_manager::kill_waiter(void *extra) {
mutex_lock();
int r = 0;
size_t num_locktrees = m_locktree_map.size();
for (size_t i = 0; i < num_locktrees; i++) {
locktree *lt;
r = m_locktree_map.fetch(i, &lt);
invariant_zero(r);
lock_request::kill_waiter(lt, extra);
}
mutex_unlock();
}
} /* namespace toku */

View File

@ -0,0 +1,100 @@
/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
// test the lock manager kill waiter function
#include "locktree.h"
#include "lock_request.h"
#include "test.h"
#include "locktree_unit_test.h"
#include <thread>
#include <atomic>
namespace toku {
const uint64_t my_lock_wait_time = 1000 * 1000;
const uint64_t my_killed_time = 500 * 1000;
const int n_locks = 4;
static int my_killed_callback(void) {
if (1) fprintf(stderr, "%s:%u %s\n", __FILE__, __LINE__, __FUNCTION__);
return 0;
}
static void locktree_release_lock(locktree *lt, TXNID txn_id, const DBT *left, const DBT *right) {
range_buffer buffer;
buffer.create();
buffer.append(left, right);
lt->release_locks(txn_id, &buffer);
buffer.destroy();
}
static void wait_lock(lock_request *lr, std::atomic_int *done) {
int r = lr->wait(my_lock_wait_time, my_killed_time, my_killed_callback);
assert(r == DB_LOCK_NOTGRANTED);
*done = 1;
}
static void test_kill_waiter(void) {
int r;
locktree_manager mgr;
mgr.create(nullptr, nullptr, nullptr, nullptr);
DICTIONARY_ID dict_id = { 1 };
locktree *lt = mgr.get_lt(dict_id, dbt_comparator, nullptr);
const DBT *one = get_dbt(1);
lock_request locks[n_locks];
std::thread waiters[n_locks-1];
for (int i = 0; i < n_locks; i++) {
locks[i].create();
locks[i].set(lt, i+1, one, one, lock_request::type::WRITE, false, &waiters[i]);
}
// txn 'n_locks' grabs the lock
r = locks[n_locks-1].start();
assert_zero(r);
for (int i = 0; i < n_locks-1; i++) {
r = locks[i].start();
assert(r == DB_LOCK_NOTGRANTED);
}
std::atomic_int done[n_locks-1];
for (int i = 0; i < n_locks-1; i++) {
done[i] = 0;
waiters[i] = std::thread(wait_lock, &locks[i], &done[i]);
}
for (int i = 0; i < n_locks-1; i++) {
assert(!done[i]);
}
sleep(1);
for (int i = 0; i < n_locks-1; i++) {
mgr.kill_waiter(&waiters[i]);
while (!done[i]) sleep(1);
waiters[i].join();
for (int j = i+1; j < n_locks-1; j++)
assert(!done[j]);
}
locktree_release_lock(lt, n_locks, one, one);
for (int i = 0; i < n_locks; i++) {
locks[i].destroy();
}
mgr.release_lt(lt);
mgr.destroy();
}
} /* namespace toku */
int main(void) {
toku::test_kill_waiter();
return 0;
}

View File

@ -51,8 +51,9 @@ static uint64_t t_do_kill;
static int my_killed_callback(void) {
uint64_t t_now = toku_current_time_microsec();
if (t_now == t_last_kill)
return 0;
assert(t_now >= t_last_kill);
assert(t_now - t_last_kill >= my_killed_time * 1000 / 2); // div by 2 for valgrind which is not very accurate
t_last_kill = t_now;
killed_calls++;
if (t_now >= t_do_kill)

View File

@ -52,7 +52,6 @@ static uint64_t t_last_kill;
static int my_killed_callback(void) {
uint64_t t_now = toku_current_time_microsec();
assert(t_now >= t_last_kill);
assert(t_now - t_last_kill >= my_killed_time * 1000 / 2); // div by 2 for valgrind which is not very accurate
t_last_kill = t_now;
killed_calls++;
return 0;

View File

@ -0,0 +1,91 @@
/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
// test the race between start, release, and wait. since start does not put
// its lock request into the pending set, the blocking txn could release its
// lock before the first txn waits. this will block the first txn because its
// lock request is not known when the lock is released. the bug fix is to try
// again when lock retries are locked out.
#include "lock_request.h"
#include <atomic>
#include <thread>
#include "locktree.h"
#include "locktree_unit_test.h"
#include "test.h"
namespace toku {
const uint64_t my_lock_wait_time = 1000 * 1000; // ms
const uint64_t my_killed_time = 1 * 1000; // ms
static uint64_t t_wait;
static int my_killed_callback(void) {
uint64_t t_now = toku_current_time_microsec();
assert(t_now >= t_wait);
if (t_now - t_wait >= my_killed_time * 1000)
abort();
return 0;
}
static void locktree_release_lock(locktree *lt,
TXNID txn_id,
const DBT *left,
const DBT *right) {
range_buffer buffer;
buffer.create();
buffer.append(left, right);
lt->release_locks(txn_id, &buffer);
buffer.destroy();
}
static void test_start_release_wait(void) {
int r;
locktree_manager mgr;
mgr.create(nullptr, nullptr, nullptr, nullptr);
DICTIONARY_ID dict_id = {1};
locktree *lt = mgr.get_lt(dict_id, dbt_comparator, nullptr);
const DBT *one = get_dbt(1);
// a locks one
lock_request a;
a.create();
a.set(lt, 1, one, one, lock_request::type::WRITE, false);
r = a.start();
assert(r == 0);
// b tries to lock one, fails
lock_request b;
b.create();
b.set(lt, 2, one, one, lock_request::type::WRITE, false);
r = b.start();
assert(r == DB_LOCK_NOTGRANTED);
// a releases its lock
locktree_release_lock(lt, 1, one, one);
// b waits for one, gets the lock immediately
t_wait = toku_current_time_microsec();
r = b.wait(my_lock_wait_time, my_killed_time, my_killed_callback);
assert(r == 0);
// b releases its lock so we can exit cleanly
locktree_release_lock(lt, 2, one, one);
a.destroy();
b.destroy();
mgr.release_lt(lt);
mgr.destroy();
}
} /* namespace toku */
int main(void) {
toku::test_start_release_wait();
return 0;
}

View File

@ -34,12 +34,14 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
======= */
#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
#ident \
"Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
#include <iostream>
#include "test.h"
#include "locktree.h"
#include "lock_request.h"
#include <iostream>
#include <thread>
#include "locktree.h"
#include "test.h"
// Test FT-633, the data race on the lock request between ::start and ::retry
// This test is non-deterministic. It uses sleeps at 2 critical places to
@ -47,90 +49,65 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
namespace toku {
struct locker_arg {
locktree *_lt;
TXNID _id;
const DBT *_key;
static void locker_callback(void) { usleep(10000); }
locker_arg(locktree *lt, TXNID id, const DBT *key) : _lt(lt), _id(id), _key(key) {
}
};
static void run_locker(locktree *lt, TXNID txnid, const DBT *key) {
int i;
for (i = 0; i < 1000; i++) {
lock_request request;
request.create();
static void locker_callback(void) {
usleep(10000);
}
request.set(lt, txnid, key, key, lock_request::type::WRITE, false);
static void run_locker(locktree *lt, TXNID txnid, const DBT *key) {
int i;
for (i = 0; i < 1000; i++) {
// set the test callbacks
request.set_start_test_callback(locker_callback);
request.set_retry_test_callback(locker_callback);
lock_request request;
request.create();
// try to acquire the lock
int r = request.start();
if (r == DB_LOCK_NOTGRANTED) {
// wait for the lock to be granted
r = request.wait(10 * 1000);
}
request.set(lt, txnid, key, key, lock_request::type::WRITE, false);
if (r == 0) {
// release the lock
range_buffer buffer;
buffer.create();
buffer.append(key, key);
lt->release_locks(txnid, &buffer);
buffer.destroy();
// set the test callbacks
request.set_start_test_callback(locker_callback);
request.set_retry_test_callback(locker_callback);
// retry pending lock requests
lock_request::retry_all_lock_requests(lt);
}
// try to acquire the lock
int r = request.start();
if (r == DB_LOCK_NOTGRANTED) {
// wait for the lock to be granted
r = request.wait(10 * 1000);
request.destroy();
memset(&request, 0xab, sizeof request);
toku_pthread_yield();
if ((i % 10) == 0)
std::cerr << std::this_thread::get_id() << " " << i
<< std::endl;
}
if (r == 0) {
// release the lock
range_buffer buffer;
buffer.create();
buffer.append(key, key);
lt->release_locks(txnid, &buffer);
buffer.destroy();
// retry pending lock requests
lock_request::retry_all_lock_requests(lt);
}
request.destroy();
memset(&request, 0xab, sizeof request);
toku_pthread_yield();
if ((i % 10) == 0)
std::cout << toku_pthread_self() << " " << i << std::endl;
}
}
static void *locker(void *v_arg) {
locker_arg *arg = static_cast<locker_arg *>(v_arg);
run_locker(arg->_lt, arg->_id, arg->_key);
return arg;
}
} /* namespace toku */
int main(void) {
int r;
toku::locktree lt;
DICTIONARY_ID dict_id = { 1 };
DICTIONARY_ID dict_id = {1};
lt.create(nullptr, dict_id, toku::dbt_comparator);
const DBT *one = toku::get_dbt(1);
const int n_workers = 2;
toku_pthread_t ids[n_workers];
std::thread worker[n_workers];
for (int i = 0; i < n_workers; i++) {
toku::locker_arg *arg = new toku::locker_arg(&lt, i, one);
r = toku_pthread_create(&ids[i], nullptr, toku::locker, arg);
assert_zero(r);
worker[i] = std::thread(toku::run_locker, &lt, i, one);
}
for (int i = 0; i < n_workers; i++) {
void *ret;
r = toku_pthread_join(ids[i], &ret);
assert_zero(r);
toku::locker_arg *arg = static_cast<toku::locker_arg *>(ret);
delete arg;
worker[i].join();
}
lt.release_reference();

View File

@ -0,0 +1,133 @@
/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
#ident "$Id$"
/*======
This file is part of PerconaFT.
Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
PerconaFT is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License, version 2,
as published by the Free Software Foundation.
PerconaFT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
----------------------------------------
PerconaFT is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License, version 3,
as published by the Free Software Foundation.
PerconaFT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
======= */
#ident \
"Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
#include "lock_request.h"
#include <pthread.h>
#include <iostream>
#include <thread>
#include "locktree.h"
#include "test.h"
// Suppose that 3 threads are running a lock acquire, release, retry sequence.
// There is a race in the retry algorithm with 2 threads running lock retry
// simultaneously. The first thread to run retry sets a flag that will cause
// the second thread to skip the lock retries. If the first thread progressed
// past the contended lock, then the second thread will HANG until its lock timer
// pops, even when the contended lock is no longer held.
// This test exposes this problem as a test hang. The group retry algorithm
// fixes the race in the lock request retry algorithm and this test should no
// longer hang.
namespace toku {
// use 1000 when after_retry_all is implemented, otherwise use 100000
static const int n_tests = 1000; // 100000;
static void after_retry_all(void) { usleep(10000); }
static void run_locker(locktree *lt,
TXNID txnid,
const DBT *key,
pthread_barrier_t *b) {
for (int i = 0; i < n_tests; i++) {
int r;
r = pthread_barrier_wait(b);
assert(r == 0 || r == PTHREAD_BARRIER_SERIAL_THREAD);
lock_request request;
request.create();
request.set(lt, txnid, key, key, lock_request::type::WRITE, false);
// try to acquire the lock
r = request.start();
if (r == DB_LOCK_NOTGRANTED) {
// wait for the lock to be granted
r = request.wait(1000 * 1000);
}
if (r == 0) {
// release the lock
range_buffer buffer;
buffer.create();
buffer.append(key, key);
lt->release_locks(txnid, &buffer);
buffer.destroy();
// retry pending lock requests
lock_request::retry_all_lock_requests(lt, after_retry_all);
}
request.destroy();
memset(&request, 0xab, sizeof request);
toku_pthread_yield();
if ((i % 10) == 0)
std::cerr << std::this_thread::get_id() << " " << i
<< std::endl;
}
}
} /* namespace toku */
int main(void) {
toku::locktree lt;
DICTIONARY_ID dict_id = {1};
lt.create(nullptr, dict_id, toku::dbt_comparator);
const DBT *one = toku::get_dbt(1);
const int n_workers = 3;
std::thread worker[n_workers];
pthread_barrier_t b;
int r = pthread_barrier_init(&b, nullptr, n_workers);
assert(r == 0);
for (int i = 0; i < n_workers; i++) {
worker[i] = std::thread(toku::run_locker, &lt, i, one, &b);
}
for (int i = 0; i < n_workers; i++) {
worker[i].join();
}
r = pthread_barrier_destroy(&b);
assert(r == 0);
lt.release_reference();
lt.destroy();
return 0;
}

View File

@ -0,0 +1,135 @@
/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
#ident "$Id$"
/*======
This file is part of PerconaFT.
Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
PerconaFT is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License, version 2,
as published by the Free Software Foundation.
PerconaFT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
----------------------------------------
PerconaFT is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License, version 3,
as published by the Free Software Foundation.
PerconaFT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
======= */
#ident \
"Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
#include "lock_request.h"
#include <pthread.h>
#include <iostream>
#include <thread>
#include "locktree.h"
#include "test.h"
// Suppose that 2 threads are running a lock acquire, release, retry sequence.
// There is a race between the acquire and the release with 2 threads.
// If thread 1 acquires a lock, and thread 2 tries to acquire the same lock and
// fails, thread 1 may release its lock and retry pending lock requests BEFORE
// thread 2 adds itself to the pending lock requests. If this happens, then
// thread 2 will HANG until its lock timer expires even when the lock it is
// waiting for is FREE.
// This test exposes this problem as a test hang. If the race is fixed, then
// the test runs to completion.
namespace toku {
static void start_before_pending(void) { usleep(10000); }
static void run_locker(locktree *lt,
TXNID txnid,
const DBT *key,
pthread_barrier_t *b) {
for (int i = 0; i < 100000; i++) {
int r;
r = pthread_barrier_wait(b);
assert(r == 0 || r == PTHREAD_BARRIER_SERIAL_THREAD);
lock_request request;
request.create();
request.set(lt, txnid, key, key, lock_request::type::WRITE, false);
// if the callback is included, then the race is easy to reproduce.
// Otherwise, several test runs may be required before the race
// happens.
request.set_start_before_pending_test_callback(
start_before_pending);
// try to acquire the lock
r = request.start();
if (r == DB_LOCK_NOTGRANTED) {
// wait for the lock to be granted
r = request.wait(1000 * 1000);
}
if (r == 0) {
// release the lock
range_buffer buffer;
buffer.create();
buffer.append(key, key);
lt->release_locks(txnid, &buffer);
buffer.destroy();
// retry pending lock requests
lock_request::retry_all_lock_requests(lt);
}
request.destroy();
memset(&request, 0xab, sizeof request);
toku_pthread_yield();
if ((i % 10) == 0)
std::cerr << std::this_thread::get_id() << " " << i
<< std::endl;
}
}
} /* namespace toku */
int main(void) {
toku::locktree lt;
DICTIONARY_ID dict_id = {1};
lt.create(nullptr, dict_id, toku::dbt_comparator);
const DBT *one = toku::get_dbt(1);
const int n_workers = 2;
std::thread worker[n_workers];
pthread_barrier_t b;
int r = pthread_barrier_init(&b, nullptr, n_workers);
assert(r == 0);
for (int i = 0; i < n_workers; i++) {
worker[i] = std::thread(toku::run_locker, &lt, i, one, &b);
}
for (int i = 0; i < n_workers; i++) {
worker[i].join();
}
r = pthread_barrier_destroy(&b);
assert(r == 0);
lt.release_reference();
lt.destroy();
return 0;
}

View File

@ -0,0 +1,77 @@
/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
#ident "$Id$"
/*======
This file is part of PerconaFT.
Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
PerconaFT is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License, version 2,
as published by the Free Software Foundation.
PerconaFT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
----------------------------------------
PerconaFT is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License, version 3,
as published by the Free Software Foundation.
PerconaFT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
======= */
#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
#pragma once
struct tokutxn;
#if defined(ENABLED_DEBUG_SYNC)
/*
  The macros below are defined in my_global.h, which is included via
  m_string.h. The same macros are also passed as compiler options by
  TokuSetupCompiler.cmake, so undefine them here to avoid build errors.
*/
#undef __STDC_FORMAT_MACROS
#undef __STDC_LIMIT_MACROS
#include "m_string.h"
#include "debug_sync.h"
void toku_txn_get_client_id(struct tokutxn *txn,
uint64_t *client_id,
void **client_extra);
inline void toku_debug_sync(struct tokutxn *txn, const char *sync_point_name) {
uint64_t client_id;
void *client_extra;
THD *thd;
if (likely(!opt_debug_sync_timeout))
return;
toku_txn_get_client_id(txn, &client_id, &client_extra);
thd = reinterpret_cast<THD *>(client_extra);
DEBUG_SYNC(thd, sync_point_name);
}
#else // defined(ENABLED_DEBUG_SYNC)
inline void toku_debug_sync(struct tokutxn *, const char *) {}
#endif // defined(ENABLED_DEBUG_SYNC)
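// Usage sketch (the sync-point name matches the ydb_row_lock.cc hunk
// further down; treating `txn` as the current tokutxn is the only
// assumption made here):
//
//     toku_debug_sync(txn, "toku_range_lock_before_wait");
//
// In a debug build this pauses at the named sync point whenever a test
// has armed it via SET DEBUG_SYNC; it is a no-op otherwise.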

View File

@ -121,6 +121,7 @@ typedef int64_t toku_off_t;
#include "toku_htod.h"
#include "toku_assert.h"
#include "toku_crash.h"
#include "toku_debug_sync.h"
#define UU(x) x __attribute__((__unused__))
@ -183,8 +184,10 @@ extern void *realloc(void*, size_t) __THROW __attribute__((__deprecat
# pragma GCC poison u_int32_t
# pragma GCC poison u_int64_t
# pragma GCC poison BOOL
#if !defined(MYSQL_TOKUDB_ENGINE)
# pragma GCC poison FALSE
# pragma GCC poison TRUE
#endif // MYSQL_TOKUDB_ENGINE
#endif
#pragma GCC poison __sync_fetch_and_add
#pragma GCC poison __sync_fetch_and_sub

View File

@ -55,7 +55,8 @@ static int iterate_callback(DB_TXN *txn,
iterate_row_locks_callback iterate_locks,
void *locks_extra, void *extra) {
uint64_t txnid = txn->id64(txn);
uint64_t client_id = txn->get_client_id(txn);
uint64_t client_id; void *client_extra;
txn->get_client_id(txn, &client_id, &client_extra);
iterate_extra *info = reinterpret_cast<iterate_extra *>(extra);
DB *db;
DBT left_key, right_key;
@ -93,13 +94,13 @@ int test_main(int UU(argc), char *const UU(argv[])) {
r = env->open(env, TOKU_TEST_FILENAME, env_flags, 0755); CKERR(r);
r = env->txn_begin(env, NULL, &txn1, 0); CKERR(r);
txn1->set_client_id(txn1, 0);
txn1->set_client_id(txn1, 0, nullptr);
txnid1 = txn1->id64(txn1);
r = env->txn_begin(env, NULL, &txn2, 0); CKERR(r);
txn2->set_client_id(txn2, 1);
txn2->set_client_id(txn2, 1, nullptr);
txnid2 = txn2->id64(txn2);
r = env->txn_begin(env, NULL, &txn3, 0); CKERR(r);
txn3->set_client_id(txn3, 2);
txn3->set_client_id(txn3, 2, nullptr);
txnid3 = txn3->id64(txn3);
{

View File

@ -93,7 +93,8 @@ static int iterate_txns(DB_TXN *txn,
iterate_row_locks_callback iterate_locks,
void *locks_extra, void *extra) {
uint64_t txnid = txn->id64(txn);
uint64_t client_id = txn->get_client_id(txn);
uint64_t client_id; void *client_extra;
txn->get_client_id(txn, &client_id, &client_extra);
invariant_null(extra);
invariant(txnid > 0);
invariant(client_id == 0);

View File

@ -87,6 +87,7 @@ setup (void) {
else error_file = stderr;
r=db_env_create(&env, 0); CKERR(r);
env->set_dir_per_db(env, true);
env->set_errfile(env, error_file ? error_file : stderr);
r=env->open(env, TOKU_TEST_FILENAME, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
}
@ -431,6 +432,14 @@ test_fileops_3(void) {
r = env->dbrename(env, txn_a, "a.db", NULL, "d.db", 0);
CKERR2(r, EEXIST);
// verify correct error return code when trying to
// rename a dictionary to a name that is beyond the limit
// of the operating system.
char longname[FILENAME_MAX+11];
memset(longname, 'b', FILENAME_MAX+7);
memcpy(longname+FILENAME_MAX+7, ".db", 4);
r = env->dbrename(env, txn_a, "a.db", NULL, longname, 0);
CKERR2(r, ENAMETOOLONG);
r=txn_a->abort(txn_a); CKERR(r);
}

View File

@ -2775,6 +2775,10 @@ static void env_set_killed_callback(DB_ENV *env, uint64_t default_killed_time_ms
env->i->killed_callback = killed_callback;
}
static void env_kill_waiter(DB_ENV *env, void *extra) {
env->i->ltm.kill_waiter(extra);
}
static void env_do_backtrace(DB_ENV *env) {
if (env->i->errcall) {
db_env_do_backtrace_errfunc((toku_env_err_func) toku_env_err, (const void *) env);
@ -2877,6 +2881,7 @@ toku_env_create(DB_ENV ** envp, uint32_t flags) {
USENV(set_dir_per_db);
USENV(get_dir_per_db);
USENV(get_data_dir);
USENV(kill_waiter);
#undef USENV
// unlocked methods
@ -3061,28 +3066,31 @@ env_dbremove_subdb(DB_ENV * env, DB_TXN * txn, const char *fname, const char *db
// see if we can acquire a table lock for the given dname.
// requires: write lock on dname in the directory. dictionary
// open, close, and begin checkpoint cannot occur.
// returns: true if we could open, lock, and close a dictionary
// with the given dname, false otherwise.
static bool
// returns: zero if we could open, lock, and close a dictionary
// with the given dname, errno otherwise.
static int
can_acquire_table_lock(DB_ENV *env, DB_TXN *txn, const char *iname_in_env) {
int r;
bool got_lock = false;
DB *db;
r = toku_db_create(&db, env, 0);
assert_zero(r);
r = toku_db_open_iname(db, txn, iname_in_env, 0, 0);
assert_zero(r);
r = toku_db_pre_acquire_table_lock(db, txn);
if (r == 0) {
got_lock = true;
} else {
got_lock = false;
if (r) {
if (r == ENAMETOOLONG)
toku_ydb_do_error(env, r, "File name too long!\n");
goto exit;
}
r = toku_db_close(db);
assert_zero(r);
return got_lock;
r = toku_db_pre_acquire_table_lock(db, txn);
if (r) {
r = DB_LOCK_NOTGRANTED;
}
exit:
if (db) {
int r2 = toku_db_close(db);
assert_zero(r2);
}
return r;
}
static int
@ -3295,8 +3303,8 @@ env_dbrename(DB_ENV *env, DB_TXN *txn, const char *fname, const char *dbname, co
// otherwise, we're okay in marking this ft as remove on
// commit. no new handles can open for this dictionary
// because the txn has directory write locks on the dname
if (txn && !can_acquire_table_lock(env, txn, new_iname.get())) {
r = DB_LOCK_NOTGRANTED;
if (txn) {
r = can_acquire_table_lock(env, txn, new_iname.get());
}
// We don't do anything at the ft or cachetable layer for rename.
// We just update entries in the environment's directory.

View File

@ -181,7 +181,16 @@ int toku_db_get_range_lock(DB *db, DB_TXN *txn, const DBT *left_key, const DBT *
request.create();
int r = toku_db_start_range_lock(db, txn, left_key, right_key, lock_type, &request);
if (r == DB_LOCK_NOTGRANTED) {
toku_debug_sync(db_txn_struct_i(txn)->tokutxn,
"toku_range_lock_before_wait");
r = toku_db_wait_range_lock(db, txn, &request);
if (r == DB_LOCK_NOTGRANTED)
toku_debug_sync(db_txn_struct_i(txn)->tokutxn,
"toku_range_lock_not_granted_after_wait");
}
else if (r == 0) {
toku_debug_sync(db_txn_struct_i(txn)->tokutxn,
"toku_range_lock_granted_immediately");
}
request.destroy();
@ -191,9 +200,13 @@ int toku_db_get_range_lock(DB *db, DB_TXN *txn, const DBT *left_key, const DBT *
// Setup and start an asynchronous lock request.
int toku_db_start_range_lock(DB *db, DB_TXN *txn, const DBT *left_key, const DBT *right_key,
toku::lock_request::type lock_type, toku::lock_request *request) {
uint64_t client_id;
void *client_extra;
DB_TXN *txn_anc = txn_oldest_ancester(txn);
TXNID txn_anc_id = txn_anc->id64(txn_anc);
request->set(db->i->lt, txn_anc_id, left_key, right_key, lock_type, toku_is_big_txn(txn_anc));
txn->get_client_id(txn, &client_id, &client_extra);
request->set(db->i->lt, txn_anc_id, left_key, right_key, lock_type,
toku_is_big_txn(txn_anc), client_extra);
const int r = request->start();
if (r == 0) {
@ -241,6 +254,8 @@ int toku_db_get_point_write_lock(DB *db, DB_TXN *txn, const DBT *key) {
// acquire a point write lock on the key for a given txn.
// this does not block the calling thread.
void toku_db_grab_write_lock (DB *db, DBT *key, TOKUTXN tokutxn) {
uint64_t client_id;
void *client_extra;
DB_TXN *txn = toku_txn_get_container_db_txn(tokutxn);
DB_TXN *txn_anc = txn_oldest_ancester(txn);
TXNID txn_anc_id = txn_anc->id64(txn_anc);
@ -248,7 +263,10 @@ void toku_db_grab_write_lock (DB *db, DBT *key, TOKUTXN tokutxn) {
// This lock request must succeed, so we do not want to wait
toku::lock_request request;
request.create();
request.set(db->i->lt, txn_anc_id, key, key, toku::lock_request::type::WRITE, toku_is_big_txn(txn_anc));
txn->get_client_id(txn, &client_id, &client_extra);
request.set(db->i->lt, txn_anc_id, key, key,
toku::lock_request::type::WRITE, toku_is_big_txn(txn_anc),
client_extra);
int r = request.start();
invariant_zero(r);
db_txn_note_row_lock(db, txn_anc, key, key);

View File

@ -323,12 +323,12 @@ int locked_txn_abort(DB_TXN *txn) {
return r;
}
static void locked_txn_set_client_id(DB_TXN *txn, uint64_t client_id) {
toku_txn_set_client_id(db_txn_struct_i(txn)->tokutxn, client_id);
static void locked_txn_set_client_id(DB_TXN *txn, uint64_t client_id, void *client_extra) {
toku_txn_set_client_id(db_txn_struct_i(txn)->tokutxn, client_id, client_extra);
}
static uint64_t locked_txn_get_client_id(DB_TXN *txn) {
return toku_txn_get_client_id(db_txn_struct_i(txn)->tokutxn);
static void locked_txn_get_client_id(DB_TXN *txn, uint64_t *client_id, void **client_extra) {
toku_txn_get_client_id(db_txn_struct_i(txn)->tokutxn, client_id, client_extra);
}
static int toku_txn_discard(DB_TXN *txn, uint32_t flags) {
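// Taken together with the ydb_row_lock.cc hunk above, the client-id API
// now carries an opaque client_extra pointer alongside the numeric id.
// A minimal sketch of the calling convention (the set_client_id call in
// the handler layer is an assumption; the getter side matches the
// iterate-callback tests above):
//
//     txn->set_client_id(txn, thd_get_thread_id(thd), thd);  // assumed
//     uint64_t client_id;
//     void *client_extra;
//     txn->get_client_id(txn, &client_id, &client_extra);
//     THD *owner = reinterpret_cast<THD *>(client_extra);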

View File

@ -6,6 +6,14 @@ foreach(tool ${tools})
add_dependencies(${tool} install_tdb_h)
target_link_libraries(${tool} ${LIBTOKUDB}_static ft_static z lzma snappy ${LIBTOKUPORTABILITY}_static ${CMAKE_THREAD_LIBS_INIT} ${EXTRA_SYSTEM_LIBS})
# detect when we are being built as a subproject
if (DEFINED MYSQL_PROJECT_NAME_DOCSTRING)
if ((CMAKE_BUILD_TYPE MATCHES "Debug") AND
(CMAKE_CXX_FLAGS_DEBUG MATCHES " -DENABLED_DEBUG_SYNC"))
target_link_libraries(${tool} sql)
endif()
endif ()
add_space_separated_property(TARGET ${tool} COMPILE_FLAGS -fvisibility=hidden)
endforeach(tool)

View File

@ -325,7 +325,7 @@ check_block(BLOCKNUM blocknum, int64_t UU(blocksize), int64_t UU(address), void
}
just_decompress_sub_block(&sb);
r = verify_ftnode_sub_block(&sb);
r = verify_ftnode_sub_block(&sb, nullptr, blocknum);
if (r != 0) {
printf(" Uncompressed child partition %d checksum failed.\n", i);
failure++;

View File

@ -158,7 +158,8 @@ static void dump_descriptor(DESCRIPTOR d) {
static void open_header(int fd, FT *header, CACHEFILE cf) {
FT ft = NULL;
int r;
r = toku_deserialize_ft_from (fd, MAX_LSN, &ft);
const char *fn = toku_cachefile_fname_in_env(cf);
r = toku_deserialize_ft_from (fd, fn, MAX_LSN, &ft);
if (r != 0) {
fprintf(stderr, "%s: can not deserialize from %s error %d\n", arg0, fname, r);
exit(1);

View File

@ -532,51 +532,6 @@ typedef struct index_read_info {
DBT* orig_key;
} *INDEX_READ_INFO;
static int ai_poll_fun(void *extra, float progress) {
LOADER_CONTEXT context = (LOADER_CONTEXT)extra;
if (thd_killed(context->thd)) {
sprintf(context->write_status_msg, "The process has been killed, aborting add index.");
return ER_ABORTING_CONNECTION;
}
float percentage = progress * 100;
sprintf(context->write_status_msg, "Adding of indexes about %.1f%% done", percentage);
thd_proc_info(context->thd, context->write_status_msg);
#ifdef HA_TOKUDB_HAS_THD_PROGRESS
thd_progress_report(context->thd, (unsigned long long) percentage, 100);
#endif
return 0;
}
static int loader_poll_fun(void *extra, float progress) {
LOADER_CONTEXT context = (LOADER_CONTEXT)extra;
if (thd_killed(context->thd)) {
sprintf(context->write_status_msg, "The process has been killed, aborting bulk load.");
return ER_ABORTING_CONNECTION;
}
float percentage = progress * 100;
sprintf(context->write_status_msg, "Loading of data about %.1f%% done", percentage);
thd_proc_info(context->thd, context->write_status_msg);
#ifdef HA_TOKUDB_HAS_THD_PROGRESS
thd_progress_report(context->thd, (unsigned long long) percentage, 100);
#endif
return 0;
}
static void loader_ai_err_fun(DB *db, int i, int err, DBT *key, DBT *val, void *error_extra) {
LOADER_CONTEXT context = (LOADER_CONTEXT)error_extra;
assert_always(context->ha);
context->ha->set_loader_error(err);
}
static void loader_dup_fun(DB *db, int i, int err, DBT *key, DBT *val, void *error_extra) {
LOADER_CONTEXT context = (LOADER_CONTEXT)error_extra;
assert_always(context->ha);
context->ha->set_loader_error(err);
if (err == DB_KEYEXIST) {
context->ha->set_dup_value_for_pk(key);
}
}
//
// smart DBT callback function for optimize
// in optimize, we want to flatten DB by doing
@ -3397,11 +3352,13 @@ void ha_tokudb::start_bulk_insert(ha_rows rows) {
lc.thd = thd;
lc.ha = this;
error = loader->set_poll_function(loader, loader_poll_fun, &lc);
error = loader->set_poll_function(
loader, ha_tokudb::bulk_insert_poll, &lc);
assert_always(!error);
error = loader->set_error_callback(loader, loader_dup_fun, &lc);
error = loader->set_error_callback(
loader, ha_tokudb::loader_dup, &lc);
assert_always(!error);
trx->stmt_progress.using_loader = true;
@ -3414,6 +3371,47 @@ void ha_tokudb::start_bulk_insert(ha_rows rows) {
}
TOKUDB_HANDLER_DBUG_VOID_RETURN;
}
int ha_tokudb::bulk_insert_poll(void* extra, float progress) {
LOADER_CONTEXT context = (LOADER_CONTEXT)extra;
if (thd_killed(context->thd)) {
sprintf(context->write_status_msg,
"The process has been killed, aborting bulk load.");
return ER_ABORTING_CONNECTION;
}
float percentage = progress * 100;
sprintf(context->write_status_msg,
"Loading of data t %s about %.1f%% done",
context->ha->share->full_table_name(),
percentage);
thd_proc_info(context->thd, context->write_status_msg);
#ifdef HA_TOKUDB_HAS_THD_PROGRESS
thd_progress_report(context->thd, (unsigned long long)percentage, 100);
#endif
return 0;
}
void ha_tokudb::loader_add_index_err(DB* db,
int i,
int err,
DBT* key,
DBT* val,
void* error_extra) {
LOADER_CONTEXT context = (LOADER_CONTEXT)error_extra;
assert_always(context->ha);
context->ha->set_loader_error(err);
}
void ha_tokudb::loader_dup(DB* db,
int i,
int err,
DBT* key,
DBT* val,
void* error_extra) {
LOADER_CONTEXT context = (LOADER_CONTEXT)error_extra;
assert_always(context->ha);
context->ha->set_loader_error(err);
if (err == DB_KEYEXIST) {
context->ha->set_dup_value_for_pk(key);
}
}
//
// Method that is called at the end of many calls to insert rows
@ -8193,12 +8191,14 @@ int ha_tokudb::tokudb_add_index(
goto cleanup;
}
error = indexer->set_poll_function(indexer, ai_poll_fun, &lc);
error = indexer->set_poll_function(
indexer, ha_tokudb::tokudb_add_index_poll, &lc);
if (error) {
goto cleanup;
}
error = indexer->set_error_callback(indexer, loader_ai_err_fun, &lc);
error = indexer->set_error_callback(
indexer, ha_tokudb::loader_add_index_err, &lc);
if (error) {
goto cleanup;
}
@ -8253,12 +8253,14 @@ int ha_tokudb::tokudb_add_index(
goto cleanup;
}
error = loader->set_poll_function(loader, loader_poll_fun, &lc);
error =
loader->set_poll_function(loader, ha_tokudb::bulk_insert_poll, &lc);
if (error) {
goto cleanup;
}
error = loader->set_error_callback(loader, loader_ai_err_fun, &lc);
error = loader->set_error_callback(
loader, ha_tokudb::loader_add_index_err, &lc);
if (error) {
goto cleanup;
}
@ -8465,6 +8467,24 @@ cleanup:
thd_proc_info(thd, orig_proc_info);
TOKUDB_HANDLER_DBUG_RETURN(error ? error : loader_error);
}
int ha_tokudb::tokudb_add_index_poll(void* extra, float progress) {
LOADER_CONTEXT context = (LOADER_CONTEXT)extra;
if (thd_killed(context->thd)) {
sprintf(context->write_status_msg,
"The process has been killed, aborting add index.");
return ER_ABORTING_CONNECTION;
}
float percentage = progress * 100;
sprintf(context->write_status_msg,
"Adding of indexes to %s about %.1f%% done",
context->ha->share->full_table_name(),
percentage);
thd_proc_info(context->thd, context->write_status_msg);
#ifdef HA_TOKUDB_HAS_THD_PROGRESS
thd_progress_report(context->thd, (unsigned long long)percentage, 100);
#endif
return 0;
}
//
// Internal function called by ha_tokudb::add_index and ha_tokudb::alter_table_phase2

View File

@ -799,6 +799,19 @@ public:
#else
void start_bulk_insert(ha_rows rows);
#endif
static int bulk_insert_poll(void* extra, float progress);
static void loader_add_index_err(DB* db,
int i,
int err,
DBT* key,
DBT* val,
void* error_extra);
static void loader_dup(DB* db,
int i,
int err,
DBT* key,
DBT* val,
void* error_extra);
int end_bulk_insert();
int end_bulk_insert(bool abort);
@ -938,17 +951,23 @@ public:
#endif
private:
int tokudb_add_index(
TABLE *table_arg,
KEY *key_info,
uint num_of_keys,
DB_TXN* txn,
bool* inc_num_DBs,
bool* modified_DB
);
void restore_add_index(TABLE* table_arg, uint num_of_keys, bool incremented_numDBs, bool modified_DBs);
int drop_indexes(TABLE *table_arg, uint *key_num, uint num_of_keys, KEY *key_info, DB_TXN* txn);
void restore_drop_indexes(TABLE *table_arg, uint *key_num, uint num_of_keys);
int tokudb_add_index(TABLE* table_arg,
KEY* key_info,
uint num_of_keys,
DB_TXN* txn,
bool* inc_num_DBs,
bool* modified_DB);
static int tokudb_add_index_poll(void *extra, float progress);
void restore_add_index(TABLE* table_arg,
uint num_of_keys,
bool incremented_numDBs,
bool modified_DBs);
int drop_indexes(TABLE* table_arg,
uint* key_num,
uint num_of_keys,
KEY* key_info,
DB_TXN* txn);
void restore_drop_indexes(TABLE* table_arg, uint* key_num, uint num_of_keys);
public:
// delete all rows from the table

View File

@ -55,6 +55,7 @@ static bool tokudb_show_status(
static void tokudb_handle_fatal_signal(handlerton* hton, THD* thd, int sig);
#endif
static int tokudb_close_connection(handlerton* hton, THD* thd);
static void tokudb_kill_connection(handlerton *hton, THD *thd, enum thd_kill_levels level);
static int tokudb_commit(handlerton* hton, THD* thd, bool all);
static int tokudb_rollback(handlerton* hton, THD* thd, bool all);
#if TOKU_INCLUDE_XA
@ -331,6 +332,7 @@ static int tokudb_init_func(void *p) {
tokudb_hton->create = tokudb_create_handler;
tokudb_hton->close_connection = tokudb_close_connection;
tokudb_hton->kill_query = tokudb_kill_connection;
tokudb_hton->savepoint_offset = sizeof(SP_INFO_T);
tokudb_hton->savepoint_set = tokudb_savepoint;
@ -754,6 +756,13 @@ static int tokudb_close_connection(handlerton* hton, THD* thd) {
return error;
}
void tokudb_kill_connection(handlerton *hton, THD *thd,
enum thd_kill_levels level) {
TOKUDB_DBUG_ENTER("");
db_env->kill_waiter(db_env, thd);
DBUG_VOID_RETURN;
}
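// For context, the kill path added here chains three layers (a sketch;
// the waiter-matching rule is an assumption based on the client_extra
// plumbing above):
//
//   KILL QUERY/CONNECTION names this THD as the victim
//     -> tokudb_kill_connection(hton, thd, level)  // handlerton::kill_query
//     -> db_env->kill_waiter(db_env, thd)          // env_kill_waiter() above
//     -> env->i->ltm.kill_waiter(thd)              // lock manager
//
// Assumed effect: any pending lock_request whose client_extra matches the
// THD is woken early, so the victim stops waiting on row locks instead of
// sleeping until its lock timeout expires.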
bool tokudb_flush_logs(handlerton * hton) {
TOKUDB_DBUG_ENTER("");
int error;

View File

@ -0,0 +1,42 @@
#
# This is a helper script for rpl_row_img.test. It creates
# all combinations of TokuDB / InnoDB in a three server replication
# chain (2 x 2 x 2 = 8 combinations in total). Each combination is
# tested against the current settings for binlog_row_image (on each
# server).
#
# The test script that is executed for every combination is the
# only argument to this wrapper script. See below.
#
# This script takes one parameter:
# - $row_img_test_script
# the name of the test script to include in every combination
#
# Sample usage:
# -- let $row_img_test_script= extra/rpl_tests/rpl_row_img.test
# -- source include/rpl_row_img_general_loop.test
-- let $engine_type_a= 2
-- let $server_1_engine= TokuDB
while($engine_type_a)
{
-- let $engine_type_b= 2
-- let $server_2_engine= TokuDB
while($engine_type_b)
{
-- let $engine_type_c= 2
-- let $server_3_engine= TokuDB
while($engine_type_c)
{
-- echo ### engines: $server_1_engine, $server_2_engine, $server_3_engine
-- source $row_img_test_script
-- let $server_3_engine= InnoDB
-- dec $engine_type_c
}
-- let $server_2_engine= InnoDB
-- dec $engine_type_b
}
-- let $server_1_engine= InnoDB
-- dec $engine_type_a
}

View File

@ -18,8 +18,10 @@ update t set b=b+2 where a=1;
update t set b=b+3 where a=4;
update t set b=b+4 where a=3;
update t set b=b+1 where 1<=a and a<=3;
select unix_timestamp() into @tstart;
select unix_timestamp() into @tend;
# select unix_timestamp() into @tstart;
# Commented out for MariaDB
# select unix_timestamp() into @tend;
# select @tend-@tstart <= 5; # assert no delay in the delete time
select * from t;
a b
1 3

View File

@ -6,7 +6,6 @@ Variable_name Value
tokudb_rpl_unique_checks OFF
tokudb_rpl_unique_checks_delay 5000
create table t (a bigint not null, primary key(a)) engine=tokudb;
select unix_timestamp() into @tstart;
insert into t values (1);
insert into t values (2),(3);
insert into t values (4);

View File

@ -1,7 +1,4 @@
include/master-slave.inc
Warnings:
Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
[connection master]
call mtr.add_suppression("read free replication is disabled for tokudb table");
CREATE TABLE t (a int(11), b char(20)) ENGINE = TokuDB;

View File

@ -0,0 +1,10 @@
include/master-slave.inc
[connection master]
CREATE TABLE t1 (a INT) ENGINE=tokudb;
begin;
insert into t1 values(1);
flush tables with read lock;
commit;
unlock tables;
drop table t1;
include/rpl_end.inc

View File

@ -0,0 +1,530 @@
#
# Setup
#
#
# See if queries that use both auto_increment and LAST_INSERT_ID()
# are replicated well
#
# We also check how the foreign_key_check variable is replicated
#
include/master-slave.inc
[connection master]
SET @old_concurrent_insert= @@global.concurrent_insert;
SET @@global.concurrent_insert= 0;
create table t1(a int auto_increment, key(a)) engine=tokudb;
create table t2(b int auto_increment, c int, key(b)) engine=tokudb;
insert into t1 values (1),(2),(3);
insert into t1 values (null);
insert into t2 values (null,last_insert_id());
select * from t1 ORDER BY a;
a
1
2
3
4
select * from t2 ORDER BY b;
b c
1 4
drop table t1;
drop table t2;
create table t1(a int auto_increment, key(a)) engine=tokudb;
create table t2(b int auto_increment, c int, key(b), foreign key(b) references t1(a)) engine=tokudb;
SET FOREIGN_KEY_CHECKS=0;
insert into t1 values (10);
insert into t1 values (null),(null),(null);
insert into t2 values (5,0);
insert into t2 values (null,last_insert_id());
SET FOREIGN_KEY_CHECKS=1;
select * from t1;
a
10
11
12
13
select * from t2;
b c
5 0
6 11
#
# check if INSERT SELECT in auto_increment is well replicated (bug #490)
#
drop table t2;
drop table t1;
create table t1(a int auto_increment, key(a)) engine=tokudb;
create table t2(b int auto_increment, c int, key(b)) engine=tokudb;
insert into t1 values (10);
insert into t1 values (null),(null),(null);
insert into t2 values (5,0);
insert into t2 (c) select * from t1 ORDER BY a;
select * from t2 ORDER BY b;
b c
5 0
6 10
7 11
8 12
9 13
select * from t1 ORDER BY a;
a
10
11
12
13
select * from t2 ORDER BY b;
b c
5 0
6 10
7 11
8 12
9 13
drop table t1;
drop table t2;
#
# Bug#8412: Error codes reported in binary log for CHARACTER SET,
# FOREIGN_KEY_CHECKS
#
SET TIMESTAMP=1000000000;
CREATE TABLE t1 ( a INT UNIQUE ) engine=tokudb;
SET FOREIGN_KEY_CHECKS=0;
INSERT INTO t1 VALUES (1),(1);
Got one of the listed errors
drop table t1;
#
# Bug#14553: NULL in WHERE resets LAST_INSERT_ID
#
set @@session.sql_auto_is_null=1;
create table t1(a int auto_increment, key(a)) engine=tokudb;
create table t2(a int) engine=tokudb;
insert into t1 (a) values (null);
insert into t2 (a) select a from t1 where a is null;
insert into t2 (a) select a from t1 where a is null;
select * from t2;
a
1
select * from t2;
a
1
drop table t1;
drop table t2;
#
# End of 4.1 tests
#
#
# BUG#15728: LAST_INSERT_ID function inside a stored function returns 0
#
# The solution is not to reset last_insert_id on enter to sub-statement.
#
drop function if exists bug15728;
drop function if exists bug15728_insert;
drop table if exists t1, t2;
create table t1 (
id int not null auto_increment,
last_id int,
primary key (id)
) engine=tokudb;
create function bug15728() returns int(11)
return last_insert_id();
insert into t1 (last_id) values (0);
insert into t1 (last_id) values (last_insert_id());
insert into t1 (last_id) values (bug15728());
create table t2 (
id int not null auto_increment,
last_id int,
primary key (id)
) engine=tokudb;
create function bug15728_insert() returns int(11) modifies sql data
begin
insert into t2 (last_id) values (bug15728());
return bug15728();
end|
create trigger t1_bi before insert on t1 for each row
begin
declare res int;
select bug15728_insert() into res;
set NEW.last_id = res;
end|
insert into t1 (last_id) values (0);
drop trigger t1_bi;
select last_insert_id();
last_insert_id()
4
select bug15728_insert();
bug15728_insert()
2
select last_insert_id();
last_insert_id()
4
insert into t1 (last_id) values (bug15728());
select last_insert_id();
last_insert_id()
5
drop procedure if exists foo;
create procedure foo()
begin
declare res int;
insert into t2 (last_id) values (bug15728());
insert into t1 (last_id) values (bug15728());
end|
call foo();
select * from t1;
id last_id
1 0
2 1
3 2
4 1
5 4
6 3
select * from t2;
id last_id
1 3
2 4
3 5
select * from t1;
id last_id
1 0
2 1
3 2
4 1
5 4
6 3
select * from t2;
id last_id
1 3
2 4
3 5
drop function bug15728;
drop function bug15728_insert;
drop table t1,t2;
drop procedure foo;
create table t1 (n int primary key auto_increment not null,
b int, unique(b)) engine=tokudb;
set sql_log_bin=0;
insert into t1 values(null,100);
replace into t1 values(null,50),(null,100),(null,150);
select * from t1 order by n;
n b
2 50
3 100
4 150
truncate table t1;
set sql_log_bin=1;
insert into t1 values(null,100);
select * from t1 order by n;
n b
1 100
insert into t1 values(null,200),(null,300);
delete from t1 where b <> 100;
select * from t1 order by n;
n b
1 100
replace into t1 values(null,100),(null,350);
select * from t1 order by n;
n b
2 100
3 350
select * from t1 order by n;
n b
2 100
3 350
insert into t1 values (NULL,400),(3,500),(NULL,600) on duplicate key UPDATE n=1000;
select * from t1 order by n;
n b
2 100
4 400
1000 350
1001 600
select * from t1 order by n;
n b
2 100
4 400
1000 350
1001 600
drop table t1;
create table t1 (n int primary key auto_increment not null,
b int, unique(b)) engine=tokudb;
insert into t1 values(null,100);
select * from t1 order by n;
n b
1 100
insert into t1 values(null,200),(null,300);
delete from t1 where b <> 100;
select * from t1 order by n;
n b
1 100
insert into t1 values(null,100),(null,350) on duplicate key update n=2;
select * from t1 order by n;
n b
2 100
3 350
select * from t1 order by n;
n b
2 100
3 350
drop table t1;
CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY AUTO_INCREMENT, b INT,
UNIQUE(b)) ENGINE=tokudb;
INSERT INTO t1(b) VALUES(1),(1),(2) ON DUPLICATE KEY UPDATE t1.b=10;
SELECT * FROM t1 ORDER BY a;
a b
1 10
2 2
SELECT * FROM t1 ORDER BY a;
a b
1 10
2 2
drop table t1;
CREATE TABLE t1 (
id bigint(20) unsigned NOT NULL auto_increment,
field_1 int(10) unsigned NOT NULL,
field_2 varchar(255) NOT NULL,
field_3 varchar(255) NOT NULL,
PRIMARY KEY (id),
UNIQUE KEY field_1 (field_1, field_2)
) ENGINE=tokudb;
CREATE TABLE t2 (
field_a int(10) unsigned NOT NULL,
field_b varchar(255) NOT NULL,
field_c varchar(255) NOT NULL
) ENGINE=tokudb;
INSERT INTO t2 (field_a, field_b, field_c) VALUES (1, 'a', '1a');
INSERT INTO t2 (field_a, field_b, field_c) VALUES (2, 'b', '2b');
INSERT INTO t2 (field_a, field_b, field_c) VALUES (3, 'c', '3c');
INSERT INTO t2 (field_a, field_b, field_c) VALUES (4, 'd', '4d');
INSERT INTO t2 (field_a, field_b, field_c) VALUES (5, 'e', '5e');
INSERT INTO t1 (field_1, field_2, field_3)
SELECT t2.field_a, t2.field_b, t2.field_c
FROM t2
ON DUPLICATE KEY UPDATE
t1.field_3 = t2.field_c;
INSERT INTO t2 (field_a, field_b, field_c) VALUES (6, 'f', '6f');
INSERT INTO t1 (field_1, field_2, field_3)
SELECT t2.field_a, t2.field_b, t2.field_c
FROM t2
ON DUPLICATE KEY UPDATE
t1.field_3 = t2.field_c;
SELECT * FROM t1 ORDER BY id;
id field_1 field_2 field_3
1 1 a 1a
2 2 b 2b
3 3 c 3c
4 4 d 4d
5 5 e 5e
8 6 f 6f
SELECT * FROM t1 ORDER BY id;
id field_1 field_2 field_3
1 1 a 1a
2 2 b 2b
3 3 c 3c
4 4 d 4d
5 5 e 5e
8 6 f 6f
drop table t1, t2;
DROP PROCEDURE IF EXISTS p1;
DROP TABLE IF EXISTS t1, t2;
SELECT LAST_INSERT_ID(0);
LAST_INSERT_ID(0)
0
CREATE TABLE t1 (
id INT NOT NULL DEFAULT 0,
last_id INT,
PRIMARY KEY (id)
) ENGINE=tokudb;
CREATE TABLE t2 (
id INT NOT NULL AUTO_INCREMENT,
last_id INT,
PRIMARY KEY (id)
) ENGINE=tokudb;
CREATE PROCEDURE p1()
BEGIN
INSERT INTO t2 (last_id) VALUES (LAST_INSERT_ID());
INSERT INTO t1 (last_id) VALUES (LAST_INSERT_ID());
END|
CALL p1();
SELECT * FROM t1 ORDER BY id;
id last_id
0 1
SELECT * FROM t2 ORDER BY id;
id last_id
1 0
SELECT * FROM t1 ORDER BY id;
id last_id
0 1
SELECT * FROM t2 ORDER BY id;
id last_id
1 0
DROP PROCEDURE p1;
DROP TABLE t1, t2;
DROP PROCEDURE IF EXISTS p1;
DROP FUNCTION IF EXISTS f1;
DROP FUNCTION IF EXISTS f2;
DROP FUNCTION IF EXISTS f3;
DROP TABLE IF EXISTS t1, t2;
CREATE TABLE t1 (
i INT NOT NULL AUTO_INCREMENT PRIMARY KEY,
j INT DEFAULT 0
) ENGINE=tokudb;
CREATE TABLE t2 (i INT) ENGINE=tokudb;
CREATE PROCEDURE p1()
BEGIN
INSERT INTO t1 (i) VALUES (NULL);
INSERT INTO t2 (i) VALUES (LAST_INSERT_ID());
INSERT INTO t1 (i) VALUES (NULL), (NULL);
INSERT INTO t2 (i) VALUES (LAST_INSERT_ID());
END |
CREATE FUNCTION f1() RETURNS INT MODIFIES SQL DATA
BEGIN
INSERT INTO t1 (i) VALUES (NULL);
INSERT INTO t2 (i) VALUES (LAST_INSERT_ID());
INSERT INTO t1 (i) VALUES (NULL), (NULL);
INSERT INTO t2 (i) VALUES (LAST_INSERT_ID());
RETURN 0;
END |
CREATE FUNCTION f2() RETURNS INT NOT DETERMINISTIC
RETURN LAST_INSERT_ID() |
CREATE FUNCTION f3() RETURNS INT MODIFIES SQL DATA
BEGIN
INSERT INTO t2 (i) VALUES (LAST_INSERT_ID());
RETURN 0;
END |
INSERT INTO t1 VALUES (NULL, -1);
CALL p1();
SELECT f1();
f1()
0
INSERT INTO t1 VALUES (NULL, f2()), (NULL, LAST_INSERT_ID()),
(NULL, LAST_INSERT_ID()), (NULL, f2()), (NULL, f2());
INSERT INTO t1 VALUES (NULL, f2());
INSERT INTO t1 VALUES (NULL, 0), (NULL, LAST_INSERT_ID());
UPDATE t1 SET j= -1 WHERE i IS NULL;
INSERT INTO t1 (i) VALUES (NULL);
INSERT INTO t1 (i) VALUES (NULL);
SELECT f3();
f3()
0
SELECT * FROM t1 ORDER BY i;
i j
1 -1
2 0
3 0
4 0
5 0
6 0
7 0
8 3
9 3
10 3
11 3
12 3
13 8
14 -1
15 13
16 0
17 0
SELECT * FROM t2 ORDER BY i;
i
2
3
5
6
16
SELECT * FROM t1;
i j
1 -1
2 0
3 0
4 0
5 0
6 0
7 0
8 3
9 3
10 3
11 3
12 3
13 8
14 -1
15 13
16 0
17 0
SELECT * FROM t2;
i
2
3
5
6
16
DROP PROCEDURE p1;
DROP FUNCTION f1;
DROP FUNCTION f2;
DROP FUNCTION f3;
DROP TABLE t1, t2;
#
# End of 5.0 tests
#
create table t2 (
id int not null auto_increment,
last_id int,
primary key (id)
) engine=tokudb;
truncate table t2;
create table t1 (id tinyint primary key) engine=tokudb;
create function insid() returns int
begin
insert into t2 (last_id) values (0);
return 0;
end|
set sql_log_bin=0;
insert into t2 (id) values(1),(2),(3);
delete from t2;
set sql_log_bin=1;
select insid();
insid()
0
set sql_log_bin=0;
insert into t2 (id) values(5),(6),(7);
delete from t2 where id>=5;
set sql_log_bin=1;
insert into t1 select insid();
select * from t1 order by id;
id
0
select * from t2 order by id;
id last_id
4 0
8 0
select * from t1 order by id;
id
0
select * from t2 order by id;
id last_id
4 0
8 0
drop table t1;
drop function insid;
truncate table t2;
create table t1 (n int primary key auto_increment not null,
b int, unique(b)) engine=tokudb;
create procedure foo()
begin
insert into t1 values(null,10);
insert ignore into t1 values(null,10);
insert ignore into t1 values(null,10);
insert into t2 values(null,3);
end|
call foo();
select * from t1 order by n;
n b
1 10
select * from t2 order by id;
id last_id
1 3
select * from t1 order by n;
n b
1 10
select * from t2 order by id;
id last_id
1 3
drop table t1, t2;
drop procedure foo;
SET @@global.concurrent_insert= @old_concurrent_insert;
set @@session.sql_auto_is_null=default;
include/rpl_end.inc

View File

@ -0,0 +1,74 @@
include/master-slave.inc
[connection master]
call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT.");
create table t1(a int auto_increment, primary key(a));
create table t2(b int auto_increment, c int, primary key(b));
insert into t1 values (1),(2),(3);
insert into t1 values (null);
insert into t2 values (null,last_insert_id());
select * from t1 ORDER BY a;
a
1
2
3
4
select * from t2 ORDER BY b;
b c
1 4
drop table t1;
drop table t2;
create table t1(a int auto_increment, key(a)) engine=tokudb;
create table t2(b int auto_increment, c int, key(b), foreign key(b) references t1(a)) engine=tokudb;
SET FOREIGN_KEY_CHECKS=0;
insert into t1 values (10);
insert into t1 values (null),(null),(null);
insert into t2 values (5,0);
insert into t2 values (null,last_insert_id());
SET FOREIGN_KEY_CHECKS=1;
select * from t1;
a
10
11
12
13
select * from t2;
b c
5 0
6 11
drop table t2;
drop table t1;
create table t1(a int auto_increment, primary key(a));
create table t2(b int auto_increment, c int, primary key(b));
insert into t1 values (10);
insert into t1 values (null),(null),(null);
insert into t2 values (5,0);
insert into t2 (c) select * from t1 ORDER BY a;
select * from t2 ORDER BY b;
b c
5 0
6 10
7 11
8 12
9 13
select * from t1 ORDER BY a;
a
10
11
12
13
select * from t2 ORDER BY b;
b c
5 0
6 10
7 11
8 12
9 13
drop table t1;
drop table t2;
SET TIMESTAMP=1000000000;
CREATE TABLE t1 ( a INT UNIQUE );
SET FOREIGN_KEY_CHECKS=0;
INSERT INTO t1 VALUES (1),(1);
Got one of the listed errors
drop table t1;
include/rpl_end.inc

View File

@ -0,0 +1,25 @@
include/master-slave.inc
[connection master]
call mtr.add_suppression('Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT.');
CREATE TABLE t1 (
a int unsigned not null auto_increment primary key,
b int unsigned
) ENGINE=TokuDB;
CREATE TABLE t2 (
a int unsigned not null auto_increment primary key,
b int unsigned
) ENGINE=TokuDB;
INSERT INTO t1 VALUES (NULL, 0);
INSERT INTO t1 SELECT NULL, 0 FROM t1;
INSERT INTO t2 VALUES (NULL, 0), (NULL,1);
SELECT * FROM t1 ORDER BY a;
a b
1 0
2 0
SELECT * FROM t2 ORDER BY a;
a b
1 0
2 1
UPDATE t1, t2 SET t1.b = t2.b WHERE t1.a = t2.a;
drop table t1, t2;
include/rpl_end.inc

View File

@ -0,0 +1,53 @@
include/master-slave.inc
[connection master]
call mtr.add_suppression('Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT');
drop table if exists t1,t2;
CREATE TABLE t1 (
a int unsigned not null auto_increment primary key,
b int unsigned
) ENGINE=TokuDB;
CREATE TABLE t2 (
a int unsigned not null auto_increment primary key,
b int unsigned
) ENGINE=TokuDB;
INSERT INTO t1 VALUES (NULL, 0);
INSERT INTO t1 SELECT NULL, 0 FROM t1;
INSERT INTO t2 VALUES (NULL, 0), (NULL,1);
SELECT * FROM t1 ORDER BY a;
a b
1 0
2 0
SELECT * FROM t2 ORDER BY a;
a b
1 0
2 1
UPDATE t1, t2 SET t1.b = (t2.b+4) WHERE t1.a = t2.a;
SELECT * FROM t1 ORDER BY a;
a b
1 4
2 5
SELECT * FROM t2 ORDER BY a;
a b
1 0
2 1
SELECT * FROM t1 ORDER BY a;
a b
1 4
2 5
SELECT * FROM t2 ORDER BY a;
a b
1 0
2 1
drop table t1,t2;
reset master;
CREATE TABLE t1 ( a INT );
INSERT INTO t1 VALUES (0);
UPDATE t1, (SELECT 3 as b) AS x SET t1.a = x.b;
select * from t1;
a
3
select * from t1;
a
3
drop table t1;
include/rpl_end.inc

View File

@ -0,0 +1,195 @@
include/master-slave.inc
[connection master]
call mtr.add_suppression('Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT.');
-------- Test for BUG#9361 --------
CREATE TABLE t1 (
a int unsigned not null auto_increment primary key,
b int unsigned
) ENGINE=TokuDB;
CREATE TABLE t2 (
a int unsigned not null auto_increment primary key,
b int unsigned
) ENGINE=TokuDB;
INSERT INTO t1 VALUES (NULL, 0);
INSERT INTO t1 SELECT NULL, 0 FROM t1;
INSERT INTO t2 VALUES (NULL, 0), (NULL,1);
SELECT * FROM t1 ORDER BY a;
a b
1 0
2 0
SELECT * FROM t2 ORDER BY a;
a b
1 0
2 1
UPDATE t2, (SELECT a FROM t1 ORDER BY a) AS t SET t2.b = t.a+5 ;
SELECT * FROM t1 ORDER BY a;
a b
1 0
2 0
SELECT * FROM t2 ORDER BY a;
a b
1 6
2 6
SELECT * FROM t1 ORDER BY a;
a b
1 0
2 0
SELECT * FROM t2 ORDER BY a;
a b
1 6
2 6
drop table t1,t2;
-------- Test 1 for BUG#9361 --------
DROP TABLE IF EXISTS t1;
DROP TABLE IF EXISTS t2;
CREATE TABLE t1 (
a1 char(30),
a2 int,
a3 int,
a4 char(30),
a5 char(30)
);
CREATE TABLE t2 (
b1 int,
b2 char(30)
);
INSERT INTO t1 VALUES ('Yes', 1, NULL, 'foo', 'bar');
INSERT INTO t2 VALUES (1, 'baz');
UPDATE t1 a, t2
SET a.a1 = 'No'
WHERE a.a2 =
(SELECT b1
FROM t2
WHERE b2 = 'baz')
AND a.a3 IS NULL
AND a.a4 = 'foo'
AND a.a5 = 'bar';
SELECT * FROM t1;
a1 a2 a3 a4 a5
No 1 NULL foo bar
SELECT * FROM t2;
b1 b2
1 baz
DROP TABLE t1, t2;
-------- Test 2 for BUG#9361 --------
DROP TABLE IF EXISTS t1;
DROP TABLE IF EXISTS t2;
DROP TABLE IF EXISTS t3;
CREATE TABLE t1 (
i INT,
j INT,
x INT,
y INT,
z INT
);
CREATE TABLE t2 (
i INT,
k INT,
x INT,
y INT,
z INT
);
CREATE TABLE t3 (
j INT,
k INT,
x INT,
y INT,
z INT
);
INSERT INTO t1 VALUES ( 1, 2,13,14,15);
INSERT INTO t2 VALUES ( 1, 3,23,24,25);
INSERT INTO t3 VALUES ( 2, 3, 1,34,35), ( 2, 3, 1,34,36);
UPDATE t1 AS a
INNER JOIN t2 AS b
ON a.i = b.i
INNER JOIN t3 AS c
ON a.j = c.j AND b.k = c.k
SET a.x = b.x,
a.y = b.y,
a.z = (
SELECT sum(z)
FROM t3
WHERE y = 34
)
WHERE b.x = 23;
SELECT * FROM t1;
i j x y z
1 2 23 24 71
DROP TABLE t1, t2, t3;
DROP TABLE IF EXISTS t1;
Warnings:
Note 1051 Unknown table 'test.t1'
DROP TABLE IF EXISTS t2;
Warnings:
Note 1051 Unknown table 'test.t2'
CREATE TABLE t1 (
idp int(11) NOT NULL default '0',
idpro int(11) default NULL,
price decimal(19,4) default NULL,
PRIMARY KEY (idp)
);
CREATE TABLE t2 (
idpro int(11) NOT NULL default '0',
price decimal(19,4) default NULL,
nbprice int(11) default NULL,
PRIMARY KEY (idpro)
);
INSERT INTO t1 VALUES
(1,1,'3.0000'),
(2,2,'1.0000'),
(3,1,'1.0000'),
(4,1,'4.0000'),
(5,3,'2.0000'),
(6,2,'4.0000');
INSERT INTO t2 VALUES
(1,'0.0000',0),
(2,'0.0000',0),
(3,'0.0000',0);
update
t2
join
( select idpro, min(price) as min_price, count(*) as nbr_price
from t1
where idpro>0 and price>0
group by idpro
) as table_price
on t2.idpro = table_price.idpro
set t2.price = table_price.min_price,
t2.nbprice = table_price.nbr_price;
select "-- MASTER AFTER JOIN --" as "";
-- MASTER AFTER JOIN --
select * from t1;
idp idpro price
1 1 3.0000
2 2 1.0000
3 1 1.0000
4 1 4.0000
5 3 2.0000
6 2 4.0000
select * from t2;
idpro price nbprice
1 1.0000 3
2 1.0000 2
3 2.0000 1
select "-- SLAVE AFTER JOIN --" as "";
-- SLAVE AFTER JOIN --
select * from t1;
idp idpro price
1 1 3.0000
2 2 1.0000
3 1 1.0000
4 1 4.0000
5 3 2.0000
6 2 4.0000
select * from t2;
idpro price nbprice
1 1.0000 3
2 1.0000 2
3 2.0000 1
DROP TABLE t1, t2;
include/rpl_end.inc

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,293 @@
include/master-slave.inc
[connection master]
include/stop_slave.inc
include/wait_for_slave_to_stop.inc
reset master;
reset slave;
start slave;
include/wait_for_slave_to_start.inc
set @save_slave_ddl_exec_mode=@@global.slave_ddl_exec_mode;
set @@global.slave_ddl_exec_mode=STRICT;
create table t1(n int not null auto_increment primary key)ENGINE=TokuDB;
insert into t1 values (NULL);
drop table t1;
create table t1 (word char(20) not null)ENGINE=TokuDB;
load data infile 'LOAD_FILE' into table t1 ignore 1 lines;
select count(*) from t1;
count(*)
69
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; create table t1(n int not null auto_increment primary key)ENGINE=TokuDB
master-bin.000001 # Gtid # # BEGIN GTID #-#-#
master-bin.000001 # Table_map # # table_id: # (test.t1)
master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; DROP TABLE `t1` /* generated by server */
master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; create table t1 (word char(20) not null)ENGINE=TokuDB
master-bin.000001 # Gtid # # BEGIN GTID #-#-#
master-bin.000001 # Table_map # # table_id: # (test.t1)
master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; create table t1(n int not null auto_increment primary key)ENGINE=TokuDB
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; create table t1(n int not null auto_increment primary key)ENGINE=TokuDB
master-bin.000001 # Gtid # # BEGIN GTID #-#-#
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Gtid # # BEGIN GTID #-#-#
master-bin.000001 # Table_map # # table_id: # (test.t1)
master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
flush logs;
create table t3 (a int)ENGINE=TokuDB;
select * from t1 order by 1 asc;
word
Aarhus
Aaron
Aaron
Ababa
Ababa
aback
aback
abaft
abaft
abandon
abandon
abandoned
abandoned
abandoning
abandoning
abandonment
abandonment
abandons
abandons
abase
abased
abasement
abasements
abases
abash
abashed
abashes
abashing
abasing
abate
abated
abatement
abatements
abater
abates
abating
Abba
abbe
abbey
abbeys
abbot
abbots
Abbott
abbreviate
abbreviated
abbreviates
abbreviating
abbreviation
abbreviations
Abby
abdomen
abdomens
abdominal
abduct
abducted
abduction
abductions
abductor
abductors
abducts
Abe
abed
Abel
Abelian
Abelson
Aberdeen
Abernathy
aberrant
aberration
select * from t1 order by 1 asc;
word
Aarhus
Aaron
Aaron
Ababa
Ababa
aback
aback
abaft
abaft
abandon
abandon
abandoned
abandoned
abandoning
abandoning
abandonment
abandonment
abandons
abandons
abase
abased
abasement
abasements
abases
abash
abashed
abashes
abashing
abasing
abate
abated
abatement
abatements
abater
abates
abating
Abba
abbe
abbey
abbeys
abbot
abbots
Abbott
abbreviate
abbreviated
abbreviates
abbreviating
abbreviation
abbreviations
Abby
abdomen
abdomens
abdominal
abduct
abducted
abduction
abductions
abductor
abductors
abducts
Abe
abed
Abel
Abelian
Abelson
Aberdeen
Abernathy
aberrant
aberration
flush logs;
include/stop_slave.inc
include/start_slave.inc
create table t2 (n int)ENGINE=TokuDB;
insert into t2 values (1);
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; create table t1(n int not null auto_increment primary key)ENGINE=TokuDB
master-bin.000001 # Gtid # # BEGIN GTID #-#-#
master-bin.000001 # Table_map # # table_id: # (test.t1)
master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; DROP TABLE `t1` /* generated by server */
master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; create table t1 (word char(20) not null)ENGINE=TokuDB
master-bin.000001 # Gtid # # BEGIN GTID #-#-#
master-bin.000001 # Table_map # # table_id: # (test.t1)
master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Rotate # # master-bin.000002;pos=POS
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000002 # Binlog_checkpoint # # master-bin.000002
master-bin.000002 # Gtid # # GTID #-#-#
master-bin.000002 # Query # # use `test`; create table t3 (a int)ENGINE=TokuDB
master-bin.000002 # Gtid # # GTID #-#-#
master-bin.000002 # Query # # use `test`; create table t2 (n int)ENGINE=TokuDB
master-bin.000002 # Gtid # # BEGIN GTID #-#-#
master-bin.000002 # Table_map # # table_id: # (test.t2)
master-bin.000002 # Write_rows_v1 # # table_id: # flags: STMT_END_F
master-bin.000002 # Xid # # COMMIT /* XID */
show binary logs;
Log_name File_size
master-bin.000001 #
master-bin.000002 #
show binary logs;
Log_name File_size
slave-bin.000001 #
slave-bin.000002 #
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
slave-bin.000001 # Gtid # # GTID #-#-#
slave-bin.000001 # Query # # use `test`; create table t1(n int not null auto_increment primary key)ENGINE=TokuDB
slave-bin.000001 # Gtid # # BEGIN GTID #-#-#
slave-bin.000001 # Table_map # # table_id: # (test.t1)
slave-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
slave-bin.000001 # Xid # # COMMIT /* XID */
slave-bin.000001 # Gtid # # GTID #-#-#
slave-bin.000001 # Query # # use `test`; DROP TABLE `t1` /* generated by server */
slave-bin.000001 # Gtid # # GTID #-#-#
slave-bin.000001 # Query # # use `test`; create table t1 (word char(20) not null)ENGINE=TokuDB
slave-bin.000001 # Gtid # # BEGIN GTID #-#-#
slave-bin.000001 # Table_map # # table_id: # (test.t1)
slave-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
slave-bin.000001 # Xid # # COMMIT /* XID */
slave-bin.000001 # Gtid # # GTID #-#-#
slave-bin.000001 # Query # # use `test`; create table t3 (a int)ENGINE=TokuDB
slave-bin.000001 # Rotate # # slave-bin.000002;pos=POS
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
slave-bin.000002 # Binlog_checkpoint # # slave-bin.000002
slave-bin.000002 # Gtid # # GTID #-#-#
slave-bin.000002 # Query # # use `test`; create table t2 (n int)ENGINE=TokuDB
slave-bin.000002 # Gtid # # BEGIN GTID #-#-#
slave-bin.000002 # Table_map # # table_id: # (test.t2)
slave-bin.000002 # Write_rows_v1 # # table_id: # flags: STMT_END_F
slave-bin.000002 # Xid # # COMMIT /* XID */
include/check_slave_is_running.inc
show binlog events in 'slave-bin.000005' from 4;
ERROR HY000: Error when executing command SHOW BINLOG EVENTS: Could not find target log
DROP TABLE t1;
DROP TABLE t2;
DROP TABLE t3;
include/rpl_reset.inc
create table t1(a int auto_increment primary key, b int);
insert into t1 values (NULL, 1);
set insert_id=5;
insert into t1 values (NULL, last_insert_id()), (NULL, last_insert_id());
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; create table t1(a int auto_increment primary key, b int)
master-bin.000001 # Gtid # # BEGIN GTID #-#-#
master-bin.000001 # Table_map # # table_id: # (test.t1)
master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # COMMIT
master-bin.000001 # Gtid # # BEGIN GTID #-#-#
master-bin.000001 # Table_map # # table_id: # (test.t1)
master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # COMMIT
select * from t1;
a b
1 1
5 1
6 1
drop table t1;
set @@global.slave_ddl_exec_mode=@save_slave_ddl_exec_mode;
include/rpl_end.inc

View File

@ -0,0 +1,51 @@
include/master-slave.inc
Warnings:
Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
[connection master]
******** [ MASTER ] ********
CREATE DATABASE BUG_37656;
use BUG_37656;
show databases like 'BUG_37656';
Database (BUG_37656)
BUG_37656
******** [ SLAVE ] ********
show databases like 'bug_37656';
Database (bug_37656)
bug_37656
******** [ MASTER ] ********
CREATE TABLE T1 (a int);
CREATE TABLE T2 (b int) ENGINE=TokuDB;
CREATE TABLE T3 (txt TEXT);
show tables;
Tables_in_BUG_37656
T1
T2
T3
******** [ SLAVE ] ********
use bug_37656;
show tables;
Tables_in_bug_37656
t2
t3
CREATE TABLE t1 (a INT);
******** [ MASTER ] ********
use BUG_37656;
INSERT INTO T1 VALUES (1);
INSERT INTO T2 VALUES (1);
use test;
INSERT INTO BUG_37656.T1 VALUES (2);
INSERT INTO BUG_37656.T2 VALUES (2);
LOAD DATA INFILE '../../std_data/words.dat' INTO TABLE BUG_37656.T3;
******** [ SLAVE ] ********
include/diff_tables.inc [master:BUG_37656.T2, slave:bug_37656.t2]
include/diff_tables.inc [master:BUG_37656.T3, slave:bug_37656.t3]
******** [ MASTER ] ********
DROP DATABASE BUG_37656;
include/rpl_reset.inc
CREATE DATABASE B50653;
USE B50653;
CREATE PROCEDURE b50653_proc() BEGIN SELECT 1; END;
DROP PROCEDURE b50653_proc;
DROP DATABASE B50653;
include/rpl_end.inc

View File

@ -0,0 +1,54 @@
include/master-slave.inc
[connection master]
DROP PROCEDURE IF EXISTS test.p1;
DROP PROCEDURE IF EXISTS test.p2;
DROP TABLE IF EXISTS test.t1;
CREATE TABLE test.t1(a INT,PRIMARY KEY(a))ENGINE=TOKUDB;
CREATE PROCEDURE test.p1()
BEGIN
INSERT INTO test.t1 VALUES (4);
SELECT get_lock("test", 100);
UPDATE test.t1 set a=a+4 WHERE a=4;
END|
CREATE PROCEDURE test.p2()
BEGIN
UPDATE test.t1 SET a=a+1;
END|
SELECT get_lock("test", 200);
get_lock("test", 200)
1
CALL test.p1();
CALL test.p2();
SELECT release_lock("test");
release_lock("test")
1
get_lock("test", 100)
1
SELECT release_lock("test");
release_lock("test")
1
SELECT * FROM test.t1;
a
5
SELECT * FROM test.t1;
a
5
DROP TABLE IF EXISTS test.t1;
CREATE TABLE test.t1(a INT,PRIMARY KEY(a))ENGINE=TOKUDB;
CALL test.p2();
CALL test.p1();
get_lock("test", 100)
1
SELECT release_lock("test");
release_lock("test")
1
SELECT * FROM test.t1;
a
8
SELECT * FROM test.t1;
a
8
DROP PROCEDURE IF EXISTS test.p1;
DROP PROCEDURE IF EXISTS test.p2;
DROP TABLE IF EXISTS test.t1;
include/rpl_end.inc

View File

@ -0,0 +1,41 @@
include/master-slave.inc
[connection master]
DROP TABLE IF EXISTS t1;
DROP TABLE IF EXISTS t2;
DROP PROCEDURE IF EXISTS p1;
DROP PROCEDURE IF EXISTS p2;
CREATE TABLE IF NOT EXISTS t1(name CHAR(16), birth DATE,PRIMARY KEY(name))ENGINE=TokuDB;
CREATE TABLE IF NOT EXISTS t2(name CHAR(16), age INT ,PRIMARY KEY(name))ENGINE=TokuDB;
CREATE PROCEDURE p1()
BEGIN
DECLARE done INT DEFAULT 0;
DECLARE spa CHAR(16);
DECLARE spb INT;
DECLARE cur1 CURSOR FOR SELECT name,
(YEAR(CURDATE())-YEAR(birth))-(RIGHT(CURDATE(),5)<RIGHT(birth,5))
FROM t1;
DECLARE CONTINUE HANDLER FOR SQLSTATE '02000' SET done = 1;
OPEN cur1;
SET AUTOCOMMIT=0;
REPEAT
FETCH cur1 INTO spa, spb;
IF NOT done THEN
START TRANSACTION;
INSERT INTO t2 VALUES (spa,spb);
COMMIT;
END IF;
UNTIL done END REPEAT;
SET AUTOCOMMIT=1;
CLOSE cur1;
END|
CREATE PROCEDURE p2()
BEGIN
INSERT INTO t1 VALUES ('MySQL','1993-02-04'),('ROCKS', '1990-08-27'),('Texas', '1999-03-30'),('kyle','2005-1-1');
END|
CALL p2();
CALL p1();
DROP TABLE t1;
DROP TABLE t2;
DROP PROCEDURE p1;
DROP PROCEDURE p2;
include/rpl_end.inc

View File

@ -0,0 +1,27 @@
include/master-slave.inc
[connection master]
DROP TRIGGER test.t1_bi_t2;
DROP TABLE IF EXISTS test.t1;
DROP TABLE IF EXISTS test.t2;
CREATE TABLE test.t1 (n MEDIUMINT NOT NULL AUTO_INCREMENT, d FLOAT, PRIMARY KEY(n))ENGINE=TOKUDB;
CREATE TABLE test.t2 (n MEDIUMINT NOT NULL, f FLOAT, PRIMARY KEY(n))ENGINE=TOKUDB;
CREATE TRIGGER test.t1_bi_t2 BEFORE INSERT ON test.t2 FOR EACH ROW INSERT INTO test.t1 VALUES (NULL, 1.234)//
INSERT INTO test.t2 VALUES (1, 0.0);
INSERT INTO test.t2 VALUES (1, 0.0);
Got one of the listed errors
select * from test.t1;
n d
1 1.234
select * from test.t2;
n f
1 0
select * from test.t1;
n d
1 1.234
select * from test.t2;
n f
1 0
DROP TRIGGER test.t1_bi_t2;
DROP TABLE test.t1;
DROP TABLE test.t2;
include/rpl_end.inc

View File

@ -0,0 +1,292 @@
include/master-slave.inc
[connection master]
include/stop_slave.inc
include/wait_for_slave_to_stop.inc
reset master;
reset slave;
start slave;
include/wait_for_slave_to_start.inc
set @save_slave_ddl_exec_mode=@@global.slave_ddl_exec_mode;
set @@global.slave_ddl_exec_mode=STRICT;
create table t1(n int not null auto_increment primary key)ENGINE=TokuDB;
insert into t1 values (NULL);
drop table t1;
create table t1 (word char(20) not null)ENGINE=TokuDB;
load data infile 'LOAD_FILE' into table t1 ignore 1 lines;
select count(*) from t1;
count(*)
69
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; create table t1(n int not null auto_increment primary key)ENGINE=TokuDB
master-bin.000001 # Gtid # # BEGIN GTID #-#-#
master-bin.000001 # Intvar # # INSERT_ID=1
master-bin.000001 # Query # # use `test`; insert into t1 values (NULL)
master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; DROP TABLE `t1` /* generated by server */
master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; create table t1 (word char(20) not null)ENGINE=TokuDB
master-bin.000001 # Gtid # # BEGIN GTID #-#-#
master-bin.000001 # Begin_load_query # # ;file_id=#;block_len=#
master-bin.000001 # Execute_load_query # # use `test`; LOAD DATA INFILE '../../std_data/words.dat' INTO TABLE `t1` FIELDS TERMINATED BY '\t' ENCLOSED BY '' ESCAPED BY '\\' LINES TERMINATED BY '\n' IGNORE 1 LINES (`word`) ;file_id=#
master-bin.000001 # Xid # # COMMIT /* XID */
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; create table t1(n int not null auto_increment primary key)ENGINE=TokuDB
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; create table t1(n int not null auto_increment primary key)ENGINE=TokuDB
master-bin.000001 # Gtid # # BEGIN GTID #-#-#
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Gtid # # BEGIN GTID #-#-#
master-bin.000001 # Intvar # # INSERT_ID=1
master-bin.000001 # Query # # use `test`; insert into t1 values (NULL)
master-bin.000001 # Xid # # COMMIT /* XID */
flush logs;
create table t3 (a int)ENGINE=TokuDB;
select * from t1 order by 1 asc;
word
Aarhus
Aaron
Aaron
Ababa
Ababa
aback
aback
abaft
abaft
abandon
abandon
abandoned
abandoned
abandoning
abandoning
abandonment
abandonment
abandons
abandons
abase
abased
abasement
abasements
abases
abash
abashed
abashes
abashing
abasing
abate
abated
abatement
abatements
abater
abates
abating
Abba
abbe
abbey
abbeys
abbot
abbots
Abbott
abbreviate
abbreviated
abbreviates
abbreviating
abbreviation
abbreviations
Abby
abdomen
abdomens
abdominal
abduct
abducted
abduction
abductions
abductor
abductors
abducts
Abe
abed
Abel
Abelian
Abelson
Aberdeen
Abernathy
aberrant
aberration
select * from t1 order by 1 asc;
word
Aarhus
Aaron
Aaron
Ababa
Ababa
aback
aback
abaft
abaft
abandon
abandon
abandoned
abandoned
abandoning
abandoning
abandonment
abandonment
abandons
abandons
abase
abased
abasement
abasements
abases
abash
abashed
abashes
abashing
abasing
abate
abated
abatement
abatements
abater
abates
abating
Abba
abbe
abbey
abbeys
abbot
abbots
Abbott
abbreviate
abbreviated
abbreviates
abbreviating
abbreviation
abbreviations
Abby
abdomen
abdomens
abdominal
abduct
abducted
abduction
abductions
abductor
abductors
abducts
Abe
abed
Abel
Abelian
Abelson
Aberdeen
Abernathy
aberrant
aberration
flush logs;
include/stop_slave.inc
include/start_slave.inc
create table t2 (n int)ENGINE=TokuDB;
insert into t2 values (1);
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; create table t1(n int not null auto_increment primary key)ENGINE=TokuDB
master-bin.000001 # Gtid # # BEGIN GTID #-#-#
master-bin.000001 # Intvar # # INSERT_ID=1
master-bin.000001 # Query # # use `test`; insert into t1 values (NULL)
master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; DROP TABLE `t1` /* generated by server */
master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; create table t1 (word char(20) not null)ENGINE=TokuDB
master-bin.000001 # Gtid # # BEGIN GTID #-#-#
master-bin.000001 # Begin_load_query # # ;file_id=#;block_len=#
master-bin.000001 # Execute_load_query # # use `test`; LOAD DATA INFILE '../../std_data/words.dat' INTO TABLE `t1` FIELDS TERMINATED BY '\t' ENCLOSED BY '' ESCAPED BY '\\' LINES TERMINATED BY '\n' IGNORE 1 LINES (`word`) ;file_id=#
master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Rotate # # master-bin.000002;pos=POS
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000002 # Binlog_checkpoint # # master-bin.000002
master-bin.000002 # Gtid # # GTID #-#-#
master-bin.000002 # Query # # use `test`; create table t3 (a int)ENGINE=TokuDB
master-bin.000002 # Gtid # # GTID #-#-#
master-bin.000002 # Query # # use `test`; create table t2 (n int)ENGINE=TokuDB
master-bin.000002 # Gtid # # BEGIN GTID #-#-#
master-bin.000002 # Query # # use `test`; insert into t2 values (1)
master-bin.000002 # Xid # # COMMIT /* XID */
show binary logs;
Log_name File_size
master-bin.000001 #
master-bin.000002 #
show binary logs;
Log_name File_size
slave-bin.000001 #
slave-bin.000002 #
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
slave-bin.000001 # Gtid # # GTID #-#-#
slave-bin.000001 # Query # # use `test`; create table t1(n int not null auto_increment primary key)ENGINE=TokuDB
slave-bin.000001 # Gtid # # BEGIN GTID #-#-#
slave-bin.000001 # Intvar # # INSERT_ID=1
slave-bin.000001 # Query # # use `test`; insert into t1 values (NULL)
slave-bin.000001 # Xid # # COMMIT /* XID */
slave-bin.000001 # Gtid # # GTID #-#-#
slave-bin.000001 # Query # # use `test`; DROP TABLE `t1` /* generated by server */
slave-bin.000001 # Gtid # # GTID #-#-#
slave-bin.000001 # Query # # use `test`; create table t1 (word char(20) not null)ENGINE=TokuDB
slave-bin.000001 # Gtid # # BEGIN GTID #-#-#
slave-bin.000001 # Begin_load_query # # ;file_id=#;block_len=#
slave-bin.000001 # Execute_load_query # # use `test`; LOAD DATA INFILE '../../tmp/SQL_LOAD-<SERVER UUID>-<MASTER server-id>-<file-id>.<extension>' INTO TABLE `t1` FIELDS TERMINATED BY '\t' ENCLOSED BY '' ESCAPED BY '\\' LINES TERMINATED BY '\n' IGNORE 1 LINES (`word`) ;file_id=#
slave-bin.000001 # Xid # # COMMIT /* XID */
slave-bin.000001 # Gtid # # GTID #-#-#
slave-bin.000001 # Query # # use `test`; create table t3 (a int)ENGINE=TokuDB
slave-bin.000001 # Rotate # # slave-bin.000002;pos=POS
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
slave-bin.000002 # Binlog_checkpoint # # slave-bin.000002
slave-bin.000002 # Gtid # # GTID #-#-#
slave-bin.000002 # Query # # use `test`; create table t2 (n int)ENGINE=TokuDB
slave-bin.000002 # Gtid # # BEGIN GTID #-#-#
slave-bin.000002 # Query # # use `test`; insert into t2 values (1)
slave-bin.000002 # Xid # # COMMIT /* XID */
include/check_slave_is_running.inc
show binlog events in 'slave-bin.000005' from 4;
ERROR HY000: Error when executing command SHOW BINLOG EVENTS: Could not find target log
DROP TABLE t1;
DROP TABLE t2;
DROP TABLE t3;
include/rpl_reset.inc
create table t1(a int auto_increment primary key, b int);
insert into t1 values (NULL, 1);
set insert_id=5;
insert into t1 values (NULL, last_insert_id()), (NULL, last_insert_id());
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; create table t1(a int auto_increment primary key, b int)
master-bin.000001 # Gtid # # BEGIN GTID #-#-#
master-bin.000001 # Intvar # # INSERT_ID=1
master-bin.000001 # Query # # use `test`; insert into t1 values (NULL, 1)
master-bin.000001 # Query # # COMMIT
master-bin.000001 # Gtid # # BEGIN GTID #-#-#
master-bin.000001 # Intvar # # LAST_INSERT_ID=1
master-bin.000001 # Intvar # # INSERT_ID=5
master-bin.000001 # Query # # use `test`; insert into t1 values (NULL, last_insert_id()), (NULL, last_insert_id())
master-bin.000001 # Query # # COMMIT
select * from t1;
a b
1 1
5 1
6 1
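(Editorial sketch, not part of the recorded result: the 1/5/6 values above follow from how SET INSERT_ID and LAST_INSERT_ID() interact; the statements are taken from the test itself, the comments are added.)
create table t1(a int auto_increment primary key, b int);
insert into t1 values (NULL, 1);   -- assigns a=1 and sets LAST_INSERT_ID() to 1
set insert_id=5;                   -- forces the next auto_increment value in this session to 5
insert into t1 values (NULL, last_insert_id()), (NULL, last_insert_id());
-- last_insert_id() still evaluates to 1 for both rows (it only changes once the
-- statement completes), so the rows become (5,1) and (6,1), matching 1/5/6 above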
drop table t1;
set @@global.slave_ddl_exec_mode=@save_slave_ddl_exec_mode;
include/rpl_end.inc

File diff suppressed because it is too large

View File

@ -0,0 +1,48 @@
include/master-slave.inc
Warnings:
Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
[connection master]
******** [ MASTER ] ********
CREATE DATABASE BUG_37656;
use BUG_37656;
show databases like 'BUG_37656';
Database (BUG_37656)
BUG_37656
******** [ SLAVE ] ********
show databases like 'bug_37656';
Database (bug_37656)
bug_37656
******** [ MASTER ] ********
CREATE TABLE T1 (a int);
CREATE TABLE T2 (b int) ENGINE=TokuDB;
CREATE TABLE T3 (txt TEXT);
show tables;
Tables_in_BUG_37656
T1
T2
T3
******** [ SLAVE ] ********
use bug_37656;
show tables;
Tables_in_bug_37656
t2
t3
CREATE TABLE t1 (a INT);
******** [ MASTER ] ********
use BUG_37656;
INSERT INTO T1 VALUES (1);
INSERT INTO T2 VALUES (1);
LOAD DATA INFILE '../../std_data/words.dat' INTO TABLE BUG_37656.T3;
******** [ SLAVE ] ********
include/diff_tables.inc [master:BUG_37656.T2, slave:bug_37656.t2]
include/diff_tables.inc [master:BUG_37656.T3, slave:bug_37656.t3]
******** [ MASTER ] ********
DROP DATABASE BUG_37656;
include/rpl_reset.inc
CREATE DATABASE B50653;
USE B50653;
CREATE PROCEDURE b50653_proc() BEGIN SELECT 1; END;
DROP PROCEDURE b50653_proc;
DROP DATABASE B50653;
include/rpl_end.inc
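(Editorial note, a hedged sketch: the case difference between BUG_37656 on the master and bug_37656 on the slave above is what differing lower_case_table_names settings produce; checking the setting is a one-liner.)
show variables like 'lower_case_table_names';  -- expected to differ between master and slave in this scenario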

View File

@ -1,3 +1,4 @@
-- source include/not_ndb_default.inc
-- source include/have_tokudb.inc
let $engine_type=TokuDB;

View File

@ -11,9 +11,9 @@
# 3 - NULL --> NOT NULL ( sql-mode != STRICT and no failures)
#
#################################################################################
--source include/master-slave.inc
--source include/have_tokudb.inc
--source include/have_binlog_format_row.inc
--source include/master-slave.inc
let $engine=TokuDB;
--source extra/rpl_tests/rpl_not_null.test

View File

@ -1,5 +1,2 @@
--read-only=ON
--loose-tokudb-rpl-unique-checks-delay=10000
--loose-tokudb-rpl-unique-checks=OFF
--loose-tokudb-rpl-lookup-rows-delay=10000
--loose-tokudb-rpl-lookup-rows=OFF
--read-only=ON --loose-tokudb-rpl-unique-checks-delay=10000 --loose-tokudb-rpl-unique-checks=OFF --loose-tokudb-rpl-lookup-rows-delay=10000 --loose-tokudb-rpl-lookup-rows=OFF

View File

@ -1,5 +1 @@
--read-only=ON
--loose-tokudb-rpl-unique-checks-delay=10000
--loose-tokudb-rpl-unique-checks=OFF
--loose-tokudb-rpl-lookup-rows-delay=10000
--loose-tokudb-rpl-lookup-rows=OFF
--read-only=ON --loose-tokudb-rpl-unique-checks-delay=10000 --loose-tokudb-rpl-unique-checks=OFF --loose-tokudb-rpl-lookup-rows-delay=10000 --loose-tokudb-rpl-lookup-rows=OFF

View File

@ -42,15 +42,16 @@ update t set b=b+2 where a=1;
update t set b=b+3 where a=4;
update t set b=b+4 where a=3;
update t set b=b+1 where 1<=a and a<=3;
select unix_timestamp() into @tstart;
--echo # select unix_timestamp() into @tstart;
# wait for the delete to finish on the slave
connection master;
sync_slave_with_master;
# source include/sync_slave_sql_with_master.inc;
connection master;
select unix_timestamp() into @tend;
#select @tend-@tstart <= 5; # assert no delay in the delete time
--echo # Commented out for MariaDB
--echo # select unix_timestamp() into @tend;
--echo # select @tend-@tstart <= 5; # assert no delay in the delete time
connection slave;
select * from t;
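(A hedged sketch of the timing assertion the upstream test performs, reconstructed from the commented-out lines above; the 5-second bound comes from those comments.)
select unix_timestamp() into @tstart;
-- run the replicated workload, then wait for the slave to catch up
select unix_timestamp() into @tend;
select @tend - @tstart <= 5;  -- expect 1: the slave applied the changes without the injected lookup delay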

View File

@ -1,3 +1,2 @@
--read-only=ON
--loose-tokudb-rpl-unique-checks-delay=5000
--loose-tokudb-rpl-unique-checks=OFF
--read-only=ON --loose-tokudb-rpl-unique-checks-delay=5000 --loose-tokudb-rpl-unique-checks=OFF

View File

@ -22,7 +22,6 @@ connection master;
# select @@autocommit;
eval create table t (a bigint not null, primary key(a)) engine=$engine;
# show create table t;
select unix_timestamp() into @tstart;
insert into t values (1);
insert into t values (2),(3);
insert into t values (4);
@ -30,8 +29,7 @@ insert into t values (4);
sync_slave_with_master;
# source include/sync_slave_sql_with_master.inc;
#connection master;
#select unix_timestamp()-@tstart;
connection master;
connection slave;
# insert into t values (5); # test read-only

View File

@ -1,11 +1,11 @@
-- source include/have_tokudb.inc
-- source include/have_binlog_format_row.inc
-- source include/master-slave.inc
--source include/have_tokudb.inc
--source include/have_binlog_format_row.inc
--source include/master-slave.inc
connection slave;
let $bit_field_special = ALL_LOSSY;
let $type= 'TokuDB';
let $extra_index= ;
-- source extra/rpl_tests/rpl_row_basic.test
let $type = 'TokuDB';
let $extra_index = ;
--source extra/rpl_tests/rpl_row_basic.test
--source include/rpl_end.inc

View File

@ -4,6 +4,7 @@
source include/master-slave.inc;
source include/have_tokudb.inc;
source include/have_binlog_format_statement.inc;
# GTIDs are disabled because the shared test uses DROP TEMPORARY inside a transaction
source extra/rpl_tests/rpl_tokudb.test;
--source include/rpl_end.inc
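(Editorial aside, a minimal sketch of the restriction the comment above refers to, assuming a server running with enforce_gtid_consistency; the table name tmp_t is hypothetical.)
begin;
create temporary table tmp_t (a int);  -- rejected under GTID consistency: temporary-table DDL
                                       -- may only run outside a transactional context
drop temporary table tmp_t;            -- likewise disallowed inside the transaction
commit;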

View File

@ -0,0 +1,6 @@
-- source include/not_ndb_default.inc
-- source include/have_tokudb.inc
-- source include/master-slave.inc
let $engine_type=tokudb;
-- source extra/rpl_tests/rpl_commit_after_flush.test
--source include/rpl_end.inc

View File

@ -0,0 +1,7 @@
#################################
# Wrapper for rpl_insert_id.test#
#################################
-- source include/not_ndb_default.inc
-- source include/have_tokudb.inc
let $engine_type=tokudb;
-- source extra/rpl_tests/rpl_insert_id.test

View File

@ -0,0 +1,7 @@
#################################
# Wrapper for rpl_insert_id.test#
#################################
-- source include/not_ndb_default.inc
-- source include/have_tokudb.inc
let $engine_type=tokudb;
-- source extra/rpl_tests/rpl_insert_id_pk.test

View File

@ -0,0 +1,4 @@
-- source include/not_ndb_default.inc
-- source include/have_tokudb.inc
let $engine_type=TokuDB;
-- source extra/rpl_tests/rpl_multi_update.test

View File

@ -0,0 +1 @@
--replicate-ignore-table=nothing.sensible

View File

@ -0,0 +1,13 @@
#######################################################
# Wrapper for rpl_multi_update2.test to allow multi #
# Engines to reuse test code. By JBM 2006-02-15 #
# Added comments section and to skip when ndb is #
# Default engine. #
#######################################################
--source include/not_ndb_default.inc
--source include/have_tokudb.inc
--source include/master-slave.inc
call mtr.add_suppression('Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT');
let $engine_type=TokuDB;
--source extra/rpl_tests/rpl_multi_update2.test
--source include/rpl_end.inc

View File

@ -0,0 +1,13 @@
#######################################################
# Wrapper for rpl_multi_update3.test to allow multi #
# Engines to reuse test code. By JBM 2006-02-15 #
# Added comments section and to skip when ndb is #
# Default engine. #
#######################################################
--source include/have_tokudb.inc
--source include/not_ndb_default.inc
--source include/master-slave.inc
call mtr.add_suppression('Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT.');
let $engine_type=TokuDB;
--source extra/rpl_tests/rpl_multi_update3.test
--source include/rpl_end.inc

View File

@ -1,5 +1,7 @@
# test the TokuDB read-free replication feature with a partitioned table
skip MDEV-13441;
--source include/have_partition.inc
--source include/have_debug.inc
--source include/have_tokudb.inc
--source include/have_binlog_format_row.inc

View File

@ -0,0 +1 @@
--transaction_isolation=READ-COMMITTED

View File

@ -0,0 +1 @@
--skip-slave-start --relay-log-info-repository=TABLE --relay-log-recovery=1 --transaction_isolation=READ-COMMITTED
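(Editorial note, a hedged sketch: with --relay-log-info-repository=TABLE and --relay-log-recovery=1 the slave stores its applied position transactionally and rebuilds the relay log after a crash; the position store can be inspected as below.)
show variables like 'relay_log_recovery';  -- expect ON for this test
select * from mysql.slave_relay_log_info;  -- the crash-safe position store used when the repository is TABLE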

View File

@ -0,0 +1,19 @@
# This test takes a long time, so only run it with the --big-test mtr flag.
--source include/big_test.inc
--source include/not_embedded.inc
--source include/not_valgrind.inc
--source include/have_debug.inc
--source include/have_tokudb.inc
--source include/have_binlog_format_row.inc
--source include/not_mts_slave_parallel_workers.inc
--source include/master-slave.inc
call mtr.add_suppression('Attempting backtrace');
call mtr.add_suppression("Recovery from master pos .* and file master-bin.000001");
call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT");
call mtr.add_suppression(".* InnoDB: Warning: allocated tablespace .*, old maximum was .*");
let $engine_type=TokuDB;
let $database_name=test;
--source extra/rpl_tests/rpl_crash_safe.test
--source include/rpl_end.inc

View File

@ -0,0 +1,54 @@
# Skip this test in daily Valgrind runs
skip Require rpl files from MySQL;
--source include/no_valgrind_without_big.inc
-- source include/have_innodb.inc
-- source include/have_tokudb.inc
#
# This file contains tests for WL#5096.
#
-- let $rpl_topology= 1->2->3
-- source include/rpl_init.inc
-- source include/have_binlog_format_row.inc
#
# WL#5096 Tests.
#
#
# Tests combinations of binlog-row-image against mixes of InnoDB and TokuDB
# storage engines on all three servers.
#
# All the combinations need not be separated into their own files as
# the tests for the index and engine mixes are, because the noblobs test
# script does not take too long, so we do not risk triggering a PB2 timeout
# on valgrind runs.
#
## NOBLOB
-- let $row_img_set=server_1:NOBLOB:N,server_2:NOBLOB:Y,server_3:NOBLOB:Y
-- source include/rpl_row_img_set.inc
-- let $row_img_test_script= extra/rpl_tests/rpl_row_img_blobs.test
-- source suite/tokudb.rpl/include/rpl_tokudb_row_img_general_loop.inc
## MINIMAL
-- let $row_img_set=server_1:MINIMAL:N,server_2:MINIMAL:Y,server_3:MINIMAL:Y
-- source include/rpl_row_img_set.inc
-- let $row_img_test_script= extra/rpl_tests/rpl_row_img_blobs.test
-- source suite/tokudb.rpl/include/rpl_tokudb_row_img_general_loop.inc
## FULL
-- let $row_img_set=server_1:FULL:N,server_2:FULL:Y,server_3:FULL:Y
-- source include/rpl_row_img_set.inc
-- let $row_img_test_script= extra/rpl_tests/rpl_row_img_blobs.test
-- source suite/tokudb.rpl/include/rpl_tokudb_row_img_general_loop.inc
-- source include/rpl_end.inc
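(Editorial sketch: the NOBLOB/MINIMAL/FULL labels in the $row_img_set lines map onto the binlog_row_image server variable; a hedged illustration of what the loop configures on each server.)
set global binlog_row_image = 'NOBLOB';   -- like FULL, but BLOB/TEXT columns are omitted unless required or changed
set global binlog_row_image = 'MINIMAL';  -- log only key columns (before image) and changed columns (after image)
set global binlog_row_image = 'FULL';     -- log every column (the default)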

View File

@ -0,0 +1,51 @@
# Skip this test in daily Valgrind runs
skip Require rpl files from MySQL;
-- source include/no_valgrind_without_big.inc
#
# This file contains tests for WL#5096 and bug fixes.
#
-- source include/have_binlog_format_row.inc
-- source include/not_gtid_enabled.inc
-- let $rpl_topology= 1->2->3
-- source include/rpl_init.inc
-- connection server_1
-- source include/have_innodb.inc
-- source include/have_tokudb.inc
-- connection server_2
-- source include/have_innodb.inc
-- source include/have_tokudb.inc
-- connection server_3
-- source include/have_innodb.inc
-- source include/have_tokudb.inc
-- connection server_1
#
# WL#5096
#
#
# Tests for different storage engines on each server,
# but same index structure on tables. The tests are conducted
# using FULL binlog-row-image on all servers.
#
-- let $row_img_set=server_1:FULL:N,server_2:FULL:Y,server_3:FULL:Y
-- source include/rpl_row_img_set.inc
-- let $row_img_test_script= extra/rpl_tests/rpl_row_img.test
-- source suite/tokudb.rpl/include/rpl_tokudb_row_img_general_loop.inc
#
# BUG#49100
#
-- echo ### Testing with TokuDB storage engine
-- let $engine=TokuDB
-- source extra/rpl_tests/rpl_row_empty_imgs.test
-- source include/rpl_end.inc

View File

@ -0,0 +1,43 @@
# Skip this test in daily Valgrind runs
skip Require rpl files from MySQL;
-- source include/no_valgrind_without_big.inc
#
# This file contains tests for WL#5096 and bug fixes.
#
-- let $rpl_topology= 1->2->3
-- source include/rpl_init.inc
-- source include/have_binlog_format_row.inc
-- connection server_1
-- source include/have_innodb.inc
-- source include/have_tokudb.inc
-- connection server_2
-- source include/have_innodb.inc
-- source include/have_tokudb.inc
-- connection server_3
-- source include/have_innodb.inc
-- source include/have_tokudb.inc
-- connection server_1
#
# WL#5096
#
#
# Tests for different storage engines on each server,
# but same index structure on tables. The tests are conducted
# using MINIMAL binlog-row-image on all servers.
#
-- let $row_img_set=server_1:MINIMAL:N,server_2:MINIMAL:Y,server_3:MINIMAL:Y
-- source include/rpl_row_img_set.inc
-- let $row_img_test_script= extra/rpl_tests/rpl_row_img.test
-- source suite/tokudb.rpl/include/rpl_tokudb_row_img_general_loop.inc
-- let $row_img_set=server_1:FULL:N,server_2:FULL:Y,server_3:FULL:Y
-- source include/rpl_row_img_set.inc
-- source include/rpl_end.inc

View File

@ -0,0 +1,43 @@
# Skip this test in daily Valgrind runs
skip Require rpl files from MySQL;
-- source include/no_valgrind_without_big.inc
#
# This file contains tests for WL#5096 and bug fixes.
#
-- let $rpl_topology= 1->2->3
-- source include/rpl_init.inc
-- source include/have_binlog_format_row.inc
-- connection server_1
-- source include/have_innodb.inc
-- source include/have_tokudb.inc
-- connection server_2
-- source include/have_innodb.inc
-- source include/have_tokudb.inc
-- connection server_3
-- source include/have_innodb.inc
-- source include/have_tokudb.inc
-- connection server_1
#
# WL#5096
#
#
# Tests for different storage engines on each server,
# but same index structure on tables. The tests are conducted
# using NOBLOB binlog-row-image on all servers.
#
-- let $row_img_set=server_1:NOBLOB:N,server_2:NOBLOB:Y,server_3:NOBLOB:Y
-- source include/rpl_row_img_set.inc
-- let $row_img_test_script= extra/rpl_tests/rpl_row_img.test
-- source suite/tokudb.rpl/include/rpl_tokudb_row_img_general_loop.inc
-- let $row_img_set=server_1:FULL:N,server_2:FULL:Y,server_3:FULL:Y
-- source include/rpl_row_img_set.inc
-- source include/rpl_end.inc

View File

@ -0,0 +1,39 @@
# Skip this test in daily Valgrind runs
skip Require rpl files from MySQL;
-- source include/no_valgrind_without_big.inc
#
# This file contains tests for WL#5096.
#
-- let $rpl_topology= 1->2->3
-- source include/rpl_init.inc
-- source include/have_binlog_format_row.inc
-- connection server_1
-- source include/have_innodb.inc
-- source include/have_tokudb.inc
-- connection server_2
-- source include/have_innodb.inc
-- source include/have_tokudb.inc
-- connection server_3
-- source include/have_innodb.inc
-- source include/have_tokudb.inc
-- connection server_1
#
# WL#5096 Tests.
#
#
# Tests FULL image against a mix of InnoDB and TokuDB engines on
# each of the three servers.
#
-- let $row_img_set=server_1:FULL:N,server_2:FULL:Y,server_3:FULL:Y
-- source include/rpl_row_img_set.inc
-- let $row_img_test_script= extra/rpl_tests/rpl_row_img_diff_indexes.test
-- source suite/tokudb.rpl/include/rpl_tokudb_row_img_general_loop.inc
-- source include/rpl_end.inc

Some files were not shown because too many files have changed in this diff