From 605aa82f5d0ae0a7521d06a9a0911b453e9dbf0d Mon Sep 17 00:00:00 2001
From: Sujatha Sivakumar
Date: Fri, 10 Jan 2014 15:11:56 +0530
Subject: [PATCH] Bug#17081415:>=4GB ROW EVENT CRASHES SERVER WITH WILD MEMCPY
 OF ROW DATA

Problem:
========
Inserting a row larger than 4GB while the server uses row-based
replication (RBR) crashes the server.

Analysis:
========
Row-based binary logging logs changes to individual table rows.
During the execution of DML statements under RBR, the actual row
data is accumulated in the "m_rows_buf" buffer, whose contents are
then written to the binary log. "m_rows_buf" is prepared in
"Rows_log_event::do_add_row_data".

When a huge row is inserted, as in this bug scenario where the row
size is 4294971520 > UINT_MAX (4294967295), "m_rows_buf" is
reallocated to accommodate the row data before the row is copied
into it. In this realloc call the requested size is cast to "uint",
which overflows, so the reallocated buffer is smaller than what was
requested. The subsequent copy of the row data then writes past the
end of the buffer and crashes the server.

Hence rows larger than 4GB cannot be written to the binary log: an
event's length is stored in 4 bytes, which caps how large an event
can grow, so such rows cannot be replicated using row-based
replication at all.

Fix:
===
Generate an error if the row size exceeds 4GB.
---
 sql/log_event.cc | 27 +++++++++++++++++++++++++--
 1 file changed, 25 insertions(+), 2 deletions(-)

diff --git a/sql/log_event.cc b/sql/log_event.cc
index 1ff29f2730e..8724239b6d3 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -7767,8 +7767,31 @@ int Rows_log_event::do_add_row_data(uchar *row_data, size_t length)
   if (static_cast<size_t>(m_rows_end - m_rows_cur) <= length)
   {
     size_t const block_size= 1024;
-    my_ptrdiff_t const cur_size= m_rows_cur - m_rows_buf;
-    my_ptrdiff_t const new_alloc=
+    ulong cur_size= m_rows_cur - m_rows_buf;
+    DBUG_EXECUTE_IF("simulate_too_big_row_case1",
+                    cur_size= UINT_MAX32 - (block_size * 10);
+                    length= UINT_MAX32 - (block_size * 10););
+    DBUG_EXECUTE_IF("simulate_too_big_row_case2",
+                    cur_size= UINT_MAX32 - (block_size * 10);
+                    length= block_size * 10;);
+    DBUG_EXECUTE_IF("simulate_too_big_row_case3",
+                    cur_size= block_size * 10;
+                    length= UINT_MAX32 - (block_size * 10););
+    DBUG_EXECUTE_IF("simulate_too_big_row_case4",
+                    cur_size= UINT_MAX32 - (block_size * 10);
+                    length= (block_size * 10) - block_size + 1;);
+    ulong remaining_space= UINT_MAX32 - cur_size;
+    /* Check that the new data fits within the remaining space and that
+       we can add block_size without wrapping.
+     */
+    if (length > remaining_space ||
+        ((length + block_size) > remaining_space))
+    {
+      sql_print_error("The row data is greater than 4GB, which is too big to "
+                      "write to the binary log.");
+      DBUG_RETURN(ER_BINLOG_ROW_LOGGING_FAILED);
+    }
+    ulong const new_alloc=
         block_size * ((cur_size + length + block_size - 1) / block_size);
     uchar* const new_buf= (uchar*)my_realloc((uchar*)m_rows_buf, (uint) new_alloc,
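
For illustration, here is a minimal standalone C++ sketch of the
narrowing described in the Analysis section. It is not part of the
patch; the variable names are hypothetical, and only the 64-to-32-bit
narrowing itself mirrors the old (uint) cast in log_event.cc:

  #include <cstdio>

  int main()
  {
    /* Row size from the bug report: 4294971520 bytes, just over
       UINT_MAX. Stands in for the 64-bit size passed to my_realloc(). */
    unsigned long long const requested= 4294971520ULL;

    /* The old code narrowed the size with a (uint) cast, so on LP64
       platforms the high 32 bits are discarded. */
    unsigned int const narrowed= (unsigned int) requested;

    /* Prints: requested=4294971520 narrowed=4224. A 4224-byte buffer
       is allocated, then ~4GB of row data is copied into it -- the
       "wild memcpy" in the bug title. */
    std::printf("requested=%llu narrowed=%u\n", requested, narrowed);
    return 0;
  }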
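
The guard added by the patch deliberately compares the new length
against UINT_MAX32 - cur_size rather than computing cur_size + length
directly, since that sum can itself wrap. A sketch of the same check
with hypothetical wrapper names, runnable on its own:

  #include <cstdint>
  #include <cstdio>

  /* Hypothetical stand-in for the patch's check: true if appending
     'length' bytes to a buffer already holding 'cur_size' bytes, plus
     one block of rounding slack, would exceed the 4GB-1 limit. */
  static bool row_too_big(uint32_t cur_size, uint64_t length)
  {
    uint32_t const block_size= 1024;
    uint32_t const remaining_space= UINT32_MAX - cur_size;
    /* Compare against the remaining space instead of summing the
       sizes, so the check itself cannot overflow. */
    return length > remaining_space ||
           length + block_size > remaining_space;
  }

  int main()
  {
    std::printf("%d\n", row_too_big(1024, 4294971520ULL)); /* 1: rejected */
    std::printf("%d\n", row_too_big(1024, 4096));          /* 0: fits */
    return 0;
  }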