Added support to MEM_ROOT for write-protected memory

This is useful for things like Item_true and Item_false that we
allocate and initialize once and want to ensure that nothing can
change them.

Main changes:
- Memory protection is achieved by allocating memory with mmap() and
  protecting it from writes with mprotect()
- init_alloc_root(...,MY_ROOT_USE_MPROTECT) will create a
  memroot that one can later use with protect_root() to turn it
  read-only or back to read-write. All allocations to this
  memroot are done with mmap() to ensure page-aligned allocations.
- alloc_root() code was rearranged to combine normal and valgrind code.
- init_alloc_root() now rounds block sizes to powers of 2, to reduce
  memory fragmentation.
- Changed the MEM_ROOT structure to make it smaller. Also renamed
  MEM_ROOT::m_psi_key to psi_key.
- Moved the MY_THREAD_SPECIFIC marker in MEM_ROOT from the block size
  (an old hack) to the new flags field.
- Added global variable my_system_page_size. This is initialized at
  startup.
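
A minimal usage sketch (not part of the patch; it uses the API added
below, omits error handling, and mirrors how mysqld.cc uses
read_only_root):

    #include <my_global.h>
    #include <my_sys.h>
    #ifdef HAVE_SYS_MMAN_H
    #include <sys/mman.h>                     /* PROT_READ, PROT_WRITE */
    #endif

    MEM_ROOT ro_root;

    void build_read_only_strings(void)
    {
      /* Every block of this root is mmap()ed and page aligned */
      init_alloc_root(PSI_NOT_INSTRUMENTED, &ro_root, 1024, 0,
                      MYF(MY_ROOT_USE_MPROTECT));
      char *msg= strdup_root(&ro_root, "immutable");
      protect_root(&ro_root, PROT_READ);      /* writes now fault */
      /* ... use msg, knowing nothing can change it ... */
      protect_root(&ro_root, PROT_READ | PROT_WRITE);
      free_root(&ro_root, MYF(0));            /* needs write access again */
    }
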
Monty 2021-06-18 18:40:05 +03:00, committed by Vicențiu Ciorbaru
parent d378a466a5
commit b1d81974b2
15 changed files with 237 additions and 94 deletions

View File

@@ -179,6 +179,7 @@
#cmakedefine HAVE_MLOCKALL 1
#cmakedefine HAVE_MMAP 1
#cmakedefine HAVE_MMAP64 1
#cmakedefine HAVE_MPROTECT 1
#cmakedefine HAVE_PERROR 1
#cmakedefine HAVE_POLL 1
#cmakedefine HAVE_POSIX_FALLOCATE 1

View File

@@ -374,6 +374,7 @@ CHECK_FUNCTION_EXISTS (mlock HAVE_MLOCK)
CHECK_FUNCTION_EXISTS (mlockall HAVE_MLOCKALL)
CHECK_FUNCTION_EXISTS (mmap HAVE_MMAP)
CHECK_FUNCTION_EXISTS (mmap64 HAVE_MMAP64)
CHECK_FUNCTION_EXISTS (mprotect HAVE_MPROTECT)
CHECK_FUNCTION_EXISTS (perror HAVE_PERROR)
CHECK_FUNCTION_EXISTS (poll HAVE_POLL)
CHECK_FUNCTION_EXISTS (posix_fallocate HAVE_POSIX_FALLOCATE)

View File

@@ -50,11 +50,12 @@ typedef struct st_mem_root
first free block in queue test counter (if it exceed
MAX_BLOCK_USAGE_BEFORE_DROP block will be dropped in 'used' list)
*/
unsigned int first_block_usage;
unsigned short first_block_usage;
unsigned short flags;
void (*error_handler)(void);
PSI_memory_key m_psi_key;
PSI_memory_key psi_key;
} MEM_ROOT;
#ifdef __cplusplus

View File

@@ -94,6 +94,7 @@ C_MODE_START
#define MY_SYNC_DIR 32768U /* my_create/delete/rename: sync directory */
#define MY_SYNC_FILESIZE 65536U /* my_sync(): safe sync when file is extended */
#define MY_THREAD_SPECIFIC 0x10000U /* my_malloc(): thread specific */
#define MY_ROOT_USE_MPROTECT 0x20000U /* init_alloc_root: read only segments */
/* Tree that should delete things automatically */
#define MY_TREE_WITH_DELETE 0x40000U
@@ -283,6 +284,7 @@ extern my_bool my_disable_locking, my_disable_async_io,
extern my_bool my_disable_sync, my_disable_copystat_in_redel;
extern char wild_many,wild_one,wild_prefix;
extern const char *charsets_dir;
extern size_t my_system_page_size;
enum cache_type
{
@@ -886,7 +888,6 @@ extern void my_free_lock(void *ptr);
#define my_free_lock(A) my_free((A))
#endif
#define alloc_root_inited(A) ((A)->min_malloc != 0)
#define ALLOC_ROOT_MIN_BLOCK_SIZE (MALLOC_OVERHEAD + sizeof(USED_MEM) + 8)
#define clear_alloc_root(A) do { (A)->free= (A)->used= (A)->pre_alloc= 0; (A)->min_malloc=0;} while(0)
extern void init_alloc_root(PSI_memory_key key, MEM_ROOT *mem_root,
size_t block_size, size_t pre_alloc_size,
@@ -897,6 +898,7 @@ extern void free_root(MEM_ROOT *root, myf MyFLAGS);
extern void set_prealloc_root(MEM_ROOT *root, char *ptr);
extern void reset_root_defaults(MEM_ROOT *mem_root, size_t block_size,
size_t prealloc_size);
extern void protect_root(MEM_ROOT *root, int prot);
extern char *strdup_root(MEM_ROOT *root,const char *str);
static inline char *safe_strdup_root(MEM_ROOT *root, const char *str)
{

View File

@@ -259,9 +259,10 @@ typedef struct st_mem_root
size_t min_malloc;
size_t block_size;
unsigned int block_num;
unsigned int first_block_usage;
unsigned short first_block_usage;
unsigned short flags;
void (*error_handler)(void);
PSI_memory_key m_psi_key;
PSI_memory_key psi_key;
} MEM_ROOT;
}
typedef struct st_typelib {

View File

@@ -20,15 +20,101 @@
#include <my_global.h>
#include <my_sys.h>
#include <m_string.h>
#include <my_bit.h>
#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif
#undef EXTRA_DEBUG
#define EXTRA_DEBUG
/* data packed in MEM_ROOT -> min_malloc */
#define MALLOC_FLAG(A) ((A & 1) ? MY_THREAD_SPECIFIC : 0)
/* Don't allocate too small blocks */
#define ROOT_MIN_BLOCK_SIZE 256
/* bits in MEM_ROOT->flags */
#define ROOT_FLAG_THREAD_SPECIFIC 1
#define ROOT_FLAG_MPROTECT 2
#define MALLOC_FLAG(R) MYF((R)->flags & ROOT_FLAG_THREAD_SPECIFIC ? MY_THREAD_SPECIFIC : 0)
#define TRASH_MEM(X) TRASH_FREE(((char*)(X) + ((X)->size-(X)->left)), (X)->left)
/*
Alloc memory through either my_malloc or mmap()
*/
static void *root_alloc(MEM_ROOT *root, size_t size, size_t *alloced_size,
myf my_flags)
{
*alloced_size= size;
#if defined(HAVE_MMAP) && defined(HAVE_MPROTECT) && defined(MAP_ANONYMOUS)
if (root->flags & ROOT_FLAG_MPROTECT)
{
void *res;
*alloced_size= MY_ALIGN(size, my_system_page_size);
res= my_mmap(0, *alloced_size, PROT_READ | PROT_WRITE,
MAP_NORESERVE | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (res == MAP_FAILED)
res= 0;
return res;
}
#endif /* HAVE_MMAP */
return my_malloc(root->psi_key, size,
my_flags | MYF(root->flags & ROOT_FLAG_THREAD_SPECIFIC ?
MY_THREAD_SPECIFIC : 0));
}
static void root_free(MEM_ROOT *root, void *ptr, size_t size)
{
#if defined(HAVE_MMAP) && defined(HAVE_MPROTECT) && defined(MAP_ANONYMOUS)
if (root->flags & ROOT_FLAG_MPROTECT)
my_munmap(ptr, size);
else
#endif
my_free(ptr);
}
/*
Calculate block sizes to use
Sizes will be updated to the next power of 2, minus operating system
memory management size.
The idea is to reduce memory fragmentation as most system memory
allocators use power-of-2 block sizes internally.
*/
static void calculate_block_sizes(MEM_ROOT *mem_root, size_t block_size,
size_t *pre_alloc_size)
{
size_t pre_alloc= *pre_alloc_size;
if (mem_root->flags & ROOT_FLAG_MPROTECT)
{
mem_root->block_size= MY_ALIGN(block_size, my_system_page_size);
if (pre_alloc)
pre_alloc= MY_ALIGN(pre_alloc, my_system_page_size);
}
else
{
DBUG_ASSERT(block_size <= UINT_MAX32);
mem_root->block_size= (my_round_up_to_next_power((uint32) block_size -
MALLOC_OVERHEAD)-
MALLOC_OVERHEAD);
if (pre_alloc)
pre_alloc= (my_round_up_to_next_power((uint32) pre_alloc -
MALLOC_OVERHEAD)-
MALLOC_OVERHEAD);
}
*pre_alloc_size= pre_alloc;
}
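/*
  Worked example (illustrative, assuming MALLOC_OVERHEAD is 8):
  a requested block_size of 1024 gives
  my_round_up_to_next_power(1024 - 8)= 1024, and a final block_size of
  1024 - 8= 1016, so the block plus malloc()'s own bookkeeping fills a
  1024-byte allocator bucket exactly instead of spilling into the next
  one.
*/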
/*
Initialize memory root
@@ -36,13 +122,18 @@
init_alloc_root()
mem_root - memory root to initialize
name - name of memroot (for debugging)
block_size - size of chunks (blocks) used for memory allocation
block_size - size of chunks (blocks) used for memory allocation.
Will be updated to the next power of 2, minus
internal and system memory management size. This
will reduce memory fragmentation as most system memory
allocators use power-of-2 block sizes internally.
(It is external size of chunk i.e. it should include
memory required for internal structures, thus it
should be no less than ALLOC_ROOT_MIN_BLOCK_SIZE)
should be no less than ROOT_MIN_BLOCK_SIZE).
pre_alloc_size - if non-0, then size of block that should be
pre-allocated during memory root initialization.
my_flags MY_THREAD_SPECIFIC flag for my_malloc
MY_ROOT_USE_MPROTECT for read-only protected memory
DESCRIPTION
This function prepares memory root for further use, sets initial size of
@@ -50,9 +141,6 @@
Although error can happen during execution of this function if
pre_alloc_size is non-0 it won't be reported. Instead it will be
reported as error in first alloc_root() on this memory root.
We don't want to change the structure size for MEM_ROOT.
Because of this, we store in MY_THREAD_SPECIFIC as bit 1 in block_size
*/
void init_alloc_root(PSI_memory_key key, MEM_ROOT *mem_root, size_t block_size,
@@ -63,25 +151,31 @@ void init_alloc_root(PSI_memory_key key, MEM_ROOT *mem_root, size_t block_size,
DBUG_PRINT("enter",("root: %p prealloc: %zu", mem_root, pre_alloc_size));
mem_root->free= mem_root->used= mem_root->pre_alloc= 0;
mem_root->min_malloc= 32;
mem_root->block_size= (block_size - ALLOC_ROOT_MIN_BLOCK_SIZE) & ~1;
mem_root->min_malloc= 32 + REDZONE_SIZE;
mem_root->block_size= MY_MAX(block_size, ROOT_MIN_BLOCK_SIZE);
mem_root->flags= 0;
if (my_flags & MY_THREAD_SPECIFIC)
mem_root->block_size|= 1;
mem_root->flags|= ROOT_FLAG_THREAD_SPECIFIC;
if (my_flags & MY_ROOT_USE_MPROTECT)
mem_root->flags|= ROOT_FLAG_MPROTECT;
calculate_block_sizes(mem_root, block_size, &pre_alloc_size);
mem_root->error_handler= 0;
mem_root->block_num= 4; /* We shift this with >>2 */
mem_root->first_block_usage= 0;
mem_root->m_psi_key= key;
mem_root->psi_key= key;
#if !(defined(HAVE_valgrind) && defined(EXTRA_DEBUG))
if (pre_alloc_size)
{
size_t size= pre_alloc_size + ALIGN_SIZE(sizeof(USED_MEM));
size_t alloced_size;
if ((mem_root->free= mem_root->pre_alloc=
(USED_MEM*) my_malloc(key, size, MYF(my_flags))))
(USED_MEM*) root_alloc(mem_root, pre_alloc_size, &alloced_size,
MYF(0))))
{
mem_root->free->size= size;
mem_root->free->left= pre_alloc_size;
mem_root->free->size= alloced_size;
mem_root->free->left= alloced_size - ALIGN_SIZE(sizeof(USED_MEM));
mem_root->free->next= 0;
TRASH_MEM(mem_root->free);
}
@@ -113,13 +207,14 @@ void reset_root_defaults(MEM_ROOT *mem_root, size_t block_size,
DBUG_ENTER("reset_root_defaults");
DBUG_ASSERT(alloc_root_inited(mem_root));
mem_root->block_size= (((block_size - ALLOC_ROOT_MIN_BLOCK_SIZE) & ~1) |
(mem_root->block_size & 1));
calculate_block_sizes(mem_root, block_size, &pre_alloc_size);
#if !(defined(HAVE_valgrind) && defined(EXTRA_DEBUG))
if (pre_alloc_size)
{
size_t size= pre_alloc_size + ALIGN_SIZE(sizeof(USED_MEM));
if (!mem_root->pre_alloc || mem_root->pre_alloc->size != size)
size_t size= mem_root->block_size, alloced_size;
if (!mem_root->pre_alloc ||
mem_root->pre_alloc->size != mem_root->block_size)
{
USED_MEM *mem, **prev= &mem_root->free;
/*
@@ -139,26 +234,23 @@ void reset_root_defaults(MEM_ROOT *mem_root, size_t block_size,
{
/* remove block from the list and free it */
*prev= mem->next;
my_free(mem);
root_free(mem_root, mem, mem->size);
}
else
prev= &mem->next;
}
/* Allocate new prealloc block and add it to the end of free list */
if ((mem= (USED_MEM *) my_malloc(mem_root->m_psi_key, size,
MYF(MALLOC_FLAG(mem_root->
block_size)))))
if ((mem= (USED_MEM *) root_alloc(mem_root, size, &alloced_size,
MYF(MY_WME))))
{
mem->size= size;
mem->left= pre_alloc_size;
mem->size= alloced_size;
mem->left= alloced_size - ALIGN_SIZE(sizeof(USED_MEM));
mem->next= *prev;
*prev= mem_root->pre_alloc= mem;
TRASH_MEM(mem);
}
else
{
mem_root->pre_alloc= 0;
}
}
}
else
@@ -171,37 +263,6 @@ void reset_root_defaults(MEM_ROOT *mem_root, size_t block_size,
void *alloc_root(MEM_ROOT *mem_root, size_t length)
{
#if defined(HAVE_valgrind) && defined(EXTRA_DEBUG)
reg1 USED_MEM *next;
DBUG_ENTER("alloc_root");
DBUG_PRINT("enter",("root: %p", mem_root));
DBUG_ASSERT(alloc_root_inited(mem_root));
DBUG_EXECUTE_IF("simulate_out_of_memory",
{
if (mem_root->error_handler)
(*mem_root->error_handler)();
DBUG_SET("-d,simulate_out_of_memory");
DBUG_RETURN((void*) 0); /* purecov: inspected */
});
length+=ALIGN_SIZE(sizeof(USED_MEM));
if (!(next = (USED_MEM*) my_malloc(mem_root->m_psi_key, length,
MYF(MY_WME | ME_FATAL |
MALLOC_FLAG(mem_root->block_size)))))
{
if (mem_root->error_handler)
(*mem_root->error_handler)();
DBUG_RETURN((uchar*) 0); /* purecov: inspected */
}
next->next= mem_root->used;
next->left= 0;
next->size= length;
mem_root->used= next;
DBUG_PRINT("exit",("ptr: %p", (((char*)next)+ALIGN_SIZE(sizeof(USED_MEM)))));
DBUG_RETURN((((uchar*) next)+ALIGN_SIZE(sizeof(USED_MEM))));
#else
size_t get_size, block_size;
uchar* point;
reg1 USED_MEM *next= 0;
@@ -212,13 +273,36 @@ void *alloc_root(MEM_ROOT *mem_root, size_t length)
DBUG_ASSERT(alloc_root_inited(mem_root));
DBUG_EXECUTE_IF("simulate_out_of_memory",
{
/* Avoid reusing an already allocated block */
if (mem_root->error_handler)
(*mem_root->error_handler)();
DBUG_SET("-d,simulate_out_of_memory");
DBUG_RETURN((void*) 0); /* purecov: inspected */
});
{
if (mem_root->error_handler)
(*mem_root->error_handler)();
DBUG_SET("-d,simulate_out_of_memory");
DBUG_RETURN((void*) 0); /* purecov: inspected */
});
#if defined(HAVE_valgrind) && defined(EXTRA_DEBUG)
if (!(mem_root->flags & ROOT_FLAG_MPROTECT))
{
length+= ALIGN_SIZE(sizeof(USED_MEM));
if (!(next = (USED_MEM*) my_malloc(mem_root->psi_key, length,
MYF(MY_WME | ME_FATAL |
(mem_root->flags &
ROOT_FLAG_THREAD_SPECIFIC ?
MY_THREAD_SPECIFIC : 0)))))
{
if (mem_root->error_handler)
(*mem_root->error_handler)();
DBUG_RETURN((uchar*) 0); /* purecov: inspected */
}
next->next= mem_root->used;
next->left= 0;
next->size= length;
mem_root->used= next;
DBUG_PRINT("exit",("ptr: %p", (((char*)next)+ALIGN_SIZE(sizeof(USED_MEM)))));
DBUG_RETURN((((uchar*) next)+ALIGN_SIZE(sizeof(USED_MEM))));
}
#endif /* defined(HAVE_valgrind) && defined(EXTRA_DEBUG) */
length= ALIGN_SIZE(length) + REDZONE_SIZE;
if ((*(prev= &mem_root->free)) != NULL)
{
@@ -237,14 +321,16 @@ void *alloc_root(MEM_ROOT *mem_root, size_t length)
}
if (! next)
{ /* Time to alloc new block */
block_size= (mem_root->block_size & ~1) * (mem_root->block_num >> 2);
get_size= length+ALIGN_SIZE(sizeof(USED_MEM));
size_t alloced_length;
/* Increase block size over time if there is a lot of mallocs */
block_size= (MY_ALIGN(mem_root->block_size, ROOT_MIN_BLOCK_SIZE) *
(mem_root->block_num >> 2)- MALLOC_OVERHEAD);
get_size= length + ALIGN_SIZE(sizeof(USED_MEM));
get_size= MY_MAX(get_size, block_size);
if (!(next = (USED_MEM*) my_malloc(mem_root->m_psi_key, get_size,
MYF(MY_WME | ME_FATAL |
MALLOC_FLAG(mem_root->
block_size)))))
if (!(next= (USED_MEM*) root_alloc(mem_root, get_size, &alloced_length,
MYF(MY_WME | ME_FATAL))))
{
if (mem_root->error_handler)
(*mem_root->error_handler)();
@@ -252,8 +338,8 @@ void *alloc_root(MEM_ROOT *mem_root, size_t length)
}
mem_root->block_num++;
next->next= *prev;
next->size= get_size;
next->left= get_size-ALIGN_SIZE(sizeof(USED_MEM));
next->size= alloced_length;
next->left= alloced_length - ALIGN_SIZE(sizeof(USED_MEM));
*prev=next;
TRASH_MEM(next);
}
@@ -271,7 +357,6 @@ void *alloc_root(MEM_ROOT *mem_root, size_t length)
TRASH_ALLOC(point, original_length);
DBUG_PRINT("exit",("ptr: %p", point));
DBUG_RETURN((void*) point);
#endif
}
@@ -407,13 +492,13 @@ void free_root(MEM_ROOT *root, myf MyFlags)
{
old=next; next= next->next ;
if (old != root->pre_alloc)
my_free(old);
root_free(root, old, old->size);
}
for (next=root->free ; next ;)
{
old=next; next= next->next;
if (old != root->pre_alloc)
my_free(old);
root_free(root, old, old->size);
}
root->used=root->free=0;
if (root->pre_alloc)
@@ -428,6 +513,7 @@ void free_root(MEM_ROOT *root, myf MyFlags)
DBUG_VOID_RETURN;
}
/*
Find block that contains an object and set the pre_alloc to it
*/
@@ -454,6 +540,38 @@ void set_prealloc_root(MEM_ROOT *root, char *ptr)
}
/**
Change protection for all blocks in the mem root
*/
#if defined(HAVE_MMAP) && defined(HAVE_MPROTECT) && defined(MAP_ANONYMOUS)
void protect_root(MEM_ROOT *root, int prot)
{
reg1 USED_MEM *next,*old;
DBUG_ENTER("protect_root");
DBUG_PRINT("enter",("root: %p prot: %d", root, prot));
DBUG_ASSERT(root->flags & ROOT_FLAG_MPROTECT);
for (next= root->used; next ;)
{
old= next; next= next->next ;
mprotect(old, old->size, prot);
}
for (next= root->free; next ;)
{
old= next; next= next->next ;
mprotect(old, old->size, prot);
}
DBUG_VOID_RETURN;
}
#else
void protect_root(MEM_ROOT *root, int prot)
{
}
#endif /* defined(HAVE_MMAP) && ... */
char *strdup_root(MEM_ROOT *root, const char *str)
{
return strmake_root(root, str, strlen(str));

View File

@@ -22,6 +22,9 @@
#include <m_ctype.h>
#include <signal.h>
#include <mysql/psi/mysql_stage.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef _WIN32
#ifdef _MSC_VER
#include <locale.h>
@@ -35,6 +38,10 @@ static my_bool win32_init_tcp_ip();
#define my_win_init()
#endif
#if defined(_SC_PAGE_SIZE) && !defined(_SC_PAGESIZE)
#define _SC_PAGESIZE _SC_PAGE_SIZE
#endif
extern pthread_key(struct st_my_thread_var*, THR_KEY_mysys);
#define SCALE_SEC 100
@@ -42,6 +49,7 @@ extern pthread_key(struct st_my_thread_var*, THR_KEY_mysys);
my_bool my_init_done= 0;
uint mysys_usage_id= 0; /* Incremented for each my_init() */
size_t my_system_page_size= 8192; /* Default if no sysconf() */
ulonglong my_thread_stack_size= (sizeof(void*) <= 4)? 65536: ((256-16)*1024);
@@ -79,6 +87,9 @@ my_bool my_init(void)
my_umask= 0660; /* Default umask for new files */
my_umask_dir= 0700; /* Default umask for new directories */
my_global_flags= 0;
#ifdef _SC_PAGESIZE
my_system_page_size= sysconf(_SC_PAGESIZE);
#endif
/* Default creation of new files */
if ((str= getenv("UMASK")) != 0)

View File

@@ -13,7 +13,7 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA */
/* Alloc a block of locked memory */
/* Alloc a block of locked memory (memory protected against swap) */
#include "mysys_priv.h"
#include "mysys_err.h"
@@ -33,7 +33,7 @@ LIST *mem_list;
uchar *my_malloc_lock(uint size,myf MyFlags)
{
int success;
uint pagesize=sysconf(_SC_PAGESIZE);
uint pagesize= my_system_page_size;
uchar *ptr;
struct st_mem_list *element;
DBUG_ENTER("my_malloc_lock");

View File

@@ -168,7 +168,7 @@ static void GCALC_DBUG_PRINT_SLICE(const char *header,
Gcalc_dyn_list::Gcalc_dyn_list(size_t blk_size, size_t sizeof_item):
m_blk_size(blk_size - ALLOC_ROOT_MIN_BLOCK_SIZE),
m_blk_size(blk_size),
m_sizeof_item(ALIGN_SIZE(sizeof_item)),
m_points_per_blk((uint)((m_blk_size - PH_DATA_OFFSET) / m_sizeof_item)),
m_blk_hook(&m_first_blk),

View File

@@ -369,6 +369,7 @@ uint volatile global_disable_checkpoint;
ulong slow_start_timeout;
#endif
static MEM_ROOT startup_root;
MEM_ROOT read_only_root;
/**
@brief 'grant_option' is used to indicate if privileges needs
@@ -1969,6 +1970,8 @@ static void clean_up(bool print_message)
mysql_library_end();
finish_client_errs();
free_root(&startup_root, MYF(0));
protect_root(&read_only_root, PROT_READ | PROT_WRITE);
free_root(&read_only_root, MYF(0));
cleanup_errmsgs();
free_error_messages();
/* Tell main we are ready */
@@ -3719,6 +3722,8 @@ static int init_early_variables()
set_malloc_size_cb(my_malloc_size_cb_func);
global_status_var.global_memory_used= 0;
init_alloc_root(PSI_NOT_INSTRUMENTED, &startup_root, 1024, 0, MYF(0));
init_alloc_root(PSI_NOT_INSTRUMENTED, &read_only_root, 1024, 0,
MYF(MY_ROOT_USE_MPROTECT));
return 0;
}
@@ -5368,8 +5373,9 @@ static int init_server_components()
if (!opt_bootstrap)
servers_init(0);
init_status_vars();
Item_false= new (&startup_root) Item_bool_static("FALSE", 0);
Item_true= new (&startup_root) Item_bool_static("TRUE", 1);
Item_false= new (&read_only_root) Item_bool_static("FALSE", 0);
Item_true= new (&read_only_root) Item_bool_static("TRUE", 1);
DBUG_ASSERT(Item_false);
DBUG_RETURN(0);
}
@@ -5734,6 +5740,9 @@ int mysqld_main(int argc, char **argv)
}
#endif /* WITH_WSREP */
/* Protect read_only_root against writes */
protect_root(&read_only_root, PROT_READ);
if (opt_bootstrap)
{
select_thread_in_use= 0; // Allow 'kill' to work

View File

@@ -122,8 +122,8 @@ public:
Dynamic_array(MEM_ROOT *root, uint prealloc=16, uint increment=16)
{
void *init_buffer= alloc_root(root, sizeof(Elem) * prealloc);
init_dynamic_array2(root->m_psi_key, &array, sizeof(Elem), init_buffer,
prealloc, increment, MYF(0));
init_dynamic_array2(root->psi_key, &array, sizeof(Elem), init_buffer,
prealloc, increment, MYF(0));
}
void init(PSI_memory_key psi_key, uint prealloc=16, uint increment=16)

View File

@@ -748,8 +748,7 @@ THD::THD(my_thread_id id, bool is_wsrep_applier)
will be re-initialized in init_for_queries().
*/
init_sql_alloc(key_memory_thd_main_mem_root,
&main_mem_root, ALLOC_ROOT_MIN_BLOCK_SIZE, 0,
MYF(MY_THREAD_SPECIFIC));
&main_mem_root, 64, 0, MYF(MY_THREAD_SPECIFIC));
/*
Allocation of user variables for binary logging is always done with main

View File

@@ -3046,8 +3046,8 @@ public:
{
bzero((char*)this, sizeof(*this));
implicit_xid.null();
init_sql_alloc(key_memory_thd_transactions, &mem_root,
ALLOC_ROOT_MIN_BLOCK_SIZE, 0, MYF(MY_THREAD_SPECIFIC));
init_sql_alloc(key_memory_thd_transactions, &mem_root, 256,
0, MYF(MY_THREAD_SPECIFIC));
}
} default_transaction, *transaction;
Global_read_lock global_read_lock;

View File

@@ -7097,7 +7097,7 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab,
/* set a barrier for the array of SARGABLE_PARAM */
(*sargables)[0].field= 0;
if (my_init_dynamic_array2(thd->mem_root->m_psi_key, keyuse, sizeof(KEYUSE),
if (my_init_dynamic_array2(thd->mem_root->psi_key, keyuse, sizeof(KEYUSE),
thd->alloc(sizeof(KEYUSE) * 20), 20, 64,
MYF(MY_THREAD_SPECIFIC)))
DBUG_RETURN(TRUE);
@@ -28485,7 +28485,7 @@ JOIN::reoptimize(Item *added_where, table_map join_tables,
reset_query_plan();
if (!keyuse.buffer &&
my_init_dynamic_array(thd->mem_root->m_psi_key, &keyuse, sizeof(KEYUSE),
my_init_dynamic_array(thd->mem_root->psi_key, &keyuse, sizeof(KEYUSE),
20, 64, MYF(MY_THREAD_SPECIFIC)))
{
delete_dynamic(&added_keyuse);

View File

@@ -121,7 +121,7 @@ ha_myisammrg::ha_myisammrg(handlerton *hton, TABLE_SHARE *table_arg)
:handler(hton, table_arg), file(0), is_cloned(0)
{
init_sql_alloc(rg_key_memory_children, &children_mem_root,
FN_REFLEN + ALLOC_ROOT_MIN_BLOCK_SIZE, 0, MYF(0));
FN_REFLEN, 0, MYF(0));
}