Annotate anonymous mmap
Use PR_SET_VMA_ANON_NAME to assign human-readable names to anonymous virtual memory areas created by `mmap()` when Ruby is compiled and run on Linux 5.17 or later. The names appear as `[anon:...]` entries in `/proc/self/maps`, making it easier for developers to tell which call site created each mapping while debugging.
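For context, the mechanism is a single prctl(2) call. The sketch below is not part of this commit; it is a minimal standalone illustration assuming a Linux 5.17+ kernel whose headers define PR_SET_VMA and PR_SET_VMA_ANON_NAME (the mapping size, the name "Example:demo_mapping", and the main() wrapper are made up for the example):

    // Annotate an anonymous mapping so it shows up with a readable name
    // in /proc/self/maps. On kernels without PR_SET_VMA_ANON_NAME the
    // prctl() call fails with EINVAL and the mapping simply stays unnamed,
    // which is also how this commit's ruby_annotate_mmap() treats failure.
    #include <sys/mman.h>
    #include <sys/prctl.h>
    #include <linux/prctl.h>

    int main(void)
    {
        size_t size = 1 << 20;
        void *base = mmap(NULL, size, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (base == MAP_FAILED) return 1;

        // The name must stay under 80 bytes including the trailing NUL and
        // may only use printable characters (no '[', ']', '\', '`' or '$').
        prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
              (unsigned long)base, size, "Example:demo_mapping");

        // The region now appears in /proc/self/maps as, e.g.:
        //   ... rw-p 00000000 00:00 0    [anon:Example:demo_mapping]
        return 0;
    }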
parent 640bacceb1
commit 8ae7c22972
2024-11-21 18:48:28 +00:00

common.mk (15 lines changed)
@@ -8631,19 +8631,25 @@ io_buffer.$(OBJEXT): $(CCAN_DIR)/str/str.h
io_buffer.$(OBJEXT): $(hdrdir)/ruby/ruby.h
io_buffer.$(OBJEXT): $(hdrdir)/ruby/version.h
io_buffer.$(OBJEXT): $(top_srcdir)/internal/array.h
io_buffer.$(OBJEXT): $(top_srcdir)/internal/basic_operators.h
io_buffer.$(OBJEXT): $(top_srcdir)/internal/bignum.h
io_buffer.$(OBJEXT): $(top_srcdir)/internal/bits.h
io_buffer.$(OBJEXT): $(top_srcdir)/internal/compilers.h
io_buffer.$(OBJEXT): $(top_srcdir)/internal/error.h
io_buffer.$(OBJEXT): $(top_srcdir)/internal/fixnum.h
io_buffer.$(OBJEXT): $(top_srcdir)/internal/gc.h
io_buffer.$(OBJEXT): $(top_srcdir)/internal/imemo.h
io_buffer.$(OBJEXT): $(top_srcdir)/internal/io.h
io_buffer.$(OBJEXT): $(top_srcdir)/internal/numeric.h
io_buffer.$(OBJEXT): $(top_srcdir)/internal/sanitizers.h
io_buffer.$(OBJEXT): $(top_srcdir)/internal/serial.h
io_buffer.$(OBJEXT): $(top_srcdir)/internal/static_assert.h
io_buffer.$(OBJEXT): $(top_srcdir)/internal/string.h
io_buffer.$(OBJEXT): $(top_srcdir)/internal/thread.h
io_buffer.$(OBJEXT): $(top_srcdir)/internal/vm.h
io_buffer.$(OBJEXT): $(top_srcdir)/internal/warnings.h
io_buffer.$(OBJEXT): {$(VPATH)}assert.h
io_buffer.$(OBJEXT): {$(VPATH)}atomic.h
io_buffer.$(OBJEXT): {$(VPATH)}backward/2/assume.h
io_buffer.$(OBJEXT): {$(VPATH)}backward/2/attributes.h
io_buffer.$(OBJEXT): {$(VPATH)}backward/2/bool.h
@@ -8657,6 +8663,7 @@ io_buffer.$(OBJEXT): {$(VPATH)}config.h
io_buffer.$(OBJEXT): {$(VPATH)}defines.h
io_buffer.$(OBJEXT): {$(VPATH)}encoding.h
io_buffer.$(OBJEXT): {$(VPATH)}fiber/scheduler.h
io_buffer.$(OBJEXT): {$(VPATH)}id.h
io_buffer.$(OBJEXT): {$(VPATH)}intern.h
io_buffer.$(OBJEXT): {$(VPATH)}internal.h
io_buffer.$(OBJEXT): {$(VPATH)}internal/abi.h
@@ -8811,13 +8818,21 @@ io_buffer.$(OBJEXT): {$(VPATH)}internal/xmalloc.h
io_buffer.$(OBJEXT): {$(VPATH)}io.h
io_buffer.$(OBJEXT): {$(VPATH)}io/buffer.h
io_buffer.$(OBJEXT): {$(VPATH)}io_buffer.c
io_buffer.$(OBJEXT): {$(VPATH)}method.h
io_buffer.$(OBJEXT): {$(VPATH)}missing.h
io_buffer.$(OBJEXT): {$(VPATH)}node.h
io_buffer.$(OBJEXT): {$(VPATH)}onigmo.h
io_buffer.$(OBJEXT): {$(VPATH)}oniguruma.h
io_buffer.$(OBJEXT): {$(VPATH)}ruby_assert.h
io_buffer.$(OBJEXT): {$(VPATH)}ruby_atomic.h
io_buffer.$(OBJEXT): {$(VPATH)}rubyparser.h
io_buffer.$(OBJEXT): {$(VPATH)}st.h
io_buffer.$(OBJEXT): {$(VPATH)}subst.h
io_buffer.$(OBJEXT): {$(VPATH)}thread.h
io_buffer.$(OBJEXT): {$(VPATH)}thread_$(THREAD_MODEL).h
io_buffer.$(OBJEXT): {$(VPATH)}thread_native.h
io_buffer.$(OBJEXT): {$(VPATH)}vm_core.h
io_buffer.$(OBJEXT): {$(VPATH)}vm_opts.h
iseq.$(OBJEXT): $(CCAN_DIR)/check_type/check_type.h
iseq.$(OBJEXT): $(CCAN_DIR)/container_of/container_of.h
iseq.$(OBJEXT): $(CCAN_DIR)/list/list.h
cont.c (6 lines changed)
@@ -475,18 +475,20 @@ fiber_pool_allocate_memory(size_t * count, size_t stride)
}
#else
errno = 0;
void * base = mmap(NULL, (*count)*stride, PROT_READ | PROT_WRITE, FIBER_STACK_FLAGS, -1, 0);
size_t mmap_size = (*count)*stride;
void * base = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, FIBER_STACK_FLAGS, -1, 0);

if (base == MAP_FAILED) {
// If the allocation fails, count = count / 2, and try again.
*count = (*count) >> 1;
}
else {
ruby_annotate_mmap(base, mmap_size, "Ruby:fiber_pool_allocate_memory");
#if defined(MADV_FREE_REUSE)
// On Mac MADV_FREE_REUSE is necessary for the task_info api
// to keep the accounting accurate as possible when a page is marked as reusable
// it can possibly not occurring at first call thus re-iterating if necessary.
while (madvise(base, (*count)*stride, MADV_FREE_REUSE) == -1 && errno == EAGAIN);
while (madvise(base, mmap_size, MADV_FREE_REUSE) == -1 && errno == EAGAIN);
#endif
return base;
}
gc.c (39 lines changed)
@@ -74,6 +74,12 @@
#include <emscripten.h>
#endif

/* For ruby_annotate_mmap */
#ifdef __linux__
#include <linux/prctl.h>
#include <sys/prctl.h>
#endif

#undef LIST_HEAD /* ccan/list conflicts with BSD-origin sys/queue.h. */

#include "constant.h"
@@ -4494,3 +4500,36 @@ Init_GC(void)

rb_gc_impl_init();
}

// Set a name for the anonymous virtual memory area. `addr` is the starting
// address of the area and `size` is its length in bytes. `name` is a
// NUL-terminated human-readable string.
//
// This function is usually called after calling `mmap()`. The human-readable
// annotation helps developers identify the call site of `mmap()` that created
// the memory mapping.
//
// This function currently only works on Linux 5.17 or higher. After calling
// this function, we can see annotations in the form of "[anon:...]" in
// `/proc/self/maps`, where `...` is the content of `name`. This function has
// no effect when called on other platforms.
void
ruby_annotate_mmap(const void *addr, unsigned long size, const char *name)
{
#if defined(__linux__) && defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME)
// The name length cannot exceed 80 (including the '\0').
RUBY_ASSERT(strlen(name) < 80);
prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, (unsigned long)addr, size, name);
// We ignore errors in prctl. prctl may set errno to EINVAL for several
// reasons.
// 1. The attr (PR_SET_VMA_ANON_NAME) is not a valid attribute.
// 2. addr is an invalid address.
// 3. The string pointed by name is too long.
// The first error indicates PR_SET_VMA_ANON_NAME is not available, and may
// happen if we run the compiled binary on an old kernel. In theory, all
// other errors should result in a failure. But since EINVAL cannot tell
// the first error from others, and this function is mainly used for
// debugging, we silently ignore the error.
errno = 0;
#endif
}
gc/default.c (17 lines changed)
@@ -5,6 +5,10 @@
#ifndef _WIN32
# include <sys/mman.h>
# include <unistd.h>
# ifdef __linux__
# include <linux/prctl.h>
# include <sys/prctl.h>
# endif
#endif

#if !defined(PAGE_SIZE) && defined(HAVE_SYS_USER_H)
@@ -1859,12 +1863,23 @@ heap_page_body_allocate(void)
#ifdef HAVE_MMAP
GC_ASSERT(HEAP_PAGE_ALIGN % sysconf(_SC_PAGE_SIZE) == 0);

char *ptr = mmap(NULL, HEAP_PAGE_ALIGN + HEAP_PAGE_SIZE,
size_t mmap_size = HEAP_PAGE_ALIGN + HEAP_PAGE_SIZE;
char *ptr = mmap(NULL, mmap_size,
PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (ptr == MAP_FAILED) {
return NULL;
}

// If we are building `default.c` as part of the ruby executable, we
// may just call `ruby_annotate_mmap`. But if we are building
// `default.c` as a shared library, we will not have access to private
// symbols, and we have to either call prctl directly or make our own
// wrapper.
#if defined(__linux__) && defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME)
prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ptr, mmap_size, "Ruby:GC:default:heap_page_body_allocate");
errno = 0;
#endif

char *aligned = ptr + HEAP_PAGE_ALIGN;
aligned -= ((VALUE)aligned & (HEAP_PAGE_ALIGN - 1));
GC_ASSERT(aligned > ptr);
@@ -260,6 +260,7 @@ RUBY_SYMBOL_EXPORT_END
int rb_ec_stack_check(struct rb_execution_context_struct *ec);
void rb_gc_writebarrier_remember(VALUE obj);
const char *rb_obj_info(VALUE obj);
void ruby_annotate_mmap(const void *addr, unsigned long size, const char *name);

#if defined(HAVE_MALLOC_USABLE_SIZE) || defined(HAVE_MALLOC_SIZE) || defined(_WIN32)
@@ -16,6 +16,7 @@
#include "internal/array.h"
#include "internal/bits.h"
#include "internal/error.h"
#include "internal/gc.h"
#include "internal/numeric.h"
#include "internal/string.h"
#include "internal/io.h"
@@ -83,6 +84,8 @@ io_buffer_map_memory(size_t size, int flags)
if (base == MAP_FAILED) {
rb_sys_fail("io_buffer_map_memory:mmap");
}

ruby_annotate_mmap(base, size, "Ruby:io_buffer_map_memory");
#endif

return base;
rjit_c.c (5 lines changed)
@@ -85,6 +85,7 @@ rjit_reserve_addr_space(uint32_t mem_size)

// If we succeeded, stop
if (mem_block != MAP_FAILED) {
ruby_annotate_mmap(mem_block, mem_size, "Ruby:rjit_reserve_addr_space");
break;
}

@@ -116,6 +117,10 @@ rjit_reserve_addr_space(uint32_t mem_size)
-1,
0
);

if (mem_block != MAP_FAILED) {
ruby_annotate_mmap(mem_block, mem_size, "Ruby:rjit_reserve_addr_space:fallback");
}
}

// Check that the memory mapping was successful
shape.c (12 lines changed)
@@ -1232,11 +1232,15 @@ Init_default_shapes(void)
rb_shape_tree_ptr = xcalloc(1, sizeof(rb_shape_tree_t));

#ifdef HAVE_MMAP
rb_shape_tree_ptr->shape_list = (rb_shape_t *)mmap(NULL, rb_size_mul_or_raise(SHAPE_BUFFER_SIZE, sizeof(rb_shape_t), rb_eRuntimeError),
size_t shape_list_mmap_size = rb_size_mul_or_raise(SHAPE_BUFFER_SIZE, sizeof(rb_shape_t), rb_eRuntimeError);
rb_shape_tree_ptr->shape_list = (rb_shape_t *)mmap(NULL, shape_list_mmap_size,
PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (GET_SHAPE_TREE()->shape_list == MAP_FAILED) {
GET_SHAPE_TREE()->shape_list = 0;
}
else {
ruby_annotate_mmap(rb_shape_tree_ptr->shape_list, shape_list_mmap_size, "Ruby:Init_default_shapes:shape_list");
}
#else
GET_SHAPE_TREE()->shape_list = xcalloc(SHAPE_BUFFER_SIZE, sizeof(rb_shape_t));
#endif
@@ -1249,7 +1253,8 @@ Init_default_shapes(void)
id_t_object = rb_make_internal_id();

#ifdef HAVE_MMAP
rb_shape_tree_ptr->shape_cache = (redblack_node_t *)mmap(NULL, rb_size_mul_or_raise(REDBLACK_CACHE_SIZE, sizeof(redblack_node_t), rb_eRuntimeError),
size_t shape_cache_mmap_size = rb_size_mul_or_raise(REDBLACK_CACHE_SIZE, sizeof(redblack_node_t), rb_eRuntimeError);
rb_shape_tree_ptr->shape_cache = (redblack_node_t *)mmap(NULL, shape_cache_mmap_size,
PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
rb_shape_tree_ptr->cache_size = 0;

@@ -1260,6 +1265,9 @@ Init_default_shapes(void)
GET_SHAPE_TREE()->shape_cache = 0;
GET_SHAPE_TREE()->cache_size = REDBLACK_CACHE_SIZE;
}
else {
ruby_annotate_mmap(rb_shape_tree_ptr->shape_cache, shape_cache_mmap_size, "Ruby:Init_default_shapes:shape_cache");
}
#endif

// Root shape
@@ -194,6 +194,8 @@ nt_alloc_thread_stack_chunk(void)
return NULL;
}

ruby_annotate_mmap(m, MSTACK_CHUNK_SIZE, "Ruby:nt_alloc_thread_stack_chunk");

size_t msz = nt_thread_stack_size();
int header_page_cnt = 1;
int stack_count = ((MSTACK_CHUNK_PAGE_NUM - header_page_cnt) * MSTACK_PAGE_SIZE) / msz;
yjit.c (5 lines changed)
@@ -291,6 +291,7 @@ rb_yjit_reserve_addr_space(uint32_t mem_size)

// If we succeeded, stop
if (mem_block != MAP_FAILED) {
ruby_annotate_mmap(mem_block, mem_size, "Ruby:rb_yjit_reserve_addr_space");
break;
}

@@ -325,6 +326,10 @@ rb_yjit_reserve_addr_space(uint32_t mem_size)
-1,
0
);

if (mem_block != MAP_FAILED) {
ruby_annotate_mmap(mem_block, mem_size, "Ruby:rb_yjit_reserve_addr_space:fallback");
}
}

// Check that the memory mapping was successful
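Not part of the diff: one way to confirm the annotations at runtime is to read `/proc/self/maps` from inside the process and look for the `[anon:Ruby:...]` entries that the call sites above create. The small program below is an illustrative sketch only; the filter string simply matches the "Ruby:" prefix used by the names in this commit.

    // Print every line of /proc/self/maps that carries a "Ruby:" anon-VMA name.
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        FILE *maps = fopen("/proc/self/maps", "r");
        if (!maps) return 1;

        char line[512];
        while (fgets(line, sizeof(line), maps)) {
            if (strstr(line, "[anon:Ruby:")) fputs(line, stdout);
        }
        fclose(maps);
        return 0;
    }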