YJIT: Fix assertion for partially mapped last pages (#7337)

Follows up [Bug #19400]
Takashi Kokubun authored 2023-02-20 09:06:09 -08:00, committed by GitHub
parent b326a5f3dd
commit ecd0cdaf82
GPG Key ID: 4AEE18F83AFDEB23
Notes: git 2023-02-20 17:06:34 +00:00
Merged-By: k0kubun <takashikkbn@gmail.com>
3 changed files with 33 additions and 6 deletions

test/ruby/test_yjit.rb

@@ -974,6 +974,25 @@ class TestYJIT < Test::Unit::TestCase
     RUBY
   end
 
+  def test_code_gc_partial_last_page
+    # call_threshold: 2 to avoid JIT-ing code_gc itself. If code_gc were JITed right before
+    # code_gc is called, the last page would be on stack.
+    assert_compiles(<<~'RUBY', exits: :any, result: :ok, call_threshold: 2)
+      # Leave a bunch of off-stack pages
+      i = 0
+      while i < 1000
+        eval("x = proc { 1.to_s }; x.call; x.call")
+        i += 1
+      end
+
+      # On Linux, memory page size != code page size. So the last code page could be partially
+      # mapped. This call tests that assertions and other things work fine under the situation.
+      RubyVM::YJIT.code_gc
+
+      :ok
+    RUBY
+  end
+
   def test_trace_script_compiled # not ISEQ_TRACE_EVENTS
     assert_compiles(<<~'RUBY', exits: :any, result: :ok)
       @eval_counter = 0
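
Aside (not part of the commit): the comment in the test above relies on the mapped region ending partway through a code page. A minimal sketch of that arithmetic, using assumed sizes (4 KiB OS pages, 16 KiB YJIT code pages) and an illustrative page count rather than values taken from the YJIT source:

// Illustrative sketch only: shows how a mapped region that grows in OS-page
// increments can end in the middle of a larger code page.
fn main() {
    let os_page_size = 4 * 1024;     // assumed Linux memory page size
    let code_page_size = 16 * 1024;  // assumed YJIT code page size

    // Suppose the VM has mapped 13 OS pages so far (illustrative number).
    let mapped_region_bytes = 13 * os_page_size; // 53248 bytes

    // Index of the code page containing the last mapped byte, and where that
    // code page would end if it were fully mapped.
    let last_code_page = (mapped_region_bytes - 1) / code_page_size; // 3
    let last_code_page_end = (last_code_page + 1) * code_page_size;  // 65536

    // The last code page is only partially mapped: it ends beyond the mapped
    // region, so code that assumes whole code pages are mapped would be wrong.
    assert!(mapped_region_bytes % code_page_size != 0);
    assert!(last_code_page_end > mapped_region_bytes);
    println!(
        "code page {} is mapped up to byte {} of {}",
        last_code_page,
        mapped_region_bytes - last_code_page * code_page_size,
        code_page_size
    );
}

With these numbers, only the first 4 KiB of code page 3 is backed by mapped memory, which is exactly the "partially mapped last page" situation the test exercises.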

yjit/src/asm/mod.rs

@@ -423,7 +423,7 @@ impl CodeBlock {
     /// Convert an address range to memory page indexes against a num_pages()-sized array.
     pub fn addrs_to_pages(&self, start_addr: CodePtr, end_addr: CodePtr) -> Vec<usize> {
         let mem_start = self.mem_block.borrow().start_ptr().into_usize();
-        let mem_end = self.mem_block.borrow().end_ptr().into_usize();
+        let mem_end = self.mem_block.borrow().mapped_end_ptr().into_usize();
         assert!(mem_start <= start_addr.into_usize());
         assert!(start_addr.into_usize() <= end_addr.into_usize());
         assert!(end_addr.into_usize() <= mem_end);
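
Aside (not part of the commit): addrs_to_pages maps a byte address range onto code page indexes, and the change above makes its upper bound the mapped end of the region. A rough, self-contained sketch of that kind of conversion; the function name, parameters, and sizes below are illustrative assumptions, not the actual CodeBlock API:

// Illustrative sketch: convert a [start_addr, end_addr) byte range within a
// code region into the indexes of the code pages it touches, bounds-checked
// against the mapped end of the region.
fn addr_range_to_page_indexes(
    mem_start: usize,  // start of the code region
    mem_end: usize,    // mapped end of the code region
    page_size: usize,  // code page size
    start_addr: usize,
    end_addr: usize,
) -> Vec<usize> {
    // Same flavor of bounds checks as in the hunk above, against the mapped end.
    assert!(mem_start <= start_addr);
    assert!(start_addr <= end_addr);
    assert!(end_addr <= mem_end);

    if start_addr == end_addr {
        return vec![]; // an empty range touches no page
    }
    let start_page = (start_addr - mem_start) / page_size;
    // Use the last byte so a page-aligned end does not spill into the next page.
    let end_page = (end_addr - 1 - mem_start) / page_size;
    (start_page..=end_page).collect()
}

fn main() {
    // 16 KiB code pages; this range spans pages 1 and 2 of the region.
    let pages = addr_range_to_page_indexes(0x10000, 0x20000, 0x4000, 0x14000, 0x1a000);
    assert_eq!(pages, vec![1, 2]);
}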

yjit/src/virtualmem.rs

@@ -101,8 +101,12 @@ impl<A: Allocator> VirtualMemory<A> {
         CodePtr(self.region_start)
     }
 
-    pub fn end_ptr(&self) -> CodePtr {
-        CodePtr(NonNull::new(self.region_start.as_ptr().wrapping_add(self.mapped_region_bytes)).unwrap())
+    pub fn mapped_end_ptr(&self) -> CodePtr {
+        self.start_ptr().add_bytes(self.mapped_region_bytes)
+    }
+
+    pub fn virtual_end_ptr(&self) -> CodePtr {
+        self.start_ptr().add_bytes(self.region_size_bytes)
     }
 
     /// Size of the region in bytes that we have allocated physical memory for.
@@ -208,10 +212,14 @@ impl<A: Allocator> VirtualMemory<A> {
         assert_eq!(start_ptr.into_usize() % self.page_size_bytes, 0);
 
         // Bounds check the request. We should only free memory we manage.
-        let region_range = self.region_start.as_ptr() as *const u8..self.end_ptr().raw_ptr();
+        let mapped_region = self.start_ptr().raw_ptr()..self.mapped_end_ptr().raw_ptr();
+        let virtual_region = self.start_ptr().raw_ptr()..self.virtual_end_ptr().raw_ptr();
         let last_byte_to_free = start_ptr.add_bytes(size.saturating_sub(1).as_usize()).raw_ptr();
-        assert!(region_range.contains(&start_ptr.raw_ptr()));
-        assert!(region_range.contains(&last_byte_to_free));
+        assert!(mapped_region.contains(&start_ptr.raw_ptr()));
+        // On platforms where code page size != memory page size (e.g. Linux), we often need
+        // to free code pages that contain unmapped memory pages. When it happens on the last
+        // code page, it's more appropriate to check the last byte against the virtual region.
+        assert!(virtual_region.contains(&last_byte_to_free));
 
         self.allocator.mark_unused(start_ptr.0.as_ptr(), size);
     }
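
Aside (not part of the commit): a self-contained sketch of the bounds check above with concrete numbers, showing why the start of a freed range is checked against the mapped region while its last byte only needs to fall inside the virtual region. The Region struct, method names, and sizes are assumptions for illustration:

// Illustrative model of a code region: the mapped end grows in OS-page
// increments, while the virtual end is the full reserved size.
struct Region {
    start: usize,
    mapped_bytes: usize,  // bytes backed by physical memory so far
    virtual_bytes: usize, // total reserved address space
}

impl Region {
    fn mapped_end(&self) -> usize { self.start + self.mapped_bytes }
    fn virtual_end(&self) -> usize { self.start + self.virtual_bytes }

    // Mirrors the shape of the assertion in the diff above: the freed range
    // must start inside the mapped region, but its last byte only has to fall
    // inside the virtual region, because the last code page may be partially
    // mapped when OS pages are smaller than code pages.
    fn check_free(&self, start_ptr: usize, size: usize) {
        let mapped_region = self.start..self.mapped_end();
        let virtual_region = self.start..self.virtual_end();
        let last_byte_to_free = start_ptr + size.saturating_sub(1);
        assert!(mapped_region.contains(&start_ptr));
        assert!(virtual_region.contains(&last_byte_to_free));
    }
}

fn main() {
    // Assumed sizes: 4 KiB OS pages, 16 KiB code pages, 64 KiB reservation,
    // of which 13 OS pages (52 KiB) are currently mapped.
    let region = Region { start: 0x10000, mapped_bytes: 52 * 1024, virtual_bytes: 64 * 1024 };

    // Code GC frees the last 16 KiB code page, which starts at offset 48 KiB.
    // Its last byte (offset 64 KiB - 1) lies past the mapped end (52 KiB) but
    // still inside the virtual region, so both checks pass.
    region.check_free(region.start + 48 * 1024, 16 * 1024);
    println!("partially mapped last page freed within bounds");
}

Before this change, both assertions compared against the mapped end, so freeing a last code page that extends past the mapped boundary would trip the second assertion even though the request is valid; checking the last byte against the virtual end is what this commit fixes.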