merge revision(s) 80a1a1bb8ae8435b916ae4f66a483e91ad31356a: [Backport #21257]

YJIT: Fix potential infinite loop when OOM (GH-13186)

	Avoid generating an infinite loop in the case where:
	1. Block `first` is adjacent to block `second`, and the branch from `first` to
	   `second` is a fallthrough, and
	2. Block `second` immediately exits to the interpreter, and
	3. Block `second` is invalidated and YJIT is OOM
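	A minimal sketch of the failure, using made-up addresses and toy names
	rather than YJIT's real types (Block, Branch, CodePtr): because `second`
	exits to the interpreter immediately, its entry exit sits at the very
	start of the block, and the OOM fallback turns the fallthrough branch
	into a jump to its own address.

	    // Toy model only; none of these names come from YJIT itself.
	    fn main() {
	        // `second` exits on entry, so its exit is at the block start.
	        let block_start: usize = 0x1000;
	        let block_entry_exit: usize = block_start;

	        // OOM: no branch stub can be allocated, so the rewrite falls
	        // back to the invalidated block's entry exit.
	        let stub_addr: Option<usize> = None; // allocation failed
	        let target = stub_addr.unwrap_or(block_entry_exit);

	        // The fallthrough branch from `first` ends exactly at
	        // block_start, so the rewrite emits `jmp target` at block_start.
	        // With target == block_start, that jump targets itself.
	        assert_eq!(target, block_start);
	        println!("emit `jmp {target:#x}` at {block_start:#x}: infinite loop");
	    }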

	While pondering how to fix this, I think I've stumbled on another related edge case:
	1. Blocks `incoming_one` and `incoming_two` both branch to block `second`. Block
	   `incoming_one`'s branch is a fallthrough
	2. Block `second` immediately exits to the interpreter (so it starts with its exit)
	3. When Block `second` is invalidated, the incoming fallthrough branch from
	   `incoming_one` might be rewritten first, which overwrites the start of block
	   `second` with a jump to a new branch stub.
	4. YJIT runs out of memory
	5. The incoming branch from `incoming_two` is then rewritten, but because we're
	   OOM we can't generate a new stub, so we use `second`'s exit as the branch
	   target. However `second`'s exit was already overwritten with a jump to the
	   branch stub for `incoming_one`, so `incoming_two` will end up jumping to
	   `incoming_one`'s branch stub.
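	A sketch of the resulting ordering fix (toy types again, not YJIT's):
	rewrite the adjacent fallthrough branch last, so that every other
	incoming branch can still fall back to `second`'s entry exit before
	anything overwrites the start of the block.

	    // Toy model only; Branch here is not YJIT's Branch.
	    struct Branch {
	        end_addr: usize, // where this branch's machine code ends
	    }

	    fn main() {
	        let block_start = 0x1000; // start of invalidated block `second`
	        let mut incoming = vec![
	            Branch { end_addr: block_start }, // incoming_one: fallthrough, adjacent
	            Branch { end_addr: 0x2040 },      // incoming_two: jumps in from elsewhere
	        ];

	        // Move the adjacent branch to the end so it is processed last.
	        if let Some(i) = incoming.iter().position(|b| b.end_addr == block_start) {
	            let last = incoming.len() - 1;
	            incoming.swap(i, last);
	        }
	        assert_eq!(incoming.last().unwrap().end_addr, block_start);
	    }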

	Fixes [Bug #21257]
nagachika 2025-05-18 16:25:01 +09:00
parent 1f226f1efe
commit f57dd4470b
3 changed files with 103 additions and 6 deletions

bootstraptest/test_yjit.rb

@@ -3491,6 +3491,74 @@ assert_equal 'new', %q{
   test
 }
 
+# Bug #21257 (infinite jmp)
+assert_equal 'ok', %q{
+  Good = :ok
+
+  def first
+    second
+  end
+
+  def second
+    ::Good
+  end
+
+  # Make `second` side exit on its first instruction
+  trace = TracePoint.new(:line) { }
+  trace.enable(target: method(:second))
+
+  first
+
+  # Recompile now that the constant cache is populated, so we get a fallthrough from `first` to `second`
+  # (this is needed to reproduce with --yjit-call-threshold=1)
+  RubyVM::YJIT.code_gc if defined?(RubyVM::YJIT)
+  first
+
+  # Trigger a constant cache miss in rb_vm_opt_getconstant_path (in `second`) next time it's called
+  module InvalidateConstantCache
+    Good = nil
+  end
+
+  RubyVM::YJIT.simulate_oom! if defined?(RubyVM::YJIT)
+
+  first
+  first
+}
+
+assert_equal 'ok', %q{
+  # Multiple incoming branches into second
+  Good = :ok
+
+  def incoming_one
+    second
+  end
+
+  def incoming_two
+    second
+  end
+
+  def second
+    ::Good
+  end
+
+  # Make `second` side exit on its first instruction
+  trace = TracePoint.new(:line) { }
+  trace.enable(target: method(:second))
+
+  incoming_one
+
+  # Recompile now that the constant cache is populated, so we get a fallthrough from `incoming_one` to `second`
+  # (this is needed to reproduce with --yjit-call-threshold=1)
+  RubyVM::YJIT.code_gc if defined?(RubyVM::YJIT)
+  incoming_one
+  incoming_two
+
+  # Trigger a constant cache miss in rb_vm_opt_getconstant_path (in `second`) next time it's called
+  module InvalidateConstantCache
+    Good = nil
+  end
+
+  incoming_one
+}
+
 assert_equal 'ok', %q{
   # Try to compile new method while OOM
   def foo
version.h

@@ -11,7 +11,7 @@
 #define RUBY_VERSION_MINOR RUBY_API_VERSION_MINOR
 #define RUBY_VERSION_TEENY 8
 #define RUBY_RELEASE_DATE RUBY_RELEASE_YEAR_STR"-"RUBY_RELEASE_MONTH_STR"-"RUBY_RELEASE_DAY_STR
-#define RUBY_PATCHLEVEL 152
+#define RUBY_PATCHLEVEL 153
 
 #include "ruby/version.h"
 #include "ruby/internal/abi.h"

yjit/src/core.rs

@@ -3232,7 +3232,23 @@ pub fn invalidate_block_version(blockref: &BlockRef) {
     }
 
     // For each incoming branch
-    for branchref in block.incoming.0.take().iter() {
+    let mut incoming_branches = block.incoming.0.take();
+
+    // An adjacent branch will write into the start of the block being invalidated, possibly
+    // overwriting the block's exit. If we run out of memory after doing this, any subsequent
+    // incoming branches we rewrite won't be able to use the block's exit as a fallback when
+    // they are unable to generate a stub. To avoid this, if there's an incoming branch that's
+    // adjacent to the invalidated block, make sure we process it last.
+    let adjacent_branch_idx = incoming_branches.iter().position(|branchref| {
+        let branch = unsafe { branchref.as_ref() };
+        let target_next = block.start_addr == branch.end_addr.get();
+        target_next
+    });
+    if let Some(adjacent_branch_idx) = adjacent_branch_idx {
+        incoming_branches.swap(adjacent_branch_idx, incoming_branches.len() - 1)
+    }
+
+    for (i, branchref) in incoming_branches.iter().enumerate() {
         let branch = unsafe { branchref.as_ref() };
         let target_idx = if branch.get_target_address(0) == Some(block_start) {
             0
@@ -3272,10 +3288,18 @@ pub fn invalidate_block_version(blockref: &BlockRef) {
         let target_next = block.start_addr == branch.end_addr.get();
 
         if target_next {
-            // The new block will no longer be adjacent.
-            // Note that we could be enlarging the branch and writing into the
-            // start of the block being invalidated.
-            branch.gen_fn.set_shape(BranchShape::Default);
+            if stub_addr != block.start_addr {
+                // The new block will no longer be adjacent.
+                // Note that we could be enlarging the branch and writing into the
+                // start of the block being invalidated.
+                branch.gen_fn.set_shape(BranchShape::Default);
+            } else {
+                // The branch target is still adjacent, so the branch must remain
+                // a fallthrough so we don't overwrite the target with a jump.
+                //
+                // This can happen if we're unable to generate a stub and the
+                // target block also exits on entry (block_start == block_entry_exit).
+            }
         }
 
         // Rewrite the branch with the new jump target address
@@ -3285,6 +3309,11 @@ pub fn invalidate_block_version(blockref: &BlockRef) {
         if target_next && branch.end_addr > block.end_addr {
             panic!("yjit invalidate rewrote branch past end of invalidated block: {:?} (code_size: {})", branch, block.code_size());
         }
+        let is_last_incoming_branch = i == incoming_branches.len() - 1;
+        if target_next && branch.end_addr.get() > block_entry_exit && !is_last_incoming_branch {
+            // We might still need to jump to this exit if we run out of memory when rewriting another incoming branch.
+            panic!("yjit invalidate rewrote branch over exit of invalidated block: {:?}", branch);
+        }
         if !target_next && branch.code_size() > old_branch_size {
             panic!(
                 "invalidated branch grew in size (start_addr: {:?}, old_size: {}, new_size: {})",