diff --git a/bootstraptest/test_yjit.rb b/bootstraptest/test_yjit.rb index 5a5e08f095..1da7837fe4 100644 --- a/bootstraptest/test_yjit.rb +++ b/bootstraptest/test_yjit.rb @@ -3667,6 +3667,74 @@ assert_equal 'new', %q{ test } +# Bug #21257 (infinite jmp) +assert_equal 'ok', %q{ + Good = :ok + + def first + second + end + + def second + ::Good + end + + # Make `second` side exit on its first instruction + trace = TracePoint.new(:line) { } + trace.enable(target: method(:second)) + + first + # Recompile now that the constant cache is populated, so we get a fallthrough from `first` to `second` + # (this is needed to reproduce with --yjit-call-threshold=1) + RubyVM::YJIT.code_gc if defined?(RubyVM::YJIT) + first + + # Trigger a constant cache miss in rb_vm_opt_getconstant_path (in `second`) next time it's called + module InvalidateConstantCache + Good = nil + end + + RubyVM::YJIT.simulate_oom! if defined?(RubyVM::YJIT) + + first + first +} + +assert_equal 'ok', %q{ + # Multiple incoming branches into second + Good = :ok + + def incoming_one + second + end + + def incoming_two + second + end + + def second + ::Good + end + + # Make `second` side exit on its first instruction + trace = TracePoint.new(:line) { } + trace.enable(target: method(:second)) + + incoming_one + # Recompile now that the constant cache is populated, so we get a fallthrough from `incoming_one` to `second` + # (this is needed to reproduce with --yjit-call-threshold=1) + RubyVM::YJIT.code_gc if defined?(RubyVM::YJIT) + incoming_one + incoming_two + + # Trigger a constant cache miss in rb_vm_opt_getconstant_path (in `second`) next time it's called + module InvalidateConstantCache + Good = nil + end + + incoming_one +} + assert_equal 'ok', %q{ # Try to compile new method while OOM def foo diff --git a/yjit/src/core.rs b/yjit/src/core.rs index 05c86a6164..e31e54c106 100644 --- a/yjit/src/core.rs +++ b/yjit/src/core.rs @@ -4158,7 +4158,23 @@ pub fn invalidate_block_version(blockref: &BlockRef) { } // For each 
incoming branch - for branchref in block.incoming.0.take().iter() { + let mut incoming_branches = block.incoming.0.take(); + + // An adjacent branch will write into the start of the block being invalidated, possibly + // overwriting the block's exit. If we run out of memory after doing this, any subsequent + // incoming branches we rewrite won't be able to use the block's exit as a fallback when they + // are unable to generate a stub. To avoid this, if there's an incoming branch that's + // adjacent to the invalidated block, make sure we process it last. + let adjacent_branch_idx = incoming_branches.iter().position(|branchref| { + let branch = unsafe { branchref.as_ref() }; + let target_next = block.start_addr == branch.end_addr.get(); + target_next + }); + if let Some(adjacent_branch_idx) = adjacent_branch_idx { + incoming_branches.swap(adjacent_branch_idx, incoming_branches.len() - 1) + } + + for (i, branchref) in incoming_branches.iter().enumerate() { let branch = unsafe { branchref.as_ref() }; let target_idx = if branch.get_target_address(0) == Some(block_start) { 0 @@ -4198,10 +4214,18 @@ pub fn invalidate_block_version(blockref: &BlockRef) { let target_next = block.start_addr == branch.end_addr.get(); if target_next { - // The new block will no longer be adjacent. - // Note that we could be enlarging the branch and writing into the - // start of the block being invalidated. - branch.gen_fn.set_shape(BranchShape::Default); + if stub_addr != block.start_addr { + // The new block will no longer be adjacent. + // Note that we could be enlarging the branch and writing into the + // start of the block being invalidated. + branch.gen_fn.set_shape(BranchShape::Default); + } else { + // The branch target is still adjacent, so the branch must remain + // a fallthrough so we don't overwrite the target with a jump. + // + // This can happen if we're unable to generate a stub and the + // target block also exits on entry (block_start == block_entry_exit). 
+ } } // Rewrite the branch with the new jump target address @@ -4211,6 +4235,11 @@ pub fn invalidate_block_version(blockref: &BlockRef) { if target_next && branch.end_addr > block.end_addr { panic!("yjit invalidate rewrote branch past end of invalidated block: {:?} (code_size: {})", branch, block.code_size()); } + let is_last_incoming_branch = i == incoming_branches.len() - 1; + if target_next && branch.end_addr.get() > block_entry_exit && !is_last_incoming_branch { + // We might still need to jump to this exit if we run out of memory when rewriting another incoming branch. + panic!("yjit invalidate rewrote branch over exit of invalidated block: {:?}", branch); + } if !target_next && branch.code_size() > old_branch_size { panic!( "invalidated branch grew in size (start_addr: {:?}, old_size: {}, new_size: {})",