YJIT: Refactor side_exits
commit bf3940a306 (parent 5ce0c13f18)
Date: 2023-01-19 21:11:21 +00:00
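In short: get_side_exit and the counted_exit! macro now hand out an assembler Target instead of a raw CodePtr, so jump sites can take a side exit without calling .as_side_exit() at every use. A minimal sketch of the types involved, reconstructed from the hunks below (the exact variant list and helper bodies are not shown in this diff and are assumptions):

    // Sketch only: Target as this diff uses it. Other variants elided.
    enum Target {
        CodePtr(CodePtr),     // jump to raw generated code
        SideExitPtr(CodePtr), // jump to an outlined side exit
    }

    impl CodePtr {
        // Assumed helper: wrap a raw pointer as a side-exit target.
        fn as_side_exit(self) -> Target {
            Target::SideExitPtr(self)
        }
    }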
@@ -271,6 +271,7 @@ impl Target
     pub fn unwrap_code_ptr(&self) -> CodePtr {
         match self {
             Target::CodePtr(ptr) => *ptr,
+            Target::SideExitPtr(ptr) => *ptr,
             _ => unreachable!("trying to unwrap {:?} into code ptr", self)
         }
     }
@@ -212,11 +212,11 @@ macro_rules! counted_exit {
             gen_counter_incr!(ocb_asm, $counter_name);
 
             // Jump to the existing side exit
-            ocb_asm.jmp($existing_side_exit.as_side_exit());
+            ocb_asm.jmp($existing_side_exit);
             ocb_asm.compile(ocb);
 
             // Pointer to the side-exit code
-            code_ptr
+            code_ptr.as_side_exit()
         }
     };
 }
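Since the macro now evaluates to a Target, call sites can feed its result straight into a jump instruction. A usage example taken from a later hunk in this same diff:

    asm.jz(counted_exit!(ocb, side_exit, oaref_arg_not_fixnum));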
@@ -460,14 +460,14 @@ fn gen_outlined_exit(exit_pc: *mut VALUE, ctx: &Context, ocb: &mut OutlinedCb) -
 // moment, so there is one unique side exit for each context. Note that
 // it's incorrect to jump to the side exit after any ctx stack push operations
 // since they change the logic required for reconstructing interpreter state.
-fn get_side_exit(jit: &mut JITState, ocb: &mut OutlinedCb, ctx: &Context) -> CodePtr {
+fn get_side_exit(jit: &mut JITState, ocb: &mut OutlinedCb, ctx: &Context) -> Target {
     match jit.side_exit_for_pc {
         None => {
             let exit_code = gen_outlined_exit(jit.pc, ctx, ocb);
             jit.side_exit_for_pc = Some(exit_code);
-            exit_code
+            exit_code.as_side_exit()
         }
-        Some(code_ptr) => code_ptr,
+        Some(code_ptr) => code_ptr.as_side_exit()
     }
 }
 
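Note that the cache itself still holds the raw CodePtr; only the value handed back to callers is wrapped. A sketch of the relevant JITState field, inferred from the hunk above (the surrounding struct layout is assumed):

    pub struct JITState {
        // ... other fields elided ...
        // One memoized side exit per PC; wrapped via as_side_exit() on return.
        side_exit_for_pc: Option<CodePtr>,
    }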
@@ -486,7 +486,7 @@ pub fn jit_ensure_block_entry_exit(jit: &mut JITState, ocb: &mut OutlinedCb) {
     // If we're compiling the first instruction in the block.
     if jit.insn_idx == blockid.idx {
         // Generate the exit with the cache in jitstate.
-        block.entry_exit = Some(get_side_exit(jit, ocb, &block_ctx));
+        block.entry_exit = Some(get_side_exit(jit, ocb, &block_ctx).unwrap_code_ptr());
     } else {
         let _pc = unsafe { rb_iseq_pc_at_idx(blockid.iseq, blockid.idx) };
         block.entry_exit = Some(gen_outlined_exit(jit.pc, &block_ctx, ocb));
@@ -641,7 +641,7 @@ pub fn gen_entry_prologue(cb: &mut CodeBlock, iseq: IseqPtr, insn_idx: u32) -> O
 
 // Generate code to check for interrupts and take a side-exit.
 // Warning: this function clobbers REG0
-fn gen_check_ints(asm: &mut Assembler, side_exit: CodePtr) {
+fn gen_check_ints(asm: &mut Assembler, side_exit: Target) {
     // Check for interrupts
     // see RUBY_VM_CHECK_INTS(ec) macro
     asm.comment("RUBY_VM_CHECK_INTS(ec)");
@@ -651,7 +651,7 @@ fn gen_check_ints(asm: &mut Assembler, side_exit: CodePtr) {
     let interrupt_flag = asm.load(Opnd::mem(32, EC, RUBY_OFFSET_EC_INTERRUPT_FLAG));
     asm.test(interrupt_flag, interrupt_flag);
 
-    asm.jnz(Target::SideExitPtr(side_exit));
+    asm.jnz(side_exit);
 }
 
 // Generate a stubbed unconditional jump to the next bytecode instruction.
@@ -1120,7 +1120,7 @@ fn gen_opt_plus(
     // Add arg0 + arg1 and test for overflow
     let arg0_untag = asm.sub(arg0, Opnd::Imm(1));
     let out_val = asm.add(arg0_untag, arg1);
-    asm.jo(side_exit.as_side_exit());
+    asm.jo(side_exit);
 
     // Push the output on the stack
     let dst = ctx.stack_push(Type::Fixnum);
@@ -1297,23 +1297,23 @@ fn gen_newrange(
 fn guard_object_is_heap(
     asm: &mut Assembler,
     object_opnd: Opnd,
-    side_exit: CodePtr,
+    side_exit: Target,
 ) {
     asm.comment("guard object is heap");
 
     // Test that the object is not an immediate
     asm.test(object_opnd, (RUBY_IMMEDIATE_MASK as u64).into());
-    asm.jnz(side_exit.as_side_exit());
+    asm.jnz(side_exit);
 
     // Test that the object is not false
     asm.cmp(object_opnd, Qfalse.into());
-    asm.je(side_exit.as_side_exit());
+    asm.je(side_exit);
 }
 
 fn guard_object_is_array(
     asm: &mut Assembler,
     object_opnd: Opnd,
-    side_exit: CodePtr,
+    side_exit: Target,
 ) {
     asm.comment("guard object is array");
 
@@ -1323,7 +1323,7 @@ fn guard_object_is_array(
 
     // Compare the result with T_ARRAY
     asm.cmp(flags_opnd, (RUBY_T_ARRAY as u64).into());
-    asm.jne(side_exit.as_side_exit());
+    asm.jne(side_exit);
 }
 
 /// This guards that a special flag is not set on a hash.
@@ -1333,7 +1333,7 @@ fn guard_object_is_array(
 fn guard_object_is_not_ruby2_keyword_hash(
     asm: &mut Assembler,
     object_opnd: Opnd,
-    side_exit: CodePtr,
+    side_exit: Target,
 ) {
     asm.comment("guard object is not ruby2 keyword hash");
 
@@ -1355,7 +1355,7 @@ fn guard_object_is_not_ruby2_keyword_hash(
     asm.jne(not_ruby2_keyword);
 
     asm.test(flags_opnd, (RHASH_PASS_AS_KEYWORDS as u64).into());
-    asm.jnz(side_exit.as_side_exit());
+    asm.jnz(side_exit);
 
     asm.write_label(not_ruby2_keyword);
 }
@@ -1363,7 +1363,7 @@ fn guard_object_is_not_ruby2_keyword_hash(
 fn guard_object_is_string(
     asm: &mut Assembler,
     object_reg: Opnd,
-    side_exit: CodePtr,
+    side_exit: Target,
 ) {
     asm.comment("guard object is string");
 
@@ -1373,7 +1373,7 @@ fn guard_object_is_string(
 
     // Compare the result with T_STRING
     asm.cmp(flags_reg, Opnd::UImm(RUBY_T_STRING as u64));
-    asm.jne(side_exit.as_side_exit());
+    asm.jne(side_exit);
 }
 
 // push enough nils onto the stack to fill out an array
@@ -1454,7 +1454,7 @@ fn gen_expandarray(
     // Only handle the case where the number of values in the array is greater
     // than or equal to the number of values requested.
     asm.cmp(array_len_opnd, num.into());
-    asm.jl(counted_exit!(ocb, side_exit, expandarray_rhs_too_small).as_side_exit());
+    asm.jl(counted_exit!(ocb, side_exit, expandarray_rhs_too_small));
 
     // Load the address of the embedded array into REG1.
     // (struct RArray *)(obj)->as.ary
@@ -1651,7 +1651,7 @@ fn gen_setlocal_wc0(
         let side_exit = get_side_exit(jit, ocb, ctx);
 
         // if (flags & VM_ENV_FLAG_WB_REQUIRED) != 0
-        asm.jnz(side_exit.as_side_exit());
+        asm.jnz(side_exit);
     }
 
     // Set the type of the local variable in the context
@@ -1696,7 +1696,7 @@ fn gen_setlocal_generic(
         let side_exit = get_side_exit(jit, ocb, ctx);
 
         // if (flags & VM_ENV_FLAG_WB_REQUIRED) != 0
-        asm.jnz(side_exit.as_side_exit());
+        asm.jnz(side_exit);
     }
 
     // Pop the value to write from the stack
@@ -1889,7 +1889,7 @@ fn jit_chain_guard(
     asm: &mut Assembler,
     ocb: &mut OutlinedCb,
     depth_limit: i32,
-    side_exit: CodePtr,
+    side_exit: Target,
 ) {
     let target0_gen_fn = match jcc {
         JCC_JNE | JCC_JNZ => gen_jnz_to_target0,
@@ -1907,7 +1907,7 @@ fn jit_chain_guard(
 
         gen_branch(jit, asm, ocb, bid, &deeper, None, None, target0_gen_fn);
     } else {
-        target0_gen_fn(asm, side_exit, None, BranchShape::Default);
+        target0_gen_fn(asm, side_exit.unwrap_code_ptr(), None, BranchShape::Default);
    }
 }
 
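This is where the backend change in the first hunk pays off: side_exit is now a Target::SideExitPtr(..), and the terminal branch generator still wants a raw pointer, so unwrap_code_ptr() has to accept the SideExitPtr variant or it would hit the unreachable!(). Illustrative only:

    let side_exit: Target = code_ptr.as_side_exit(); // Target::SideExitPtr(code_ptr)
    let raw: CodePtr = side_exit.unwrap_code_ptr();  // ok once this commit lands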
@@ -1989,7 +1989,7 @@ fn gen_get_ivar(
     ivar_name: ID,
     recv: Opnd,
     recv_opnd: YARVOpnd,
-    side_exit: CodePtr,
+    side_exit: Target,
 ) -> CodegenStatus {
     // If the object has a too complex shape, we exit
     if comptime_receiver.shape_too_complex() {
@@ -2544,7 +2544,7 @@ fn guard_two_fixnums(
     ctx: &mut Context,
     asm: &mut Assembler,
     ocb: &mut OutlinedCb,
-    side_exit: CodePtr
+    side_exit: Target
 ) {
     // Get the stack operand types
     let arg1_type = ctx.get_opnd_type(StackOpnd(0));
@@ -2552,19 +2552,19 @@ fn guard_two_fixnums(
 
     if arg0_type.is_heap() || arg1_type.is_heap() {
         asm.comment("arg is heap object");
-        asm.jmp(side_exit.as_side_exit());
+        asm.jmp(side_exit);
         return;
     }
 
     if arg0_type != Type::Fixnum && arg0_type.is_specific() {
         asm.comment("arg0 not fixnum");
-        asm.jmp(side_exit.as_side_exit());
+        asm.jmp(side_exit);
         return;
     }
 
     if arg1_type != Type::Fixnum && arg1_type.is_specific() {
         asm.comment("arg1 not fixnum");
-        asm.jmp(side_exit.as_side_exit());
+        asm.jmp(side_exit);
         return;
     }
 
@@ -2704,7 +2704,7 @@ fn gen_equality_specialized(
     ctx: &mut Context,
     asm: &mut Assembler,
     ocb: &mut OutlinedCb,
-    side_exit: CodePtr,
+    side_exit: Target,
 ) -> bool {
     let comptime_a = jit_peek_at_stack(jit, ctx, 1);
     let comptime_b = jit_peek_at_stack(jit, ctx, 0);
@@ -2888,7 +2888,7 @@ fn gen_opt_aref(
         // Bail if idx is not a FIXNUM
         let idx_reg = asm.load(idx_opnd);
         asm.test(idx_reg, (RUBY_FIXNUM_FLAG as u64).into());
-        asm.jz(counted_exit!(ocb, side_exit, oaref_arg_not_fixnum).as_side_exit());
+        asm.jz(counted_exit!(ocb, side_exit, oaref_arg_not_fixnum));
 
         // Call VALUE rb_ary_entry_internal(VALUE ary, long offset).
         // It never raises or allocates, so we don't need to write to cfp->pc.
@@ -3185,7 +3185,7 @@ fn gen_opt_minus(
 
     // Subtract arg0 - arg1 and test for overflow
     let val_untag = asm.sub(arg0, arg1);
-    asm.jo(side_exit.as_side_exit());
+    asm.jo(side_exit);
     let val = asm.add(val_untag, Opnd::Imm(1));
 
     // Push the output on the stack
@@ -3252,7 +3252,7 @@ fn gen_opt_mod(
 
     // Check for arg0 % 0
     asm.cmp(arg1, Opnd::Imm(VALUE::fixnum_from_usize(0).as_i64()));
-    asm.je(side_exit.as_side_exit());
+    asm.je(side_exit);
 
     // Call rb_fix_mod_fix(VALUE recv, VALUE obj)
     let ret = asm.ccall(rb_fix_mod_fix as *const u8, vec![arg0, arg1]);
@@ -3791,7 +3791,7 @@ fn jit_guard_known_klass(
     insn_opnd: YARVOpnd,
     sample_instance: VALUE,
     max_chain_depth: i32,
-    side_exit: CodePtr,
+    side_exit: Target,
 ) {
     let val_type = ctx.get_opnd_type(insn_opnd);
 
@@ -3927,7 +3927,7 @@ fn jit_protected_callee_ancestry_guard(
     asm: &mut Assembler,
     ocb: &mut OutlinedCb,
     cme: *const rb_callable_method_entry_t,
-    side_exit: CodePtr,
+    side_exit: Target,
 ) {
     // See vm_call_method().
     let def_class = unsafe { (*cme).defined_class };
@@ -3942,7 +3942,7 @@ fn jit_protected_callee_ancestry_guard(
         ],
     );
     asm.test(val, val);
-    asm.jz(counted_exit!(ocb, side_exit, send_se_protected_check_failed).as_side_exit())
+    asm.jz(counted_exit!(ocb, side_exit, send_se_protected_check_failed))
 }
 
 // Codegen for rb_obj_not().
@@ -4228,9 +4228,9 @@ fn jit_rb_str_concat(
     if !arg_type.is_heap() {
         asm.comment("guard arg not immediate");
         asm.test(arg_opnd, (RUBY_IMMEDIATE_MASK as u64).into());
-        asm.jnz(side_exit.as_side_exit());
+        asm.jnz(side_exit);
         asm.cmp(arg_opnd, Qfalse.into());
-        asm.je(side_exit.as_side_exit());
+        asm.je(side_exit);
     }
     guard_object_is_string(asm, arg_opnd, side_exit);
 }
@@ -4362,7 +4362,7 @@ fn jit_obj_respond_to(
     // This is necessary because we have no guarantee that sym_opnd is a constant
     asm.comment("guard known mid");
     asm.cmp(sym_opnd, mid_sym.into());
-    asm.jne(side_exit.as_side_exit());
+    asm.jne(side_exit);
 
     jit_putobject(jit, ctx, asm, result);
 
@@ -4679,7 +4679,7 @@ fn gen_send_cfunc(
     asm.comment("stack overflow check");
     let stack_limit = asm.lea(ctx.sp_opnd((SIZEOF_VALUE * 4 + 2 * RUBY_SIZEOF_CONTROL_FRAME) as isize));
     asm.cmp(CFP, stack_limit);
-    asm.jbe(counted_exit!(ocb, side_exit, send_se_cf_overflow).as_side_exit());
+    asm.jbe(counted_exit!(ocb, side_exit, send_se_cf_overflow));
 
     // Number of args which will be passed through to the callee
     // This is adjusted by the kwargs being combined into a hash.
@@ -4890,7 +4890,7 @@ fn gen_return_branch(
 /// Pushes arguments from an array to the stack that are passed with a splat (i.e. *args)
 /// It optimistically compiles to a static size that is the exact number of arguments
 /// needed for the function.
-fn push_splat_args(required_args: u32, ctx: &mut Context, asm: &mut Assembler, ocb: &mut OutlinedCb, side_exit: CodePtr) {
+fn push_splat_args(required_args: u32, ctx: &mut Context, asm: &mut Assembler, ocb: &mut OutlinedCb, side_exit: Target) {
 
     asm.comment("push_splat_args");
 
@@ -4934,7 +4934,7 @@ fn push_splat_args(required_args: u32, ctx: &mut Context, asm: &mut Assembler, o
 
     asm.comment("Side exit if length doesn't not equal remaining args");
     asm.cmp(array_len_opnd, required_args.into());
-    asm.jne(counted_exit!(ocb, side_exit, send_splatarray_length_not_equal).as_side_exit());
+    asm.jne(counted_exit!(ocb, side_exit, send_splatarray_length_not_equal));
 
     asm.comment("Check last argument is not ruby2keyword hash");
 
@@ -5358,7 +5358,7 @@ fn gen_send_iseq(
         SIZEOF_VALUE_I32 * (num_locals + stack_max) + 2 * (RUBY_SIZEOF_CONTROL_FRAME as i32);
     let stack_limit = asm.lea(ctx.sp_opnd(locals_offs as isize));
     asm.cmp(CFP, stack_limit);
-    asm.jbe(counted_exit!(ocb, side_exit, send_se_cf_overflow).as_side_exit());
+    asm.jbe(counted_exit!(ocb, side_exit, send_se_cf_overflow));
 
     // push_splat_args does stack manipulation so we can no longer side exit
     if flags & VM_CALL_ARGS_SPLAT != 0 {
@@ -6273,7 +6273,7 @@ fn gen_invokeblock(
         let side_exit = get_side_exit(jit, ocb, ctx);
         let tag_opnd = asm.and(block_handler_opnd, 0x3.into()); // block_handler is a tagged pointer
         asm.cmp(tag_opnd, 0x1.into()); // VM_BH_ISEQ_BLOCK_P
-        asm.jne(counted_exit!(ocb, side_exit, invokeblock_iseq_tag_changed).as_side_exit());
+        asm.jne(counted_exit!(ocb, side_exit, invokeblock_iseq_tag_changed));
 
         // Not supporting vm_callee_setup_block_arg_arg0_splat for now
         let comptime_captured = unsafe { ((comptime_handler.0 & !0x3) as *const rb_captured_block).as_ref().unwrap() };
@@ -6431,7 +6431,7 @@ fn gen_invokesuper(
         SIZEOF_VALUE_I32 * VM_ENV_DATA_INDEX_ME_CREF,
     );
     asm.cmp(ep_me_opnd, me_as_value.into());
-    asm.jne(counted_exit!(ocb, side_exit, invokesuper_me_changed).as_side_exit());
+    asm.jne(counted_exit!(ocb, side_exit, invokesuper_me_changed));
 
     if block.is_none() {
         // Guard no block passed
@@ -6449,7 +6449,7 @@ fn gen_invokesuper(
             SIZEOF_VALUE_I32 * VM_ENV_DATA_INDEX_SPECVAL,
         );
         asm.cmp(ep_specval_opnd, VM_BLOCK_HANDLER_NONE.into());
-        asm.jne(counted_exit!(ocb, side_exit, invokesuper_block).as_side_exit());
+        asm.jne(counted_exit!(ocb, side_exit, invokesuper_block));
     }
 
     // We need to assume that both our current method entry and the super
@@ -6887,7 +6887,7 @@ fn gen_opt_getconstant_path(
         // Check the result. SysV only specifies one byte for _Bool return values,
         // so it's important we only check one bit to ignore the higher bits in the register.
         asm.test(ret_val, 1.into());
-        asm.jz(counted_exit!(ocb, side_exit, opt_getinlinecache_miss).as_side_exit());
+        asm.jz(counted_exit!(ocb, side_exit, opt_getinlinecache_miss));
 
         let inline_cache = asm.load(Opnd::const_ptr(ic as *const u8));
 
@@ -6966,7 +6966,7 @@ fn gen_getblockparamproxy(
         SIZEOF_VALUE_I32 * (VM_ENV_DATA_INDEX_FLAGS as i32),
     );
     asm.test(flag_check, VM_FRAME_FLAG_MODIFIED_BLOCK_PARAM.into());
-    asm.jnz(counted_exit!(ocb, side_exit, gbpp_block_param_modified).as_side_exit());
+    asm.jnz(counted_exit!(ocb, side_exit, gbpp_block_param_modified));
 
     // Load the block handler for the current frame
     // note, VM_ASSERT(VM_ENV_LOCAL_P(ep))
@@ -7061,7 +7061,7 @@ fn gen_getblockparam(
     asm.test(flags_opnd, VM_ENV_FLAG_WB_REQUIRED.into());
 
     // if (flags & VM_ENV_FLAG_WB_REQUIRED) != 0
-    asm.jnz(side_exit.as_side_exit());
+    asm.jnz(side_exit);
 
     // Convert the block handler in to a proc
     // call rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler)
@@ -7646,7 +7646,7 @@ mod tests {
     #[test]
     fn test_gen_check_ints() {
         let (_, _ctx, mut asm, _cb, mut ocb) = setup_codegen();
-        let side_exit = ocb.unwrap().get_write_ptr();
+        let side_exit = ocb.unwrap().get_write_ptr().as_side_exit();
         gen_check_ints(&mut asm, side_exit);
     }
 
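Taken together, the new call-site shape looks like this (lines assembled from the hunks above, not a single verbatim excerpt):

    let side_exit = get_side_exit(jit, ocb, ctx);          // now a Target, not a CodePtr
    asm.test(flags_opnd, VM_ENV_FLAG_WB_REQUIRED.into());
    asm.jnz(side_exit);                                    // no .as_side_exit() at the jump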