diff --git a/yjit/src/backend/arm64/mod.rs b/yjit/src/backend/arm64/mod.rs index 9787b1a4e9..a74e5cf0ff 100644 --- a/yjit/src/backend/arm64/mod.rs +++ b/yjit/src/backend/arm64/mod.rs @@ -1079,6 +1079,9 @@ impl Assembler Insn::Jl(target) => { emit_conditional_jump::<{Condition::LT}>(cb, compile_side_exit(*target, self, ocb)); }, + Insn::Jg(target) => { + emit_conditional_jump::<{Condition::GT}>(cb, compile_side_exit(*target, self, ocb)); + }, Insn::Jbe(target) => { emit_conditional_jump::<{Condition::LS}>(cb, compile_side_exit(*target, self, ocb)); }, diff --git a/yjit/src/backend/ir.rs b/yjit/src/backend/ir.rs index a28c833e89..72a4bc711b 100644 --- a/yjit/src/backend/ir.rs +++ b/yjit/src/backend/ir.rs @@ -432,6 +432,9 @@ pub enum Insn { /// Jump if lower Jl(Target), + /// Jump if greater + Jg(Target), + // Unconditional jump to a branch target Jmp(Target), @@ -578,6 +581,7 @@ impl Insn { Insn::Jbe(_) => "Jbe", Insn::Je(_) => "Je", Insn::Jl(_) => "Jl", + Insn::Jg(_) => "Jg", Insn::Jmp(_) => "Jmp", Insn::JmpOpnd(_) => "JmpOpnd", Insn::Jne(_) => "Jne", @@ -725,6 +729,7 @@ impl<'a> Iterator for InsnOpndIterator<'a> { Insn::Jbe(_) | Insn::Je(_) | Insn::Jl(_) | + Insn::Jg(_) | Insn::Jmp(_) | Insn::Jne(_) | Insn::Jnz(_) | @@ -822,6 +827,7 @@ impl<'a> InsnOpndMutIterator<'a> { Insn::Jbe(_) | Insn::Je(_) | Insn::Jl(_) | + Insn::Jg(_) | Insn::Jmp(_) | Insn::Jne(_) | Insn::Jnz(_) | diff --git a/yjit/src/backend/x86_64/mod.rs b/yjit/src/backend/x86_64/mod.rs index b40d8b2382..0cc276fca1 100644 --- a/yjit/src/backend/x86_64/mod.rs +++ b/yjit/src/backend/x86_64/mod.rs @@ -673,6 +673,14 @@ impl Assembler } }, + Insn::Jg(target) => { + match compile_side_exit(*target, self, ocb) { + Target::CodePtr(code_ptr) | Target::SideExitPtr(code_ptr) => jg_ptr(cb, code_ptr), + Target::Label(label_idx) => jg_label(cb, label_idx), + Target::SideExit { .. 
} => unreachable!("Target::SideExit should have been compiled by compile_side_exit"), + } + }, + Insn::Jbe(target) => { match compile_side_exit(*target, self, ocb) { Target::CodePtr(code_ptr) | Target::SideExitPtr(code_ptr) => jbe_ptr(cb, code_ptr), diff --git a/yjit/src/codegen.rs b/yjit/src/codegen.rs index f65ffefe5a..204726181d 100644 --- a/yjit/src/codegen.rs +++ b/yjit/src/codegen.rs @@ -4325,6 +4325,45 @@ fn jit_rb_int_div( true } +/* +fn jit_rb_int_lshift( + jit: &mut JITState, + asm: &mut Assembler, + ocb: &mut OutlinedCb, + _ci: *const rb_callinfo, + _cme: *const rb_callable_method_entry_t, + _block: Option<BlockHandler>, + _argc: i32, + _known_recv_class: *const VALUE, +) -> bool { + if asm.ctx.two_fixnums_on_stack(jit) != Some(true) { + return false; + } + guard_two_fixnums(jit, asm, ocb); + + let rhs = asm.stack_pop(1); + let lhs = asm.stack_pop(1); + + // Ruby supports using a negative shift value + asm.comment("Guard shift negative"); + let shift_val = asm.sub(rhs, 1.into()); + asm.cmp(shift_val, 0.into()); + asm.jl(Target::side_exit(Counter::lshift_range)); + + asm.cmp(shift_val, 63.into()); + asm.jg(Target::side_exit(Counter::lshift_range)); + + // FIXME: we don't yet support shift with non-immediate values in the backend + // Do the shifting + let out_val = asm.lshift(lhs, shift_val); + asm.jo(Target::side_exit(Counter::lshift_overflow)); + + let ret_opnd = asm.stack_push(Type::Fixnum); + asm.mov(ret_opnd, out_val); + true +} +*/ + fn jit_rb_int_aref( jit: &mut JITState, asm: &mut Assembler, @@ -8371,6 +8410,7 @@ impl CodegenGlobals { self.yjit_reg_method(rb_cInteger, "*", jit_rb_int_mul); self.yjit_reg_method(rb_cInteger, "/", jit_rb_int_div); + //self.yjit_reg_method(rb_cInteger, "<<", jit_rb_int_lshift); self.yjit_reg_method(rb_cInteger, "[]", jit_rb_int_aref); // rb_str_to_s() methods in string.c diff --git a/yjit/src/stats.rs b/yjit/src/stats.rs index 5da15198b1..b1773df359 100644 --- a/yjit/src/stats.rs +++ b/yjit/src/stats.rs @@ -335,6 +335,9 @@ 
make_counters! { opt_mod_zero, opt_div_zero, + lshift_range, + lshift_overflow, + opt_aref_argc_not_one, opt_aref_arg_not_fixnum, opt_aref_not_array,