YJIT: Compile exception handlers (#8171)

Co-authored-by: Maxime Chevalier-Boisvert <maximechevalierb@gmail.com>
Takashi Kokubun 2023-08-08 16:06:22 -07:00 committed by GitHub
parent 74b9c7d207
commit cd8d20cd1f
Notes: git 2023-08-08 23:06:42 +00:00
Merged-By: k0kubun <takashikkbn@gmail.com>
16 changed files with 275 additions and 86 deletions


@@ -4004,3 +4004,25 @@ assert_equal '[]', %q{
_x, _y = func.call
end.call
}
# Catch TAG_BREAK in a non-FINISH frame with JIT code
assert_equal '1', %q{
def entry
catch_break
end
def catch_break
while_true do
break
end
1
end
def while_true
while true
yield
end
end
entry
}
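
What this test exercises: "break" inside the block throws TAG_BREAK, the VM unwinds to catch_break's frame, and vm_exec_handle_exception looks up a handler in that ISEQ's catch table; the handler frame it resumes does not have VM_FRAME_FLAG_FINISH, which is exactly the entry the new jit_exec_exception() path must serve. A rough standalone sketch of that lookup follows; the types are simplified stand-ins, not CRuby's actual structs.

#include <stddef.h>

// Simplified stand-ins for CRuby's catch table entries.
enum catch_type { CATCH_TYPE_RESCUE, CATCH_TYPE_ENSURE, CATCH_TYPE_BREAK };

struct catch_entry {
    enum catch_type type;
    unsigned start, end; // protected region of the ISEQ, as instruction indexes
    unsigned cont;       // where execution continues when this entry matches
};

// Rough model of the lookup vm_exec_handle_exception performs: find the entry
// that protects `pc` for the thrown tag and return its continuation point,
// or -1 if this frame doesn't handle the tag and unwinding must continue.
static long
find_catch_handler(const struct catch_entry *table, size_t len,
                   unsigned pc, enum catch_type tag)
{
    for (size_t i = 0; i < len; i++) {
        if (table[i].type == tag && table[i].start <= pc && pc < table[i].end) {
            return (long)table[i].cont;
        }
    }
    return -1;
}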


@@ -64,7 +64,7 @@ module RubyVM::RJIT
asm = Assembler.new
compile_prologue(asm, iseq, pc)
compile_block(asm, jit:, pc:)
iseq.body.jit_func = @cb.write(asm)
iseq.body.jit_entry = @cb.write(asm)
rescue Exception => e
$stderr.puts e.full_message
exit 1
@@ -176,8 +176,8 @@ module RubyVM::RJIT
# If they were the ISEQ's first blocks, re-compile RJIT entry as well
if iseq.body.iseq_encoded.to_i == pc
iseq.body.jit_func = 0
iseq.body.total_calls = 0
iseq.body.jit_entry = 0
iseq.body.jit_entry_calls = 0
end
end


@@ -143,11 +143,11 @@ module RubyVM::RJIT
C.rjit_for_each_iseq do |iseq|
# Avoid entering past code
iseq.body.jit_func = 0
iseq.body.jit_entry = 0
# Avoid reusing past code
iseq.body.rjit_blocks.clear if iseq.body.rjit_blocks
# Compile this again if not converted to trace_* insns
iseq.body.total_calls = 0
iseq.body.jit_entry_calls = 0
end
end
end


@@ -1177,8 +1177,8 @@ module RubyVM::RJIT # :nodoc: all
), Primitive.cexpr!("OFFSETOF((*((struct rb_iseq_constant_body *)NULL)), mark_bits)")],
outer_variables: [CType::Pointer.new { self.rb_id_table }, Primitive.cexpr!("OFFSETOF((*((struct rb_iseq_constant_body *)NULL)), outer_variables)")],
mandatory_only_iseq: [CType::Pointer.new { self.rb_iseq_t }, Primitive.cexpr!("OFFSETOF((*((struct rb_iseq_constant_body *)NULL)), mandatory_only_iseq)")],
jit_func: [self.rb_jit_func_t, Primitive.cexpr!("OFFSETOF((*((struct rb_iseq_constant_body *)NULL)), jit_func)")],
total_calls: [CType::Immediate.parse("unsigned long"), Primitive.cexpr!("OFFSETOF((*((struct rb_iseq_constant_body *)NULL)), total_calls)")],
jit_entry: [self.rb_jit_func_t, Primitive.cexpr!("OFFSETOF((*((struct rb_iseq_constant_body *)NULL)), jit_entry)")],
jit_entry_calls: [CType::Immediate.parse("unsigned long"), Primitive.cexpr!("OFFSETOF((*((struct rb_iseq_constant_body *)NULL)), jit_entry_calls)")],
rjit_blocks: [self.VALUE, Primitive.cexpr!("OFFSETOF((*((struct rb_iseq_constant_body *)NULL)), rjit_blocks)"), true],
)
end


@@ -638,7 +638,7 @@ generator = BindingGenerator.new(
skip_fields: {
'rb_execution_context_struct.machine': %w[regs], # differs between macOS and Linux
rb_execution_context_struct: %w[method_missing_reason], # non-leading bit fields not supported
rb_iseq_constant_body: %w[yjit_payload], # conditionally defined
rb_iseq_constant_body: %w[jit_exception jit_exception_calls yjit_payload], # conditionally defined
rb_thread_struct: %w[status has_dedicated_nt to_kill abort_on_exception report_on_exception pending_interrupt_queue_checked],
:'' => %w[is_from_method is_lambda is_isolated], # rb_proc_t
},

vm.c

@@ -370,7 +370,14 @@ extern VALUE rb_vm_invoke_bmethod(rb_execution_context_t *ec, rb_proc_t *proc, V
static VALUE vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE block_handler);
#if USE_RJIT || USE_YJIT
// Try to compile the current ISeq in ec. Return 0 if not compiled.
// Generate JIT code that supports the following kinds of ISEQ entries:
// * The first ISEQ on vm_exec (e.g. <main>, or Ruby methods/blocks
// called by a C method). The current frame has VM_FRAME_FLAG_FINISH.
// The current vm_exec stops if JIT code returns a non-Qundef value.
// * ISEQs called by the interpreter on vm_sendish (e.g. Ruby methods or
// blocks called by a Ruby frame that isn't compiled or side-exited).
// The current frame doesn't have VM_FRAME_FLAG_FINISH. The current
// vm_exec does NOT stop, regardless of whether the JIT code returns Qundef.
static inline rb_jit_func_t
jit_compile(rb_execution_context_t *ec)
{
@@ -379,35 +386,29 @@ jit_compile(rb_execution_context_t *ec)
struct rb_iseq_constant_body *body = ISEQ_BODY(iseq);
bool yjit_enabled = rb_yjit_compile_new_iseqs();
if (yjit_enabled || rb_rjit_call_p) {
body->total_calls++;
body->jit_entry_calls++;
}
else {
return 0;
return NULL;
}
// Don't try to compile the function if it's already compiled
if (body->jit_func) {
return body->jit_func;
}
// Trigger JIT compilation as needed
if (yjit_enabled) {
if (rb_yjit_threshold_hit(iseq)) {
rb_yjit_compile_iseq(iseq, ec);
// Trigger JIT compilation if not compiled
if (body->jit_entry == NULL) {
if (yjit_enabled) {
if (rb_yjit_threshold_hit(iseq, body->jit_entry_calls)) {
rb_yjit_compile_iseq(iseq, ec, false);
}
}
else { // rb_rjit_call_p
if (body->jit_entry_calls == rb_rjit_call_threshold()) {
rb_rjit_compile(iseq);
}
}
}
else { // rb_rjit_call_p
if (body->total_calls == rb_rjit_call_threshold()) {
rb_rjit_compile(iseq);
}
}
return body->jit_func;
return body->jit_entry;
}
// Try to execute the current iseq in ec. Use JIT code if it is ready.
// If it is not, add ISEQ to the compilation queue and return Qundef for RJIT.
// YJIT compiles on the thread running the iseq.
// Execute JIT code compiled by jit_compile()
static inline VALUE
jit_exec(rb_execution_context_t *ec)
{
@@ -425,6 +426,51 @@ jit_exec(rb_execution_context_t *ec)
# define jit_exec(ec) Qundef
#endif
#if USE_YJIT
// Generate JIT code that supports the following kind of ISEQ entry:
// * The first ISEQ pushed by vm_exec_handle_exception. The frame's PC
//   points to a location specified by a catch table, and it doesn't have
// VM_FRAME_FLAG_FINISH. The current vm_exec stops if JIT code returns
// a non-Qundef value. So you should not return a non-Qundef value
// until ec->cfp is changed to a frame with VM_FRAME_FLAG_FINISH.
static inline rb_jit_func_t
jit_compile_exception(rb_execution_context_t *ec)
{
// Increment the ISEQ's call counter
const rb_iseq_t *iseq = ec->cfp->iseq;
struct rb_iseq_constant_body *body = ISEQ_BODY(iseq);
if (rb_yjit_compile_new_iseqs()) {
body->jit_exception_calls++;
}
else {
return NULL;
}
// Trigger JIT compilation if not compiled
if (body->jit_exception == NULL && rb_yjit_threshold_hit(iseq, body->jit_exception_calls)) {
rb_yjit_compile_iseq(iseq, ec, true);
}
return body->jit_exception;
}
// Execute JIT code compiled by jit_compile_exception()
static inline VALUE
jit_exec_exception(rb_execution_context_t *ec)
{
rb_jit_func_t func = jit_compile_exception(ec);
if (func) {
// Call the JIT code
return func(ec, ec->cfp);
}
else {
return Qundef;
}
}
#else
# define jit_compile_exception(ec) ((rb_jit_func_t)0)
# define jit_exec_exception(ec) Qundef
#endif
#include "vm_insnhelper.c"
#include "vm_exec.c"
@@ -2381,8 +2427,11 @@ vm_exec_loop(rb_execution_context_t *ec, enum ruby_tag_type state,
rb_ec_raised_reset(ec, RAISED_STACKOVERFLOW | RAISED_NOMEMORY);
while (UNDEF_P(result = vm_exec_handle_exception(ec, state, result))) {
/* caught a jump, exec the handler */
result = vm_exec_core(ec);
// caught a jump, exec the handler. JIT code in jit_exec_exception()
// may return Qundef to run remaining frames with vm_exec_core().
if (UNDEF_P(result = jit_exec_exception(ec))) {
result = vm_exec_core(ec);
}
vm_loop_start:
VM_ASSERT(ec->tag == tag);
/* when caught `throw`, `tag.state` is set. */
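
Putting the vm.c pieces together: a standalone toy of the dispatch shape this commit creates, where the initial entry and each caught-jump re-entry both try JIT code first and treat Qundef as "fall back to the interpreter". All names below are illustrative stand-ins, not the real vm.c API.

#include <stdbool.h>

#define QUNDEF (-1L) // stand-in for Ruby's Qundef sentinel

// Each JIT entry returns QUNDEF when there is no usable JIT code (or the
// JIT code bailed out), meaning the interpreter must take over.
static long jit_exec_toy(void)           { return QUNDEF; }
static long jit_exec_exception_toy(void) { return QUNDEF; }
static long vm_exec_core_toy(void)       { return 1; }

// The first entry uses the normal JIT entry point; after a caught
// jump/exception, the handler frame is retried with the new
// exception-specialized entry point before falling back to vm_exec_core.
static long
vm_exec_toy(bool caught_jump)
{
    long result;
    if (!caught_jump) {
        if ((result = jit_exec_toy()) == QUNDEF) result = vm_exec_core_toy();
    }
    else {
        if ((result = jit_exec_exception_toy()) == QUNDEF) result = vm_exec_core_toy();
    }
    return result;
}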


@@ -503,10 +503,17 @@ struct rb_iseq_constant_body {
const rb_iseq_t *mandatory_only_iseq;
#if USE_RJIT || USE_YJIT
// Function pointer for JIT code
rb_jit_func_t jit_func;
// Number of total calls with jit_exec()
long unsigned total_calls;
// Function pointer for JIT code on jit_exec()
rb_jit_func_t jit_entry;
// Number of calls on jit_exec()
long unsigned jit_entry_calls;
#endif
#if USE_YJIT
// Function pointer for JIT code on jit_exec_exception()
rb_jit_func_t jit_exception;
// Number of calls on jit_exec_exception()
long unsigned jit_exception_calls;
#endif
#if USE_RJIT


@@ -2485,6 +2485,12 @@ vm_base_ptr(const rb_control_frame_t *cfp)
}
}
VALUE *
rb_vm_base_ptr(const rb_control_frame_t *cfp)
{
return vm_base_ptr(cfp);
}
/* method call processes with call_info */
#include "vm_args.c"

yjit.c

@@ -422,10 +422,12 @@ void
rb_iseq_reset_jit_func(const rb_iseq_t *iseq)
{
RUBY_ASSERT_ALWAYS(IMEMO_TYPE_P(iseq, imemo_iseq));
iseq->body->jit_func = NULL;
iseq->body->jit_entry = NULL;
iseq->body->jit_exception = NULL;
// Enable re-compiling this ISEQ. Even when it's invalidated for TracePoint,
// we'd like to re-compile ISEQs that haven't been converted to trace_* insns.
iseq->body->total_calls = 0;
iseq->body->jit_entry_calls = 0;
iseq->body->jit_exception_calls = 0;
}
// Get the PC for a given index in an iseq
@@ -597,12 +599,6 @@ rb_get_def_bmethod_proc(rb_method_definition_t *def)
return def->body.bmethod.proc;
}
unsigned long
rb_get_iseq_body_total_calls(const rb_iseq_t *iseq)
{
return iseq->body->total_calls;
}
const rb_iseq_t *
rb_get_iseq_body_local_iseq(const rb_iseq_t *iseq)
{
@@ -832,6 +828,8 @@ rb_get_cfp_ep_level(struct rb_control_frame_struct *cfp, uint32_t lv)
return ep;
}
extern VALUE *rb_vm_base_ptr(struct rb_control_frame_struct *cfp);
VALUE
rb_yarv_class_of(VALUE obj)
{
@@ -1047,27 +1045,24 @@ rb_yjit_vm_unlock(unsigned int *recursive_lock_level, const char *file, int line
rb_vm_lock_leave(recursive_lock_level, file, line);
}
bool
rb_yjit_compile_iseq(const rb_iseq_t *iseq, rb_execution_context_t *ec)
void
rb_yjit_compile_iseq(const rb_iseq_t *iseq, rb_execution_context_t *ec, bool jit_exception)
{
bool success = true;
RB_VM_LOCK_ENTER();
rb_vm_barrier();
// Compile a block version starting at the first instruction
uint8_t *rb_yjit_iseq_gen_entry_point(const rb_iseq_t *iseq, rb_execution_context_t *ec); // defined in Rust
uint8_t *code_ptr = rb_yjit_iseq_gen_entry_point(iseq, ec);
// Compile a block version starting at the current instruction
uint8_t *rb_yjit_iseq_gen_entry_point(const rb_iseq_t *iseq, rb_execution_context_t *ec, bool jit_exception); // defined in Rust
uint8_t *code_ptr = rb_yjit_iseq_gen_entry_point(iseq, ec, jit_exception);
if (code_ptr) {
iseq->body->jit_func = (rb_jit_func_t)code_ptr;
if (jit_exception) {
iseq->body->jit_exception = (rb_jit_func_t)code_ptr;
}
else {
iseq->body->jit_func = 0;
success = false;
iseq->body->jit_entry = (rb_jit_func_t)code_ptr;
}
RB_VM_LOCK_LEAVE();
return success;
}
// GC root for interacting with the GC
@@ -1143,6 +1138,35 @@ rb_yjit_invokeblock_sp_pops(const struct rb_callinfo *ci)
return 1 - sp_inc_of_invokeblock(ci); // + 1 to ignore return value push
}
// Set up jit_return to avoid returning a non-Qundef value on a non-FINISH frame.
// See [jit_compile_exception] for details.
void
rb_yjit_set_exception_return(rb_control_frame_t *cfp, void *leave_exit, void *leave_exception)
{
if (VM_FRAME_FINISHED_P(cfp)) {
// If it's a FINISH frame, just normally exit with a non-Qundef value.
cfp->jit_return = leave_exit;
}
else if (cfp->jit_return) {
while (!VM_FRAME_FINISHED_P(cfp)) {
if (cfp->jit_return == leave_exit) {
// Unlike jit_exec(), leave_exit is not safe on a non-FINISH frame on
// jit_exec_exception(). See [jit_exec] and [jit_exec_exception] for
// details. Exit to the interpreter with Qundef to let it keep executing
// other Ruby frames.
cfp->jit_return = leave_exception;
return;
}
cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
}
}
else {
// If the caller was not JIT code, exit to the interpreter with Qundef
// to keep executing Ruby frames with the interpreter.
cfp->jit_return = leave_exception;
}
}
// Primitives used by yjit.rb
VALUE rb_yjit_stats_enabled_p(rb_execution_context_t *ec, VALUE self);
VALUE rb_yjit_trace_exit_locations_enabled_p(rb_execution_context_t *ec, VALUE self);
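
For readers without the VM headers at hand, here is a standalone toy of the jit_return selection in rb_yjit_set_exception_return above; frame_t is a simplified stand-in for rb_control_frame_t, and the three branches mirror the real function:

#include <stdbool.h>

typedef struct frame {
    bool finished;      // stands in for VM_FRAME_FLAG_FINISH
    void *jit_return;   // address JIT code jumps to when leaving this frame
    struct frame *prev; // caller frame (RUBY_VM_PREVIOUS_CONTROL_FRAME)
} frame_t;

static void
set_exception_return_toy(frame_t *cfp, void *leave_exit, void *leave_exception)
{
    if (cfp->finished) {
        // A FINISH frame may return a non-Qundef value directly.
        cfp->jit_return = leave_exit;
    }
    else if (cfp->jit_return) {
        // The caller chain contains JIT frames: if any of them would leave
        // through leave_exit (unsafe on a non-FINISH frame here), swap it
        // for leave_exception, which returns Qundef to the interpreter.
        for (; !cfp->finished; cfp = cfp->prev) {
            if (cfp->jit_return == leave_exit) {
                cfp->jit_return = leave_exception;
                return;
            }
        }
    }
    else {
        // The caller was the interpreter: exit with Qundef so it keeps
        // executing the remaining Ruby frames.
        cfp->jit_return = leave_exception;
    }
}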

yjit.h

@@ -27,12 +27,12 @@
// Expose these as declarations since we are building YJIT.
bool rb_yjit_enabled_p(void);
bool rb_yjit_compile_new_iseqs(void);
bool rb_yjit_threshold_hit(const rb_iseq_t *const iseq);
bool rb_yjit_threshold_hit(const rb_iseq_t *const iseq, unsigned long total_calls);
void rb_yjit_invalidate_all_method_lookup_assumptions(void);
void rb_yjit_cme_invalidate(rb_callable_method_entry_t *cme);
void rb_yjit_collect_binding_alloc(void);
void rb_yjit_collect_binding_set(void);
bool rb_yjit_compile_iseq(const rb_iseq_t *iseq, rb_execution_context_t *ec);
void rb_yjit_compile_iseq(const rb_iseq_t *iseq, rb_execution_context_t *ec, bool jit_exception);
void rb_yjit_init(void);
void rb_yjit_bop_redefined(int redefined_flag, enum ruby_basic_operators bop);
void rb_yjit_constant_state_changed(ID id);
@@ -49,12 +49,12 @@ void rb_yjit_tracing_invalidate_all(void);
static inline bool rb_yjit_enabled_p(void) { return false; }
static inline bool rb_yjit_compile_new_iseqs(void) { return false; }
static inline bool rb_yjit_threshold_hit(const rb_iseq_t *const iseq) { return false; }
static inline bool rb_yjit_threshold_hit(const rb_iseq_t *const iseq, unsigned long total_calls) { return false; }
static inline void rb_yjit_invalidate_all_method_lookup_assumptions(void) {}
static inline void rb_yjit_cme_invalidate(rb_callable_method_entry_t *cme) {}
static inline void rb_yjit_collect_binding_alloc(void) {}
static inline void rb_yjit_collect_binding_set(void) {}
static inline bool rb_yjit_compile_iseq(const rb_iseq_t *iseq, rb_execution_context_t *ec) { return false; }
static inline void rb_yjit_compile_iseq(const rb_iseq_t *iseq, rb_execution_context_t *ec, bool jit_exception) {}
static inline void rb_yjit_init(void) {}
static inline void rb_yjit_bop_redefined(int redefined_flag, enum ruby_basic_operators bop) {}
static inline void rb_yjit_constant_state_changed(ID id) {}


@@ -326,6 +326,7 @@ fn main() {
.allowlist_function("rb_yjit_assert_holding_vm_lock")
.allowlist_function("rb_yjit_sendish_sp_pops")
.allowlist_function("rb_yjit_invokeblock_sp_pops")
.allowlist_function("rb_yjit_set_exception_return")
.allowlist_type("robject_offsets")
.allowlist_type("rstring_offsets")
@@ -443,6 +444,7 @@ fn main() {
.allowlist_function("rb_yjit_array_len")
.allowlist_function("rb_obj_class")
.allowlist_function("rb_obj_is_proc")
.allowlist_function("rb_vm_base_ptr")
// We define VALUE manually, don't import it
.blocklist_type("VALUE")


@@ -18,7 +18,7 @@ use std::cmp::min;
use std::collections::HashMap;
use std::ffi::CStr;
use std::mem;
use std::os::raw::{c_int};
use std::os::raw::c_int;
use std::ptr;
use std::rc::Rc;
use std::slice;
@@ -405,7 +405,7 @@ fn verify_ctx(jit: &JITState, ctx: &Context) {
// to the interpreter when it cannot service a stub by generating new code.
// Before coming here, branch_stub_hit() takes care of fully reconstructing
// interpreter state.
fn gen_code_for_exit_from_stub(ocb: &mut OutlinedCb) -> CodePtr {
fn gen_stub_exit(ocb: &mut OutlinedCb) -> CodePtr {
let ocb = ocb.unwrap();
let code_ptr = ocb.get_write_ptr();
let mut asm = Assembler::new();
@@ -617,6 +617,38 @@ fn gen_leave_exit(ocb: &mut OutlinedCb) -> CodePtr {
return code_ptr;
}
// Increment SP and transfer the execution to the interpreter after jit_exec_exception().
// On jit_exec_exception(), you need to return Qundef to keep executing the caller's
// non-FINISH frames in the interpreter. You also need to increment SP to push the return value to
// the caller's stack, which is different from gen_stub_exit().
fn gen_leave_exception(ocb: &mut OutlinedCb) -> CodePtr {
let ocb = ocb.unwrap();
let code_ptr = ocb.get_write_ptr();
let mut asm = Assembler::new();
// Every exit to the interpreter should be counted
gen_counter_incr(&mut asm, Counter::leave_interp_return);
asm.comment("increment SP of the caller");
let sp = Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SP);
let new_sp = asm.add(sp, SIZEOF_VALUE.into());
asm.mov(sp, new_sp);
asm.comment("exit from exception");
asm.cpop_into(SP);
asm.cpop_into(EC);
asm.cpop_into(CFP);
asm.frame_teardown();
// Execute vm_exec_core
asm.cret(Qundef.into());
asm.compile(ocb, None);
return code_ptr;
}
// Generate a runtime guard that ensures the PC is at the expected
// instruction index in the iseq, otherwise takes an entry stub
// that generates another check and entry.
@@ -647,7 +679,15 @@ pub fn gen_entry_chain_guard(
/// Compile an interpreter entry block to be inserted into an iseq
/// Returns None if compilation fails.
pub fn gen_entry_prologue(cb: &mut CodeBlock, ocb: &mut OutlinedCb, iseq: IseqPtr, insn_idx: u16) -> Option<CodePtr> {
/// If jit_exception is true, compile JIT code for handling exceptions.
/// See [jit_compile_exception] for details.
pub fn gen_entry_prologue(
cb: &mut CodeBlock,
ocb: &mut OutlinedCb,
iseq: IseqPtr,
insn_idx: u16,
jit_exception: bool,
) -> Option<CodePtr> {
let code_ptr = cb.get_write_ptr();
let mut asm = Assembler::new();
@@ -672,19 +712,36 @@ pub fn gen_entry_prologue(cb: &mut CodeBlock, ocb: &mut OutlinedCb, iseq: IseqPt
asm.mov(SP, Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SP));
// Setup cfp->jit_return
asm.mov(
Opnd::mem(64, CFP, RUBY_OFFSET_CFP_JIT_RETURN),
Opnd::const_ptr(CodegenGlobals::get_leave_exit_code().raw_ptr()),
);
// If this is an exception handler entry point
if jit_exception {
// On jit_exec_exception(), it's NOT safe to return a non-Qundef value
// from a non-FINISH frame. This function fixes that problem.
// See [jit_compile_exception] for details.
asm.ccall(
rb_yjit_set_exception_return as *mut u8,
vec![
CFP,
Opnd::const_ptr(CodegenGlobals::get_leave_exit_code().raw_ptr()),
Opnd::const_ptr(CodegenGlobals::get_leave_exception_code().raw_ptr()),
],
);
} else {
// On jit_exec() or JIT_EXEC(), it's safe to return a non-Qundef value
// on the entry frame. See [jit_compile] for details.
asm.mov(
Opnd::mem(64, CFP, RUBY_OFFSET_CFP_JIT_RETURN),
Opnd::const_ptr(CodegenGlobals::get_leave_exit_code().raw_ptr()),
);
}
// We're compiling iseqs that we *expect* to start at `insn_idx`. But in
// the case of optional parameters, the interpreter can set the pc to a
// different location depending on the optional parameters. If an iseq
// has optional parameters, we'll add a runtime check that the PC we've
// We're compiling iseqs that we *expect* to start at `insn_idx`.
// But in the case of optional parameters or when handling exceptions,
// the interpreter can set the pc to a different location. For
// such scenarios, we'll add a runtime check that the PC we've
// compiled for is the same PC that the interpreter wants us to run with.
// If they don't match, then we'll jump to an entry stub and generate
// another PC check and entry there.
let pending_entry = if unsafe { get_iseq_flags_has_opt(iseq) } {
let pending_entry = if unsafe { get_iseq_flags_has_opt(iseq) } || jit_exception {
Some(gen_entry_chain_guard(&mut asm, ocb, iseq, insn_idx)?)
} else {
None
@@ -8283,8 +8340,11 @@ pub struct CodegenGlobals {
/// Code for exiting back to the interpreter from the leave instruction
leave_exit_code: CodePtr,
/// Code for exiting back to the interpreter after handling an exception
leave_exception_code: CodePtr,
// For exiting from YJIT frame from branch_stub_hit().
// Filled by gen_code_for_exit_from_stub().
// Filled by gen_stub_exit().
stub_exit_code: CodePtr,
// For servicing branch stubs
@@ -8373,8 +8433,9 @@ impl CodegenGlobals {
let ocb_start_addr = ocb.unwrap().get_write_ptr();
let leave_exit_code = gen_leave_exit(&mut ocb);
let leave_exception_code = gen_leave_exception(&mut ocb);
let stub_exit_code = gen_code_for_exit_from_stub(&mut ocb);
let stub_exit_code = gen_stub_exit(&mut ocb);
let branch_stub_hit_trampoline = gen_branch_stub_hit_trampoline(&mut ocb);
let entry_stub_hit_trampoline = gen_entry_stub_hit_trampoline(&mut ocb);
@@ -8393,7 +8454,8 @@
inline_cb: cb,
outlined_cb: ocb,
leave_exit_code,
stub_exit_code: stub_exit_code,
leave_exception_code,
stub_exit_code,
outline_full_cfunc_return_pos: cfunc_exit_code,
branch_stub_hit_trampoline,
entry_stub_hit_trampoline,
@@ -8513,6 +8575,10 @@
CodegenGlobals::get_instance().leave_exit_code
}
pub fn get_leave_exception_code() -> CodePtr {
CodegenGlobals::get_instance().leave_exception_code
}
pub fn get_stub_exit_code() -> CodePtr {
CodegenGlobals::get_instance().stub_exit_code
}


@@ -2135,12 +2135,18 @@ fn gen_block_series_body(
/// Generate a block version that is an entry point inserted into an iseq
/// NOTE: this function assumes that the VM lock has been taken
pub fn gen_entry_point(iseq: IseqPtr, ec: EcPtr) -> Option<CodePtr> {
/// If jit_exception is true, compile JIT code for handling exceptions.
/// See [jit_compile_exception] for details.
pub fn gen_entry_point(iseq: IseqPtr, ec: EcPtr, jit_exception: bool) -> Option<CodePtr> {
// Compute the current instruction index based on the current PC
let cfp = unsafe { get_ec_cfp(ec) };
let insn_idx: u16 = unsafe {
let ec_pc = get_cfp_pc(get_ec_cfp(ec));
let ec_pc = get_cfp_pc(cfp);
iseq_pc_to_insn_idx(iseq, ec_pc)?
};
let stack_size: u8 = unsafe {
u8::try_from(get_cfp_sp(cfp).offset_from(get_cfp_bp(cfp))).ok()?
};
// The entry context makes no assumptions about types
let blockid = BlockId {
@@ -2153,10 +2159,12 @@ pub fn gen_entry_point(iseq: IseqPtr, ec: EcPtr) -> Option<CodePtr> {
let ocb = CodegenGlobals::get_outlined_cb();
// Write the interpreter entry prologue. Might be NULL when out of memory.
let code_ptr = gen_entry_prologue(cb, ocb, iseq, insn_idx);
let code_ptr = gen_entry_prologue(cb, ocb, iseq, insn_idx, jit_exception);
// Try to generate code for the entry block
let block = gen_block_series(blockid, &Context::default(), ec, cb, ocb);
let mut ctx = Context::default();
ctx.stack_size = stack_size;
let block = gen_block_series(blockid, &ctx, ec, cb, ocb);
cb.mark_all_executable();
ocb.unwrap().mark_all_executable();
@@ -2239,6 +2247,9 @@ fn entry_stub_hit_body(entry_ptr: *const c_void, ec: EcPtr) -> Option<*const u8>
let cfp = unsafe { get_ec_cfp(ec) };
let iseq = unsafe { get_cfp_iseq(cfp) };
let insn_idx = iseq_pc_to_insn_idx(iseq, unsafe { get_cfp_pc(cfp) })?;
let stack_size: u8 = unsafe {
u8::try_from(get_cfp_sp(cfp).offset_from(get_cfp_bp(cfp))).ok()?
};
let cb = CodegenGlobals::get_inline_cb();
let ocb = CodegenGlobals::get_outlined_cb();
@ -2251,7 +2262,8 @@ fn entry_stub_hit_body(entry_ptr: *const c_void, ec: EcPtr) -> Option<*const u8>
// Try to find an existing compiled version of this block
let blockid = BlockId { iseq, idx: insn_idx };
let ctx = Context::default();
let mut ctx = Context::default();
ctx.stack_size = stack_size;
let blockref = match find_block_version(blockid, &ctx) {
// If an existing block is found, generate a jump to the block.
Some(blockref) => {


@@ -149,6 +149,7 @@ pub use rb_get_cfp_sp as get_cfp_sp;
pub use rb_get_cfp_self as get_cfp_self;
pub use rb_get_cfp_ep as get_cfp_ep;
pub use rb_get_cfp_ep_level as get_cfp_ep_level;
pub use rb_vm_base_ptr as get_cfp_bp;
pub use rb_get_cme_def_type as get_cme_def_type;
pub use rb_get_cme_def_body_attr_id as get_cme_def_body_attr_id;
pub use rb_get_cme_def_body_optimized_type as get_cme_def_body_optimized_type;


@@ -1253,7 +1253,6 @@
pub fn rb_get_mct_func(mct: *const rb_method_cfunc_t) -> *mut ::std::os::raw::c_void;
pub fn rb_get_def_iseq_ptr(def: *mut rb_method_definition_t) -> *const rb_iseq_t;
pub fn rb_get_def_bmethod_proc(def: *mut rb_method_definition_t) -> VALUE;
pub fn rb_get_iseq_body_total_calls(iseq: *const rb_iseq_t) -> ::std::os::raw::c_ulong;
pub fn rb_get_iseq_body_local_iseq(iseq: *const rb_iseq_t) -> *const rb_iseq_t;
pub fn rb_get_iseq_body_parent_iseq(iseq: *const rb_iseq_t) -> *const rb_iseq_t;
pub fn rb_get_iseq_body_local_table_size(iseq: *const rb_iseq_t) -> ::std::os::raw::c_uint;
@@ -1297,6 +1296,7 @@
pub fn rb_get_cfp_self(cfp: *mut rb_control_frame_struct) -> VALUE;
pub fn rb_get_cfp_ep(cfp: *mut rb_control_frame_struct) -> *mut VALUE;
pub fn rb_get_cfp_ep_level(cfp: *mut rb_control_frame_struct, lv: u32) -> *const VALUE;
pub fn rb_vm_base_ptr(cfp: *mut rb_control_frame_struct) -> *mut VALUE;
pub fn rb_yarv_class_of(obj: VALUE) -> VALUE;
pub fn rb_yarv_str_eql_internal(str1: VALUE, str2: VALUE) -> VALUE;
pub fn rb_str_neq_internal(str1: VALUE, str2: VALUE) -> VALUE;
@@ -1340,4 +1340,5 @@
pub fn rb_yjit_assert_holding_vm_lock();
pub fn rb_yjit_sendish_sp_pops(ci: *const rb_callinfo) -> usize;
pub fn rb_yjit_invokeblock_sp_pops(ci: *const rb_callinfo) -> usize;
pub fn rb_yjit_set_exception_return(cfp: *mut rb_control_frame_t, leave_exit: *mut ::std::os::raw::c_void, leave_exception: *mut ::std::os::raw::c_void);
}


@@ -47,11 +47,8 @@ pub fn yjit_enabled_p() -> bool {
/// Test whether we are ready to compile an ISEQ or not
#[no_mangle]
pub extern "C" fn rb_yjit_threshold_hit(iseq: IseqPtr) -> bool {
pub extern "C" fn rb_yjit_threshold_hit(_iseq: IseqPtr, total_calls: u64) -> bool {
let call_threshold = get_option!(call_threshold) as u64;
let total_calls = unsafe { rb_get_iseq_body_total_calls(iseq) } as u64;
return total_calls == call_threshold;
}
@@ -112,8 +109,10 @@ fn rb_bug_panic_hook() {
/// Called from C code to begin compiling a function
/// NOTE: this should be wrapped in RB_VM_LOCK_ENTER(), rb_vm_barrier() on the C side
/// If jit_exception is true, compile JIT code for handling exceptions.
/// See [jit_compile_exception] for details.
#[no_mangle]
pub extern "C" fn rb_yjit_iseq_gen_entry_point(iseq: IseqPtr, ec: EcPtr) -> *const u8 {
pub extern "C" fn rb_yjit_iseq_gen_entry_point(iseq: IseqPtr, ec: EcPtr, jit_exception: bool) -> *const u8 {
// Reject ISEQs with very large temp stacks,
// this will allow us to use u8/i8 values to track stack_size and sp_offset
let stack_max = unsafe { rb_get_iseq_body_stack_max(iseq) };
@@ -131,7 +130,7 @@ pub extern "C" fn rb_yjit_iseq_gen_entry_point(iseq: IseqPtr, ec: EcPtr) -> *con
return std::ptr::null();
}
let maybe_code_ptr = gen_entry_point(iseq, ec);
let maybe_code_ptr = gen_entry_point(iseq, ec, jit_exception);
match maybe_code_ptr {
Some(ptr) => ptr.raw_ptr(),