YJIT: Assert that we have the VM lock while marking
Somewhat important because having the lock is a key part of the soundness reasoning for the `unsafe` usage here.
commit de174681f7
parent f613c18912
2023-03-15 19:45:52 +00:00
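To make the soundness argument in the commit message concrete, here is a minimal, standalone Rust sketch of the pattern the diffs below install: assert the VM lock before turning the raw payload pointer into a shared reference. Everything in it — the `IseqPayload` struct with a `gc_refs` field, `VM_LOCK_HELD`, `assert_holding_vm_lock`, and `iseq_mark` — is an illustrative stand-in, not the real YJIT code; in CRuby the actual check is the C-side `ASSERT_vm_locking()`, reached through the new `rb_yjit_assert_holding_vm_lock` binding.

use std::os::raw::c_void;
use std::sync::atomic::{AtomicBool, Ordering};

// Stand-in for YJIT's per-iseq payload; the real one tracks GC references
// recorded while compiling the iseq.
struct IseqPayload {
    gc_refs: Vec<u64>,
}

// Stand-in for the VM lock state; the real check is ASSERT_vm_locking() on
// the C side, reached through the rb_yjit_assert_holding_vm_lock binding.
static VM_LOCK_HELD: AtomicBool = AtomicBool::new(false);

fn assert_holding_vm_lock() {
    assert!(
        VM_LOCK_HELD.load(Ordering::Acquire),
        "marking must run with the VM lock held"
    );
}

// Mirrors the shape of rb_yjit_iseq_mark(): only turn the raw payload pointer
// into a shared reference after checking the lock the aliasing argument needs.
extern "C" fn iseq_mark(payload: *mut c_void) {
    let payload = if payload.is_null() {
        // Nothing to mark.
        return;
    } else {
        // SAFETY: the GC holds the VM lock while marking (asserted here), so
        // no other thread should hold an overlapping &mut IseqPayload.
        unsafe {
            assert_holding_vm_lock();
            &*(payload as *const IseqPayload)
        }
    };

    for gc_ref in &payload.gc_refs {
        // The real code would report each VALUE to the GC at this point.
        let _ = gc_ref;
    }
}

fn main() {
    let mut payload = IseqPayload { gc_refs: vec![1, 2, 3] };

    // Simulate the GC: take the lock, then mark the payload.
    VM_LOCK_HELD.store(true, Ordering::Release);
    iseq_mark(&mut payload as *mut IseqPayload as *mut c_void);
    VM_LOCK_HELD.store(false, Ordering::Release);
}

If the GC ever called the mark hook without the lock, the assertion fires instead of silently allowing a data race on the payload.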
yjit.c | 8 ++++++++
@@ -1100,6 +1100,14 @@ object_shape_count(rb_execution_context_t *ec, VALUE self)
     return ULONG2NUM((unsigned long)GET_VM()->next_shape_id);
 }
 
+// Assert that we have the VM lock. Relevant mostly for multi ractor situations.
+// The GC takes the lock before calling us, and this asserts that it indeed happens.
+void
+rb_yjit_assert_holding_vm_lock(void)
+{
+    ASSERT_vm_locking();
+}
+
 // Primitives used by yjit.rb
 VALUE rb_yjit_stats_enabled_p(rb_execution_context_t *ec, VALUE self);
 VALUE rb_yjit_trace_exit_locations_enabled_p(rb_execution_context_t *ec, VALUE self);
@@ -320,6 +320,8 @@ fn main() {
         .allowlist_function("rb_yjit_exit_locations_dict")
         .allowlist_function("rb_yjit_icache_invalidate")
         .allowlist_function("rb_optimized_call")
+        .allowlist_function("rb_yjit_assert_holding_vm_lock")
+
         // from vm_sync.h
         .allowlist_function("rb_vm_barrier")
 
@@ -863,9 +863,15 @@ pub extern "C" fn rb_yjit_iseq_mark(payload: *mut c_void) {
         // Nothing to mark.
         return;
     } else {
-        // SAFETY: It looks like the GC takes the VM lock while marking
-        // so we should be satisfying aliasing rules here.
-        unsafe { &*(payload as *const IseqPayload) }
+        // SAFETY: The GC takes the VM lock while marking, which
+        // we assert, so we should be synchronized and data race free.
+        //
+        // For aliasing, having the VM lock hopefully also implies that no one
+        // else has an overlapping &mut IseqPayload.
+        unsafe {
+            rb_yjit_assert_holding_vm_lock();
+            &*(payload as *const IseqPayload)
+        }
     };
 
     // For marking VALUEs written into the inline code block.
@@ -915,9 +921,15 @@ pub extern "C" fn rb_yjit_iseq_update_references(payload: *mut c_void) {
         // Nothing to update.
         return;
     } else {
-        // SAFETY: It looks like the GC takes the VM lock while updating references
-        // so we should be satisfying aliasing rules here.
-        unsafe { &*(payload as *const IseqPayload) }
+        // SAFETY: The GC takes the VM lock while marking, which
+        // we assert, so we should be synchronized and data race free.
+        //
+        // For aliasing, having the VM lock hopefully also implies that no one
+        // else has an overlapping &mut IseqPayload.
+        unsafe {
+            rb_yjit_assert_holding_vm_lock();
+            &*(payload as *const IseqPayload)
+        }
     };
 
     // Evict other threads from generated code since we are about to patch them.
@@ -1333,4 +1333,5 @@ extern "C" {
         file: *const ::std::os::raw::c_char,
         line: ::std::os::raw::c_int,
     );
+    pub fn rb_yjit_assert_holding_vm_lock();
 }