YJIT: Read rb_num_t as usize early

This patch makes sure that we're not accidentally reading rb_num_t
instruction arguments as VALUE and accidentally baking them into
code and marking them. Some of these changes simply move the cast
earlier, while others avoid potential problems for flag and ID arguments.

Follow-up for 39f7eddec4c55711d56f05b085992a83bf23159e.
This commit is contained in:
Alan Wu 2022-10-21 15:11:52 -04:00
parent c7754a4d4c
commit 8bbcb75377
Notes: git 2022-10-21 20:26:01 +00:00

View File

@ -910,9 +910,7 @@ fn gen_dupn(
asm: &mut Assembler, asm: &mut Assembler,
_ocb: &mut OutlinedCb, _ocb: &mut OutlinedCb,
) -> CodegenStatus { ) -> CodegenStatus {
let n = jit_get_arg(jit, 0).as_usize();
let nval: VALUE = jit_get_arg(jit, 0);
let VALUE(n) = nval;
// In practice, seems to be only used for n==2 // In practice, seems to be only used for n==2
if n != 2 { if n != 2 {
@ -1035,9 +1033,9 @@ fn gen_putspecialobject(
asm: &mut Assembler, asm: &mut Assembler,
_ocb: &mut OutlinedCb, _ocb: &mut OutlinedCb,
) -> CodegenStatus { ) -> CodegenStatus {
let object_type = jit_get_arg(jit, 0); let object_type = jit_get_arg(jit, 0).as_usize();
if object_type == VALUE(VM_SPECIAL_OBJECT_VMCORE.as_usize()) { if object_type == VM_SPECIAL_OBJECT_VMCORE.as_usize() {
let stack_top = ctx.stack_push(Type::UnknownHeap); let stack_top = ctx.stack_push(Type::UnknownHeap);
let frozen_core = unsafe { rb_mRubyVMFrozenCore }; let frozen_core = unsafe { rb_mRubyVMFrozenCore };
asm.mov(stack_top, frozen_core.into()); asm.mov(stack_top, frozen_core.into());
@ -1056,17 +1054,17 @@ fn gen_setn(
asm: &mut Assembler, asm: &mut Assembler,
_ocb: &mut OutlinedCb, _ocb: &mut OutlinedCb,
) -> CodegenStatus { ) -> CodegenStatus {
let n: VALUE = jit_get_arg(jit, 0); let n = jit_get_arg(jit, 0).as_usize();
let top_val = ctx.stack_pop(0); let top_val = ctx.stack_pop(0);
let dst_opnd = ctx.stack_opnd(n.into()); let dst_opnd = ctx.stack_opnd(n.try_into().unwrap());
asm.mov( asm.mov(
dst_opnd, dst_opnd,
top_val top_val
); );
let mapping = ctx.get_opnd_mapping(StackOpnd(0)); let mapping = ctx.get_opnd_mapping(StackOpnd(0));
ctx.set_opnd_mapping(StackOpnd(n.into()), mapping); ctx.set_opnd_mapping(StackOpnd(n.try_into().unwrap()), mapping);
KeepCompiling KeepCompiling
} }
@ -1078,10 +1076,10 @@ fn gen_topn(
asm: &mut Assembler, asm: &mut Assembler,
_ocb: &mut OutlinedCb, _ocb: &mut OutlinedCb,
) -> CodegenStatus { ) -> CodegenStatus {
let nval = jit_get_arg(jit, 0); let n = jit_get_arg(jit, 0).as_usize();
let top_n_val = ctx.stack_opnd(nval.into()); let top_n_val = ctx.stack_opnd(n.try_into().unwrap());
let mapping = ctx.get_opnd_mapping(StackOpnd(nval.into())); let mapping = ctx.get_opnd_mapping(StackOpnd(n.try_into().unwrap()));
let loc0 = ctx.stack_push_mapping(mapping); let loc0 = ctx.stack_push_mapping(mapping);
asm.mov(loc0, top_n_val); asm.mov(loc0, top_n_val);
@ -1095,8 +1093,7 @@ fn gen_adjuststack(
_cb: &mut Assembler, _cb: &mut Assembler,
_ocb: &mut OutlinedCb, _ocb: &mut OutlinedCb,
) -> CodegenStatus { ) -> CodegenStatus {
let nval: VALUE = jit_get_arg(jit, 0); let n = jit_get_arg(jit, 0).as_usize();
let VALUE(n) = nval;
ctx.stack_pop(n); ctx.stack_pop(n);
KeepCompiling KeepCompiling
} }
@ -1237,7 +1234,7 @@ fn gen_splatarray(
asm: &mut Assembler, asm: &mut Assembler,
_ocb: &mut OutlinedCb, _ocb: &mut OutlinedCb,
) -> CodegenStatus { ) -> CodegenStatus {
let flag = jit_get_arg(jit, 0); let flag = jit_get_arg(jit, 0).as_usize();
// Save the PC and SP because the callee may allocate // Save the PC and SP because the callee may allocate
// Note that this modifies REG_SP, which is why we do it first // Note that this modifies REG_SP, which is why we do it first
@ -1286,7 +1283,7 @@ fn gen_newrange(
asm: &mut Assembler, asm: &mut Assembler,
_ocb: &mut OutlinedCb, _ocb: &mut OutlinedCb,
) -> CodegenStatus { ) -> CodegenStatus {
let flag = jit_get_arg(jit, 0); let flag = jit_get_arg(jit, 0).as_usize();
// rb_range_new() allocates and can raise // rb_range_new() allocates and can raise
jit_prepare_routine_call(jit, ctx, asm); jit_prepare_routine_call(jit, ctx, asm);
@ -2149,7 +2146,7 @@ fn gen_setinstancevariable(
asm: &mut Assembler, asm: &mut Assembler,
_ocb: &mut OutlinedCb, _ocb: &mut OutlinedCb,
) -> CodegenStatus { ) -> CodegenStatus {
let id = jit_get_arg(jit, 0); let id = jit_get_arg(jit, 0).as_usize();
let ic = jit_get_arg(jit, 1).as_u64(); // type IVC let ic = jit_get_arg(jit, 1).as_u64(); // type IVC
// Save the PC and SP because the callee may allocate // Save the PC and SP because the callee may allocate
@ -2165,7 +2162,7 @@ fn gen_setinstancevariable(
vec![ vec![
Opnd::const_ptr(jit.iseq as *const u8), Opnd::const_ptr(jit.iseq as *const u8),
Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SELF), Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SELF),
Opnd::UImm(id.into()), id.into(),
val_opnd, val_opnd,
Opnd::const_ptr(ic as *const u8), Opnd::const_ptr(ic as *const u8),
] ]
@ -2273,20 +2270,20 @@ fn gen_concatstrings(
asm: &mut Assembler, asm: &mut Assembler,
_ocb: &mut OutlinedCb, _ocb: &mut OutlinedCb,
) -> CodegenStatus { ) -> CodegenStatus {
let n = jit_get_arg(jit, 0); let n = jit_get_arg(jit, 0).as_usize();
// Save the PC and SP because we are allocating // Save the PC and SP because we are allocating
jit_prepare_routine_call(jit, ctx, asm); jit_prepare_routine_call(jit, ctx, asm);
let values_ptr = asm.lea(ctx.sp_opnd(-((SIZEOF_VALUE as isize) * n.as_isize()))); let values_ptr = asm.lea(ctx.sp_opnd(-((SIZEOF_VALUE as isize) * n as isize)));
// call rb_str_concat_literals(long n, const VALUE *strings); // call rb_str_concat_literals(size_t n, const VALUE *strings);
let return_value = asm.ccall( let return_value = asm.ccall(
rb_str_concat_literals as *const u8, rb_str_concat_literals as *const u8,
vec![Opnd::UImm(n.into()), values_ptr] vec![n.into(), values_ptr]
); );
ctx.stack_pop(n.as_usize()); ctx.stack_pop(n);
let stack_ret = ctx.stack_push(Type::CString); let stack_ret = ctx.stack_push(Type::CString);
asm.mov(stack_ret, return_value); asm.mov(stack_ret, return_value);
@ -5736,7 +5733,7 @@ fn gen_getglobal(
asm: &mut Assembler, asm: &mut Assembler,
_ocb: &mut OutlinedCb, _ocb: &mut OutlinedCb,
) -> CodegenStatus { ) -> CodegenStatus {
let gid = jit_get_arg(jit, 0); let gid = jit_get_arg(jit, 0).as_usize();
// Save the PC and SP because we might make a Ruby call for warning // Save the PC and SP because we might make a Ruby call for warning
jit_prepare_routine_call(jit, ctx, asm); jit_prepare_routine_call(jit, ctx, asm);
@ -5758,7 +5755,7 @@ fn gen_setglobal(
asm: &mut Assembler, asm: &mut Assembler,
_ocb: &mut OutlinedCb, _ocb: &mut OutlinedCb,
) -> CodegenStatus { ) -> CodegenStatus {
let gid = jit_get_arg(jit, 0); let gid = jit_get_arg(jit, 0).as_usize();
// Save the PC and SP because we might make a Ruby call for // Save the PC and SP because we might make a Ruby call for
// Kernel#set_trace_var // Kernel#set_trace_var
@ -5872,7 +5869,7 @@ fn gen_toregexp(
rb_ary_tmp_new_from_values as *const u8, rb_ary_tmp_new_from_values as *const u8,
vec![ vec![
Opnd::Imm(0), Opnd::Imm(0),
Opnd::UImm(jit_get_arg(jit, 1).as_u64()), cnt.into(),
values_ptr, values_ptr,
] ]
); );