Add opt_lt, remove as_usize() trait

commit 73d01016e8
parent 6a0748d2c5
Author: Maxime Chevalier-Boisvert, 2025-02-06 15:22:10 -05:00
Committed-by: Takashi Kokubun
Notes: git 2025-04-18 13:49:36 +00:00

4 changed files with 16 additions and 55 deletions


@@ -2,7 +2,6 @@ use std::rc::Rc;
 use std::cell::RefCell;
 use crate::cruby::*;
 use crate::virtualmem::*;
-use crate::{utils::IntoUsize};
 
 /// Block of memory into which instructions can be assembled
 pub struct CodeBlock {
@@ -135,7 +134,7 @@ impl ZJITState {
         // (2¹⁶ bytes) pages, which should be fine. 4KiB pages seem to be the most popular though.
         let page_size = unsafe { rb_zjit_get_page_size() };
         assert_eq!(
-            virt_block as usize % page_size.as_usize(), 0,
+            virt_block as usize % page_size as usize, 0,
             "Start of virtual address block should be page-aligned",
         );
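
For context, a standalone sketch of the invariant the assertion above checks (the values here are hypothetical, not from the commit): an address is page-aligned exactly when it leaves no remainder modulo the page size.

fn is_page_aligned(addr: usize, page_size: usize) -> bool {
    assert!(page_size.is_power_of_two());
    // For power-of-two sizes this is equivalent to addr & (page_size - 1) == 0.
    addr % page_size == 0
}

fn main() {
    assert!(is_page_aligned(0x10000, 4096));  // 64 KiB boundary, also 4 KiB-aligned
    assert!(!is_page_aligned(0x10001, 4096)); // one byte past the boundary
}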


@@ -258,6 +258,14 @@ pub fn iseq_to_ssa(iseq: *const rb_iseq_t) {
                 state.push(Opnd::Insn(add_id));
             }
+            YARVINSN_opt_lt => {
+                // TODO: we need to add a BOP not redefined check here
+                let v0 = state.pop();
+                let v1 = state.pop();
+                let lt_id = result.push_insn(block, Insn::Lt { v0, v1 });
+                state.push(Opnd::Insn(lt_id));
+            }
             YARVINSN_leave => {
                 result.push_insn(block, Insn::Return { val: state.pop() });
             }
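
The new arm follows the translator's usual stack-to-SSA pattern: pop the two YARV stack operands, append one SSA instruction, and push the new instruction's id back as an operand. Below is a minimal, self-contained sketch of that pattern; Opnd, Insn, and Builder are simplified stand-ins I made up, not ZJIT's real types. Note the pop order: v0 comes off the top of the stack first.

#[derive(Debug, Clone, Copy)]
enum Opnd {
    Insn(usize), // refers to an earlier instruction by id
}

#[derive(Debug)]
enum Insn {
    Lt { v0: Opnd, v1: Opnd },
}

struct Builder {
    insns: Vec<Insn>,
}

impl Builder {
    // Append an instruction and return its id, like push_insn in the hunk above.
    fn push_insn(&mut self, insn: Insn) -> usize {
        self.insns.push(insn);
        self.insns.len() - 1
    }
}

fn main() {
    let mut result = Builder { insns: vec![] };
    // Simulated YARV operand stack, pretending two earlier values exist.
    let mut state = vec![Opnd::Insn(0), Opnd::Insn(1)];

    // Same shape as the YARVINSN_opt_lt arm:
    let v0 = state.pop().unwrap();
    let v1 = state.pop().unwrap();
    let lt_id = result.push_insn(Insn::Lt { v0, v1 });
    state.push(Opnd::Insn(lt_id));

    println!("{:?}", result.insns); // [Lt { v0: Insn(1), v1: Insn(0) }]
}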


@@ -1,46 +0,0 @@
-/// Trait for casting to [usize] that allows you to say `.as_usize()`.
-/// Implementation conditional on the cast preserving the numeric value on
-/// all inputs and being inexpensive.
-///
-/// [usize] is only guaranteed to be more than 16-bit wide, so we can't use
-/// `.into()` to cast an `u32` or an `u64` to a `usize` even though in all
-/// the platforms YJIT supports these two casts are pretty much no-ops.
-/// We could say `as usize` or `.try_convert().unwrap()` everywhere
-/// for those casts but they both have undesirable consequences if and when
-/// we decide to support 32-bit platforms. Unfortunately we can't implement
-/// [::core::convert::From] for [usize] since both the trait and the type are
-/// external. Naming the method `into()` also runs into naming conflicts.
-pub(crate) trait IntoUsize {
-    /// Convert to usize. Implementation conditional on width of [usize].
-    fn as_usize(self) -> usize;
-}
-
-#[cfg(target_pointer_width = "64")]
-impl IntoUsize for u64 {
-    fn as_usize(self) -> usize {
-        self as usize
-    }
-}
-
-#[cfg(target_pointer_width = "64")]
-impl IntoUsize for u32 {
-    fn as_usize(self) -> usize {
-        self as usize
-    }
-}
-
-impl IntoUsize for u16 {
-    /// Alias for `.into()`. For convenience so you could use the trait for
-    /// all unsgined types.
-    fn as_usize(self) -> usize {
-        self.into()
-    }
-}
-
-impl IntoUsize for u8 {
-    /// Alias for `.into()`. For convenience so you could use the trait for
-    /// all unsgined types.
-    fn as_usize(self) -> usize {
-        self.into()
-    }
-}
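
For a sense of the trade-off the deleted trait papered over, here is a short standalone illustration (my example, not part of the commit). The standard library implements From for usize only from u8 and u16, because usize is only guaranteed to be at least 16 bits wide; u32 and u64 therefore need an explicit `as` cast, a fallible try_into(), or a helper trait like the removed IntoUsize. After this commit, ZJIT simply writes `as usize` at the call sites, which is a no-op on the 64-bit targets it currently supports.

fn main() {
    let small: u16 = 512;
    let _ok: usize = small.into(); // compiles: u16 always fits in usize

    let big: u64 = 1 << 40;
    // let _no: usize = big.into();   // error[E0277]: From<u64> is not implemented for usize
    let _cast = big as usize;                      // silently truncates on <64-bit targets
    let _checked: usize = big.try_into().unwrap(); // via TryInto (Rust 2021 prelude); panics instead of truncating
}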


@@ -5,7 +5,7 @@
 use std::ptr::NonNull;
 
-use crate::{stats::zjit_alloc_size, utils::IntoUsize};
+use crate::{stats::zjit_alloc_size};
 
 #[cfg(not(test))]
 pub type VirtualMem = VirtualMemory<sys::SystemAllocator>;
@@ -78,7 +78,7 @@ impl CodePtr {
     /// been any writes to it through the [VirtualMemory] yet.
     pub fn raw_ptr(self, base: &impl CodePtrBase) -> *const u8 {
         let CodePtr(offset) = self;
-        return base.base_ptr().as_ptr().wrapping_add(offset.as_usize())
+        return base.base_ptr().as_ptr().wrapping_add(offset as usize)
     }
 
     /// Get the address of the code pointer.
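
A quick note on the pattern in raw_ptr above (the sketch below is standalone, with invented values): a CodePtr stores only an offset from the region base, and the raw pointer is recovered by adding that offset back onto the base. wrapping_add keeps the address computation well-defined even for an out-of-range result, as long as the pointer is never dereferenced.

fn main() {
    let buf = [0u8; 16];
    let base: *const u8 = buf.as_ptr();
    let offset: u32 = 8;
    let p = base.wrapping_add(offset as usize); // same plain-cast style the commit adopts
    assert_eq!(p as usize - base as usize, 8);
}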
@@ -113,7 +113,7 @@ impl<A: Allocator> VirtualMemory<A> {
         memory_limit_bytes: usize,
     ) -> Self {
         assert_ne!(0, page_size);
-        let page_size_bytes = page_size.as_usize();
+        let page_size_bytes = page_size as usize;
 
         Self {
             region_start: virt_region_start,
@@ -246,7 +246,7 @@ impl<A: Allocator> VirtualMemory<A> {
         // Bounds check the request. We should only free memory we manage.
         let mapped_region = self.start_ptr().raw_ptr(self)..self.mapped_end_ptr().raw_ptr(self);
         let virtual_region = self.start_ptr().raw_ptr(self)..self.virtual_end_ptr().raw_ptr(self);
-        let last_byte_to_free = start_ptr.add_bytes(size.saturating_sub(1).as_usize()).raw_ptr(self);
+        let last_byte_to_free = start_ptr.add_bytes(size.saturating_sub(1) as usize).raw_ptr(self);
         assert!(mapped_region.contains(&start_ptr.raw_ptr(self)));
 
         // On platforms where code page size != memory page size (e.g. Linux), we often need
         // to free code pages that contain unmapped memory pages. When it happens on the last
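
An aside on the hunk above: the last byte of a region of `size` bytes starting at start_ptr sits at offset size - 1, and saturating_sub(1) keeps that arithmetic from underflowing when size == 0. A tiny sketch with made-up values, not ZJIT code:

fn last_byte_offset(size: u32) -> usize {
    size.saturating_sub(1) as usize
}

fn main() {
    assert_eq!(last_byte_offset(4096), 4095); // ordinary page-sized request
    assert_eq!(last_byte_offset(0), 0);       // 0 - 1 would underflow; saturates to 0
}
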
@@ -328,7 +328,7 @@ pub mod tests {
            let index = ptr as usize - mem_start;
 
            assert!(index < self.memory.len());
-           assert!(index + size.as_usize() <= self.memory.len());
+           assert!(index + size as usize <= self.memory.len());
 
            index
        }
@@ -338,14 +338,14 @@ pub mod tests {
     impl super::Allocator for TestingAllocator {
         fn mark_writable(&mut self, ptr: *const u8, length: u32) -> bool {
             let index = self.bounds_check_request(ptr, length);
-            self.requests.push(MarkWritable { start_idx: index, length: length.as_usize() });
+            self.requests.push(MarkWritable { start_idx: index, length: length as usize });
 
             true
         }
 
         fn mark_executable(&mut self, ptr: *const u8, length: u32) {
             let index = self.bounds_check_request(ptr, length);
-            self.requests.push(MarkExecutable { start_idx: index, length: length.as_usize() });
+            self.requests.push(MarkExecutable { start_idx: index, length: length as usize });
 
             // We don't try to execute generated code in cfg(test)
             // so no need to actually request executable memory.