Code clean around unused code for some architectures or features (#6581)

Jimmy Miller 2022-10-18 14:19:41 -04:00 committed by GitHub
parent e7166c9bb7
commit b2ba71df9e
GPG Key ID: 4AEE18F83AFDEB23
Notes: git 2022-10-18 18:20:02 +00:00
Merged-By: maximecb <maximecb@ruby-lang.org>
5 changed files with 16 additions and 10 deletions
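
The diff below follows one pattern throughout: imports and helpers that are only used by a particular architecture or Cargo feature are either deleted or put behind `#[cfg(...)]`, so builds that don't need them stop emitting unused-code warnings. A minimal sketch of the idea, assuming a hypothetical `arm_only_helper` (only the `use crate::stats::*;` line appears in the actual diff):

    // Only imported when the "stats" feature is enabled; otherwise the glob
    // import would be flagged by the `unused_imports` lint.
    #[cfg(feature = "stats")]
    use crate::stats::*;

    // Only compiled on aarch64; other targets never see it, so there is no
    // `dead_code` warning for a helper they never call.
    #[cfg(target_arch = "aarch64")]
    fn arm_only_helper() -> u64 { 0 }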

View File

@@ -1,5 +1,4 @@
 use std::cell::RefCell;
-use std::cmp;
 use std::fmt;
 use std::mem;
 use std::rc::Rc;
@@ -7,8 +6,6 @@ use std::rc::Rc;
 use crate::backend::x86_64::JMP_PTR_BYTES;
 #[cfg(target_arch = "aarch64")]
 use crate::backend::arm64::JMP_PTR_BYTES;
-use crate::backend::ir::Assembler;
-use crate::backend::ir::Target;
 use crate::virtualmem::WriteError;
 #[cfg(feature = "asm_comments")]
@@ -154,7 +151,7 @@ impl CodeBlock {
         // We could remember the last write_pos in page2 and let set_page use that position,
         // but you need to waste some space for keeping write_pos for every single page.
         // It doesn't seem necessary for performance either. So we're currently not doing it.
-        let mut dst_pos = self.page_size * page_idx + self.page_start();
+        let dst_pos = self.page_size * page_idx + self.page_start();
         if self.page_size * page_idx < self.mem_size && self.write_pos < dst_pos {
             // Reset dropped_bytes
             self.dropped_bytes = false;
@@ -216,6 +213,8 @@ impl CodeBlock {
         self.page_end_reserve = old_page_end_reserve;
     }

+    #[cfg(target_arch = "aarch64")]
+    #[cfg(not(test))]
     /// Return the address ranges of a given address range that this CodeBlock can write.
     pub fn writable_addrs(&self, start_ptr: CodePtr, end_ptr: CodePtr) -> Vec<(usize, usize)> {
         let mut addrs = vec![];
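
The two attributes added above stack: every `#[cfg]` on an item must hold for it to be compiled, so `writable_addrs` now only exists on aarch64 builds outside of `cfg(test)`. A quick sketch of the equivalence (the function names here are hypothetical):

    // Compiled only when BOTH conditions hold: aarch64 target and not a test build.
    #[cfg(target_arch = "aarch64")]
    #[cfg(not(test))]
    fn gated() {}

    // Same effect spelled as a single attribute.
    #[cfg(all(target_arch = "aarch64", not(test)))]
    fn gated_all() {}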

View File

@@ -8,20 +8,18 @@ use crate::core::*;
 use crate::cruby::*;
 use crate::invariants::*;
 use crate::options::*;
+#[cfg(feature = "stats")]
 use crate::stats::*;
 use crate::utils::*;
 use CodegenStatus::*;
 use InsnOpnd::*;
-use std::cell::RefCell;
-use std::cell::RefMut;
 use std::cmp;
 use std::collections::HashMap;
 use std::ffi::CStr;
 use std::mem::{self, size_of};
 use std::os::raw::c_uint;
 use std::ptr;
-use std::rc::Rc;
 use std::slice;

 pub use crate::virtualmem::CodePtr;
@@ -650,7 +648,7 @@ pub fn gen_entry_prologue(cb: &mut CodeBlock, iseq: IseqPtr, insn_idx: u32) -> O
     asm.compile(cb);

-    if (cb.has_dropped_bytes()) {
+    if cb.has_dropped_bytes() {
         None
     } else {
         Some(code_ptr)
@@ -6537,10 +6535,15 @@ impl CodegenGlobals {
     pub fn init() {
         // Executable memory and code page size in bytes
         let mem_size = get_option!(exec_mem_size);
-        let code_page_size = get_option!(code_page_size);

         #[cfg(not(test))]
         let (mut cb, mut ocb) = {
+            use std::cell::RefCell;
+            use std::rc::Rc;
+            let code_page_size = get_option!(code_page_size);
             let virt_block: *mut u8 = unsafe { rb_yjit_reserve_addr_space(mem_size as u32) };

             // Memory protection syscalls need page-aligned addresses, so check it here. Assuming
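
Moving `use std::cell::RefCell;`, `use std::rc::Rc;`, and the `code_page_size` lookup inside the `#[cfg(not(test))]` block means test builds, which compile that block out, no longer carry unused imports or an unused variable. Rust allows `use` declarations in any block scope; a minimal sketch of the pattern, with a hypothetical `init_sketch` standing in for the real initializer:

    fn init_sketch() {
        // The imports live inside the cfg-gated statement, so when a test build
        // drops the statement it drops the imports along with it.
        #[cfg(not(test))]
        let _cell = {
            use std::cell::RefCell;
            use std::rc::Rc;

            Rc::new(RefCell::new(0u32))
        };
    }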

View File

@@ -1,9 +1,12 @@
 use crate::core::*;
 use crate::cruby::*;
 use crate::yjit::yjit_enabled_p;
+#[cfg(feature = "disasm")]
 use crate::asm::CodeBlock;
+#[cfg(feature = "disasm")]
 use crate::options::DumpDisasm;
+#[cfg(feature = "disasm")]
 use std::fmt::Write;

 /// Primitive called in yjit.rb

View File

@@ -533,7 +533,7 @@ pub extern "C" fn rb_yjit_tracing_invalidate_all() {
     with_vm_lock(src_loc!(), || {
         // Make it so all live block versions are no longer valid branch targets
         for_each_iseq(|iseq| {
-            if let Some(payload) = unsafe { get_iseq_payload(iseq) } {
+            if let Some(payload) = get_iseq_payload(iseq) {
                 // C comment:
                 // Leaking the blocks for now since we might have situations where
                 // a different ractor is waiting for the VM lock in branch_stub_hit().
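
Calling `get_iseq_payload` without an `unsafe` block suggests the function is a safe fn here; keeping the old `unsafe { ... }` wrapper around a safe call would trip the `unused_unsafe` lint. A small sketch with a stand-in function (not the real `get_iseq_payload` signature):

    fn lookup(id: u32) -> Option<u32> {
        Some(id)
    }

    fn caller() {
        // `unsafe { lookup(1) }` would warn: unnecessary `unsafe` block.
        let _payload = lookup(1);
    }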

View File

@@ -198,6 +198,7 @@ impl CodePtr {
         ptr as i64
     }

+    #[cfg(target_arch = "aarch64")]
     pub fn into_u64(self) -> u64 {
         let CodePtr(ptr) = self;
         ptr as u64