Add support for multiple versions per block
parent d528cf4fd5
commit dea4db16c0
@@ -44,7 +44,7 @@ jit_next_idx(jitstate_t* jit)
 static VALUE
 jit_get_arg(jitstate_t* jit, size_t arg_idx)
 {
-    assert (arg_idx + 1 < insn_len(jit_get_opcode(jit)));
+    RUBY_ASSERT(arg_idx + 1 < (size_t)insn_len(jit_get_opcode(jit)));
     return *(jit->pc + arg_idx + 1);
 }
 
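Besides converting the bare assert to RUBY_ASSERT, the new assertion adds a (size_t) cast: insn_len() returns a signed value, while arg_idx + 1 is a size_t, so the uncast comparison mixes signedness and draws a -Wsign-compare warning. A minimal standalone illustration of the pitfall the cast avoids (not code from this commit):

    #include <stdio.h>

    int main(void)
    {
        int len = -1;    // signed value, e.g. an error sentinel
        size_t idx = 0;  // unsigned index

        // The signed operand is converted to size_t, so -1 becomes SIZE_MAX
        // and the comparison succeeds, which is almost never what was meant.
        if (idx < len)
            printf("0 < -1 compared as unsigned\n");

        // Checking the sign first, then casting (as the RUBY_ASSERT above
        // effectively does once insn_len is known non-negative) is safe.
        if (len >= 0 && idx < (size_t)len)
            printf("checked comparison\n");

        return 0;
    }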
@@ -107,7 +107,7 @@ Returns `NULL` if compilation fails.
 uint8_t*
 ujit_entry_prologue()
 {
-    assert (cb != NULL);
+    RUBY_ASSERT(cb != NULL);
 
     if (cb->write_pos + 1024 >= cb->mem_size) {
         rb_bug("out of executable memory");
@@ -133,8 +133,8 @@ Compile a sequence of bytecode instructions for a given basic block version
 void
 ujit_gen_block(ctx_t* ctx, block_t* block)
 {
-    assert (cb != NULL);
-    assert (block != NULL);
+    RUBY_ASSERT(cb != NULL);
+    RUBY_ASSERT(block != NULL);
 
     const rb_iseq_t *iseq = block->blockid.iseq;
     uint32_t insn_idx = block->blockid.idx;
ujit_core.c (55 changes)
@@ -185,7 +185,7 @@ block_t* gen_block_version(blockid_t blockid, const ctx_t* start_ctx)
         // Patch the last branch address
         last_branch->dst_addrs[0] = cb_get_ptr(cb, block->start_pos);
         add_incoming(block, branch_idx);
-        assert (block->start_pos == last_branch->end_pos);
+        RUBY_ASSERT(block->start_pos == last_branch->end_pos);
     }
 
     return first_block;
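For orientation: add_incoming records this branch as an incoming edge of the freshly generated block version, which is the bookkeeping invalidate relies on later to re-stub every jump into the block, and the converted assertion checks the fallthrough invariant that the new block starts exactly where the last branch ended. The definition of add_incoming is not part of this diff; a plausible sketch, given the incoming/num_incoming fields visible in the block_t hunk further down (the realloc growth strategy is an assumption):

    // Record that the branch at branch_idx jumps into this block version
    // (hypothetical sketch; the commit's own definition is not shown here)
    static void
    add_incoming(block_t* block, uint32_t branch_idx)
    {
        uint32_t new_count = block->num_incoming + 1;
        block->incoming = realloc(block->incoming, new_count * sizeof(uint32_t));
        if (!block->incoming) {
            rb_bug("out of memory");
        }
        block->incoming[block->num_incoming] = branch_idx;
        block->num_incoming = new_count;
    }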
@@ -221,8 +221,8 @@ uint8_t* branch_stub_hit(uint32_t branch_idx, uint32_t target_idx)
 
     RB_VM_LOCK_ENTER();
 
-    assert (branch_idx < num_branches);
-    assert (target_idx < 2);
+    RUBY_ASSERT(branch_idx < num_branches);
+    RUBY_ASSERT(target_idx < 2);
     branch_t *branch = &branch_entries[branch_idx];
     blockid_t target = branch->targets[target_idx];
     ctx_t* target_ctx = &branch->target_ctxs[target_idx];
@@ -239,7 +239,7 @@ uint8_t* branch_stub_hit(uint32_t branch_idx, uint32_t target_idx)
         // Rewrite the branch with the new, potentially more compact shape
         cb_set_pos(cb, branch->start_pos);
         branch->gen_fn(cb, branch->dst_addrs[0], branch->dst_addrs[1], branch->shape);
-        assert (cb->write_pos <= branch->end_pos);
+        RUBY_ASSERT(cb->write_pos <= branch->end_pos);
     }
 
     // Try to find a compiled version of this block
@@ -259,12 +259,11 @@ uint8_t* branch_stub_hit(uint32_t branch_idx, uint32_t target_idx)
     branch->dst_addrs[target_idx] = dst_addr;
 
     // Rewrite the branch with the new jump target address
-    assert (branch->dst_addrs[0] != NULL);
-    assert (branch->dst_addrs[1] != NULL);
+    RUBY_ASSERT(branch->dst_addrs[0] != NULL);
     uint32_t cur_pos = cb->write_pos;
     cb_set_pos(cb, branch->start_pos);
     branch->gen_fn(cb, branch->dst_addrs[0], branch->dst_addrs[1], branch->shape);
-    assert (cb->write_pos <= branch->end_pos);
+    RUBY_ASSERT(cb->write_pos <= branch->end_pos);
     branch->end_pos = cb->write_pos;
     cb_set_pos(cb, cur_pos);
 
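Two things changed in this hunk beyond the assert conversion. First, the assertion on dst_addrs[1] is dropped rather than converted: branches created by gen_direct_jump (below) have only one target, so dst_addrs[1] can legitimately be NULL when a stub is patched. Second, the save/seek/re-emit/restore sequence here is the same patching idiom used again in invalidate at the end of this commit; condensed into one helper for clarity (a sketch using only names from this diff, not a function the commit defines):

    // Re-emit an existing branch in place, then restore the write cursor
    // (illustrative helper, not part of this commit)
    static void
    patch_branch(codeblock_t* cb, branch_t* branch)
    {
        uint32_t cur_pos = cb->write_pos;   // remember current codegen position
        cb_set_pos(cb, branch->start_pos);  // seek back to the branch site
        branch->gen_fn(cb, branch->dst_addrs[0], branch->dst_addrs[1], branch->shape);
        RUBY_ASSERT(cb->write_pos <= branch->end_pos); // must not outgrow old code
        branch->end_pos = cb->write_pos;    // the rewritten branch may be shorter
        cb_set_pos(cb, cur_pos);            // resume where codegen left off
    }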
@@ -331,9 +330,9 @@ void gen_branch(
     branchgen_fn gen_fn
 )
 {
-    assert (target0.iseq != NULL);
-    assert (target1.iseq != NULL);
-    assert (num_branches < MAX_BRANCHES);
+    RUBY_ASSERT(target0.iseq != NULL);
+    RUBY_ASSERT(target1.iseq != NULL);
+    RUBY_ASSERT(num_branches < MAX_BRANCHES);
     uint32_t branch_idx = num_branches++;
 
     // Get the branch targets or stubs
@@ -369,7 +368,7 @@ gen_jump_branch(codeblock_t* cb, uint8_t* target0, uint8_t* target1, uint8_t sha
         break;
 
         case SHAPE_NEXT1:
-        assert (false);
+        RUBY_ASSERT(false);
         break;
 
         case SHAPE_DEFAULT:
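The RUBY_ASSERT(false) arm marks an unreachable branch shape: an unconditional jump has a single successor, so it can either fall through when target 0 is laid out immediately after (SHAPE_NEXT0, emit nothing) or jump explicitly (SHAPE_DEFAULT), but it can never fall through to target 1. A plausible reconstruction of the whole function for context; only the SHAPE_NEXT1 arm appears in the diff, and the jmp_ptr emitter call is an assumption:

    // Generate the code for an unconditional jump to a branch target
    // (reconstruction for orientation, not the commit's verbatim code)
    static void
    gen_jump_branch(codeblock_t* cb, uint8_t* target0, uint8_t* target1, uint8_t shape)
    {
        switch (shape)
        {
            case SHAPE_NEXT0:
            // Target 0 immediately follows: fall through, emit nothing
            break;

            case SHAPE_NEXT1:
            // An unconditional jump cannot fall through to target 1
            RUBY_ASSERT(false);
            break;

            case SHAPE_DEFAULT:
            // Emit an explicit jump to target 0
            jmp_ptr(cb, target0);
            break;
        }
    }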
@@ -383,8 +382,8 @@ void gen_direct_jump(
     blockid_t target0
 )
 {
-    assert (target0.iseq != NULL);
-    assert (num_branches < MAX_BRANCHES);
+    RUBY_ASSERT(target0.iseq != NULL);
+    RUBY_ASSERT(num_branches < MAX_BRANCHES);
     uint32_t branch_idx = num_branches++;
 
     // Branch targets or stub adddress
@@ -442,11 +441,30 @@ void invalidate(block_t* block)
     fprintf(stderr, "invalidating block (%p, %d)\n", block->blockid.iseq, block->blockid.idx);
     fprintf(stderr, "block=%p\n", block);
 
+    // Find the first version for this blockid
+    block_t* first_block = NULL;
+    rb_st_lookup(version_tbl, (st_data_t)&block->blockid, (st_data_t*)&first_block);
+    RUBY_ASSERT(first_block != NULL);
+
     // Remove the version object from the map so we can re-generate stubs
-    st_data_t key = (st_data_t)&block->blockid;
-    int success = st_delete(version_tbl, &key, NULL);
-    if (!success) {
-        rb_bug("failed to delete invalidated version");
-    }
+    if (first_block == block)
+    {
+        st_data_t key = (st_data_t)&block->blockid;
+        int success = st_delete(version_tbl, &key, NULL);
+        RUBY_ASSERT(success);
+    }
+    else
+    {
+        bool deleted = false;
+        for (block_t* cur = first_block; cur != NULL; cur = cur->next)
+        {
+            if (cur->next == block)
+            {
+                cur->next = cur->next->next;
+                break;
+            }
+        }
+        RUBY_ASSERT(deleted);
+    }
 
     // Get a pointer to the generated code for this block
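One detail worth flagging in the new unlink path: deleted is initialized to false and asserted afterwards, but nothing in the hunk ever sets it to true, so RUBY_ASSERT(deleted) would fire on the first non-head removal in a debug build. Assuming that is an oversight rather than an artifact of this page, a corrected sketch of the unlink step:

    // Unlink `block` from the singly-linked version list headed by first_block
    bool deleted = false;
    for (block_t* cur = first_block; cur != NULL; cur = cur->next)
    {
        if (cur->next == block)
        {
            cur->next = cur->next->next; // splice the node out of the list
            deleted = true;              // record that the node was found
            break;
        }
    }
    RUBY_ASSERT(deleted);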
@@ -479,8 +497,7 @@ void invalidate(block_t* block)
         }
 
         // Rewrite the branch with the new jump target address
-        assert (branch->dst_addrs[0] != NULL);
-        assert (branch->dst_addrs[1] != NULL);
+        RUBY_ASSERT(branch->dst_addrs[0] != NULL);
         uint32_t cur_pos = cb->write_pos;
         cb_set_pos(cb, branch->start_pos);
         branch->gen_fn(cb, branch->dst_addrs[0], branch->dst_addrs[1], branch->shape);
@@ -119,6 +119,9 @@ typedef struct BlockVersion
     uint32_t* incoming;
     uint32_t num_incoming;
 
+    // Next block version for this blockid (singly-linked list)
+    struct BlockVersion* next;
+
 } block_t;
 
 // Context object methods
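This next pointer is what implements "multiple versions per block": version_tbl maps a blockid to the first version, and additional versions generated for other contexts chain off it. A hedged sketch of how a context-aware lookup over that chain might look; the ctx field on block_t and the ctx_is_compatible predicate are assumptions, not shown in this commit:

    // Walk the version list for a blockid, looking for a version whose
    // context matches. Sketch only; the real matching rule is not in this diff.
    static block_t*
    find_block_version(blockid_t blockid, const ctx_t* ctx)
    {
        block_t* first_block = NULL;
        if (!rb_st_lookup(version_tbl, (st_data_t)&blockid, (st_data_t*)&first_block))
            return NULL;

        for (block_t* cur = first_block; cur != NULL; cur = cur->next)
        {
            if (ctx_is_compatible(&cur->ctx, ctx)) // hypothetical predicate
                return cur;
        }

        return NULL;
    }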
@@ -186,7 +186,7 @@ add_lookup_dependency_i(st_data_t *key, st_data_t *value, st_data_t data, int ex
 void
 assume_method_lookup_stable(const struct rb_callcache *cc, const rb_callable_method_entry_t *cme, block_t* block)
 {
-    assert (block != NULL);
+    RUBY_ASSERT(block != NULL);
     st_update(method_lookup_dependency, (st_data_t)cme, add_lookup_dependency_i, (st_data_t)block);
     st_update(method_lookup_dependency, (st_data_t)cc, add_lookup_dependency_i, (st_data_t)block);
     // FIXME: This is a leak! When either the cme or the cc become invalid, the other also needs to go
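st_update invokes add_lookup_dependency_i (its signature is visible in the hunk header) with the last argument indicating whether the key was already present; the callback updates *value in place and returns ST_CONTINUE to keep the entry. One plausible shape for such a callback, accumulating a set of dependent blocks per cme/cc; the st_init_numtable set representation is an assumption, not this commit's code:

    // Add `block` to the set of blocks that depend on this cme or cc
    // (plausible sketch; the commit's actual body is not shown in the diff)
    static int
    add_lookup_dependency_i(st_data_t *key, st_data_t *value, st_data_t data, int existing)
    {
        block_t* block = (block_t*)data;

        st_table* block_set;
        if (existing) {
            block_set = (st_table*)*value;  // reuse the existing set
        }
        else {
            block_set = st_init_numtable(); // first dependency for this key
            *value = (st_data_t)block_set;
        }

        st_insert(block_set, (st_data_t)block, 1);
        return ST_CONTINUE;
    }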