diff options
author | Alan Wu <[email protected]> | 2021-08-25 17:00:45 -0400 |
---|---|---|
committer | Alan Wu <[email protected]> | 2021-10-20 18:19:39 -0400 |
commit | bd876c243aeace00ea312d0a5bbff091ccc84ba2 (patch) | |
tree | e30156ab87c3345a995dcd47c11dc9a535643694 /yjit_core.c | |
parent | 0562459473f44c270784074a09a33ea30d68e457 (diff) |
TracePoint support
This change fixes some cases where YJIT fails to fire tracing events.
Most of the situations YJIT did not handle correctly involve enabling
tracing while running inside generated code.
A new operation to invalidate all generated code is added, which uses
patching to make generated code exit at the next VM instruction
boundary. A new routine called `jit_prepare_routine_call()` is
introduced to facilitate this and should be used when generating code
that could allocate, or could otherwise use `RB_VM_LOCK_ENTER()`.
The `c_return` event is fired in the middle of an instruction as opposed
to at an instruction boundary, so it requires special handling. C method
call return points are patched to go to a function which does everything
the interpreter does, including firing the `c_return` event. The
generated code for C method calls normally does not fire the event.
Invalidated code should not change after patching so the exits are not
clobbered. A new variable is introduced to track the region of code that
should not change.
Diffstat (limited to 'yjit_core.c')
-rw-r--r-- | yjit_core.c | 53 |
1 files changed, 31 insertions, 22 deletions
diff --git a/yjit_core.c b/yjit_core.c index ee3914acc6..413411a375 100644 --- a/yjit_core.c +++ b/yjit_core.c @@ -506,11 +506,12 @@ static size_t get_num_versions(blockid_t blockid) static void add_block_version(blockid_t blockid, block_t* block) { - // Function entry blocks must have stack size 0 - RUBY_ASSERT(!(block->blockid.idx == 0 && block->ctx.stack_size > 0)); const rb_iseq_t *iseq = block->blockid.iseq; struct rb_iseq_constant_body *body = iseq->body; + // Function entry blocks must have stack size 0 + RUBY_ASSERT(!(block->blockid.idx == 0 && block->ctx.stack_size > 0)); + // Ensure yjit_blocks is initialized for this iseq if (rb_darray_size(body->yjit_blocks) == 0) { // Initialize yjit_blocks to be as wide as body->iseq_encoded @@ -772,7 +773,7 @@ branch_stub_hit(branch_t* branch, const uint32_t target_idx, rb_execution_contex // If this block hasn't yet been compiled if (!p_block) { // If the new block can be generated right after the branch (at cb->write_pos) - if (cb->write_pos == branch->end_pos) { + if (cb->write_pos == branch->end_pos && branch->start_pos >= yjit_codepage_frozen_bytes) { // This branch should be terminating its block RUBY_ASSERT(branch->end_pos == branch->block->end_pos); @@ -801,12 +802,14 @@ branch_stub_hit(branch_t* branch, const uint32_t target_idx, rb_execution_contex branch->dst_addrs[target_idx] = dst_addr; // Rewrite the branch with the new jump target address - RUBY_ASSERT(branch->dst_addrs[0] != NULL); - uint32_t cur_pos = cb->write_pos; - cb_set_pos(cb, branch->start_pos); - branch->gen_fn(cb, branch->dst_addrs[0], branch->dst_addrs[1], branch->shape); - RUBY_ASSERT(cb->write_pos == branch->end_pos && "branch can't change size"); - cb_set_pos(cb, cur_pos); + if (branch->start_pos >= yjit_codepage_frozen_bytes) { + RUBY_ASSERT(branch->dst_addrs[0] != NULL); + uint32_t cur_pos = cb->write_pos; + cb_set_pos(cb, branch->start_pos); + branch->gen_fn(cb, branch->dst_addrs[0], branch->dst_addrs[1], branch->shape); + 
RUBY_ASSERT(cb->write_pos == branch->end_pos && "branch can't change size"); + cb_set_pos(cb, cur_pos); + } // Mark this branch target as patched (no longer a stub) branch->blocks[target_idx] = p_block; @@ -921,8 +924,7 @@ void gen_direct_jump( block_t* p_block = find_block_version(target0, ctx); // If the version already exists - if (p_block) - { + if (p_block) { rb_darray_append(&p_block->incoming, branch); branch->dst_addrs[0] = cb_get_ptr(cb, p_block->start_pos); @@ -934,10 +936,9 @@ void gen_direct_jump( gen_jump_branch(cb, branch->dst_addrs[0], NULL, SHAPE_DEFAULT); branch->end_pos = cb->write_pos; } - else - { - // The target block will be compiled right after this one (fallthrough) - // See the loop in gen_block_version() + else { + // This NULL target address signals gen_block_version() to compile the + // target block right after this one (fallthrough). branch->dst_addrs[0] = NULL; branch->shape = SHAPE_NEXT0; branch->start_pos = cb->write_pos; @@ -1048,7 +1049,7 @@ block_array_remove(rb_yjit_block_array_t block_array, block_t *block) // Invalidate one specific block version void -invalidate_block_version(block_t* block) +invalidate_block_version(block_t *block) { ASSERT_vm_locking(); // TODO: want to assert that all other ractors are stopped here. Can't patch @@ -1067,8 +1068,7 @@ invalidate_block_version(block_t* block) uint8_t* code_ptr = cb_get_ptr(cb, block->start_pos); // For each incoming branch - rb_darray_for(block->incoming, incoming_idx) - { + rb_darray_for(block->incoming, incoming_idx) { branch_t* branch = rb_darray_get(block->incoming, incoming_idx); uint32_t target_idx = (branch->dst_addrs[0] == code_ptr)? 
0:1; RUBY_ASSERT(branch->dst_addrs[target_idx] == code_ptr); @@ -1077,6 +1077,11 @@ invalidate_block_version(block_t* block) // Mark this target as being a stub branch->blocks[target_idx] = NULL; + // Don't patch frozen code region + if (branch->start_pos < yjit_codepage_frozen_bytes) { + continue; + } + // Create a stub for this branch target branch->dst_addrs[target_idx] = get_branch_target( block->blockid, @@ -1088,8 +1093,7 @@ invalidate_block_version(block_t* block) // Check if the invalidated block immediately follows bool target_next = block->start_pos == branch->end_pos; - if (target_next) - { + if (target_next) { // The new block will no longer be adjacent branch->shape = SHAPE_DEFAULT; } @@ -1103,8 +1107,13 @@ invalidate_block_version(block_t* block) branch->block->end_pos = cb->write_pos; cb_set_pos(cb, cur_pos); - if (target_next && branch->end_pos > block->end_pos) - { + if (target_next && branch->end_pos > block->end_pos) { + fprintf(stderr, "branch_block_idx=%u block_idx=%u over=%d block_size=%d\n", + branch->block->blockid.idx, + block->blockid.idx, + branch->end_pos - block->end_pos, + block->end_pos - block->start_pos); + yjit_print_iseq(branch->block->blockid.iseq); rb_bug("yjit invalidate rewrote branch past end of invalidated block"); } } |