author | Alan Wu <[email protected]> | 2021-11-04 12:30:30 -0400
---|---|---
committer | Alan Wu <[email protected]> | 2021-11-22 18:23:28 -0500
commit | 13d1ded253940585a993e92648ab9f77d355586d (patch) |
tree | 40cd992d429c0e7c53e6e3c4b829b69d662099ff /yjit_core.c |
parent | e42f994f6b20416853af0252029af94ff7c9b9a9 (diff) |
YJIT: Make block invalidation more robust
This commit adds an entry_exit field to block_t for use in
invalidate_block_version(). By patching the start of the block while
invalidating it, invalidate_block_version() can work correctly even
when there is no executable memory left for new branch stubs.
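
For orientation, here is a minimal sketch of the shape of this change, assuming only the fields visible in the diff (start_addr, end_addr, entry_exit); it is not the real block_t definition from the YJIT sources.

```c
#include <stdint.h>

// Minimal sketch, not the actual block_t: alongside the addresses of its
// generated code, a block now records an entry_exit address that
// invalidate_block_version() can always redirect execution to, even when no
// executable memory is left for new branch stubs.
typedef struct {
    uint8_t *start_addr;  // first byte of the block's generated code
    uint8_t *end_addr;    // one past the last generated byte
    uint8_t *entry_exit;  // side exit reachable from the block entry (new)
} block_sketch_t;
```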
This change additionally fixes correctness in situations where we
cannot patch incoming jumps to the invalidated block. In cases such as
Shopify/yjit#226, the address of the start of the block is saved and
used later, possibly after the block has been invalidated.
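
To illustrate why patching the entry helps, the sketch below writes a relative jump at a block's start address so that a stale saved pointer lands on the side exit instead of invalidated code. It is a hedged stand-in: the committed code goes through YJIT's assembler (jmp_ptr in the diff), and the x86-64 byte encoding and the patch_entry_with_jump helper here are only illustrative.

```c
#include <stdint.h>
#include <string.h>

// Hedged sketch, not the committed implementation: overwrite the first bytes
// of an invalidated block with `jmp rel32` to its entry exit. Assumes the
// code page is writable and the displacement fits in 32 bits.
static void
patch_entry_with_jump(uint8_t *start_addr, uint8_t *entry_exit)
{
    // x86-64 `jmp rel32` is 5 bytes: opcode 0xE9 plus a signed 32-bit
    // displacement measured from the end of the instruction.
    int32_t rel32 = (int32_t)(entry_exit - (start_addr + 5));
    start_addr[0] = 0xE9;
    memcpy(&start_addr[1], &rel32, sizeof(rel32));
}
```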
The assume_* family of functions now generates block->entry_exit before
remembering blocks for invalidation.
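
The ordering matters because invalidation can run when the code area is already full. A self-contained model of that ordering is sketched below; the names (model_assume_stable, model_gen_entry_exit) are illustrative and not the real assume_* implementations.

```c
#include <stdbool.h>
#include <stddef.h>

// Illustrative model only: the entry exit is generated first, and the block
// is remembered for invalidation only if that succeeded, so invalidation
// never has to generate new code after the fact.
typedef struct {
    void *entry_exit;               // exit stub for the block's entry
    bool tracked_for_invalidation;  // registered for later invalidation
} model_block_t;

// Stand-in for exit-stub generation; in real code this can fail under OOM.
static void *
model_gen_entry_exit(void)
{
    static unsigned char exit_stub[16];
    return exit_stub;
}

static bool
model_assume_stable(model_block_t *block)
{
    if (block->entry_exit == NULL) {
        block->entry_exit = model_gen_entry_exit();
        if (block->entry_exit == NULL) {
            return false;  // no exit, so do not record the assumption
        }
    }
    block->tracked_for_invalidation = true;  // safe to invalidate later
    return true;
}
```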
RubyVM::YJIT.simulate_oom! is introduced for testing out-of-memory
conditions. The test for it is disabled for now because OOM triggers
other failure conditions not addressed by this commit.
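
One plausible way to simulate the condition at the C level is sketched below, assuming the codeblock_t type and cb_set_pos helper from YJIT's assembler; the actual implementation behind RubyVM::YJIT.simulate_oom! may differ.

```c
// Hedged sketch only (assumes the yjit_asm.h declarations): exhaust both the
// inline and outlined code blocks so any further code generation request
// sees no remaining space, mimicking an out-of-memory condition.
static void
simulate_oom_sketch(codeblock_t *cb, codeblock_t *ocb)
{
    cb_set_pos(cb, cb->mem_size);    // inline code block: no bytes left
    cb_set_pos(ocb, ocb->mem_size);  // outlined code block: no bytes left
}
```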
Fixes Shopify/yjit#226
Notes:
Merged: https://github.com/ruby/ruby/pull/5145
Diffstat (limited to 'yjit_core.c')
-rw-r--r-- | yjit_core.c | 53 |
1 file changed, 48 insertions, 5 deletions
diff --git a/yjit_core.c b/yjit_core.c
index a395285e1c..32e0575d75 100644
--- a/yjit_core.c
+++ b/yjit_core.c
@@ -884,8 +884,7 @@ get_branch_target(
     block_t *p_block = find_block_version(target, ctx);

     // If the block already exists
-    if (p_block)
-    {
+    if (p_block) {
         // Add an incoming branch for this version
         rb_darray_append(&p_block->incoming, branch);
         branch->blocks[target_idx] = p_block;
@@ -894,12 +893,18 @@ get_branch_target(
         return p_block->start_addr;
     }

+    // Do we have enough memory for a stub?
+    const long MAX_CODE_SIZE = 64;
+    if (ocb->write_pos + MAX_CODE_SIZE >= cb->mem_size) {
+        return NULL;
+    }
+
     // Generate an outlined stub that will call branch_stub_hit()
     uint8_t *stub_addr = cb_get_ptr(ocb, ocb->write_pos);

     // Call branch_stub_hit(branch_idx, target_idx, ec)
     mov(ocb, C_ARG_REGS[2], REG_EC);
-    mov(ocb, C_ARG_REGS[1], imm_opnd(target_idx));
+    mov(ocb, C_ARG_REGS[1], imm_opnd(target_idx));
     mov(ocb, C_ARG_REGS[0], const_ptr_opnd(branch));
     call_ptr(ocb, REG0, (void *)&branch_stub_hit);

@@ -907,6 +912,8 @@ get_branch_target(
     // branch_stub_hit call
     jmp_rm(ocb, RAX);

+    RUBY_ASSERT(cb_get_ptr(ocb, ocb->write_pos) - stub_addr <= MAX_CODE_SIZE);
+
     return stub_addr;
 }

@@ -1116,6 +1123,29 @@ invalidate_block_version(block_t *block)
     // Get a pointer to the generated code for this block
     uint8_t *code_ptr = block->start_addr;

+    // Make the start of the block do an exit. This handles OOM situations
+    // and some cases where we can't efficiently patch incoming branches.
+    // Do this first, since in case there is a fallthrough branch into this
+    // block, the patching loop below can overwrite the start of the block.
+    // In those situations, there are hopefully no jumps to the start of the block
+    // after patching, as the start of the block would be in the middle of something
+    // generated by branch_t::gen_fn.
+    {
+        RUBY_ASSERT_ALWAYS(block->entry_exit && "block invalidation requires an exit");
+        if (block->entry_exit == block->start_addr) {
+            // Some blocks exit on entry. Patching a jump to the entry at the
+            // entry makes an infinite loop.
+        }
+        else if (block->start_addr >= cb_get_ptr(cb, yjit_codepage_frozen_bytes)) { // Don't patch frozen code region
+            // Patch in a jump to block->entry_exit.
+            uint32_t cur_pos = cb->write_pos;
+            cb_set_write_ptr(cb, block->start_addr);
+            jmp_ptr(cb, block->entry_exit);
+            RUBY_ASSERT_ALWAYS(cb_get_ptr(cb, cb->write_pos) < block->end_addr && "invalidation wrote past end of block");
+            cb_set_pos(cb, cur_pos);
+        }
+    }
+
     // For each incoming branch
     rb_darray_for(block->incoming, incoming_idx) {
         branch_t *branch = rb_darray_get(block->incoming, incoming_idx);
@@ -1132,18 +1162,31 @@ invalidate_block_version(block_t *block)
         }

         // Create a stub for this branch target
-        branch->dst_addrs[target_idx] = get_branch_target(
+        uint8_t *branch_target = get_branch_target(
             block->blockid,
             &block->ctx,
             branch,
             target_idx
         );

+        if (!branch_target) {
+            // We were unable to generate a stub (e.g. OOM). Use the block's
+            // exit instead of a stub for the block. It's important that we
+            // still patch the branch in this situation so stubs are unique
+            // to branches. Think about what could go wrong if we run out of
+            // memory in the middle of this loop.
+            branch_target = block->entry_exit;
+        }
+
+        branch->dst_addrs[target_idx] = branch_target;
+
         // Check if the invalidated block immediately follows
         bool target_next = (block->start_addr == branch->end_addr);

         if (target_next) {
-            // The new block will no longer be adjacent
+            // The new block will no longer be adjacent.
+            // Note that we could be enlarging the branch and writing into the
+            // start of the block being invalidated.
             branch->shape = SHAPE_DEFAULT;
         }