diff options
author | Takashi Kokubun <[email protected]> | 2022-12-08 14:33:18 -0800 |
---|---|---|
committer | GitHub <[email protected]> | 2022-12-08 17:33:18 -0500 |
commit | 51ef991d8dabadf8fbd46ad02cd9fea3c4d06f62 (patch) | |
tree | 495c1c1a79a61b3209a0f0d5e5fd736081748dd6 /yjit/src | |
parent | b26c9ce5e9b1c80a59f4faeb92be4a302232e12c (diff) |
YJIT: Drop Copy trait from Context (#6889)
Notes:
Merged-By: maximecb <[email protected]>
Diffstat (limited to 'yjit/src')
-rw-r--r-- | yjit/src/codegen.rs | 16 | ||||
-rw-r--r-- | yjit/src/core.rs | 22 |
2 files changed, 19 insertions, 19 deletions
diff --git a/yjit/src/codegen.rs b/yjit/src/codegen.rs index 914d04bb97..d890775f2b 100644 --- a/yjit/src/codegen.rs +++ b/yjit/src/codegen.rs @@ -660,7 +660,7 @@ fn jump_to_next_insn( ) { // Reset the depth since in current usages we only ever jump to to // chain_depth > 0 from the same instruction. - let mut reset_depth = *current_context; + let mut reset_depth = current_context.clone(); reset_depth.reset_chain_depth(); let jump_block = BlockId { @@ -1870,7 +1870,7 @@ fn jit_chain_guard( }; if (ctx.get_chain_depth() as i32) < depth_limit { - let mut deeper = *ctx; + let mut deeper = ctx.clone(); deeper.increment_chain_depth(); let bid = BlockId { iseq: jit.iseq, @@ -1964,7 +1964,7 @@ fn gen_get_ivar( side_exit: CodePtr, ) -> CodegenStatus { let comptime_val_klass = comptime_receiver.class_of(); - let starting_context = *ctx; // make a copy for use with jit_chain_guard + let starting_context = ctx.clone(); // make a copy for use with jit_chain_guard // If recv isn't already a register, load it. let recv = match recv { @@ -2178,7 +2178,7 @@ fn gen_setinstancevariable( asm: &mut Assembler, ocb: &mut OutlinedCb, ) -> CodegenStatus { - let starting_context = *ctx; // make a copy for use with jit_chain_guard + let starting_context = ctx.clone(); // make a copy for use with jit_chain_guard // Defer compilation so we can specialize on a runtime `self` if !jit_at_current_insn(jit) { @@ -3393,7 +3393,7 @@ fn gen_opt_case_dispatch( defer_compilation(jit, ctx, asm, ocb); return EndBlock; } - let starting_context = *ctx; + let starting_context = ctx.clone(); let case_hash = jit_get_arg(jit, 0); let else_offset = jit_get_arg(jit, 1).as_u32(); @@ -5395,7 +5395,7 @@ fn gen_send_iseq( // Pop arguments and receiver in return context, push the return value // After the return, sp_offset will be 1. The codegen for leave writes // the return value in case of JIT-to-JIT return. 
- let mut return_ctx = *ctx; + let mut return_ctx = ctx.clone(); return_ctx.stack_pop(sp_offset.try_into().unwrap()); return_ctx.stack_push(Type::Unknown); return_ctx.set_sp_offset(1); @@ -5764,7 +5764,7 @@ fn gen_send_general( // instead we look up the method and call it, // doing some stack shifting based on the VM_CALL_OPT_SEND flag - let starting_context = *ctx; + let starting_context = ctx.clone(); if argc == 0 { gen_counter_incr!(asm, send_send_wrong_args); @@ -6678,7 +6678,7 @@ fn gen_getblockparamproxy( return EndBlock; } - let starting_context = *ctx; // make a copy for use with jit_chain_guard + let starting_context = ctx.clone(); // make a copy for use with jit_chain_guard // A mirror of the interpreter code. Checking for the case // where it's pushing rb_block_param_proxy. diff --git a/yjit/src/core.rs b/yjit/src/core.rs index 9f89a6e554..c87bfe8245 100644 --- a/yjit/src/core.rs +++ b/yjit/src/core.rs @@ -276,7 +276,7 @@ pub enum YARVOpnd { /// Code generation context /// Contains information we can use to specialize/optimize code /// There are a lot of context objects so we try to keep the size small. -#[derive(Copy, Clone, Default, PartialEq, Debug)] +#[derive(Clone, Default, PartialEq, Debug)] pub struct Context { // Number of values currently on the temporary stack stack_size: u16, @@ -854,7 +854,7 @@ fn find_block_version(blockid: BlockId, ctx: &Context) -> Option<BlockRef> { pub fn limit_block_versions(blockid: BlockId, ctx: &Context) -> Context { // Guard chains implement limits separately, do nothing if ctx.chain_depth > 0 { - return *ctx; + return ctx.clone(); } // If this block version we're about to add will hit the version limit @@ -875,7 +875,7 @@ pub fn limit_block_versions(blockid: BlockId, ctx: &Context) -> Context { return generic_ctx; } - return *ctx; + return ctx.clone(); } /// Keep track of a block version. Block should be fully constructed. 
@@ -939,7 +939,7 @@ impl Block { let block = Block { blockid, end_idx: 0, - ctx: *ctx, + ctx: ctx.clone(), start_addr: None, end_addr: None, incoming: Vec::new(), @@ -963,7 +963,7 @@ impl Block { } pub fn get_ctx(&self) -> Context { - self.ctx + self.ctx.clone() } #[allow(unused)] @@ -1720,7 +1720,7 @@ fn branch_stub_hit_body(branch_ptr: *const c_void, target_idx: u32, ec: EcPtr) - let target_idx: usize = target_idx.as_usize(); let target = branch.targets[target_idx].as_ref().unwrap(); let target_id = target.id; - let target_ctx = target.ctx; + let target_ctx = target.ctx.clone(); let target_branch_shape = match target_idx { 0 => BranchShape::Next0, @@ -1889,7 +1889,7 @@ fn set_branch_target( block: Some(blockref.clone()), address: block.start_addr, id: target, - ctx: *ctx, + ctx: ctx.clone(), })); return; @@ -1934,7 +1934,7 @@ fn set_branch_target( block: None, // no block yet address: Some(stub_addr), id: target, - ctx: *ctx, + ctx: ctx.clone(), })); } } @@ -2020,7 +2020,7 @@ pub fn gen_direct_jump(jit: &JITState, ctx: &Context, target0: BlockId, asm: &mu let mut new_target = BranchTarget { block: None, address: None, - ctx: *ctx, + ctx: ctx.clone(), id: target0, }; @@ -2067,7 +2067,7 @@ pub fn defer_compilation( panic!("Double defer!"); } - let mut next_ctx = *cur_ctx; + let mut next_ctx = cur_ctx.clone(); if next_ctx.chain_depth == u8::MAX { panic!("max block version chain depth reached!"); @@ -2262,7 +2262,7 @@ pub fn invalidate_block_version(blockref: &BlockRef) { block: None, address: block.entry_exit, id: block.blockid, - ctx: block.ctx, + ctx: block.ctx.clone(), })); } |