author | Jeremy Evans <[email protected]> | 2019-09-05 10:36:28 -0700
committer | Jeremy Evans <[email protected]> | 2019-09-05 17:47:12 -0700
commit | d1ef73b59cede58f2173fa0f4ff7480a820f25d6 (patch)
tree | 39e3f12eb37469d60d4f027da3f0aa6f68936bf5 /vm_insnhelper.c
parent | 55b96c5d2d7d8bcc2953484bd2f9c9519b252dae (diff)
Always remove empty keyword hashes when calling methods
While doing so is not backwards compatible with Ruby 2.6, it is
necessary for generic argument forwarding to work for all methods:
```ruby
def foo(*args, **kw, &block)
bar(*args, **kw, &block)
end
```
If empty keyword hashes are not removed and `bar` does not accept
keyword arguments, then a call to `foo` without keyword arguments
ends up calling `bar` with an extra empty hash as a positional argument.
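A minimal sketch of the behavior described above (`foo` and `bar` are the same illustrative delegating methods as in the snippet; the "without the removal" result reflects the older Ruby 2.6 behavior the message refers to):

```ruby
def bar(*args)
  args
end

def foo(*args, **kw, &block)
  bar(*args, **kw, &block)
end

foo(1)
# With empty keyword hashes removed: bar receives [1].
# Without the removal (the Ruby 2.6 behavior described above):
# bar receives [1, {}], i.e. an extra positional empty hash.
```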
Diffstat (limited to 'vm_insnhelper.c')
-rw-r--r-- | vm_insnhelper.c | 31
1 file changed, 18 insertions, 13 deletions
```diff
diff --git a/vm_insnhelper.c b/vm_insnhelper.c
index baf93de742..4ae94e3fd7 100644
--- a/vm_insnhelper.c
+++ b/vm_insnhelper.c
@@ -1740,7 +1740,7 @@ rb_iseq_only_kwparam_p(const rb_iseq_t *iseq)
 static inline void
 CALLER_SETUP_ARG(struct rb_control_frame_struct *restrict cfp,
                  struct rb_calling_info *restrict calling,
-                 const struct rb_call_info *restrict ci, int remove_empty_keyword_hash)
+                 const struct rb_call_info *restrict ci)
 {
     if (UNLIKELY(IS_ARGS_SPLAT(ci))) {
         /* This expands the rest argument to the stack.
@@ -1755,9 +1755,14 @@ CALLER_SETUP_ARG(struct rb_control_frame_struct *restrict cfp,
          */
         vm_caller_setup_arg_kw(cfp, calling, ci);
     }
-    if (UNLIKELY(calling->kw_splat && remove_empty_keyword_hash)) {
+    if (UNLIKELY(calling->kw_splat)) {
         /* This removes the last Hash object if it is empty.
          * So, ci->flag & VM_CALL_KW_SPLAT is now inconsistent.
+         * However, you can use ci->flag & VM_CALL_KW_SPLAT to
+         * determine whether a hash should be added back with
+         * warning (for backwards compatibility in cases where
+         * the method does not have the number of required
+         * arguments.
          */
         if (RHASH_EMPTY_P(cfp->sp[-1])) {
             cfp->sp--;
@@ -1873,7 +1878,7 @@ vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
     if (LIKELY(!(ci->flag & VM_CALL_KW_SPLAT))) {
         if (LIKELY(rb_simple_iseq_p(iseq))) {
             rb_control_frame_t *cfp = ec->cfp;
-            CALLER_SETUP_ARG(cfp, calling, ci, 0); /* splat arg */
+            CALLER_SETUP_ARG(cfp, calling, ci);
 
             if (calling->argc != iseq->body->param.lead_num) {
                 argument_arity_error(ec, iseq, calling->argc, iseq->body->param.lead_num, iseq->body->param.lead_num);
@@ -1884,7 +1889,7 @@ vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
         }
         else if (rb_iseq_only_optparam_p(iseq)) {
             rb_control_frame_t *cfp = ec->cfp;
-            CALLER_SETUP_ARG(cfp, calling, ci, 0); /* splat arg */
+            CALLER_SETUP_ARG(cfp, calling, ci);
 
             const int lead_num = iseq->body->param.lead_num;
             const int opt_num = iseq->body->param.opt_num;
@@ -2237,7 +2242,7 @@ vm_call_cfunc(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb
 {
     RB_DEBUG_COUNTER_INC(ccf_cfunc);
 
-    CALLER_SETUP_ARG(reg_cfp, calling, ci, 0);
+    CALLER_SETUP_ARG(reg_cfp, calling, ci);
     return vm_call_cfunc_with_frame(ec, reg_cfp, calling, ci, cc);
 }
 
@@ -2279,7 +2284,7 @@ vm_call_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_c
     VALUE *argv;
     int argc;
 
-    CALLER_SETUP_ARG(cfp, calling, ci, 0);
+    CALLER_SETUP_ARG(cfp, calling, ci);
     argc = calling->argc;
     argv = ALLOCA_N(VALUE, argc);
     MEMCPY(argv, cfp->sp - argc, VALUE, argc);
@@ -2309,7 +2314,7 @@ vm_call_opt_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct
     struct rb_call_info_with_kwarg ci_entry;
     struct rb_call_cache cc_entry, *cc;
 
-    CALLER_SETUP_ARG(reg_cfp, calling, orig_ci, 0);
+    CALLER_SETUP_ARG(reg_cfp, calling, orig_ci);
 
     i = calling->argc - 1;
 
@@ -2414,7 +2419,7 @@ vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
     struct rb_call_cache cc_entry, *cc;
     unsigned int argc;
 
-    CALLER_SETUP_ARG(reg_cfp, calling, orig_ci, 0);
+    CALLER_SETUP_ARG(reg_cfp, calling, orig_ci);
     argc = calling->argc+1;
 
     ci_entry.flag = VM_CALL_FCALL | VM_CALL_OPT_SEND | (calling->kw_splat ? VM_CALL_KW_SPLAT : 0);
@@ -2619,14 +2624,14 @@ vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, st
         return vm_call_cfunc(ec, cfp, calling, ci, cc);
 
       case VM_METHOD_TYPE_ATTRSET:
-        CALLER_SETUP_ARG(cfp, calling, ci, 1);
+        CALLER_SETUP_ARG(cfp, calling, ci);
         rb_check_arity(calling->argc, 1, 1);
         cc->aux.index = 0;
         CC_SET_FASTPATH(cc, vm_call_attrset, !((ci->flag & VM_CALL_ARGS_SPLAT) || (ci->flag & VM_CALL_KWARG)));
         return vm_call_attrset(ec, cfp, calling, ci, cc);
 
       case VM_METHOD_TYPE_IVAR:
-        CALLER_SETUP_ARG(cfp, calling, ci, 1);
+        CALLER_SETUP_ARG(cfp, calling, ci);
         rb_check_arity(calling->argc, 0, 0);
         cc->aux.index = 0;
         CC_SET_FASTPATH(cc, vm_call_ivar, !(ci->flag & VM_CALL_ARGS_SPLAT));
@@ -2927,7 +2932,7 @@ vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *ca
         rb_control_frame_t *cfp = ec->cfp;
         VALUE arg0;
 
-        CALLER_SETUP_ARG(cfp, calling, ci, 0); /* splat arg */
+        CALLER_SETUP_ARG(cfp, calling, ci);
 
         if (calling->kw_splat) {
             rb_warn_keyword_to_last_hash(calling, ci, iseq);
@@ -3015,7 +3020,7 @@ vm_invoke_symbol_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
 {
     VALUE val;
     int argc;
-    CALLER_SETUP_ARG(ec->cfp, calling, ci, 0);
+    CALLER_SETUP_ARG(ec->cfp, calling, ci);
     argc = calling->argc;
     val = vm_yield_with_symbol(ec, symbol, argc, STACK_ADDR_FROM_TOP(argc), calling->kw_splat, calling->block_handler);
     POPN(argc);
@@ -3029,7 +3034,7 @@ vm_invoke_ifunc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
 {
     VALUE val;
     int argc;
-    CALLER_SETUP_ARG(ec->cfp, calling, ci, 0);
+    CALLER_SETUP_ARG(ec->cfp, calling, ci);
     argc = calling->argc;
     val = vm_yield_with_cfunc(ec, captured, captured->self, argc, STACK_ADDR_FROM_TOP(argc), calling->kw_splat, calling->block_handler, NULL);
     POPN(argc); /* TODO: should put before C/yield? */
```
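Because the `remove_empty_keyword_hash` flag is dropped entirely, the same removal now applies on every call path that goes through `CALLER_SETUP_ARG` (`vm_call_cfunc`, `vm_call_opt_send`, `vm_call_method_missing`, attribute setters/getters, and block invocation), not only the paths that previously passed `1`. A hedged sketch of what that looks like at the Ruby level (illustrative only; `bar` is a hypothetical method, and results reflect the behavior described in this commit):

```ruby
def bar(*args)
  args
end

kw = {}

bar(1, **kw)         # => [1]  empty keyword hash removed on the direct call path
send(:bar, 1, **kw)  # => [1]  likewise when dispatched through vm_call_opt_send
```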