author | Takashi Kokubun <[email protected]> | 2022-07-21 09:23:58 -0700
---|---|---
committer | Takashi Kokubun <[email protected]> | 2022-07-21 09:42:04 -0700
commit | 5b21e94bebed90180d8ff63dad03b8b948361089 (patch) |
tree | f9f7196d84b51b7a3a8001658e4391a63b71c396 /vm.c |
parent | 3ff53c8e04ecc91e0190de6d5950ecce2a2ea188 (diff) |
Expand tabs [ci skip]
[Misc #18891]
Notes:
Merged: https://github.com/ruby/ruby/pull/6094
Diffstat (limited to 'vm.c')
-rw-r--r-- | vm.c | 1134 |
1 file changed, 567 insertions, 567 deletions
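The change is purely mechanical: every hunk replaces hard tabs in leading indentation with the equivalent run of spaces and leaves the code itself untouched, which is why the 567 insertions exactly mirror the 567 deletions. The page does not show which tool produced the conversion for ruby/ruby#6094, so the following is only a minimal C sketch of the transformation the hunks exhibit, assuming 8-column tab stops and expansion limited to leading whitespace.

```c
#include <stdio.h>

/* Minimal sketch (not the actual tool behind ruby/ruby#6094): expand hard
 * tabs in leading indentation to spaces, assuming 8-column tab stops.
 * Reads a source file on stdin and writes the expanded version to stdout. */
int
main(void)
{
    int c;
    int col = 0;        /* current output column */
    int in_indent = 1;  /* still inside the line's leading whitespace? */

    while ((c = getchar()) != EOF) {
        if (c == '\n') {
            col = 0;
            in_indent = 1;
            putchar(c);
        }
        else if (c == '\t' && in_indent) {
            int pad = 8 - (col % 8);   /* advance to the next tab stop */
            col += pad;
            while (pad--) putchar(' ');
        }
        else {
            if (c != ' ') in_indent = 0;   /* past the indentation now */
            col++;
            putchar(c);
        }
    }
    return 0;
}
```

Used as a filter (for example `./expand < vm.c > vm.c.expanded`), a tool of this shape leaves tabs that appear after the first non-blank character alone, matching the indentation-only churn visible in the hunks below.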
@@ -70,7 +70,7 @@ static inline const VALUE * VM_EP_LEP(const VALUE *ep) { while (!VM_ENV_LOCAL_P(ep)) { - ep = VM_ENV_PREV_EP(ep); + ep = VM_ENV_PREV_EP(ep); } return ep; } @@ -79,19 +79,19 @@ static inline const rb_control_frame_t * rb_vm_search_cf_from_ep(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const VALUE * const ep) { if (!ep) { - return NULL; + return NULL; } else { - const rb_control_frame_t * const eocfp = RUBY_VM_END_CONTROL_FRAME(ec); /* end of control frame pointer */ + const rb_control_frame_t * const eocfp = RUBY_VM_END_CONTROL_FRAME(ec); /* end of control frame pointer */ - while (cfp < eocfp) { - if (cfp->ep == ep) { - return cfp; - } - cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp); - } + while (cfp < eocfp) { + if (cfp->ep == ep) { + return cfp; + } + cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp); + } - return NULL; + return NULL; } } @@ -143,10 +143,10 @@ VM_CFP_IN_HEAP_P(const rb_execution_context_t *ec, const rb_control_frame_t *cfp VM_ASSERT(start != NULL); if (start <= (VALUE *)cfp && (VALUE *)cfp < end) { - return FALSE; + return FALSE; } else { - return TRUE; + return TRUE; } } @@ -158,10 +158,10 @@ VM_EP_IN_HEAP_P(const rb_execution_context_t *ec, const VALUE *ep) VM_ASSERT(start != NULL); if (start <= ep && ep < end) { - return FALSE; + return FALSE; } else { - return TRUE; + return TRUE; } } @@ -169,19 +169,19 @@ static int vm_ep_in_heap_p_(const rb_execution_context_t *ec, const VALUE *ep) { if (VM_EP_IN_HEAP_P(ec, ep)) { - VALUE envval = ep[VM_ENV_DATA_INDEX_ENV]; /* VM_ENV_ENVVAL(ep); */ + VALUE envval = ep[VM_ENV_DATA_INDEX_ENV]; /* VM_ENV_ENVVAL(ep); */ - if (envval != Qundef) { - const rb_env_t *env = (const rb_env_t *)envval; + if (envval != Qundef) { + const rb_env_t *env = (const rb_env_t *)envval; - VM_ASSERT(vm_assert_env(envval)); - VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED)); - VM_ASSERT(env->ep == ep); - } - return TRUE; + VM_ASSERT(vm_assert_env(envval)); + VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED)); + VM_ASSERT(env->ep == ep); + } + return TRUE; } else { - return FALSE; + return FALSE; } } @@ -235,8 +235,8 @@ vm_cref_new0(VALUE klass, rb_method_visibility_t visi, int module_func, rb_cref_ /* scope */ union { - rb_scope_visibility_t visi; - VALUE value; + rb_scope_visibility_t visi; + VALUE value; } scope_visi; scope_visi.visi.method_visi = visi; @@ -244,12 +244,12 @@ vm_cref_new0(VALUE klass, rb_method_visibility_t visi, int module_func, rb_cref_ /* refinements */ if (prev_cref != NULL && prev_cref != (void *)1 /* TODO: why CREF_NEXT(cref) is 1? 
*/) { - refinements = CREF_REFINEMENTS(prev_cref); + refinements = CREF_REFINEMENTS(prev_cref); - if (!NIL_P(refinements)) { - omod_shared = TRUE; - CREF_OMOD_SHARED_SET(prev_cref); - } + if (!NIL_P(refinements)) { + omod_shared = TRUE; + CREF_OMOD_SHARED_SET(prev_cref); + } } VM_ASSERT(singleton || klass); @@ -327,7 +327,7 @@ vm_cref_new_toplevel(rb_execution_context_t *ec) VALUE top_wrapper = rb_ec_thread_ptr(ec)->top_wrapper; if (top_wrapper) { - cref = vm_cref_new(top_wrapper, METHOD_VISI_PRIVATE, FALSE, cref, FALSE, FALSE); + cref = vm_cref_new(top_wrapper, METHOD_VISI_PRIVATE, FALSE, cref, FALSE, FALSE); } return cref; @@ -345,8 +345,8 @@ vm_cref_dump(const char *mesg, const rb_cref_t *cref) ruby_debug_printf("vm_cref_dump: %s (%p)\n", mesg, (void *)cref); while (cref) { - ruby_debug_printf("= cref| klass: %s\n", RSTRING_PTR(rb_class_path(CREF_CLASS(cref)))); - cref = CREF_NEXT(cref); + ruby_debug_printf("= cref| klass: %s\n", RSTRING_PTR(rb_class_path(CREF_CLASS(cref)))); + cref = CREF_NEXT(cref); } } @@ -467,36 +467,36 @@ rb_vm_inc_const_missing_count(void) MJIT_FUNC_EXPORTED int rb_dtrace_setup(rb_execution_context_t *ec, VALUE klass, ID id, - struct ruby_dtrace_method_hook_args *args) + struct ruby_dtrace_method_hook_args *args) { enum ruby_value_type type; if (!klass) { - if (!ec) ec = GET_EC(); - if (!rb_ec_frame_method_id_and_class(ec, &id, 0, &klass) || !klass) - return FALSE; + if (!ec) ec = GET_EC(); + if (!rb_ec_frame_method_id_and_class(ec, &id, 0, &klass) || !klass) + return FALSE; } if (RB_TYPE_P(klass, T_ICLASS)) { - klass = RBASIC(klass)->klass; + klass = RBASIC(klass)->klass; } else if (FL_TEST(klass, FL_SINGLETON)) { - klass = rb_attr_get(klass, id__attached__); - if (NIL_P(klass)) return FALSE; + klass = rb_attr_get(klass, id__attached__); + if (NIL_P(klass)) return FALSE; } type = BUILTIN_TYPE(klass); if (type == T_CLASS || type == T_ICLASS || type == T_MODULE) { - VALUE name = rb_class_path(klass); - const char *classname, *filename; - const char *methodname = rb_id2name(id); - if (methodname && (filename = rb_source_location_cstr(&args->line_no)) != 0) { - if (NIL_P(name) || !(classname = StringValuePtr(name))) - classname = "<unknown>"; - args->classname = classname; - args->methodname = methodname; - args->filename = filename; - args->klass = klass; - args->name = name; - return TRUE; - } + VALUE name = rb_class_path(klass); + const char *classname, *filename; + const char *methodname = rb_id2name(id); + if (methodname && (filename = rb_source_location_cstr(&args->line_no)) != 0) { + if (NIL_P(name) || !(classname = StringValuePtr(name))) + classname = "<unknown>"; + args->classname = classname; + args->methodname = methodname; + args->filename = filename; + args->klass = klass; + args->name = name; + return TRUE; + } } return FALSE; } @@ -532,29 +532,29 @@ vm_stat(int argc, VALUE *argv, VALUE self) if (rb_check_arity(argc, 0, 1) == 1) { arg = argv[0]; - if (SYMBOL_P(arg)) - key = arg; - else if (RB_TYPE_P(arg, T_HASH)) - hash = arg; - else - rb_raise(rb_eTypeError, "non-hash or symbol given"); + if (SYMBOL_P(arg)) + key = arg; + else if (RB_TYPE_P(arg, T_HASH)) + hash = arg; + else + rb_raise(rb_eTypeError, "non-hash or symbol given"); } else { - hash = rb_hash_new(); + hash = rb_hash_new(); } #define S(s) sym_##s = ID2SYM(rb_intern_const(#s)) S(constant_cache_invalidations); S(constant_cache_misses); - S(class_serial); - S(global_cvar_state); + S(class_serial); + S(global_cvar_state); #undef S #define SET(name, attr) \ if (key == sym_##name) \ - return 
SERIALT2NUM(attr); \ + return SERIALT2NUM(attr); \ else if (hash != Qnil) \ - rb_hash_aset(hash, sym_##name, SERIALT2NUM(attr)); + rb_hash_aset(hash, sym_##name, SERIALT2NUM(attr)); SET(constant_cache_invalidations, ruby_vm_constant_cache_invalidations); SET(constant_cache_misses, ruby_vm_constant_cache_misses); @@ -563,7 +563,7 @@ vm_stat(int argc, VALUE *argv, VALUE self) #undef SET if (!NIL_P(key)) { /* matched key should return above */ - rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key)); + rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key)); } return hash; @@ -575,13 +575,13 @@ static void vm_set_top_stack(rb_execution_context_t *ec, const rb_iseq_t *iseq) { if (ISEQ_BODY(iseq)->type != ISEQ_TYPE_TOP) { - rb_raise(rb_eTypeError, "Not a toplevel InstructionSequence"); + rb_raise(rb_eTypeError, "Not a toplevel InstructionSequence"); } /* for return */ vm_push_frame(ec, iseq, VM_FRAME_MAGIC_TOP | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH, rb_ec_thread_ptr(ec)->top_self, - VM_BLOCK_HANDLER_NONE, - (VALUE)vm_cref_new_toplevel(ec), /* cref or me */ + VM_BLOCK_HANDLER_NONE, + (VALUE)vm_cref_new_toplevel(ec), /* cref or me */ ISEQ_BODY(iseq)->iseq_encoded, ec->cfp->sp, ISEQ_BODY(iseq)->local_table_size, ISEQ_BODY(iseq)->stack_max); } @@ -590,8 +590,8 @@ static void vm_set_eval_stack(rb_execution_context_t *ec, const rb_iseq_t *iseq, const rb_cref_t *cref, const struct rb_block *base_block) { vm_push_frame(ec, iseq, VM_FRAME_MAGIC_EVAL | VM_FRAME_FLAG_FINISH, - vm_block_self(base_block), VM_GUARDED_PREV_EP(vm_block_ep(base_block)), - (VALUE)cref, /* cref or me */ + vm_block_self(base_block), VM_GUARDED_PREV_EP(vm_block_ep(base_block)), + (VALUE)cref, /* cref or me */ ISEQ_BODY(iseq)->iseq_encoded, ec->cfp->sp, ISEQ_BODY(iseq)->local_table_size, ISEQ_BODY(iseq)->stack_max); @@ -610,7 +610,7 @@ vm_set_main_stack(rb_execution_context_t *ec, const rb_iseq_t *iseq) /* save binding */ if (ISEQ_BODY(iseq)->local_table_size > 0) { - vm_bind_update_env(toplevel_binding, bind, vm_make_env_object(ec, ec->cfp)); + vm_bind_update_env(toplevel_binding, bind, vm_make_env_object(ec, ec->cfp)); } } @@ -618,10 +618,10 @@ rb_control_frame_t * rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp) { while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) { - if (cfp->iseq) { - return (rb_control_frame_t *)cfp; - } - cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp); + if (cfp->iseq) { + return (rb_control_frame_t *)cfp; + } + cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp); } return 0; } @@ -630,10 +630,10 @@ MJIT_FUNC_EXPORTED rb_control_frame_t * rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp) { while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) { - if (VM_FRAME_RUBYFRAME_P(cfp)) { - return (rb_control_frame_t *)cfp; - } - cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp); + if (VM_FRAME_RUBYFRAME_P(cfp)) { + return (rb_control_frame_t *)cfp; + } + cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp); } return 0; } @@ -644,20 +644,20 @@ static rb_control_frame_t * vm_get_ruby_level_caller_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp) { if (VM_FRAME_RUBYFRAME_P(cfp)) { - return (rb_control_frame_t *)cfp; + return (rb_control_frame_t *)cfp; } cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp); while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) { - if (VM_FRAME_RUBYFRAME_P(cfp)) { - return (rb_control_frame_t *)cfp; - } + if (VM_FRAME_RUBYFRAME_P(cfp)) { + return (rb_control_frame_t *)cfp; + } - 
if (VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_PASSED) == FALSE) { - break; - } - cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp); + if (VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_PASSED) == FALSE) { + break; + } + cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp); } return 0; } @@ -682,14 +682,14 @@ rb_vm_rewind_cfp(rb_execution_context_t *ec, rb_control_frame_t *cfp) /* check skipped frame */ while (ec->cfp != cfp) { #if VMDEBUG - printf("skipped frame: %s\n", vm_frametype_name(ec->cfp)); + printf("skipped frame: %s\n", vm_frametype_name(ec->cfp)); #endif - if (VM_FRAME_TYPE(ec->cfp) != VM_FRAME_MAGIC_CFUNC) { - rb_vm_pop_frame(ec); - } - else { /* unlikely path */ - rb_vm_pop_cfunc_frame(); - } + if (VM_FRAME_TYPE(ec->cfp) != VM_FRAME_MAGIC_CFUNC) { + rb_vm_pop_frame(ec); + } + else { /* unlikely path */ + rb_vm_pop_cfunc_frame(); + } } } @@ -711,11 +711,11 @@ ruby_vm_run_at_exit_hooks(rb_vm_t *vm) rb_at_exit_list *l = vm->at_exit; while (l) { - rb_at_exit_list* t = l->next; - rb_vm_at_exit_func *func = l->func; - ruby_xfree(l); - l = t; - (*func)(vm); + rb_at_exit_list* t = l->next; + rb_vm_at_exit_func *func = l->func; + ruby_xfree(l); + l = t; + (*func)(vm); } } @@ -732,9 +732,9 @@ check_env(const rb_env_t *env) dp(env->ep[1]); ruby_debug_printf("ep: %10p\n", (void *)env->ep); if (rb_vm_env_prev_env(env)) { - fputs(">>\n", stderr); - check_env_value(rb_vm_env_prev_env(env)); - fputs("<<\n", stderr); + fputs(">>\n", stderr); + check_env_value(rb_vm_env_prev_env(env)); + fputs("<<\n", stderr); } return 1; } @@ -743,7 +743,7 @@ static VALUE check_env_value(const rb_env_t *env) { if (check_env(env)) { - return (VALUE)env; + return (VALUE)env; } rb_bug("invalid env"); return Qnil; /* unreachable */ @@ -775,11 +775,11 @@ vm_make_env_each(const rb_execution_context_t * const ec, rb_control_frame_t *co int local_size, env_size; if (VM_ENV_ESCAPED_P(ep)) { - return VM_ENV_ENVVAL(ep); + return VM_ENV_ENVVAL(ep); } if (!VM_ENV_LOCAL_P(ep)) { - const VALUE *prev_ep = VM_ENV_PREV_EP(ep); + const VALUE *prev_ep = VM_ENV_PREV_EP(ep); if (!VM_ENV_ESCAPED_P(prev_ep)) { rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp); @@ -793,16 +793,16 @@ vm_make_env_each(const rb_execution_context_t * const ec, rb_control_frame_t *co } } else { - VALUE block_handler = VM_ENV_BLOCK_HANDLER(ep); + VALUE block_handler = VM_ENV_BLOCK_HANDLER(ep); - if (block_handler != VM_BLOCK_HANDLER_NONE) { + if (block_handler != VM_BLOCK_HANDLER_NONE) { VALUE blockprocval = vm_block_handler_escape(ec, block_handler); - VM_STACK_ENV_WRITE(ep, VM_ENV_DATA_INDEX_SPECVAL, blockprocval); - } + VM_STACK_ENV_WRITE(ep, VM_ENV_DATA_INDEX_SPECVAL, blockprocval); + } } if (!VM_FRAME_RUBYFRAME_P(cfp)) { - local_size = VM_ENV_DATA_SIZE; + local_size = VM_ENV_DATA_SIZE; } else { local_size = ISEQ_BODY(cfp->iseq)->local_table_size + VM_ENV_DATA_SIZE; @@ -821,16 +821,16 @@ vm_make_env_each(const rb_execution_context_t * const ec, rb_control_frame_t *co */ env_size = local_size + - 1 /* envval */; + 1 /* envval */; env_body = ALLOC_N(VALUE, env_size); MEMCPY(env_body, ep - (local_size - 1 /* specval */), VALUE, local_size); #if 0 for (i = 0; i < local_size; i++) { - if (VM_FRAME_RUBYFRAME_P(cfp)) { - /* clear value stack for GC */ - ep[-local_size + i] = 0; - } + if (VM_FRAME_RUBYFRAME_P(cfp)) { + /* clear value stack for GC */ + ep[-local_size + i] = 0; + } } #endif @@ -851,7 +851,7 @@ vm_make_env_object(const rb_execution_context_t *ec, rb_control_frame_t *cfp) VALUE envval = vm_make_env_each(ec, cfp); if (PROCDEBUG) { - check_env_value((const rb_env_t 
*)envval); + check_env_value((const rb_env_t *)envval); } return envval; @@ -862,8 +862,8 @@ rb_vm_stack_to_heap(rb_execution_context_t *ec) { rb_control_frame_t *cfp = ec->cfp; while ((cfp = rb_vm_get_binding_creatable_next_cfp(ec, cfp)) != 0) { - vm_make_env_object(ec, cfp); - cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp); + vm_make_env_object(ec, cfp); + cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp); } } @@ -873,7 +873,7 @@ rb_vm_env_prev_env(const rb_env_t *env) const VALUE *ep = env->ep; if (VM_ENV_LOCAL_P(ep)) { - return NULL; + return NULL; } else { const VALUE *prev_ep = VM_ENV_PREV_EP(ep); @@ -897,7 +897,7 @@ collect_local_variables_in_env(const rb_env_t *env, const struct local_var_list { do { if (VM_ENV_FLAGS(env->ep, VM_ENV_FLAG_ISOLATED)) break; - collect_local_variables_in_iseq(env->iseq, vars); + collect_local_variables_in_iseq(env->iseq, vars); } while ((env = rb_vm_env_prev_env(env)) != NULL); } @@ -905,11 +905,11 @@ static int vm_collect_local_variables_in_heap(const VALUE *ep, const struct local_var_list *vars) { if (VM_ENV_ESCAPED_P(ep)) { - collect_local_variables_in_env(VM_ENV_ENVVAL_PTR(ep), vars); - return 1; + collect_local_variables_in_env(VM_ENV_ENVVAL_PTR(ep), vars); + return 1; } else { - return 0; + return 0; } } @@ -937,9 +937,9 @@ rb_iseq_local_variables(const rb_iseq_t *iseq) static VALUE vm_proc_create_from_captured(VALUE klass, - const struct rb_captured_block *captured, - enum rb_block_type block_type, - int8_t is_from_method, int8_t is_lambda) + const struct rb_captured_block *captured, + enum rb_block_type block_type, + int8_t is_from_method, int8_t is_lambda) { VALUE procval = rb_proc_alloc(klass); rb_proc_t *proc = RTYPEDDATA_DATA(procval); @@ -965,16 +965,16 @@ rb_vm_block_copy(VALUE obj, const struct rb_block *dst, const struct rb_block *s switch (vm_block_type(src)) { case block_type_iseq: case block_type_ifunc: - RB_OBJ_WRITE(obj, &dst->as.captured.self, src->as.captured.self); - RB_OBJ_WRITE(obj, &dst->as.captured.code.val, src->as.captured.code.val); - rb_vm_block_ep_update(obj, dst, src->as.captured.ep); - break; + RB_OBJ_WRITE(obj, &dst->as.captured.self, src->as.captured.self); + RB_OBJ_WRITE(obj, &dst->as.captured.code.val, src->as.captured.code.val); + rb_vm_block_ep_update(obj, dst, src->as.captured.ep); + break; case block_type_symbol: - RB_OBJ_WRITE(obj, &dst->as.symbol, src->as.symbol); - break; + RB_OBJ_WRITE(obj, &dst->as.symbol, src->as.symbol); + break; case block_type_proc: - RB_OBJ_WRITE(obj, &dst->as.proc, src->as.proc); - break; + RB_OBJ_WRITE(obj, &dst->as.proc, src->as.proc); + break; } } @@ -1214,15 +1214,15 @@ rb_vm_make_proc_lambda(const rb_execution_context_t *ec, const struct rb_capture VALUE procval; if (!VM_ENV_ESCAPED_P(captured->ep)) { - rb_control_frame_t *cfp = VM_CAPTURED_BLOCK_TO_CFP(captured); - vm_make_env_object(ec, cfp); + rb_control_frame_t *cfp = VM_CAPTURED_BLOCK_TO_CFP(captured); + vm_make_env_object(ec, cfp); } VM_ASSERT(VM_EP_IN_HEAP_P(ec, captured->ep)); VM_ASSERT(imemo_type_p(captured->code.val, imemo_iseq) || - imemo_type_p(captured->code.val, imemo_ifunc)); + imemo_type_p(captured->code.val, imemo_ifunc)); procval = vm_proc_create_from_captured(klass, captured, - imemo_type(captured->code.val) == imemo_iseq ? block_type_iseq : block_type_ifunc, FALSE, is_lambda); + imemo_type(captured->code.val) == imemo_iseq ? 
block_type_iseq : block_type_ifunc, FALSE, is_lambda); return procval; } @@ -1237,7 +1237,7 @@ rb_vm_make_binding(const rb_execution_context_t *ec, const rb_control_frame_t *s rb_binding_t *bind; if (cfp == 0 || ruby_level_cfp == 0) { - rb_raise(rb_eRuntimeError, "Can't create Binding Object on top of Fiber."); + rb_raise(rb_eRuntimeError, "Can't create Binding Object on top of Fiber."); } if (!VM_FRAME_RUBYFRAME_P(src_cfp) && !VM_FRAME_RUBYFRAME_P(RUBY_VM_PREVIOUS_CONTROL_FRAME(src_cfp))) { @@ -1288,8 +1288,8 @@ rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const I iseq = rb_iseq_new(&ast, ISEQ_BODY(base_iseq)->location.label, path, realpath, base_iseq, ISEQ_TYPE_EVAL); } else { - VALUE tempstr = rb_fstring_lit("<temp>"); - iseq = rb_iseq_new_top(&ast, tempstr, tempstr, tempstr, NULL); + VALUE tempstr = rb_fstring_lit("<temp>"); + iseq = rb_iseq_new_top(&ast, tempstr, tempstr, tempstr, NULL); } tmp_node.nd_tbl = 0; /* reset table */ ALLOCV_END(idtmp); @@ -1310,10 +1310,10 @@ invoke_block(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE self, cons int arg_size = ISEQ_BODY(iseq)->param.size; vm_push_frame(ec, iseq, type | VM_FRAME_FLAG_FINISH, self, - VM_GUARDED_PREV_EP(captured->ep), - (VALUE)cref, /* cref or method */ + VM_GUARDED_PREV_EP(captured->ep), + (VALUE)cref, /* cref or method */ ISEQ_BODY(iseq)->iseq_encoded + opt_pc, - ec->cfp->sp + arg_size, + ec->cfp->sp + arg_size, ISEQ_BODY(iseq)->local_table_size - arg_size, ISEQ_BODY(iseq)->stack_max); return vm_exec(ec, true); @@ -1329,10 +1329,10 @@ invoke_bmethod(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE self, co VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD); vm_push_frame(ec, iseq, type | VM_FRAME_FLAG_BMETHOD, self, - VM_GUARDED_PREV_EP(captured->ep), - (VALUE)me, + VM_GUARDED_PREV_EP(captured->ep), + (VALUE)me, ISEQ_BODY(iseq)->iseq_encoded + opt_pc, - ec->cfp->sp + arg_size, + ec->cfp->sp + arg_size, ISEQ_BODY(iseq)->local_table_size - arg_size, ISEQ_BODY(iseq)->stack_max); @@ -1349,7 +1349,7 @@ ALWAYS_INLINE(static VALUE static inline VALUE invoke_iseq_block_from_c(rb_execution_context_t *ec, const struct rb_captured_block *captured, - VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler, + VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler, const rb_cref_t *cref, int is_lambda, const rb_callable_method_entry_t *me) { const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq); @@ -1364,49 +1364,49 @@ invoke_iseq_block_from_c(rb_execution_context_t *ec, const struct rb_captured_bl vm_check_canary(ec, sp); cfp->sp = sp + argc; for (i=0; i<argc; i++) { - sp[i] = argv[i]; + sp[i] = argv[i]; } opt_pc = vm_yield_setup_args(ec, iseq, argc, sp, kw_splat, passed_block_handler, - (is_lambda ? arg_setup_method : arg_setup_block)); + (is_lambda ? 
arg_setup_method : arg_setup_block)); cfp->sp = sp; if (me == NULL) { - return invoke_block(ec, iseq, self, captured, cref, type, opt_pc); + return invoke_block(ec, iseq, self, captured, cref, type, opt_pc); } else { - return invoke_bmethod(ec, iseq, self, captured, me, type, opt_pc); + return invoke_bmethod(ec, iseq, self, captured, me, type, opt_pc); } } static inline VALUE invoke_block_from_c_bh(rb_execution_context_t *ec, VALUE block_handler, - int argc, const VALUE *argv, - int kw_splat, VALUE passed_block_handler, const rb_cref_t *cref, - int is_lambda, int force_blockarg) + int argc, const VALUE *argv, + int kw_splat, VALUE passed_block_handler, const rb_cref_t *cref, + int is_lambda, int force_blockarg) { again: switch (vm_block_handler_type(block_handler)) { case block_handler_type_iseq: - { - const struct rb_captured_block *captured = VM_BH_TO_ISEQ_BLOCK(block_handler); - return invoke_iseq_block_from_c(ec, captured, captured->self, - argc, argv, kw_splat, passed_block_handler, + { + const struct rb_captured_block *captured = VM_BH_TO_ISEQ_BLOCK(block_handler); + return invoke_iseq_block_from_c(ec, captured, captured->self, + argc, argv, kw_splat, passed_block_handler, cref, is_lambda, NULL); - } + } case block_handler_type_ifunc: - return vm_yield_with_cfunc(ec, VM_BH_TO_IFUNC_BLOCK(block_handler), - VM_BH_TO_IFUNC_BLOCK(block_handler)->self, + return vm_yield_with_cfunc(ec, VM_BH_TO_IFUNC_BLOCK(block_handler), + VM_BH_TO_IFUNC_BLOCK(block_handler)->self, argc, argv, kw_splat, passed_block_handler, NULL); case block_handler_type_symbol: - return vm_yield_with_symbol(ec, VM_BH_TO_SYMBOL(block_handler), - argc, argv, kw_splat, passed_block_handler); + return vm_yield_with_symbol(ec, VM_BH_TO_SYMBOL(block_handler), + argc, argv, kw_splat, passed_block_handler); case block_handler_type_proc: - if (force_blockarg == FALSE) { - is_lambda = block_proc_is_lambda(VM_BH_TO_PROC(block_handler)); - } - block_handler = vm_proc_to_block_handler(VM_BH_TO_PROC(block_handler)); - goto again; + if (force_blockarg == FALSE) { + is_lambda = block_proc_is_lambda(VM_BH_TO_PROC(block_handler)); + } + block_handler = vm_proc_to_block_handler(VM_BH_TO_PROC(block_handler)); + goto again; } VM_UNREACHABLE(invoke_block_from_c_splattable); return Qundef; @@ -1418,7 +1418,7 @@ check_block_handler(rb_execution_context_t *ec) VALUE block_handler = VM_CF_BLOCK_HANDLER(ec->cfp); vm_block_handler_verify(block_handler); if (UNLIKELY(block_handler == VM_BLOCK_HANDLER_NONE)) { - rb_vm_localjump_error("no block given", Qnil, 0); + rb_vm_localjump_error("no block given", Qnil, 0); } return block_handler; @@ -1429,7 +1429,7 @@ vm_yield_with_cref(rb_execution_context_t *ec, int argc, const VALUE *argv, int { return invoke_block_from_c_bh(ec, check_block_handler(ec), argc, argv, kw_splat, VM_BLOCK_HANDLER_NONE, - cref, is_lambda, FALSE); + cref, is_lambda, FALSE); } static VALUE @@ -1443,7 +1443,7 @@ vm_yield_with_block(rb_execution_context_t *ec, int argc, const VALUE *argv, VAL { return invoke_block_from_c_bh(ec, check_block_handler(ec), argc, argv, kw_splat, block_handler, - NULL, FALSE, FALSE); + NULL, FALSE, FALSE); } static VALUE @@ -1461,7 +1461,7 @@ ALWAYS_INLINE(static VALUE static inline VALUE invoke_block_from_c_proc(rb_execution_context_t *ec, const rb_proc_t *proc, - VALUE self, int argc, const VALUE *argv, + VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler, int is_lambda, const rb_callable_method_entry_t *me) { @@ -1486,11 +1486,11 @@ 
invoke_block_from_c_proc(rb_execution_context_t *ec, const rb_proc_t *proc, } return vm_yield_with_cfunc(ec, &block->as.captured, self, argc, argv, kw_splat, passed_block_handler, me); case block_type_symbol: - return vm_yield_with_symbol(ec, block->as.symbol, argc, argv, kw_splat, passed_block_handler); + return vm_yield_with_symbol(ec, block->as.symbol, argc, argv, kw_splat, passed_block_handler); case block_type_proc: - is_lambda = block_proc_is_lambda(block->as.proc); - block = vm_proc_block(block->as.proc); - goto again; + is_lambda = block_proc_is_lambda(block->as.proc); + block = vm_proc_block(block->as.proc); + goto again; } VM_UNREACHABLE(invoke_block_from_c_proc); return Qundef; @@ -1498,7 +1498,7 @@ invoke_block_from_c_proc(rb_execution_context_t *ec, const rb_proc_t *proc, static VALUE vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self, - int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler) + int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler) { return invoke_block_from_c_proc(ec, proc, self, argc, argv, kw_splat, passed_block_handler, proc->is_lambda, NULL); } @@ -1512,7 +1512,7 @@ rb_vm_invoke_bmethod(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self, MJIT_FUNC_EXPORTED VALUE rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, - int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler) + int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler) { VALUE self = vm_block_self(&proc->block); vm_block_handler_verify(passed_block_handler); @@ -1521,7 +1521,7 @@ rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, return rb_vm_invoke_bmethod(ec, proc, self, argc, argv, kw_splat, passed_block_handler, NULL); } else { - return vm_invoke_proc(ec, proc, self, argc, argv, kw_splat, passed_block_handler); + return vm_invoke_proc(ec, proc, self, argc, argv, kw_splat, passed_block_handler); } } @@ -1535,7 +1535,7 @@ rb_vm_invoke_proc_with_self(rb_execution_context_t *ec, rb_proc_t *proc, VALUE s return rb_vm_invoke_bmethod(ec, proc, self, argc, argv, kw_splat, passed_block_handler, NULL); } else { - return vm_invoke_proc(ec, proc, self, argc, argv, kw_splat, passed_block_handler); + return vm_invoke_proc(ec, proc, self, argc, argv, kw_splat, passed_block_handler); } } @@ -1545,10 +1545,10 @@ static rb_control_frame_t * vm_normal_frame(const rb_execution_context_t *ec, rb_control_frame_t *cfp) { while (cfp->pc == 0) { - cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp); - if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) { - return 0; - } + cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp); + if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) { + return 0; + } } return cfp; } @@ -1612,10 +1612,10 @@ rb_sourcefile(void) const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp); if (cfp) { - return RSTRING_PTR(rb_iseq_path(cfp->iseq)); + return RSTRING_PTR(rb_iseq_path(cfp->iseq)); } else { - return 0; + return 0; } } @@ -1626,10 +1626,10 @@ rb_sourceline(void) const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp); if (cfp) { - return rb_vm_get_sourceline(cfp); + return rb_vm_get_sourceline(cfp); } else { - return 0; + return 0; } } @@ -1640,12 +1640,12 @@ rb_source_location(int *pline) const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp); if (cfp && VM_FRAME_RUBYFRAME_P(cfp)) { - if (pline) *pline = rb_vm_get_sourceline(cfp); - return rb_iseq_path(cfp->iseq); + if (pline) *pline = rb_vm_get_sourceline(cfp); + return 
rb_iseq_path(cfp->iseq); } else { - if (pline) *pline = 0; - return Qnil; + if (pline) *pline = 0; + return Qnil; } } @@ -1692,9 +1692,9 @@ void debug_cref(rb_cref_t *cref) { while (cref) { - dp(CREF_CLASS(cref)); - printf("%ld\n", CREF_VISI(cref)); - cref = CREF_NEXT(cref); + dp(CREF_CLASS(cref)); + printf("%ld\n", CREF_VISI(cref)); + cref = CREF_NEXT(cref); } } #endif @@ -1706,7 +1706,7 @@ rb_vm_cbase(void) const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp); if (cfp == 0) { - rb_raise(rb_eRuntimeError, "Can't call on top of Fiber or Thread"); + rb_raise(rb_eRuntimeError, "Can't call on top of Fiber or Thread"); } return vm_get_cbase(cfp->ep); } @@ -1722,23 +1722,23 @@ make_localjump_error(const char *mesg, VALUE value, int reason) switch (reason) { case TAG_BREAK: - CONST_ID(id, "break"); - break; + CONST_ID(id, "break"); + break; case TAG_REDO: - CONST_ID(id, "redo"); - break; + CONST_ID(id, "redo"); + break; case TAG_RETRY: - CONST_ID(id, "retry"); - break; + CONST_ID(id, "retry"); + break; case TAG_NEXT: - CONST_ID(id, "next"); - break; + CONST_ID(id, "next"); + break; case TAG_RETURN: - CONST_ID(id, "return"); - break; + CONST_ID(id, "return"); + break; default: - CONST_ID(id, "noreason"); - break; + CONST_ID(id, "noreason"); + break; } rb_iv_set(exc, "@exit_value", value); rb_iv_set(exc, "@reason", ID2SYM(id)); @@ -1759,27 +1759,27 @@ rb_vm_make_jump_tag_but_local_jump(int state, VALUE val) switch (state) { case TAG_RETURN: - mesg = "unexpected return"; - break; + mesg = "unexpected return"; + break; case TAG_BREAK: - mesg = "unexpected break"; - break; + mesg = "unexpected break"; + break; case TAG_NEXT: - mesg = "unexpected next"; - break; + mesg = "unexpected next"; + break; case TAG_REDO: - mesg = "unexpected redo"; - val = Qnil; - break; + mesg = "unexpected redo"; + val = Qnil; + break; case TAG_RETRY: - mesg = "retry outside of rescue clause"; - val = Qnil; - break; + mesg = "retry outside of rescue clause"; + val = Qnil; + break; default: - return Qnil; + return Qnil; } if (val == Qundef) { - val = GET_EC()->tag->retval; + val = GET_EC()->tag->retval; } return make_localjump_error(mesg, val, state); } @@ -1796,7 +1796,7 @@ static rb_control_frame_t * next_not_local_frame(rb_control_frame_t *cfp) { while (VM_ENV_LOCAL_P(cfp->ep)) { - cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp); + cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp); } return cfp; } @@ -1811,7 +1811,7 @@ vm_iter_break(rb_execution_context_t *ec, VALUE val) const rb_control_frame_t *target_cfp = rb_vm_search_cf_from_ep(ec, cfp, ep); if (!target_cfp) { - rb_vm_localjump_error("unexpected break", val, TAG_BREAK); + rb_vm_localjump_error("unexpected break", val, TAG_BREAK); } ec->errinfo = (VALUE)THROW_DATA_NEW(val, target_cfp, TAG_BREAK); @@ -1876,9 +1876,9 @@ vm_redefinition_check_method_type(const rb_method_entry_t *me) switch (def->type) { case VM_METHOD_TYPE_CFUNC: case VM_METHOD_TYPE_OPTIMIZED: - return TRUE; + return TRUE; default: - return FALSE; + return FALSE; } } @@ -1926,11 +1926,11 @@ add_opt_method(VALUE klass, ID mid, VALUE bop) const rb_method_entry_t *me = rb_method_entry_at(klass, mid); if (me && vm_redefinition_check_method_type(me)) { - st_insert(vm_opt_method_def_table, (st_data_t)me->def, (st_data_t)bop); - st_insert(vm_opt_mid_table, (st_data_t)mid, (st_data_t)Qtrue); + st_insert(vm_opt_method_def_table, (st_data_t)me->def, (st_data_t)bop); + st_insert(vm_opt_mid_table, (st_data_t)mid, (st_data_t)Qtrue); } else { - rb_bug("undefined optimized method: %s", rb_id2name(mid)); + 
rb_bug("undefined optimized method: %s", rb_id2name(mid)); } } @@ -1952,7 +1952,7 @@ vm_init_redefined_flag(void) OP(MOD, MOD), (C(Integer), C(Float)); OP(Eq, EQ), (C(Integer), C(Float), C(String), C(Symbol)); OP(Eqq, EQQ), (C(Integer), C(Float), C(Symbol), C(String), - C(NilClass), C(TrueClass), C(FalseClass)); + C(NilClass), C(TrueClass), C(FalseClass)); OP(LT, LT), (C(Integer), C(Float)); OP(LE, LE), (C(Integer), C(Float)); OP(GT, GT), (C(Integer), C(Float)); @@ -1993,7 +1993,7 @@ vm_frametype_name(const rb_control_frame_t *cfp) case VM_FRAME_MAGIC_EVAL: return "eval"; case VM_FRAME_MAGIC_RESCUE: return "rescue"; default: - rb_bug("unknown frame"); + rb_bug("unknown frame"); } } #endif @@ -2002,12 +2002,12 @@ static VALUE frame_return_value(const struct vm_throw_data *err) { if (THROW_DATA_P(err) && - THROW_DATA_STATE(err) == TAG_BREAK && - THROW_DATA_CONSUMED_P(err) == FALSE) { - return THROW_DATA_VAL(err); + THROW_DATA_STATE(err) == TAG_BREAK && + THROW_DATA_CONSUMED_P(err) == FALSE) { + return THROW_DATA_VAL(err); } else { - return Qnil; + return Qnil; } } @@ -2041,7 +2041,7 @@ hook_before_rewind(rb_execution_context_t *ec, const rb_control_frame_t *cfp, bool cfp_returning_with_value, int state, struct vm_throw_data *err) { if (state == TAG_RAISE && RBASIC(err)->klass == rb_eSysStackError) { - return; + return; } else { const rb_iseq_t *iseq = cfp->iseq; @@ -2293,17 +2293,17 @@ vm_exec(rb_execution_context_t *ec, bool mjit_enable_p) goto vm_loop_start; /* fallback to the VM */ } else { - result = ec->errinfo; + result = ec->errinfo; rb_ec_raised_reset(ec, RAISED_STACKOVERFLOW | RAISED_NOMEMORY); while ((result = vm_exec_handle_exception(ec, state, result, &initial)) == Qundef) { /* caught a jump, exec the handler */ result = vm_exec_core(ec, initial); - vm_loop_start: - VM_ASSERT(ec->tag == &_tag); - /* when caught `throw`, `tag.state` is set. */ - if ((state = _tag.state) == TAG_NONE) break; - _tag.state = TAG_NONE; - } + vm_loop_start: + VM_ASSERT(ec->tag == &_tag); + /* when caught `throw`, `tag.state` is set. 
*/ + if ((state = _tag.state) == TAG_NONE) break; + _tag.state = TAG_NONE; + } } EC_POP_TAG(); return result; @@ -2317,121 +2317,121 @@ vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state, struct vm_throw_data *err = (struct vm_throw_data *)errinfo; for (;;) { - unsigned int i; - const struct iseq_catch_table_entry *entry; - const struct iseq_catch_table *ct; - unsigned long epc, cont_pc, cont_sp; - const rb_iseq_t *catch_iseq; - rb_control_frame_t *cfp; - VALUE type; - const rb_control_frame_t *escape_cfp; - - cont_pc = cont_sp = 0; - catch_iseq = NULL; - - while (ec->cfp->pc == 0 || ec->cfp->iseq == 0) { - if (UNLIKELY(VM_FRAME_TYPE(ec->cfp) == VM_FRAME_MAGIC_CFUNC)) { - EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_C_RETURN, ec->cfp->self, - rb_vm_frame_method_entry(ec->cfp)->def->original_id, - rb_vm_frame_method_entry(ec->cfp)->called_id, - rb_vm_frame_method_entry(ec->cfp)->owner, Qnil); - RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, - rb_vm_frame_method_entry(ec->cfp)->owner, - rb_vm_frame_method_entry(ec->cfp)->def->original_id); - } - rb_vm_pop_frame(ec); - } - - cfp = ec->cfp; + unsigned int i; + const struct iseq_catch_table_entry *entry; + const struct iseq_catch_table *ct; + unsigned long epc, cont_pc, cont_sp; + const rb_iseq_t *catch_iseq; + rb_control_frame_t *cfp; + VALUE type; + const rb_control_frame_t *escape_cfp; + + cont_pc = cont_sp = 0; + catch_iseq = NULL; + + while (ec->cfp->pc == 0 || ec->cfp->iseq == 0) { + if (UNLIKELY(VM_FRAME_TYPE(ec->cfp) == VM_FRAME_MAGIC_CFUNC)) { + EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_C_RETURN, ec->cfp->self, + rb_vm_frame_method_entry(ec->cfp)->def->original_id, + rb_vm_frame_method_entry(ec->cfp)->called_id, + rb_vm_frame_method_entry(ec->cfp)->owner, Qnil); + RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, + rb_vm_frame_method_entry(ec->cfp)->owner, + rb_vm_frame_method_entry(ec->cfp)->def->original_id); + } + rb_vm_pop_frame(ec); + } + + cfp = ec->cfp; epc = cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded; - escape_cfp = NULL; - if (state == TAG_BREAK || state == TAG_RETURN) { - escape_cfp = THROW_DATA_CATCH_FRAME(err); - - if (cfp == escape_cfp) { - if (state == TAG_RETURN) { - if (!VM_FRAME_FINISHED_P(cfp)) { - THROW_DATA_CATCH_FRAME_SET(err, cfp + 1); - THROW_DATA_STATE_SET(err, state = TAG_BREAK); - } - else { + escape_cfp = NULL; + if (state == TAG_BREAK || state == TAG_RETURN) { + escape_cfp = THROW_DATA_CATCH_FRAME(err); + + if (cfp == escape_cfp) { + if (state == TAG_RETURN) { + if (!VM_FRAME_FINISHED_P(cfp)) { + THROW_DATA_CATCH_FRAME_SET(err, cfp + 1); + THROW_DATA_STATE_SET(err, state = TAG_BREAK); + } + else { ct = ISEQ_BODY(cfp->iseq)->catch_table; - if (ct) for (i = 0; i < ct->size; i++) { - entry = UNALIGNED_MEMBER_PTR(ct, entries[i]); - if (entry->start < epc && entry->end >= epc) { - if (entry->type == CATCH_TYPE_ENSURE) { - catch_iseq = entry->iseq; - cont_pc = entry->cont; - cont_sp = entry->sp; - break; - } - } - } - if (catch_iseq == NULL) { - ec->errinfo = Qnil; - THROW_DATA_CATCH_FRAME_SET(err, cfp + 1); + if (ct) for (i = 0; i < ct->size; i++) { + entry = UNALIGNED_MEMBER_PTR(ct, entries[i]); + if (entry->start < epc && entry->end >= epc) { + if (entry->type == CATCH_TYPE_ENSURE) { + catch_iseq = entry->iseq; + cont_pc = entry->cont; + cont_sp = entry->sp; + break; + } + } + } + if (catch_iseq == NULL) { + ec->errinfo = Qnil; + THROW_DATA_CATCH_FRAME_SET(err, cfp + 1); // cfp == escape_cfp here so calling with cfp_returning_with_value = true hook_before_rewind(ec, ec->cfp, true, state, err); - 
rb_vm_pop_frame(ec); - return THROW_DATA_VAL(err); - } - } - /* through */ - } - else { - /* TAG_BREAK */ + rb_vm_pop_frame(ec); + return THROW_DATA_VAL(err); + } + } + /* through */ + } + else { + /* TAG_BREAK */ #if OPT_STACK_CACHING - *initial = THROW_DATA_VAL(err); + *initial = THROW_DATA_VAL(err); #else - *ec->cfp->sp++ = THROW_DATA_VAL(err); + *ec->cfp->sp++ = THROW_DATA_VAL(err); #endif - ec->errinfo = Qnil; - return Qundef; - } - } - } + ec->errinfo = Qnil; + return Qundef; + } + } + } - if (state == TAG_RAISE) { + if (state == TAG_RAISE) { ct = ISEQ_BODY(cfp->iseq)->catch_table; - if (ct) for (i = 0; i < ct->size; i++) { - entry = UNALIGNED_MEMBER_PTR(ct, entries[i]); - if (entry->start < epc && entry->end >= epc) { - - if (entry->type == CATCH_TYPE_RESCUE || - entry->type == CATCH_TYPE_ENSURE) { - catch_iseq = entry->iseq; - cont_pc = entry->cont; - cont_sp = entry->sp; - break; - } - } - } - } - else if (state == TAG_RETRY) { + if (ct) for (i = 0; i < ct->size; i++) { + entry = UNALIGNED_MEMBER_PTR(ct, entries[i]); + if (entry->start < epc && entry->end >= epc) { + + if (entry->type == CATCH_TYPE_RESCUE || + entry->type == CATCH_TYPE_ENSURE) { + catch_iseq = entry->iseq; + cont_pc = entry->cont; + cont_sp = entry->sp; + break; + } + } + } + } + else if (state == TAG_RETRY) { ct = ISEQ_BODY(cfp->iseq)->catch_table; - if (ct) for (i = 0; i < ct->size; i++) { - entry = UNALIGNED_MEMBER_PTR(ct, entries[i]); - if (entry->start < epc && entry->end >= epc) { - - if (entry->type == CATCH_TYPE_ENSURE) { - catch_iseq = entry->iseq; - cont_pc = entry->cont; - cont_sp = entry->sp; - break; - } - else if (entry->type == CATCH_TYPE_RETRY) { - const rb_control_frame_t *escape_cfp; - escape_cfp = THROW_DATA_CATCH_FRAME(err); - if (cfp == escape_cfp) { + if (ct) for (i = 0; i < ct->size; i++) { + entry = UNALIGNED_MEMBER_PTR(ct, entries[i]); + if (entry->start < epc && entry->end >= epc) { + + if (entry->type == CATCH_TYPE_ENSURE) { + catch_iseq = entry->iseq; + cont_pc = entry->cont; + cont_sp = entry->sp; + break; + } + else if (entry->type == CATCH_TYPE_RETRY) { + const rb_control_frame_t *escape_cfp; + escape_cfp = THROW_DATA_CATCH_FRAME(err); + if (cfp == escape_cfp) { cfp->pc = ISEQ_BODY(cfp->iseq)->iseq_encoded + entry->cont; - ec->errinfo = Qnil; - return Qundef; - } - } - } - } - } + ec->errinfo = Qnil; + return Qundef; + } + } + } + } + } else if ((state == TAG_BREAK && !escape_cfp) || (state == TAG_REDO) || (state == TAG_NEXT)) { @@ -2443,88 +2443,88 @@ vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state, }[state]; ct = ISEQ_BODY(cfp->iseq)->catch_table; - if (ct) for (i = 0; i < ct->size; i++) { - entry = UNALIGNED_MEMBER_PTR(ct, entries[i]); - - if (entry->start < epc && entry->end >= epc) { - if (entry->type == CATCH_TYPE_ENSURE) { - catch_iseq = entry->iseq; - cont_pc = entry->cont; - cont_sp = entry->sp; - break; - } - else if (entry->type == type) { + if (ct) for (i = 0; i < ct->size; i++) { + entry = UNALIGNED_MEMBER_PTR(ct, entries[i]); + + if (entry->start < epc && entry->end >= epc) { + if (entry->type == CATCH_TYPE_ENSURE) { + catch_iseq = entry->iseq; + cont_pc = entry->cont; + cont_sp = entry->sp; + break; + } + else if (entry->type == type) { cfp->pc = ISEQ_BODY(cfp->iseq)->iseq_encoded + entry->cont; - cfp->sp = vm_base_ptr(cfp) + entry->sp; + cfp->sp = vm_base_ptr(cfp) + entry->sp; - if (state != TAG_REDO) { + if (state != TAG_REDO) { #if OPT_STACK_CACHING - *initial = THROW_DATA_VAL(err); + *initial = THROW_DATA_VAL(err); #else - 
*ec->cfp->sp++ = THROW_DATA_VAL(err); + *ec->cfp->sp++ = THROW_DATA_VAL(err); #endif - } - ec->errinfo = Qnil; - VM_ASSERT(ec->tag->state == TAG_NONE); - return Qundef; - } - } - } - } - else { + } + ec->errinfo = Qnil; + VM_ASSERT(ec->tag->state == TAG_NONE); + return Qundef; + } + } + } + } + else { ct = ISEQ_BODY(cfp->iseq)->catch_table; - if (ct) for (i = 0; i < ct->size; i++) { - entry = UNALIGNED_MEMBER_PTR(ct, entries[i]); - if (entry->start < epc && entry->end >= epc) { - - if (entry->type == CATCH_TYPE_ENSURE) { - catch_iseq = entry->iseq; - cont_pc = entry->cont; - cont_sp = entry->sp; - break; - } - } - } - } - - if (catch_iseq != NULL) { /* found catch table */ - /* enter catch scope */ - const int arg_size = 1; - - rb_iseq_check(catch_iseq); - cfp->sp = vm_base_ptr(cfp) + cont_sp; + if (ct) for (i = 0; i < ct->size; i++) { + entry = UNALIGNED_MEMBER_PTR(ct, entries[i]); + if (entry->start < epc && entry->end >= epc) { + + if (entry->type == CATCH_TYPE_ENSURE) { + catch_iseq = entry->iseq; + cont_pc = entry->cont; + cont_sp = entry->sp; + break; + } + } + } + } + + if (catch_iseq != NULL) { /* found catch table */ + /* enter catch scope */ + const int arg_size = 1; + + rb_iseq_check(catch_iseq); + cfp->sp = vm_base_ptr(cfp) + cont_sp; cfp->pc = ISEQ_BODY(cfp->iseq)->iseq_encoded + cont_pc; - /* push block frame */ - cfp->sp[0] = (VALUE)err; - vm_push_frame(ec, catch_iseq, VM_FRAME_MAGIC_RESCUE, - cfp->self, - VM_GUARDED_PREV_EP(cfp->ep), - 0, /* cref or me */ + /* push block frame */ + cfp->sp[0] = (VALUE)err; + vm_push_frame(ec, catch_iseq, VM_FRAME_MAGIC_RESCUE, + cfp->self, + VM_GUARDED_PREV_EP(cfp->ep), + 0, /* cref or me */ ISEQ_BODY(catch_iseq)->iseq_encoded, - cfp->sp + arg_size /* push value */, + cfp->sp + arg_size /* push value */, ISEQ_BODY(catch_iseq)->local_table_size - arg_size, ISEQ_BODY(catch_iseq)->stack_max); - state = 0; - ec->tag->state = TAG_NONE; - ec->errinfo = Qnil; + state = 0; + ec->tag->state = TAG_NONE; + ec->errinfo = Qnil; - return Qundef; - } - else { - hook_before_rewind(ec, ec->cfp, (cfp == escape_cfp), state, err); + return Qundef; + } + else { + hook_before_rewind(ec, ec->cfp, (cfp == escape_cfp), state, err); - if (VM_FRAME_FINISHED_P(ec->cfp)) { - rb_vm_pop_frame(ec); - ec->errinfo = (VALUE)err; - ec->tag = ec->tag->prev; - EC_JUMP_TAG(ec, state); - } - else { - rb_vm_pop_frame(ec); - } - } + if (VM_FRAME_FINISHED_P(ec->cfp)) { + rb_vm_pop_frame(ec); + ec->errinfo = (VALUE)err; + ec->tag = ec->tag->prev; + EC_JUMP_TAG(ec, state); + } + else { + rb_vm_pop_frame(ec); + } + } } } @@ -2557,13 +2557,13 @@ rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *cal const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(cfp); if (me) { - if (idp) *idp = me->def->original_id; - if (called_idp) *called_idp = me->called_id; - if (klassp) *klassp = me->owner; - return TRUE; + if (idp) *idp = me->def->original_id; + if (called_idp) *called_idp = me->called_id; + if (klassp) *klassp = me->owner; + return TRUE; } else { - return FALSE; + return FALSE; } } @@ -2581,7 +2581,7 @@ rb_frame_method_id_and_class(ID *idp, VALUE *klassp) VALUE rb_vm_call_cfunc(VALUE recv, VALUE (*func)(VALUE), VALUE arg, - VALUE block_handler, VALUE filename) + VALUE block_handler, VALUE filename) { rb_execution_context_t *ec = GET_EC(); const rb_control_frame_t *reg_cfp = ec->cfp; @@ -2589,9 +2589,9 @@ rb_vm_call_cfunc(VALUE recv, VALUE (*func)(VALUE), VALUE arg, VALUE val; vm_push_frame(ec, iseq, VM_FRAME_MAGIC_TOP | VM_ENV_FLAG_LOCAL | 
VM_FRAME_FLAG_FINISH, - recv, block_handler, - (VALUE)vm_cref_new_toplevel(ec), /* cref or me */ - 0, reg_cfp->sp, 0, 0); + recv, block_handler, + (VALUE)vm_cref_new_toplevel(ec), /* cref or me */ + 0, reg_cfp->sp, 0, 0); val = (*func)(arg); @@ -2675,17 +2675,17 @@ rb_vm_mark(void *ptr) RUBY_MARK_ENTER("vm"); RUBY_GC_INFO("-------------------------------------------------\n"); if (ptr) { - rb_vm_t *vm = ptr; + rb_vm_t *vm = ptr; rb_ractor_t *r = 0; long i, len; const VALUE *obj_ary; - ccan_list_for_each(&vm->ractor.set, r, vmlr_node) { + ccan_list_for_each(&vm->ractor.set, r, vmlr_node) { // ractor.set only contains blocking or running ractors VM_ASSERT(rb_ractor_status_p(r, ractor_blocking) || rb_ractor_status_p(r, ractor_running)); rb_gc_mark(rb_ractor_self(r)); - } + } rb_gc_mark_movable(vm->mark_object_ary); @@ -2718,9 +2718,9 @@ rb_vm_mark(void *ptr) /* Prevent classes from moving */ rb_mark_tbl(vm->defined_module_hash); - if (vm->loading_table) { - rb_mark_tbl(vm->loading_table); - } + if (vm->loading_table) { + rb_mark_tbl(vm->loading_table); + } rb_gc_mark_values(RUBY_NSIG, vm->trap_list.cmd); @@ -2779,34 +2779,34 @@ ruby_vm_destruct(rb_vm_t *vm) RUBY_FREE_ENTER("vm"); if (vm) { - rb_thread_t *th = vm->ractor.main_thread; - struct rb_objspace *objspace = vm->objspace; + rb_thread_t *th = vm->ractor.main_thread; + struct rb_objspace *objspace = vm->objspace; vm->ractor.main_thread = NULL; if (th) { rb_fiber_reset_root_local_storage(th); - thread_free(th); - } - rb_vm_living_threads_init(vm); - ruby_vm_run_at_exit_hooks(vm); - if (vm->loading_table) { - st_foreach(vm->loading_table, free_loading_table_entry, 0); - st_free_table(vm->loading_table); - vm->loading_table = 0; - } - if (vm->frozen_strings) { - st_free_table(vm->frozen_strings); - vm->frozen_strings = 0; - } - RB_ALTSTACK_FREE(vm->main_altstack); - if (objspace) { - rb_objspace_free(objspace); - } + thread_free(th); + } + rb_vm_living_threads_init(vm); + ruby_vm_run_at_exit_hooks(vm); + if (vm->loading_table) { + st_foreach(vm->loading_table, free_loading_table_entry, 0); + st_free_table(vm->loading_table); + vm->loading_table = 0; + } + if (vm->frozen_strings) { + st_free_table(vm->frozen_strings); + vm->frozen_strings = 0; + } + RB_ALTSTACK_FREE(vm->main_altstack); + if (objspace) { + rb_objspace_free(objspace); + } rb_native_mutex_destroy(&vm->waitpid_lock); rb_native_mutex_destroy(&vm->workqueue_lock); - /* after freeing objspace, you *can't* use ruby_xfree() */ - ruby_mimfree(vm); - ruby_current_vm_ptr = NULL; + /* after freeing objspace, you *can't* use ruby_xfree() */ + ruby_mimfree(vm); + ruby_current_vm_ptr = NULL; } RUBY_FREE_LEAVE("vm"); return 0; @@ -2920,11 +2920,11 @@ get_param(const char *name, size_t default_value, size_t min_value) const char *envval; size_t result = default_value; if ((envval = getenv(name)) != 0) { - long val = atol(envval); - if (val < (long)min_value) { - val = (long)min_value; - } - result = (size_t)(((val -1 + RUBY_VM_SIZE_ALIGN) / RUBY_VM_SIZE_ALIGN) * RUBY_VM_SIZE_ALIGN); + long val = atol(envval); + if (val < (long)min_value) { + val = (long)min_value; + } + result = (size_t)(((val -1 + RUBY_VM_SIZE_ALIGN) / RUBY_VM_SIZE_ALIGN) * RUBY_VM_SIZE_ALIGN); } if (0) ruby_debug_printf("%s: %"PRIuSIZE"\n", name, result); /* debug print */ @@ -2940,7 +2940,7 @@ check_machine_stack_size(size_t *sizep) #ifdef PTHREAD_STACK_MIN if (size < (size_t)PTHREAD_STACK_MIN) { - *sizep = (size_t)PTHREAD_STACK_MIN * 2; + *sizep = (size_t)PTHREAD_STACK_MIN * 2; } #endif } @@ -2950,23 +2950,23 @@ 
vm_default_params_setup(rb_vm_t *vm) { vm->default_params.thread_vm_stack_size = get_param("RUBY_THREAD_VM_STACK_SIZE", - RUBY_VM_THREAD_VM_STACK_SIZE, - RUBY_VM_THREAD_VM_STACK_SIZE_MIN); + RUBY_VM_THREAD_VM_STACK_SIZE, + RUBY_VM_THREAD_VM_STACK_SIZE_MIN); vm->default_params.thread_machine_stack_size = get_param("RUBY_THREAD_MACHINE_STACK_SIZE", - RUBY_VM_THREAD_MACHINE_STACK_SIZE, - RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN); + RUBY_VM_THREAD_MACHINE_STACK_SIZE, + RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN); vm->default_params.fiber_vm_stack_size = get_param("RUBY_FIBER_VM_STACK_SIZE", - RUBY_VM_FIBER_VM_STACK_SIZE, - RUBY_VM_FIBER_VM_STACK_SIZE_MIN); + RUBY_VM_FIBER_VM_STACK_SIZE, + RUBY_VM_FIBER_VM_STACK_SIZE_MIN); vm->default_params.fiber_machine_stack_size = get_param("RUBY_FIBER_MACHINE_STACK_SIZE", - RUBY_VM_FIBER_MACHINE_STACK_SIZE, - RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN); + RUBY_VM_FIBER_MACHINE_STACK_SIZE, + RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN); /* environment dependent check */ check_machine_stack_size(&vm->default_params.thread_machine_stack_size); @@ -3040,17 +3040,17 @@ rb_execution_context_mark(const rb_execution_context_t *ec) /* mark VM stack */ if (ec->vm_stack) { VM_ASSERT(ec->cfp); - VALUE *p = ec->vm_stack; - VALUE *sp = ec->cfp->sp; - rb_control_frame_t *cfp = ec->cfp; - rb_control_frame_t *limit_cfp = (void *)(ec->vm_stack + ec->vm_stack_size); + VALUE *p = ec->vm_stack; + VALUE *sp = ec->cfp->sp; + rb_control_frame_t *cfp = ec->cfp; + rb_control_frame_t *limit_cfp = (void *)(ec->vm_stack + ec->vm_stack_size); VM_ASSERT(sp == ec->cfp->sp); rb_gc_mark_vm_stack_values((long)(sp - p), p); - while (cfp != limit_cfp) { - const VALUE *ep = cfp->ep; - VM_ASSERT(!!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) == vm_ep_in_heap_p_(ec, ep)); + while (cfp != limit_cfp) { + const VALUE *ep = cfp->ep; + VM_ASSERT(!!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) == vm_ep_in_heap_p_(ec, ep)); rb_gc_mark_movable(cfp->self); rb_gc_mark_movable((VALUE)cfp->iseq); rb_gc_mark_movable((VALUE)cfp->block_code); @@ -3061,24 +3061,24 @@ rb_execution_context_mark(const rb_execution_context_t *ec) rb_gc_mark_movable(prev_ep[VM_ENV_DATA_INDEX_ENV]); } - if (VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED)) { + if (VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED)) { rb_gc_mark_movable(ep[VM_ENV_DATA_INDEX_ENV]); rb_gc_mark(ep[VM_ENV_DATA_INDEX_ME_CREF]); - } + } } - cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp); - } + cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp); + } } /* mark machine stack */ if (ec->machine.stack_start && ec->machine.stack_end && - ec != GET_EC() /* marked for current ec at the first stage of marking */ - ) { - rb_gc_mark_machine_stack(ec); - rb_gc_mark_locations((VALUE *)&ec->machine.regs, - (VALUE *)(&ec->machine.regs) + - sizeof(ec->machine.regs) / (sizeof(VALUE))); + ec != GET_EC() /* marked for current ec at the first stage of marking */ + ) { + rb_gc_mark_machine_stack(ec); + rb_gc_mark_locations((VALUE *)&ec->machine.regs, + (VALUE *)(&ec->machine.regs) + + sizeof(ec->machine.regs) / (sizeof(VALUE))); } RUBY_MARK_UNLESS_NULL(ec->errinfo); @@ -3156,20 +3156,20 @@ thread_free(void *ptr) RUBY_FREE_ENTER("thread"); if (th->locking_mutex != Qfalse) { - rb_bug("thread_free: locking_mutex must be NULL (%p:%p)", (void *)th, (void *)th->locking_mutex); + rb_bug("thread_free: locking_mutex must be NULL (%p:%p)", (void *)th, (void *)th->locking_mutex); } if (th->keeping_mutexes != NULL) { - rb_bug("thread_free: keeping_mutexes must be NULL (%p:%p)", (void *)th, (void *)th->keeping_mutexes); + rb_bug("thread_free: keeping_mutexes must 
be NULL (%p:%p)", (void *)th, (void *)th->keeping_mutexes); } rb_threadptr_root_fiber_release(th); if (th->vm && th->vm->ractor.main_thread == th) { - RUBY_GC_INFO("MRI main thread\n"); + RUBY_GC_INFO("MRI main thread\n"); } else { ruby_xfree(th->nt); // TODO - ruby_xfree(th); + ruby_xfree(th); } RUBY_FREE_LEAVE("thread"); @@ -3182,10 +3182,10 @@ thread_memsize(const void *ptr) size_t size = sizeof(rb_thread_t); if (!th->root_fiber) { - size += th->ec->vm_stack_size * sizeof(VALUE); + size += th->ec->vm_stack_size * sizeof(VALUE); } if (th->ec->local_storage) { - size += rb_id_table_memsize(th->ec->local_storage); + size += rb_id_table_memsize(th->ec->local_storage); } return size; } @@ -3194,9 +3194,9 @@ thread_memsize(const void *ptr) const rb_data_type_t ruby_threadptr_data_type = { "VM/thread", { - thread_mark, - thread_free, - thread_memsize, + thread_mark, + thread_free, + thread_memsize, thread_compact, }, 0, 0, RUBY_TYPED_FREE_IMMEDIATELY @@ -3315,7 +3315,7 @@ static VALUE m_core_set_method_alias(VALUE self, VALUE cbase, VALUE sym1, VALUE sym2) { REWIND_CFP({ - rb_alias(cbase, SYM2ID(sym1), SYM2ID(sym2)); + rb_alias(cbase, SYM2ID(sym1), SYM2ID(sym2)); }); return Qnil; } @@ -3324,7 +3324,7 @@ static VALUE m_core_set_variable_alias(VALUE self, VALUE sym1, VALUE sym2) { REWIND_CFP({ - rb_alias_variable(SYM2ID(sym1), SYM2ID(sym2)); + rb_alias_variable(SYM2ID(sym1), SYM2ID(sym2)); }); return Qnil; } @@ -3334,8 +3334,8 @@ m_core_undef_method(VALUE self, VALUE cbase, VALUE sym) { REWIND_CFP({ ID mid = SYM2ID(sym); - rb_undef(cbase, mid); - rb_clear_method_cache(self, mid); + rb_undef(cbase, mid); + rb_clear_method_cache(self, mid); }); return Qnil; } @@ -3434,11 +3434,11 @@ nsdr(VALUE self) int i; if (syms == 0) { - rb_memerror(); + rb_memerror(); } for (i=0; i<n; i++) { - rb_ary_push(ary, rb_str_new2(syms[i])); + rb_ary_push(ary, rb_str_new2(syms[i])); } free(syms); /* OK */ #endif @@ -3829,37 +3829,37 @@ Init_VM(void) /* VM bootstrap: phase 2 */ { - rb_vm_t *vm = ruby_current_vm_ptr; - rb_thread_t *th = GET_THREAD(); - VALUE filename = rb_fstring_lit("<main>"); - const rb_iseq_t *iseq = rb_iseq_new(0, filename, filename, Qnil, 0, ISEQ_TYPE_TOP); + rb_vm_t *vm = ruby_current_vm_ptr; + rb_thread_t *th = GET_THREAD(); + VALUE filename = rb_fstring_lit("<main>"); + const rb_iseq_t *iseq = rb_iseq_new(0, filename, filename, Qnil, 0, ISEQ_TYPE_TOP); // Ractor setup rb_ractor_main_setup(vm, th->ractor, th); - /* create vm object */ - vm->self = TypedData_Wrap_Struct(rb_cRubyVM, &vm_data_type, vm); + /* create vm object */ + vm->self = TypedData_Wrap_Struct(rb_cRubyVM, &vm_data_type, vm); - /* create main thread */ + /* create main thread */ th->self = TypedData_Wrap_Struct(rb_cThread, &thread_data_type, th); - vm->ractor.main_thread = th; + vm->ractor.main_thread = th; vm->ractor.main_ractor = th->ractor; - th->vm = vm; - th->top_wrapper = 0; - th->top_self = rb_vm_top_self(); + th->vm = vm; + th->top_wrapper = 0; + th->top_self = rb_vm_top_self(); rb_gc_register_mark_object((VALUE)iseq); - th->ec->cfp->iseq = iseq; + th->ec->cfp->iseq = iseq; th->ec->cfp->pc = ISEQ_BODY(iseq)->iseq_encoded; - th->ec->cfp->self = th->top_self; + th->ec->cfp->self = th->top_self; - VM_ENV_FLAGS_UNSET(th->ec->cfp->ep, VM_FRAME_FLAG_CFRAME); - VM_STACK_ENV_WRITE(th->ec->cfp->ep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)vm_cref_new(rb_cObject, METHOD_VISI_PRIVATE, FALSE, NULL, FALSE, FALSE)); + VM_ENV_FLAGS_UNSET(th->ec->cfp->ep, VM_FRAME_FLAG_CFRAME); + VM_STACK_ENV_WRITE(th->ec->cfp->ep, VM_ENV_DATA_INDEX_ME_CREF, 
(VALUE)vm_cref_new(rb_cObject, METHOD_VISI_PRIVATE, FALSE, NULL, FALSE, FALSE)); - /* - * The Binding of the top level scope - */ - rb_define_global_const("TOPLEVEL_BINDING", rb_binding_new()); + /* + * The Binding of the top level scope + */ + rb_define_global_const("TOPLEVEL_BINDING", rb_binding_new()); rb_objspace_gc_enable(vm->objspace); } @@ -3894,8 +3894,8 @@ Init_BareVM(void) rb_vm_t * vm = ruby_mimmalloc(sizeof(*vm)); rb_thread_t * th = ruby_mimmalloc(sizeof(*th)); if (!vm || !th) { - fputs("[FATAL] failed to allocate memory\n", stderr); - exit(EXIT_FAILURE); + fputs("[FATAL] failed to allocate memory\n", stderr); + exit(EXIT_FAILURE); } // setup the VM @@ -3982,8 +3982,8 @@ rb_ruby_debug_ptr(void) /* iseq.c */ VALUE rb_insn_operand_intern(const rb_iseq_t *iseq, - VALUE insn, int op_no, VALUE op, - int len, size_t pos, VALUE *pnop, VALUE child); + VALUE insn, int op_no, VALUE op, + int len, size_t pos, VALUE *pnop, VALUE child); st_table * rb_vm_fstring_table(void) @@ -4021,29 +4021,29 @@ vm_analysis_insn(int insn) CONST_ID(bigram_hash, "USAGE_ANALYSIS_INSN_BIGRAM"); uh = rb_const_get(rb_cRubyVM, usage_hash); if (NIL_P(ihash = rb_hash_aref(uh, INT2FIX(insn)))) { - ihash = rb_hash_new(); - HASH_ASET(uh, INT2FIX(insn), ihash); + ihash = rb_hash_new(); + HASH_ASET(uh, INT2FIX(insn), ihash); } if (NIL_P(cv = rb_hash_aref(ihash, INT2FIX(-1)))) { - cv = INT2FIX(0); + cv = INT2FIX(0); } HASH_ASET(ihash, INT2FIX(-1), INT2FIX(FIX2INT(cv) + 1)); /* calc bigram */ if (prev_insn != -1) { - VALUE bi; - VALUE ary[2]; - VALUE cv; + VALUE bi; + VALUE ary[2]; + VALUE cv; - ary[0] = INT2FIX(prev_insn); - ary[1] = INT2FIX(insn); - bi = rb_ary_new4(2, &ary[0]); + ary[0] = INT2FIX(prev_insn); + ary[1] = INT2FIX(insn); + bi = rb_ary_new4(2, &ary[0]); - uh = rb_const_get(rb_cRubyVM, bigram_hash); - if (NIL_P(cv = rb_hash_aref(uh, bi))) { - cv = INT2FIX(0); - } - HASH_ASET(uh, bi, INT2FIX(FIX2INT(cv) + 1)); + uh = rb_const_get(rb_cRubyVM, bigram_hash); + if (NIL_P(cv = rb_hash_aref(uh, bi))) { + cv = INT2FIX(0); + } + HASH_ASET(uh, bi, INT2FIX(FIX2INT(cv) + 1)); } prev_insn = insn; } @@ -4063,19 +4063,19 @@ vm_analysis_operand(int insn, int n, VALUE op) uh = rb_const_get(rb_cRubyVM, usage_hash); if (NIL_P(ihash = rb_hash_aref(uh, INT2FIX(insn)))) { - ihash = rb_hash_new(); - HASH_ASET(uh, INT2FIX(insn), ihash); + ihash = rb_hash_new(); + HASH_ASET(uh, INT2FIX(insn), ihash); } if (NIL_P(ophash = rb_hash_aref(ihash, INT2FIX(n)))) { - ophash = rb_hash_new(); - HASH_ASET(ihash, INT2FIX(n), ophash); + ophash = rb_hash_new(); + HASH_ASET(ihash, INT2FIX(n), ophash); } /* intern */ valstr = rb_insn_operand_intern(GET_EC()->cfp->iseq, insn, n, op, 0, 0, 0, 0); /* set count */ if (NIL_P(cv = rb_hash_aref(ophash, valstr))) { - cv = INT2FIX(0); + cv = INT2FIX(0); } HASH_ASET(ophash, valstr, INT2FIX(FIX2INT(cv) + 1)); } @@ -4087,16 +4087,16 @@ vm_analysis_register(int reg, int isset) VALUE uh; VALUE valstr; static const char regstrs[][5] = { - "pc", /* 0 */ - "sp", /* 1 */ - "ep", /* 2 */ - "cfp", /* 3 */ - "self", /* 4 */ - "iseq", /* 5 */ + "pc", /* 0 */ + "sp", /* 1 */ + "ep", /* 2 */ + "cfp", /* 3 */ + "self", /* 4 */ + "iseq", /* 5 */ }; static const char getsetstr[][4] = { - "get", - "set", + "get", + "set", }; static VALUE syms[sizeof(regstrs) / sizeof(regstrs[0])][2]; @@ -4104,22 +4104,22 @@ vm_analysis_register(int reg, int isset) CONST_ID(usage_hash, "USAGE_ANALYSIS_REGS"); if (syms[0] == 0) { - char buff[0x10]; - int i; - - for (i = 0; i < (int)(sizeof(regstrs) / sizeof(regstrs[0])); i++) { - int j; - for 
(j = 0; j < 2; j++) { - snprintf(buff, 0x10, "%d %s %-4s", i, getsetstr[j], regstrs[i]); - syms[i][j] = ID2SYM(rb_intern(buff)); - } - } + char buff[0x10]; + int i; + + for (i = 0; i < (int)(sizeof(regstrs) / sizeof(regstrs[0])); i++) { + int j; + for (j = 0; j < 2; j++) { + snprintf(buff, 0x10, "%d %s %-4s", i, getsetstr[j], regstrs[i]); + syms[i][j] = ID2SYM(rb_intern(buff)); + } + } } valstr = syms[reg][isset]; uh = rb_const_get(rb_cRubyVM, usage_hash); if (NIL_P(cv = rb_hash_aref(uh, valstr))) { - cv = INT2FIX(0); + cv = INT2FIX(0); } HASH_ASET(uh, valstr, INT2FIX(FIX2INT(cv) + 1)); } @@ -4260,10 +4260,10 @@ static void vm_collect_usage_insn(int insn) { if (RUBY_DTRACE_INSN_ENABLED()) { - RUBY_DTRACE_INSN(rb_insns_name(insn)); + RUBY_DTRACE_INSN(rb_insns_name(insn)); } if (ruby_vm_collect_usage_func_insn) - (*ruby_vm_collect_usage_func_insn)(insn); + (*ruby_vm_collect_usage_func_insn)(insn); } /* @param insn instruction number @@ -4274,15 +4274,15 @@ static void vm_collect_usage_operand(int insn, int n, VALUE op) { if (RUBY_DTRACE_INSN_OPERAND_ENABLED()) { - VALUE valstr; + VALUE valstr; - valstr = rb_insn_operand_intern(GET_EC()->cfp->iseq, insn, n, op, 0, 0, 0, 0); + valstr = rb_insn_operand_intern(GET_EC()->cfp->iseq, insn, n, op, 0, 0, 0, 0); - RUBY_DTRACE_INSN_OPERAND(RSTRING_PTR(valstr), rb_insns_name(insn)); - RB_GC_GUARD(valstr); + RUBY_DTRACE_INSN_OPERAND(RSTRING_PTR(valstr), rb_insns_name(insn)); + RB_GC_GUARD(valstr); } if (ruby_vm_collect_usage_func_operand) - (*ruby_vm_collect_usage_func_operand)(insn, n, op); + (*ruby_vm_collect_usage_func_operand)(insn, n, op); } /* @param reg register id. see code of vm_analysis_register() */ @@ -4291,7 +4291,7 @@ static void vm_collect_usage_register(int reg, int isset) { if (ruby_vm_collect_usage_func_register) - (*ruby_vm_collect_usage_func_register)(reg, isset); + (*ruby_vm_collect_usage_func_register)(reg, isset); } #endif |