-rw-r--r--  compile.c                        4
-rw-r--r--  iseq.c                          55
-rw-r--r--  rjit_c.rb                        1
-rw-r--r--  vm_callinfo.h                   26
-rw-r--r--  vm_core.h                        2
-rw-r--r--  vm_insnhelper.c                 30
-rw-r--r--  vm_trace.c                      10
-rw-r--r--  yjit/src/cruby_bindings.inc.rs   1
8 files changed, 114 insertions(+), 15 deletions(-)
diff --git a/compile.c b/compile.c
index e863924397..546c07cec5 100644
--- a/compile.c
+++ b/compile.c
@@ -3646,6 +3646,10 @@ iseq_peephole_optimize(rb_iseq_t *iseq, LINK_ELEMENT *list, const int do_tailcal
if (IS_TRACE(iobj->link.next)) {
if (IS_NEXT_INSN_ID(iobj->link.next, leave)) {
iobj->insn_id = BIN(opt_invokebuiltin_delegate_leave);
+ const struct rb_builtin_function *bf = (const struct rb_builtin_function *)iobj->operands[0];
+ if (iobj == (INSN *)list && bf->argc == 0 && (iseq->body->builtin_attrs & BUILTIN_ATTR_LEAF)) {
+ iseq->body->builtin_attrs |= BUILTIN_ATTR_SINGLE_NOARG_INLINE;
+ }
}
}
}
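
Note: the hunk above tags an iseq only when three conditions hold at once: the rewritten instruction is the head of the instruction list, the delegated builtin takes zero arguments, and the body was already proven leaf. A minimal, self-contained sketch of that qualification test, with hypothetical stand-in types (fake_builtin, fake_body) in place of rb_builtin_function and the iseq body:

    #include <stdbool.h>
    #include <stdint.h>

    /* Attribute bits, matching the vm_core.h hunk later in this diff. */
    enum { ATTR_LEAF = 0x01, ATTR_SINGLE_NOARG_INLINE = 0x04 };

    /* Hypothetical stand-ins for rb_builtin_function and the iseq body. */
    struct fake_builtin { int argc; };
    struct fake_body { uint32_t builtin_attrs; };

    /* Tag the body as inlinable only if the instruction being rewritten
     * is the first in the list, the builtin takes no arguments, and the
     * body is already leaf. */
    static void
    mark_single_noarg_inline(struct fake_body *body, bool insn_is_list_head,
                             const struct fake_builtin *bf)
    {
        if (insn_is_list_head && bf->argc == 0 &&
            (body->builtin_attrs & ATTR_LEAF)) {
            body->builtin_attrs |= ATTR_SINGLE_NOARG_INLINE;
        }
    }
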
diff --git a/iseq.c b/iseq.c
index 2e072f9ac2..1e9a3f7497 100644
--- a/iseq.c
+++ b/iseq.c
@@ -3550,9 +3550,32 @@ rb_iseq_trace_set(const rb_iseq_t *iseq, rb_event_flag_t turnon_events)
}
}
-bool rb_vm_call_ivar_attrset_p(const vm_call_handler ch);
void rb_vm_cc_general(const struct rb_callcache *cc);
+static bool
+clear_attr_cc(VALUE v)
+{
+ if (imemo_type_p(v, imemo_callcache) && vm_cc_ivar_p((const struct rb_callcache *)v)) {
+ rb_vm_cc_general((struct rb_callcache *)v);
+ return true;
+ }
+ else {
+ return false;
+ }
+}
+
+static bool
+clear_bf_cc(VALUE v)
+{
+ if (imemo_type_p(v, imemo_callcache) && vm_cc_bf_p((const struct rb_callcache *)v)) {
+ rb_vm_cc_general((struct rb_callcache *)v);
+ return true;
+ }
+ else {
+ return false;
+ }
+}
+
static int
clear_attr_ccs_i(void *vstart, void *vend, size_t stride, void *data)
{
@@ -3560,11 +3583,7 @@ clear_attr_ccs_i(void *vstart, void *vend, size_t stride, void *data)
for (; v != (VALUE)vend; v += stride) {
void *ptr = asan_poisoned_object_p(v);
asan_unpoison_object(v, false);
-
- if (imemo_type_p(v, imemo_callcache) && rb_vm_call_ivar_attrset_p(((const struct rb_callcache *)v)->call_)) {
- rb_vm_cc_general((struct rb_callcache *)v);
- }
-
+ clear_attr_cc(v);
asan_poison_object_if(ptr, v);
}
return 0;
@@ -3577,6 +3596,25 @@ rb_clear_attr_ccs(void)
}
static int
+clear_bf_ccs_i(void *vstart, void *vend, size_t stride, void *data)
+{
+ VALUE v = (VALUE)vstart;
+ for (; v != (VALUE)vend; v += stride) {
+ void *ptr = asan_poisoned_object_p(v);
+ asan_unpoison_object(v, false);
+ clear_bf_cc(v);
+ asan_poison_object_if(ptr, v);
+ }
+ return 0;
+}
+
+void
+rb_clear_bf_ccs(void)
+{
+ rb_objspace_each_objects(clear_bf_ccs_i, NULL);
+}
+
+static int
trace_set_i(void *vstart, void *vend, size_t stride, void *data)
{
rb_event_flag_t turnon_events = *(rb_event_flag_t *)data;
@@ -3589,8 +3627,9 @@ trace_set_i(void *vstart, void *vend, size_t stride, void *data)
if (rb_obj_is_iseq(v)) {
rb_iseq_trace_set(rb_iseq_check((rb_iseq_t *)v), turnon_events);
}
- else if (imemo_type_p(v, imemo_callcache) && rb_vm_call_ivar_attrset_p(((const struct rb_callcache *)v)->call_)) {
- rb_vm_cc_general((struct rb_callcache *)v);
+ else if (clear_attr_cc(v)) {
+ }
+ else if (clear_bf_cc(v)) {
}
asan_poison_object_if(ptr, v);
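
Note: both new helpers follow the same shape. rb_objspace_each_objects walks the heap in fixed strides, each slot is unpoisoned for AddressSanitizer, and any call cache matching the predicate is downgraded back to the general handler. A sketch of that walk under simplified assumptions (cc_matches and cc_reset are hypothetical stand-ins for vm_cc_bf_p and rb_vm_cc_general; the ASan bookkeeping is omitted):

    #include <stdbool.h>
    #include <stddef.h>

    typedef unsigned long VALUE; /* stand-in for Ruby's VALUE */

    /* Hypothetical predicate/reset pair for a cached-call slot. */
    extern bool cc_matches(VALUE v);
    extern void cc_reset(VALUE v);

    /* rb_objspace_each_objects-style callback: visit [vstart, vend) in
     * `stride`-sized steps and reset every matching call cache.
     * Returning 0 tells the iterator to continue. */
    static int
    clear_matching_ccs_i(void *vstart, void *vend, size_t stride, void *data)
    {
        (void)data;
        for (VALUE v = (VALUE)vstart; v != (VALUE)vend; v += stride) {
            if (cc_matches(v)) cc_reset(v);
        }
        return 0;
    }
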
diff --git a/rjit_c.rb b/rjit_c.rb
index c1030e94e3..96b21f2b7d 100644
--- a/rjit_c.rb
+++ b/rjit_c.rb
@@ -952,6 +952,7 @@ module RubyVM::RJIT # :nodoc: all
),
method_missing_reason: self.method_missing_reason,
v: self.VALUE,
+ bf: CType::Pointer.new { self.rb_builtin_function },
), Primitive.cexpr!("OFFSETOF((*((struct rb_callcache *)NULL)), aux_)")],
)
end
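
Note: RJIT mirrors the C struct layout in Ruby, so the new bf member has to appear in its model of the aux_ union at the offset the OFFSETOF cexpr computes for the real struct. In plain C that expression reduces to the standard offsetof idiom, roughly as in this sketch (mini_cc is a hypothetical reduced layout, not the real rb_callcache):

    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical reduced layout of struct rb_callcache. */
    struct mini_cc {
        unsigned long flags;
        unsigned long klass;
        const void *cme_;
        const void *call_;
        union { const void *bf; unsigned long v; } aux_;
    };

    int
    main(void)
    {
        /* The Primitive.cexpr! above computes this same kind of constant
         * so RJIT can read aux_ members at the right byte offset. */
        printf("aux_ offset: %zu\n", offsetof(struct mini_cc, aux_));
        return 0;
    }
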
diff --git a/vm_callinfo.h b/vm_callinfo.h
index 1edeee1f56..bf34908eea 100644
--- a/vm_callinfo.h
+++ b/vm_callinfo.h
@@ -290,6 +290,7 @@ struct rb_callcache {
} attr;
const enum method_missing_reason method_missing_reason; /* used by method_missing */
VALUE v;
+ const struct rb_builtin_function *bf;
} aux_;
};
@@ -439,6 +440,9 @@ vm_cc_valid_p(const struct rb_callcache *cc, const rb_callable_method_entry_t *c
/* callcache: mutate */
+#define VM_CALLCACHE_IVAR IMEMO_FL_USER0
+#define VM_CALLCACHE_BF IMEMO_FL_USER1
+
static inline void
vm_cc_call_set(const struct rb_callcache *cc, vm_call_handler call)
{
@@ -458,6 +462,13 @@ vm_cc_attr_index_set(const struct rb_callcache *cc, attr_index_t index, shape_id
VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
VM_ASSERT(cc != vm_cc_empty());
*attr_value = (attr_index_t)(index + 1) | ((uintptr_t)(dest_shape_id) << SHAPE_FLAG_SHIFT);
+ *(VALUE *)&cc->flags |= VM_CALLCACHE_IVAR;
+}
+
+static inline bool
+vm_cc_ivar_p(const struct rb_callcache *cc)
+{
+ return (cc->flags & VM_CALLCACHE_IVAR) != 0;
}
static inline void
@@ -481,6 +492,21 @@ vm_cc_method_missing_reason_set(const struct rb_callcache *cc, enum method_missi
}
static inline void
+vm_cc_bf_set(const struct rb_callcache *cc, const struct rb_builtin_function *bf)
+{
+ VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
+ VM_ASSERT(cc != vm_cc_empty());
+ *(const struct rb_builtin_function **)&cc->aux_.bf = bf;
+ *(VALUE *)&cc->flags |= VM_CALLCACHE_BF;
+}
+
+static inline bool
+vm_cc_bf_p(const struct rb_callcache *cc)
+{
+ return (cc->flags & VM_CALLCACHE_BF) != 0;
+}
+
+static inline void
vm_cc_invalidate(const struct rb_callcache *cc)
{
VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
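
Note: the call cache is declared const, so vm_cc_bf_set writes through casts, and it must keep two fields in sync: the aux_.bf union member and the VM_CALLCACHE_BF discriminator bit, because readers consult the flag before trusting the union. A sketch of that pairing with simplified types (mini_cc and the CC_* values are hypothetical; the real bits are IMEMO_FL_USER0/1 inside cc->flags):

    #include <stdbool.h>

    #define CC_IVAR 0x1ul /* illustrative stand-in for VM_CALLCACHE_IVAR */
    #define CC_BF   0x2ul /* illustrative stand-in for VM_CALLCACHE_BF */

    struct builtin_fn { int argc; };

    struct mini_cc {
        unsigned long flags;
        union {
            unsigned long attr_value;    /* meaningful iff CC_IVAR is set */
            const struct builtin_fn *bf; /* meaningful iff CC_BF is set */
        } aux_;
    };

    /* Store the builtin pointer and raise the discriminator together,
     * so aux_.bf is only ever read behind a CC_BF check. */
    static void
    mini_cc_bf_set(struct mini_cc *cc, const struct builtin_fn *bf)
    {
        cc->aux_.bf = bf;
        cc->flags |= CC_BF;
    }

    static bool
    mini_cc_bf_p(const struct mini_cc *cc)
    {
        return (cc->flags & CC_BF) != 0;
    }
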
diff --git a/vm_core.h b/vm_core.h
index f7313205ee..43e95a5dbe 100644
--- a/vm_core.h
+++ b/vm_core.h
@@ -371,6 +371,8 @@ enum rb_builtin_attr {
BUILTIN_ATTR_LEAF = 0x01,
// The iseq does not allocate objects.
BUILTIN_ATTR_NO_GC = 0x02,
+ // The iseq contains only a single `opt_invokebuiltin_delegate_leave` instruction with no arguments.
+ BUILTIN_ATTR_SINGLE_NOARG_INLINE = 0x04,
};
typedef VALUE (*rb_jit_func_t)(struct rb_execution_context_struct *, struct rb_control_frame_struct *);
diff --git a/vm_insnhelper.c b/vm_insnhelper.c
index e2cab6054a..e985e52077 100644
--- a/vm_insnhelper.c
+++ b/vm_insnhelper.c
@@ -2789,6 +2789,17 @@ vm_call_iseq_setup_kwparm_nokwarg(rb_execution_context_t *ec, rb_control_frame_t
return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cc), 0, param, local);
}
+static VALUE builtin_invoker0(rb_execution_context_t *ec, VALUE self, const VALUE *argv, rb_insn_func_t funcptr);
+
+static VALUE
+vm_call_single_noarg_inline_builtin(rb_execution_context_t *ec, rb_control_frame_t *cfp,
+ struct rb_calling_info *calling)
+{
+ const struct rb_builtin_function *bf = calling->cc->aux_.bf;
+ cfp->sp -= (calling->argc + 1);
+ return builtin_invoker0(ec, calling->recv, NULL, (rb_insn_func_t)bf->func_ptr);
+}
+
static inline int
vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
const rb_iseq_t *iseq, VALUE *argv, int param_size, int local_size)
@@ -2808,7 +2819,18 @@ vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
VM_ASSERT(ci == calling->ci);
VM_ASSERT(cc == calling->cc);
- CC_SET_FASTPATH(cc, vm_call_iseq_setup_func(ci, param_size, local_size), cacheable_ci && vm_call_iseq_optimizable_p(ci, cc));
+
+ if (cacheable_ci && vm_call_iseq_optimizable_p(ci, cc)) {
+ if ((iseq->body->builtin_attrs & BUILTIN_ATTR_SINGLE_NOARG_INLINE) &&
+ !(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) {
+ VM_ASSERT(iseq->body->builtin_attrs & BUILTIN_ATTR_LEAF);
+ vm_cc_bf_set(cc, (void *)iseq->body->iseq_encoded[1]);
+ CC_SET_FASTPATH(cc, vm_call_single_noarg_inline_builtin, true);
+ }
+ else {
+ CC_SET_FASTPATH(cc, vm_call_iseq_setup_func(ci, param_size, local_size), true);
+ }
+ }
return 0;
}
else if (rb_iseq_only_optparam_p(iseq)) {
@@ -3507,12 +3529,6 @@ vm_call_attrset(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_c
return vm_call_attrset_direct(ec, cfp, calling->cc, calling->recv);
}
-bool
-rb_vm_call_ivar_attrset_p(const vm_call_handler ch)
-{
- return (ch == vm_call_ivar || ch == vm_call_attrset);
-}
-
static inline VALUE
vm_call_bmethod_body(rb_execution_context_t *ec, struct rb_calling_info *calling, const VALUE *argv)
{
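
Note: the new fastpath pops the receiver and arguments in one step (cfp->sp -= argc + 1) and calls the builtin directly, pushing no frame at all; that is only safe because the body is leaf. vm_callee_setup_arg stashes the builtin (the operand at iseq_encoded[1] of the single instruction) into the cache via vm_cc_bf_set, and the handler later retrieves it from calling->cc->aux_.bf. builtin_invoker0 in Ruby's builtin.c then recovers the real (ec, self) signature from the erased function pointer, roughly as in this sketch (type names simplified):

    typedef unsigned long VALUE;
    typedef struct fake_ec fake_ec_t;     /* stand-in for rb_execution_context_t */
    typedef VALUE (*erased_func_t)(void); /* stand-in for rb_insn_func_t */

    /* Zero-argument invoker: cast the erased pointer back to its real
     * (ec, self) -> VALUE signature and call it; argv is unused because
     * this path only handles argc == 0 builtins. */
    static VALUE
    invoker0(fake_ec_t *ec, VALUE self, const VALUE *argv, erased_func_t funcptr)
    {
        typedef VALUE (*funcptr0_t)(fake_ec_t *, VALUE);
        (void)argv;
        return ((funcptr0_t)funcptr)(ec, self);
    }
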
diff --git a/vm_trace.c b/vm_trace.c
index 0cdefe3793..16242e6725 100644
--- a/vm_trace.c
+++ b/vm_trace.c
@@ -93,6 +93,7 @@ rb_hook_list_free(rb_hook_list_t *hooks)
/* ruby_vm_event_flags management */
void rb_clear_attr_ccs(void);
+void rb_clear_bf_ccs(void);
static void
update_global_event_hook(rb_event_flag_t prev_events, rb_event_flag_t new_events)
@@ -102,6 +103,8 @@ update_global_event_hook(rb_event_flag_t prev_events, rb_event_flag_t new_events
bool first_time_iseq_events_p = new_iseq_events & ~enabled_iseq_events;
bool enable_c_call = (prev_events & RUBY_EVENT_C_CALL) == 0 && (new_events & RUBY_EVENT_C_CALL);
bool enable_c_return = (prev_events & RUBY_EVENT_C_RETURN) == 0 && (new_events & RUBY_EVENT_C_RETURN);
+ bool enable_call = (prev_events & RUBY_EVENT_CALL) == 0 && (new_events & RUBY_EVENT_CALL);
+ bool enable_return = (prev_events & RUBY_EVENT_RETURN) == 0 && (new_events & RUBY_EVENT_RETURN);
// Modify ISEQs or CCs to enable tracing
if (first_time_iseq_events_p) {
@@ -112,6 +115,9 @@ update_global_event_hook(rb_event_flag_t prev_events, rb_event_flag_t new_events
else if (enable_c_call || enable_c_return) {
rb_clear_attr_ccs();
}
+ else if (enable_call || enable_return) {
+ rb_clear_bf_ccs();
+ }
ruby_vm_event_flags = new_events;
ruby_vm_event_enabled_global_flags |= new_events;
@@ -1258,6 +1264,10 @@ rb_tracepoint_enable_for_target(VALUE tpval, VALUE target, VALUE target_line)
n += rb_iseq_add_local_tracepoint_recursively(iseq, tp->events, tpval, line, target_bmethod);
rb_hash_aset(tp->local_target_set, (VALUE)iseq, Qtrue);
+ if ((tp->events & (RUBY_EVENT_CALL | RUBY_EVENT_RETURN)) &&
+ iseq->body->builtin_attrs & BUILTIN_ATTR_SINGLE_NOARG_INLINE) {
+ rb_clear_bf_ccs();
+ }
if (n == 0) {
rb_raise(rb_eArgError, "can not enable any hooks");
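
Note: enable_call and enable_return are edge detectors; each fires only on the 0-to-1 transition of its event bit, so the builtin-call caches are flushed once when CALL/RETURN tracing first turns on, not on every hook update. A compilable sketch of the same test (the EV_* values are illustrative, not the real RUBY_EVENT_* bits):

    #include <stdbool.h>
    #include <stdio.h>

    typedef unsigned int event_flag_t;
    #define EV_CALL   0x08u
    #define EV_RETURN 0x10u

    /* True only on the 0 -> 1 transition of `ev` between the old and new
     * global event masks, mirroring the enable_call/enable_return tests. */
    static bool
    newly_enabled(event_flag_t prev, event_flag_t next, event_flag_t ev)
    {
        return (prev & ev) == 0 && (next & ev) != 0;
    }

    int
    main(void)
    {
        /* Flush fastpath caches only when CALL/RETURN tracing first appears. */
        event_flag_t prev = 0, next = EV_CALL;
        if (newly_enabled(prev, next, EV_CALL) ||
            newly_enabled(prev, next, EV_RETURN)) {
            puts("rb_clear_bf_ccs(): drop single-noarg-inline fastpaths");
        }
        return 0;
    }
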
diff --git a/yjit/src/cruby_bindings.inc.rs b/yjit/src/cruby_bindings.inc.rs
index 5d16ad13f2..2b98b73dba 100644
--- a/yjit/src/cruby_bindings.inc.rs
+++ b/yjit/src/cruby_bindings.inc.rs
@@ -664,6 +664,7 @@ pub struct iseq_inline_cvar_cache_entry {
}
pub const BUILTIN_ATTR_LEAF: rb_builtin_attr = 1;
pub const BUILTIN_ATTR_NO_GC: rb_builtin_attr = 2;
+pub const BUILTIN_ATTR_SINGLE_NOARG_INLINE: rb_builtin_attr = 4;
pub type rb_builtin_attr = u32;
#[repr(C)]
#[derive(Debug, Copy, Clone)]