author     Jean Boussier <[email protected]>    2025-04-04 13:28:51 +0200
committer  Jean Boussier <[email protected]>    2025-04-04 16:26:29 +0200
commit     085cc6e43473f2a3c81311a07c1fc8efa46c118b (patch)
tree       cbcbe1bcb361879562e8f1240d584de9dce908f3
parent     eb765913c108fa0e71ab7f9852457a914a7d98f0 (diff)
Ractor: revert to moving object bytes, but size pool aware
Using `rb_obj_clone` introduces other problems, such as `initialize_*` callbacks being invoked in the context of the parent ractor. So we revert to copying the contents of the object slots, but in a way that is aware of size pools.
Notes
Merged: https://github.com/ruby/ruby/pull/13070
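
For context, a minimal, linearized sketch of the approach described above, roughly what the move_enter/move_leave changes in ractor.c below do in sequence. The helper name move_object_bytes is hypothetical; it assumes Ruby's internal object/GC headers and omits the write-barrier flag table and the generic-ivar transfer that the real diff handles.

// Hypothetical condensation of move_enter + move_leave (see the ractor.c hunk
// below); assumes Ruby's internal object/GC headers, omits FL_WB_PROTECTED
// handling and rb_replace_generic_ivar for brevity.
static VALUE
move_object_bytes(VALUE obj)
{
    // Allocate a fresh slot from the same size pool as the source, so the
    // destination slot size always matches the source slot size.
    size_t size = rb_gc_obj_slot_size(obj);
    NEWOBJ_OF(moved, struct RBasic, 0, RB_BUILTIN_TYPE(obj), size, 0);

    // Copy the raw slot bytes; the copy must not inherit the source's
    // object-id bookkeeping.
    memcpy((void *)moved, (void *)obj, size);
    FL_UNSET_RAW((VALUE)moved, FL_SEEN_OBJ_ID);

    // Turn the source into a frozen placeholder of class
    // Ractor::MovedObject so it can no longer be used or mutated.
    MEMZERO((char *)obj + sizeof(struct RBasic), char, size - sizeof(struct RBasic));
    RBASIC(obj)->flags = T_OBJECT | FL_FREEZE | (RBASIC(obj)->flags & FL_SEEN_OBJ_ID);
    RBASIC_SET_CLASS_RAW(obj, rb_cRactorMovedObject);

    return (VALUE)moved;
}

Allocating through NEWOBJ_OF with rb_gc_obj_slot_size(obj) is what makes the byte copy size-pool aware: the destination slot is taken from the same size class as the source, so copying the full slot contents with memcpy stays within bounds.
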
-rw-r--r--   bootstraptest/test_ractor.rb | 21
-rw-r--r--   gc.c                         |  8
-rw-r--r--   internal/gc.h                |  1
-rw-r--r--   ractor.c                     | 33
-rw-r--r--   variable.c                   | 21
5 files changed, 73 insertions, 11 deletions
diff --git a/bootstraptest/test_ractor.rb b/bootstraptest/test_ractor.rb
index 7cc3092c49..1f17deec2e 100644
--- a/bootstraptest/test_ractor.rb
+++ b/bootstraptest/test_ractor.rb
@@ -2101,3 +2101,24 @@ assert_equal 'ok', %q{
:fail
end
}
+
+# move objects inside frozen containers
+assert_equal 'ok', %q{
+ ractor = Ractor.new { Ractor.receive }
+ obj = Array.new(10, 42)
+ original = obj.dup
+ ractor.send([obj].freeze, move: true)
+ roundtripped_obj = ractor.take[0]
+ roundtripped_obj == original ? :ok : roundtripped_obj
+}
+
+# move object with generic ivar
+assert_equal 'ok', %q{
+ ractor = Ractor.new { Ractor.receive }
+ obj = Array.new(10, 42)
+ obj.instance_variable_set(:@array, [1])
+
+ ractor.send(obj, move: true)
+ roundtripped_obj = ractor.take
+ roundtripped_obj.instance_variable_get(:@array) == [1] ? :ok : roundtripped_obj
+}
diff --git a/gc.c b/gc.c
index a9c91250df..89d727f3e1 100644
--- a/gc.c
+++ b/gc.c
@@ -2659,14 +2659,6 @@ rb_gc_mark_roots(void *objspace, const char **categoryp)
#define TYPED_DATA_REFS_OFFSET_LIST(d) (size_t *)(uintptr_t)RTYPEDDATA(d)->type->function.dmark
void
-rb_gc_ractor_moved(VALUE dest, VALUE src)
-{
- rb_gc_obj_free(rb_gc_get_objspace(), src);
- MEMZERO((void *)src, char, rb_gc_obj_slot_size(src));
- RBASIC(src)->flags = T_OBJECT | FL_FREEZE; // Avoid mutations using bind_call, etc.
-}
-
-void
rb_gc_mark_children(void *objspace, VALUE obj)
{
if (FL_TEST(obj, FL_EXIVAR)) {
diff --git a/internal/gc.h b/internal/gc.h
index 454f8ec685..4e9b4554e8 100644
--- a/internal/gc.h
+++ b/internal/gc.h
@@ -183,7 +183,6 @@ struct rb_gc_object_metadata_entry {
/* gc.c */
RUBY_ATTR_MALLOC void *ruby_mimmalloc(size_t size);
RUBY_ATTR_MALLOC void *ruby_mimcalloc(size_t num, size_t size);
-void rb_gc_ractor_moved(VALUE dest, VALUE src);
void ruby_mimfree(void *ptr);
void rb_gc_prepare_heap(void);
void rb_objspace_set_event_hook(const rb_event_flag_t event);
diff --git a/ractor.c b/ractor.c
index d8a22c0927..d50704dec4 100644
--- a/ractor.c
+++ b/ractor.c
@@ -3539,6 +3539,19 @@ rb_obj_traverse_replace(VALUE obj,
}
}
+static const bool wb_protected_types[RUBY_T_MASK] = {
+ [T_OBJECT] = RGENGC_WB_PROTECTED_OBJECT,
+ [T_HASH] = RGENGC_WB_PROTECTED_HASH,
+ [T_ARRAY] = RGENGC_WB_PROTECTED_ARRAY,
+ [T_STRING] = RGENGC_WB_PROTECTED_STRING,
+ [T_STRUCT] = RGENGC_WB_PROTECTED_STRUCT,
+ [T_COMPLEX] = RGENGC_WB_PROTECTED_COMPLEX,
+ [T_REGEXP] = RGENGC_WB_PROTECTED_REGEXP,
+ [T_MATCH] = RGENGC_WB_PROTECTED_MATCH,
+ [T_FLOAT] = RGENGC_WB_PROTECTED_FLOAT,
+ [T_RATIONAL] = RGENGC_WB_PROTECTED_RATIONAL,
+};
+
static enum obj_traverse_iterator_result
move_enter(VALUE obj, struct obj_traverse_replace_data *data)
{
@@ -3547,7 +3560,10 @@ move_enter(VALUE obj, struct obj_traverse_replace_data *data)
return traverse_skip;
}
else {
- data->replacement = rb_obj_clone(obj);
+ VALUE type = RB_BUILTIN_TYPE(obj);
+ type |= wb_protected_types[type] ? FL_WB_PROTECTED : 0;
+ NEWOBJ_OF(moved, struct RBasic, 0, type, rb_gc_obj_slot_size(obj), 0);
+ data->replacement = (VALUE)moved;
return traverse_cont;
}
}
@@ -3555,7 +3571,20 @@ move_enter(VALUE obj, struct obj_traverse_replace_data *data)
static enum obj_traverse_iterator_result
move_leave(VALUE obj, struct obj_traverse_replace_data *data)
{
- rb_gc_ractor_moved(data->replacement, obj);
+ size_t size = rb_gc_obj_slot_size(obj);
+ memcpy((void *)data->replacement, (void *)obj, size);
+ FL_UNSET_RAW(data->replacement, FL_SEEN_OBJ_ID);
+
+ void rb_replace_generic_ivar(VALUE clone, VALUE obj); // variable.c
+
+ if (UNLIKELY(FL_TEST_RAW(obj, FL_EXIVAR))) {
+ rb_replace_generic_ivar(data->replacement, obj);
+ }
+
+ // Avoid mutations using bind_call, etc.
+    // We keep FL_SEEN_OBJ_ID so the GC can later clean up the obj_id_table.
+ MEMZERO((char *)obj + sizeof(struct RBasic), char, size - sizeof(struct RBasic));
+ RBASIC(obj)->flags = T_OBJECT | FL_FREEZE | (RBASIC(obj)->flags & FL_SEEN_OBJ_ID);
RBASIC_SET_CLASS_RAW(obj, rb_cRactorMovedObject);
return traverse_cont;
}
diff --git a/variable.c b/variable.c
index b987a57f1e..68e54a21ab 100644
--- a/variable.c
+++ b/variable.c
@@ -2158,6 +2158,27 @@ rb_copy_generic_ivar(VALUE clone, VALUE obj)
}
void
+rb_replace_generic_ivar(VALUE clone, VALUE obj)
+{
+ RUBY_ASSERT(FL_TEST(obj, FL_EXIVAR));
+
+ RB_VM_LOCK_ENTER();
+ {
+ st_data_t ivtbl, obj_data = (st_data_t)obj;
+ if (st_delete(generic_iv_tbl_, &obj_data, &ivtbl)) {
+ FL_UNSET_RAW(obj, FL_EXIVAR);
+
+ st_insert(generic_iv_tbl_, (st_data_t)clone, ivtbl);
+ FL_SET_RAW(clone, FL_EXIVAR);
+ }
+ else {
+ rb_bug("unreachable");
+ }
+ }
+ RB_VM_LOCK_LEAVE();
+}
+
+void
rb_ivar_foreach(VALUE obj, rb_ivar_foreach_callback_func *func, st_data_t arg)
{
if (SPECIAL_CONST_P(obj)) return;