author | Peter Zhu <[email protected]> | 2023-01-17 11:21:21 -0500
committer | Peter Zhu <[email protected]> | 2023-01-19 11:23:35 -0500
commit | 41bf2354e30fc585a61528560c291fd8ee77d107 (patch)
tree | 7617b29ea0e6313b6ae3adc12b64ac7f89a7cae2 /iseq.c
parent | 6f3aff3961a4c5ce87e05096a1a9dcf1055b7647 (diff)
Add rb_gc_mark_and_move and implement on iseq
This commit adds rb_gc_mark_and_move, which takes a pointer to an object,
marks the object during the marking phase, and updates the reference during
compaction. This allows marking and reference updating to be combined into a
single function, which reduces code duplication and prevents bugs when
marking and reference updating go out of sync.
This commit also implements rb_gc_mark_and_move on iseq as an example.
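
To make the mechanism concrete, the sketch below shows one plausible shape for the combined helper. It is illustrative only: the real rb_gc_mark_and_move lives in gc.c and is not part of this diff, and the `during_compaction` flag is a hypothetical stand-in for however the collector actually tracks its current phase.

```c
/* Illustrative sketch, not the gc.c implementation shipped in this commit.
 * `during_compaction` is a hypothetical stand-in for the GC's phase flag. */
static bool during_compaction;

void
rb_gc_mark_and_move(VALUE *ptr)
{
    /* Immediates and other special constants are neither marked nor moved. */
    if (RB_SPECIAL_CONST_P(*ptr)) return;

    if (during_compaction) {
        /* Compaction: rewrite the slot with the object's new address. */
        *ptr = rb_gc_location(*ptr);
    }
    else {
        /* Marking: mark without pinning, so the object remains movable. */
        rb_gc_mark_movable(*ptr);
    }
}

/* Companion for non-VALUE pointers (IC entries, rb_iseq_t pointers, ...),
 * sketched as a cast-through-VALUE macro; assumes GCC-style typeof. */
#define rb_gc_mark_and_move_ptr(ptr) do { \
    VALUE _obj = (VALUE)*(ptr); \
    rb_gc_mark_and_move(&_obj); \
    *(ptr) = (typeof(*(ptr)))_obj; \
} while (0)
```

Because both phases funnel through the same pointer-taking call, a container type writes a single traversal and can no longer mark a field it forgets to update, or vice versa; that is exactly the failure mode the iseq diff below removes.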
Notes:
Merged: https://github.com/ruby/ruby/pull/7140
Diffstat (limited to 'iseq.c')
-rw-r--r-- | iseq.c | 233
1 file changed, 72 insertions, 161 deletions
@@ -206,7 +206,7 @@ rb_iseq_free(const rb_iseq_t *iseq)
 typedef VALUE iseq_value_itr_t(void *ctx, VALUE obj);
 
 static inline void
-iseq_scan_bits(unsigned int page, iseq_bits_t bits, VALUE *code, iseq_value_itr_t *func, void *data)
+iseq_scan_bits(unsigned int page, iseq_bits_t bits, VALUE *code, VALUE *original_iseq)
 {
     unsigned int offset;
     unsigned int page_offset = (page * ISEQ_MBITS_BITLENGTH);
@@ -214,20 +214,17 @@ iseq_scan_bits(unsigned int page, iseq_bits_t bits, VALUE *code, iseq_value_itr_
     while (bits) {
         offset = ntz_intptr(bits);
         VALUE op = code[page_offset + offset];
-        VALUE newop = func(data, op);
-        if (newop != op) {
-            code[page_offset + offset] = newop;
-            if (data) {
-                VALUE *original_iseq = (VALUE *)data;
-                original_iseq[page_offset + offset] = newop;
-            }
+        rb_gc_mark_and_move(&code[page_offset + offset]);
+        VALUE newop = code[page_offset + offset];
+        if (original_iseq && newop != op) {
+            original_iseq[page_offset + offset] = newop;
         }
 
         bits &= bits - 1; // Reset Lowest Set Bit (BLSR)
     }
 }
 
 static void
-rb_iseq_each_value(const rb_iseq_t *iseq, iseq_value_itr_t * func, void *data)
+rb_iseq_mark_and_move_each_value(const rb_iseq_t *iseq, VALUE *original_iseq)
 {
     unsigned int size;
     VALUE *code;
@@ -248,10 +245,7 @@ rb_iseq_each_value(const rb_iseq_t *iseq, iseq_value_itr_t * func, void *data)
             if (icvarc->entry) {
                 RUBY_ASSERT(!RB_TYPE_P(icvarc->entry->class_value, T_NONE));
 
-                VALUE nv = func(data, icvarc->entry->class_value);
-                if (icvarc->entry->class_value != nv) {
-                    icvarc->entry->class_value = nv;
-                }
+                rb_gc_mark_and_move(&icvarc->entry->class_value);
             }
         }
 
@@ -259,10 +253,7 @@ rb_iseq_each_value(const rb_iseq_t *iseq, iseq_value_itr_t * func, void *data)
         for (unsigned int i = 0; i < body->ise_size; i++, is_entries++) {
             union iseq_inline_storage_entry *const is = (union iseq_inline_storage_entry *)is_entries;
             if (is->once.value) {
-                VALUE nv = func(data, is->once.value);
-                if (is->once.value != nv) {
-                    is->once.value = nv;
-                }
+                rb_gc_mark_and_move(&is->once.value);
             }
         }
 
@@ -270,10 +261,7 @@ rb_iseq_each_value(const rb_iseq_t *iseq, iseq_value_itr_t * func, void *data)
         for (unsigned int i = 0; i < body->ic_size; i++, is_entries++) {
             IC ic = (IC)is_entries;
             if (ic->entry) {
-                VALUE nv = func(data, (VALUE)ic->entry);
-                if ((VALUE)ic->entry != nv) {
-                    ic->entry = (void *)nv;
-                }
+                rb_gc_mark_and_move_ptr(&ic->entry);
             }
         }
     }
@@ -281,139 +269,55 @@
     // Embedded VALUEs
     if (body->mark_bits.list) {
         if (ISEQ_MBITS_BUFLEN(size) == 1) {
-            iseq_scan_bits(0, body->mark_bits.single, code, func, data);
+            iseq_scan_bits(0, body->mark_bits.single, code, original_iseq);
         }
         else {
             if (body->mark_bits.list) {
                 for (unsigned int i = 0; i < ISEQ_MBITS_BUFLEN(size); i++) {
                     iseq_bits_t bits = body->mark_bits.list[i];
-                    iseq_scan_bits(i, bits, code, func, data);
-                }
-            }
-        }
-    }
-}
-
-static VALUE
-update_each_insn_value(void *ctx, VALUE obj)
-{
-    return rb_gc_location(obj);
-}
-
-void
-rb_iseq_update_references(rb_iseq_t *iseq)
-{
-    if (ISEQ_BODY(iseq)) {
-        struct rb_iseq_constant_body *body = ISEQ_BODY(iseq);
-
-        body->variable.coverage = rb_gc_location(body->variable.coverage);
-        body->variable.pc2branchindex = rb_gc_location(body->variable.pc2branchindex);
-        body->variable.script_lines = rb_gc_location(body->variable.script_lines);
-        body->location.label = rb_gc_location(body->location.label);
-        body->location.base_label = rb_gc_location(body->location.base_label);
-        body->location.pathobj = rb_gc_location(body->location.pathobj);
-        if (body->local_iseq) {
-            body->local_iseq = (struct rb_iseq_struct *)rb_gc_location((VALUE)body->local_iseq);
-        }
-        if (body->parent_iseq) {
-            body->parent_iseq = (struct rb_iseq_struct *)rb_gc_location((VALUE)body->parent_iseq);
-        }
-        if (body->mandatory_only_iseq) {
-            body->mandatory_only_iseq = (struct rb_iseq_struct *)rb_gc_location((VALUE)body->mandatory_only_iseq);
-        }
-        if (body->call_data) {
-            for (unsigned int i=0; i<body->ci_size; i++) {
-                struct rb_call_data *cds = body->call_data;
-                if (cds[i].ci) {
-                    cds[i].ci = (struct rb_callinfo *)rb_gc_location((VALUE)cds[i].ci);
-                }
-                cds[i].cc = (struct rb_callcache *)rb_gc_location((VALUE)cds[i].cc);
-            }
-        }
-        VALUE *original_iseq = ISEQ_ORIGINAL_ISEQ(iseq);
-        rb_iseq_each_value(iseq, update_each_insn_value, (void *)original_iseq);
-
-        if (body->param.flags.has_kw && ISEQ_COMPILE_DATA(iseq) == NULL) {
-            int i, j;
-
-            i = body->param.keyword->required_num;
-
-            for (j = 0; i < body->param.keyword->num; i++, j++) {
-                VALUE obj = body->param.keyword->default_values[j];
-                if (!UNDEF_P(obj)) {
-                    body->param.keyword->default_values[j] = rb_gc_location(obj);
-                }
-            }
-        }
-
-        if (body->catch_table) {
-            struct iseq_catch_table *table = body->catch_table;
-            unsigned int i;
-            for (i = 0; i < table->size; i++) {
-                struct iseq_catch_table_entry *entry;
-                entry = UNALIGNED_MEMBER_PTR(table, entries[i]);
-                if (entry->iseq) {
-                    entry->iseq = (rb_iseq_t *)rb_gc_location((VALUE)entry->iseq);
+                    iseq_scan_bits(i, bits, code, original_iseq);
                 }
             }
         }
-#if USE_MJIT
-        mjit_update_references(iseq);
-#endif
-#if USE_YJIT
-        rb_yjit_iseq_update_references(body->yjit_payload);
-#endif
     }
 }
 
-static VALUE
-each_insn_value(void *ctx, VALUE obj)
-{
-    rb_gc_mark_movable(obj);
-    return obj;
-}
-
 void
-rb_iseq_mark(const rb_iseq_t *iseq)
+rb_iseq_mark_and_update(rb_iseq_t *iseq, bool reference_updating)
 {
     RUBY_MARK_ENTER("iseq");
 
-    RUBY_MARK_UNLESS_NULL(iseq->wrapper);
+    rb_gc_mark_and_move(&iseq->wrapper);
 
     if (ISEQ_BODY(iseq)) {
-        const struct rb_iseq_constant_body *const body = ISEQ_BODY(iseq);
+        struct rb_iseq_constant_body *body = ISEQ_BODY(iseq);
 
-        rb_iseq_each_value(iseq, each_insn_value, NULL);
+        rb_iseq_mark_and_move_each_value(iseq, reference_updating ? ISEQ_ORIGINAL_ISEQ(iseq) : NULL);
 
-        rb_gc_mark_movable(body->variable.coverage);
-        rb_gc_mark_movable(body->variable.pc2branchindex);
-        rb_gc_mark_movable(body->variable.script_lines);
-        rb_gc_mark_movable(body->location.label);
-        rb_gc_mark_movable(body->location.base_label);
-        rb_gc_mark_movable(body->location.pathobj);
-        RUBY_MARK_MOVABLE_UNLESS_NULL((VALUE)body->mandatory_only_iseq);
-        RUBY_MARK_MOVABLE_UNLESS_NULL((VALUE)body->parent_iseq);
+        rb_gc_mark_and_move(&body->variable.coverage);
+        rb_gc_mark_and_move(&body->variable.pc2branchindex);
+        rb_gc_mark_and_move(&body->variable.script_lines);
+        rb_gc_mark_and_move(&body->location.label);
+        rb_gc_mark_and_move(&body->location.base_label);
+        rb_gc_mark_and_move(&body->location.pathobj);
+        if (body->local_iseq) rb_gc_mark_and_move_ptr(&body->local_iseq);
+        if (body->parent_iseq) rb_gc_mark_and_move_ptr(&body->parent_iseq);
+        if (body->mandatory_only_iseq) rb_gc_mark_and_move_ptr(&body->mandatory_only_iseq);
 
         if (body->call_data) {
-            struct rb_call_data *cds = (struct rb_call_data *)body->call_data;
-            for (unsigned int i=0; i<body->ci_size; i++) {
-                const struct rb_callinfo *ci = cds[i].ci;
-                const struct rb_callcache *cc = cds[i].cc;
+            for (unsigned int i = 0; i < body->ci_size; i++) {
+                struct rb_call_data *cds = body->call_data;
 
-                if (ci) {
-                    rb_gc_mark_movable((VALUE)ci);
-                }
+                if (cds[i].ci) rb_gc_mark_and_move_ptr(&cds[i].ci);
 
-                if (cc) {
-                    VM_ASSERT((cc->flags & VM_CALLCACHE_ON_STACK) == 0);
+                if (cds[i].cc && (reference_updating || vm_cc_markable(cds[i].cc))) {
+                    VM_ASSERT(reference_updating || (cds[i].cc->flags & VM_CALLCACHE_ON_STACK) == 0);
 
-                    if (vm_cc_markable(cc)) {
-                        if (!vm_cc_invalidated_p(cc)) {
-                            rb_gc_mark_movable((VALUE)cc);
-                        }
-                        else {
-                            cds[i].cc = rb_vm_empty_cc();
-                        }
+                    if (reference_updating || !vm_cc_invalidated_p(cds[i].cc)) {
+                        rb_gc_mark_and_move_ptr(&cds[i].cc);
+                    }
+                    else {
+                        cds[i].cc = rb_vm_empty_cc();
                     }
                 }
             }
@@ -421,57 +325,64 @@ rb_iseq_mark(const rb_iseq_t *iseq)
 
         if (body->param.flags.has_kw && ISEQ_COMPILE_DATA(iseq) == NULL) {
             const struct rb_iseq_param_keyword *const keyword = body->param.keyword;
-            int i, j;
 
-            i = keyword->required_num;
-
-            for (j = 0; i < keyword->num; i++, j++) {
-                VALUE obj = keyword->default_values[j];
-                if (!SPECIAL_CONST_P(obj)) {
-                    rb_gc_mark_movable(obj);
-                }
+            for (int j = 0, i = keyword->required_num; i < keyword->num; i++, j++) {
+                rb_gc_mark_and_move(&keyword->default_values[j]);
             }
         }
 
         if (body->catch_table) {
-            const struct iseq_catch_table *table = body->catch_table;
-            unsigned int i;
-            for (i = 0; i < table->size; i++) {
-                const struct iseq_catch_table_entry *entry;
+            struct iseq_catch_table *table = body->catch_table;
+
+            for (unsigned int i = 0; i < table->size; i++) {
+                struct iseq_catch_table_entry *entry;
                 entry = UNALIGNED_MEMBER_PTR(table, entries[i]);
                 if (entry->iseq) {
-                    rb_gc_mark_movable((VALUE)entry->iseq);
+                    rb_gc_mark_and_move_ptr(&entry->iseq);
                 }
             }
        }
 
+        if (reference_updating) {
 #if USE_MJIT
-        mjit_mark_cc_entries(body);
+            mjit_update_references(iseq);
 #endif
 #if USE_YJIT
-        rb_yjit_iseq_mark(body->yjit_payload);
+            rb_yjit_iseq_update_references(body->yjit_payload);
 #endif
+        }
+        else {
+#if USE_MJIT
+            mjit_mark_cc_entries(body);
+#endif
+#if USE_YJIT
+            rb_yjit_iseq_mark(body->yjit_payload);
+#endif
+        }
     }
 
-    if (FL_TEST_RAW((VALUE)iseq, ISEQ_NOT_LOADED_YET)) {
-        rb_gc_mark(iseq->aux.loader.obj);
-    }
-    else if (FL_TEST_RAW((VALUE)iseq, ISEQ_USE_COMPILE_DATA)) {
-        const struct iseq_compile_data *const compile_data = ISEQ_COMPILE_DATA(iseq);
+    // TODO: make these not pinned
+    if (!reference_updating) {
+        if (FL_TEST_RAW((VALUE)iseq, ISEQ_NOT_LOADED_YET)) {
+            rb_gc_mark(iseq->aux.loader.obj);
+        }
+        else if (FL_TEST_RAW((VALUE)iseq, ISEQ_USE_COMPILE_DATA)) {
+            const struct iseq_compile_data *const compile_data = ISEQ_COMPILE_DATA(iseq);
 
-        rb_iseq_mark_insn_storage(compile_data->insn.storage_head);
+            rb_iseq_mark_insn_storage(compile_data->insn.storage_head);
 
-        RUBY_MARK_UNLESS_NULL(compile_data->err_info);
-        if (RTEST(compile_data->catch_table_ary)) {
-            rb_gc_mark(compile_data->catch_table_ary);
+            RUBY_MARK_UNLESS_NULL(compile_data->err_info);
+            if (RTEST(compile_data->catch_table_ary)) {
+                rb_gc_mark(compile_data->catch_table_ary);
+            }
+            VM_ASSERT(compile_data != NULL);
        }
-        VM_ASSERT(compile_data != NULL);
-    }
-    else {
-        /* executable */
-        VM_ASSERT(ISEQ_EXECUTABLE_P(iseq));
-        if (iseq->aux.exec.local_hooks) {
-            rb_hook_list_mark(iseq->aux.exec.local_hooks);
+        else {
+            /* executable */
+            VM_ASSERT(ISEQ_EXECUTABLE_P(iseq));
+            if (iseq->aux.exec.local_hooks) {
+                rb_hook_list_mark(iseq->aux.exec.local_hooks);
+            }
        }
    }
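
For orientation, the new entry point is designed to be driven from the GC with the boolean selecting the phase. The call sites sketched below are hypothetical; this page is limited to iseq.c, so the corresponding gc.c changes are not shown.

```c
/* Hypothetical call sites, for illustration only; the real ones live in
 * gc.c and are outside this diff (which is limited to iseq.c). */
static void
gc_mark_iseq(rb_iseq_t *iseq)
{
    rb_iseq_mark_and_update(iseq, false);   /* marking phase */
}

static void
gc_update_iseq_references(rb_iseq_t *iseq)
{
    rb_iseq_mark_and_update(iseq, true);    /* compaction phase */
}
```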