author | Jeremy Evans <[email protected]> | 2024-01-26 12:29:37 -0800
committer | Jeremy Evans <[email protected]> | 2024-01-27 13:02:42 -0800
commit | a591e11a7a6f6749044b6bef9c35633ad1930db5 (patch)
tree | f5a20bb0b402c2da45906563e45422cfbeed743e /compile.c
parent | f12ebe11888d9fdd121c98ca8a5155dc044f4cf4 (diff)
Eliminate 1-2 array allocations for each splat used in an op_asgn method
Given code such as:
```ruby
h[*a, 1] += 1
h[*b] += 2
```
Ruby would previously allocate 5 arrays:
* splatarray true for a
* newarray for 1
* concatarray for [*a] and [1]
* newarray for 2
* concatarray for [*b] and [2]
This optimizes it to only allocate 2 arrays:
* splatarray true for a
* splatarray true for b
Instead of the newarray/concatarray combination, pushtoarray is used.
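A quick way to observe this from Ruby itself (a minimal sketch; the
exact disassembly output varies by Ruby version):
```ruby
# Compile (without running) the examples and dump their bytecode.
# On a build that includes this change, expect splatarray true
# followed by pushtoarray, with no newarray/concatarray pair.
puts RubyVM::InstructionSequence.compile("h[*a, 1] += 1").disasm
puts RubyVM::InstructionSequence.compile("h[*b] += 2").disasm
```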
Note above that there was no splatarray true for b originally; the
normal compilation uses splatarray false for b. Instead of trying
to find and modify that splatarray false into a splatarray true,
this adds a splatarray true for b just before the pushtoarray,
which requires a couple of swap instructions around it. This could
be further optimized to remove the need for those three instructions
(swap, splatarray true, swap), but I'm not sure the complexity is
worth it.
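For reference, my reading of the resulting stack effect in the h[*b]
case (a sketch of the instruction sequence, not verbatim disasm):
```
...  ary, val          # ary from splatarray false, val to append
swap                   # -> ...  val, ary
splatarray true        # -> ...  val, ary'  (mutable copy)
swap                   # -> ...  ary', val
pushtoarray 1          # -> ...  ary'       (val appended in place)
```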
Additionally, this sets VM_CALL_ARGS_SPLAT_MUT on the call to
[]= in the h[*b] case, so that if []= has a splat parameter, the
new array can be used directly, without additional duplication.
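To illustrate the splat-parameter case (SparseTable is a hypothetical
example class, not part of the commit):
```ruby
# Hypothetical receiver whose []= takes a splat parameter. Because the
# call site sets VM_CALL_ARGS_SPLAT_MUT, the splat array built for
# h[*b] += 2 can be handed to keys without an additional dup.
class SparseTable
  def initialize
    @h = {}
  end

  def [](*keys)
    @h[keys]
  end

  def []=(*keys, value)
    @h[keys] = value
  end
end

h = SparseTable.new
b = [1, 2]
h[*b] = 0
h[*b] += 2   # the h[*b] case from the commit message
p h[*b]      # => 2
```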
Diffstat (limited to 'compile.c')
-rw-r--r-- | compile.c | 32
1 file changed, 24 insertions(+), 8 deletions(-)
```diff
@@ -9091,20 +9091,28 @@ compile_op_asgn1(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *const node
     if (flag & VM_CALL_ARGS_SPLAT) {
         if (flag & VM_CALL_KW_SPLAT) {
             ADD_INSN1(ret, node, topn, INT2FIX(2 + boff));
+            if (!(flag & VM_CALL_ARGS_SPLAT_MUT)) {
+                ADD_INSN1(ret, node, splatarray, Qtrue);
+                flag |= VM_CALL_ARGS_SPLAT_MUT;
+            }
             ADD_INSN(ret, node, swap);
-            ADD_INSN1(ret, node, newarray, INT2FIX(1));
-            ADD_INSN(ret, node, concatarray);
+            ADD_INSN1(ret, node, pushtoarray, INT2FIX(1));
             ADD_INSN1(ret, node, setn, INT2FIX(2 + boff));
             ADD_INSN(ret, node, pop);
         }
         else {
-            ADD_INSN1(ret, node, newarray, INT2FIX(1));
             if (boff > 0) {
                 ADD_INSN1(ret, node, dupn, INT2FIX(3));
                 ADD_INSN(ret, node, swap);
                 ADD_INSN(ret, node, pop);
             }
-            ADD_INSN(ret, node, concatarray);
+            if (!(flag & VM_CALL_ARGS_SPLAT_MUT)) {
+                ADD_INSN(ret, node, swap);
+                ADD_INSN1(ret, node, splatarray, Qtrue);
+                ADD_INSN(ret, node, swap);
+                flag |= VM_CALL_ARGS_SPLAT_MUT;
+            }
+            ADD_INSN1(ret, node, pushtoarray, INT2FIX(1));
             if (boff > 0) {
                 ADD_INSN1(ret, node, setn, INT2FIX(3));
                 ADD_INSN(ret, node, pop);
@@ -9151,20 +9159,28 @@ compile_op_asgn1(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *const node
     if (flag & VM_CALL_ARGS_SPLAT) {
         if (flag & VM_CALL_KW_SPLAT) {
             ADD_INSN1(ret, node, topn, INT2FIX(2 + boff));
+            if (!(flag & VM_CALL_ARGS_SPLAT_MUT)) {
+                ADD_INSN1(ret, node, splatarray, Qtrue);
+                flag |= VM_CALL_ARGS_SPLAT_MUT;
+            }
             ADD_INSN(ret, node, swap);
-            ADD_INSN1(ret, node, newarray, INT2FIX(1));
-            ADD_INSN(ret, node, concatarray);
+            ADD_INSN1(ret, node, pushtoarray, INT2FIX(1));
             ADD_INSN1(ret, node, setn, INT2FIX(2 + boff));
             ADD_INSN(ret, node, pop);
         }
         else {
-            ADD_INSN1(ret, node, newarray, INT2FIX(1));
             if (boff > 0) {
                 ADD_INSN1(ret, node, dupn, INT2FIX(3));
                 ADD_INSN(ret, node, swap);
                 ADD_INSN(ret, node, pop);
             }
-            ADD_INSN(ret, node, concatarray);
+            if (!(flag & VM_CALL_ARGS_SPLAT_MUT)) {
+                ADD_INSN(ret, node, swap);
+                ADD_INSN1(ret, node, splatarray, Qtrue);
+                ADD_INSN(ret, node, swap);
+                flag |= VM_CALL_ARGS_SPLAT_MUT;
+            }
+            ADD_INSN1(ret, node, pushtoarray, INT2FIX(1));
             if (boff > 0) {
                 ADD_INSN1(ret, node, setn, INT2FIX(3));
                 ADD_INSN(ret, node, pop);
```