author		Andres Freund	2025-04-02 18:25:17 +0000
committer	Andres Freund	2025-04-02 18:50:03 +0000
commit		77d90d6d6334cbd0a423637e4306727bce2437f1 (patch)
tree		15c2e12fe455a99f3ce86d7ef00983c656be2cf9
parent		2d6cfb0cddd35d724da4441c57e1c41a3991cbcb (diff)
Remove HeapBitmapScan's skip_fetch optimization
The optimization does not take the removal of TIDs by a concurrent
vacuum into account. The concurrent vacuum can remove dead TIDs and
make pages ALL_VISIBLE while those dead TIDs are referenced in the
bitmap. This can lead to a skip_fetch scan returning too many tuples.

It likely would be possible to implement this optimization safely, but
we don't have the necessary infrastructure in place. Nor is it clear
that it's worth building that infrastructure, given how limited the
skip_fetch optimization is.

In the backbranches we just disable the optimization by always passing
need_tuples=true to table_beginscan_bm(). We can't perform API/ABI
changes in the backbranches and we want to make the change as minimal
as possible.

Author: Matthias van de Meent <[email protected]>
Reported-By: Konstantin Knizhnik <[email protected]>
Discussion: https://postgr.es/m/CAEze2Wg3gXXZTr6_rwC+s4-o2ZVFB5F985uUSgJTsECx6AmGcQ@mail.gmail.com
Backpatch-through: 13
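To make the failure mode concrete, below is a condensed sketch of the
skip-fetch fast path as it looked in the PG 17-era heap AM (paraphrased
for illustration, not a verbatim excerpt; names such as SO_NEED_TUPLES,
VM_ALL_VISIBLE and rs_empty_tuples_pending follow the real code):

	/*
	 * Paraphrased sketch: if no column values are needed and the page
	 * is all-visible, the scan "returns" tbmres->ntuples rows without
	 * ever reading the heap page.
	 */
	if (!(scan->rs_flags & SO_NEED_TUPLES) &&
		!tbmres->recheck &&
		VM_ALL_VISIBLE(scan->rs_rd, tbmres->blockno, &hscan->rs_vmbuffer))
	{
		/*
		 * The race: a concurrent VACUUM can remove some of the TIDs in
		 * tbmres and only then mark the page all-visible.  The bitmap
		 * was built before that, so tbmres->ntuples still counts the
		 * removed TIDs and the scan returns too many tuples.
		 */
		hscan->rs_empty_tuples_pending += tbmres->ntuples;
		return true;		/* block handled without fetching it */
	}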
-rw-r--r--	src/backend/executor/nodeBitmapHeapscan.c	15
1 file changed, 15 insertions(+), 0 deletions(-)
diff --git a/src/backend/executor/nodeBitmapHeapscan.c b/src/backend/executor/nodeBitmapHeapscan.c
index f0ac4e27d95..4b1d0e1ed01 100644
--- a/src/backend/executor/nodeBitmapHeapscan.c
+++ b/src/backend/executor/nodeBitmapHeapscan.c
@@ -743,6 +743,20 @@ ExecInitBitmapHeapScan(BitmapHeapScan *node, EState *estate, int eflags)
 	scanstate->pstate = NULL;
 
 	/*
+	 * Unfortunately it turns out that the below optimization does not
+	 * take the removal of TIDs by a concurrent vacuum into
+	 * account. The concurrent vacuum can remove dead TIDs and make
+	 * pages ALL_VISIBLE while those dead TIDs are referenced in the
+	 * bitmap. This would lead to a !need_tuples scan returning too
+	 * many tuples.
+	 *
+	 * In the back-branches, we therefore simply disable the
+	 * optimization. Removing all the relevant code would be too
+	 * invasive (and a major backpatching pain).
+	 */
+	scanstate->can_skip_fetch = false;
+#ifdef NOT_ANYMORE
+	/*
 	 * We can potentially skip fetching heap pages if we do not need any
 	 * columns of the table, either for checking non-indexable quals or for
 	 * returning data. This test is a bit simplistic, as it checks the
@@ -751,6 +765,7 @@ ExecInitBitmapHeapScan(BitmapHeapScan *node, EState *estate, int eflags)
 	 */
 	scanstate->can_skip_fetch = (node->scan.plan.qual == NIL &&
 								 node->scan.plan.targetlist == NIL);
+#endif
 
 	/*
 	 * Miscellaneous initialization
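Downstream, BitmapHeapNext() turns can_skip_fetch into the need_tuples
argument of table_beginscan_bm(), which is how pinning the flag to
false yields the "always passing need_tuples=true" behaviour the commit
message describes. A simplified sketch of that call site (an
approximation, not the exact back-branch code):

	/*
	 * Approximate call-site sketch: with can_skip_fetch pinned to
	 * false above, need_tuples is always true and the heap AM never
	 * enters the VM-only fast path shown earlier.
	 */
	bool		need_tuples = !node->can_skip_fetch;

	scan = table_beginscan_bm(node->ss.ss_currentRelation,
							  node->ss.ps.state->es_snapshot,
							  0, NULL,
							  need_tuples);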