From 9c2919df1419216e596819e9a3e23616d431d8d0 Mon Sep 17 00:00:00 2001
From: "Andrey M. Borodin" <x4mmm@flight.local>
Date: Sat, 23 Jul 2022 14:08:10 +0500
Subject: [PATCH v14 1/3] Refactor amcheck to extract common locking routines
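
Move the code that locks the heap and index relations, switches to the
table owner within a security-restricted operation, and invokes the
actual verification out of verify_nbtree.c into a new amcheck.c, exposed
through a new amcheck.h.  The PageGetItemIdCareful() line pointer
validation helper moves there as well, now taking the relation and the
size of the page opaque data as arguments instead of nbtree-specific
state.  bt_index_check() and bt_index_parent_check() become thin wrappers
that call the shared amcheck_lock_relation_and_check() with
nbtree-specific callbacks.

This is done so that verification functions for other index access
methods (such as GiST) can reuse the same locking and validation logic.
As a side effect, the "cannot verify unlogged index during recovery"
message is now emitted at NOTICE rather than DEBUG1.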

---
 contrib/amcheck/Makefile        |   2 +
 contrib/amcheck/amcheck.c       | 188 +++++++++++++++++++
 contrib/amcheck/amcheck.h       |  27 +++
 contrib/amcheck/meson.build     |   1 +
 contrib/amcheck/verify_nbtree.c | 308 ++++++++------------------------
 5 files changed, 297 insertions(+), 229 deletions(-)
 create mode 100644 contrib/amcheck/amcheck.c
 create mode 100644 contrib/amcheck/amcheck.h

diff --git a/contrib/amcheck/Makefile b/contrib/amcheck/Makefile
index b82f221e50b..f10fd9d89d5 100644
--- a/contrib/amcheck/Makefile
+++ b/contrib/amcheck/Makefile
@@ -3,11 +3,13 @@
 MODULE_big	= amcheck
 OBJS = \
 	$(WIN32RES) \
+	amcheck.o \
 	verify_heapam.o \
 	verify_nbtree.o
 
 EXTENSION = amcheck
 DATA = amcheck--1.2--1.3.sql amcheck--1.1--1.2.sql amcheck--1.0--1.1.sql amcheck--1.0.sql
+
 PGFILEDESC = "amcheck - function for verifying relation integrity"
 
 REGRESS = check check_btree check_heap
diff --git a/contrib/amcheck/amcheck.c b/contrib/amcheck/amcheck.c
new file mode 100644
index 00000000000..0194ef0d7a2
--- /dev/null
+++ b/contrib/amcheck/amcheck.c
@@ -0,0 +1,188 @@
+/*-------------------------------------------------------------------------
+ *
+ * amcheck.c
+ *		Utility functions common to all access methods.
+ *
+ * Copyright (c) 2017-2022, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ *	  contrib/amcheck/amcheck.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "access/genam.h"
+#include "access/table.h"
+#include "access/tableam.h"
+#include "amcheck.h"
+#include "catalog/index.h"
+#include "commands/tablecmds.h"
+
+
+static bool
+amcheck_index_mainfork_expected(Relation rel);
+
+/*
+ * Check if an index relation should have a file for its main relation fork.
+ * Verification uses this to skip unlogged indexes when in hot standby mode,
+ * where there is simply nothing to verify.  We behave as if the relation is
+ * empty.
+ *
+ * NB: Caller should have verified that the relation is suitable for checking
+ * (via the AM-specific "checkable" callback) before calling here.
+ */
+static bool
+amcheck_index_mainfork_expected(Relation rel)
+{
+	if (rel->rd_rel->relpersistence != RELPERSISTENCE_UNLOGGED ||
+		!RecoveryInProgress())
+		return true;
+
+	ereport(NOTICE,
+			(errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
+			 errmsg("cannot verify unlogged index \"%s\" during recovery, skipping",
+					RelationGetRelationName(rel))));
+
+	return false;
+}
+
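+/*
+ * Lock the heap relation and the index with the given lockmode (heap first,
+ * to avoid deadlocks), check that the index is suitable for verification by
+ * calling the AM-specific "checkable" callback, and then run the "check"
+ * callback to do the actual verification, passing "state" through to it.
+ *
+ * The check callback runs as the table owner within a security-restricted
+ * operation; any GUC changes made by index functions are rolled back before
+ * returning.  Unlogged indexes are skipped (with a NOTICE) while in hot
+ * standby mode.
+ */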
+void
+amcheck_lock_relation_and_check(Oid indrelid, IndexCheckableCallback checkable,
+								IndexDoCheckCallback check, LOCKMODE lockmode,
+								void *state)
+{
+	Oid			heapid;
+	Relation	indrel;
+	Relation	heaprel;
+	Oid			save_userid;
+	int			save_sec_context;
+	int			save_nestlevel;
+
+	/*
+	 * We must lock table before index to avoid deadlocks.  However, if the
+	 * passed indrelid isn't an index then IndexGetRelation() will fail.
+	 * Rather than emitting a not-very-helpful error message, postpone
+	 * complaining, expecting that the is-it-an-index test below will fail.
+	 *
+	 * In hot standby mode this will raise an error if the requested lockmode
+	 * cannot be acquired during recovery (as with the ShareLock used for
+	 * parent checks).
+	 */
+	heapid = IndexGetRelation(indrelid, true);
+	if (OidIsValid(heapid))
+	{
+		heaprel = table_open(heapid, lockmode);
+
+		/*
+		 * Switch to the table owner's userid, so that any index functions are
+		 * run as that user.  Also lock down security-restricted operations
+		 * and arrange to make GUC variable changes local to this command.
+		 */
+		GetUserIdAndSecContext(&save_userid, &save_sec_context);
+		SetUserIdAndSecContext(heaprel->rd_rel->relowner,
+							   save_sec_context | SECURITY_RESTRICTED_OPERATION);
+		save_nestlevel = NewGUCNestLevel();
+	}
+	else
+	{
+		heaprel = NULL;
+		/*
+		 * Set these just to suppress "uninitialized variable" warnings from
+		 * "gcc -Og"; see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=78394.
+		 */
+		save_userid = InvalidOid;
+		save_sec_context = -1;
+		save_nestlevel = -1;
+	}
+
+	/*
+	 * Open the target index relations separately (like relation_openrv(), but
+	 * with heap relation locked first to prevent deadlocking).  In hot
+	 * standby mode this will raise an error if the requested lockmode cannot
+	 * be acquired during recovery.
+	 *
+	 * There is no need for the usual indcheckxmin usability horizon test
+	 * here, even in the heapallindexed case, because index undergoing
+	 * verification only needs to have entries for a new transaction snapshot.
+	 * (If this is a parentcheck verification, there is no question about
+	 * committed or recently dead heap tuples lacking index entries due to
+	 * concurrent activity.)
+	 */
+	indrel = index_open(indrelid, lockmode);
+
+	/*
+	 * Since we did the IndexGetRelation call above without any lock, it's
+	 * barely possible that a race against an index drop/recreation could have
+	 * netted us the wrong table.
+	 */
+	if (heaprel == NULL || heapid != IndexGetRelation(indrelid, false))
+		ereport(ERROR,
+				(errcode(ERRCODE_UNDEFINED_TABLE),
+				 errmsg("could not open parent table of index \"%s\"",
+						RelationGetRelationName(indrel))));
+
+	/* Check that the relation is suitable for checking (AM-specific) */
+	checkable(indrel);
+
+	if (amcheck_index_mainfork_expected(indrel))
+		check(indrel, heaprel, state);
+
+	/* Roll back any GUC changes executed by index functions */
+	AtEOXact_GUC(false, save_nestlevel);
+
+	/* Restore userid and security context */
+	SetUserIdAndSecContext(save_userid, save_sec_context);
+
+	/*
+	 * Release locks early. That's ok here because nothing in the called
+	 * routines will trigger shared cache invalidations to be sent, so we can
+	 * relax the usual pattern of only releasing locks after commit.
+	 */
+	index_close(indrel, lockmode);
+	if (heaprel)
+		table_close(heaprel, lockmode);
+}
+
+/*
+ * PageGetItemId() wrapper that validates returned line pointer.
+ *
+ * Buffer page/page item access macros generally trust that line pointers are
+ * not corrupt, which might cause problems for verification itself.  For
+ * example, there is no bounds checking in PageGetItem().  Passing it a
+ * corrupt line pointer can cause it to return a tuple/pointer that is unsafe
+ * to dereference.
+ *
+ * Validating line pointers before tuples avoids undefined behavior and
+ * assertion failures with corrupt indexes, making the verification process
+ * more robust and predictable.
+ *
+ * The caller passes the size of the page's special space ("opaquesize") so
+ * that the bounds check can account for the access method's page opaque
+ * data.
+ */
+ItemId
+PageGetItemIdCareful(Relation rel, BlockNumber block, Page page,
+					 OffsetNumber offset, size_t opaquesize)
+{
+	ItemId		itemid = PageGetItemId(page, offset);
+
+	Assert(opaquesize == MAXALIGN(opaquesize));
+
+	if (ItemIdGetOffset(itemid) + ItemIdGetLength(itemid) >
+		BLCKSZ - MAXALIGN(opaquesize))
+		ereport(ERROR,
+				(errcode(ERRCODE_INDEX_CORRUPTED),
+				 errmsg("line pointer points past end of tuple space in index \"%s\"",
+						RelationGetRelationName(rel)),
+				 errdetail_internal("Index tid=(%u,%u) lp_off=%u, lp_len=%u lp_flags=%u.",
+									block, offset, ItemIdGetOffset(itemid),
+									ItemIdGetLength(itemid),
+									ItemIdGetFlags(itemid))));
+
+	/*
+	 * Verify that line pointer isn't LP_REDIRECT or LP_UNUSED, since nbtree and gist
+	 * Verify that line pointer isn't LP_REDIRECT or LP_UNUSED, since nbtree
+	 * and gist never use either.  Verify that line pointer has storage, too,
+	 * since even LP_DEAD items should.
+	if (ItemIdIsRedirected(itemid) || !ItemIdIsUsed(itemid) ||
+		ItemIdGetLength(itemid) == 0)
+		ereport(ERROR,
+				(errcode(ERRCODE_INDEX_CORRUPTED),
+				 errmsg("invalid line pointer storage in index \"%s\"",
+						RelationGetRelationName(rel)),
+				 errdetail_internal("Index tid=(%u,%u) lp_off=%u, lp_len=%u lp_flags=%u.",
+									block, offset, ItemIdGetOffset(itemid),
+									ItemIdGetLength(itemid),
+									ItemIdGetFlags(itemid))));
+
+	return itemid;
+}
diff --git a/contrib/amcheck/amcheck.h b/contrib/amcheck/amcheck.h
new file mode 100644
index 00000000000..10906efd8a5
--- /dev/null
+++ b/contrib/amcheck/amcheck.h
@@ -0,0 +1,27 @@
+/*-------------------------------------------------------------------------
+ *
+ * amcheck.h
+ *		Shared routines for amcheck verifications.
+ *
+ * Copyright (c) 2017-2022, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ *	  contrib/amcheck/amcheck.h
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "storage/lockdefs.h"
+#include "utils/relcache.h"
+#include "miscadmin.h"
+
+/* Typedefs for callback functions for amcheck_lock_relation_and_check() */
+typedef void (*IndexCheckableCallback) (Relation index);
+typedef void (*IndexDoCheckCallback) (Relation rel, Relation heaprel, void *state);
+
+extern void amcheck_lock_relation_and_check(Oid indrelid,
+											IndexCheckableCallback checkable,
+											IndexDoCheckCallback check,
+											LOCKMODE lockmode, void *state);
+
+extern ItemId PageGetItemIdCareful(Relation rel, BlockNumber block,
+								   Page page, OffsetNumber offset,
+								   size_t opaquesize);
+
+#endif							/* AMCHECK_H */
diff --git a/contrib/amcheck/meson.build b/contrib/amcheck/meson.build
index 1db3d20349e..227e68ff834 100644
--- a/contrib/amcheck/meson.build
+++ b/contrib/amcheck/meson.build
@@ -1,4 +1,5 @@
 amcheck = shared_module('amcheck', [
+    'amcheck.c',
     'verify_heapam.c',
     'verify_nbtree.c',
   ],
diff --git a/contrib/amcheck/verify_nbtree.c b/contrib/amcheck/verify_nbtree.c
index 9021d156eb7..950014f19d5 100644
--- a/contrib/amcheck/verify_nbtree.c
+++ b/contrib/amcheck/verify_nbtree.c
@@ -41,6 +41,8 @@
 #include "utils/memutils.h"
 #include "utils/snapmgr.h"
 
+#include "amcheck.h"
+
 
 PG_MODULE_MAGIC;
 
@@ -138,10 +140,8 @@ typedef struct BtreeLevel
 PG_FUNCTION_INFO_V1(bt_index_check);
 PG_FUNCTION_INFO_V1(bt_index_parent_check);
 
-static void bt_index_check_internal(Oid indrelid, bool parentcheck,
-									bool heapallindexed, bool rootdescend);
+static void bt_index_check_internal_callback(Relation indrel, Relation heaprel, void *state);
 static inline void btree_index_checkable(Relation rel);
-static inline bool btree_index_mainfork_expected(Relation rel);
 static void bt_check_every_level(Relation rel, Relation heaprel,
 								 bool heapkeyspace, bool readonly, bool heapallindexed,
 								 bool rootdescend);
@@ -184,12 +184,17 @@ static inline bool invariant_l_nontarget_offset(BtreeCheckState *state,
 static Page palloc_btree_page(BtreeCheckState *state, BlockNumber blocknum);
 static inline BTScanInsert bt_mkscankey_pivotsearch(Relation rel,
 													IndexTuple itup);
-static ItemId PageGetItemIdCareful(BtreeCheckState *state, BlockNumber block,
-								   Page page, OffsetNumber offset);
 static inline ItemPointer BTreeTupleGetHeapTIDCareful(BtreeCheckState *state,
 													  IndexTuple itup, bool nonpivot);
 static inline ItemPointer BTreeTupleGetPointsToTID(IndexTuple itup);
 
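+/*
+ * Arguments passed through amcheck_lock_relation_and_check() to
+ * bt_index_check_internal_callback().
+ */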
+typedef struct BTCheckCallbackState
+{
+	bool parentcheck;
+	bool heapallindexed;
+	bool rootdescend;
+} BTCheckCallbackState;
+
 /*
  * bt_index_check(index regclass, heapallindexed boolean)
  *
@@ -203,12 +208,17 @@ Datum
 bt_index_check(PG_FUNCTION_ARGS)
 {
 	Oid			indrelid = PG_GETARG_OID(0);
-	bool		heapallindexed = false;
+	BTCheckCallbackState args;
 
-	if (PG_NARGS() == 2)
-		heapallindexed = PG_GETARG_BOOL(1);
+	args.heapallindexed = false;
+	args.rootdescend = false;
+	args.parentcheck = false;
 
-	bt_index_check_internal(indrelid, false, heapallindexed, false);
+	if (PG_NARGS() >= 2)
+		args.heapallindexed = PG_GETARG_BOOL(1);
+
+	amcheck_lock_relation_and_check(indrelid, btree_index_checkable,
+		bt_index_check_internal_callback, AccessShareLock, &args);
 
 	PG_RETURN_VOID();
 }
@@ -226,15 +236,18 @@ Datum
 bt_index_parent_check(PG_FUNCTION_ARGS)
 {
 	Oid			indrelid = PG_GETARG_OID(0);
-	bool		heapallindexed = false;
-	bool		rootdescend = false;
+	BTCheckCallbackState args;
+
+	args.heapallindexed = false;
+	args.rootdescend = false;
+	args.parentcheck = true;
 
 	if (PG_NARGS() >= 2)
-		heapallindexed = PG_GETARG_BOOL(1);
+		args.heapallindexed = PG_GETARG_BOOL(1);
 	if (PG_NARGS() == 3)
-		rootdescend = PG_GETARG_BOOL(2);
+		args.rootdescend = PG_GETARG_BOOL(2);
 
-	bt_index_check_internal(indrelid, true, heapallindexed, rootdescend);
+	amcheck_lock_relation_and_check(indrelid, btree_index_checkable,
+		bt_index_check_internal_callback, ShareLock, &args);
 
 	PG_RETURN_VOID();
 }
@@ -242,126 +255,35 @@ bt_index_parent_check(PG_FUNCTION_ARGS)
 /*
  * Helper for bt_index_[parent_]check, coordinating the bulk of the work.
  */
-static void
-bt_index_check_internal(Oid indrelid, bool parentcheck, bool heapallindexed,
-						bool rootdescend)
+static void
+bt_index_check_internal_callback(Relation indrel, Relation heaprel, void *state)
 {
-	Oid			heapid;
-	Relation	indrel;
-	Relation	heaprel;
-	LOCKMODE	lockmode;
-	Oid			save_userid;
-	int			save_sec_context;
-	int			save_nestlevel;
-
-	if (parentcheck)
-		lockmode = ShareLock;
-	else
-		lockmode = AccessShareLock;
-
-	/*
-	 * We must lock table before index to avoid deadlocks.  However, if the
-	 * passed indrelid isn't an index then IndexGetRelation() will fail.
-	 * Rather than emitting a not-very-helpful error message, postpone
-	 * complaining, expecting that the is-it-an-index test below will fail.
-	 *
-	 * In hot standby mode this will raise an error when parentcheck is true.
-	 */
-	heapid = IndexGetRelation(indrelid, true);
-	if (OidIsValid(heapid))
-	{
-		heaprel = table_open(heapid, lockmode);
-
-		/*
-		 * Switch to the table owner's userid, so that any index functions are
-		 * run as that user.  Also lock down security-restricted operations
-		 * and arrange to make GUC variable changes local to this command.
-		 */
-		GetUserIdAndSecContext(&save_userid, &save_sec_context);
-		SetUserIdAndSecContext(heaprel->rd_rel->relowner,
-							   save_sec_context | SECURITY_RESTRICTED_OPERATION);
-		save_nestlevel = NewGUCNestLevel();
-	}
-	else
-	{
-		heaprel = NULL;
-		/* Set these just to suppress "uninitialized variable" warnings */
-		save_userid = InvalidOid;
-		save_sec_context = -1;
-		save_nestlevel = -1;
-	}
-
-	/*
-	 * Open the target index relations separately (like relation_openrv(), but
-	 * with heap relation locked first to prevent deadlocking).  In hot
-	 * standby mode this will raise an error when parentcheck is true.
-	 *
-	 * There is no need for the usual indcheckxmin usability horizon test
-	 * here, even in the heapallindexed case, because index undergoing
-	 * verification only needs to have entries for a new transaction snapshot.
-	 * (If this is a parentcheck verification, there is no question about
-	 * committed or recently dead heap tuples lacking index entries due to
-	 * concurrent activity.)
-	 */
-	indrel = index_open(indrelid, lockmode);
-
-	/*
-	 * Since we did the IndexGetRelation call above without any lock, it's
-	 * barely possible that a race against an index drop/recreation could have
-	 * netted us the wrong table.
-	 */
-	if (heaprel == NULL || heapid != IndexGetRelation(indrelid, false))
-		ereport(ERROR,
-				(errcode(ERRCODE_UNDEFINED_TABLE),
-				 errmsg("could not open parent table of index \"%s\"",
-						RelationGetRelationName(indrel))));
-
-	/* Relation suitable for checking as B-Tree? */
-	btree_index_checkable(indrel);
-
-	if (btree_index_mainfork_expected(indrel))
-	{
-		bool		heapkeyspace,
+	BTCheckCallbackState *args = (BTCheckCallbackState *) state;
+	bool		heapkeyspace,
 					allequalimage;
 
-		if (!smgrexists(RelationGetSmgr(indrel), MAIN_FORKNUM))
-			ereport(ERROR,
-					(errcode(ERRCODE_INDEX_CORRUPTED),
-					 errmsg("index \"%s\" lacks a main relation fork",
-							RelationGetRelationName(indrel))));
+	if (!smgrexists(RelationGetSmgr(indrel), MAIN_FORKNUM))
+		ereport(ERROR,
+				(errcode(ERRCODE_INDEX_CORRUPTED),
+					errmsg("index \"%s\" lacks a main relation fork",
+						RelationGetRelationName(indrel))));
 
-		/* Extract metadata from metapage, and sanitize it in passing */
-		_bt_metaversion(indrel, &heapkeyspace, &allequalimage);
-		if (allequalimage && !heapkeyspace)
-			ereport(ERROR,
-					(errcode(ERRCODE_INDEX_CORRUPTED),
-					 errmsg("index \"%s\" metapage has equalimage field set on unsupported nbtree version",
-							RelationGetRelationName(indrel))));
-		if (allequalimage && !_bt_allequalimage(indrel, false))
-			ereport(ERROR,
-					(errcode(ERRCODE_INDEX_CORRUPTED),
-					 errmsg("index \"%s\" metapage incorrectly indicates that deduplication is safe",
-							RelationGetRelationName(indrel))));
+	/* Extract metadata from metapage, and sanitize it in passing */
+	_bt_metaversion(indrel, &heapkeyspace, &allequalimage);
+	if (allequalimage && !heapkeyspace)
+		ereport(ERROR,
+				(errcode(ERRCODE_INDEX_CORRUPTED),
+					errmsg("index \"%s\" metapage has equalimage field set on unsupported nbtree version",
+						RelationGetRelationName(indrel))));
+	if (allequalimage && !_bt_allequalimage(indrel, false))
+		ereport(ERROR,
+				(errcode(ERRCODE_INDEX_CORRUPTED),
+					errmsg("index \"%s\" metapage incorrectly indicates that deduplication is safe",
+						RelationGetRelationName(indrel))));
 
-		/* Check index, possibly against table it is an index on */
-		bt_check_every_level(indrel, heaprel, heapkeyspace, parentcheck,
-							 heapallindexed, rootdescend);
-	}
+	/* Check index, possibly against table it is an index on */
+	bt_check_every_level(indrel, heaprel, heapkeyspace, args->parentcheck,
+							args->heapallindexed, args->rootdescend);
 
-	/* Roll back any GUC changes executed by index functions */
-	AtEOXact_GUC(false, save_nestlevel);
-
-	/* Restore userid and security context */
-	SetUserIdAndSecContext(save_userid, save_sec_context);
-
-	/*
-	 * Release locks early. That's ok here because nothing in the called
-	 * routines will trigger shared cache invalidations to be sent, so we can
-	 * relax the usual pattern of only releasing locks after commit.
-	 */
-	index_close(indrel, lockmode);
-	if (heaprel)
-		table_close(heaprel, lockmode);
 }
 
 /*
@@ -398,29 +320,6 @@ btree_index_checkable(Relation rel)
 				 errdetail("Index is not valid.")));
 }
 
-/*
- * Check if B-Tree index relation should have a file for its main relation
- * fork.  Verification uses this to skip unlogged indexes when in hot standby
- * mode, where there is simply nothing to verify.  We behave as if the
- * relation is empty.
- *
- * NB: Caller should call btree_index_checkable() before calling here.
- */
-static inline bool
-btree_index_mainfork_expected(Relation rel)
-{
-	if (rel->rd_rel->relpersistence != RELPERSISTENCE_UNLOGGED ||
-		!RecoveryInProgress())
-		return true;
-
-	ereport(DEBUG1,
-			(errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
-			 errmsg("cannot verify unlogged index \"%s\" during recovery, skipping",
-					RelationGetRelationName(rel))));
-
-	return false;
-}
-
 /*
  * Main entry point for B-Tree SQL-callable functions. Walks the B-Tree in
  * logical order, verifying invariants as it goes.  Optionally, verification
@@ -793,9 +692,9 @@ bt_check_level_from_leftmost(BtreeCheckState *state, BtreeLevel level)
 				ItemId		itemid;
 
 				/* Internal page -- downlink gets leftmost on next level */
-				itemid = PageGetItemIdCareful(state, state->targetblock,
+				itemid = PageGetItemIdCareful(state->rel, state->targetblock,
 											  state->target,
-											  P_FIRSTDATAKEY(opaque));
+											  P_FIRSTDATAKEY(opaque), sizeof(BTPageOpaqueData));
 				itup = (IndexTuple) PageGetItem(state->target, itemid);
 				nextleveldown.leftmost = BTreeTupleGetDownLink(itup);
 				nextleveldown.level = opaque->btpo_level - 1;
@@ -875,8 +774,8 @@ nextpage:
 			IndexTuple	itup;
 			ItemId		itemid;
 
-			itemid = PageGetItemIdCareful(state, state->targetblock,
-										  state->target, P_HIKEY);
+			itemid = PageGetItemIdCareful(state->rel, state->targetblock,
+										  state->target, P_HIKEY, sizeof(BTPageOpaqueData));
 			itup = (IndexTuple) PageGetItem(state->target, itemid);
 
 			state->lowkey = MemoryContextAlloc(oldcontext, IndexTupleSize(itup));
@@ -1093,8 +992,8 @@ bt_target_page_check(BtreeCheckState *state)
 		IndexTuple	itup;
 
 		/* Verify line pointer before checking tuple */
-		itemid = PageGetItemIdCareful(state, state->targetblock,
-									  state->target, P_HIKEY);
+		itemid = PageGetItemIdCareful(state->rel, state->targetblock,
+									  state->target, P_HIKEY, sizeof(BTPageOpaqueData));
 		if (!_bt_check_natts(state->rel, state->heapkeyspace, state->target,
 							 P_HIKEY))
 		{
@@ -1129,8 +1028,8 @@ bt_target_page_check(BtreeCheckState *state)
 
 		CHECK_FOR_INTERRUPTS();
 
-		itemid = PageGetItemIdCareful(state, state->targetblock,
-									  state->target, offset);
+		itemid = PageGetItemIdCareful(state->rel, state->targetblock,
+									  state->target, offset, sizeof(BTPageOpaqueData));
 		itup = (IndexTuple) PageGetItem(state->target, itemid);
 		tupsize = IndexTupleSize(itup);
 
@@ -1442,9 +1341,9 @@ bt_target_page_check(BtreeCheckState *state)
 							 OffsetNumberNext(offset));
 
 			/* Reuse itup to get pointed-to heap location of second item */
-			itemid = PageGetItemIdCareful(state, state->targetblock,
+			itemid = PageGetItemIdCareful(state->rel, state->targetblock,
 										  state->target,
-										  OffsetNumberNext(offset));
+										  OffsetNumberNext(offset), sizeof(BTPageOpaqueData));
 			itup = (IndexTuple) PageGetItem(state->target, itemid);
 			tid = BTreeTupleGetPointsToTID(itup);
 			nhtid = psprintf("(%u,%u)",
@@ -1735,8 +1634,8 @@ bt_right_page_check_scankey(BtreeCheckState *state)
 	if (P_ISLEAF(opaque) && nline >= P_FIRSTDATAKEY(opaque))
 	{
 		/* Return first data item (if any) */
-		rightitem = PageGetItemIdCareful(state, targetnext, rightpage,
-										 P_FIRSTDATAKEY(opaque));
+		rightitem = PageGetItemIdCareful(state->rel, targetnext, rightpage,
+										 P_FIRSTDATAKEY(opaque), sizeof(BTPageOpaqueData));
 	}
 	else if (!P_ISLEAF(opaque) &&
 			 nline >= OffsetNumberNext(P_FIRSTDATAKEY(opaque)))
@@ -1745,8 +1644,8 @@ bt_right_page_check_scankey(BtreeCheckState *state)
 		 * Return first item after the internal page's "negative infinity"
 		 * item
 		 */
-		rightitem = PageGetItemIdCareful(state, targetnext, rightpage,
-										 OffsetNumberNext(P_FIRSTDATAKEY(opaque)));
+		rightitem = PageGetItemIdCareful(state->rel, targetnext, rightpage,
+										 OffsetNumberNext(P_FIRSTDATAKEY(opaque)), sizeof(BTPageOpaqueData));
 	}
 	else
 	{
@@ -1865,8 +1764,8 @@ bt_child_highkey_check(BtreeCheckState *state,
 
 	if (OffsetNumberIsValid(target_downlinkoffnum))
 	{
-		itemid = PageGetItemIdCareful(state, state->targetblock,
-									  state->target, target_downlinkoffnum);
+		itemid = PageGetItemIdCareful(state->rel, state->targetblock,
+									  state->target, target_downlinkoffnum, sizeof(BTPageOpaqueData));
 		itup = (IndexTuple) PageGetItem(state->target, itemid);
 		downlink = BTreeTupleGetDownLink(itup);
 	}
@@ -1969,7 +1868,7 @@ bt_child_highkey_check(BtreeCheckState *state,
 			OffsetNumber pivotkey_offset;
 
 			/* Get high key */
-			itemid = PageGetItemIdCareful(state, blkno, page, P_HIKEY);
+			itemid = PageGetItemIdCareful(state->rel, blkno, page, P_HIKEY, sizeof(BTPageOpaqueData));
 			highkey = (IndexTuple) PageGetItem(page, itemid);
 
 			/*
@@ -2020,8 +1919,8 @@ bt_child_highkey_check(BtreeCheckState *state,
 													LSN_FORMAT_ARGS(state->targetlsn))));
 					pivotkey_offset = P_HIKEY;
 				}
-				itemid = PageGetItemIdCareful(state, state->targetblock,
-											  state->target, pivotkey_offset);
+				itemid = PageGetItemIdCareful(state->rel, state->targetblock,
+											  state->target, pivotkey_offset, sizeof(BTPageOpaqueData));
 				itup = (IndexTuple) PageGetItem(state->target, itemid);
 			}
 			else
@@ -2107,8 +2006,8 @@ bt_child_check(BtreeCheckState *state, BTScanInsert targetkey,
 	BTPageOpaque copaque;
 	BTPageOpaque topaque;
 
-	itemid = PageGetItemIdCareful(state, state->targetblock,
-								  state->target, downlinkoffnum);
+	itemid = PageGetItemIdCareful(state->rel, state->targetblock,
+								  state->target, downlinkoffnum, sizeof(BTPageOpaqueData));
 	itup = (IndexTuple) PageGetItem(state->target, itemid);
 	childblock = BTreeTupleGetDownLink(itup);
 
@@ -2339,7 +2238,7 @@ bt_downlink_missing_check(BtreeCheckState *state, bool rightsplit,
 		 RelationGetRelationName(state->rel));
 
 	level = opaque->btpo_level;
-	itemid = PageGetItemIdCareful(state, blkno, page, P_FIRSTDATAKEY(opaque));
+	itemid = PageGetItemIdCareful(state->rel, blkno, page, P_FIRSTDATAKEY(opaque), sizeof(BTPageOpaqueData));
 	itup = (IndexTuple) PageGetItem(page, itemid);
 	childblk = BTreeTupleGetDownLink(itup);
 	for (;;)
@@ -2363,8 +2262,8 @@ bt_downlink_missing_check(BtreeCheckState *state, bool rightsplit,
 										level - 1, copaque->btpo_level)));
 
 		level = copaque->btpo_level;
-		itemid = PageGetItemIdCareful(state, childblk, child,
-									  P_FIRSTDATAKEY(copaque));
+		itemid = PageGetItemIdCareful(state->rel, childblk, child,
+									  P_FIRSTDATAKEY(copaque), sizeof(BTPageOpaqueData));
 		itup = (IndexTuple) PageGetItem(child, itemid);
 		childblk = BTreeTupleGetDownLink(itup);
 		/* Be slightly more pro-active in freeing this memory, just in case */
@@ -2412,7 +2311,7 @@ bt_downlink_missing_check(BtreeCheckState *state, bool rightsplit,
 	 */
 	if (P_ISHALFDEAD(copaque) && !P_RIGHTMOST(copaque))
 	{
-		itemid = PageGetItemIdCareful(state, childblk, child, P_HIKEY);
+		itemid = PageGetItemIdCareful(state->rel, childblk, child, P_HIKEY, sizeof(BTPageOpaqueData));
 		itup = (IndexTuple) PageGetItem(child, itemid);
 		if (BTreeTupleGetTopParent(itup) == blkno)
 			return;
@@ -2782,8 +2681,8 @@ invariant_l_offset(BtreeCheckState *state, BTScanInsert key,
 	Assert(key->pivotsearch);
 
 	/* Verify line pointer before checking tuple */
-	itemid = PageGetItemIdCareful(state, state->targetblock, state->target,
-								  upperbound);
+	itemid = PageGetItemIdCareful(state->rel, state->targetblock, state->target,
+								  upperbound, sizeof(BTPageOpaqueData));
 	/* pg_upgrade'd indexes may legally have equal sibling tuples */
 	if (!key->heapkeyspace)
 		return invariant_leq_offset(state, key, upperbound);
@@ -2905,8 +2804,8 @@ invariant_l_nontarget_offset(BtreeCheckState *state, BTScanInsert key,
 	Assert(key->pivotsearch);
 
 	/* Verify line pointer before checking tuple */
-	itemid = PageGetItemIdCareful(state, nontargetblock, nontarget,
-								  upperbound);
+	itemid = PageGetItemIdCareful(state->rel, nontargetblock, nontarget,
+								  upperbound, sizeof(BTPageOpaqueData));
 	cmp = _bt_compare(state->rel, key, nontarget, upperbound);
 
 	/* pg_upgrade'd indexes may legally have equal sibling tuples */
@@ -3143,55 +3042,6 @@ bt_mkscankey_pivotsearch(Relation rel, IndexTuple itup)
 	return skey;
 }
 
-/*
- * PageGetItemId() wrapper that validates returned line pointer.
- *
- * Buffer page/page item access macros generally trust that line pointers are
- * not corrupt, which might cause problems for verification itself.  For
- * example, there is no bounds checking in PageGetItem().  Passing it a
- * corrupt line pointer can cause it to return a tuple/pointer that is unsafe
- * to dereference.
- *
- * Validating line pointers before tuples avoids undefined behavior and
- * assertion failures with corrupt indexes, making the verification process
- * more robust and predictable.
- */
-static ItemId
-PageGetItemIdCareful(BtreeCheckState *state, BlockNumber block, Page page,
-					 OffsetNumber offset)
-{
-	ItemId		itemid = PageGetItemId(page, offset);
-
-	if (ItemIdGetOffset(itemid) + ItemIdGetLength(itemid) >
-		BLCKSZ - MAXALIGN(sizeof(BTPageOpaqueData)))
-		ereport(ERROR,
-				(errcode(ERRCODE_INDEX_CORRUPTED),
-				 errmsg("line pointer points past end of tuple space in index \"%s\"",
-						RelationGetRelationName(state->rel)),
-				 errdetail_internal("Index tid=(%u,%u) lp_off=%u, lp_len=%u lp_flags=%u.",
-									block, offset, ItemIdGetOffset(itemid),
-									ItemIdGetLength(itemid),
-									ItemIdGetFlags(itemid))));
-
-	/*
-	 * Verify that line pointer isn't LP_REDIRECT or LP_UNUSED, since nbtree
-	 * never uses either.  Verify that line pointer has storage, too, since
-	 * even LP_DEAD items should within nbtree.
-	 */
-	if (ItemIdIsRedirected(itemid) || !ItemIdIsUsed(itemid) ||
-		ItemIdGetLength(itemid) == 0)
-		ereport(ERROR,
-				(errcode(ERRCODE_INDEX_CORRUPTED),
-				 errmsg("invalid line pointer storage in index \"%s\"",
-						RelationGetRelationName(state->rel)),
-				 errdetail_internal("Index tid=(%u,%u) lp_off=%u, lp_len=%u lp_flags=%u.",
-									block, offset, ItemIdGetOffset(itemid),
-									ItemIdGetLength(itemid),
-									ItemIdGetFlags(itemid))));
-
-	return itemid;
-}
-
 /*
  * BTreeTupleGetHeapTID() wrapper that enforces that a heap TID is present in
  * cases where that is mandatory (i.e. for non-pivot tuples)
-- 
2.37.3.542.gdd3f6c4cae

