author     Michael Paquier    2025-04-19 10:17:42 +0000
committer  Michael Paquier    2025-04-19 10:17:42 +0000
commit     88e947136b47664b6936b35542f2d1eda0c90588 (patch)
tree       6a5999ed2088fb1455940632b70a8d452ec56347 /src/backend
parent     114f7fa81c72637d75b574269f2076dcc1104e24 (diff)
Fix typos and grammar in the code
The large majority of these have been introduced by recent commits done
in the v18 development cycle.

Author: Alexander Lakhin <[email protected]>
Discussion: https://postgr.es/m/[email protected]
Diffstat (limited to 'src/backend')
-rw-r--r--    src/backend/access/gin/gininsert.c         10
-rw-r--r--    src/backend/access/nbtree/nbtsearch.c       2
-rw-r--r--    src/backend/access/transam/xlog.c           6
-rw-r--r--    src/backend/catalog/catalog.c               2
-rw-r--r--    src/backend/commands/tablecmds.c            2
-rw-r--r--    src/backend/executor/execMain.c             2
-rw-r--r--    src/backend/executor/execPartition.c        4
-rw-r--r--    src/backend/executor/nodeModifyTable.c      2
-rw-r--r--    src/backend/executor/nodeSeqscan.c          2
-rw-r--r--    src/backend/nodes/queryjumblefuncs.c        2
-rw-r--r--    src/backend/postmaster/postmaster.c         6
-rw-r--r--    src/backend/storage/aio/README.md           2
-rw-r--r--    src/backend/storage/aio/method_worker.c     2
-rw-r--r--    src/backend/storage/buffer/bufmgr.c         2
-rw-r--r--    src/backend/storage/page/bufpage.c          2
-rw-r--r--    src/backend/utils/adt/mcxtfuncs.c           6
-rw-r--r--    src/backend/utils/adt/pg_locale.c           2
-rw-r--r--    src/backend/utils/cache/plancache.c         2
-rw-r--r--    src/backend/utils/mmgr/mcxt.c               2
19 files changed, 30 insertions, 30 deletions
diff --git a/src/backend/access/gin/gininsert.c b/src/backend/access/gin/gininsert.c
index cfab93ec30c..a7b7b5996e3 100644
--- a/src/backend/access/gin/gininsert.c
+++ b/src/backend/access/gin/gininsert.c
@@ -167,7 +167,7 @@ typedef struct
/*
* The sortstate used only within a single worker for the first merge pass
- * happenning there. In principle it doesn't need to be part of the build
+ * happening there. In principle it doesn't need to be part of the build
* state and we could pass it around directly, but it's more convenient
* this way. And it's part of the build state, after all.
*/
@@ -1306,7 +1306,7 @@ GinBufferIsEmpty(GinBuffer *buffer)
* Compare if the tuple matches the already accumulated data in the GIN
* buffer. Compare scalar fields first, before the actual key.
*
- * Returns true if the key matches, and the TID belonds to the buffer, or
+ * Returns true if the key matches, and the TID belongs to the buffer, or
* false if the key does not match.
*/
static bool
@@ -1497,7 +1497,7 @@ GinBufferStoreTuple(GinBuffer *buffer, GinTuple *tup)
buffer->items = repalloc(buffer->items,
(buffer->nitems + tup->nitems) * sizeof(ItemPointerData));
- new = ginMergeItemPointers(&buffer->items[buffer->nfrozen], /* first unfronzen */
+ new = ginMergeItemPointers(&buffer->items[buffer->nfrozen], /* first unfrozen */
(buffer->nitems - buffer->nfrozen), /* num of unfrozen */
items, tup->nitems, &nnew);
@@ -1531,7 +1531,7 @@ GinBufferReset(GinBuffer *buffer)
pfree(DatumGetPointer(buffer->key));
/*
- * Not required, but makes it more likely to trigger NULL derefefence if
+ * Not required, but makes it more likely to trigger NULL dereference if
* using the value incorrectly, etc.
*/
buffer->key = (Datum) 0;
@@ -1603,7 +1603,7 @@ GinBufferCanAddKey(GinBuffer *buffer, GinTuple *tup)
*
* After waiting for all workers to finish, merge the per-worker results into
* the complete index. The results from each worker are sorted by block number
- * (start of the page range). While combinig the per-worker results we merge
+ * (start of the page range). While combining the per-worker results we merge
* summaries for the same page range, and also fill-in empty summaries for
* ranges without any tuples.
*
diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c
index f69397623df..77264ddeecb 100644
--- a/src/backend/access/nbtree/nbtsearch.c
+++ b/src/backend/access/nbtree/nbtsearch.c
@@ -1792,7 +1792,7 @@ _bt_readpage(IndexScanDesc scan, ScanDirection dir, OffsetNumber offnum,
truncatt = BTreeTupleGetNAtts(itup, rel);
pstate.forcenonrequired = false;
- pstate.startikey = 0; /* _bt_set_startikey ignores HIKEY */
+ pstate.startikey = 0; /* _bt_set_startikey ignores P_HIKEY */
_bt_checkkeys(scan, &pstate, arrayKeys, itup, truncatt);
}
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index ec40c0b7c42..2d4c346473b 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -473,7 +473,7 @@ typedef struct XLogCtlData
XLogRecPtr InitializedFrom;
/*
- * Latest reserved for inititalization page in the cache (last byte
+ * Latest reserved for initialization page in the cache (last byte
* position + 1).
*
* To change the identity of a buffer, you need to advance
@@ -2221,7 +2221,7 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, TimeLineID tli, bool opportunistic)
* m must observe f[k] == false. Otherwise, it will later attempt
* CAS(v, k, k + 1) with success.
* 4. Therefore, corresponding read_barrier() (while j == k) on
- * process m happend before write_barrier() of process k. But then
+ * process m happened before write_barrier() of process k. But then
* process k attempts CAS(v, k, k + 1) after process m successfully
* incremented v to k, and that CAS operation must succeed.
* That leads to a contradiction. So, there is no such k (k < n)
@@ -2253,7 +2253,7 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, TimeLineID tli, bool opportunistic)
if (pg_atomic_read_u64(&XLogCtl->xlblocks[nextidx]) != NewPageEndPtr)
{
/*
- * Page at nextidx wasn't initialized yet, so we cann't move
+ * Page at nextidx wasn't initialized yet, so we can't move
* InitializedUpto further. It will be moved by backend which
* will initialize nextidx.
*/
diff --git a/src/backend/catalog/catalog.c b/src/backend/catalog/catalog.c
index 35ebb0ccda4..60000bd0bc7 100644
--- a/src/backend/catalog/catalog.c
+++ b/src/backend/catalog/catalog.c
@@ -143,7 +143,7 @@ IsCatalogRelationOid(Oid relid)
*
* The relcache must not use these indexes. Inserting into any UNIQUE
* index compares index keys while holding BUFFER_LOCK_EXCLUSIVE.
- * bttextcmp() can search the COLLID catcache. Depending on concurrent
+ * bttextcmp() can search the COLLOID catcache. Depending on concurrent
* invalidation traffic, catcache can reach relcache builds. A backend
* would self-deadlock on LWLocks if the relcache build read the
* exclusive-locked buffer.
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 80f689bbbc5..265b1c397fb 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -11999,7 +11999,7 @@ DropForeignKeyConstraintTriggers(Relation trigrel, Oid conoid, Oid confrelid,
if (OidIsValid(confrelid) && trgform->tgrelid != confrelid)
continue;
- /* We should be droping trigger related to foreign key constraint */
+ /* We should be dropping trigger related to foreign key constraint */
Assert(trgform->tgfoid == F_RI_FKEY_CHECK_INS ||
trgform->tgfoid == F_RI_FKEY_CHECK_UPD ||
trgform->tgfoid == F_RI_FKEY_CASCADE_DEL ||
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index 2da848970be..7230f968101 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -1861,7 +1861,7 @@ ExecRelCheck(ResultRelInfo *resultRelInfo,
MemoryContext oldContext;
/*
- * CheckConstraintFetch let this pass with only a warning, but now we
+ * CheckNNConstraintFetch let this pass with only a warning, but now we
* should fail rather than possibly failing to enforce an important
* constraint.
*/
diff --git a/src/backend/executor/execPartition.c b/src/backend/executor/execPartition.c
index 5a77c253826..9435cc21fe7 100644
--- a/src/backend/executor/execPartition.c
+++ b/src/backend/executor/execPartition.c
@@ -1778,7 +1778,7 @@ adjust_partition_colnos_using_map(List *colnos, AttrMap *attrMap)
* Updates the PartitionPruneState found at given part_prune_index in
* EState.es_part_prune_states for use during "exec" pruning if required.
* Also returns the set of subplans to initialize that would be stored at
- * part_prune_index in EState.es_part_prune_result by
+ * part_prune_index in EState.es_part_prune_results by
* ExecDoInitialPruning(). Maps in PartitionPruneState are updated to
* account for initial pruning possibly having eliminated some of the
* subplans.
@@ -2109,7 +2109,7 @@ CreatePartitionPruneState(EState *estate, PartitionPruneInfo *pruneinfo,
*/
partrel = ExecGetRangeTableRelation(estate, pinfo->rtindex, false);
- /* Remember for InitExecPartitionPruneContext(). */
+ /* Remember for InitExecPartitionPruneContexts(). */
pprune->partrel = partrel;
partkey = RelationGetPartitionKey(partrel);
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index 333cbf78343..46d533b7288 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -1311,7 +1311,7 @@ ExecInsert(ModifyTableContext *context,
/*
* Convert the OLD tuple to the new partition's format/slot, if
- * needed. Note that ExceDelete() already converted it to the
+ * needed. Note that ExecDelete() already converted it to the
* root's partition's format/slot.
*/
oldSlot = context->cpDeletedSlot;
diff --git a/src/backend/executor/nodeSeqscan.c b/src/backend/executor/nodeSeqscan.c
index 6f9e991eeae..ed35c58c2c3 100644
--- a/src/backend/executor/nodeSeqscan.c
+++ b/src/backend/executor/nodeSeqscan.c
@@ -100,7 +100,7 @@ SeqRecheck(SeqScanState *node, TupleTableSlot *slot)
* ExecSeqScan(node)
*
* Scans the relation sequentially and returns the next qualifying
- * tuple. This variant is used when there is no es_eqp_active, no qual
+ * tuple. This variant is used when there is no es_epq_active, no qual
* and no projection. Passing const-NULLs for these to ExecScanExtended
* allows the compiler to eliminate the additional code that would
* ordinarily be required for the evaluation of these.
diff --git a/src/backend/nodes/queryjumblefuncs.c b/src/backend/nodes/queryjumblefuncs.c
index 27fb87d3aaa..d1e82a63f09 100644
--- a/src/backend/nodes/queryjumblefuncs.c
+++ b/src/backend/nodes/queryjumblefuncs.c
@@ -357,7 +357,7 @@ AppendJumble64(JumbleState *jstate, const unsigned char *value)
/*
* FlushPendingNulls
- * Incorporate the pending_null value into the jumble buffer.
+ * Incorporate the pending_nulls value into the jumble buffer.
*
* Note: Callers must ensure that there's at least 1 pending NULL.
*/
diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c
index 17fed96fe20..490f7ce3664 100644
--- a/src/backend/postmaster/postmaster.c
+++ b/src/backend/postmaster/postmaster.c
@@ -2718,7 +2718,7 @@ HandleFatalError(QuitSignalReason reason, bool consider_sigabrt)
/*
* Choose the appropriate new state to react to the fatal error. Unless we
* were already in the process of shutting down, we go through
- * PM_WAIT_BACKEND. For errors during the shutdown sequence, we directly
+ * PM_WAIT_BACKENDS. For errors during the shutdown sequence, we directly
* switch to PM_WAIT_DEAD_END.
*/
switch (pmState)
@@ -3001,7 +3001,7 @@ PostmasterStateMachine(void)
/*
* Stop any dead-end children and stop creating new ones.
*
- * NB: Similar code exists in HandleFatalErrors(), when the
+ * NB: Similar code exists in HandleFatalError(), when the
* error happens in pmState > PM_WAIT_BACKENDS.
*/
UpdatePMState(PM_WAIT_DEAD_END);
@@ -3082,7 +3082,7 @@ PostmasterStateMachine(void)
{
/*
* PM_WAIT_IO_WORKERS state ends when there's only checkpointer and
- * dead_end children left.
+ * dead-end children left.
*/
if (io_worker_count == 0)
{
diff --git a/src/backend/storage/aio/README.md b/src/backend/storage/aio/README.md
index b00de269ad9..f10b5c7e31e 100644
--- a/src/backend/storage/aio/README.md
+++ b/src/backend/storage/aio/README.md
@@ -103,7 +103,7 @@ pgaio_io_set_handle_data_32(ioh, (uint32 *) buffer, 1);
*
* E.g. md.c needs to translate block numbers into offsets in segments.
*
- * Once the IO handle has been handed off to smgstartreadv(), it may not
+ * Once the IO handle has been handed off to smgrstartreadv(), it may not
* further be used, as the IO may immediately get executed below
* smgrstartreadv() and the handle reused for another IO.
*
diff --git a/src/backend/storage/aio/method_worker.c b/src/backend/storage/aio/method_worker.c
index 0fde2a5b30d..6e8b1327946 100644
--- a/src/backend/storage/aio/method_worker.c
+++ b/src/backend/storage/aio/method_worker.c
@@ -321,7 +321,7 @@ pgaio_worker_die(int code, Datum arg)
}
/*
- * Register the worker in shared memory, assign MyWorkerId and register a
+ * Register the worker in shared memory, assign MyIoWorkerId and register a
* shutdown callback to release registration.
*/
static void
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index 1f2a9fe9976..fe0ceeadc13 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -4970,7 +4970,7 @@ FlushRelationBuffers(Relation rel)
ResourceOwnerEnlarge(CurrentResourceOwner);
/*
- * Pin/upin mostly to make valgrind work, but it also seems
+ * Pin/unpin mostly to make valgrind work, but it also seems
* like the right thing to do.
*/
PinLocalBuffer(bufHdr, false);
diff --git a/src/backend/storage/page/bufpage.c b/src/backend/storage/page/bufpage.c
index 82457bacc62..dbb49ed9197 100644
--- a/src/backend/storage/page/bufpage.c
+++ b/src/backend/storage/page/bufpage.c
@@ -88,7 +88,7 @@ PageInit(Page page, Size pageSize, Size specialSize)
* To allow the caller to report statistics about checksum failures,
* *checksum_failure_p can be passed in. Note that there may be checksum
* failures even if this function returns true, due to
- * IGNORE_CHECKSUM_FAILURE.
+ * PIV_IGNORE_CHECKSUM_FAILURE.
*/
bool
PageIsVerified(PageData *page, BlockNumber blkno, int flags, bool *checksum_failure_p)
diff --git a/src/backend/utils/adt/mcxtfuncs.c b/src/backend/utils/adt/mcxtfuncs.c
index 254cdd34fba..206b601a52b 100644
--- a/src/backend/utils/adt/mcxtfuncs.c
+++ b/src/backend/utils/adt/mcxtfuncs.c
@@ -323,8 +323,8 @@ pg_log_backend_memory_contexts(PG_FUNCTION_ARGS)
* Signal a backend or an auxiliary process to send its memory contexts,
* wait for the results and display them.
*
- * By default, only superusers or users with PG_READ_ALL_STATS are allowed to
- * signal a process to return the memory contexts. This is because allowing
+ * By default, only superusers or users with ROLE_PG_READ_ALL_STATS are allowed
+ * to signal a process to return the memory contexts. This is because allowing
* any users to issue this request at an unbounded rate would cause lots of
* requests to be sent, which can lead to denial of service. Additional roles
* can be permitted with GRANT.
@@ -495,7 +495,7 @@ pg_get_process_memory_contexts(PG_FUNCTION_ARGS)
* statistics are available within the allowed time then display
* previously published statistics if there are any. If no
* previous statistics are available then return NULL. The timer
- * is defined in milliseconds since thats what the condition
+ * is defined in milliseconds since that's what the condition
* variable sleep uses.
*/
if (ConditionVariableTimedSleep(&memCxtState[procNumber].memcxt_cv,
diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c
index a73aac4f98c..ab6317de5ae 100644
--- a/src/backend/utils/adt/pg_locale.c
+++ b/src/backend/utils/adt/pg_locale.c
@@ -19,7 +19,7 @@
* immediately.
*
* The other categories, LC_MONETARY, LC_NUMERIC, and LC_TIME are
- * permanentaly set to "C", and then we use temporary locale_t
+ * permanently set to "C", and then we use temporary locale_t
* objects when we need to look up locale data based on the GUCs
* of the same name. Information is cached when the GUCs change.
* The cached information is only used by the formatting functions
diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c
index 3b681647060..9bcbc4c3e97 100644
--- a/src/backend/utils/cache/plancache.c
+++ b/src/backend/utils/cache/plancache.c
@@ -1271,7 +1271,7 @@ UpdateCachedPlan(CachedPlanSource *plansource, int query_index,
/*
* XXX Should this also (re)set the properties of the CachedPlan that are
* set in BuildCachedPlan() after creating the fresh plans such as
- * planRoleId, dependsOnRole, and save_xmin?
+ * planRoleId, dependsOnRole, and saved_xmin?
*/
/*
diff --git a/src/backend/utils/mmgr/mcxt.c b/src/backend/utils/mmgr/mcxt.c
index e9aab36d110..468d0250b2e 100644
--- a/src/backend/utils/mmgr/mcxt.c
+++ b/src/backend/utils/mmgr/mcxt.c
@@ -910,7 +910,7 @@ MemoryContextStatsDetail(MemoryContext context,
*
* Print stats for this context if possible, but in any case accumulate counts
* into *totals (if not NULL). The callers should make sure that print_location
- * is set to PRINT_STATS_STDERR or PRINT_STATS_TO_LOGS or PRINT_STATS_NONE.
+ * is set to PRINT_STATS_TO_STDERR or PRINT_STATS_TO_LOGS or PRINT_STATS_NONE.
*/
static void
MemoryContextStatsInternal(MemoryContext context, int level,