Diffstat (limited to 'src/backend')
423 files changed, 5241 insertions, 5051 deletions
diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c index c64ede9dac5..009ebe7a1cb 100644 --- a/src/backend/access/common/heaptuple.c +++ b/src/backend/access/common/heaptuple.c @@ -21,7 +21,7 @@ * tuptoaster.c. * * This change will break any code that assumes it needn't detoast values - * that have been put into a tuple but never sent to disk. Hopefully there + * that have been put into a tuple but never sent to disk. Hopefully there * are few such places. * * Varlenas still have alignment 'i' (or 'd') in pg_type/pg_attribute, since @@ -387,7 +387,7 @@ nocachegetattr(HeapTuple tuple, /* * Otherwise, check for non-fixed-length attrs up to and including - * target. If there aren't any, it's safe to cheaply initialize the + * target. If there aren't any, it's safe to cheaply initialize the * cached offsets for these attrs. */ if (HeapTupleHasVarWidth(tuple)) @@ -454,7 +454,7 @@ nocachegetattr(HeapTuple tuple, * * Note - This loop is a little tricky. For each non-null attribute, * we have to first account for alignment padding before the attr, - * then advance over the attr based on its length. Nulls have no + * then advance over the attr based on its length. Nulls have no * storage and no alignment padding either. We can use/set * attcacheoff until we reach either a null or a var-width attribute. */ @@ -549,7 +549,7 @@ heap_getsysattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull) /* * cmin and cmax are now both aliases for the same field, which - * can in fact also be a combo command id. XXX perhaps we should + * can in fact also be a combo command id. XXX perhaps we should * return the "real" cmin or cmax if possible, that is if we are * inside the originating transaction? */ @@ -709,7 +709,7 @@ heap_form_tuple(TupleDesc tupleDescriptor, len += data_len; /* - * Allocate and zero the space needed. Note that the tuple body and + * Allocate and zero the space needed. Note that the tuple body and * HeapTupleData management structure are allocated in one chunk. */ tuple = (HeapTuple) palloc0(HEAPTUPLESIZE + len); diff --git a/src/backend/access/common/indextuple.c b/src/backend/access/common/indextuple.c index 7da10e9a74a..5fd400990b7 100644 --- a/src/backend/access/common/indextuple.c +++ b/src/backend/access/common/indextuple.c @@ -71,7 +71,7 @@ index_form_tuple(TupleDesc tupleDescriptor, /* * If value is stored EXTERNAL, must fetch it so we are not depending - * on outside storage. This should be improved someday. + * on outside storage. This should be improved someday. */ if (VARATT_IS_EXTERNAL(DatumGetPointer(values[i]))) { @@ -280,7 +280,7 @@ nocache_index_getattr(IndexTuple tup, /* * Otherwise, check for non-fixed-length attrs up to and including - * target. If there aren't any, it's safe to cheaply initialize the + * target. If there aren't any, it's safe to cheaply initialize the * cached offsets for these attrs. */ if (IndexTupleHasVarwidths(tup)) @@ -347,7 +347,7 @@ nocache_index_getattr(IndexTuple tup, * * Note - This loop is a little tricky. For each non-null attribute, * we have to first account for alignment padding before the attr, - * then advance over the attr based on its length. Nulls have no + * then advance over the attr based on its length. Nulls have no * storage and no alignment padding either. We can use/set * attcacheoff until we reach either a null or a var-width attribute. 
*/ diff --git a/src/backend/access/common/printtup.c b/src/backend/access/common/printtup.c index af59aa1a406..c7fa727485c 100644 --- a/src/backend/access/common/printtup.c +++ b/src/backend/access/common/printtup.c @@ -182,7 +182,7 @@ printtup_startup(DestReceiver *self, int operation, TupleDesc typeinfo) * or some similar function; it does not contain a full set of fields. * The targetlist will be NIL when executing a utility function that does * not have a plan. If the targetlist isn't NIL then it is a Query node's - * targetlist; it is up to us to ignore resjunk columns in it. The formats[] + * targetlist; it is up to us to ignore resjunk columns in it. The formats[] * array pointer might be NULL (if we are doing Describe on a prepared stmt); * send zeroes for the format codes in that case. */ diff --git a/src/backend/access/common/reloptions.c b/src/backend/access/common/reloptions.c index 530a1aee7bb..522b671993e 100644 --- a/src/backend/access/common/reloptions.c +++ b/src/backend/access/common/reloptions.c @@ -540,7 +540,7 @@ add_real_reloption(bits32 kinds, char *name, char *desc, double default_val, * Add a new string reloption * * "validator" is an optional function pointer that can be used to test the - * validity of the values. It must elog(ERROR) when the argument string is + * validity of the values. It must elog(ERROR) when the argument string is * not acceptable for the variable. Note that the default value must pass * the validation. */ @@ -868,7 +868,7 @@ extractRelOptions(HeapTuple tuple, TupleDesc tupdesc, Oid amoptions) * is returned. * * Note: values of type int, bool and real are allocated as part of the - * returned array. Values of type string are allocated separately and must + * returned array. Values of type string are allocated separately and must * be freed by the caller. */ relopt_value * @@ -1205,7 +1205,7 @@ default_reloptions(Datum reloptions, bool validate, relopt_kind kind) {"check_option", RELOPT_TYPE_STRING, offsetof(StdRdOptions, check_option_offset)}, {"user_catalog_table", RELOPT_TYPE_BOOL, - offsetof(StdRdOptions, user_catalog_table)} + offsetof(StdRdOptions, user_catalog_table)} }; options = parseRelOptions(reloptions, validate, kind, &numoptions); diff --git a/src/backend/access/common/tupconvert.c b/src/backend/access/common/tupconvert.c index 1b6c6d957c9..2e48b32ba3b 100644 --- a/src/backend/access/common/tupconvert.c +++ b/src/backend/access/common/tupconvert.c @@ -5,7 +5,7 @@ * * These functions provide conversion between rowtypes that are logically * equivalent but might have columns in a different order or different sets - * of dropped columns. There is some overlap of functionality with the + * of dropped columns. There is some overlap of functionality with the * executor's "junkfilter" routines, but these functions work on bare * HeapTuples rather than TupleTableSlots. * diff --git a/src/backend/access/common/tupdesc.c b/src/backend/access/common/tupdesc.c index 74cfb6499a5..f3b36893f78 100644 --- a/src/backend/access/common/tupdesc.c +++ b/src/backend/access/common/tupdesc.c @@ -581,7 +581,7 @@ TupleDescInitEntryCollation(TupleDesc desc, * Given a relation schema (list of ColumnDef nodes), build a TupleDesc. * * Note: the default assumption is no OIDs; caller may modify the returned - * TupleDesc if it wants OIDs. Also, tdtypeid will need to be filled in + * TupleDesc if it wants OIDs. Also, tdtypeid will need to be filled in * later on. 
*/ TupleDesc diff --git a/src/backend/access/gin/ginarrayproc.c b/src/backend/access/gin/ginarrayproc.c index 32dbed68c77..66cea28113a 100644 --- a/src/backend/access/gin/ginarrayproc.c +++ b/src/backend/access/gin/ginarrayproc.c @@ -197,7 +197,7 @@ ginarrayconsistent(PG_FUNCTION_ARGS) /* * Must have all elements in check[] true; no discrimination - * against nulls here. This is because array_contain_compare and + * against nulls here. This is because array_contain_compare and * array_eq handle nulls differently ... */ res = true; @@ -279,9 +279,10 @@ ginarraytriconsistent(PG_FUNCTION_ARGS) res = GIN_MAYBE; break; case GinEqualStrategy: + /* * Must have all elements in check[] true; no discrimination - * against nulls here. This is because array_contain_compare and + * against nulls here. This is because array_contain_compare and * array_eq handle nulls differently ... */ res = GIN_MAYBE; diff --git a/src/backend/access/gin/ginbtree.c b/src/backend/access/gin/ginbtree.c index 9b0f82fc904..27f88e0eb21 100644 --- a/src/backend/access/gin/ginbtree.c +++ b/src/backend/access/gin/ginbtree.c @@ -251,6 +251,7 @@ ginFindParents(GinBtree btree, GinBtreeStack *stack) Assert(blkno != btree->rootBlkno); ptr->blkno = blkno; ptr->buffer = buffer; + /* * parent may be wrong, but if so, the ginFinishSplit call will * recurse to call ginFindParents again to fix it. @@ -328,7 +329,8 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack, GinPlaceToPageRC rc; uint16 xlflags = 0; Page childpage = NULL; - Page newlpage = NULL, newrpage = NULL; + Page newlpage = NULL, + newrpage = NULL; if (GinPageIsData(page)) xlflags |= GIN_INSERT_ISDATA; @@ -346,8 +348,8 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack, } /* - * Try to put the incoming tuple on the page. placeToPage will decide - * if the page needs to be split. + * Try to put the incoming tuple on the page. placeToPage will decide if + * the page needs to be split. */ rc = btree->placeToPage(btree, stack->buffer, stack, insertdata, updateblkno, @@ -371,7 +373,7 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack, XLogRecPtr recptr; XLogRecData rdata[3]; ginxlogInsert xlrec; - BlockIdData childblknos[2]; + BlockIdData childblknos[2]; xlrec.node = btree->index->rd_node; xlrec.blkno = BufferGetBlockNumber(stack->buffer); @@ -449,7 +451,8 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack, data.flags = xlflags; if (childbuf != InvalidBuffer) { - Page childpage = BufferGetPage(childbuf); + Page childpage = BufferGetPage(childbuf); + GinPageGetOpaque(childpage)->flags &= ~GIN_INCOMPLETE_SPLIT; data.leftChildBlkno = BufferGetBlockNumber(childbuf); @@ -505,8 +508,8 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack, /* * Construct a new root page containing downlinks to the new left - * and right pages. (do this in a temporary copy first rather - * than overwriting the original page directly, so that we can still + * and right pages. (do this in a temporary copy first rather than + * overwriting the original page directly, so that we can still * abort gracefully if this fails.) 
*/ newrootpg = PageGetTempPage(newrpage); @@ -604,7 +607,7 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack, else { elog(ERROR, "unknown return code from GIN placeToPage method: %d", rc); - return false; /* keep compiler quiet */ + return false; /* keep compiler quiet */ } } @@ -627,8 +630,8 @@ ginFinishSplit(GinBtree btree, GinBtreeStack *stack, bool freestack, bool first = true; /* - * freestack == false when we encounter an incompletely split page during a - * scan, while freestack == true is used in the normal scenario that a + * freestack == false when we encounter an incompletely split page during + * a scan, while freestack == true is used in the normal scenario that a * split is finished right after the initial insert. */ if (!freestack) @@ -650,8 +653,8 @@ ginFinishSplit(GinBtree btree, GinBtreeStack *stack, bool freestack, * then continue with the current one. * * Note: we have to finish *all* incomplete splits we encounter, even - * if we have to move right. Otherwise we might choose as the target - * a page that has no downlink in the parent, and splitting it further + * if we have to move right. Otherwise we might choose as the target a + * page that has no downlink in the parent, and splitting it further * would fail. */ if (GinPageIsIncompleteSplit(BufferGetPage(parent->buffer))) diff --git a/src/backend/access/gin/ginbulk.c b/src/backend/access/gin/ginbulk.c index 9f3009b5894..3af027187ac 100644 --- a/src/backend/access/gin/ginbulk.c +++ b/src/backend/access/gin/ginbulk.c @@ -187,7 +187,7 @@ ginInsertBAEntry(BuildAccumulator *accum, * Since the entries are being inserted into a balanced binary tree, you * might think that the order of insertion wouldn't be critical, but it turns * out that inserting the entries in sorted order results in a lot of - * rebalancing operations and is slow. To prevent this, we attempt to insert + * rebalancing operations and is slow. To prevent this, we attempt to insert * the nodes in an order that will produce a nearly-balanced tree if the input * is in fact sorted. * diff --git a/src/backend/access/gin/gindatapage.c b/src/backend/access/gin/gindatapage.c index c11ed858833..272a9ca7c09 100644 --- a/src/backend/access/gin/gindatapage.c +++ b/src/backend/access/gin/gindatapage.c @@ -49,8 +49,8 @@ typedef struct dlist_head segments; /* a list of leafSegmentInfos */ /* - * The following fields represent how the segments are split across - * pages, if a page split is required. Filled in by leafRepackItems. + * The following fields represent how the segments are split across pages, + * if a page split is required. Filled in by leafRepackItems. */ dlist_node *lastleft; /* last segment on left page */ int lsize; /* total size on left page */ @@ -61,7 +61,7 @@ typedef struct typedef struct { - dlist_node node; /* linked list pointers */ + dlist_node node; /* linked list pointers */ /*------------- * 'action' indicates the status of this in-memory segment, compared to @@ -83,9 +83,9 @@ typedef struct int nmodifieditems; /* - * The following fields represent the items in this segment. If 'items' - * is not NULL, it contains a palloc'd array of the itemsin this segment. - * If 'seg' is not NULL, it contains the items in an already-compressed + * The following fields represent the items in this segment. If 'items' is + * not NULL, it contains a palloc'd array of the itemsin this segment. If + * 'seg' is not NULL, it contains the items in an already-compressed * format. It can point to an on-disk page (!modified), or a palloc'd * segment in memory. 
If both are set, they must represent the same items. */ @@ -386,7 +386,7 @@ GinDataPageAddPostingItem(Page page, PostingItem *data, OffsetNumber offset) if (offset != maxoff + 1) memmove(ptr + sizeof(PostingItem), ptr, - (maxoff - offset + 1) * sizeof(PostingItem)); + (maxoff - offset + 1) *sizeof(PostingItem)); } memcpy(ptr, data, sizeof(PostingItem)); @@ -436,8 +436,8 @@ dataPlaceToPageLeaf(GinBtree btree, Buffer buf, GinBtreeStack *stack, int maxitems = items->nitem - items->curitem; Page page = BufferGetPage(buf); int i; - ItemPointerData rbound; - ItemPointerData lbound; + ItemPointerData rbound; + ItemPointerData lbound; bool needsplit; bool append; int segsize; @@ -451,7 +451,7 @@ dataPlaceToPageLeaf(GinBtree btree, Buffer buf, GinBtreeStack *stack, Assert(GinPageIsData(page)); - rbound = *GinDataPageGetRightBound(page); + rbound = *GinDataPageGetRightBound(page); /* * Count how many of the new items belong to this page. @@ -464,8 +464,8 @@ dataPlaceToPageLeaf(GinBtree btree, Buffer buf, GinBtreeStack *stack, { /* * This needs to go to some other location in the tree. (The - * caller should've chosen the insert location so that at least - * the first item goes here.) + * caller should've chosen the insert location so that at + * least the first item goes here.) */ Assert(i > 0); break; @@ -553,7 +553,7 @@ dataPlaceToPageLeaf(GinBtree btree, Buffer buf, GinBtreeStack *stack, /* Add the new items to the segments */ if (!addItemsToLeaf(leaf, newItems, maxitems)) { - /* all items were duplicates, we have nothing to do */ + /* all items were duplicates, we have nothing to do */ items->curitem += maxitems; MemoryContextSwitchTo(oldCxt); @@ -680,7 +680,7 @@ dataPlaceToPageLeaf(GinBtree btree, Buffer buf, GinBtreeStack *stack, Assert(GinPageRightMost(page) || ginCompareItemPointers(GinDataPageGetRightBound(*newlpage), - GinDataPageGetRightBound(*newrpage)) < 0); + GinDataPageGetRightBound(*newrpage)) < 0); if (append) elog(DEBUG2, "appended %d items to block %u; split %d/%d (%d to go)", @@ -769,16 +769,16 @@ ginVacuumPostingTreeLeaf(Relation indexrel, Buffer buffer, GinVacuumState *gvs) * We don't try to re-encode the segments here, even though some of them * might be really small now that we've removed some items from them. It * seems like a waste of effort, as there isn't really any benefit from - * larger segments per se; larger segments only help to pack more items - * in the same space. We might as well delay doing that until the next + * larger segments per se; larger segments only help to pack more items in + * the same space. We might as well delay doing that until the next * insertion, which will need to re-encode at least part of the page * anyway. * - * Also note if the page was in uncompressed, pre-9.4 format before, it - * is now represented as one huge segment that contains all the items. - * It might make sense to split that, to speed up random access, but we - * don't bother. You'll have to REINDEX anyway if you want the full gain - * of the new tighter index format. + * Also note if the page was in uncompressed, pre-9.4 format before, it is + * now represented as one huge segment that contains all the items. It + * might make sense to split that, to speed up random access, but we don't + * bother. You'll have to REINDEX anyway if you want the full gain of the + * new tighter index format. 
*/ if (removedsomething) { @@ -795,6 +795,7 @@ ginVacuumPostingTreeLeaf(Relation indexrel, Buffer buffer, GinVacuumState *gvs) { leafSegmentInfo *seginfo = dlist_container(leafSegmentInfo, node, iter.cur); + if (seginfo->action != GIN_SEGMENT_UNMODIFIED) modified = true; if (modified && seginfo->action != GIN_SEGMENT_DELETE) @@ -862,10 +863,11 @@ constructLeafRecompressWALData(Buffer buf, disassembledLeaf *leaf) } walbufbegin = palloc( - sizeof(ginxlogRecompressDataLeaf) + - BLCKSZ + /* max size needed to hold the segment data */ - nmodified * 2 + /* (segno + action) per action */ - sizeof(XLogRecData)); + sizeof(ginxlogRecompressDataLeaf) + + BLCKSZ + /* max size needed to hold the segment + * data */ + nmodified * 2 + /* (segno + action) per action */ + sizeof(XLogRecData)); walbufend = walbufbegin; recompress_xlog = (ginxlogRecompressDataLeaf *) walbufend; @@ -965,9 +967,9 @@ dataPlaceToPageLeafRecompress(Buffer buf, disassembledLeaf *leaf) int segsize; /* - * If the page was in pre-9.4 format before, convert the header, and - * force all segments to be copied to the page whether they were modified - * or not. + * If the page was in pre-9.4 format before, convert the header, and force + * all segments to be copied to the page whether they were modified or + * not. */ if (!GinPageIsCompressed(page)) { @@ -1022,6 +1024,7 @@ dataPlaceToPageLeafSplit(Buffer buf, disassembledLeaf *leaf, dlist_node *node; dlist_node *firstright; leafSegmentInfo *seginfo; + /* these must be static so they can be returned to caller */ static ginxlogSplitDataLeaf split_xlog; static XLogRecData rdata[3]; @@ -1121,6 +1124,7 @@ dataPlaceToPageInternal(GinBtree btree, Buffer buf, GinBtreeStack *stack, Page page = BufferGetPage(buf); OffsetNumber off = stack->off; PostingItem *pitem; + /* these must be static so they can be returned to caller */ static XLogRecData rdata; static ginxlogInsertDataInternal data; @@ -1198,7 +1202,7 @@ dataSplitPageInternal(GinBtree btree, Buffer origbuf, int nrightitems; Size pageSize = PageGetPageSize(oldpage); ItemPointerData oldbound = *GinDataPageGetRightBound(oldpage); - ItemPointer bound; + ItemPointer bound; Page lpage; Page rpage; OffsetNumber separator; @@ -1216,8 +1220,8 @@ dataSplitPageInternal(GinBtree btree, Buffer origbuf, *prdata = rdata; /* - * First construct a new list of PostingItems, which includes all the - * old items, and the new item. + * First construct a new list of PostingItems, which includes all the old + * items, and the new item. */ memcpy(allitems, GinDataPageGetPostingItem(oldpage, FirstOffsetNumber), (off - 1) * sizeof(PostingItem)); @@ -1402,8 +1406,8 @@ addItemsToLeaf(disassembledLeaf *leaf, ItemPointer newItems, int nNewItems) leafSegmentInfo *newseg; /* - * If the page is completely empty, just construct one new segment to - * hold all the new items. + * If the page is completely empty, just construct one new segment to hold + * all the new items. 
*/ if (dlist_is_empty(&leaf->segments)) { @@ -1418,9 +1422,9 @@ addItemsToLeaf(disassembledLeaf *leaf, ItemPointer newItems, int nNewItems) dlist_foreach(iter, &leaf->segments) { - leafSegmentInfo *cur = (leafSegmentInfo *) dlist_container(leafSegmentInfo, node, iter.cur); + leafSegmentInfo *cur = (leafSegmentInfo *) dlist_container(leafSegmentInfo, node, iter.cur); int nthis; - ItemPointer tmpitems; + ItemPointer tmpitems; int ntmpitems; /* @@ -1434,7 +1438,7 @@ addItemsToLeaf(disassembledLeaf *leaf, ItemPointer newItems, int nNewItems) ItemPointerData next_first; next = (leafSegmentInfo *) dlist_container(leafSegmentInfo, node, - dlist_next_node(&leaf->segments, iter.cur)); + dlist_next_node(&leaf->segments, iter.cur)); if (next->items) next_first = next->items[0]; else @@ -1556,27 +1560,27 @@ leafRepackItems(disassembledLeaf *leaf, ItemPointer remaining) if (seginfo->seg == NULL) { if (seginfo->nitems > GinPostingListSegmentMaxSize) - npacked = 0; /* no chance that it would fit. */ + npacked = 0; /* no chance that it would fit. */ else { seginfo->seg = ginCompressPostingList(seginfo->items, seginfo->nitems, - GinPostingListSegmentMaxSize, + GinPostingListSegmentMaxSize, &npacked); } if (npacked != seginfo->nitems) { /* - * Too large. Compress again to the target size, and create - * a new segment to represent the remaining items. The new - * segment is inserted after this one, so it will be - * processed in the next iteration of this loop. + * Too large. Compress again to the target size, and + * create a new segment to represent the remaining items. + * The new segment is inserted after this one, so it will + * be processed in the next iteration of this loop. */ if (seginfo->seg) pfree(seginfo->seg); seginfo->seg = ginCompressPostingList(seginfo->items, seginfo->nitems, - GinPostingListSegmentTargetSize, + GinPostingListSegmentTargetSize, &npacked); if (seginfo->action != GIN_SEGMENT_INSERT) seginfo->action = GIN_SEGMENT_REPLACE; @@ -1596,7 +1600,7 @@ leafRepackItems(disassembledLeaf *leaf, ItemPointer remaining) */ if (SizeOfGinPostingList(seginfo->seg) < GinPostingListSegmentMinSize && next_node) { - int nmerged; + int nmerged; nextseg = dlist_container(leafSegmentInfo, node, next_node); @@ -1741,8 +1745,8 @@ createPostingTree(Relation index, ItemPointerData *items, uint32 nitems, GinPageGetOpaque(tmppage)->rightlink = InvalidBlockNumber; /* - * Write as many of the items to the root page as fit. In segments - * of max GinPostingListSegmentMaxSize bytes each. + * Write as many of the items to the root page as fit. In segments of max + * GinPostingListSegmentMaxSize bytes each. */ nrootitems = 0; rootsize = 0; diff --git a/src/backend/access/gin/ginentrypage.c b/src/backend/access/gin/ginentrypage.c index 4291bab63be..412f90da4db 100644 --- a/src/backend/access/gin/ginentrypage.c +++ b/src/backend/access/gin/ginentrypage.c @@ -135,7 +135,8 @@ GinFormTuple(GinState *ginstate, */ if (data) { - char *ptr = GinGetPosting(itup); + char *ptr = GinGetPosting(itup); + memcpy(ptr, data, dataSize); } @@ -162,7 +163,7 @@ ginReadTuple(GinState *ginstate, OffsetNumber attnum, IndexTuple itup, { Pointer ptr = GinGetPosting(itup); int nipd = GinGetNPosting(itup); - ItemPointer ipd; + ItemPointer ipd; int ndecoded; if (GinItupIsCompressed(itup)) @@ -192,7 +193,7 @@ ginReadTuple(GinState *ginstate, OffsetNumber attnum, IndexTuple itup, * Form a non-leaf entry tuple by copying the key data from the given tuple, * which can be either a leaf or non-leaf entry tuple. 
* - * Any posting list in the source tuple is not copied. The specified child + * Any posting list in the source tuple is not copied. The specified child * block number is inserted into t_tid. */ static IndexTuple diff --git a/src/backend/access/gin/ginfast.c b/src/backend/access/gin/ginfast.c index a16c2140c22..09c3e39bf3b 100644 --- a/src/backend/access/gin/ginfast.c +++ b/src/backend/access/gin/ginfast.c @@ -440,7 +440,7 @@ ginHeapTupleFastInsert(GinState *ginstate, GinTupleCollector *collector) * Create temporary index tuples for a single indexable item (one index column * for the heap tuple specified by ht_ctid), and append them to the array * in *collector. They will subsequently be written out using - * ginHeapTupleFastInsert. Note that to guarantee consistent state, all + * ginHeapTupleFastInsert. Note that to guarantee consistent state, all * temp tuples for a given heap tuple must be written in one call to * ginHeapTupleFastInsert. */ @@ -707,7 +707,7 @@ processPendingPage(BuildAccumulator *accum, KeyArray *ka, * * This can be called concurrently by multiple backends, so it must cope. * On first glance it looks completely not concurrent-safe and not crash-safe - * either. The reason it's okay is that multiple insertion of the same entry + * either. The reason it's okay is that multiple insertion of the same entry * is detected and treated as a no-op by gininsert.c. If we crash after * posting entries to the main index and before removing them from the * pending list, it's okay because when we redo the posting later on, nothing @@ -761,7 +761,7 @@ ginInsertCleanup(GinState *ginstate, LockBuffer(metabuffer, GIN_UNLOCK); /* - * Initialize. All temporary space will be in opCtx + * Initialize. All temporary space will be in opCtx */ opCtx = AllocSetContextCreate(CurrentMemoryContext, "GIN insert cleanup temporary context", @@ -855,7 +855,7 @@ ginInsertCleanup(GinState *ginstate, /* * While we left the page unlocked, more stuff might have gotten - * added to it. If so, process those entries immediately. There + * added to it. If so, process those entries immediately. There * shouldn't be very many, so we don't worry about the fact that * we're doing this with exclusive lock. Insertion algorithm * guarantees that inserted row(s) will not continue on next page. diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c index fda19cf4e69..271f09901b9 100644 --- a/src/backend/access/gin/ginget.c +++ b/src/backend/access/gin/ginget.c @@ -85,7 +85,8 @@ scanPostingTree(Relation index, GinScanEntry scanEntry, page = BufferGetPage(buffer); if ((GinPageGetOpaque(page)->flags & GIN_DELETED) == 0) { - int n = GinDataLeafPageGetItemsToTbm(page, scanEntry->matchBitmap); + int n = GinDataLeafPageGetItemsToTbm(page, scanEntry->matchBitmap); + scanEntry->predictNumberResult += n; } @@ -100,7 +101,7 @@ scanPostingTree(Relation index, GinScanEntry scanEntry, /* * Collects TIDs into scanEntry->matchBitmap for all heap tuples that - * match the search entry. This supports three different match modes: + * match the search entry. This supports three different match modes: * * 1. Partial-match support: scan from current point until the * comparePartialFn says we're done. @@ -196,7 +197,7 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack, /* * In ALL mode, we are not interested in null items, so we can * stop if we get to a null-item placeholder (which will be the - * last entry for a given attnum). We do want to include NULL_KEY + * last entry for a given attnum). 
We do want to include NULL_KEY * and EMPTY_ITEM entries, though. */ if (icategory == GIN_CAT_NULL_ITEM) @@ -407,7 +408,7 @@ restartScanEntry: else if (GinGetNPosting(itup) > 0) { entry->list = ginReadTuple(ginstate, entry->attnum, itup, - &entry->nlist); + &entry->nlist); entry->predictNumberResult = entry->nlist; entry->isFinished = FALSE; @@ -463,11 +464,11 @@ startScanKey(GinState *ginstate, GinScanOpaque so, GinScanKey key) * considerably, if the frequent term can be put in the additional set. * * There can be many legal ways to divide them entries into these two - * sets. A conservative division is to just put everything in the - * required set, but the more you can put in the additional set, the more - * you can skip during the scan. To maximize skipping, we try to put as - * many frequent items as possible into additional, and less frequent - * ones into required. To do that, sort the entries by frequency + * sets. A conservative division is to just put everything in the required + * set, but the more you can put in the additional set, the more you can + * skip during the scan. To maximize skipping, we try to put as many + * frequent items as possible into additional, and less frequent ones into + * required. To do that, sort the entries by frequency * (predictNumberResult), and put entries into the required set in that * order, until the consistent function says that none of the remaining * entries can form a match, without any items from the required set. The @@ -635,8 +636,8 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry, ItemPointerData advan if (stepright) { /* - * We've processed all the entries on this page. If it was the last - * page in the tree, we're done. + * We've processed all the entries on this page. If it was the + * last page in the tree, we're done. */ if (GinPageRightMost(page)) { @@ -647,8 +648,8 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry, ItemPointerData advan } /* - * Step to next page, following the right link. then find the first - * ItemPointer greater than advancePast. + * Step to next page, following the right link. then find the + * first ItemPointer greater than advancePast. */ entry->buffer = ginStepRight(entry->buffer, ginstate->index, @@ -658,7 +659,7 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry, ItemPointerData advan stepright = true; if (GinPageGetOpaque(page)->flags & GIN_DELETED) - continue; /* page was deleted by concurrent vacuum */ + continue; /* page was deleted by concurrent vacuum */ /* * The first item > advancePast might not be on this page, but @@ -781,6 +782,7 @@ entryGetItem(GinState *ginstate, GinScanEntry entry, gotitem = true; break; } + /* * Not a lossy page. Skip over any offsets <= advancePast, and * return that. @@ -788,8 +790,9 @@ entryGetItem(GinState *ginstate, GinScanEntry entry, if (entry->matchResult->blockno == advancePastBlk) { /* - * First, do a quick check against the last offset on the page. - * If that's > advancePast, so are all the other offsets. + * First, do a quick check against the last offset on the + * page. If that's > advancePast, so are all the other + * offsets. */ if (entry->matchResult->offsets[entry->matchResult->ntuples - 1] <= advancePastOff) { @@ -890,8 +893,8 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key, /* * We might have already tested this item; if so, no need to repeat work. - * (Note: the ">" case can happen, if advancePast is exact but we previously - * had to set curItem to a lossy-page pointer.) 
+ * (Note: the ">" case can happen, if advancePast is exact but we + * previously had to set curItem to a lossy-page pointer.) */ if (ginCompareItemPointers(&key->curItem, &advancePast) > 0) return; @@ -942,8 +945,8 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key, /* * Ok, we now know that there are no matches < minItem. * - * If minItem is lossy, it means that there were no exact items on - * the page among requiredEntries, because lossy pointers sort after exact + * If minItem is lossy, it means that there were no exact items on the + * page among requiredEntries, because lossy pointers sort after exact * items. However, there might be exact items for the same page among * additionalEntries, so we mustn't advance past them. */ @@ -1085,6 +1088,7 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key, if (entry->isFinished) key->entryRes[i] = GIN_FALSE; #if 0 + /* * This case can't currently happen, because we loaded all the entries * for this item earlier. @@ -1119,6 +1123,7 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key, break; default: + /* * the 'default' case shouldn't happen, but if the consistent * function returns something bogus, this is the safe result @@ -1129,11 +1134,10 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key, } /* - * We have a tuple, and we know if it matches or not. If it's a - * non-match, we could continue to find the next matching tuple, but - * let's break out and give scanGetItem a chance to advance the other - * keys. They might be able to skip past to a much higher TID, allowing - * us to save work. + * We have a tuple, and we know if it matches or not. If it's a non-match, + * we could continue to find the next matching tuple, but let's break out + * and give scanGetItem a chance to advance the other keys. They might be + * able to skip past to a much higher TID, allowing us to save work. */ /* clean up after consistentFn calls */ @@ -1165,14 +1169,14 @@ scanGetItem(IndexScanDesc scan, ItemPointerData advancePast, * matching item. * * This logic works only if a keyGetItem stream can never contain both - * exact and lossy pointers for the same page. Else we could have a + * exact and lossy pointers for the same page. Else we could have a * case like * * stream 1 stream 2 - * ... ... + * ... ... * 42/6 42/7 * 50/1 42/0xffff - * ... ... + * ... ... * * We would conclude that 42/6 is not a match and advance stream 1, * thus never detecting the match to the lossy pointer in stream 2. @@ -1205,12 +1209,11 @@ scanGetItem(IndexScanDesc scan, ItemPointerData advancePast, } /* - * It's a match. We can conclude that nothing < matches, so - * the other key streams can skip to this item. + * It's a match. We can conclude that nothing < matches, so the + * other key streams can skip to this item. * - * Beware of lossy pointers, though; from a lossy pointer, we - * can only conclude that nothing smaller than this *block* - * matches. + * Beware of lossy pointers, though; from a lossy pointer, we can + * only conclude that nothing smaller than this *block* matches. */ if (ItemPointerIsLossyPage(&key->curItem)) { @@ -1229,8 +1232,8 @@ scanGetItem(IndexScanDesc scan, ItemPointerData advancePast, } /* - * If this is the first key, remember this location as a - * potential match, and proceed to check the rest of the keys. + * If this is the first key, remember this location as a potential + * match, and proceed to check the rest of the keys. 
* * Otherwise, check if this is the same item that we checked the * previous keys for (or a lossy pointer for the same page). If @@ -1247,7 +1250,7 @@ scanGetItem(IndexScanDesc scan, ItemPointerData advancePast, if (ItemPointerIsLossyPage(&key->curItem) || ItemPointerIsLossyPage(item)) { - Assert (GinItemPointerGetBlockNumber(&key->curItem) >= GinItemPointerGetBlockNumber(item)); + Assert(GinItemPointerGetBlockNumber(&key->curItem) >= GinItemPointerGetBlockNumber(item)); match = (GinItemPointerGetBlockNumber(&key->curItem) == GinItemPointerGetBlockNumber(item)); } @@ -1264,8 +1267,8 @@ scanGetItem(IndexScanDesc scan, ItemPointerData advancePast, /* * Now *item contains the first ItemPointer after previous result that - * satisfied all the keys for that exact TID, or a lossy reference - * to the same page. + * satisfied all the keys for that exact TID, or a lossy reference to the + * same page. * * We must return recheck = true if any of the keys are marked recheck. */ @@ -1776,10 +1779,10 @@ gingetbitmap(PG_FUNCTION_ARGS) /* * First, scan the pending list and collect any matching entries into the - * bitmap. After we scan a pending item, some other backend could post it + * bitmap. After we scan a pending item, some other backend could post it * into the main index, and so we might visit it a second time during the * main scan. This is okay because we'll just re-set the same bit in the - * bitmap. (The possibility of duplicate visits is a major reason why GIN + * bitmap. (The possibility of duplicate visits is a major reason why GIN * can't support the amgettuple API, however.) Note that it would not do * to scan the main index before the pending list, since concurrent * cleanup could then make us miss entries entirely. diff --git a/src/backend/access/gin/gininsert.c b/src/backend/access/gin/gininsert.c index 3bafb6471b3..b27cae3aab2 100644 --- a/src/backend/access/gin/gininsert.c +++ b/src/backend/access/gin/gininsert.c @@ -40,7 +40,7 @@ typedef struct * Adds array of item pointers to tuple's posting list, or * creates posting tree and tuple pointing to tree in case * of not enough space. Max size of tuple is defined in - * GinFormTuple(). Returns a new, modified index tuple. + * GinFormTuple(). Returns a new, modified index tuple. * items[] must be in sorted order with no duplicates. */ static IndexTuple diff --git a/src/backend/access/gin/ginlogic.c b/src/backend/access/gin/ginlogic.c index 167d25ea5c7..052abd2bd8e 100644 --- a/src/backend/access/gin/ginlogic.c +++ b/src/backend/access/gin/ginlogic.c @@ -47,7 +47,7 @@ * Maximum number of MAYBE inputs that shimTriConsistentFn will try to * resolve by calling all combinations. */ -#define MAX_MAYBE_ENTRIES 4 +#define MAX_MAYBE_ENTRIES 4 /* * Dummy consistent functions for an EVERYTHING key. Just claim it matches. 
@@ -95,14 +95,14 @@ static GinTernaryValue directTriConsistentFn(GinScanKey key) { return DatumGetGinTernaryValue(FunctionCall7Coll( - key->triConsistentFmgrInfo, - key->collation, - PointerGetDatum(key->entryRes), - UInt16GetDatum(key->strategy), - key->query, - UInt32GetDatum(key->nuserentries), - PointerGetDatum(key->extra_data), - PointerGetDatum(key->queryValues), + key->triConsistentFmgrInfo, + key->collation, + PointerGetDatum(key->entryRes), + UInt16GetDatum(key->strategy), + key->query, + UInt32GetDatum(key->nuserentries), + PointerGetDatum(key->extra_data), + PointerGetDatum(key->queryValues), PointerGetDatum(key->queryCategories))); } @@ -115,15 +115,16 @@ static bool shimBoolConsistentFn(GinScanKey key) { GinTernaryValue result; + result = DatumGetGinTernaryValue(FunctionCall7Coll( - key->triConsistentFmgrInfo, - key->collation, - PointerGetDatum(key->entryRes), - UInt16GetDatum(key->strategy), - key->query, - UInt32GetDatum(key->nuserentries), - PointerGetDatum(key->extra_data), - PointerGetDatum(key->queryValues), + key->triConsistentFmgrInfo, + key->collation, + PointerGetDatum(key->entryRes), + UInt16GetDatum(key->strategy), + key->query, + UInt32GetDatum(key->nuserentries), + PointerGetDatum(key->extra_data), + PointerGetDatum(key->queryValues), PointerGetDatum(key->queryCategories))); if (result == GIN_MAYBE) { @@ -240,8 +241,8 @@ ginInitConsistentFunction(GinState *ginstate, GinScanKey key) key->boolConsistentFn = shimBoolConsistentFn; if (OidIsValid(ginstate->triConsistentFn[key->attnum - 1].fn_oid)) - key->triConsistentFn = directTriConsistentFn; + key->triConsistentFn = directTriConsistentFn; else - key->triConsistentFn = shimTriConsistentFn; + key->triConsistentFn = shimTriConsistentFn; } } diff --git a/src/backend/access/gin/ginpostinglist.c b/src/backend/access/gin/ginpostinglist.c index 81bbb09c244..606a824f125 100644 --- a/src/backend/access/gin/ginpostinglist.c +++ b/src/backend/access/gin/ginpostinglist.c @@ -126,9 +126,9 @@ encode_varbyte(uint64 val, unsigned char **ptr) static uint64 decode_varbyte(unsigned char **ptr) { - uint64 val; + uint64 val; unsigned char *p = *ptr; - uint64 c; + uint64 c; c = *(p++); val = c & 0x7F; @@ -210,7 +210,7 @@ ginCompressPostingList(const ItemPointer ipd, int nipd, int maxsize, uint64 val = itemptr_to_uint64(&ipd[totalpacked]); uint64 delta = val - prev; - Assert (val > prev); + Assert(val > prev); if (endptr - ptr >= 6) encode_varbyte(delta, &ptr); @@ -225,7 +225,7 @@ ginCompressPostingList(const ItemPointer ipd, int nipd, int maxsize, encode_varbyte(delta, &p); if (p - buf > (endptr - ptr)) - break; /* output is full */ + break; /* output is full */ memcpy(ptr, buf, p - buf); ptr += (p - buf); @@ -286,7 +286,7 @@ ginPostingListDecode(GinPostingList *plist, int *ndecoded) ItemPointer ginPostingListDecodeAllSegments(GinPostingList *segment, int len, int *ndecoded_out) { - ItemPointer result; + ItemPointer result; int nallocated; uint64 val; char *endseg = ((char *) segment) + len; @@ -349,7 +349,7 @@ ginPostingListDecodeAllSegmentsToTbm(GinPostingList *ptr, int len, TIDBitmap *tbm) { int ndecoded; - ItemPointer items; + ItemPointer items; items = ginPostingListDecodeAllSegments(ptr, len, &ndecoded); tbm_add_tuples(tbm, items, ndecoded, false); @@ -374,8 +374,8 @@ ginMergeItemPointers(ItemPointerData *a, uint32 na, dst = (ItemPointer) palloc((na + nb) * sizeof(ItemPointerData)); /* - * If the argument arrays don't overlap, we can just append them to - * each other. 
+ * If the argument arrays don't overlap, we can just append them to each + * other. */ if (na == 0 || nb == 0 || ginCompareItemPointers(&a[na - 1], &b[0]) < 0) { diff --git a/src/backend/access/gin/ginscan.c b/src/backend/access/gin/ginscan.c index b19386e19ad..66c62b2e32a 100644 --- a/src/backend/access/gin/ginscan.c +++ b/src/backend/access/gin/ginscan.c @@ -389,7 +389,7 @@ ginNewScanKey(IndexScanDesc scan) /* * If the index is version 0, it may be missing null and placeholder * entries, which would render searches for nulls and full-index scans - * unreliable. Throw an error if so. + * unreliable. Throw an error if so. */ if (hasNullQuery && !so->isVoidRes) { diff --git a/src/backend/access/gin/ginutil.c b/src/backend/access/gin/ginutil.c index 4dadb50dcaa..3ca0b68434b 100644 --- a/src/backend/access/gin/ginutil.c +++ b/src/backend/access/gin/ginutil.c @@ -67,6 +67,7 @@ initGinState(GinState *state, Relation index) fmgr_info_copy(&(state->extractQueryFn[i]), index_getprocinfo(index, i + 1, GIN_EXTRACTQUERY_PROC), CurrentMemoryContext); + /* * Check opclass capability to do tri-state or binary logic consistent * check. @@ -74,14 +75,14 @@ initGinState(GinState *state, Relation index) if (index_getprocid(index, i + 1, GIN_TRICONSISTENT_PROC) != InvalidOid) { fmgr_info_copy(&(state->triConsistentFn[i]), - index_getprocinfo(index, i + 1, GIN_TRICONSISTENT_PROC), + index_getprocinfo(index, i + 1, GIN_TRICONSISTENT_PROC), CurrentMemoryContext); } if (index_getprocid(index, i + 1, GIN_CONSISTENT_PROC) != InvalidOid) { fmgr_info_copy(&(state->consistentFn[i]), - index_getprocinfo(index, i + 1, GIN_CONSISTENT_PROC), + index_getprocinfo(index, i + 1, GIN_CONSISTENT_PROC), CurrentMemoryContext); } @@ -458,7 +459,7 @@ ginExtractEntries(GinState *ginstate, OffsetNumber attnum, * If there's more than one key, sort and unique-ify. * * XXX Using qsort here is notationally painful, and the overhead is - * pretty bad too. For small numbers of keys it'd likely be better to use + * pretty bad too. For small numbers of keys it'd likely be better to use * a simple insertion sort. */ if (*nentries > 1) diff --git a/src/backend/access/gin/ginvacuum.c b/src/backend/access/gin/ginvacuum.c index 72f734caf8d..af4d2714b5f 100644 --- a/src/backend/access/gin/ginvacuum.c +++ b/src/backend/access/gin/ginvacuum.c @@ -47,7 +47,7 @@ ginVacuumItemPointers(GinVacuumState *gvs, ItemPointerData *items, { int i, remaining = 0; - ItemPointer tmpitems = NULL; + ItemPointer tmpitems = NULL; /* * Iterate over TIDs array @@ -208,8 +208,8 @@ ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot, } /* - * if we have root and there are empty pages in tree, then we don't release - * lock to go further processing and guarantee that tree is unused + * if we have root and there are empty pages in tree, then we don't + * release lock to go further processing and guarantee that tree is unused */ if (!(isRoot && hasVoidPage)) { @@ -236,7 +236,7 @@ ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkn Buffer pBuffer; Page page, parentPage; - BlockNumber rightlink; + BlockNumber rightlink; /* * Lock the pages in the same order as an insertion would, to avoid @@ -302,11 +302,11 @@ ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkn data.rightLink = GinPageGetOpaque(page)->rightlink; /* - * We can't pass buffer_std = TRUE, because we didn't set pd_lower - * on pre-9.4 versions. 
The page might've been binary-upgraded from - * an older version, and hence not have pd_lower set correctly. - * Ditto for the left page, but removing the item from the parent - * updated its pd_lower, so we know that's OK at this point. + * We can't pass buffer_std = TRUE, because we didn't set pd_lower on + * pre-9.4 versions. The page might've been binary-upgraded from an + * older version, and hence not have pd_lower set correctly. Ditto for + * the left page, but removing the item from the parent updated its + * pd_lower, so we know that's OK at this point. */ rdata[0].buffer = dBuffer; rdata[0].buffer_std = FALSE; @@ -538,7 +538,8 @@ ginVacuumEntryPage(GinVacuumState *gvs, Buffer buffer, BlockNumber *roots, uint3 } /* - * if we already created a temporary page, make changes in place + * if we already created a temporary page, make changes in + * place */ if (tmppage == origpage) { diff --git a/src/backend/access/gin/ginxlog.c b/src/backend/access/gin/ginxlog.c index d19389330c5..a8a917a9d0e 100644 --- a/src/backend/access/gin/ginxlog.c +++ b/src/backend/access/gin/ginxlog.c @@ -133,7 +133,7 @@ ginRedoInsertEntry(Buffer buffer, bool isLeaf, BlockNumber rightblkno, void *rda if (PageAddItem(page, (Item) itup, IndexTupleSize(itup), offset, false, false) == InvalidOffsetNumber) { RelFileNode node; - ForkNumber forknum; + ForkNumber forknum; BlockNumber blknum; BufferGetTag(buffer, &node, &forknum, &blknum); @@ -341,8 +341,8 @@ ginRedoInsert(XLogRecPtr lsn, XLogRecord *record) payload = XLogRecGetData(record) + sizeof(ginxlogInsert); /* - * First clear incomplete-split flag on child page if this finishes - * a split. + * First clear incomplete-split flag on child page if this finishes a + * split. */ if (!isLeaf) { @@ -472,8 +472,8 @@ ginRedoSplit(XLogRecPtr lsn, XLogRecord *record) payload = XLogRecGetData(record) + sizeof(ginxlogSplit); /* - * First clear incomplete-split flag on child page if this finishes - * a split + * First clear incomplete-split flag on child page if this finishes a + * split */ if (!isLeaf) { @@ -522,7 +522,7 @@ ginRedoSplit(XLogRecPtr lsn, XLogRecord *record) if (isRoot) { - BlockNumber rootBlkno = data->rrlink; + BlockNumber rootBlkno = data->rrlink; Buffer rootBuf = XLogReadBuffer(data->node, rootBlkno, true); Page rootPage = BufferGetPage(rootBuf); @@ -711,9 +711,9 @@ ginRedoUpdateMetapage(XLogRecPtr lsn, XLogRecord *record) Buffer buffer; /* - * Restore the metapage. This is essentially the same as a full-page image, - * so restore the metapage unconditionally without looking at the LSN, to - * avoid torn page hazards. + * Restore the metapage. This is essentially the same as a full-page + * image, so restore the metapage unconditionally without looking at the + * LSN, to avoid torn page hazards. */ metabuffer = XLogReadBuffer(data->node, GIN_METAPAGE_BLKNO, false); if (!BufferIsValid(metabuffer)) @@ -877,7 +877,7 @@ ginRedoDeleteListPages(XLogRecPtr lsn, XLogRecord *record) /* * In normal operation, shiftList() takes exclusive lock on all the - * pages-to-be-deleted simultaneously. During replay, however, it should + * pages-to-be-deleted simultaneously. During replay, however, it should * be all right to lock them one at a time. This is dependent on the fact * that we are deleting pages from the head of the list, and that readers * share-lock the next page before releasing the one they are on. 
So we diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c index 64125d51953..e6f06c29e51 100644 --- a/src/backend/access/gist/gist.c +++ b/src/backend/access/gist/gist.c @@ -1382,7 +1382,7 @@ initGISTstate(Relation index) /* * If the index column has a specified collation, we should honor that * while doing comparisons. However, we may have a collatable storage - * type for a noncollatable indexed data type. If there's no index + * type for a noncollatable indexed data type. If there's no index * collation then specify default collation in case the support * functions need collation. This is harmless if the support * functions don't care about collation, so we just do it diff --git a/src/backend/access/gist/gistget.c b/src/backend/access/gist/gistget.c index 92a9dce8e61..7a8692b5087 100644 --- a/src/backend/access/gist/gistget.c +++ b/src/backend/access/gist/gistget.c @@ -31,7 +31,7 @@ * * On success return for a heap tuple, *recheck_p is set to indicate * whether recheck is needed. We recheck if any of the consistent() functions - * request it. recheck is not interesting when examining a non-leaf entry, + * request it. recheck is not interesting when examining a non-leaf entry, * since we must visit the lower index page if there's any doubt. * * If we are doing an ordered scan, so->distances[] is filled with distance @@ -62,7 +62,7 @@ gistindex_keytest(IndexScanDesc scan, /* * If it's a leftover invalid tuple from pre-9.1, treat it as a match with - * minimum possible distances. This means we'll always follow it to the + * minimum possible distances. This means we'll always follow it to the * referenced page. */ if (GistTupleIsInvalid(tuple)) @@ -224,7 +224,7 @@ gistindex_keytest(IndexScanDesc scan, * ntids: if not NULL, gistgetbitmap's output tuple counter * * If tbm/ntids aren't NULL, we are doing an amgetbitmap scan, and heap - * tuples should be reported directly into the bitmap. If they are NULL, + * tuples should be reported directly into the bitmap. If they are NULL, * we're doing a plain or ordered indexscan. For a plain indexscan, heap * tuple TIDs are returned into so->pageData[]. For an ordered indexscan, * heap tuple TIDs are pushed into individual search queue items. diff --git a/src/backend/access/gist/gistscan.c b/src/backend/access/gist/gistscan.c index 5194fe08ab7..8360b16ae50 100644 --- a/src/backend/access/gist/gistscan.c +++ b/src/backend/access/gist/gistscan.c @@ -56,7 +56,7 @@ GISTSearchTreeItemCombiner(RBNode *existing, const RBNode *newrb, void *arg) /* * If new item is heap tuple, it goes to front of chain; otherwise insert * it before the first index-page item, so that index pages are visited in - * LIFO order, ensuring depth-first search of index pages. See comments + * LIFO order, ensuring depth-first search of index pages. See comments * in gist_private.h. */ if (GISTSearchItemIsHeap(*newitem)) diff --git a/src/backend/access/gist/gistsplit.c b/src/backend/access/gist/gistsplit.c index 2dd26de0982..e1994bf04b5 100644 --- a/src/backend/access/gist/gistsplit.c +++ b/src/backend/access/gist/gistsplit.c @@ -71,7 +71,7 @@ gistunionsubkeyvec(GISTSTATE *giststate, IndexTuple *itvec, * Recompute unions of left- and right-side subkeys after a page split, * ignoring any tuples that are marked in spl->spl_dontcare[]. * - * Note: we always recompute union keys for all index columns. In some cases + * Note: we always recompute union keys for all index columns. 
In some cases * this might represent duplicate work for the leftmost column(s), but it's * not safe to assume that "zero penalty to move a tuple" means "the union * key doesn't change at all". Penalty functions aren't 100% accurate. @@ -160,7 +160,7 @@ findDontCares(Relation r, GISTSTATE *giststate, GISTENTRY *valvec, /* * Remove tuples that are marked don't-cares from the tuple index array a[] - * of length *len. This is applied separately to the spl_left and spl_right + * of length *len. This is applied separately to the spl_left and spl_right * arrays. */ static void @@ -193,7 +193,7 @@ removeDontCares(OffsetNumber *a, int *len, const bool *dontcare) /* * Place a single don't-care tuple into either the left or right side of the * split, according to which has least penalty for merging the tuple into - * the previously-computed union keys. We need consider only columns starting + * the previously-computed union keys. We need consider only columns starting * at attno. */ static void @@ -291,7 +291,7 @@ supportSecondarySplit(Relation r, GISTSTATE *giststate, int attno, /* * There is only one previously defined union, so we just choose swap - * or not by lowest penalty for that side. We can only get here if a + * or not by lowest penalty for that side. We can only get here if a * secondary split happened to have all NULLs in its column in the * tuples that the outer recursion level had assigned to one side. * (Note that the null checks in gistSplitByKey don't prevent the @@ -427,7 +427,7 @@ gistUserPicksplit(Relation r, GistEntryVector *entryvec, int attno, GistSplitVec sv->spl_rdatum = v->spl_rattr[attno]; /* - * Let the opclass-specific PickSplit method do its thing. Note that at + * Let the opclass-specific PickSplit method do its thing. Note that at * this point we know there are no null keys in the entryvec. */ FunctionCall2Coll(&giststate->picksplitFn[attno], diff --git a/src/backend/access/gist/gistutil.c b/src/backend/access/gist/gistutil.c index fbccdb800bc..f32e35ad159 100644 --- a/src/backend/access/gist/gistutil.c +++ b/src/backend/access/gist/gistutil.c @@ -414,7 +414,7 @@ gistchoose(Relation r, Page p, IndexTuple it, /* it has compressed entry */ * some inserts to go to other equally-good subtrees. * * keep_current_best is -1 if we haven't yet had to make a random choice - * whether to keep the current best tuple. If we have done so, and + * whether to keep the current best tuple. If we have done so, and * decided to keep it, keep_current_best is 1; if we've decided to * replace, keep_current_best is 0. (This state will be reset to -1 as * soon as we've made the replacement, but sometimes we make the choice in @@ -456,7 +456,7 @@ gistchoose(Relation r, Page p, IndexTuple it, /* it has compressed entry */ { /* * New best penalty for column. Tentatively select this tuple - * as the target, and record the best penalty. Then reset the + * as the target, and record the best penalty. Then reset the * next column's penalty to "unknown" (and indirectly, the * same for all the ones to its right). This will force us to * adopt this tuple's penalty values as the best for all the @@ -475,7 +475,7 @@ gistchoose(Relation r, Page p, IndexTuple it, /* it has compressed entry */ { /* * The current tuple is exactly as good for this column as the - * best tuple seen so far. The next iteration of this loop + * best tuple seen so far. The next iteration of this loop * will compare the next column. 
*/ } @@ -681,7 +681,7 @@ gistcheckpage(Relation rel, Buffer buf) /* * ReadBuffer verifies that every newly-read page passes * PageHeaderIsValid, which means it either contains a reasonably sane - * page header or is all-zero. We have to defend against the all-zero + * page header or is all-zero. We have to defend against the all-zero * case, however. */ if (PageIsNew(page)) diff --git a/src/backend/access/gist/gistvacuum.c b/src/backend/access/gist/gistvacuum.c index 215806be12f..278d386a7cd 100644 --- a/src/backend/access/gist/gistvacuum.c +++ b/src/backend/access/gist/gistvacuum.c @@ -49,7 +49,7 @@ gistvacuumcleanup(PG_FUNCTION_ARGS) stats->estimated_count = info->estimated_count; /* - * XXX the above is wrong if index is partial. Would it be OK to just + * XXX the above is wrong if index is partial. Would it be OK to just * return NULL, or is there work we must do below? */ } diff --git a/src/backend/access/gist/gistxlog.c b/src/backend/access/gist/gistxlog.c index e12b9c66dc1..7d36b2ab6a3 100644 --- a/src/backend/access/gist/gistxlog.c +++ b/src/backend/access/gist/gistxlog.c @@ -38,7 +38,7 @@ static MemoryContext opCtx; /* working memory for operations */ * follow-right flag, because that change is not included in the full-page * image. To be sure that the intermediate state with the wrong flag value is * not visible to concurrent Hot Standby queries, this function handles - * restoring the full-page image as well as updating the flag. (Note that + * restoring the full-page image as well as updating the flag. (Note that * we never need to do anything else to the child page in the current WAL * action.) */ @@ -89,7 +89,7 @@ gistRedoPageUpdateRecord(XLogRecPtr lsn, XLogRecord *record) /* * We need to acquire and hold lock on target page while updating the left - * child page. If we have a full-page image of target page, getting the + * child page. If we have a full-page image of target page, getting the * lock is a side-effect of restoring that image. Note that even if the * target page no longer exists, we'll still attempt to replay the change * on the child page. @@ -387,6 +387,7 @@ gistXLogSplit(RelFileNode node, BlockNumber blkno, bool page_is_leaf, for (ptr = dist; ptr; ptr = ptr->next) npage++; + /* * the caller should've checked this already, but doesn't hurt to check * again. diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c index 502fc31dd19..7abb7a47fc2 100644 --- a/src/backend/access/hash/hash.c +++ b/src/backend/access/hash/hash.c @@ -78,7 +78,7 @@ hashbuild(PG_FUNCTION_ARGS) * (assuming their hash codes are pretty random) there will be no locality * of access to the index, and if the index is bigger than available RAM * then we'll thrash horribly. To prevent that scenario, we can sort the - * tuples by (expected) bucket number. However, such a sort is useless + * tuples by (expected) bucket number. However, such a sort is useless * overhead when the index does fit in RAM. We choose to sort if the * initial index size exceeds NBuffers. * @@ -248,7 +248,7 @@ hashgettuple(PG_FUNCTION_ARGS) /* * An insertion into the current index page could have happened while * we didn't have read lock on it. Re-find our position by looking - * for the TID we previously returned. (Because we hold share lock on + * for the TID we previously returned. (Because we hold share lock on * the bucket, no deletions or splits could have occurred; therefore * we can expect that the TID still exists in the current index page, * at an offset >= where we were.) 
@@ -524,7 +524,7 @@ hashbulkdelete(PG_FUNCTION_ARGS) /* * Read the metapage to fetch original bucket and tuple counts. Also, we * keep a copy of the last-seen metapage so that we can use its - * hashm_spares[] values to compute bucket page addresses. This is a bit + * hashm_spares[] values to compute bucket page addresses. This is a bit * hokey but perfectly safe, since the interesting entries in the spares * array cannot change under us; and it beats rereading the metapage for * each bucket. @@ -655,7 +655,7 @@ loop_top: { /* * Otherwise, our count is untrustworthy since we may have - * double-scanned tuples in split buckets. Proceed by dead-reckoning. + * double-scanned tuples in split buckets. Proceed by dead-reckoning. * (Note: we still return estimated_count = false, because using this * count is better than not updating reltuples at all.) */ diff --git a/src/backend/access/hash/hashfunc.c b/src/backend/access/hash/hashfunc.c index 6d351da5b0a..c61fec6b84f 100644 --- a/src/backend/access/hash/hashfunc.c +++ b/src/backend/access/hash/hashfunc.c @@ -11,7 +11,7 @@ * src/backend/access/hash/hashfunc.c * * NOTES - * These functions are stored in pg_amproc. For each operator class + * These functions are stored in pg_amproc. For each operator class * defined for hash indexes, they compute the hash value of the argument. * * Additional hash functions appear in /utils/adt/ files for various @@ -158,7 +158,7 @@ hashtext(PG_FUNCTION_ARGS) /* * Note: this is currently identical in behavior to hashvarlena, but keep * it as a separate function in case we someday want to do something - * different in non-C locales. (See also hashbpchar, if so.) + * different in non-C locales. (See also hashbpchar, if so.) */ result = hash_any((unsigned char *) VARDATA_ANY(key), VARSIZE_ANY_EXHDR(key)); @@ -236,7 +236,7 @@ hashvarlena(PG_FUNCTION_ARGS) * * This allows some parallelism. Read-after-writes are good at doubling * the number of bits affected, so the goal of mixing pulls in the opposite - * direction from the goal of parallelism. I did what I could. Rotates + * direction from the goal of parallelism. I did what I could. Rotates * seem to cost as much as shifts on every machine I could lay my hands on, * and rotates are much kinder to the top and bottom bits, so I used rotates. *---------- @@ -270,7 +270,7 @@ hashvarlena(PG_FUNCTION_ARGS) * substantial performance increase since final() does not need to * do well in reverse, but is does need to affect all output bits. * mix(), on the other hand, does not need to affect all output - * bits (affecting 32 bits is enough). The original hash function had + * bits (affecting 32 bits is enough). The original hash function had * a single mixing operation that had to satisfy both sets of requirements * and was slower as a result. *---------- @@ -291,7 +291,7 @@ hashvarlena(PG_FUNCTION_ARGS) * k : the key (the unaligned variable-length array of bytes) * len : the length of the key, counting by bytes * - * Returns a uint32 value. Every bit of the key affects every bit of + * Returns a uint32 value. Every bit of the key affects every bit of * the return value. Every 1-bit and 2-bit delta achieves avalanche. * About 6*len+35 instructions. The best hash table sizes are powers * of 2. There is no need to do mod a prime (mod is sooo slow!). 
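The hashfunc.c comments above point out that the best hash table sizes are powers of two, so bucket selection never needs a modulo by a prime. As a hedged illustration of how masking replaces the modulo, here is a small standalone sketch in the spirit of the backend's hash-value-to-bucket mapping; the function name and the example mask values are illustrative, not the actual backend routine.

#include <stdint.h>
#include <stdio.h>

static uint32_t
bucket_for_hash(uint32_t hashval, uint32_t maxbucket,
				uint32_t highmask, uint32_t lowmask)
{
	uint32_t	bucket = hashval & highmask;

	/*
	 * Buckets above maxbucket have not been split off yet; their keys still
	 * live in the lower-numbered bucket selected by the smaller mask.
	 */
	if (bucket > maxbucket)
		bucket = bucket & lowmask;

	return bucket;
}

int
main(void)
{
	/* e.g. 5 buckets in use: maxbucket = 4, highmask = 7, lowmask = 3 */
	printf("%u\n", bucket_for_hash(0xdeadbeefU, 4, 7, 3));
	return 0;
}

With maxbucket = 4 and masks 7/3, hash values that would land in not-yet-split buckets 5, 6, or 7 fall back to buckets 1, 2, or 3, which is exactly the behavior the power-of-two masking scheme is meant to provide.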
diff --git a/src/backend/access/hash/hashinsert.c b/src/backend/access/hash/hashinsert.c index 49211eef9a3..05e9808b8ad 100644 --- a/src/backend/access/hash/hashinsert.c +++ b/src/backend/access/hash/hashinsert.c @@ -89,7 +89,7 @@ _hash_doinsert(Relation rel, IndexTuple itup) /* * If the previous iteration of this loop locked what is still the - * correct target bucket, we are done. Otherwise, drop any old lock + * correct target bucket, we are done. Otherwise, drop any old lock * and lock what now appears to be the correct bucket. */ if (retry) diff --git a/src/backend/access/hash/hashovfl.c b/src/backend/access/hash/hashovfl.c index 2389c3843f7..628c05698b9 100644 --- a/src/backend/access/hash/hashovfl.c +++ b/src/backend/access/hash/hashovfl.c @@ -80,7 +80,7 @@ blkno_to_bitno(HashMetaPage metap, BlockNumber ovflblkno) * * Add an overflow page to the bucket whose last page is pointed to by 'buf'. * - * On entry, the caller must hold a pin but no lock on 'buf'. The pin is + * On entry, the caller must hold a pin but no lock on 'buf'. The pin is * dropped before exiting (we assume the caller is not interested in 'buf' * anymore). The returned overflow page will be pinned and write-locked; * it is guaranteed to be empty. @@ -89,12 +89,12 @@ blkno_to_bitno(HashMetaPage metap, BlockNumber ovflblkno) * That buffer is returned in the same state. * * The caller must hold at least share lock on the bucket, to ensure that - * no one else tries to compact the bucket meanwhile. This guarantees that + * no one else tries to compact the bucket meanwhile. This guarantees that * 'buf' won't stop being part of the bucket while it's unlocked. * * NB: since this could be executed concurrently by multiple processes, * one should not assume that the returned overflow page will be the - * immediate successor of the originally passed 'buf'. Additional overflow + * immediate successor of the originally passed 'buf'. Additional overflow * pages might have been added to the bucket chain in between. */ Buffer @@ -157,7 +157,7 @@ _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf) /* * _hash_getovflpage() * - * Find an available overflow page and return it. The returned buffer + * Find an available overflow page and return it. The returned buffer * is pinned and write-locked, and has had _hash_pageinit() applied, * but it is caller's responsibility to fill the special space. * @@ -253,7 +253,7 @@ _hash_getovflpage(Relation rel, Buffer metabuf) * We create the new bitmap page with all pages marked "in use". * Actually two pages in the new bitmap's range will exist * immediately: the bitmap page itself, and the following page which - * is the one we return to the caller. Both of these are correctly + * is the one we return to the caller. Both of these are correctly * marked "in use". Subsequent pages do not exist yet, but it is * convenient to pre-mark them as "in use" too. */ @@ -284,7 +284,7 @@ _hash_getovflpage(Relation rel, Buffer metabuf) metap->hashm_spares[splitnum]++; /* - * Adjust hashm_firstfree to avoid redundant searches. But don't risk + * Adjust hashm_firstfree to avoid redundant searches. But don't risk * changing it if someone moved it while we were searching bitmap pages. */ if (metap->hashm_firstfree == orig_firstfree) @@ -313,7 +313,7 @@ found: blkno = bitno_to_blkno(metap, bit); /* - * Adjust hashm_firstfree to avoid redundant searches. But don't risk + * Adjust hashm_firstfree to avoid redundant searches. 
But don't risk * changing it if someone moved it while we were searching bitmap pages. */ if (metap->hashm_firstfree == orig_firstfree) @@ -494,7 +494,7 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf, /* * _hash_initbitmap() * - * Initialize a new bitmap page. The metapage has a write-lock upon + * Initialize a new bitmap page. The metapage has a write-lock upon * entering the function, and must be written by caller after return. * * 'blkno' is the block number of the new bitmap page. diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c index 1552b73f28b..9e4a2e04340 100644 --- a/src/backend/access/hash/hashpage.c +++ b/src/backend/access/hash/hashpage.c @@ -49,7 +49,7 @@ static void _hash_splitbucket(Relation rel, Buffer metabuf, * of the locking rules). However, we can skip taking lmgr locks when the * index is local to the current backend (ie, either temp or new in the * current transaction). No one else can see it, so there's no reason to - * take locks. We still take buffer-level locks, but not lmgr locks. + * take locks. We still take buffer-level locks, but not lmgr locks. */ #define USELOCKING(rel) (!RELATION_IS_LOCAL(rel)) @@ -136,7 +136,7 @@ _hash_getbuf(Relation rel, BlockNumber blkno, int access, int flags) * * This must be used only to fetch pages that are known to be before * the index's filesystem EOF, but are to be filled from scratch. - * _hash_pageinit() is applied automatically. Otherwise it has + * _hash_pageinit() is applied automatically. Otherwise it has * effects similar to _hash_getbuf() with access = HASH_WRITE. * * When this routine returns, a write lock is set on the @@ -344,7 +344,7 @@ _hash_metapinit(Relation rel, double num_tuples, ForkNumber forkNum) /* * Determine the target fill factor (in tuples per bucket) for this index. * The idea is to make the fill factor correspond to pages about as full - * as the user-settable fillfactor parameter says. We can compute it + * as the user-settable fillfactor parameter says. We can compute it * exactly since the index datatype (i.e. uint32 hash key) is fixed-width. */ data_width = sizeof(uint32); @@ -377,7 +377,7 @@ _hash_metapinit(Relation rel, double num_tuples, ForkNumber forkNum) /* * We initialize the metapage, the first N bucket pages, and the first * bitmap page in sequence, using _hash_getnewbuf to cause smgrextend() - * calls to occur. This ensures that the smgr level has the right idea of + * calls to occur. This ensures that the smgr level has the right idea of * the physical index length. */ metabuf = _hash_getnewbuf(rel, HASH_METAPAGE, forkNum); @@ -545,7 +545,7 @@ _hash_expandtable(Relation rel, Buffer metabuf) /* * Determine which bucket is to be split, and attempt to lock the old - * bucket. If we can't get the lock, give up. + * bucket. If we can't get the lock, give up. * * The lock protects us against other backends, but not against our own * backend. Must check for active scans separately. @@ -603,7 +603,7 @@ _hash_expandtable(Relation rel, Buffer metabuf) } /* - * Okay to proceed with split. Update the metapage bucket mapping info. + * Okay to proceed with split. Update the metapage bucket mapping info. * * Since we are scribbling on the metapage data right in the shared * buffer, any failure in this next little bit leaves us with a big @@ -641,7 +641,7 @@ _hash_expandtable(Relation rel, Buffer metabuf) * Copy bucket mapping info now; this saves re-accessing the meta page * inside _hash_splitbucket's inner loop. 
Note that once we drop the * split lock, other splits could begin, so these values might be out of - * date before _hash_splitbucket finishes. That's okay, since all it + * date before _hash_splitbucket finishes. That's okay, since all it * needs is to tell which of these two buckets to map hashkeys into. */ maxbucket = metap->hashm_maxbucket; @@ -876,7 +876,7 @@ _hash_splitbucket(Relation rel, /* * We're at the end of the old bucket chain, so we're done partitioning - * the tuples. Before quitting, call _hash_squeezebucket to ensure the + * the tuples. Before quitting, call _hash_squeezebucket to ensure the * tuples remaining in the old bucket (including the overflow pages) are * packed as tightly as possible. The new bucket is already tight. */ diff --git a/src/backend/access/hash/hashsearch.c b/src/backend/access/hash/hashsearch.c index ad405646c53..5aabe066064 100644 --- a/src/backend/access/hash/hashsearch.c +++ b/src/backend/access/hash/hashsearch.c @@ -210,7 +210,7 @@ _hash_first(IndexScanDesc scan, ScanDirection dir) /* * If the previous iteration of this loop locked what is still the - * correct target bucket, we are done. Otherwise, drop any old lock + * correct target bucket, we are done. Otherwise, drop any old lock * and lock what now appears to be the correct bucket. */ if (retry) @@ -269,7 +269,7 @@ _hash_first(IndexScanDesc scan, ScanDirection dir) * _hash_step() -- step to the next valid item in a scan in the bucket. * * If no valid record exists in the requested direction, return - * false. Else, return true and set the hashso_curpos for the + * false. Else, return true and set the hashso_curpos for the * scan to the right thing. * * 'bufP' points to the current buffer, which is pinned and read-locked. diff --git a/src/backend/access/hash/hashsort.c b/src/backend/access/hash/hashsort.c index e13670c4f40..c0d6fec2567 100644 --- a/src/backend/access/hash/hashsort.c +++ b/src/backend/access/hash/hashsort.c @@ -8,7 +8,7 @@ * thrashing. We use tuplesort.c to sort the given index tuples into order. * * Note: if the number of rows in the table has been underestimated, - * bucket splits may occur during the index build. In that case we'd + * bucket splits may occur during the index build. In that case we'd * be inserting into two or more buckets for each possible masked-off * hash code value. That's no big problem though, since we'll still have * plenty of locality of access. @@ -52,7 +52,7 @@ _h_spoolinit(Relation heap, Relation index, uint32 num_buckets) hspool->index = index; /* - * Determine the bitmask for hash code values. Since there are currently + * Determine the bitmask for hash code values. Since there are currently * num_buckets buckets in the index, the appropriate mask can be computed * as follows. * diff --git a/src/backend/access/hash/hashutil.c b/src/backend/access/hash/hashutil.c index 20bd2792585..43652921ac1 100644 --- a/src/backend/access/hash/hashutil.c +++ b/src/backend/access/hash/hashutil.c @@ -160,7 +160,7 @@ _hash_checkpage(Relation rel, Buffer buf, int flags) /* * ReadBuffer verifies that every newly-read page passes * PageHeaderIsValid, which means it either contains a reasonably sane - * page header or is all-zero. We have to defend against the all-zero + * page header or is all-zero. We have to defend against the all-zero * case, however. 
*/ if (PageIsNew(page)) @@ -280,7 +280,7 @@ _hash_form_tuple(Relation index, Datum *values, bool *isnull) * * Returns the offset of the first index entry having hashkey >= hash_value, * or the page's max offset plus one if hash_value is greater than all - * existing hash keys in the page. This is the appropriate place to start + * existing hash keys in the page. This is the appropriate place to start * a search, or to insert a new item. */ OffsetNumber diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index 336fbb06dac..405117a5261 100644 --- a/src/backend/access/heap/heapam.c +++ b/src/backend/access/heap/heapam.c @@ -88,11 +88,11 @@ static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf, HeapTuple newtup, HeapTuple old_key_tup, bool all_visible_cleared, bool new_all_visible_cleared); static void HeapSatisfiesHOTandKeyUpdate(Relation relation, - Bitmapset *hot_attrs, - Bitmapset *key_attrs, Bitmapset *id_attrs, - bool *satisfies_hot, bool *satisfies_key, - bool *satisfies_id, - HeapTuple oldtup, HeapTuple newtup); + Bitmapset *hot_attrs, + Bitmapset *key_attrs, Bitmapset *id_attrs, + bool *satisfies_hot, bool *satisfies_key, + bool *satisfies_id, + HeapTuple oldtup, HeapTuple newtup); static void compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask, uint16 old_infomask2, TransactionId add_to_xmax, LockTupleMode mode, bool is_update, @@ -113,7 +113,7 @@ static bool ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status XLTW_Oper oper, int *remaining); static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup); static HeapTuple ExtractReplicaIdentity(Relation rel, HeapTuple tup, bool key_modified, - bool *copy); + bool *copy); /* @@ -213,7 +213,7 @@ initscan(HeapScanDesc scan, ScanKey key, bool is_rescan) * while the scan is in progress will be invisible to my snapshot anyway. * (That is not true when using a non-MVCC snapshot. However, we couldn't * guarantee to return tuples added after scan start anyway, since they - * might go into pages we already scanned. To guarantee consistent + * might go into pages we already scanned. To guarantee consistent * results for a non-MVCC snapshot, the caller must hold some higher-level * lock that ensures the interesting tuple(s) won't change.) */ @@ -221,7 +221,7 @@ initscan(HeapScanDesc scan, ScanKey key, bool is_rescan) /* * If the table is large relative to NBuffers, use a bulk-read access - * strategy and enable synchronized scanning (see syncscan.c). Although + * strategy and enable synchronized scanning (see syncscan.c). Although * the thresholds for these features could be different, we make them the * same so that there are only two behaviors to tune rather than four. * (However, some callers need to be able to disable one or both of these @@ -325,7 +325,7 @@ heapgetpage(HeapScanDesc scan, BlockNumber page) } /* - * Be sure to check for interrupts at least once per page. Checks at + * Be sure to check for interrupts at least once per page. Checks at * higher code levels won't be able to stop a seqscan that encounters many * pages' worth of consecutive dead tuples. */ @@ -349,7 +349,7 @@ heapgetpage(HeapScanDesc scan, BlockNumber page) /* * We must hold share lock on the buffer content while examining tuple - * visibility. Afterwards, however, the tuples we have found to be + * visibility. Afterwards, however, the tuples we have found to be * visible are guaranteed good as long as we hold the buffer pin. 
*/ LockBuffer(buffer, BUFFER_LOCK_SHARE); @@ -1126,7 +1126,7 @@ relation_openrv(const RangeVar *relation, LOCKMODE lockmode) * * Same as relation_openrv, but with an additional missing_ok argument * allowing a NULL return rather than an error if the relation is not - * found. (Note that some other causes, such as permissions problems, + * found. (Note that some other causes, such as permissions problems, * will still result in an ereport.) * ---------------- */ @@ -1740,7 +1740,7 @@ heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer, /* * When first_call is true (and thus, skip is initially false) we'll - * return the first tuple we find. But on later passes, heapTuple + * return the first tuple we find. But on later passes, heapTuple * will initially be pointing to the tuple we returned last time. * Returning it again would be incorrect (and would loop forever), so * we skip it and return the next match we find. @@ -1834,7 +1834,7 @@ heap_hot_search(ItemPointer tid, Relation relation, Snapshot snapshot, * possibly uncommitted version. * * *tid is both an input and an output parameter: it is updated to - * show the latest version of the row. Note that it will not be changed + * show the latest version of the row. Note that it will not be changed * if no version of the row passes the snapshot test. */ void @@ -1955,7 +1955,7 @@ heap_get_latest_tid(Relation relation, * * This is called after we have waited for the XMAX transaction to terminate. * If the transaction aborted, we guarantee the XMAX_INVALID hint bit will - * be set on exit. If the transaction committed, we set the XMAX_COMMITTED + * be set on exit. If the transaction committed, we set the XMAX_COMMITTED * hint bit if possible --- but beware that that may not yet be possible, * if the transaction committed asynchronously. * @@ -2042,7 +2042,7 @@ FreeBulkInsertState(BulkInsertState bistate) * The return value is the OID assigned to the tuple (either here or by the * caller), or InvalidOid if no OID. The header fields of *tup are updated * to match the stored tuple; in particular tup->t_self receives the actual - * TID where the tuple was stored. But note that any toasting of fields + * TID where the tuple was stored. But note that any toasting of fields * within the tuple data is NOT reflected into *tup. */ Oid @@ -2071,7 +2071,7 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid, * For a heap insert, we only need to check for table-level SSI locks. Our * new tuple can't possibly conflict with existing tuple locks, and heap * page locks are only consolidated versions of tuple locks; they do not - * lock "gaps" as index page locks do. So we don't need to identify a + * lock "gaps" as index page locks do. So we don't need to identify a * buffer before making the call. */ CheckForSerializableConflictIn(relation, NULL, InvalidBuffer); @@ -2123,8 +2123,8 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid, bool need_tuple_data; /* - * For logical decoding, we need the tuple even if we're doing a - * full page write, so make sure to log it separately. (XXX We could + * For logical decoding, we need the tuple even if we're doing a full + * page write, so make sure to log it separately. (XXX We could * alternatively store a pointer into the FPW). 
* * Also, if this is a catalog, we need to transmit combocids to @@ -2165,9 +2165,9 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid, rdata[2].next = NULL; /* - * Make a separate rdata entry for the tuple's buffer if we're - * doing logical decoding, so that an eventual FPW doesn't - * remove the tuple's data. + * Make a separate rdata entry for the tuple's buffer if we're doing + * logical decoding, so that an eventual FPW doesn't remove the + * tuple's data. */ if (need_tuple_data) { @@ -2248,7 +2248,7 @@ heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid, /* * If the object id of this tuple has already been assigned, trust the - * caller. There are a couple of ways this can happen. At initial db + * caller. There are a couple of ways this can happen. At initial db * creation, the backend program sets oids for tuples. When we define * an index, we set the oid. Finally, in the future, we may allow * users to set their own object ids in order to support a persistent @@ -2342,7 +2342,7 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples, * For a heap insert, we only need to check for table-level SSI locks. Our * new tuple can't possibly conflict with existing tuple locks, and heap * page locks are only consolidated versions of tuple locks; they do not - * lock "gaps" as index page locks do. So we don't need to identify a + * lock "gaps" as index page locks do. So we don't need to identify a * buffer before making the call. */ CheckForSerializableConflictIn(relation, NULL, InvalidBuffer); @@ -2356,7 +2356,7 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples, int nthispage; /* - * Find buffer where at least the next tuple will fit. If the page is + * Find buffer where at least the next tuple will fit. If the page is * all-visible, this will also pin the requisite visibility map page. */ buffer = RelationGetBufferForTuple(relation, heaptuples[ndone]->t_len, @@ -2487,9 +2487,9 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples, rdata[1].next = NULL; /* - * Make a separate rdata entry for the tuple's buffer if - * we're doing logical decoding, so that an eventual FPW - * doesn't remove the tuple's data. + * Make a separate rdata entry for the tuple's buffer if we're + * doing logical decoding, so that an eventual FPW doesn't remove + * the tuple's data. */ if (need_tuple_data) { @@ -2597,8 +2597,8 @@ compute_infobits(uint16 infomask, uint16 infomask2) static inline bool xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask) { - const uint16 interesting = - HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY | HEAP_LOCK_MASK; + const uint16 interesting = + HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY | HEAP_LOCK_MASK; if ((new_infomask & interesting) != (old_infomask & interesting)) return true; @@ -2650,7 +2650,7 @@ heap_delete(Relation relation, ItemPointer tid, bool have_tuple_lock = false; bool iscombo; bool all_visible_cleared = false; - HeapTuple old_key_tuple = NULL; /* replica identity of the tuple */ + HeapTuple old_key_tuple = NULL; /* replica identity of the tuple */ bool old_key_copied = false; Assert(ItemPointerIsValid(tid)); @@ -2751,10 +2751,10 @@ l1: /* * You might think the multixact is necessarily done here, but not * so: it could have surviving members, namely our own xact or - * other subxacts of this backend. It is legal for us to delete + * other subxacts of this backend. 
It is legal for us to delete * the tuple in either case, however (the latter case is * essentially a situation of upgrading our former shared lock to - * exclusive). We don't bother changing the on-disk hint bits + * exclusive). We don't bother changing the on-disk hint bits * since we are about to overwrite the xmax altogether. */ } @@ -2836,7 +2836,7 @@ l1: * If this is the first possibly-multixact-able operation in the current * transaction, set my per-backend OldestMemberMXactId setting. We can be * certain that the transaction will never become a member of any older - * MultiXactIds than that. (We have to do this even if we end up just + * MultiXactIds than that. (We have to do this even if we end up just * using our own TransactionId below, since some other backend could * incorporate our XID into a MultiXact immediately afterwards.) */ @@ -2852,7 +2852,7 @@ l1: /* * If this transaction commits, the tuple will become DEAD sooner or * later. Set flag that this page is a candidate for pruning once our xid - * falls below the OldestXmin horizon. If the transaction finally aborts, + * falls below the OldestXmin horizon. If the transaction finally aborts, * the subsequent page pruning will be a no-op and the hint will be * cleared. */ @@ -2919,7 +2919,7 @@ l1: xlhdr.t_hoff = old_key_tuple->t_data->t_hoff; rdata[1].next = &(rdata[2]); - rdata[2].data = (char*)&xlhdr; + rdata[2].data = (char *) &xlhdr; rdata[2].len = SizeOfHeapHeader; rdata[2].buffer = InvalidBuffer; rdata[2].next = NULL; @@ -2994,7 +2994,7 @@ l1: * * This routine may be used to delete a tuple when concurrent updates of * the target tuple are not expected (for example, because we have a lock - * on the relation associated with the tuple). Any failure is reported + * on the relation associated with the tuple). Any failure is reported * via ereport(). */ void @@ -3110,7 +3110,7 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup, /* * Fetch the list of attributes to be checked for HOT update. This is * wasted effort if we fail to update or have to put the new tuple on a - * different page. But we must compute the list before obtaining buffer + * different page. But we must compute the list before obtaining buffer * lock --- in the worst case, if we are doing an update on one of the * relevant system catalogs, we could deadlock if we try to fetch the list * later. In any case, the relcache caches the data so this is usually @@ -3122,7 +3122,7 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup, hot_attrs = RelationGetIndexAttrBitmap(relation, INDEX_ATTR_BITMAP_ALL); key_attrs = RelationGetIndexAttrBitmap(relation, INDEX_ATTR_BITMAP_KEY); id_attrs = RelationGetIndexAttrBitmap(relation, - INDEX_ATTR_BITMAP_IDENTITY_KEY); + INDEX_ATTR_BITMAP_IDENTITY_KEY); block = ItemPointerGetBlockNumber(otid); buffer = ReadBuffer(relation, block); @@ -3193,7 +3193,7 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup, * If this is the first possibly-multixact-able operation in the * current transaction, set my per-backend OldestMemberMXactId * setting. We can be certain that the transaction will never become a - * member of any older MultiXactIds than that. (We have to do this + * member of any older MultiXactIds than that. (We have to do this * even if we end up just using our own TransactionId below, since * some other backend could incorporate our XID into a MultiXact * immediately afterwards.) @@ -3238,7 +3238,7 @@ l2: /* * XXX note that we don't consider the "no wait" case here. 
This * isn't a problem currently because no caller uses that case, but it - * should be fixed if such a caller is introduced. It wasn't a + * should be fixed if such a caller is introduced. It wasn't a * problem previously because this code would always wait, but now * that some tuple locks do not conflict with one of the lock modes we * use, it is possible that this case is interesting to handle @@ -3276,7 +3276,7 @@ l2: * it as locker, unless it is gone completely. * * If it's not a multi, we need to check for sleeping conditions - * before actually going to sleep. If the update doesn't conflict + * before actually going to sleep. If the update doesn't conflict * with the locks, we just continue without sleeping (but making sure * it is preserved). */ @@ -3302,10 +3302,10 @@ l2: goto l2; /* - * Note that the multixact may not be done by now. It could have + * Note that the multixact may not be done by now. It could have * surviving members; our own xact or other subxacts of this * backend, and also any other concurrent transaction that locked - * the tuple with KeyShare if we only got TupleLockUpdate. If + * the tuple with KeyShare if we only got TupleLockUpdate. If * this is the case, we have to be careful to mark the updated * tuple with the surviving members in Xmax. * @@ -3512,7 +3512,7 @@ l2: * If the toaster needs to be activated, OR if the new tuple will not fit * on the same page as the old, then we need to release the content lock * (but not the pin!) on the old tuple's buffer while we are off doing - * TOAST and/or table-file-extension work. We must mark the old tuple to + * TOAST and/or table-file-extension work. We must mark the old tuple to * show that it's already being updated, else other processes may try to * update it themselves. * @@ -3578,7 +3578,7 @@ l2: * there's more free now than before. * * What's more, if we need to get a new page, we will need to acquire - * buffer locks on both old and new pages. To avoid deadlock against + * buffer locks on both old and new pages. To avoid deadlock against * some other backend trying to get the same two locks in the other * order, we must be consistent about the order we get the locks in. * We use the rule "lock the lower-numbered page of the relation @@ -3638,7 +3638,7 @@ l2: /* * At this point newbuf and buffer are both pinned and locked, and newbuf - * has enough space for the new tuple. If they are the same buffer, only + * has enough space for the new tuple. If they are the same buffer, only * one pin is held. */ @@ -3646,7 +3646,7 @@ l2: { /* * Since the new tuple is going into the same page, we might be able - * to do a HOT update. Check if any of the index columns have been + * to do a HOT update. Check if any of the index columns have been * changed. If not, then HOT update is possible. */ if (satisfies_hot) @@ -3672,13 +3672,13 @@ l2: /* * If this transaction commits, the old tuple will become DEAD sooner or * later. Set flag that this page is a candidate for pruning once our xid - * falls below the OldestXmin horizon. If the transaction finally aborts, + * falls below the OldestXmin horizon. If the transaction finally aborts, * the subsequent page pruning will be a no-op and the hint will be * cleared. * * XXX Should we set hint on newbuf as well? If the transaction aborts, * there would be a prunable tuple in the newbuf; but for now we choose - * not to optimize for aborts. Note that heap_xlog_update must be kept in + * not to optimize for aborts. 
Note that heap_xlog_update must be kept in * sync if this decision changes. */ PageSetPrunable(page, xid); @@ -3775,7 +3775,7 @@ l2: * Mark old tuple for invalidation from system caches at next command * boundary, and mark the new tuple for invalidation in case we abort. We * have to do this before releasing the buffer because oldtup is in the - * buffer. (heaptup is all in local memory, but it's necessary to process + * buffer. (heaptup is all in local memory, but it's necessary to process * both tuple versions in one call to inval.c so we can avoid redundant * sinval messages.) */ @@ -3853,7 +3853,7 @@ heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum, /* * Extract the corresponding values. XXX this is pretty inefficient if - * there are many indexed columns. Should HeapSatisfiesHOTandKeyUpdate do + * there are many indexed columns. Should HeapSatisfiesHOTandKeyUpdate do * a single heap_deform_tuple call on each tuple, instead? But that * doesn't work for system columns ... */ @@ -3876,7 +3876,7 @@ heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum, /* * We do simple binary comparison of the two datums. This may be overly * strict because there can be multiple binary representations for the - * same logical value. But we should be OK as long as there are no false + * same logical value. But we should be OK as long as there are no false * positives. Using a type-specific equality operator is messy because * there could be multiple notions of equality in different operator * classes; furthermore, we cannot safely invoke user-defined functions @@ -3951,8 +3951,7 @@ HeapSatisfiesHOTandKeyUpdate(Relation relation, Bitmapset *hot_attrs, /* * Since the HOT attributes are a superset of the key attributes and * the key attributes are a superset of the id attributes, this logic - * is guaranteed to identify the next column that needs to be - * checked. + * is guaranteed to identify the next column that needs to be checked. */ if (hot_result && next_hot_attnum > FirstLowInvalidHeapAttributeNumber) check_now = next_hot_attnum; @@ -3981,12 +3980,11 @@ HeapSatisfiesHOTandKeyUpdate(Relation relation, Bitmapset *hot_attrs, } /* - * Advance the next attribute numbers for the sets that contain - * the attribute we just checked. As we work our way through the - * columns, the next_attnum values will rise; but when each set - * becomes empty, bms_first_member() will return -1 and the attribute - * number will end up with a value less than - * FirstLowInvalidHeapAttributeNumber. + * Advance the next attribute numbers for the sets that contain the + * attribute we just checked. As we work our way through the columns, + * the next_attnum values will rise; but when each set becomes empty, + * bms_first_member() will return -1 and the attribute number will end + * up with a value less than FirstLowInvalidHeapAttributeNumber. */ if (hot_result && check_now == next_hot_attnum) { @@ -4015,7 +4013,7 @@ HeapSatisfiesHOTandKeyUpdate(Relation relation, Bitmapset *hot_attrs, * * This routine may be used to update a tuple when concurrent updates of * the target tuple are not expected (for example, because we have a lock - * on the relation associated with the tuple). Any failure is reported + * on the relation associated with the tuple). Any failure is reported * via ereport(). 
*/ void @@ -4057,7 +4055,7 @@ simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup) static MultiXactStatus get_mxact_status_for_lock(LockTupleMode mode, bool is_update) { - int retval; + int retval; if (is_update) retval = tupleLockExtraInfo[mode].updstatus; @@ -4239,15 +4237,15 @@ l3: * However, if there are updates, we need to walk the update chain * to mark future versions of the row as locked, too. That way, * if somebody deletes that future version, we're protected - * against the key going away. This locking of future versions + * against the key going away. This locking of future versions * could block momentarily, if a concurrent transaction is * deleting a key; or it could return a value to the effect that - * the transaction deleting the key has already committed. So we + * the transaction deleting the key has already committed. So we * do this before re-locking the buffer; otherwise this would be * prone to deadlocks. * * Note that the TID we're locking was grabbed before we unlocked - * the buffer. For it to change while we're not looking, the + * the buffer. For it to change while we're not looking, the * other properties we're testing for below after re-locking the * buffer would also change, in which case we would restart this * loop above. @@ -4472,7 +4470,7 @@ l3: * Of course, the multixact might not be done here: if we're * requesting a light lock mode, other transactions with light * locks could still be alive, as well as locks owned by our - * own xact or other subxacts of this backend. We need to + * own xact or other subxacts of this backend. We need to * preserve the surviving MultiXact members. Note that it * isn't absolutely necessary in the latter case, but doing so * is simpler. @@ -4516,7 +4514,7 @@ l3: /* * xwait is done, but if xwait had just locked the tuple then * some other xact could update this tuple before we get to - * this point. Check for xmax change, and start over if so. + * this point. Check for xmax change, and start over if so. */ if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) || !TransactionIdEquals( @@ -4525,7 +4523,7 @@ l3: goto l3; /* - * Otherwise check if it committed or aborted. Note we cannot + * Otherwise check if it committed or aborted. Note we cannot * be here if the tuple was only locked by somebody who didn't * conflict with us; that should have been handled above. So * that transaction must necessarily be gone by now. @@ -4605,7 +4603,7 @@ failed: * If this is the first possibly-multixact-able operation in the current * transaction, set my per-backend OldestMemberMXactId setting. We can be * certain that the transaction will never become a member of any older - * MultiXactIds than that. (We have to do this even if we end up just + * MultiXactIds than that. (We have to do this even if we end up just * using our own TransactionId below, since some other backend could * incorporate our XID into a MultiXact immediately afterwards.) */ @@ -4641,7 +4639,7 @@ failed: HeapTupleHeaderSetXmax(tuple->t_data, xid); /* - * Make sure there is no forward chain link in t_ctid. Note that in the + * Make sure there is no forward chain link in t_ctid. Note that in the * cases where the tuple has been updated, we must not overwrite t_ctid, * because it was set by the updater. Moreover, if the tuple has been * updated, we need to follow the update chain to lock the new versions of @@ -4653,8 +4651,8 @@ failed: MarkBufferDirty(*buffer); /* - * XLOG stuff. 
You might think that we don't need an XLOG record because - * there is no state change worth restoring after a crash. You would be + * XLOG stuff. You might think that we don't need an XLOG record because + * there is no state change worth restoring after a crash. You would be * wrong however: we have just written either a TransactionId or a * MultiXactId that may never have been seen on disk before, and we need * to make sure that there are XLOG entries covering those ID numbers. @@ -4818,7 +4816,7 @@ l5: * If the XMAX is already a MultiXactId, then we need to expand it to * include add_to_xmax; but if all the members were lockers and are * all gone, we can do away with the IS_MULTI bit and just set - * add_to_xmax as the only locker/updater. If all lockers are gone + * add_to_xmax as the only locker/updater. If all lockers are gone * and we have an updater that aborted, we can also do without a * multi. * @@ -4881,7 +4879,7 @@ l5: */ MultiXactStatus new_status; MultiXactStatus old_status; - LockTupleMode old_mode; + LockTupleMode old_mode; if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask)) { @@ -4900,8 +4898,8 @@ l5: { /* * LOCK_ONLY can be present alone only when a page has been - * upgraded by pg_upgrade. But in that case, - * TransactionIdIsInProgress() should have returned false. We + * upgraded by pg_upgrade. But in that case, + * TransactionIdIsInProgress() should have returned false. We * assume it's no longer locked in this case. */ elog(WARNING, "LOCK_ONLY found for Xid in progress %u", xmax); @@ -4929,12 +4927,13 @@ l5: if (xmax == add_to_xmax) { /* - * Note that it's not possible for the original tuple to be updated: - * we wouldn't be here because the tuple would have been invisible and - * we wouldn't try to update it. As a subtlety, this code can also - * run when traversing an update chain to lock future versions of a - * tuple. But we wouldn't be here either, because the add_to_xmax - * would be different from the original updater. + * Note that it's not possible for the original tuple to be + * updated: we wouldn't be here because the tuple would have been + * invisible and we wouldn't try to update it. As a subtlety, + * this code can also run when traversing an update chain to lock + * future versions of a tuple. But we wouldn't be here either, + * because the add_to_xmax would be different from the original + * updater. */ Assert(HEAP_XMAX_IS_LOCKED_ONLY(old_infomask)); @@ -5013,7 +5012,7 @@ static HTSU_Result test_lockmode_for_conflict(MultiXactStatus status, TransactionId xid, LockTupleMode mode, bool *needwait) { - MultiXactStatus wantedstatus; + MultiXactStatus wantedstatus; *needwait = false; wantedstatus = get_mxact_status_for_lock(mode, false); @@ -5026,18 +5025,18 @@ test_lockmode_for_conflict(MultiXactStatus status, TransactionId xid, if (TransactionIdIsCurrentTransactionId(xid)) { /* - * Updated by our own transaction? Just return failure. This shouldn't - * normally happen. + * Updated by our own transaction? Just return failure. This + * shouldn't normally happen. */ return HeapTupleSelfUpdated; } else if (TransactionIdIsInProgress(xid)) { /* - * If the locking transaction is running, what we do depends on whether - * the lock modes conflict: if they do, then we must wait for it to - * finish; otherwise we can fall through to lock this tuple version - * without waiting. 
+ * If the locking transaction is running, what we do depends on + * whether the lock modes conflict: if they do, then we must wait for + * it to finish; otherwise we can fall through to lock this tuple + * version without waiting. */ if (DoLockModesConflict(LOCKMODE_from_mxstatus(status), LOCKMODE_from_mxstatus(wantedstatus))) @@ -5046,8 +5045,8 @@ test_lockmode_for_conflict(MultiXactStatus status, TransactionId xid, } /* - * If we set needwait above, then this value doesn't matter; otherwise, - * this value signals to caller that it's okay to proceed. + * If we set needwait above, then this value doesn't matter; + * otherwise, this value signals to caller that it's okay to proceed. */ return HeapTupleMayBeUpdated; } @@ -5059,7 +5058,7 @@ test_lockmode_for_conflict(MultiXactStatus status, TransactionId xid, * The other transaction committed. If it was only a locker, then the * lock is completely gone now and we can return success; but if it * was an update, then what we do depends on whether the two lock - * modes conflict. If they conflict, then we must report error to + * modes conflict. If they conflict, then we must report error to * caller. But if they don't, we can fall through to allow the current * transaction to lock the tuple. * @@ -5133,8 +5132,8 @@ l4: LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE); /* - * Check the tuple XMIN against prior XMAX, if any. If we reached - * the end of the chain, we're done, so return success. + * Check the tuple XMIN against prior XMAX, if any. If we reached the + * end of the chain, we're done, so return success. */ if (TransactionIdIsValid(priorXmax) && !TransactionIdEquals(HeapTupleHeaderGetXmin(mytup.t_data), @@ -5162,14 +5161,14 @@ l4: rawxmax = HeapTupleHeaderGetRawXmax(mytup.t_data); if (old_infomask & HEAP_XMAX_IS_MULTI) { - int nmembers; - int i; + int nmembers; + int i; MultiXactMember *members; nmembers = GetMultiXactIdMembers(rawxmax, &members, false); for (i = 0; i < nmembers; i++) { - HTSU_Result res; + HTSU_Result res; res = test_lockmode_for_conflict(members[i].status, members[i].xid, @@ -5196,7 +5195,7 @@ l4: } else { - HTSU_Result res; + HTSU_Result res; MultiXactStatus status; /* @@ -5219,9 +5218,9 @@ l4: else { /* - * LOCK_ONLY present alone (a pg_upgraded tuple - * marked as share-locked in the old cluster) shouldn't - * be seen in the middle of an update chain. + * LOCK_ONLY present alone (a pg_upgraded tuple marked + * as share-locked in the old cluster) shouldn't be + * seen in the middle of an update chain. */ elog(ERROR, "invalid lock status in tuple"); } @@ -5323,7 +5322,7 @@ l4: * The initial tuple is assumed to be already locked. * * This function doesn't check visibility, it just inconditionally marks the - * tuple(s) as locked. If any tuple in the updated chain is being deleted + * tuple(s) as locked. If any tuple in the updated chain is being deleted * concurrently (or updated with the key being modified), sleep until the * transaction doing it is finished. * @@ -5347,7 +5346,7 @@ heap_lock_updated_tuple(Relation rel, HeapTuple tuple, ItemPointer ctid, * If this is the first possibly-multixact-able operation in the * current transaction, set my per-backend OldestMemberMXactId * setting. We can be certain that the transaction will never become a - * member of any older MultiXactIds than that. (We have to do this + * member of any older MultiXactIds than that. 
(We have to do this * even if we end up just using our own TransactionId below, since * some other backend could incorporate our XID into a MultiXact * immediately afterwards.) @@ -5366,7 +5365,7 @@ heap_lock_updated_tuple(Relation rel, HeapTuple tuple, ItemPointer ctid, * heap_inplace_update - update a tuple "in place" (ie, overwrite it) * * Overwriting violates both MVCC and transactional safety, so the uses - * of this function in Postgres are extremely limited. Nonetheless we + * of this function in Postgres are extremely limited. Nonetheless we * find some places to use it. * * The tuple cannot change size, and therefore it's reasonable to assume @@ -5608,7 +5607,7 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask, */ if (ISUPDATE_from_mxstatus(members[i].status)) { - TransactionId xid = members[i].xid; + TransactionId xid = members[i].xid; /* * It's an update; should we keep it? If the transaction is known @@ -5728,7 +5727,7 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask, * heap_prepare_freeze_tuple * * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac) - * are older than the specified cutoff XID and cutoff MultiXactId. If so, + * are older than the specified cutoff XID and cutoff MultiXactId. If so, * setup enough state (in the *frz output argument) to later execute and * WAL-log what we would need to do, and return TRUE. Return FALSE if nothing * is to be changed. @@ -5801,11 +5800,11 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid, else if (flags & FRM_RETURN_IS_XID) { /* - * NB -- some of these transformations are only valid because - * we know the return Xid is a tuple updater (i.e. not merely a + * NB -- some of these transformations are only valid because we + * know the return Xid is a tuple updater (i.e. not merely a * locker.) Also note that the only reason we don't explicitely - * worry about HEAP_KEYS_UPDATED is because it lives in t_infomask2 - * rather than t_infomask. + * worry about HEAP_KEYS_UPDATED is because it lives in + * t_infomask2 rather than t_infomask. */ frz->t_infomask &= ~HEAP_XMAX_BITS; frz->xmax = newxmax; @@ -5815,8 +5814,8 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid, } else if (flags & FRM_RETURN_IS_MULTI) { - uint16 newbits; - uint16 newbits2; + uint16 newbits; + uint16 newbits2; /* * We can't use GetMultiXactIdHintBits directly on the new multi @@ -5851,7 +5850,7 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid, /* * The tuple might be marked either XMAX_INVALID or XMAX_COMMITTED + - * LOCKED. Normalize to INVALID just to be sure no one gets confused. + * LOCKED. Normalize to INVALID just to be sure no one gets confused. * Also get rid of the HEAP_KEYS_UPDATED bit. */ frz->t_infomask &= ~HEAP_XMAX_BITS; @@ -6111,7 +6110,7 @@ HeapTupleGetUpdateXid(HeapTupleHeader tuple) * used to optimize multixact access in case it's a lock-only multi); 'nowait' * indicates whether to use conditional lock acquisition, to allow callers to * fail if lock is unavailable. 'rel', 'ctid' and 'oper' are used to set up - * context information for error messages. 'remaining', if not NULL, receives + * context information for error messages. 'remaining', if not NULL, receives * the number of members that are still running, including any (non-aborted) * subtransactions of our own transaction. * @@ -6173,7 +6172,7 @@ Do_MultiXactIdWait(MultiXactId multi, MultiXactStatus status, * return failure, if asked to avoid waiting.) 
* * Note that we don't set up an error context callback ourselves, - * but instead we pass the info down to XactLockTableWait. This + * but instead we pass the info down to XactLockTableWait. This * might seem a bit wasteful because the context is set up and * tore down for each member of the multixact, but in reality it * should be barely noticeable, and it avoids duplicate code. @@ -6242,7 +6241,7 @@ ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status, * heap_tuple_needs_freeze * * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac) - * are older than the specified cutoff XID or MultiXactId. If so, return TRUE. + * are older than the specified cutoff XID or MultiXactId. If so, return TRUE. * * It doesn't matter whether the tuple is alive or dead, we are checking * to see if a tuple needs to be removed or frozen to avoid wraparound. @@ -6366,7 +6365,7 @@ heap_restrpos(HeapScanDesc scan) else { /* - * If we reached end of scan, rs_inited will now be false. We must + * If we reached end of scan, rs_inited will now be false. We must * reset it to true to keep heapgettup from doing the wrong thing. */ scan->rs_inited = true; @@ -6548,7 +6547,7 @@ log_heap_clean(Relation reln, Buffer buffer, } /* - * Perform XLogInsert for a heap-freeze operation. Caller must have already + * Perform XLogInsert for a heap-freeze operation. Caller must have already * modified the buffer and marked it dirty. */ XLogRecPtr @@ -6593,7 +6592,7 @@ log_heap_freeze(Relation reln, Buffer buffer, TransactionId cutoff_xid, /* * Perform XLogInsert for a heap-visible operation. 'block' is the block * being marked all-visible, and vm_buffer is the buffer containing the - * corresponding visibility map block. Both should have already been modified + * corresponding visibility map block. Both should have already been modified * and dirtied. * * If checksums are enabled, we also add the heap_buffer to the chain to @@ -6642,7 +6641,7 @@ log_heap_visible(RelFileNode rnode, Buffer heap_buffer, Buffer vm_buffer, } /* - * Perform XLogInsert for a heap-update operation. Caller must already + * Perform XLogInsert for a heap-update operation. Caller must already * have modified the buffer(s) and marked them dirty. */ static XLogRecPtr @@ -6674,10 +6673,10 @@ log_heap_update(Relation reln, Buffer oldbuf, info = XLOG_HEAP_UPDATE; /* - * If the old and new tuple are on the same page, we only need to log - * the parts of the new tuple that were changed. That saves on the amount - * of WAL we need to write. Currently, we just count any unchanged bytes - * in the beginning and end of the tuple. That's quick to check, and + * If the old and new tuple are on the same page, we only need to log the + * parts of the new tuple that were changed. That saves on the amount of + * WAL we need to write. Currently, we just count any unchanged bytes in + * the beginning and end of the tuple. That's quick to check, and * perfectly covers the common case that only one field is updated. * * We could do this even if the old and new tuple are on different pages, @@ -6688,10 +6687,10 @@ log_heap_update(Relation reln, Buffer oldbuf, * updates tend to create the new tuple version on the same page, there * isn't much to be gained by doing this across pages anyway. * - * Skip this if we're taking a full-page image of the new page, as we don't - * include the new tuple in the WAL record in that case. 
Also disable if - * wal_level='logical', as logical decoding needs to be able to read the - * new tuple in whole from the WAL record alone. + * Skip this if we're taking a full-page image of the new page, as we + * don't include the new tuple in the WAL record in that case. Also + * disable if wal_level='logical', as logical decoding needs to be able to + * read the new tuple in whole from the WAL record alone. */ if (oldbuf == newbuf && !need_tuple_data && !XLogCheckBufferNeedsBackup(newbuf)) @@ -6707,6 +6706,7 @@ log_heap_update(Relation reln, Buffer oldbuf, if (newp[prefixlen] != oldp[prefixlen]) break; } + /* * Storing the length of the prefix takes 2 bytes, so we need to save * at least 3 bytes or there's no point. @@ -6793,8 +6793,8 @@ log_heap_update(Relation reln, Buffer oldbuf, xlhdr.header.t_infomask2 = newtup->t_data->t_infomask2; xlhdr.header.t_infomask = newtup->t_data->t_infomask; xlhdr.header.t_hoff = newtup->t_data->t_hoff; - Assert(offsetof(HeapTupleHeaderData, t_bits) + prefixlen + suffixlen <= newtup->t_len); - xlhdr.t_len = newtup->t_len - offsetof(HeapTupleHeaderData, t_bits) - prefixlen - suffixlen; + Assert(offsetof(HeapTupleHeaderData, t_bits) +prefixlen + suffixlen <= newtup->t_len); + xlhdr.t_len = newtup->t_len - offsetof(HeapTupleHeaderData, t_bits) -prefixlen - suffixlen; /* * As with insert records, we need not store this rdata segment if we @@ -6816,7 +6816,7 @@ log_heap_update(Relation reln, Buffer oldbuf, if (prefixlen == 0) { rdata[nr].data = ((char *) newtup->t_data) + offsetof(HeapTupleHeaderData, t_bits); - rdata[nr].len = newtup->t_len - offsetof(HeapTupleHeaderData, t_bits) - suffixlen; + rdata[nr].len = newtup->t_len - offsetof(HeapTupleHeaderData, t_bits) -suffixlen; rdata[nr].buffer = need_tuple_data ? InvalidBuffer : newbufref; rdata[nr].buffer_std = true; rdata[nr].next = NULL; @@ -6829,7 +6829,7 @@ log_heap_update(Relation reln, Buffer oldbuf, * two separate rdata entries. */ /* bitmap [+ padding] [+ oid] */ - if (newtup->t_data->t_hoff - offsetof(HeapTupleHeaderData, t_bits) > 0) + if (newtup->t_data->t_hoff - offsetof(HeapTupleHeaderData, t_bits) >0) { rdata[nr - 1].next = &(rdata[nr]); rdata[nr].data = ((char *) newtup->t_data) + offsetof(HeapTupleHeaderData, t_bits); @@ -6853,13 +6853,13 @@ log_heap_update(Relation reln, Buffer oldbuf, /* * Separate storage for the FPW buffer reference of the new page in the * wal_level >= logical case. - */ + */ if (need_tuple_data) { rdata[nr - 1].next = &(rdata[nr]); rdata[nr].data = NULL, - rdata[nr].len = 0; + rdata[nr].len = 0; rdata[nr].buffer = newbufref; rdata[nr].buffer_std = true; rdata[nr].next = NULL; @@ -6992,8 +6992,8 @@ log_newpage(RelFileNode *rnode, ForkNumber forkNum, BlockNumber blkno, recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_NEWPAGE, rdata); /* - * The page may be uninitialized. If so, we can't set the LSN because - * that would corrupt the page. + * The page may be uninitialized. If so, we can't set the LSN because that + * would corrupt the page. */ if (!PageIsNew(page)) { @@ -7173,14 +7173,14 @@ ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_changed, bool * */ for (natt = 0; natt < idx_desc->natts; natt++) { - int attno = idx_rel->rd_index->indkey.values[natt]; + int attno = idx_rel->rd_index->indkey.values[natt]; if (attno < 0) { /* * The OID column can appear in an index definition, but that's - * OK, becuse we always copy the OID if present (see below). - * Other system columns may not. + * OK, becuse we always copy the OID if present (see below). 
Other + * system columns may not. */ if (attno == ObjectIdAttributeNumber) continue; @@ -7210,7 +7210,8 @@ ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_changed, bool * */ if (HeapTupleHasExternal(key_tuple)) { - HeapTuple oldtup = key_tuple; + HeapTuple oldtup = key_tuple; + key_tuple = toast_flatten_tuple(oldtup, RelationGetDescr(relation)); heap_freetuple(oldtup); } @@ -7963,7 +7964,7 @@ heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool hot_update) /* * In normal operation, it is important to lock the two pages in * page-number order, to avoid possible deadlocks against other update - * operations going the other way. However, during WAL replay there can + * operations going the other way. However, during WAL replay there can * be no other update happening, so we don't need to worry about that. But * we *do* need to worry that we don't expose an inconsistent state to Hot * Standby queries --- so the original page can't be unlocked before we've @@ -8169,7 +8170,7 @@ newsame:; if (suffixlen > 0) memcpy(newp, (char *) oldtup.t_data + oldtup.t_len - suffixlen, suffixlen); - newlen = offsetof(HeapTupleHeaderData, t_bits) + xlhdr.t_len + prefixlen + suffixlen; + newlen = offsetof(HeapTupleHeaderData, t_bits) +xlhdr.t_len + prefixlen + suffixlen; htup->t_infomask2 = xlhdr.header.t_infomask2; htup->t_infomask = xlhdr.header.t_infomask; htup->t_hoff = xlhdr.header.t_hoff; @@ -8444,6 +8445,7 @@ heap2_redo(XLogRecPtr lsn, XLogRecord *record) heap_xlog_lock_updated(lsn, record); break; case XLOG_HEAP2_NEW_CID: + /* * Nothing to do on a real replay, only used during logical * decoding. diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c index b306398aec1..631af759d78 100644 --- a/src/backend/access/heap/hio.c +++ b/src/backend/access/heap/hio.c @@ -146,7 +146,7 @@ GetVisibilityMapPins(Relation relation, Buffer buffer1, Buffer buffer2, /* * If there are two buffers involved and we pinned just one of them, * it's possible that the second one became all-visible while we were - * busy pinning the first one. If it looks like that's a possible + * busy pinning the first one. If it looks like that's a possible * scenario, we'll need to make a second pass through this loop. */ if (buffer2 == InvalidBuffer || buffer1 == buffer2 @@ -177,7 +177,7 @@ GetVisibilityMapPins(Relation relation, Buffer buffer1, Buffer buffer2, * NOTE: it is unlikely, but not quite impossible, for otherBuffer to be the * same buffer we select for insertion of the new tuple (this could only * happen if space is freed in that page after heap_update finds there's not - * enough there). In that case, the page will be pinned and locked only once. + * enough there). In that case, the page will be pinned and locked only once. * * For the vmbuffer and vmbuffer_other arguments, we avoid deadlock by * locking them only after locking the corresponding heap page, and taking @@ -198,7 +198,7 @@ GetVisibilityMapPins(Relation relation, Buffer buffer1, Buffer buffer2, * for additional constraints needed for safe usage of this behavior.) * * The caller can also provide a BulkInsertState object to optimize many - * insertions into the same relation. This keeps a pin on the current + * insertions into the same relation. This keeps a pin on the current * insertion target page (to save pin/unpin cycles) and also passes a * BULKWRITE buffer selection strategy object to the buffer manager. * Passing NULL for bistate selects the default behavior. 
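As a usage illustration of the BulkInsertState mentioned in the hio.c comments above, the sketch below shows the pattern a backend-side bulk loader might follow: create one BulkInsertState, pass it to every heap_insert call, then free it. It assumes PostgreSQL backend headers of this vintage; load_rows() and its arguments are hypothetical, not an existing function.

#include "postgres.h"

#include "access/heapam.h"
#include "access/htup_details.h"
#include "access/xact.h"
#include "utils/rel.h"

static void
load_rows(Relation rel, Datum **values, bool **isnull, int nrows)
{
	BulkInsertState bistate = GetBulkInsertState();
	CommandId	cid = GetCurrentCommandId(true);
	TupleDesc	tupdesc = RelationGetDescr(rel);
	int			i;

	for (i = 0; i < nrows; i++)
	{
		HeapTuple	tup = heap_form_tuple(tupdesc, values[i], isnull[i]);

		/* options = 0: normal WAL-logged insertion */
		heap_insert(rel, tup, cid, 0, bistate);
		heap_freetuple(tup);
	}

	/* Drops the pin kept on the last insertion target page. */
	FreeBulkInsertState(bistate);
}

Reusing the one BulkInsertState across the whole loop is what keeps the target-page pin and the BULKWRITE buffer strategy alive between insertions, which is the optimization the comment describes.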
@@ -251,7 +251,7 @@ RelationGetBufferForTuple(Relation relation, Size len, /* * We first try to put the tuple on the same page we last inserted a tuple - * on, as cached in the BulkInsertState or relcache entry. If that + * on, as cached in the BulkInsertState or relcache entry. If that * doesn't work, we ask the Free Space Map to locate a suitable page. * Since the FSM's info might be out of date, we have to be prepared to * loop around and retry multiple times. (To insure this isn't an infinite @@ -283,7 +283,7 @@ RelationGetBufferForTuple(Relation relation, Size len, /* * If the FSM knows nothing of the rel, try the last page before we - * give up and extend. This avoids one-tuple-per-page syndrome during + * give up and extend. This avoids one-tuple-per-page syndrome during * bootstrapping or in a recently-started system. */ if (targetBlock == InvalidBlockNumber) @@ -305,7 +305,7 @@ RelationGetBufferForTuple(Relation relation, Size len, * If the page-level all-visible flag is set, caller will need to * clear both that and the corresponding visibility map bit. However, * by the time we return, we'll have x-locked the buffer, and we don't - * want to do any I/O while in that state. So we check the bit here + * want to do any I/O while in that state. So we check the bit here * before taking the lock, and pin the page if it appears necessary. * Checking without the lock creates a risk of getting the wrong * answer, so we'll have to recheck after acquiring the lock. @@ -347,7 +347,7 @@ RelationGetBufferForTuple(Relation relation, Size len, /* * We now have the target page (and the other buffer, if any) pinned - * and locked. However, since our initial PageIsAllVisible checks + * and locked. However, since our initial PageIsAllVisible checks * were performed before acquiring the lock, the results might now be * out of date, either for the selected victim buffer, or for the * other buffer passed by the caller. In that case, we'll need to @@ -390,7 +390,7 @@ RelationGetBufferForTuple(Relation relation, Size len, /* * Not enough space, so we must give up our page locks and pin (if - * any) and prepare to look elsewhere. We don't care which order we + * any) and prepare to look elsewhere. We don't care which order we * unlock the two buffers in, so this can be slightly simpler than the * code above. */ @@ -432,7 +432,7 @@ RelationGetBufferForTuple(Relation relation, Size len, /* * XXX This does an lseek - rather expensive - but at the moment it is the - * only way to accurately determine how many blocks are in a relation. Is + * only way to accurately determine how many blocks are in a relation. Is * it worth keeping an accurate file length in shared memory someplace, * rather than relying on the kernel to do it for us? */ @@ -452,7 +452,7 @@ RelationGetBufferForTuple(Relation relation, Size len, /* * Release the file-extension lock; it's now OK for someone else to extend - * the relation some more. Note that we cannot release this lock before + * the relation some more. Note that we cannot release this lock before * we have buffer lock on the new page, or we risk a race condition * against vacuumlazy.c --- see comments therein. 
*/ diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c index 3c69e1badac..06b54889230 100644 --- a/src/backend/access/heap/pruneheap.c +++ b/src/backend/access/heap/pruneheap.c @@ -117,7 +117,7 @@ heap_page_prune_opt(Relation relation, Buffer buffer) * Checking free space here is questionable since we aren't holding any * lock on the buffer; in the worst case we could get a bogus answer. It's * unlikely to be *seriously* wrong, though, since reading either pd_lower - * or pd_upper is probably atomic. Avoiding taking a lock seems more + * or pd_upper is probably atomic. Avoiding taking a lock seems more * important than sometimes getting a wrong answer in what is after all * just a heuristic estimate. */ @@ -332,8 +332,8 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin, * OldestXmin is the cutoff XID used to identify dead tuples. * * We don't actually change the page here, except perhaps for hint-bit updates - * caused by HeapTupleSatisfiesVacuum. We just add entries to the arrays in - * prstate showing the changes to be made. Items to be redirected are added + * caused by HeapTupleSatisfiesVacuum. We just add entries to the arrays in + * prstate showing the changes to be made. Items to be redirected are added * to the redirected[] array (two entries per redirection); items to be set to * LP_DEAD state are added to nowdead[]; and items to be set to LP_UNUSED * state are added to nowunused[]. @@ -384,7 +384,7 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum, * We need this primarily to handle aborted HOT updates, that is, * XMIN_INVALID heap-only tuples. Those might not be linked to by * any chain, since the parent tuple might be re-updated before - * any pruning occurs. So we have to be able to reap them + * any pruning occurs. So we have to be able to reap them * separately from chain-pruning. (Note that * HeapTupleHeaderIsHotUpdated will never return true for an * XMIN_INVALID tuple, so this code will work even when there were @@ -496,9 +496,10 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum, break; case HEAPTUPLE_DELETE_IN_PROGRESS: + /* - * This tuple may soon become DEAD. Update the hint field - * so that the page is reconsidered for pruning in future. + * This tuple may soon become DEAD. Update the hint field so + * that the page is reconsidered for pruning in future. */ heap_prune_record_prunable(prstate, HeapTupleHeaderGetUpdateXid(htup)); @@ -574,7 +575,7 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum, /* * If the root entry had been a normal tuple, we are deleting it, so - * count it in the result. But changing a redirect (even to DEAD + * count it in the result. But changing a redirect (even to DEAD * state) doesn't count. */ if (ItemIdIsNormal(rootlp)) @@ -663,7 +664,7 @@ heap_prune_record_unused(PruneState *prstate, OffsetNumber offnum) * buffer, and is inside a critical section. * * This is split out because it is also used by heap_xlog_clean() - * to replay the WAL record when needed after a crash. Note that the + * to replay the WAL record when needed after a crash. Note that the * arguments are identical to those of log_heap_clean(). 
*/ void diff --git a/src/backend/access/heap/rewriteheap.c b/src/backend/access/heap/rewriteheap.c index ef8c12194c7..7b579114774 100644 --- a/src/backend/access/heap/rewriteheap.c +++ b/src/backend/access/heap/rewriteheap.c @@ -10,7 +10,7 @@ * * The caller is responsible for creating the new heap, all catalog * changes, supplying the tuples to be written to the new heap, and - * rebuilding indexes. The caller must hold AccessExclusiveLock on the + * rebuilding indexes. The caller must hold AccessExclusiveLock on the * target table, because we assume no one else is writing into it. * * To use the facility: @@ -43,7 +43,7 @@ * to substitute the correct ctid instead. * * For each ctid reference from A -> B, we might encounter either A first - * or B first. (Note that a tuple in the middle of a chain is both A and B + * or B first. (Note that a tuple in the middle of a chain is both A and B * of different pairs.) * * If we encounter A first, we'll store the tuple in the unresolved_tups @@ -58,11 +58,11 @@ * and can write A immediately with the correct ctid. * * Entries in the hash tables can be removed as soon as the later tuple - * is encountered. That helps to keep the memory usage down. At the end, + * is encountered. That helps to keep the memory usage down. At the end, * both tables are usually empty; we should have encountered both A and B * of each pair. However, it's possible for A to be RECENTLY_DEAD and B * entirely DEAD according to HeapTupleSatisfiesVacuum, because the test - * for deadness using OldestXmin is not exact. In such a case we might + * for deadness using OldestXmin is not exact. In such a case we might * encounter B first, and skip it, and find A later. Then A would be added * to unresolved_tups, and stay there until end of the rewrite. Since * this case is very unusual, we don't worry about the memory usage. @@ -78,7 +78,7 @@ * of CLUSTERing on an unchanging key column, we'll see all the versions * of a given tuple together anyway, and so the peak memory usage is only * proportional to the number of RECENTLY_DEAD versions of a single row, not - * in the whole table. Note that if we do fail halfway through a CLUSTER, + * in the whole table. Note that if we do fail halfway through a CLUSTER, * the old table is still valid, so failure is not catastrophic. * * We can't use the normal heap_insert function to insert into the new @@ -143,13 +143,13 @@ typedef struct RewriteStateData BlockNumber rs_blockno; /* block where page will go */ bool rs_buffer_valid; /* T if any tuples in buffer */ bool rs_use_wal; /* must we WAL-log inserts? 
*/ - bool rs_logical_rewrite; /* do we need to do logical rewriting */ + bool rs_logical_rewrite; /* do we need to do logical rewriting */ TransactionId rs_oldest_xmin; /* oldest xmin used by caller to * determine tuple visibility */ TransactionId rs_freeze_xid;/* Xid that will be used as freeze cutoff * point */ - TransactionId rs_logical_xmin; /* Xid that will be used as cutoff - * point for logical rewrites */ + TransactionId rs_logical_xmin; /* Xid that will be used as cutoff + * point for logical rewrites */ MultiXactId rs_cutoff_multi;/* MultiXactId that will be used as cutoff * point for multixacts */ MemoryContext rs_cxt; /* for hash tables and entries and tuples in @@ -158,7 +158,7 @@ typedef struct RewriteStateData HTAB *rs_unresolved_tups; /* unmatched A tuples */ HTAB *rs_old_new_tid_map; /* unmatched B tuples */ HTAB *rs_logical_mappings; /* logical remapping files */ - uint32 rs_num_rewrite_mappings; /* # in memory mappings */ + uint32 rs_num_rewrite_mappings; /* # in memory mappings */ } RewriteStateData; /* @@ -199,12 +199,12 @@ typedef OldToNewMappingData *OldToNewMapping; */ typedef struct RewriteMappingFile { - TransactionId xid; /* xid that might need to see the row */ - int vfd; /* fd of mappings file */ - off_t off; /* how far have we written yet */ - uint32 num_mappings; /* number of in-memory mappings */ - dlist_head mappings; /* list of in-memory mappings */ - char path[MAXPGPATH]; /* path, for error messages */ + TransactionId xid; /* xid that might need to see the row */ + int vfd; /* fd of mappings file */ + off_t off; /* how far have we written yet */ + uint32 num_mappings; /* number of in-memory mappings */ + dlist_head mappings; /* list of in-memory mappings */ + char path[MAXPGPATH]; /* path, for error messages */ } RewriteMappingFile; /* @@ -213,8 +213,8 @@ typedef struct RewriteMappingFile */ typedef struct RewriteMappingDataEntry { - LogicalRewriteMappingData map; /* map between old and new location of - * the tuple */ + LogicalRewriteMappingData map; /* map between old and new location of + * the tuple */ dlist_node node; } RewriteMappingDataEntry; @@ -346,7 +346,7 @@ end_heap_rewrite(RewriteState state) } /* - * If the rel is WAL-logged, must fsync before commit. We use heap_sync + * If the rel is WAL-logged, must fsync before commit. We use heap_sync * to ensure that the toast table gets fsync'd too. * * It's obvious that we must do this when not WAL-logging. It's less @@ -617,7 +617,7 @@ rewrite_heap_dead_tuple(RewriteState state, HeapTuple old_tuple) } /* - * Insert a tuple to the new relation. This has to track heap_insert + * Insert a tuple to the new relation. This has to track heap_insert * and its subsidiary functions! * * t_self of the tuple is set to the new TID of the tuple. 
If t_ctid of the @@ -866,13 +866,13 @@ logical_heap_rewrite_flush_mappings(RewriteState state) hash_seq_init(&seq_status, state->rs_logical_mappings); while ((src = (RewriteMappingFile *) hash_seq_search(&seq_status)) != NULL) { - XLogRecData rdata[2]; - char *waldata; - char *waldata_start; + XLogRecData rdata[2]; + char *waldata; + char *waldata_start; xl_heap_rewrite_mapping xlrec; - Oid dboid; - uint32 len; - int written; + Oid dboid; + uint32 len; + int written; /* this file hasn't got any new mappings */ if (src->num_mappings == 0) @@ -962,14 +962,14 @@ logical_end_heap_rewrite(RewriteState state) return; /* writeout remaining in-memory entries */ - if (state->rs_num_rewrite_mappings > 0 ) + if (state->rs_num_rewrite_mappings > 0) logical_heap_rewrite_flush_mappings(state); /* Iterate over all mappings we have written and fsync the files. */ hash_seq_init(&seq_status, state->rs_logical_mappings); while ((src = (RewriteMappingFile *) hash_seq_search(&seq_status)) != NULL) { - if(FileSync(src->vfd) != 0) + if (FileSync(src->vfd) != 0) ereport(ERROR, (errcode_for_file_access(), errmsg("could not fsync file \"%s\": %m", src->path))); @@ -985,10 +985,10 @@ static void logical_rewrite_log_mapping(RewriteState state, TransactionId xid, LogicalRewriteMappingData *map) { - RewriteMappingFile *src; - RewriteMappingDataEntry *pmap; - Oid relid; - bool found; + RewriteMappingFile *src; + RewriteMappingDataEntry *pmap; + Oid relid; + bool found; relid = RelationGetRelid(state->rs_old_rel); @@ -1027,7 +1027,7 @@ logical_rewrite_log_mapping(RewriteState state, TransactionId xid, if (src->vfd < 0) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not create file \"%s\": %m", path))); + errmsg("could not create file \"%s\": %m", path))); } pmap = MemoryContextAlloc(state->rs_cxt, @@ -1041,7 +1041,7 @@ logical_rewrite_log_mapping(RewriteState state, TransactionId xid, * Write out buffer every time we've too many in-memory entries across all * mapping files. */ - if (state->rs_num_rewrite_mappings >= 1000 /* arbitrary number */) + if (state->rs_num_rewrite_mappings >= 1000 /* arbitrary number */ ) logical_heap_rewrite_flush_mappings(state); } @@ -1054,11 +1054,11 @@ logical_rewrite_heap_tuple(RewriteState state, ItemPointerData old_tid, HeapTuple new_tuple) { ItemPointerData new_tid = new_tuple->t_self; - TransactionId cutoff = state->rs_logical_xmin; - TransactionId xmin; - TransactionId xmax; - bool do_log_xmin = false; - bool do_log_xmax = false; + TransactionId cutoff = state->rs_logical_xmin; + TransactionId xmin; + TransactionId xmax; + bool do_log_xmin = false; + bool do_log_xmax = false; LogicalRewriteMappingData map; /* no logical rewrite in progress, we don't need to log anything */ @@ -1147,7 +1147,8 @@ heap_xlog_logical_rewrite(XLogRecPtr lsn, XLogRecord *r) if (fd < 0) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not create file \"%s\": %m", path))); + errmsg("could not create file \"%s\": %m", path))); + /* * Truncate all data that's not guaranteed to have been safely fsynced (by * previous record or by the last checkpoint). @@ -1174,6 +1175,7 @@ heap_xlog_logical_rewrite(XLogRecPtr lsn, XLogRecord *r) ereport(ERROR, (errcode_for_file_access(), errmsg("could not write to file \"%s\": %m", path))); + /* * Now fsync all previously written data. 
We could improve things and only * do this for the last write to a file, but the required bookkeeping @@ -1222,13 +1224,14 @@ CheckPointLogicalRewriteHeap(void) mappings_dir = AllocateDir("pg_llog/mappings"); while ((mapping_de = ReadDir(mappings_dir, "pg_llog/mappings")) != NULL) { - struct stat statbuf; + struct stat statbuf; Oid dboid; Oid relid; XLogRecPtr lsn; TransactionId rewrite_xid; TransactionId create_xid; - uint32 hi, lo; + uint32 hi, + lo; if (strcmp(mapping_de->d_name, ".") == 0 || strcmp(mapping_de->d_name, "..") == 0) @@ -1244,7 +1247,7 @@ CheckPointLogicalRewriteHeap(void) if (sscanf(mapping_de->d_name, LOGICAL_REWRITE_FORMAT, &dboid, &relid, &hi, &lo, &rewrite_xid, &create_xid) != 6) - elog(ERROR,"could not parse filename \"%s\"", mapping_de->d_name); + elog(ERROR, "could not parse filename \"%s\"", mapping_de->d_name); lsn = ((uint64) hi) << 32 | lo; @@ -1258,7 +1261,7 @@ CheckPointLogicalRewriteHeap(void) } else { - int fd = OpenTransientFile(path, O_RDONLY | PG_BINARY, 0); + int fd = OpenTransientFile(path, O_RDONLY | PG_BINARY, 0); /* * The file cannot vanish due to concurrency since this function @@ -1269,6 +1272,7 @@ CheckPointLogicalRewriteHeap(void) ereport(ERROR, (errcode_for_file_access(), errmsg("could not open file \"%s\": %m", path))); + /* * We could try to avoid fsyncing files that either haven't * changed or have only been created since the checkpoint's start, diff --git a/src/backend/access/heap/syncscan.c b/src/backend/access/heap/syncscan.c index edd0395d8e9..7ea1ead543f 100644 --- a/src/backend/access/heap/syncscan.c +++ b/src/backend/access/heap/syncscan.c @@ -4,7 +4,7 @@ * heap scan synchronization support * * When multiple backends run a sequential scan on the same table, we try - * to keep them synchronized to reduce the overall I/O needed. The goal is + * to keep them synchronized to reduce the overall I/O needed. The goal is * to read each page into shared buffer cache only once, and let all backends * that take part in the shared scan process the page before it falls out of * the cache. @@ -26,7 +26,7 @@ * don't want such queries to slow down others. * * There can realistically only be a few large sequential scans on different - * tables in progress at any time. Therefore we just keep the scan positions + * tables in progress at any time. Therefore we just keep the scan positions * in a small LRU list which we scan every time we need to look up or update a * scan position. The whole mechanism is only applied for tables exceeding * a threshold size (but that is not the concern of this module). @@ -243,7 +243,7 @@ ss_search(RelFileNode relfilenode, BlockNumber location, bool set) * relation, or 0 if no valid location is found. * * We expect the caller has just done RelationGetNumberOfBlocks(), and - * so that number is passed in rather than computing it again. The result + * so that number is passed in rather than computing it again. The result * is guaranteed less than relnblocks (assuming that's > 0). 
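
A rough sketch of how a sequential scan might consume these two entry points, mirroring what heapam does: start at whatever position another backend has advertised, walk forward with wraparound, and report progress as we go. ss_get_location() and ss_report_location() are the functions defined in this file (header path assumed); process_block() and the surrounding loop are illustrative assumptions only.

#include "postgres.h"
#include "access/heapam.h"		/* ss_get_location, ss_report_location (assumed location) */
#include "storage/bufmgr.h"		/* RelationGetNumberOfBlocks */
#include "utils/rel.h"

/* Hypothetical per-page work done by the scan. */
extern void process_block(Relation rel, BlockNumber blkno);

/* Illustrative only: a synchronized full-table walk. */
static void
scan_with_syncscan(Relation rel)
{
	BlockNumber nblocks = RelationGetNumberOfBlocks(rel);
	BlockNumber start;
	BlockNumber i;

	if (nblocks == 0)
		return;

	/* Join any scan already in progress on this relation. */
	start = ss_get_location(rel, nblocks);

	for (i = 0; i < nblocks; i++)
	{
		BlockNumber blkno = (start + i) % nblocks;	/* wrap at the end */

		process_block(rel, blkno);

		/* Advertise our position so later scans can piggyback on our I/O. */
		ss_report_location(rel, blkno);
	}
}
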
*/ BlockNumber diff --git a/src/backend/access/heap/tuptoaster.c b/src/backend/access/heap/tuptoaster.c index dde74d47978..4adfe8217bd 100644 --- a/src/backend/access/heap/tuptoaster.c +++ b/src/backend/access/heap/tuptoaster.c @@ -53,11 +53,11 @@ static struct varlena *toast_fetch_datum(struct varlena * attr); static struct varlena *toast_fetch_datum_slice(struct varlena * attr, int32 sliceoffset, int32 length); static int toast_open_indexes(Relation toastrel, - LOCKMODE lock, - Relation **toastidxs, - int *num_indexes); + LOCKMODE lock, + Relation **toastidxs, + int *num_indexes); static void toast_close_indexes(Relation *toastidxs, int num_indexes, - LOCKMODE lock); + LOCKMODE lock); /* ---------- @@ -91,8 +91,9 @@ heap_tuple_fetch_attr(struct varlena * attr) * to persist a Datum for unusually long time, like in a HOLD cursor. */ struct varatt_indirect redirect; + VARATT_EXTERNAL_GET_POINTER(redirect, attr); - attr = (struct varlena *)redirect.pointer; + attr = (struct varlena *) redirect.pointer; /* nested indirect Datums aren't allowed */ Assert(!VARATT_IS_EXTERNAL_INDIRECT(attr)); @@ -147,8 +148,9 @@ heap_tuple_untoast_attr(struct varlena * attr) else if (VARATT_IS_EXTERNAL_INDIRECT(attr)) { struct varatt_indirect redirect; + VARATT_EXTERNAL_GET_POINTER(redirect, attr); - attr = (struct varlena *)redirect.pointer; + attr = (struct varlena *) redirect.pointer; /* nested indirect Datums aren't allowed */ Assert(!VARATT_IS_EXTERNAL_INDIRECT(attr)); @@ -217,6 +219,7 @@ heap_tuple_untoast_attr_slice(struct varlena * attr, else if (VARATT_IS_EXTERNAL_INDIRECT(attr)) { struct varatt_indirect redirect; + VARATT_EXTERNAL_GET_POINTER(redirect, attr); /* nested indirect Datums aren't allowed */ @@ -299,6 +302,7 @@ toast_raw_datum_size(Datum value) else if (VARATT_IS_EXTERNAL_INDIRECT(attr)) { struct varatt_indirect toast_pointer; + VARATT_EXTERNAL_GET_POINTER(toast_pointer, attr); /* nested indirect Datums aren't allowed */ @@ -354,6 +358,7 @@ toast_datum_size(Datum value) else if (VARATT_IS_EXTERNAL_INDIRECT(attr)) { struct varatt_indirect toast_pointer; + VARATT_EXTERNAL_GET_POINTER(toast_pointer, attr); /* nested indirect Datums aren't allowed */ @@ -597,7 +602,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup, * We took care of UPDATE above, so any external value we find * still in the tuple must be someone else's we cannot reuse. * Fetch it back (without decompression, unless we are forcing - * PLAIN storage). If necessary, we'll push it out as a new + * PLAIN storage). If necessary, we'll push it out as a new * external value below. */ if (VARATT_IS_EXTERNAL(new_value)) @@ -740,7 +745,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup, /* * Second we look for attributes of attstorage 'x' or 'e' that are still - * inline. But skip this if there's no toast table to push them to. + * inline. But skip this if there's no toast table to push them to. */ while (heap_compute_data_size(tupleDesc, toast_values, toast_isnull) > maxDataLen && @@ -850,7 +855,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup, } /* - * Finally we store attributes of type 'm' externally. At this point we + * Finally we store attributes of type 'm' externally. At this point we * increase the target tuple size, so that 'm' attributes aren't stored * externally unless really necessary. */ @@ -1438,7 +1443,7 @@ toast_save_datum(Relation rel, Datum value, * those versions could easily reference the same toast value. 
* When we copy the second or later version of such a row, * reusing the OID will mean we select an OID that's already - * in the new toast table. Check for that, and if so, just + * in the new toast table. Check for that, and if so, just * fall through without writing the data again. * * While annoying and ugly-looking, this is a good thing @@ -1467,7 +1472,7 @@ toast_save_datum(Relation rel, Datum value, { toast_pointer.va_valueid = GetNewOidWithIndex(toastrel, - RelationGetRelid(toastidxs[validIndex]), + RelationGetRelid(toastidxs[validIndex]), (AttrNumber) 1); } while (toastid_valueid_exists(rel->rd_toastoid, toast_pointer.va_valueid)); @@ -1488,7 +1493,7 @@ toast_save_datum(Relation rel, Datum value, */ while (data_todo > 0) { - int i; + int i; /* * Calculate the size of this chunk @@ -1506,7 +1511,7 @@ toast_save_datum(Relation rel, Datum value, heap_insert(toastrel, toasttup, mycid, options, NULL); /* - * Create the index entry. We cheat a little here by not using + * Create the index entry. We cheat a little here by not using * FormIndexDatum: this relies on the knowledge that the index columns * are the same as the initial columns of the table for all the * indexes. @@ -1656,8 +1661,8 @@ toastrel_valueid_exists(Relation toastrel, Oid valueid) * Is there any such chunk? */ toastscan = systable_beginscan(toastrel, - RelationGetRelid(toastidxs[validIndex]), - true, SnapshotToast, 1, &toastkey); + RelationGetRelid(toastidxs[validIndex]), + true, SnapshotToast, 1, &toastkey); if (systable_getnext(toastscan) != NULL) result = true; @@ -2126,7 +2131,8 @@ toast_open_indexes(Relation toastrel, /* Fetch the first valid index in list */ for (i = 0; i < *num_indexes; i++) { - Relation toastidx = (*toastidxs)[i]; + Relation toastidx = (*toastidxs)[i]; + if (toastidx->rd_index->indisvalid) { res = i; @@ -2136,14 +2142,14 @@ toast_open_indexes(Relation toastrel, } /* - * Free index list, not necessary anymore as relations are opened - * and a valid index has been found. + * Free index list, not necessary anymore as relations are opened and a + * valid index has been found. */ list_free(indexlist); /* - * The toast relation should have one valid index, so something is - * going wrong if there is nothing. + * The toast relation should have one valid index, so something is going + * wrong if there is nothing. */ if (!found) elog(ERROR, "no valid index found for toast relation with Oid %d", @@ -2161,7 +2167,7 @@ toast_open_indexes(Relation toastrel, static void toast_close_indexes(Relation *toastidxs, int num_indexes, LOCKMODE lock) { - int i; + int i; /* Close relations and clean up things */ for (i = 0; i < num_indexes; i++) diff --git a/src/backend/access/heap/visibilitymap.c b/src/backend/access/heap/visibilitymap.c index 899ffacf1e9..a0c0c7f2a6b 100644 --- a/src/backend/access/heap/visibilitymap.c +++ b/src/backend/access/heap/visibilitymap.c @@ -27,7 +27,7 @@ * the sense that we make sure that whenever a bit is set, we know the * condition is true, but if a bit is not set, it might or might not be true. * - * Clearing a visibility map bit is not separately WAL-logged. The callers + * Clearing a visibility map bit is not separately WAL-logged. The callers * must make sure that whenever a bit is cleared, the bit is cleared on WAL * replay of the updating operation as well. * @@ -36,9 +36,9 @@ * it may still be the case that every tuple on the page is visible to all * transactions; we just don't know that for certain. 
The difficulty is that * there are two bits which are typically set together: the PD_ALL_VISIBLE bit - * on the page itself, and the visibility map bit. If a crash occurs after the + * on the page itself, and the visibility map bit. If a crash occurs after the * visibility map page makes it to disk and before the updated heap page makes - * it to disk, redo must set the bit on the heap page. Otherwise, the next + * it to disk, redo must set the bit on the heap page. Otherwise, the next * insert, update, or delete on the heap page will fail to realize that the * visibility map bit must be cleared, possibly causing index-only scans to * return wrong answers. @@ -59,10 +59,10 @@ * the buffer lock over any I/O that may be required to read in the visibility * map page. To avoid this, we examine the heap page before locking it; * if the page-level PD_ALL_VISIBLE bit is set, we pin the visibility map - * bit. Then, we lock the buffer. But this creates a race condition: there + * bit. Then, we lock the buffer. But this creates a race condition: there * is a possibility that in the time it takes to lock the buffer, the * PD_ALL_VISIBLE bit gets set. If that happens, we have to unlock the - * buffer, pin the visibility map page, and relock the buffer. This shouldn't + * buffer, pin the visibility map page, and relock the buffer. This shouldn't * happen often, because only VACUUM currently sets visibility map bits, * and the race will only occur if VACUUM processes a given page at almost * exactly the same time that someone tries to further modify it. @@ -227,9 +227,9 @@ visibilitymap_pin_ok(BlockNumber heapBlk, Buffer buf) * visibilitymap_set - set a bit on a previously pinned page * * recptr is the LSN of the XLOG record we're replaying, if we're in recovery, - * or InvalidXLogRecPtr in normal running. The page LSN is advanced to the + * or InvalidXLogRecPtr in normal running. The page LSN is advanced to the * one provided; in normal running, we generate a new XLOG record and set the - * page LSN to that value. cutoff_xid is the largest xmin on the page being + * page LSN to that value. cutoff_xid is the largest xmin on the page being * marked all-visible; it is needed for Hot Standby, and can be * InvalidTransactionId if the page contains no tuples. * @@ -320,10 +320,10 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf, * releasing *buf after it's done testing and setting bits. * * NOTE: This function is typically called without a lock on the heap page, - * so somebody else could change the bit just after we look at it. In fact, + * so somebody else could change the bit just after we look at it. In fact, * since we don't lock the visibility map page either, it's even possible that * someone else could have changed the bit just before we look at it, but yet - * we might see the old value. It is the caller's responsibility to deal with + * we might see the old value. It is the caller's responsibility to deal with * all concurrency issues! */ bool @@ -526,7 +526,7 @@ vm_readbuf(Relation rel, BlockNumber blkno, bool extend) /* * We might not have opened the relation at the smgr level yet, or we - * might have been forced to close it by a sinval message. The code below + * might have been forced to close it by a sinval message. The code below * won't necessarily notice relation extension immediately when extend = * false, so we rely on sinval messages to ensure that our ideas about the * size of the map aren't too far out of date. 
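
A condensed sketch of the conservative rule spelled out above -- a set bit can be trusted, a clear bit proves nothing -- in the shape an index-only-scan-style caller uses it. visibilitymap_test() is the function declared in access/visibilitymap.h in this version of the tree; heap_check_visibility() stands in for the real fallback visibility check and is purely hypothetical.

#include "postgres.h"
#include "access/visibilitymap.h"
#include "storage/bufmgr.h"
#include "storage/itemptr.h"
#include "utils/rel.h"
#include "utils/snapshot.h"

/* Hypothetical fallback: fetch the tuple and test it against the snapshot. */
extern bool heap_check_visibility(Relation rel, ItemPointer tid, Snapshot snapshot);

/* Illustrative only: trust the VM bit when set, recheck the heap when clear. */
static bool
tuple_surely_visible(Relation rel, ItemPointer tid, Snapshot snapshot)
{
	BlockNumber blkno = ItemPointerGetBlockNumber(tid);
	Buffer		vmbuf = InvalidBuffer;
	bool		all_visible;

	all_visible = visibilitymap_test(rel, blkno, &vmbuf);
	if (BufferIsValid(vmbuf))
		ReleaseBuffer(vmbuf);

	if (all_visible)
		return true;			/* bit set: whole page known visible to all */

	/* Bit clear: the tuple might still be visible; do the real check. */
	return heap_check_visibility(rel, tid, snapshot);
}
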
diff --git a/src/backend/access/index/genam.c b/src/backend/access/index/genam.c index 50cb92a47b4..850008b3407 100644 --- a/src/backend/access/index/genam.c +++ b/src/backend/access/index/genam.c @@ -45,7 +45,7 @@ * * At the end of a scan, the AM's endscan routine undoes the locking, * but does *not* call IndexScanEnd --- the higher-level index_endscan - * routine does that. (We can't do it in the AM because index_endscan + * routine does that. (We can't do it in the AM because index_endscan * still needs to touch the IndexScanDesc after calling the AM.) * * Because of this, the AM does not have a choice whether to call @@ -79,7 +79,7 @@ RelationGetIndexScan(Relation indexRelation, int nkeys, int norderbys) scan->heapRelation = NULL; /* may be set later */ scan->indexRelation = indexRelation; - scan->xs_snapshot = InvalidSnapshot; /* caller must initialize this */ + scan->xs_snapshot = InvalidSnapshot; /* caller must initialize this */ scan->numberOfKeys = nkeys; scan->numberOfOrderBys = norderbys; @@ -188,7 +188,7 @@ BuildIndexValueDescription(Relation indexRelation, * at rd_opcintype not the index tupdesc. * * Note: this is a bit shaky for opclasses that have pseudotype - * input types such as ANYARRAY or RECORD. Currently, the + * input types such as ANYARRAY or RECORD. Currently, the * typoutput functions associated with the pseudotypes will work * okay, but we might have to try harder in future. */ @@ -269,7 +269,7 @@ systable_beginscan(Relation heapRelation, if (snapshot == NULL) { - Oid relid = RelationGetRelid(heapRelation); + Oid relid = RelationGetRelid(heapRelation); snapshot = RegisterSnapshot(GetCatalogSnapshot(relid)); sysscan->snapshot = snapshot; @@ -442,7 +442,7 @@ systable_endscan(SysScanDesc sysscan) * index order. Also, for largely historical reasons, the index to use * is opened and locked by the caller, not here. * - * Currently we do not support non-index-based scans here. (In principle + * Currently we do not support non-index-based scans here. (In principle * we could do a heapscan and sort, but the uses are in places that * probably don't need to still work with corrupted catalog indexes.) * For the moment, therefore, these functions are merely the thinnest of @@ -475,7 +475,7 @@ systable_beginscan_ordered(Relation heapRelation, if (snapshot == NULL) { - Oid relid = RelationGetRelid(heapRelation); + Oid relid = RelationGetRelid(heapRelation); snapshot = RegisterSnapshot(GetCatalogSnapshot(relid)); sysscan->snapshot = snapshot; diff --git a/src/backend/access/index/indexam.c b/src/backend/access/index/indexam.c index a4b5f3d698e..53cf96fc103 100644 --- a/src/backend/access/index/indexam.c +++ b/src/backend/access/index/indexam.c @@ -84,7 +84,7 @@ * * Note: the ReindexIsProcessingIndex() check in RELATION_CHECKS is there * to check that we don't try to scan or do retail insertions into an index - * that is currently being rebuilt or pending rebuild. This helps to catch + * that is currently being rebuilt or pending rebuild. This helps to catch * things that don't work when reindexing system catalogs. The assertion * doesn't prevent the actual rebuild because we don't use RELATION_CHECKS * when calling the index AM's ambuild routine, and there is no reason for @@ -149,7 +149,7 @@ static IndexScanDesc index_beginscan_internal(Relation indexRelation, * index_open - open an index relation by relation OID * * If lockmode is not "NoLock", the specified kind of lock is - * obtained on the index. (Generally, NoLock should only be + * obtained on the index. 
(Generally, NoLock should only be * used if the caller knows it has some appropriate lock on the * index already.) * @@ -414,7 +414,7 @@ index_markpos(IndexScanDesc scan) * returnable tuple in each HOT chain, and so restoring the prior state at the * granularity of the index AM is sufficient. Since the only current user * of mark/restore functionality is nodeMergejoin.c, this effectively means - * that merge-join plans only work for MVCC snapshots. This could be fixed + * that merge-join plans only work for MVCC snapshots. This could be fixed * if necessary, but for now it seems unimportant. * ---------------- */ @@ -553,7 +553,7 @@ index_fetch_heap(IndexScanDesc scan) /* * If we scanned a whole HOT chain and found only dead tuples, tell index * AM to kill its entry for that TID (this will take effect in the next - * amgettuple call, in index_getnext_tid). We do not do this when in + * amgettuple call, in index_getnext_tid). We do not do this when in * recovery because it may violate MVCC to do so. See comments in * RelationGetIndexScan(). */ @@ -590,7 +590,7 @@ index_getnext(IndexScanDesc scan, ScanDirection direction) { /* * We are resuming scan of a HOT chain after having returned an - * earlier member. Must still hold pin on current heap page. + * earlier member. Must still hold pin on current heap page. */ Assert(BufferIsValid(scan->xs_cbuf)); Assert(ItemPointerGetBlockNumber(&scan->xs_ctup.t_self) == @@ -760,7 +760,7 @@ index_can_return(Relation indexRelation) * particular indexed attribute are those with both types equal to * the index opclass' opcintype (note that this is subtly different * from the indexed attribute's own type: it may be a binary-compatible - * type instead). Only the default functions are stored in relcache + * type instead). Only the default functions are stored in relcache * entries --- access methods can use the syscache to look up non-default * functions. * @@ -794,7 +794,7 @@ index_getprocid(Relation irel, * index_getprocinfo * * This routine allows index AMs to keep fmgr lookup info for - * support procs in the relcache. As above, only the "default" + * support procs in the relcache. As above, only the "default" * functions for any particular indexed attribute are cached. * * Note: the return value points into cached data that will be lost during diff --git a/src/backend/access/nbtree/nbtcompare.c b/src/backend/access/nbtree/nbtcompare.c index 86ac7d3ec21..b1f9ae36850 100644 --- a/src/backend/access/nbtree/nbtcompare.c +++ b/src/backend/access/nbtree/nbtcompare.c @@ -25,7 +25,7 @@ * Although any negative int32 (except INT_MIN) is acceptable for reporting * "<", and any positive int32 is acceptable for reporting ">", routines * that work on 32-bit or wider datatypes can't just return "a - b". - * That could overflow and give the wrong answer. Also, one must not + * That could overflow and give the wrong answer. Also, one must not * return INT_MIN to report "<", since some callers will negate the result. * * NOTE: it is critical that the comparison function impose a total order diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c index 0d806af5055..d64cbd98223 100644 --- a/src/backend/access/nbtree/nbtinsert.c +++ b/src/backend/access/nbtree/nbtinsert.c @@ -90,7 +90,7 @@ static void _bt_vacuum_one_page(Relation rel, Buffer buffer, Relation heapRel); * By here, itup is filled in, including the TID. * * If checkUnique is UNIQUE_CHECK_NO or UNIQUE_CHECK_PARTIAL, this - * will allow duplicates. 
Otherwise (UNIQUE_CHECK_YES or + * will allow duplicates. Otherwise (UNIQUE_CHECK_YES or * UNIQUE_CHECK_EXISTING) it will throw error for a duplicate. * For UNIQUE_CHECK_EXISTING we merely run the duplicate check, and * don't actually insert. @@ -129,7 +129,7 @@ top: * If the page was split between the time that we surrendered our read * lock and acquired our write lock, then this page may no longer be the * right place for the key we want to insert. In this case, we need to - * move right in the tree. See Lehman and Yao for an excruciatingly + * move right in the tree. See Lehman and Yao for an excruciatingly * precise description. */ buf = _bt_moveright(rel, buf, natts, itup_scankey, false, @@ -211,7 +211,7 @@ top: * is the first tuple on the next page. * * Returns InvalidTransactionId if there is no conflict, else an xact ID - * we must wait for to see if it commits a conflicting tuple. If an actual + * we must wait for to see if it commits a conflicting tuple. If an actual * conflict is detected, no return --- just ereport(). * * However, if checkUnique == UNIQUE_CHECK_PARTIAL, we always return @@ -293,7 +293,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel, /* * If we are doing a recheck, we expect to find the tuple we - * are rechecking. It's not a duplicate, but we have to keep + * are rechecking. It's not a duplicate, but we have to keep * scanning. */ if (checkUnique == UNIQUE_CHECK_EXISTING && @@ -482,7 +482,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel, * If the new key is equal to one or more existing keys, we can * legitimately place it anywhere in the series of equal keys --- in fact, * if the new key is equal to the page's "high key" we can place it on - * the next page. If it is equal to the high key, and there's not room + * the next page. If it is equal to the high key, and there's not room * to insert the new tuple on the current page without splitting, then * we can move right hoping to find more free space and avoid a split. * (We should not move right indefinitely, however, since that leads to @@ -494,7 +494,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel, * removing any LP_DEAD tuples. * * On entry, *buf and *offsetptr point to the first legal position - * where the new tuple could be inserted. The caller should hold an + * where the new tuple could be inserted. The caller should hold an * exclusive lock on *buf. *offsetptr can also be set to * InvalidOffsetNumber, in which case the function will search for the * right location within the page if needed. On exit, they point to the @@ -564,7 +564,7 @@ _bt_findinsertloc(Relation rel, * on every insert. We implement "get tired" as a random choice, * since stopping after scanning a fixed number of pages wouldn't work * well (we'd never reach the right-hand side of previously split - * pages). Currently the probability of moving right is set at 0.99, + * pages). Currently the probability of moving right is set at 0.99, * which may seem too high to change the behavior much, but it does an * excellent job of preventing O(N^2) behavior with many equal keys. *---------- @@ -574,7 +574,7 @@ _bt_findinsertloc(Relation rel, while (PageGetFreeSpace(page) < itemsz) { Buffer rbuf; - BlockNumber rblkno; + BlockNumber rblkno; /* * before considering moving right, see if we can obtain enough space @@ -620,10 +620,10 @@ _bt_findinsertloc(Relation rel, lpageop = (BTPageOpaque) PageGetSpecialPointer(page); /* - * If this page was incompletely split, finish the split now. 
- * We do this while holding a lock on the left sibling, which - * is not good because finishing the split could be a fairly - * lengthy operation. But this should happen very seldom. + * If this page was incompletely split, finish the split now. We + * do this while holding a lock on the left sibling, which is not + * good because finishing the split could be a fairly lengthy + * operation. But this should happen very seldom. */ if (P_INCOMPLETE_SPLIT(lpageop)) { @@ -681,7 +681,7 @@ _bt_findinsertloc(Relation rel, * + updates the metapage if a true root or fast root is split. * * On entry, we must have the correct buffer in which to do the - * insertion, and the buffer must be pinned and write-locked. On return, + * insertion, and the buffer must be pinned and write-locked. On return, * we will have dropped both the pin and the lock on the buffer. * * When inserting to a non-leaf page, 'cbuf' is the left-sibling of the @@ -978,7 +978,7 @@ _bt_split(Relation rel, Buffer buf, Buffer cbuf, OffsetNumber firstright, * origpage is the original page to be split. leftpage is a temporary * buffer that receives the left-sibling data, which will be copied back * into origpage on success. rightpage is the new page that receives the - * right-sibling data. If we fail before reaching the critical section, + * right-sibling data. If we fail before reaching the critical section, * origpage hasn't been modified and leftpage is only workspace. In * principle we shouldn't need to worry about rightpage either, because it * hasn't been linked into the btree page structure; but to avoid leaving @@ -1196,7 +1196,7 @@ _bt_split(Relation rel, Buffer buf, Buffer cbuf, OffsetNumber firstright, * page. If you're confused, imagine that page A splits to A B and * then again, yielding A C B, while vacuum is in progress. Tuples * originally in A could now be in either B or C, hence vacuum must - * examine both pages. But if D, our right sibling, has a different + * examine both pages. But if D, our right sibling, has a different * cycleid then it could not contain any tuples that were in A when * the vacuum started. */ @@ -1330,11 +1330,10 @@ _bt_split(Relation rel, Buffer buf, Buffer cbuf, OffsetNumber firstright, lastrdata++; /* - * Although we don't need to WAL-log anything on the left page, - * we still need XLogInsert to consider storing a full-page image - * of the left page, so make an empty entry referencing that - * buffer. This also ensures that the left page is always backup - * block 1. + * Although we don't need to WAL-log anything on the left page, we + * still need XLogInsert to consider storing a full-page image of + * the left page, so make an empty entry referencing that buffer. + * This also ensures that the left page is always backup block 1. */ lastrdata->data = NULL; lastrdata->len = 0; @@ -1448,7 +1447,7 @@ _bt_split(Relation rel, Buffer buf, Buffer cbuf, OffsetNumber firstright, * * We return the index of the first existing tuple that should go on the * righthand page, plus a boolean indicating whether the new tuple goes on - * the left or right page. The bool is necessary to disambiguate the case + * the left or right page. The bool is necessary to disambiguate the case * where firstright == newitemoff. */ static OffsetNumber @@ -1684,7 +1683,7 @@ _bt_checksplitloc(FindSplitData *state, * * On entry, buf and rbuf are the left and right split pages, which we * still hold write locks on per the L&Y algorithm. We release the - * write locks once we have write lock on the parent page. 
(Any sooner, + * write locks once we have write lock on the parent page. (Any sooner, * and it'd be possible for some other process to try to split or delete * one of these pages, and get confused because it cannot find the downlink.) * @@ -1705,7 +1704,7 @@ _bt_insert_parent(Relation rel, * Here we have to do something Lehman and Yao don't talk about: deal with * a root split and construction of a new root. If our stack is empty * then we have just split a node on what had been the root level when we - * descended the tree. If it was still the root then we perform a + * descended the tree. If it was still the root then we perform a * new-root construction. If it *wasn't* the root anymore, search to find * the next higher level that someone constructed meanwhile, and find the * right place to insert as for the normal case. @@ -1917,7 +1916,7 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access) /* * These loops will check every item on the page --- but in an * order that's attuned to the probability of where it actually - * is. Scan to the right first, then to the left. + * is. Scan to the right first, then to the left. */ for (offnum = start; offnum <= maxoff; diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c index c0ebb95ba8a..d357b33bc05 100644 --- a/src/backend/access/nbtree/nbtpage.c +++ b/src/backend/access/nbtree/nbtpage.c @@ -12,7 +12,7 @@ * src/backend/access/nbtree/nbtpage.c * * NOTES - * Postgres btree pages look like ordinary relation pages. The opaque + * Postgres btree pages look like ordinary relation pages. The opaque * data at high addresses includes pointers to left and right siblings * and flag data describing page state. The first page in a btree, page * zero, is special -- it stores meta-information describing the tree. @@ -36,7 +36,7 @@ static bool _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf, static bool _bt_lock_branch_parent(Relation rel, BlockNumber child, BTStack stack, Buffer *topparent, OffsetNumber *topoff, BlockNumber *target, BlockNumber *rightsib); -static void _bt_log_reuse_page(Relation rel, BlockNumber blkno, +static void _bt_log_reuse_page(Relation rel, BlockNumber blkno, TransactionId latestRemovedXid); /* @@ -62,7 +62,7 @@ _bt_initmetapage(Page page, BlockNumber rootbknum, uint32 level) metaopaque->btpo_flags = BTP_META; /* - * Set pd_lower just past the end of the metadata. This is not essential + * Set pd_lower just past the end of the metadata. This is not essential * but it makes the page look compressible to xlog.c. */ ((PageHeader) page)->pd_lower = @@ -80,7 +80,7 @@ _bt_initmetapage(Page page, BlockNumber rootbknum, uint32 level) * * The access type parameter (BT_READ or BT_WRITE) controls whether * a new root page will be created or not. If access = BT_READ, - * and no root page exists, we just return InvalidBuffer. For + * and no root page exists, we just return InvalidBuffer. For * BT_WRITE, we try to create the root page if it doesn't exist. * NOTE that the returned root page will have only a read lock set * on it even if access = BT_WRITE! @@ -197,7 +197,7 @@ _bt_getroot(Relation rel, int access) /* * Metadata initialized by someone else. In order to guarantee no * deadlocks, we have to release the metadata page and start all - * over again. (Is that really true? But it's hardly worth trying + * over again. (Is that really true? But it's hardly worth trying * to optimize this case.) 
*/ _bt_relbuf(rel, metabuf); @@ -254,7 +254,7 @@ _bt_getroot(Relation rel, int access) END_CRIT_SECTION(); /* - * swap root write lock for read lock. There is no danger of anyone + * swap root write lock for read lock. There is no danger of anyone * else accessing the new root page while it's unlocked, since no one * else knows where it is yet. */ @@ -322,7 +322,7 @@ _bt_getroot(Relation rel, int access) * By the time we acquire lock on the root page, it might have been split and * not be the true root anymore. This is okay for the present uses of this * routine; we only really need to be able to move up at least one tree level - * from whatever non-root page we were at. If we ever do need to lock the + * from whatever non-root page we were at. If we ever do need to lock the * one true root page, we could loop here, re-reading the metapage on each * failure. (Note that it wouldn't do to hold the lock on the metapage while * moving to the root --- that'd deadlock against any concurrent root split.) @@ -497,7 +497,7 @@ _bt_checkpage(Relation rel, Buffer buf) /* * ReadBuffer verifies that every newly-read page passes * PageHeaderIsValid, which means it either contains a reasonably sane - * page header or is all-zero. We have to defend against the all-zero + * page header or is all-zero. We have to defend against the all-zero * case, however. */ if (PageIsNew(page)) @@ -564,7 +564,7 @@ _bt_log_reuse_page(Relation rel, BlockNumber blkno, TransactionId latestRemovedX /* * _bt_getbuf() -- Get a buffer by block number for read or write. * - * blkno == P_NEW means to get an unallocated index page. The page + * blkno == P_NEW means to get an unallocated index page. The page * will be initialized before returning it. * * When this routine returns, the appropriate lock is set on the @@ -595,7 +595,7 @@ _bt_getbuf(Relation rel, BlockNumber blkno, int access) * First see if the FSM knows of any free pages. * * We can't trust the FSM's report unreservedly; we have to check that - * the page is still free. (For example, an already-free page could + * the page is still free. (For example, an already-free page could * have been re-used between the time the last VACUUM scanned it and * the time the VACUUM made its FSM updates.) * @@ -774,7 +774,7 @@ _bt_page_recyclable(Page page) /* * Delete item(s) from a btree page during VACUUM. * - * This must only be used for deleting leaf items. Deleting an item on a + * This must only be used for deleting leaf items. Deleting an item on a * non-leaf page has to be done as part of an atomic action that includes * deleting the page it points to. * @@ -842,7 +842,7 @@ _bt_delitems_vacuum(Relation rel, Buffer buf, /* * The target-offsets array is not in the buffer, but pretend that it - * is. When XLogInsert stores the whole buffer, the offsets array + * is. When XLogInsert stores the whole buffer, the offsets array * need not be stored too. */ if (nitems > 0) @@ -1049,11 +1049,12 @@ _bt_lock_branch_parent(Relation rel, BlockNumber child, BTStack stack, lbuf = _bt_getbuf(rel, leftsib, BT_READ); lpage = BufferGetPage(lbuf); lopaque = (BTPageOpaque) PageGetSpecialPointer(lpage); + /* * If the left sibling was concurrently split, so that its - * next-pointer doesn't point to the current page anymore, - * the split that created the current page must be completed. - * (We don't allow splitting an incompletely split page again + * next-pointer doesn't point to the current page anymore, the + * split that created the current page must be completed. 
(We + * don't allow splitting an incompletely split page again * until the previous split has been completed) */ if (lopaque->btpo_next == parent && @@ -1066,7 +1067,7 @@ _bt_lock_branch_parent(Relation rel, BlockNumber child, BTStack stack, } return _bt_lock_branch_parent(rel, parent, stack->bts_parent, - topparent, topoff, target, rightsib); + topparent, topoff, target, rightsib); } else { @@ -1112,6 +1113,7 @@ _bt_pagedel(Relation rel, Buffer buf) bool rightsib_empty; Page page; BTPageOpaque opaque; + /* * "stack" is a search stack leading (approximately) to the target page. * It is initially NULL, but when iterating, we keep it to avoid @@ -1140,24 +1142,24 @@ _bt_pagedel(Relation rel, Buffer buf) * was never supposed to leave half-dead pages in the tree, it was * just a transient state, but it was nevertheless possible in * error scenarios. We don't know how to deal with them here. They - * are harmless as far as searches are considered, but inserts into - * the deleted keyspace could add out-of-order downlinks in the - * upper levels. Log a notice, hopefully the admin will notice and - * reindex. + * are harmless as far as searches are considered, but inserts + * into the deleted keyspace could add out-of-order downlinks in + * the upper levels. Log a notice, hopefully the admin will notice + * and reindex. */ if (P_ISHALFDEAD(opaque)) ereport(LOG, (errcode(ERRCODE_INDEX_CORRUPTED), - errmsg("index \"%s\" contains a half-dead internal page", - RelationGetRelationName(rel)), + errmsg("index \"%s\" contains a half-dead internal page", + RelationGetRelationName(rel)), errhint("This can be caused by an interrupt VACUUM in version 9.3 or older, before upgrade. Please REINDEX it."))); _bt_relbuf(rel, buf); return ndeleted; } /* - * We can never delete rightmost pages nor root pages. While at - * it, check that page is not already deleted and is empty. + * We can never delete rightmost pages nor root pages. While at it, + * check that page is not already deleted and is empty. * * To keep the algorithm simple, we also never delete an incompletely * split page (they should be rare enough that this doesn't make any @@ -1167,10 +1169,10 @@ _bt_pagedel(Relation rel, Buffer buf) * left half of an incomplete split, but ensuring that it's not the * right half is more complicated. For that, we have to check that * the left sibling doesn't have its INCOMPLETE_SPLIT flag set. On - * the first iteration, we temporarily release the lock on the - * current page, and check the left sibling and also construct a - * search stack to. On subsequent iterations, we know we stepped right - * from a page that passed these tests, so it's OK. + * the first iteration, we temporarily release the lock on the current + * page, and check the left sibling and also construct a search stack + * to. On subsequent iterations, we know we stepped right from a page + * that passed these tests, so it's OK. */ if (P_RIGHTMOST(opaque) || P_ISROOT(opaque) || P_ISDELETED(opaque) || P_FIRSTDATAKEY(opaque) <= PageGetMaxOffsetNumber(page) || @@ -1184,9 +1186,9 @@ _bt_pagedel(Relation rel, Buffer buf) } /* - * First, remove downlink pointing to the page (or a parent of the page, - * if we are going to delete a taller branch), and mark the page as - * half-dead. + * First, remove downlink pointing to the page (or a parent of the + * page, if we are going to delete a taller branch), and mark the page + * as half-dead. 
*/ if (!P_ISHALFDEAD(opaque)) { @@ -1205,7 +1207,7 @@ _bt_pagedel(Relation rel, Buffer buf) ItemId itemid; IndexTuple targetkey; Buffer lbuf; - BlockNumber leftsib; + BlockNumber leftsib; itemid = PageGetItemId(page, P_HIKEY); targetkey = CopyIndexTuple((IndexTuple) PageGetItem(page, itemid)); @@ -1219,9 +1221,9 @@ _bt_pagedel(Relation rel, Buffer buf) LockBuffer(buf, BUFFER_LOCK_UNLOCK); /* - * Fetch the left sibling, to check that it's not marked - * with INCOMPLETE_SPLIT flag. That would mean that the - * page to-be-deleted doesn't have a downlink, and the page + * Fetch the left sibling, to check that it's not marked with + * INCOMPLETE_SPLIT flag. That would mean that the page + * to-be-deleted doesn't have a downlink, and the page * deletion algorithm isn't prepared to handle that. */ if (!P_LEFTMOST(opaque)) @@ -1267,7 +1269,7 @@ _bt_pagedel(Relation rel, Buffer buf) /* * Then unlink it from its siblings. Each call to - *_bt_unlink_halfdead_page unlinks the topmost page from the branch, + * _bt_unlink_halfdead_page unlinks the topmost page from the branch, * making it shallower. Iterate until the leaf page is gone. */ rightsib_empty = false; @@ -1291,8 +1293,8 @@ _bt_pagedel(Relation rel, Buffer buf) * is that it was the rightmost child of the parent. Now that we * removed the downlink for this page, the right sibling might now be * the only child of the parent, and could be removed. It would be - * picked up by the next vacuum anyway, but might as well try to remove - * it now, so loop back to process the right sibling. + * picked up by the next vacuum anyway, but might as well try to + * remove it now, so loop back to process the right sibling. */ if (!rightsib_empty) break; @@ -1310,9 +1312,9 @@ _bt_pagedel(Relation rel, Buffer buf) static bool _bt_mark_page_halfdead(Relation rel, Buffer leafbuf, BTStack stack) { - BlockNumber leafblkno; + BlockNumber leafblkno; BlockNumber leafrightsib; - BlockNumber target; + BlockNumber target; BlockNumber rightsib; ItemId itemid; Page page; @@ -1351,7 +1353,7 @@ _bt_mark_page_halfdead(Relation rel, Buffer leafbuf, BTStack stack) /* * Check that the parent-page index items we're about to delete/overwrite - * contain what we expect. This can fail if the index has become corrupt + * contain what we expect. This can fail if the index has become corrupt * for some reason. We want to throw any error before entering the * critical section --- otherwise it'd be a PANIC. * @@ -1490,9 +1492,9 @@ _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf, bool *rightsib_empty) BlockNumber leafblkno = BufferGetBlockNumber(leafbuf); BlockNumber leafleftsib; BlockNumber leafrightsib; - BlockNumber target; - BlockNumber leftsib; - BlockNumber rightsib; + BlockNumber target; + BlockNumber leftsib; + BlockNumber rightsib; Buffer lbuf = InvalidBuffer; Buffer buf; Buffer rbuf; @@ -1506,7 +1508,7 @@ _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf, bool *rightsib_empty) int targetlevel; ItemPointer leafhikey; BlockNumber nextchild; - BlockNumber topblkno; + BlockNumber topblkno; page = BufferGetPage(leafbuf); opaque = (BTPageOpaque) PageGetSpecialPointer(page); @@ -1596,7 +1598,7 @@ _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf, bool *rightsib_empty) lbuf = InvalidBuffer; /* - * Next write-lock the target page itself. It should be okay to take just + * Next write-lock the target page itself. It should be okay to take just * a write lock not a superexclusive lock, since no scans would stop on an * empty page. 
*/ @@ -1605,9 +1607,9 @@ _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf, bool *rightsib_empty) opaque = (BTPageOpaque) PageGetSpecialPointer(page); /* - * Check page is still empty etc, else abandon deletion. This is just - * for paranoia's sake; a half-dead page cannot resurrect because there - * can be only one vacuum process running at a time. + * Check page is still empty etc, else abandon deletion. This is just for + * paranoia's sake; a half-dead page cannot resurrect because there can be + * only one vacuum process running at a time. */ if (P_RIGHTMOST(opaque) || P_ISROOT(opaque) || P_ISDELETED(opaque)) { @@ -1733,7 +1735,7 @@ _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf, bool *rightsib_empty) * we're in VACUUM and would not otherwise have an XID. Having already * updated links to the target, ReadNewTransactionId() suffices as an * upper bound. Any scan having retained a now-stale link is advertising - * in its PGXACT an xmin less than or equal to the value we read here. It + * in its PGXACT an xmin less than or equal to the value we read here. It * will continue to do so, holding back RecentGlobalXmin, for the duration * of that scan. */ diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c index 542ed439843..36dc6c278ea 100644 --- a/src/backend/access/nbtree/nbtree.c +++ b/src/backend/access/nbtree/nbtree.c @@ -208,7 +208,7 @@ btbuildempty(PG_FUNCTION_ARGS) metapage = (Page) palloc(BLCKSZ); _bt_initmetapage(metapage, P_NONE, 0); - /* Write the page. If archiving/streaming, XLOG it. */ + /* Write the page. If archiving/streaming, XLOG it. */ PageSetChecksumInplace(metapage, BTREE_METAPAGE); smgrwrite(index->rd_smgr, INIT_FORKNUM, BTREE_METAPAGE, (char *) metapage, true); @@ -427,7 +427,7 @@ btbeginscan(PG_FUNCTION_ARGS) /* * We don't know yet whether the scan will be index-only, so we do not - * allocate the tuple workspace arrays until btrescan. However, we set up + * allocate the tuple workspace arrays until btrescan. However, we set up * scan->xs_itupdesc whether we'll need it or not, since that's so cheap. */ so->currTuples = so->markTuples = NULL; @@ -472,7 +472,7 @@ btrescan(PG_FUNCTION_ARGS) /* * Allocate tuple workspace arrays, if needed for an index-only scan and - * not already done in a previous rescan call. To save on palloc + * not already done in a previous rescan call. To save on palloc * overhead, both workspaces are allocated as one palloc block; only this * function and btendscan know that. * @@ -952,7 +952,7 @@ restart: vstate->lastBlockLocked = blkno; /* - * Check whether we need to recurse back to earlier pages. What we + * Check whether we need to recurse back to earlier pages. What we * are concerned about is a page split that happened since we started * the vacuum scan. If the split moved some tuples to a lower page * then we might have missed 'em. If so, set up for tail recursion. diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c index 0bf12f0e107..203b9691baa 100644 --- a/src/backend/access/nbtree/nbtsearch.c +++ b/src/backend/access/nbtree/nbtsearch.c @@ -50,7 +50,7 @@ static bool _bt_endpoint(IndexScanDesc scan, ScanDirection dir); * * NOTE that the returned buffer is read-locked regardless of the access * parameter. However, access = BT_WRITE will allow an empty root page - * to be created and returned. When access = BT_READ, an empty index + * to be created and returned. When access = BT_READ, an empty index * will result in *bufP being set to InvalidBuffer. 
Also, in BT_WRITE mode, * any incomplete splits encountered during the search will be finished. */ @@ -271,7 +271,7 @@ _bt_moveright(Relation rel, * (or leaf keys > given scankey when nextkey is true). * * This procedure is not responsible for walking right, it just examines - * the given page. _bt_binsrch() has no lock or refcount side effects + * the given page. _bt_binsrch() has no lock or refcount side effects * on the buffer. */ OffsetNumber @@ -403,7 +403,7 @@ _bt_compare(Relation rel, /* * The scan key is set up with the attribute number associated with each * term in the key. It is important that, if the index is multi-key, the - * scan contain the first k key attributes, and that they be in order. If + * scan contain the first k key attributes, and that they be in order. If * you think about how multi-key ordering works, you'll understand why * this is. * @@ -442,7 +442,7 @@ _bt_compare(Relation rel, /* * The sk_func needs to be passed the index value as left arg and * the sk_argument as right arg (they might be of different - * types). Since it is convenient for callers to think of + * types). Since it is convenient for callers to think of * _bt_compare as comparing the scankey to the index item, we have * to flip the sign of the comparison result. (Unless it's a DESC * column, in which case we *don't* flip the sign.) @@ -471,7 +471,7 @@ _bt_compare(Relation rel, * _bt_first() -- Find the first item in a scan. * * We need to be clever about the direction of scan, the search - * conditions, and the tree ordering. We find the first item (or, + * conditions, and the tree ordering. We find the first item (or, * if backwards scan, the last item) in the tree that satisfies the * qualifications in the scan key. On success exit, the page containing * the current index tuple is pinned but not locked, and data about @@ -527,7 +527,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) * We want to identify the keys that can be used as starting boundaries; * these are =, >, or >= keys for a forward scan or =, <, <= keys for * a backwards scan. We can use keys for multiple attributes so long as - * the prior attributes had only =, >= (resp. =, <=) keys. Once we accept + * the prior attributes had only =, >= (resp. =, <=) keys. Once we accept * a > or < boundary or find an attribute with no boundary (which can be * thought of as the same as "> -infinity"), we can't use keys for any * attributes to its right, because it would break our simplistic notion @@ -742,7 +742,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) * even if the row comparison is of ">" or "<" type, because the * condition applied to all but the last row member is effectively * ">=" or "<=", and so the extra keys don't break the positioning - * scheme. But, by the same token, if we aren't able to use all + * scheme. But, by the same token, if we aren't able to use all * the row members, then the part of the row comparison that we * did use has to be treated as just a ">=" or "<=" condition, and * so we'd better adjust strat_total accordingly. @@ -861,7 +861,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) /* * Find first item >= scankey, then back up one to arrive at last - * item < scankey. (Note: this positioning strategy is only used + * item < scankey. (Note: this positioning strategy is only used * for a backward scan, so that is always the correct starting * position.) */ @@ -910,7 +910,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) case BTGreaterEqualStrategyNumber: /* - * Find first item >= scankey. 
(This is only used for forward + * Find first item >= scankey. (This is only used for forward * scans.) */ nextkey = false; @@ -988,7 +988,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) * * The actually desired starting point is either this item or the prior * one, or in the end-of-page case it's the first item on the next page or - * the last item on this page. Adjust the starting offset if needed. (If + * the last item on this page. Adjust the starting offset if needed. (If * this results in an offset before the first item or after the last one, * _bt_readpage will report no items found, and then we'll step to the * next page as needed.) @@ -1304,7 +1304,7 @@ _bt_steppage(IndexScanDesc scan, ScanDirection dir) * than the walk-right case because of the possibility that the page * to our left splits while we are in flight to it, plus the * possibility that the page we were on gets deleted after we leave - * it. See nbtree/README for details. + * it. See nbtree/README for details. */ for (;;) { @@ -1399,7 +1399,7 @@ _bt_walk_left(Relation rel, Buffer buf) * anymore, not that its left sibling got split more than four times. * * Note that it is correct to test P_ISDELETED not P_IGNORE here, - * because half-dead pages are still in the sibling chain. Caller + * because half-dead pages are still in the sibling chain. Caller * must reject half-dead pages if wanted. */ tries = 0; @@ -1425,7 +1425,7 @@ _bt_walk_left(Relation rel, Buffer buf) if (P_ISDELETED(opaque)) { /* - * It was deleted. Move right to first nondeleted page (there + * It was deleted. Move right to first nondeleted page (there * must be one); that is the page that has acquired the deleted * one's keyspace, so stepping left from it will take us where we * want to be. @@ -1469,7 +1469,7 @@ _bt_walk_left(Relation rel, Buffer buf) * _bt_get_endpoint() -- Find the first or last page on a given tree level * * If the index is empty, we will return InvalidBuffer; any other failure - * condition causes ereport(). We will not return a dead page. + * condition causes ereport(). We will not return a dead page. * * The returned buffer is pinned and read-locked. */ diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c index 9ddc2754997..1281a120c56 100644 --- a/src/backend/access/nbtree/nbtsort.c +++ b/src/backend/access/nbtree/nbtsort.c @@ -7,7 +7,7 @@ * * We use tuplesort.c to sort the given index tuples into order. * Then we scan the index tuples in order and build the btree pages - * for each level. We load source tuples into leaf-level pages. + * for each level. We load source tuples into leaf-level pages. * Whenever we fill a page at one level, we add a link to it to its * parent level (starting a new parent level if necessary). When * done, we write out each final page on each level, adding it to @@ -42,11 +42,11 @@ * * Since the index will never be used unless it is completely built, * from a crash-recovery point of view there is no need to WAL-log the - * steps of the build. After completing the index build, we can just sync + * steps of the build. After completing the index build, we can just sync * the whole file to disk using smgrimmedsync() before exiting this module. * This can be seen to be sufficient for crash recovery by considering that * it's effectively equivalent to what would happen if a CHECKPOINT occurred - * just after the index build. However, it is clearly not sufficient if the + * just after the index build. 
However, it is clearly not sufficient if the * DBA is using the WAL log for PITR or replication purposes, since another * machine would not be able to reconstruct the index from WAL. Therefore, * we log the completed index pages to WAL if and only if WAL archiving is @@ -89,7 +89,7 @@ struct BTSpool }; /* - * Status record for a btree page being built. We have one of these + * Status record for a btree page being built. We have one of these * for each active tree level. * * The reason we need to store a copy of the minimum key is that we'll @@ -160,7 +160,7 @@ _bt_spoolinit(Relation heap, Relation index, bool isunique, bool isdead) * We size the sort area as maintenance_work_mem rather than work_mem to * speed index creation. This should be OK since a single backend can't * run multiple index creations in parallel. Note that creation of a - * unique index actually requires two BTSpool objects. We expect that the + * unique index actually requires two BTSpool objects. We expect that the * second one (for dead tuples) won't get very full, so we give it only * work_mem. */ @@ -298,7 +298,7 @@ _bt_blwritepage(BTWriteState *wstate, Page page, BlockNumber blkno) PageSetChecksumInplace(page, blkno); /* - * Now write the page. There's no need for smgr to schedule an fsync for + * Now write the page. There's no need for smgr to schedule an fsync for * this write; we'll do it ourselves before ending the build. */ if (blkno == wstate->btws_pages_written) @@ -423,14 +423,14 @@ _bt_sortaddtup(Page page, * A leaf page being built looks like: * * +----------------+---------------------------------+ - * | PageHeaderData | linp0 linp1 linp2 ... | + * | PageHeaderData | linp0 linp1 linp2 ... | * +-----------+----+---------------------------------+ * | ... linpN | | * +-----------+--------------------------------------+ * | ^ last | * | | * +-------------+------------------------------------+ - * | | itemN ... | + * | | itemN ... | * +-------------+------------------+-----------------+ * | ... item3 item2 item1 | "special space" | * +--------------------------------+-----------------+ @@ -492,9 +492,9 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup) RelationGetRelationName(wstate->index)))); /* - * Check to see if page is "full". It's definitely full if the item won't + * Check to see if page is "full". It's definitely full if the item won't * fit. Otherwise, compare to the target freespace derived from the - * fillfactor. However, we must put at least two items on each page, so + * fillfactor. However, we must put at least two items on each page, so * disregard fillfactor if we don't have that many. */ if (pgspc < itupsz || (pgspc < state->btps_full && last_off > P_FIRSTKEY)) @@ -567,7 +567,7 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup) } /* - * Write out the old page. We never need to touch it again, so we can + * Write out the old page. We never need to touch it again, so we can * free the opage workspace too. */ _bt_blwritepage(wstate, opage, oblkno); @@ -804,7 +804,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2) /* * If the index is WAL-logged, we must fsync it down to disk before it's - * safe to commit the transaction. (For a non-WAL-logged index we don't + * safe to commit the transaction. (For a non-WAL-logged index we don't * care since the index will be uninteresting after a crash anyway.) * * It's obvious that we must do this when not WAL-logging the build. 
It's diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c index 922e6a9cd4e..f8f8e69be7f 100644 --- a/src/backend/access/nbtree/nbtutils.c +++ b/src/backend/access/nbtree/nbtutils.c @@ -107,7 +107,7 @@ _bt_mkscankey(Relation rel, IndexTuple itup) * comparison data ultimately used must match the key datatypes. * * The result cannot be used with _bt_compare(), unless comparison - * data is first stored into the key entries. Currently this + * data is first stored into the key entries. Currently this * routine is only called by nbtsort.c and tuplesort.c, which have * their own comparison routines. */ @@ -269,7 +269,7 @@ _bt_preprocess_array_keys(IndexScanDesc scan) continue; /* - * First, deconstruct the array into elements. Anything allocated + * First, deconstruct the array into elements. Anything allocated * here (including a possibly detoasted array value) is in the * workspace context. */ @@ -283,7 +283,7 @@ _bt_preprocess_array_keys(IndexScanDesc scan) &elem_values, &elem_nulls, &num_elems); /* - * Compress out any null elements. We can ignore them since we assume + * Compress out any null elements. We can ignore them since we assume * all btree operators are strict. */ num_nonnulls = 0; @@ -517,7 +517,7 @@ _bt_compare_array_elements(const void *a, const void *b, void *arg) * _bt_start_array_keys() -- Initialize array keys at start of a scan * * Set up the cur_elem counters and fill in the first sk_argument value for - * each array scankey. We can't do this until we know the scan direction. + * each array scankey. We can't do this until we know the scan direction. */ void _bt_start_array_keys(IndexScanDesc scan, ScanDirection dir) @@ -670,8 +670,8 @@ _bt_restore_array_keys(IndexScanDesc scan) * so that the index sorts in the desired direction. * * One key purpose of this routine is to discover which scan keys must be - * satisfied to continue the scan. It also attempts to eliminate redundant - * keys and detect contradictory keys. (If the index opfamily provides + * satisfied to continue the scan. It also attempts to eliminate redundant + * keys and detect contradictory keys. (If the index opfamily provides * incomplete sets of cross-type operators, we may fail to detect redundant * or contradictory keys, but we can survive that.) * @@ -702,7 +702,7 @@ _bt_restore_array_keys(IndexScanDesc scan) * that's the only one returned. (So, we return either a single = key, * or one or two boundary-condition keys for each attr.) However, if we * cannot compare two keys for lack of a suitable cross-type operator, - * we cannot eliminate either. If there are two such keys of the same + * we cannot eliminate either. If there are two such keys of the same * operator strategy, the second one is just pushed into the output array * without further processing here. We may also emit both >/>= or both * </<= keys if we can't compare them. The logic about required keys still @@ -737,7 +737,7 @@ _bt_restore_array_keys(IndexScanDesc scan) * Note: the reason we have to copy the preprocessed scan keys into private * storage is that we are modifying the array based on comparisons of the * key argument values, which could change on a rescan or after moving to - * new elements of array keys. Therefore we can't overwrite the source data. + * new elements of array keys. Therefore we can't overwrite the source data. 
*/ void _bt_preprocess_keys(IndexScanDesc scan) @@ -919,7 +919,7 @@ _bt_preprocess_keys(IndexScanDesc scan) /* * Emit the cleaned-up keys into the outkeys[] array, and then - * mark them if they are required. They are required (possibly + * mark them if they are required. They are required (possibly * only in one direction) if all attrs before this one had "=". */ for (j = BTMaxStrategyNumber; --j >= 0;) @@ -1017,7 +1017,7 @@ _bt_preprocess_keys(IndexScanDesc scan) * and amoplefttype/amoprighttype equal to the two argument datatypes. * * If the opfamily doesn't supply a complete set of cross-type operators we - * may not be able to make the comparison. If we can make the comparison + * may not be able to make the comparison. If we can make the comparison * we store the operator result in *result and return TRUE. We return FALSE * if the comparison could not be made. * @@ -1043,7 +1043,7 @@ _bt_compare_scankey_args(IndexScanDesc scan, ScanKey op, StrategyNumber strat; /* - * First, deal with cases where one or both args are NULL. This should + * First, deal with cases where one or both args are NULL. This should * only happen when the scankeys represent IS NULL/NOT NULL conditions. */ if ((leftarg->sk_flags | rightarg->sk_flags) & SK_ISNULL) @@ -1183,7 +1183,7 @@ _bt_compare_scankey_args(IndexScanDesc scan, ScanKey op, * * Lastly, for ordinary scankeys (not IS NULL/NOT NULL), we check for a * NULL comparison value. Since all btree operators are assumed strict, - * a NULL means that the qual cannot be satisfied. We return TRUE if the + * a NULL means that the qual cannot be satisfied. We return TRUE if the * comparison value isn't NULL, or FALSE if the scan should be abandoned. * * This function is applied to the *input* scankey structure; therefore @@ -1212,7 +1212,7 @@ _bt_fix_scankey_strategy(ScanKey skey, int16 *indoption) * --- we can treat IS NULL as an equality operator for purposes of search * strategy. * - * Likewise, "x IS NOT NULL" is supported. We treat that as either "less + * Likewise, "x IS NOT NULL" is supported. We treat that as either "less * than NULL" in a NULLS LAST index, or "greater than NULL" in a NULLS * FIRST index. * @@ -1284,7 +1284,7 @@ _bt_fix_scankey_strategy(ScanKey skey, int16 *indoption) * Mark a scankey as "required to continue the scan". * * Depending on the operator type, the key may be required for both scan - * directions or just one. Also, if the key is a row comparison header, + * directions or just one. Also, if the key is a row comparison header, * we have to mark the appropriate subsidiary ScanKeys as required. In * such cases, the first subsidiary key is required, but subsequent ones * are required only as long as they correspond to successive index columns @@ -1296,7 +1296,7 @@ _bt_fix_scankey_strategy(ScanKey skey, int16 *indoption) * scribbling on a data structure belonging to the index AM's caller, not on * our private copy. This should be OK because the marking will not change * from scan to scan within a query, and so we'd just re-mark the same way - * anyway on a rescan. Something to keep an eye on though. + * anyway on a rescan. Something to keep an eye on though. */ static void _bt_mark_scankey_required(ScanKey skey) @@ -1482,7 +1482,7 @@ _bt_checkkeys(IndexScanDesc scan, /* * Since NULLs are sorted before non-NULLs, we know we have * reached the lower limit of the range of values for this - * index attr. On a backward scan, we can stop if this qual + * index attr. 
On a backward scan, we can stop if this qual * is one of the "must match" subset. We can stop regardless * of whether the qual is > or <, so long as it's required, * because it's not possible for any future tuples to pass. On @@ -1498,8 +1498,8 @@ _bt_checkkeys(IndexScanDesc scan, /* * Since NULLs are sorted after non-NULLs, we know we have * reached the upper limit of the range of values for this - * index attr. On a forward scan, we can stop if this qual is - * one of the "must match" subset. We can stop regardless of + * index attr. On a forward scan, we can stop if this qual is + * one of the "must match" subset. We can stop regardless of * whether the qual is > or <, so long as it's required, * because it's not possible for any future tuples to pass. On * a backward scan, however, we must keep going, because we @@ -1593,7 +1593,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc, /* * Since NULLs are sorted before non-NULLs, we know we have * reached the lower limit of the range of values for this - * index attr. On a backward scan, we can stop if this qual + * index attr. On a backward scan, we can stop if this qual * is one of the "must match" subset. We can stop regardless * of whether the qual is > or <, so long as it's required, * because it's not possible for any future tuples to pass. On @@ -1609,8 +1609,8 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc, /* * Since NULLs are sorted after non-NULLs, we know we have * reached the upper limit of the range of values for this - * index attr. On a forward scan, we can stop if this qual is - * one of the "must match" subset. We can stop regardless of + * index attr. On a forward scan, we can stop if this qual is + * one of the "must match" subset. We can stop regardless of * whether the qual is > or <, so long as it's required, * because it's not possible for any future tuples to pass. On * a backward scan, however, we must keep going, because we @@ -1631,7 +1631,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc, { /* * Unlike the simple-scankey case, this isn't a disallowed case. - * But it can never match. If all the earlier row comparison + * But it can never match. If all the earlier row comparison * columns are required for the scan direction, we can stop the * scan, because there can't be another tuple that will succeed. */ @@ -1696,7 +1696,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc, /* * Tuple fails this qual. If it's a required qual for the current * scan direction, then we can conclude no further tuples will pass, - * either. Note we have to look at the deciding column, not + * either. Note we have to look at the deciding column, not * necessarily the first or last column of the row condition. */ if ((subkey->sk_flags & SK_BT_REQFWD) && @@ -1722,7 +1722,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc, * is sufficient for setting LP_DEAD status (which is only a hint). * * We match items by heap TID before assuming they are the right ones to - * delete. We cope with cases where items have moved right due to insertions. + * delete. We cope with cases where items have moved right due to insertions. * If an item has moved off the current page due to a split, we'll fail to * find it and do nothing (this is not an error case --- we assume the item * will eventually get marked in a future indexscan). 
Note that because we @@ -1806,8 +1806,8 @@ _bt_killitems(IndexScanDesc scan, bool haveLock) /* * The following routines manage a shared-memory area in which we track * assignment of "vacuum cycle IDs" to currently-active btree vacuuming - * operations. There is a single counter which increments each time we - * start a vacuum to assign it a cycle ID. Since multiple vacuums could + * operations. There is a single counter which increments each time we + * start a vacuum to assign it a cycle ID. Since multiple vacuums could * be active concurrently, we have to track the cycle ID for each active * vacuum; this requires at most MaxBackends entries (usually far fewer). * We assume at most one vacuum can be active for a given index. diff --git a/src/backend/access/nbtree/nbtxlog.c b/src/backend/access/nbtree/nbtxlog.c index 86824f3495e..640639c175e 100644 --- a/src/backend/access/nbtree/nbtxlog.c +++ b/src/backend/access/nbtree/nbtxlog.c @@ -40,9 +40,9 @@ _bt_restore_page(Page page, char *from, int len) int nitems; /* - * To get the items back in the original order, we add them to the page - * in reverse. To figure out where one tuple ends and another begins, - * we have to scan them in forward order first. + * To get the items back in the original order, we add them to the page in + * reverse. To figure out where one tuple ends and another begins, we + * have to scan them in forward order first. */ i = 0; while (from < end) @@ -97,7 +97,7 @@ _bt_restore_meta(RelFileNode rnode, XLogRecPtr lsn, pageop->btpo_flags = BTP_META; /* - * Set pd_lower just past the end of the metadata. This is not essential + * Set pd_lower just past the end of the metadata. This is not essential * but it makes the page look compressible to xlog.c. */ ((PageHeader) metapg)->pd_lower = @@ -118,7 +118,7 @@ static void _bt_clear_incomplete_split(XLogRecPtr lsn, XLogRecord *record, RelFileNode rnode, BlockNumber cblock) { - Buffer buf; + Buffer buf; buf = XLogReadBuffer(rnode, cblock, false); if (BufferIsValid(buf)) @@ -128,6 +128,7 @@ _bt_clear_incomplete_split(XLogRecPtr lsn, XLogRecord *record, if (lsn > PageGetLSN(page)) { BTPageOpaque pageop = (BTPageOpaque) PageGetSpecialPointer(page); + Assert((pageop->btpo_flags & BTP_INCOMPLETE_SPLIT) != 0); pageop->btpo_flags &= ~BTP_INCOMPLETE_SPLIT; @@ -153,6 +154,7 @@ btree_xlog_insert(bool isleaf, bool ismeta, datapos = (char *) xlrec + SizeOfBtreeInsert; datalen = record->xl_len - SizeOfBtreeInsert; + /* * if this insert finishes a split at lower level, extract the block * number of the (left) child. @@ -172,10 +174,10 @@ btree_xlog_insert(bool isleaf, bool ismeta, } /* - * Insertion to an internal page finishes an incomplete split at the - * child level. Clear the incomplete-split flag in the child. Note: - * during normal operation, the child and parent pages are locked at the - * same time, so that clearing the flag and inserting the downlink appear + * Insertion to an internal page finishes an incomplete split at the child + * level. Clear the incomplete-split flag in the child. Note: during + * normal operation, the child and parent pages are locked at the same + * time, so that clearing the flag and inserting the downlink appear * atomic to other backends. We don't bother with that during replay, * because readers don't care about the incomplete-split flag and there * cannot be updates happening. 
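
The btree_xlog_insert hunks above spell out the replay rule for finishing an incomplete split: redo reads the child block and clears its incomplete-split flag only when the WAL record's LSN is newer than the page's LSN, so replay stays idempotent even though the child and parent are not locked together as they are during normal operation. The standalone C sketch below models just that LSN-gated, idempotent update; the types and names here (FakePage, BTP_INCOMPLETE_SPLIT_FLAG, replay_clear_incomplete_split) are invented for the example and are not PostgreSQL's own structures or API.

    /*
     * Illustrative sketch only: a self-contained model of "apply the change
     * only if the WAL record is newer than the page", as described in the
     * btree_xlog_insert comments above.  All names are invented.
     */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BTP_INCOMPLETE_SPLIT_FLAG 0x0001

    typedef struct FakePage
    {
        uint64_t lsn;    /* LSN of the last change applied to the page */
        uint16_t flags;  /* page flag bits */
    } FakePage;

    /* Replay "clear the incomplete-split flag on the child" idempotently. */
    static void
    replay_clear_incomplete_split(FakePage *child, uint64_t record_lsn)
    {
        if (record_lsn > child->lsn)
        {
            /* The flag must still be set if the page predates this record. */
            assert(child->flags & BTP_INCOMPLETE_SPLIT_FLAG);
            child->flags &= ~BTP_INCOMPLETE_SPLIT_FLAG;
            child->lsn = record_lsn;   /* stamp the page with the record's LSN */
        }
        /* else: the page already reflects this record (e.g. a full-page image) */
    }

    int
    main(void)
    {
        FakePage child = { .lsn = 100, .flags = BTP_INCOMPLETE_SPLIT_FLAG };

        replay_clear_incomplete_split(&child, 200);  /* applies the change */
        replay_clear_incomplete_split(&child, 200);  /* no-op on a second pass */
        printf("flags = 0x%04x, lsn = %llu\n",
               (unsigned) child.flags, (unsigned long long) child.lsn);
        return 0;
    }

Running it twice with the same record LSN leaves the page untouched on the second pass, which is the property redo needs when recovery restarts and re-reads the same WAL records.
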
@@ -279,9 +281,10 @@ btree_xlog_split(bool onleft, bool isroot, datapos += left_hikeysz; datalen -= left_hikeysz; } + /* - * If this insertion finishes an incomplete split, get the block number - * of the child. + * If this insertion finishes an incomplete split, get the block number of + * the child. */ if (!isleaf && !(record->xl_info & XLR_BKP_BLOCK(1))) { @@ -439,7 +442,7 @@ btree_xlog_split(bool onleft, bool isroot, * the backup block containing right sibling is 2 or 3, depending * whether this was a leaf or internal page. */ - int rnext_index = isleaf ? 2 : 3; + int rnext_index = isleaf ? 2 : 3; if (record->xl_info & XLR_BKP_BLOCK(rnext_index)) (void) RestoreBackupBlock(lsn, record, rnext_index, false, false); @@ -620,7 +623,7 @@ btree_xlog_delete_get_latestRemovedXid(xl_btree_delete *xlrec) /* * In what follows, we have to examine the previous state of the index - * page, as well as the heap page(s) it points to. This is only valid if + * page, as well as the heap page(s) it points to. This is only valid if * WAL replay has reached a consistent database state; which means that * the preceding check is not just an optimization, but is *necessary*. We * won't have let in any user sessions before we reach consistency. @@ -629,9 +632,9 @@ btree_xlog_delete_get_latestRemovedXid(xl_btree_delete *xlrec) elog(PANIC, "btree_xlog_delete_get_latestRemovedXid: cannot operate with inconsistent data"); /* - * Get index page. If the DB is consistent, this should not fail, nor + * Get index page. If the DB is consistent, this should not fail, nor * should any of the heap page fetches below. If one does, we return - * InvalidTransactionId to cancel all HS transactions. That's probably + * InvalidTransactionId to cancel all HS transactions. That's probably * overkill, but it's safe, and certainly better than panicking here. */ ibuffer = XLogReadBuffer(xlrec->node, xlrec->block, false); @@ -716,9 +719,9 @@ btree_xlog_delete_get_latestRemovedXid(xl_btree_delete *xlrec) /* * If all heap tuples were LP_DEAD then we will be returning * InvalidTransactionId here, which avoids conflicts. This matches - * existing logic which assumes that LP_DEAD tuples must already be - * older than the latestRemovedXid on the cleanup record that - * set them as LP_DEAD, hence must already have generated a conflict. + * existing logic which assumes that LP_DEAD tuples must already be older + * than the latestRemovedXid on the cleanup record that set them as + * LP_DEAD, hence must already have generated a conflict. */ return latestRemovedXid; } @@ -735,7 +738,7 @@ btree_xlog_delete(XLogRecPtr lsn, XLogRecord *record) * If we have any conflict processing to do, it must happen before we * update the page. * - * Btree delete records can conflict with standby queries. You might + * Btree delete records can conflict with standby queries. You might * think that vacuum records would conflict as well, but we've handled * that already. 
XLOG_HEAP2_CLEANUP_INFO records provide the highest xid * cleaned by the vacuum of the heap and so we can resolve any conflicts @@ -828,7 +831,7 @@ btree_xlog_mark_page_halfdead(uint8 info, XLogRecPtr lsn, XLogRecord *record) ItemId itemid; IndexTuple itup; OffsetNumber nextoffset; - BlockNumber rightsib; + BlockNumber rightsib; poffset = ItemPointerGetOffsetNumber(&(xlrec->target.tid)); diff --git a/src/backend/access/rmgrdesc/gindesc.c b/src/backend/access/rmgrdesc/gindesc.c index aa60c8db65c..cd1edfffa25 100644 --- a/src/backend/access/rmgrdesc/gindesc.c +++ b/src/backend/access/rmgrdesc/gindesc.c @@ -54,7 +54,7 @@ desc_recompress_leaf(StringInfo buf, ginxlogRecompressDataLeaf *insertData) walbuf += nitems * sizeof(ItemPointerData); } - switch(a_action) + switch (a_action) { case GIN_SEGMENT_ADDITEMS: appendStringInfo(buf, " %d (add %d items)", a_segno, nitems); @@ -94,13 +94,13 @@ gin_desc(StringInfo buf, uint8 xl_info, char *rec) case XLOG_GIN_INSERT: { ginxlogInsert *xlrec = (ginxlogInsert *) rec; - char *payload = rec + sizeof(ginxlogInsert); + char *payload = rec + sizeof(ginxlogInsert); appendStringInfoString(buf, "Insert item, "); desc_node(buf, xlrec->node, xlrec->blkno); appendStringInfo(buf, " isdata: %c isleaf: %c", - (xlrec->flags & GIN_INSERT_ISDATA) ? 'T' : 'F', - (xlrec->flags & GIN_INSERT_ISLEAF) ? 'T' : 'F'); + (xlrec->flags & GIN_INSERT_ISDATA) ? 'T' : 'F', + (xlrec->flags & GIN_INSERT_ISLEAF) ? 'T' : 'F'); if (!(xlrec->flags & GIN_INSERT_ISLEAF)) { BlockNumber leftChildBlkno; @@ -115,11 +115,11 @@ gin_desc(StringInfo buf, uint8 xl_info, char *rec) } if (!(xlrec->flags & GIN_INSERT_ISDATA)) appendStringInfo(buf, " isdelete: %c", - (((ginxlogInsertEntry *) payload)->isDelete) ? 'T' : 'F'); + (((ginxlogInsertEntry *) payload)->isDelete) ? 'T' : 'F'); else if (xlrec->flags & GIN_INSERT_ISLEAF) { ginxlogRecompressDataLeaf *insertData = - (ginxlogRecompressDataLeaf *) payload; + (ginxlogRecompressDataLeaf *) payload; if (xl_info & XLR_BKP_BLOCK(0)) appendStringInfo(buf, " (full page image)"); @@ -129,10 +129,11 @@ gin_desc(StringInfo buf, uint8 xl_info, char *rec) else { ginxlogInsertDataInternal *insertData = (ginxlogInsertDataInternal *) payload; + appendStringInfo(buf, " pitem: %u-%u/%u", - PostingItemGetBlockNumber(&insertData->newitem), - ItemPointerGetBlockNumber(&insertData->newitem.key), - ItemPointerGetOffsetNumber(&insertData->newitem.key)); + PostingItemGetBlockNumber(&insertData->newitem), + ItemPointerGetBlockNumber(&insertData->newitem.key), + ItemPointerGetOffsetNumber(&insertData->newitem.key)); } } break; @@ -144,8 +145,8 @@ gin_desc(StringInfo buf, uint8 xl_info, char *rec) desc_node(buf, ((ginxlogSplit *) rec)->node, ((ginxlogSplit *) rec)->lblkno); appendStringInfo(buf, " isrootsplit: %c", (((ginxlogSplit *) rec)->flags & GIN_SPLIT_ROOT) ? 'T' : 'F'); appendStringInfo(buf, " isdata: %c isleaf: %c", - (xlrec->flags & GIN_INSERT_ISDATA) ? 'T' : 'F', - (xlrec->flags & GIN_INSERT_ISLEAF) ? 'T' : 'F'); + (xlrec->flags & GIN_INSERT_ISDATA) ? 'T' : 'F', + (xlrec->flags & GIN_INSERT_ISLEAF) ? 
'T' : 'F'); } break; case XLOG_GIN_VACUUM_PAGE: @@ -155,6 +156,7 @@ gin_desc(StringInfo buf, uint8 xl_info, char *rec) case XLOG_GIN_VACUUM_DATA_LEAF_PAGE: { ginxlogVacuumDataLeafPage *xlrec = (ginxlogVacuumDataLeafPage *) rec; + appendStringInfoString(buf, "Vacuum data leaf page, "); desc_node(buf, xlrec->node, xlrec->blkno); if (xl_info & XLR_BKP_BLOCK(0)) diff --git a/src/backend/access/rmgrdesc/nbtdesc.c b/src/backend/access/rmgrdesc/nbtdesc.c index af7663b8cac..a3c746f1a84 100644 --- a/src/backend/access/rmgrdesc/nbtdesc.c +++ b/src/backend/access/rmgrdesc/nbtdesc.c @@ -140,7 +140,7 @@ btree_desc(StringInfo buf, uint8 xl_info, char *rec) xl_btree_unlink_page *xlrec = (xl_btree_unlink_page *) rec; appendStringInfo(buf, "unlink_page: rel %u/%u/%u; ", - xlrec->node.spcNode, xlrec->node.dbNode, xlrec->node.relNode); + xlrec->node.spcNode, xlrec->node.dbNode, xlrec->node.relNode); appendStringInfo(buf, "dead %u; left %u; right %u; btpo_xact %u; ", xlrec->deadblk, xlrec->leftsib, xlrec->rightsib, xlrec->btpo_xact); appendStringInfo(buf, "leaf %u; leafleft %u; leafright %u; topparent %u", diff --git a/src/backend/access/spgist/spgdoinsert.c b/src/backend/access/spgist/spgdoinsert.c index 48f32cda241..c08d211104d 100644 --- a/src/backend/access/spgist/spgdoinsert.c +++ b/src/backend/access/spgist/spgdoinsert.c @@ -25,7 +25,7 @@ /* * SPPageDesc tracks all info about a page we are inserting into. In some * situations it actually identifies a tuple, or even a specific node within - * an inner tuple. But any of the fields can be invalid. If the buffer + * an inner tuple. But any of the fields can be invalid. If the buffer * field is valid, it implies we hold pin and exclusive lock on that buffer. * page pointer should be valid exactly when buffer is. */ @@ -249,7 +249,7 @@ addLeafTuple(Relation index, SpGistState *state, SpGistLeafTuple leafTuple, else { /* - * Tuple must be inserted into existing chain. We mustn't change the + * Tuple must be inserted into existing chain. We mustn't change the * chain's head address, but we don't need to chase the entire chain * to put the tuple at the end; we can insert it second. * @@ -814,7 +814,7 @@ doPickSplit(Relation index, SpGistState *state, * We may not actually insert new tuple because another picksplit may be * necessary due to too large value, but we will try to allocate enough * space to include it; and in any case it has to be included in the input - * for the picksplit function. So don't increment nToInsert yet. + * for the picksplit function. So don't increment nToInsert yet. */ in.datums[in.nTuples] = SGLTDATUM(newLeafTuple, state); heapPtrs[in.nTuples] = newLeafTuple->heapPtr; @@ -872,7 +872,7 @@ doPickSplit(Relation index, SpGistState *state, /* * Check to see if the picksplit function failed to separate the values, * ie, it put them all into the same child node. If so, select allTheSame - * mode and create a random split instead. See comments for + * mode and create a random split instead. See comments for * checkAllTheSame as to why we need to know if the new leaf tuples could * fit on one page. */ @@ -1037,7 +1037,7 @@ doPickSplit(Relation index, SpGistState *state, &xlrec.initDest); /* - * Attempt to assign node groups to the two pages. We might fail to + * Attempt to assign node groups to the two pages. We might fail to * do so, even if totalLeafSizes is less than the available space, * because we can't split a group across pages. 
*/ @@ -1917,7 +1917,7 @@ spgdoinsert(Relation index, SpGistState *state, if (current.blkno == InvalidBlockNumber) { /* - * Create a leaf page. If leafSize is too large to fit on a page, + * Create a leaf page. If leafSize is too large to fit on a page, * we won't actually use the page yet, but it simplifies the API * for doPickSplit to always have a leaf page at hand; so just * quietly limit our request to a page size. @@ -2120,7 +2120,7 @@ spgdoinsert(Relation index, SpGistState *state, out.result.addNode.nodeLabel); /* - * Retry insertion into the enlarged node. We assume that + * Retry insertion into the enlarged node. We assume that * we'll get a MatchNode result this time. */ goto process_inner_tuple; diff --git a/src/backend/access/spgist/spginsert.c b/src/backend/access/spgist/spginsert.c index 2b1b49348cf..a4408f03bd3 100644 --- a/src/backend/access/spgist/spginsert.c +++ b/src/backend/access/spgist/spginsert.c @@ -163,7 +163,7 @@ spgbuildempty(PG_FUNCTION_ARGS) page = (Page) palloc(BLCKSZ); SpGistInitMetapage(page); - /* Write the page. If archiving/streaming, XLOG it. */ + /* Write the page. If archiving/streaming, XLOG it. */ PageSetChecksumInplace(page, SPGIST_METAPAGE_BLKNO); smgrwrite(index->rd_smgr, INIT_FORKNUM, SPGIST_METAPAGE_BLKNO, (char *) page, true); @@ -232,7 +232,7 @@ spginsert(PG_FUNCTION_ARGS) /* * We might have to repeat spgdoinsert() multiple times, if conflicts * occur with concurrent insertions. If so, reset the insertCtx each time - * to avoid cumulative memory consumption. That means we also have to + * to avoid cumulative memory consumption. That means we also have to * redo initSpGistState(), but it's cheap enough not to matter. */ while (!spgdoinsert(index, &spgstate, ht_ctid, *values, *isnull)) diff --git a/src/backend/access/spgist/spgscan.c b/src/backend/access/spgist/spgscan.c index 0a1e09c51e8..35cc41b3aab 100644 --- a/src/backend/access/spgist/spgscan.c +++ b/src/backend/access/spgist/spgscan.c @@ -103,7 +103,7 @@ resetSpGistScanOpaque(SpGistScanOpaque so) * Sets searchNulls, searchNonNulls, numberOfKeys, keyData fields of *so. * * The point here is to eliminate null-related considerations from what the - * opclass consistent functions need to deal with. We assume all SPGiST- + * opclass consistent functions need to deal with. We assume all SPGiST- * indexable operators are strict, so any null RHS value makes the scan * condition unsatisfiable. We also pull out any IS NULL/IS NOT NULL * conditions; their effect is reflected into searchNulls/searchNonNulls. @@ -600,7 +600,7 @@ storeGettuple(SpGistScanOpaque so, ItemPointer heapPtr, if (so->want_itup) { /* - * Reconstruct desired IndexTuple. We have to copy the datum out of + * Reconstruct desired IndexTuple. We have to copy the datum out of * the temp context anyway, so we may as well create the tuple here. */ so->indexTups[so->nPtrs] = index_form_tuple(so->indexTupDesc, diff --git a/src/backend/access/spgist/spgtextproc.c b/src/backend/access/spgist/spgtextproc.c index bcdd29362d7..5b7a5a06a0f 100644 --- a/src/backend/access/spgist/spgtextproc.c +++ b/src/backend/access/spgist/spgtextproc.c @@ -26,11 +26,11 @@ * In the worst case, an inner tuple in a text radix tree could have as many * as 256 nodes (one for each possible byte value). Each node can take 16 * bytes on MAXALIGN=8 machines. The inner tuple must fit on an index page - * of size BLCKSZ. Rather than assuming we know the exact amount of overhead + * of size BLCKSZ. 
Rather than assuming we know the exact amount of overhead * imposed by page headers, tuple headers, etc, we leave 100 bytes for that * (the actual overhead should be no more than 56 bytes at this writing, so * there is slop in this number). So we can safely create prefixes up to - * BLCKSZ - 256 * 16 - 100 bytes long. Unfortunately, because 256 * 16 is + * BLCKSZ - 256 * 16 - 100 bytes long. Unfortunately, because 256 * 16 is * already 4K, there is no safe prefix length when BLCKSZ is less than 8K; * it is always possible to get "SPGiST inner tuple size exceeds maximum" * if there are too many distinct next-byte values at a given place in the @@ -327,7 +327,7 @@ spg_text_picksplit(PG_FUNCTION_ARGS) } /* - * Sort by label bytes so that we can group the values into nodes. This + * Sort by label bytes so that we can group the values into nodes. This * also ensures that the nodes are ordered by label value, allowing the * use of binary search in searchChar. */ @@ -377,7 +377,7 @@ spg_text_inner_consistent(PG_FUNCTION_ARGS) /* * Reconstruct values represented at this tuple, including parent data, - * prefix of this tuple if any, and the node label if any. in->level + * prefix of this tuple if any, and the node label if any. in->level * should be the length of the previously reconstructed value, and the * number of bytes added here is prefixSize or prefixSize + 1. * diff --git a/src/backend/access/spgist/spgutils.c b/src/backend/access/spgist/spgutils.c index 3cbad99e46a..1a224ef7cc1 100644 --- a/src/backend/access/spgist/spgutils.c +++ b/src/backend/access/spgist/spgutils.c @@ -235,7 +235,7 @@ SpGistUpdateMetaPage(Relation index) * * When requesting an inner page, if we get one with the wrong parity, * we just release the buffer and try again. We will get a different page - * because GetFreeIndexPage will have marked the page used in FSM. The page + * because GetFreeIndexPage will have marked the page used in FSM. The page * is entered in our local lastUsedPages cache, so there's some hope of * making use of it later in this session, but otherwise we rely on VACUUM * to eventually re-enter the page in FSM, making it available for recycling. @@ -245,7 +245,7 @@ SpGistUpdateMetaPage(Relation index) * * When we return a buffer to the caller, the page is *not* entered into * the lastUsedPages cache; we expect the caller will do so after it's taken - * whatever space it will use. This is because after the caller has used up + * whatever space it will use. This is because after the caller has used up * some space, the page might have less space than whatever was cached already * so we'd rather not trash the old cache entry. */ @@ -317,7 +317,7 @@ SpGistGetBuffer(Relation index, int flags, int needSpace, bool *isNew) /* * If possible, increase the space request to include relation's - * fillfactor. This ensures that when we add unrelated tuples to a page, + * fillfactor. This ensures that when we add unrelated tuples to a page, * we try to keep 100-fillfactor% available for adding tuples that are * related to the ones already on it. But fillfactor mustn't cause an * error for requests that would otherwise be legal. diff --git a/src/backend/access/spgist/spgvacuum.c b/src/backend/access/spgist/spgvacuum.c index 633cf7aeae7..19a461be41d 100644 --- a/src/backend/access/spgist/spgvacuum.c +++ b/src/backend/access/spgist/spgvacuum.c @@ -211,7 +211,7 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer, * Figure out exactly what we have to do. 
We do this separately from * actually modifying the page, mainly so that we have a representation * that can be dumped into WAL and then the replay code can do exactly - * the same thing. The output of this step consists of six arrays + * the same thing. The output of this step consists of six arrays * describing four kinds of operations, to be performed in this order: * * toDead[]: tuple numbers to be replaced with DEAD tuples @@ -287,7 +287,7 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer, else { /* - * Second or later live tuple. Arrange to re-chain it to the + * Second or later live tuple. Arrange to re-chain it to the * previous live one, if there was a gap. */ if (interveningDeletable) diff --git a/src/backend/access/spgist/spgxlog.c b/src/backend/access/spgist/spgxlog.c index 1689324f234..cc0184d174d 100644 --- a/src/backend/access/spgist/spgxlog.c +++ b/src/backend/access/spgist/spgxlog.c @@ -41,7 +41,7 @@ fillFakeState(SpGistState *state, spgxlogState stateSrc) } /* - * Add a leaf tuple, or replace an existing placeholder tuple. This is used + * Add a leaf tuple, or replace an existing placeholder tuple. This is used * to replay SpGistPageAddNewItem() operations. If the offset points at an * existing tuple, it had better be a placeholder tuple. */ @@ -462,7 +462,7 @@ spgRedoAddNode(XLogRecPtr lsn, XLogRecord *record) } /* - * Update parent downlink. Since parent could be in either of the + * Update parent downlink. Since parent could be in either of the * previous two buffers, it's a bit tricky to determine which BKP bit * applies. */ @@ -799,7 +799,7 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record) bbi++; /* - * Now we can release the leaf-page locks. It's okay to do this before + * Now we can release the leaf-page locks. It's okay to do this before * updating the parent downlink. */ if (BufferIsValid(srcBuffer)) diff --git a/src/backend/access/transam/clog.c b/src/backend/access/transam/clog.c index 0eadd776af6..27ca4c65673 100644 --- a/src/backend/access/transam/clog.c +++ b/src/backend/access/transam/clog.c @@ -11,15 +11,15 @@ * log can be broken into relatively small, independent segments. * * XLOG interactions: this module generates an XLOG record whenever a new - * CLOG page is initialized to zeroes. Other writes of CLOG come from + * CLOG page is initialized to zeroes. Other writes of CLOG come from * recording of transaction commit or abort in xact.c, which generates its * own XLOG records for these events and will re-perform the status update - * on redo; so we need make no additional XLOG entry here. For synchronous + * on redo; so we need make no additional XLOG entry here. For synchronous * transaction commits, the XLOG is guaranteed flushed through the XLOG commit * record before we are called to log a commit, so the WAL rule "write xlog * before data" is satisfied automatically. However, for async commits we * must track the latest LSN affecting each CLOG page, so that we can flush - * XLOG that far and satisfy the WAL rule. We don't have to worry about this + * XLOG that far and satisfy the WAL rule. We don't have to worry about this * for aborts (whether sync or async), since the post-crash assumption would * be that such transactions failed anyway. * @@ -105,7 +105,7 @@ static void set_status_by_pages(int nsubxids, TransactionId *subxids, * in the tree of xid. In various cases nsubxids may be zero. * * lsn must be the WAL location of the commit record when recording an async - * commit. 
For a synchronous commit it can be InvalidXLogRecPtr, since the + * commit. For a synchronous commit it can be InvalidXLogRecPtr, since the * caller guarantees the commit record is already flushed in that case. It * should be InvalidXLogRecPtr for abort cases, too. * @@ -417,7 +417,7 @@ TransactionIdGetStatus(TransactionId xid, XLogRecPtr *lsn) * Testing during the PostgreSQL 9.2 development cycle revealed that on a * large multi-processor system, it was possible to have more CLOG page * requests in flight at one time than the numebr of CLOG buffers which existed - * at that time, which was hardcoded to 8. Further testing revealed that + * at that time, which was hardcoded to 8. Further testing revealed that * performance dropped off with more than 32 CLOG buffers, possibly because * the linear buffer search algorithm doesn't scale well. * diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c index 459f59cb4e0..9da22c8bdfc 100644 --- a/src/backend/access/transam/multixact.c +++ b/src/backend/access/transam/multixact.c @@ -5,7 +5,7 @@ * * The pg_multixact manager is a pg_clog-like manager that stores an array of * MultiXactMember for each MultiXactId. It is a fundamental part of the - * shared-row-lock implementation. Each MultiXactMember is comprised of a + * shared-row-lock implementation. Each MultiXactMember is comprised of a * TransactionId and a set of flag bits. The name is a bit historical: * originally, a MultiXactId consisted of more than one TransactionId (except * in rare corner cases), hence "multi". Nowadays, however, it's perfectly @@ -18,7 +18,7 @@ * * We use two SLRU areas, one for storing the offsets at which the data * starts for each MultiXactId in the other one. This trick allows us to - * store variable length arrays of TransactionIds. (We could alternatively + * store variable length arrays of TransactionIds. (We could alternatively * use one area containing counts and TransactionIds, with valid MultiXactId * values pointing at slots containing counts; but that way seems less robust * since it would get completely confused if someone inquired about a bogus @@ -38,7 +38,7 @@ * * Like clog.c, and unlike subtrans.c, we have to preserve state across * crashes and ensure that MXID and offset numbering increases monotonically - * across a crash. We do this in the same way as it's done for transaction + * across a crash. We do this in the same way as it's done for transaction * IDs: the WAL record is guaranteed to contain evidence of every MXID we * could need to worry about, and we just make sure that at the end of * replay, the next-MXID and next-offset counters are at least as large as @@ -50,7 +50,7 @@ * The minimum value in each database is stored in pg_database, and the * global minimum is part of pg_control. Any vacuum that is able to * advance its database's minimum value also computes a new global minimum, - * and uses this value to truncate older segments. When new multixactid + * and uses this value to truncate older segments. When new multixactid * values are to be created, care is taken that the counter does not * fall within the wraparound horizon considering the global minimum value. * @@ -85,13 +85,13 @@ /* - * Defines for MultiXactOffset page sizes. A page is the same BLCKSZ as is + * Defines for MultiXactOffset page sizes. A page is the same BLCKSZ as is * used everywhere else in Postgres. 
* * Note: because MultiXactOffsets are 32 bits and wrap around at 0xFFFFFFFF, * MultiXact page numbering also wraps around at * 0xFFFFFFFF/MULTIXACT_OFFSETS_PER_PAGE, and segment numbering at - * 0xFFFFFFFF/MULTIXACT_OFFSETS_PER_PAGE/SLRU_SEGMENTS_PER_PAGE. We need + * 0xFFFFFFFF/MULTIXACT_OFFSETS_PER_PAGE/SLRU_SEGMENTS_PER_PAGE. We need * take no explicit notice of that fact in this module, except when comparing * segment and page numbers in TruncateMultiXact (see * MultiXactOffsetPagePrecedes). @@ -110,7 +110,7 @@ * additional flag bits for each TransactionId. To do this without getting * into alignment issues, we store four bytes of flags, and then the * corresponding 4 Xids. Each such 5-word (20-byte) set we call a "group", and - * are stored as a whole in pages. Thus, with 8kB BLCKSZ, we keep 409 groups + * are stored as a whole in pages. Thus, with 8kB BLCKSZ, we keep 409 groups * per page. This wastes 12 bytes per page, but that's OK -- simplicity (and * performance) trumps space efficiency here. * @@ -161,7 +161,7 @@ static SlruCtlData MultiXactMemberCtlData; #define MultiXactMemberCtl (&MultiXactMemberCtlData) /* - * MultiXact state shared across all backends. All this state is protected + * MultiXact state shared across all backends. All this state is protected * by MultiXactGenLock. (We also use MultiXactOffsetControlLock and * MultiXactMemberControlLock to guard accesses to the two sets of SLRU * buffers. For concurrency's sake, we avoid holding more than one of these @@ -179,7 +179,7 @@ typedef struct MultiXactStateData MultiXactId lastTruncationPoint; /* - * oldest multixact that is still on disk. Anything older than this + * oldest multixact that is still on disk. Anything older than this * should not be consulted. */ MultiXactId oldestMultiXactId; @@ -269,8 +269,8 @@ typedef struct mXactCacheEnt } mXactCacheEnt; #define MAX_CACHE_ENTRIES 256 -static dlist_head MXactCache = DLIST_STATIC_INIT(MXactCache); -static int MXactCacheMembers = 0; +static dlist_head MXactCache = DLIST_STATIC_INIT(MXactCache); +static int MXactCacheMembers = 0; static MemoryContext MXactContext = NULL; #ifdef MULTIXACT_DEBUG @@ -528,7 +528,7 @@ MultiXactIdIsRunning(MultiXactId multi) /* * This could be made faster by having another entry point in procarray.c, - * walking the PGPROC array only once for all the members. But in most + * walking the PGPROC array only once for all the members. But in most * cases nmembers should be small enough that it doesn't much matter. */ for (i = 0; i < nmembers; i++) @@ -579,9 +579,9 @@ MultiXactIdSetOldestMember(void) * back. Which would be wrong. * * Note that a shared lock is sufficient, because it's enough to stop - * someone from advancing nextMXact; and nobody else could be trying to - * write to our OldestMember entry, only reading (and we assume storing - * it is atomic.) + * someone from advancing nextMXact; and nobody else could be trying + * to write to our OldestMember entry, only reading (and we assume + * storing it is atomic.) */ LWLockAcquire(MultiXactGenLock, LW_SHARED); @@ -615,7 +615,7 @@ MultiXactIdSetOldestMember(void) * The value to set is the oldest of nextMXact and all the valid per-backend * OldestMemberMXactId[] entries. Because of the locking we do, we can be * certain that no subsequent call to MultiXactIdSetOldestMember can set - * an OldestMemberMXactId[] entry older than what we compute here. Therefore + * an OldestMemberMXactId[] entry older than what we compute here. 
Therefore * there is no live transaction, now or later, that can be a member of any * MultiXactId older than the OldestVisibleMXactId we compute here. */ @@ -751,7 +751,7 @@ MultiXactIdCreateFromMembers(int nmembers, MultiXactMember *members) * heap_lock_tuple() to have put it there, and heap_lock_tuple() generates * an XLOG record that must follow ours. The normal LSN interlock between * the data page and that XLOG record will ensure that our XLOG record - * reaches disk first. If the SLRU members/offsets data reaches disk + * reaches disk first. If the SLRU members/offsets data reaches disk * sooner than the XLOG record, we do not care because we'll overwrite it * with zeroes unless the XLOG record is there too; see notes at top of * this file. @@ -882,7 +882,7 @@ RecordNewMultiXact(MultiXactId multi, MultiXactOffset offset, * GetNewMultiXactId * Get the next MultiXactId. * - * Also, reserve the needed amount of space in the "members" area. The + * Also, reserve the needed amount of space in the "members" area. The * starting offset of the reserved space is returned in *offset. * * This may generate XLOG records for expansion of the offsets and/or members @@ -916,7 +916,7 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset) /*---------- * Check to see if it's safe to assign another MultiXactId. This protects - * against catastrophic data loss due to multixact wraparound. The basic + * against catastrophic data loss due to multixact wraparound. The basic * rules are: * * If we're past multiVacLimit, start trying to force autovacuum cycles. @@ -930,7 +930,7 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset) { /* * For safety's sake, we release MultiXactGenLock while sending - * signals, warnings, etc. This is not so much because we care about + * signals, warnings, etc. This is not so much because we care about * preserving concurrency in this situation, as to avoid any * possibility of deadlock while doing get_database_name(). First, * copy all the shared values we'll need in this path. @@ -981,8 +981,8 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset) (errmsg_plural("database \"%s\" must be vacuumed before %u more MultiXactId is used", "database \"%s\" must be vacuumed before %u more MultiXactIds are used", multiWrapLimit - result, - oldest_datname, - multiWrapLimit - result), + oldest_datname, + multiWrapLimit - result), errhint("Execute a database-wide VACUUM in that database.\n" "You might also need to commit or roll back old prepared transactions."))); else @@ -990,8 +990,8 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset) (errmsg_plural("database with OID %u must be vacuumed before %u more MultiXactId is used", "database with OID %u must be vacuumed before %u more MultiXactIds are used", multiWrapLimit - result, - oldest_datoid, - multiWrapLimit - result), + oldest_datoid, + multiWrapLimit - result), errhint("Execute a database-wide VACUUM in that database.\n" "You might also need to commit or roll back old prepared transactions."))); } @@ -1036,7 +1036,7 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset) * until after file extension has succeeded! * * We don't care about MultiXactId wraparound here; it will be handled by - * the next iteration. But note that nextMXact may be InvalidMultiXactId + * the next iteration. But note that nextMXact may be InvalidMultiXactId * or the first value on a segment-beginning page after this routine * exits, so anyone else looking at the variable must be prepared to deal * with either case. 
Similarly, nextOffset may be zero, but we won't use @@ -1114,16 +1114,16 @@ GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, * need to allow an empty set to be returned regardless, if the caller is * willing to accept it; the caller is expected to check that it's an * allowed condition (such as ensuring that the infomask bits set on the - * tuple are consistent with the pg_upgrade scenario). If the caller is + * tuple are consistent with the pg_upgrade scenario). If the caller is * expecting this to be called only on recently created multis, then we * raise an error. * * Conversely, an ID >= nextMXact shouldn't ever be seen here; if it is - * seen, it implies undetected ID wraparound has occurred. This raises a + * seen, it implies undetected ID wraparound has occurred. This raises a * hard error. * * Shared lock is enough here since we aren't modifying any global state. - * Acquire it just long enough to grab the current counter values. We may + * Acquire it just long enough to grab the current counter values. We may * need both nextMXact and nextOffset; see below. */ LWLockAcquire(MultiXactGenLock, LW_SHARED); @@ -1151,12 +1151,12 @@ GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, /* * Find out the offset at which we need to start reading MultiXactMembers - * and the number of members in the multixact. We determine the latter as + * and the number of members in the multixact. We determine the latter as * the difference between this multixact's starting offset and the next * one's. However, there are some corner cases to worry about: * * 1. This multixact may be the latest one created, in which case there is - * no next one to look at. In this case the nextOffset value we just + * no next one to look at. In this case the nextOffset value we just * saved is the correct endpoint. * * 2. The next multixact may still be in process of being filled in: that @@ -1167,11 +1167,11 @@ GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, * (because we are careful to pre-zero offset pages). Because * GetNewMultiXactId will never return zero as the starting offset for a * multixact, when we read zero as the next multixact's offset, we know we - * have this case. We sleep for a bit and try again. + * have this case. We sleep for a bit and try again. * * 3. Because GetNewMultiXactId increments offset zero to offset one to * handle case #2, there is an ambiguity near the point of offset - * wraparound. If we see next multixact's offset is one, is that our + * wraparound. If we see next multixact's offset is one, is that our * multixact's actual endpoint, or did it end at zero with a subsequent * increment? We handle this using the knowledge that if the zero'th * member slot wasn't filled, it'll contain zero, and zero isn't a valid @@ -1297,8 +1297,8 @@ retry: /* * MultiXactHasRunningRemoteMembers - * Does the given multixact have still-live members from - * transactions other than our own? + * Does the given multixact have still-live members from + * transactions other than our own? */ bool MultiXactHasRunningRemoteMembers(MultiXactId multi) @@ -1694,7 +1694,7 @@ multixact_twophase_postabort(TransactionId xid, uint16 info, /* * Initialization of shared memory for MultiXact. We use two SLRU areas, - * thus double memory. Also, reserve space for the shared MultiXactState + * thus double memory. Also, reserve space for the shared MultiXactState * struct and the per-backend MultiXactId arrays (two of those, too). 
*/ Size @@ -1754,7 +1754,7 @@ MultiXactShmemInit(void) /* * This func must be called ONCE on system install. It creates the initial - * MultiXact segments. (The MultiXacts directories are assumed to have been + * MultiXact segments. (The MultiXacts directories are assumed to have been * created by initdb, and MultiXactShmemInit must have been called already.) */ void @@ -1849,7 +1849,7 @@ MaybeExtendOffsetSlru(void) if (!SimpleLruDoesPhysicalPageExist(MultiXactOffsetCtl, pageno)) { - int slotno; + int slotno; /* * Fortunately for us, SimpleLruWritePage is already prepared to deal @@ -1925,7 +1925,7 @@ TrimMultiXact(void) MultiXactOffsetCtl->shared->latest_page_number = pageno; /* - * Zero out the remainder of the current offsets page. See notes in + * Zero out the remainder of the current offsets page. See notes in * StartupCLOG() for motivation. */ entryno = MultiXactIdToOffsetEntry(multi); @@ -1955,7 +1955,7 @@ TrimMultiXact(void) MultiXactMemberCtl->shared->latest_page_number = pageno; /* - * Zero out the remainder of the current members page. See notes in + * Zero out the remainder of the current members page. See notes in * TrimCLOG() for motivation. */ flagsoff = MXOffsetToFlagsOffset(offset); @@ -2097,7 +2097,7 @@ SetMultiXactIdLimit(MultiXactId oldest_datminmxid, Oid oldest_datoid) /* * We'll start complaining loudly when we get within 10M multis of the - * stop point. This is kind of arbitrary, but if you let your gas gauge + * stop point. This is kind of arbitrary, but if you let your gas gauge * get down to 1% of full, would you be looking for the next gas station? * We need to be fairly liberal about this number because there are lots * of scenarios where most transactions are done by automatic clients that @@ -2172,8 +2172,8 @@ SetMultiXactIdLimit(MultiXactId oldest_datminmxid, Oid oldest_datoid) (errmsg_plural("database \"%s\" must be vacuumed before %u more MultiXactId is used", "database \"%s\" must be vacuumed before %u more MultiXactIds are used", multiWrapLimit - curMulti, - oldest_datname, - multiWrapLimit - curMulti), + oldest_datname, + multiWrapLimit - curMulti), errhint("To avoid a database shutdown, execute a database-wide VACUUM in that database.\n" "You might also need to commit or roll back old prepared transactions."))); else @@ -2181,8 +2181,8 @@ SetMultiXactIdLimit(MultiXactId oldest_datminmxid, Oid oldest_datoid) (errmsg_plural("database with OID %u must be vacuumed before %u more MultiXactId is used", "database with OID %u must be vacuumed before %u more MultiXactIds are used", multiWrapLimit - curMulti, - oldest_datoid, - multiWrapLimit - curMulti), + oldest_datoid, + multiWrapLimit - curMulti), errhint("To avoid a database shutdown, execute a database-wide VACUUM in that database.\n" "You might also need to commit or roll back old prepared transactions."))); } @@ -2375,16 +2375,16 @@ GetOldestMultiXactId(void) /* * SlruScanDirectory callback. - * This callback deletes segments that are outside the range determined by - * the given page numbers. + * This callback deletes segments that are outside the range determined by + * the given page numbers. * * Both range endpoints are exclusive (that is, segments containing any of * those pages are kept.) 
*/ typedef struct MembersLiveRange { - int rangeStart; - int rangeEnd; + int rangeStart; + int rangeEnd; } MembersLiveRange; static bool @@ -2392,15 +2392,15 @@ SlruScanDirCbRemoveMembers(SlruCtl ctl, char *filename, int segpage, void *data) { MembersLiveRange *range = (MembersLiveRange *) data; - MultiXactOffset nextOffset; + MultiXactOffset nextOffset; if ((segpage == range->rangeStart) || (segpage == range->rangeEnd)) - return false; /* easy case out */ + return false; /* easy case out */ /* - * To ensure that no segment is spuriously removed, we must keep track - * of new segments added since the start of the directory scan; to do this, + * To ensure that no segment is spuriously removed, we must keep track of + * new segments added since the start of the directory scan; to do this, * we update our end-of-range point as we run. * * As an optimization, we can skip looking at shared memory if we know for @@ -2473,10 +2473,10 @@ void TruncateMultiXact(MultiXactId oldestMXact) { MultiXactOffset oldestOffset; - MultiXactOffset nextOffset; + MultiXactOffset nextOffset; mxtruncinfo trunc; MultiXactId earliest; - MembersLiveRange range; + MembersLiveRange range; /* * Note we can't just plow ahead with the truncation; it's possible that diff --git a/src/backend/access/transam/slru.c b/src/backend/access/transam/slru.c index b90db9a417d..1f9a100da85 100644 --- a/src/backend/access/transam/slru.c +++ b/src/backend/access/transam/slru.c @@ -15,7 +15,7 @@ * * We use a control LWLock to protect the shared data structures, plus * per-buffer LWLocks that synchronize I/O for each buffer. The control lock - * must be held to examine or modify any shared state. A process that is + * must be held to examine or modify any shared state. A process that is * reading in or writing out a page buffer does not hold the control lock, * only the per-buffer lock for the buffer it is working on. * @@ -34,7 +34,7 @@ * could have happened while we didn't have the lock). * * As with the regular buffer manager, it is possible for another process - * to re-dirty a page that is currently being written out. This is handled + * to re-dirty a page that is currently being written out. This is handled * by re-setting the page's page_dirty flag. * * @@ -96,7 +96,7 @@ typedef struct SlruFlushData *SlruFlush; * page_lru_count entries to be "reset" to lower values than they should have, * in case a process is delayed while it executes this macro. With care in * SlruSelectLRUPage(), this does little harm, and in any case the absolute - * worst possible consequence is a nonoptimal choice of page to evict. The + * worst possible consequence is a nonoptimal choice of page to evict. The * gain from allowing concurrent reads of SLRU pages seems worth it. */ #define SlruRecentlyUsed(shared, slotno) \ @@ -481,7 +481,7 @@ SimpleLruReadPage_ReadOnly(SlruCtl ctl, int pageno, TransactionId xid) * * NOTE: only one write attempt is made here. Hence, it is possible that * the page is still dirty at exit (if someone else re-dirtied it during - * the write). However, we *do* attempt a fresh write even if the page + * the write). However, we *do* attempt a fresh write even if the page * is already being written; this is for checkpoints. * * Control lock must be held at entry, and will be held at exit. 
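The SlruRecentlyUsed discussion in the slru.c hunk above accepts slightly stale recently-used counters because the worst outcome is a suboptimal eviction choice. The scheme it alludes to can be illustrated with a small standalone sketch: each buffer slot remembers the value of a global access counter when it was last touched, and the victim is the valid slot with the oldest remembered value, never the slot holding the latest page. The slot layout, sizes and names below are simplifications invented for this sketch, not PostgreSQL's actual SlruSelectLRUPage code.

#include <stdint.h>
#include <stdio.h>

#define NUM_SLOTS 4

typedef struct SlruSketch
{
    int      page_number[NUM_SLOTS];    /* page held by each slot, -1 = empty */
    uint32_t page_lru_count[NUM_SLOTS]; /* cur_lru_count value when last touched */
    uint32_t cur_lru_count;             /* global access counter */
    int      latest_page_number;        /* never evict the slot holding this page */
} SlruSketch;

/* Mark a slot recently used; racy updates only make the counters a bit stale. */
static void
sketch_recently_used(SlruSketch *s, int slotno)
{
    s->page_lru_count[slotno] = ++s->cur_lru_count;
}

/*
 * Pick an eviction victim: any empty slot wins outright, otherwise take the
 * least recently used slot that does not hold latest_page_number.
 */
static int
sketch_select_victim(const SlruSketch *s)
{
    int      best = -1;
    uint32_t best_age = 0;

    for (int slotno = 0; slotno < NUM_SLOTS; slotno++)
    {
        uint32_t age;

        if (s->page_number[slotno] == -1)
            return slotno;
        if (s->page_number[slotno] == s->latest_page_number)
            continue;
        age = s->cur_lru_count - s->page_lru_count[slotno];
        if (best < 0 || age > best_age)
        {
            best_age = age;
            best = slotno;
        }
    }
    return best;
}

int
main(void)
{
    SlruSketch s = { { 40, 41, 42, 43 }, { 0, 0, 0, 0 }, 0, 43 };

    for (int i = 0; i < NUM_SLOTS; i++)
        sketch_recently_used(&s, i);    /* slot 0 becomes the oldest */

    printf("victim slot: %d\n", sketch_select_victim(&s));     /* prints 0 */
    return 0;
}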
@@ -634,7 +634,7 @@ SlruPhysicalReadPage(SlruCtl ctl, int pageno, int slotno) * In a crash-and-restart situation, it's possible for us to receive * commands to set the commit status of transactions whose bits are in * already-truncated segments of the commit log (see notes in - * SlruPhysicalWritePage). Hence, if we are InRecovery, allow the case + * SlruPhysicalWritePage). Hence, if we are InRecovery, allow the case * where the file doesn't exist, and return zeroes instead. */ fd = OpenTransientFile(path, O_RDWR | PG_BINARY, S_IRUSR | S_IWUSR); @@ -964,9 +964,9 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno) /* * If we find any EMPTY slot, just select that one. Else choose a - * victim page to replace. We normally take the least recently used + * victim page to replace. We normally take the least recently used * valid page, but we will never take the slot containing - * latest_page_number, even if it appears least recently used. We + * latest_page_number, even if it appears least recently used. We * will select a slot that is already I/O busy only if there is no * other choice: a read-busy slot will not be least recently used once * the read finishes, and waiting for an I/O on a write-busy slot is @@ -1041,7 +1041,7 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno) /* * If all pages (except possibly the latest one) are I/O busy, we'll - * have to wait for an I/O to complete and then retry. In that + * have to wait for an I/O to complete and then retry. In that * unhappy case, we choose to wait for the I/O on the least recently * used slot, on the assumption that it was likely initiated first of * all the I/Os in progress and may therefore finish first. @@ -1193,7 +1193,7 @@ restart:; /* * Hmm, we have (or may have) I/O operations acting on the page, so * we've got to wait for them to finish and then start again. This is - * the same logic as in SlruSelectLRUPage. (XXX if page is dirty, + * the same logic as in SlruSelectLRUPage. (XXX if page is dirty, * wouldn't it be OK to just discard it without writing it? For now, * keep the logic the same as it was.) */ @@ -1293,7 +1293,7 @@ SlruScanDirectory(SlruCtl ctl, SlruScanCallback callback, void *data) cldir = AllocateDir(ctl->Dir); while ((clde = ReadDir(cldir, ctl->Dir)) != NULL) { - size_t len; + size_t len; len = strlen(clde->d_name); diff --git a/src/backend/access/transam/subtrans.c b/src/backend/access/transam/subtrans.c index 2f5cfa0d223..bebaee92160 100644 --- a/src/backend/access/transam/subtrans.c +++ b/src/backend/access/transam/subtrans.c @@ -5,7 +5,7 @@ * * The pg_subtrans manager is a pg_clog-like manager that stores the parent * transaction Id for each transaction. It is a fundamental part of the - * nested transactions implementation. A main transaction has a parent + * nested transactions implementation. A main transaction has a parent * of InvalidTransactionId, and each subtransaction has its immediate parent. * The tree can easily be walked from child to parent, but not in the * opposite direction. @@ -191,7 +191,7 @@ SUBTRANSShmemInit(void) * must have been called already.) * * Note: it's not really necessary to create the initial segment now, - * since slru.c would create it on first write anyway. But we may as well + * since slru.c would create it on first write anyway. But we may as well * do it to be sure the directory is set up correctly. 
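The pg_subtrans header comment in the hunk above describes a structure that can only be walked from child to parent: every subtransaction records its immediate parent, and top-level transactions record an invalid parent. A tiny self-contained illustration of that walk follows; the flat parent array and the InvalidXid sentinel are stand-ins for the SLRU-backed storage pg_subtrans actually uses.

#include <stdio.h>
#include <stdint.h>

typedef uint32_t Xid;

#define InvalidXid 0                    /* top-level transactions have this parent */
#define MAX_XID    16

/* parent[x] is the immediate parent of transaction x, or InvalidXid. */
static Xid parent[MAX_XID];

/* Walk child -> parent until a top-level transaction is reached. */
static Xid
topmost_ancestor(Xid xid)
{
    while (parent[xid] != InvalidXid)
        xid = parent[xid];
    return xid;
}

int
main(void)
{
    parent[5] = InvalidXid;             /* 5 is a top-level transaction */
    parent[6] = 5;                      /* 6 is a subtransaction of 5   */
    parent[7] = 6;                      /* 7 is nested inside 6         */

    printf("topmost ancestor of 7 is %u\n", (unsigned) topmost_ancestor(7));
    return 0;
}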
*/ void diff --git a/src/backend/access/transam/timeline.c b/src/backend/access/transam/timeline.c index 319a2185410..2d27b3ae318 100644 --- a/src/backend/access/transam/timeline.c +++ b/src/backend/access/transam/timeline.c @@ -66,7 +66,7 @@ restoreTimeLineHistoryFiles(TimeLineID begin, TimeLineID end) * Try to read a timeline's history file. * * If successful, return the list of component TLIs (the given TLI followed by - * its ancestor TLIs). If we can't find the history file, assume that the + * its ancestor TLIs). If we can't find the history file, assume that the * timeline has no parents, and return a list of just the specified timeline * ID. */ @@ -150,7 +150,7 @@ readTimeLineHistory(TimeLineID targetTLI) if (nfields != 3) ereport(FATAL, (errmsg("syntax error in history file: %s", fline), - errhint("Expected a transaction log switchpoint location."))); + errhint("Expected a transaction log switchpoint location."))); if (result && tli <= lasttli) ereport(FATAL, @@ -281,7 +281,7 @@ findNewestTimeLine(TimeLineID startTLI) * reason: human-readable explanation of why the timeline was switched * * Currently this is only used at the end recovery, and so there are no locking - * considerations. But we should be just as tense as XLogFileInit to avoid + * considerations. But we should be just as tense as XLogFileInit to avoid * emplacing a bogus file. */ void @@ -418,7 +418,7 @@ writeTimeLineHistory(TimeLineID newTLI, TimeLineID parentTLI, /* * Prefer link() to rename() here just to be really sure that we don't - * overwrite an existing file. However, there shouldn't be one, so + * overwrite an existing file. However, there shouldn't be one, so * rename() is an acceptable substitute except for the truly paranoid. */ #if HAVE_WORKING_LINK diff --git a/src/backend/access/transam/transam.c b/src/backend/access/transam/transam.c index 8965319551e..12982d9b556 100644 --- a/src/backend/access/transam/transam.c +++ b/src/backend/access/transam/transam.c @@ -145,7 +145,7 @@ TransactionIdDidCommit(TransactionId transactionId) * be a window just after database startup where we do not have complete * knowledge in pg_subtrans of the transactions after TransactionXmin. * StartupSUBTRANS() has ensured that any missing information will be - * zeroed. Since this case should not happen under normal conditions, it + * zeroed. Since this case should not happen under normal conditions, it * seems reasonable to emit a WARNING for it. */ if (xidstatus == TRANSACTION_STATUS_SUB_COMMITTED) @@ -301,7 +301,7 @@ TransactionIdPrecedes(TransactionId id1, TransactionId id2) { /* * If either ID is a permanent XID then we can just do unsigned - * comparison. If both are normal, do a modulo-2^32 comparison. + * comparison. If both are normal, do a modulo-2^32 comparison. */ int32 diff; diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c index 66dbf584568..70ca6ab67d1 100644 --- a/src/backend/access/transam/twophase.c +++ b/src/backend/access/transam/twophase.c @@ -443,7 +443,7 @@ LockGXact(const char *gid, Oid user) /* * Note: it probably would be possible to allow committing from * another database; but at the moment NOTIFY is known not to work and - * there may be some other issues as well. Hence disallow until + * there may be some other issues as well. Hence disallow until * someone gets motivated to make it work. 
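The TransactionIdPrecedes hunk above spells out the circular-XID rule: permanent (special) IDs compare in plain unsigned order, while two normal IDs compare by the sign of their 32-bit difference, so ordering survives wraparound as long as the IDs are within about two billion of each other. A standalone sketch of that comparison follows; the constant mirrors the well-known FirstNormalTransactionId value, but this is an illustration of the technique rather than the shipped function.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t TransactionId;

#define FirstNormalTransactionId ((TransactionId) 3)
#define IsNormalXid(x)           ((x) >= FirstNormalTransactionId)

/* Is id1 logically older than id2 on the 2^32 circle? */
static int
xid_precedes(TransactionId id1, TransactionId id2)
{
    if (!IsNormalXid(id1) || !IsNormalXid(id2))
        return id1 < id2;               /* permanent IDs: plain unsigned order */

    return (int32_t) (id1 - id2) < 0;   /* normal IDs: modulo-2^32 comparison */
}

int
main(void)
{
    /* 10 precedes 20 ... */
    printf("%d\n", xid_precedes(10, 20));
    /* ... and an XID issued just before wraparound precedes one issued just after. */
    printf("%d\n", xid_precedes(UINT32_MAX - 5, FirstNormalTransactionId + 5));
    return 0;
}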
*/ if (MyDatabaseId != proc->databaseId) @@ -1031,7 +1031,7 @@ EndPrepare(GlobalTransaction gxact) * out the correct state file CRC, we have an inconsistency: the xact is * prepared according to WAL but not according to our on-disk state. We * use a critical section to force a PANIC if we are unable to complete - * the write --- then, WAL replay should repair the inconsistency. The + * the write --- then, WAL replay should repair the inconsistency. The * odds of a PANIC actually occurring should be very tiny given that we * were able to write the bogus CRC above. * @@ -1069,7 +1069,7 @@ EndPrepare(GlobalTransaction gxact) errmsg("could not close two-phase state file: %m"))); /* - * Mark the prepared transaction as valid. As soon as xact.c marks + * Mark the prepared transaction as valid. As soon as xact.c marks * MyPgXact as not running our XID (which it will do immediately after * this function returns), others can commit/rollback the xact. * @@ -1336,7 +1336,7 @@ FinishPreparedTransaction(const char *gid, bool isCommit) /* * In case we fail while running the callbacks, mark the gxact invalid so * no one else will try to commit/rollback, and so it can be recycled - * properly later. It is still locked by our XID so it won't go away yet. + * properly later. It is still locked by our XID so it won't go away yet. * * (We assume it's safe to do this without taking TwoPhaseStateLock.) */ @@ -1540,7 +1540,7 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon) * * This approach creates a race condition: someone else could delete a * GXACT between the time we release TwoPhaseStateLock and the time we try - * to open its state file. We handle this by special-casing ENOENT + * to open its state file. We handle this by special-casing ENOENT * failures: if we see that, we verify that the GXACT is no longer valid, * and if so ignore the failure. */ @@ -1621,7 +1621,7 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon) * * We throw away any prepared xacts with main XID beyond nextXid --- if any * are present, it suggests that the DBA has done a PITR recovery to an - * earlier point in time without cleaning out pg_twophase. We dare not + * earlier point in time without cleaning out pg_twophase. We dare not * try to recover such prepared xacts since they likely depend on database * state that doesn't exist now. * @@ -1713,7 +1713,7 @@ PrescanPreparedTransactions(TransactionId **xids_p, int *nxids_p) * XID, and they may force us to advance nextXid. * * We don't expect anyone else to modify nextXid, hence we don't - * need to hold a lock while examining it. We still acquire the + * need to hold a lock while examining it. We still acquire the * lock to modify it, though. */ subxids = (TransactionId *) diff --git a/src/backend/access/transam/varsup.c b/src/backend/access/transam/varsup.c index 51b6b1a3021..7013fb894b4 100644 --- a/src/backend/access/transam/varsup.c +++ b/src/backend/access/transam/varsup.c @@ -39,7 +39,7 @@ VariableCache ShmemVariableCache = NULL; * * Note: when this is called, we are actually already inside a valid * transaction, since XIDs are now not allocated until the transaction - * does something. So it is safe to do a database lookup if we want to + * does something. So it is safe to do a database lookup if we want to * issue a warning about XID wrap. */ TransactionId @@ -165,20 +165,20 @@ GetNewTransactionId(bool isSubXact) /* * Now advance the nextXid counter. 
This must not happen until after we * have successfully completed ExtendCLOG() --- if that routine fails, we - * want the next incoming transaction to try it again. We cannot assign + * want the next incoming transaction to try it again. We cannot assign * more XIDs until there is CLOG space for them. */ TransactionIdAdvance(ShmemVariableCache->nextXid); /* * We must store the new XID into the shared ProcArray before releasing - * XidGenLock. This ensures that every active XID older than + * XidGenLock. This ensures that every active XID older than * latestCompletedXid is present in the ProcArray, which is essential for * correct OldestXmin tracking; see src/backend/access/transam/README. * * XXX by storing xid into MyPgXact without acquiring ProcArrayLock, we * are relying on fetch/store of an xid to be atomic, else other backends - * might see a partially-set xid here. But holding both locks at once + * might see a partially-set xid here. But holding both locks at once * would be a nasty concurrency hit. So for now, assume atomicity. * * Note that readers of PGXACT xid fields should be careful to fetch the @@ -289,7 +289,7 @@ SetTransactionIdLimit(TransactionId oldest_datfrozenxid, Oid oldest_datoid) /* * We'll start complaining loudly when we get within 10M transactions of - * the stop point. This is kind of arbitrary, but if you let your gas + * the stop point. This is kind of arbitrary, but if you let your gas * gauge get down to 1% of full, would you be looking for the next gas * station? We need to be fairly liberal about this number because there * are lots of scenarios where most transactions are done by automatic @@ -390,7 +390,7 @@ SetTransactionIdLimit(TransactionId oldest_datfrozenxid, Oid oldest_datoid) * We primarily check whether oldestXidDB is valid. The cases we have in * mind are that that database was dropped, or the field was reset to zero * by pg_resetxlog. In either case we should force recalculation of the - * wrap limit. Also do it if oldestXid is old enough to be forcing + * wrap limit. Also do it if oldestXid is old enough to be forcing * autovacuums or other actions; this ensures we update our state as soon * as possible once extra overhead is being incurred. */ diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c index 9ee11f34f2c..3e744097c79 100644 --- a/src/backend/access/transam/xact.c +++ b/src/backend/access/transam/xact.c @@ -270,7 +270,7 @@ static void CallSubXactCallbacks(SubXactEvent event, SubTransactionId parentSubid); static void CleanupTransaction(void); static void CheckTransactionChain(bool isTopLevel, bool throwError, - const char *stmtType); + const char *stmtType); static void CommitTransaction(void); static TransactionId RecordTransactionAbort(bool isSubXact); static void StartTransaction(void); @@ -450,7 +450,7 @@ AssignTransactionId(TransactionState s) { bool isSubXact = (s->parent != NULL); ResourceOwner currentOwner; - bool log_unknown_top = false; + bool log_unknown_top = false; /* Assert that caller didn't screw up */ Assert(!TransactionIdIsValid(s->transactionId)); @@ -487,8 +487,8 @@ AssignTransactionId(TransactionState s) /* * When wal_level=logical, guarantee that a subtransaction's xid can only - * be seen in the WAL stream if its toplevel xid has been logged - * before. If necessary we log a xact_assignment record with fewer than + * be seen in the WAL stream if its toplevel xid has been logged before. + * If necessary we log a xact_assignment record with fewer than * PGPROC_MAX_CACHED_SUBXIDS. 
Note that it is fine if didLogXid isn't set * for a transaction even though it appears in a WAL record, we just might * superfluously log something. That can happen when an xid is included @@ -637,7 +637,7 @@ SubTransactionIsActive(SubTransactionId subxid) * * "used" must be TRUE if the caller intends to use the command ID to mark * inserted/updated/deleted tuples. FALSE means the ID is being fetched - * for read-only purposes (ie, as a snapshot validity cutoff). See + * for read-only purposes (ie, as a snapshot validity cutoff). See * CommandCounterIncrement() for discussion. */ CommandId @@ -724,7 +724,7 @@ TransactionIdIsCurrentTransactionId(TransactionId xid) /* * We always say that BootstrapTransactionId is "not my transaction ID" - * even when it is (ie, during bootstrap). Along with the fact that + * even when it is (ie, during bootstrap). Along with the fact that * transam.c always treats BootstrapTransactionId as already committed, * this causes the tqual.c routines to see all tuples as committed, which * is what we need during bootstrap. (Bootstrap mode only inserts tuples, @@ -866,7 +866,7 @@ AtStart_Memory(void) /* * If this is the first time through, create a private context for * AbortTransaction to work in. By reserving some space now, we can - * insulate AbortTransaction from out-of-memory scenarios. Like + * insulate AbortTransaction from out-of-memory scenarios. Like * ErrorContext, we set it up with slow growth rate and a nonzero minimum * size, so that space will be reserved immediately. */ @@ -969,7 +969,7 @@ AtSubStart_ResourceOwner(void) Assert(s->parent != NULL); /* - * Create a resource owner for the subtransaction. We make it a child of + * Create a resource owner for the subtransaction. We make it a child of * the immediate parent's resource owner. */ s->curTransactionOwner = @@ -989,7 +989,7 @@ AtSubStart_ResourceOwner(void) * RecordTransactionCommit * * Returns latest XID among xact and its children, or InvalidTransactionId - * if the xact has no XID. (We compute that here just because it's easier.) + * if the xact has no XID. (We compute that here just because it's easier.) */ static TransactionId RecordTransactionCommit(void) @@ -1034,7 +1034,7 @@ RecordTransactionCommit(void) /* * If we didn't create XLOG entries, we're done here; otherwise we - * should flush those entries the same as a commit record. (An + * should flush those entries the same as a commit record. (An * example of a possible record that wouldn't cause an XID to be * assigned is a sequence advance record due to nextval() --- we want * to flush that to disk before reporting commit.) @@ -1051,7 +1051,7 @@ RecordTransactionCommit(void) BufmgrCommit(); /* - * Mark ourselves as within our "commit critical section". This + * Mark ourselves as within our "commit critical section". This * forces any concurrent checkpoint to wait until we've updated * pg_clog. Without this, it is possible for the checkpoint to set * REDO after the XLOG record but fail to flush the pg_clog update to @@ -1059,7 +1059,7 @@ RecordTransactionCommit(void) * crashes a little later. * * Note: we could, but don't bother to, set this flag in - * RecordTransactionAbort. That's because loss of a transaction abort + * RecordTransactionAbort. That's because loss of a transaction abort * is noncritical; the presumption would be that it aborted, anyway. * * It's safe to change the delayChkpt flag of our own backend without @@ -1168,15 +1168,15 @@ RecordTransactionCommit(void) /* * Check if we want to commit asynchronously. 
We can allow the XLOG flush * to happen asynchronously if synchronous_commit=off, or if the current - * transaction has not performed any WAL-logged operation. The latter + * transaction has not performed any WAL-logged operation. The latter * case can arise if the current transaction wrote only to temporary - * and/or unlogged tables. In case of a crash, the loss of such a + * and/or unlogged tables. In case of a crash, the loss of such a * transaction will be irrelevant since temp tables will be lost anyway, * and unlogged tables will be truncated. (Given the foregoing, you might * think that it would be unnecessary to emit the XLOG record at all in * this case, but we don't currently try to do that. It would certainly * cause problems at least in Hot Standby mode, where the - * KnownAssignedXids machinery requires tracking every XID assignment. It + * KnownAssignedXids machinery requires tracking every XID assignment. It * might be OK to skip it only when wal_level < hot_standby, but for now * we don't.) * @@ -1423,7 +1423,7 @@ AtSubCommit_childXids(void) * RecordTransactionAbort * * Returns latest XID among xact and its children, or InvalidTransactionId - * if the xact has no XID. (We compute that here just because it's easier.) + * if the xact has no XID. (We compute that here just because it's easier.) */ static TransactionId RecordTransactionAbort(bool isSubXact) @@ -1440,7 +1440,7 @@ RecordTransactionAbort(bool isSubXact) /* * If we haven't been assigned an XID, nobody will care whether we aborted - * or not. Hence, we're done in that case. It does not matter if we have + * or not. Hence, we're done in that case. It does not matter if we have * rels to delete (note that this routine is not responsible for actually * deleting 'em). We cannot have any child XIDs, either. */ @@ -1456,7 +1456,7 @@ RecordTransactionAbort(bool isSubXact) * We have a valid XID, so we should write an ABORT record for it. * * We do not flush XLOG to disk here, since the default assumption after a - * crash would be that we aborted, anyway. For the same reason, we don't + * crash would be that we aborted, anyway. For the same reason, we don't * need to worry about interlocking against checkpoint start. */ @@ -1624,7 +1624,7 @@ AtSubAbort_childXids(void) /* * We keep the child-XID arrays in TopTransactionContext (see - * AtSubCommit_childXids). This means we'd better free the array + * AtSubCommit_childXids). This means we'd better free the array * explicitly at abort to avoid leakage. */ if (s->childXids != NULL) @@ -1802,7 +1802,7 @@ StartTransaction(void) VirtualXactLockTableInsert(vxid); /* - * Advertise it in the proc array. We assume assignment of + * Advertise it in the proc array. We assume assignment of * LocalTransactionID is atomic, and the backendId should be set already. */ Assert(MyProc->backendId == vxid.backendId); @@ -1899,7 +1899,7 @@ CommitTransaction(void) /* * The remaining actions cannot call any user-defined code, so it's safe - * to start shutting down within-transaction services. But note that most + * to start shutting down within-transaction services. But note that most * of this stuff could still throw an error, which would switch us into * the transaction-abort path. */ @@ -2104,7 +2104,7 @@ PrepareTransaction(void) /* * The remaining actions cannot call any user-defined code, so it's safe - * to start shutting down within-transaction services. But note that most + * to start shutting down within-transaction services. 
But note that most * of this stuff could still throw an error, which would switch us into * the transaction-abort path. */ @@ -2224,7 +2224,7 @@ PrepareTransaction(void) XactLastRecEnd = 0; /* - * Let others know about no transaction in progress by me. This has to be + * Let others know about no transaction in progress by me. This has to be * done *after* the prepared transaction has been marked valid, else * someone may think it is unlocked and recyclable. */ @@ -2233,7 +2233,7 @@ PrepareTransaction(void) /* * This is all post-transaction cleanup. Note that if an error is raised * here, it's too late to abort the transaction. This should be just - * noncritical resource releasing. See notes in CommitTransaction. + * noncritical resource releasing. See notes in CommitTransaction. */ CallXactCallbacks(XACT_EVENT_PREPARE); @@ -2411,7 +2411,7 @@ AbortTransaction(void) ProcArrayEndTransaction(MyProc, latestXid); /* - * Post-abort cleanup. See notes in CommitTransaction() concerning + * Post-abort cleanup. See notes in CommitTransaction() concerning * ordering. We can skip all of it if the transaction failed before * creating a resource owner. */ @@ -2646,7 +2646,7 @@ CommitTransactionCommand(void) /* * Here we were in a perfectly good transaction block but the user - * told us to ROLLBACK anyway. We have to abort the transaction + * told us to ROLLBACK anyway. We have to abort the transaction * and then clean up. */ case TBLOCK_ABORT_PENDING: @@ -2666,7 +2666,7 @@ CommitTransactionCommand(void) /* * We were just issued a SAVEPOINT inside a transaction block. - * Start a subtransaction. (DefineSavepoint already did + * Start a subtransaction. (DefineSavepoint already did * PushTransaction, so as to have someplace to put the SUBBEGIN * state.) */ @@ -2870,7 +2870,7 @@ AbortCurrentTransaction(void) break; /* - * Here, we failed while trying to COMMIT. Clean up the + * Here, we failed while trying to COMMIT. Clean up the * transaction and return to idle state (we do not want to stay in * the transaction). */ @@ -2932,7 +2932,7 @@ AbortCurrentTransaction(void) /* * If we failed while trying to create a subtransaction, clean up - * the broken subtransaction and abort the parent. The same + * the broken subtransaction and abort the parent. The same * applies if we get a failure while ending a subtransaction. */ case TBLOCK_SUBBEGIN: @@ -3485,7 +3485,7 @@ UserAbortTransactionBlock(void) break; /* - * We are inside a subtransaction. Mark everything up to top + * We are inside a subtransaction. Mark everything up to top * level as exitable. */ case TBLOCK_SUBINPROGRESS: @@ -3619,7 +3619,7 @@ ReleaseSavepoint(List *options) break; /* - * We are in a non-aborted subtransaction. This is the only valid + * We are in a non-aborted subtransaction. This is the only valid * case. */ case TBLOCK_SUBINPROGRESS: @@ -3676,7 +3676,7 @@ ReleaseSavepoint(List *options) /* * Mark "commit pending" all subtransactions up to the target - * subtransaction. The actual commits will happen when control gets to + * subtransaction. The actual commits will happen when control gets to * CommitTransactionCommand. */ xact = CurrentTransactionState; @@ -3775,7 +3775,7 @@ RollbackToSavepoint(List *options) /* * Mark "abort pending" all subtransactions up to the target - * subtransaction. The actual aborts will happen when control gets to + * subtransaction. The actual aborts will happen when control gets to * CommitTransactionCommand. 
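The ReleaseSavepoint and RollbackToSavepoint hunks above only mark the affected subtransactions as "commit pending" or "abort pending"; the real work is deferred until control reaches CommitTransactionCommand. The marking itself is a walk up a parent-linked stack of transaction states, which the simplified sketch below illustrates. The struct, the state names and the lookup by savepoint name are inventions of this sketch, not xact.c's TransactionState machinery.

#include <stdio.h>
#include <string.h>

typedef enum
{
    BLOCK_SUBINPROGRESS,                /* live subtransaction */
    BLOCK_SUBABORT_PENDING,             /* will be aborted by the commit-command path */
    BLOCK_SUBRESTART                    /* target savepoint: aborted and restarted */
} BlockState;

typedef struct TxnState
{
    const char      *savepoint_name;    /* NULL for the top-level transaction */
    BlockState       block_state;
    struct TxnState *parent;
} TxnState;

/*
 * Mark every subtransaction from the innermost one up to the named savepoint:
 * inner ones become "abort pending", the target is marked for restart.
 * Returns 1 if the savepoint was found, 0 otherwise.
 */
static int
mark_rollback_to(TxnState *current, const char *name)
{
    for (TxnState *s = current; s != NULL && s->savepoint_name != NULL; s = s->parent)
    {
        if (strcmp(s->savepoint_name, name) == 0)
        {
            s->block_state = BLOCK_SUBRESTART;
            return 1;
        }
        s->block_state = BLOCK_SUBABORT_PENDING;
    }
    return 0;
}

int
main(void)
{
    TxnState top = { NULL, BLOCK_SUBINPROGRESS, NULL };
    TxnState sp1 = { "sp1", BLOCK_SUBINPROGRESS, &top };
    TxnState sp2 = { "sp2", BLOCK_SUBINPROGRESS, &sp1 };
    int      found = mark_rollback_to(&sp2, "sp1");

    printf("found: %d, sp2: %d, sp1: %d\n",
           found, (int) sp2.block_state, (int) sp1.block_state);
    return 0;
}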
*/ xact = CurrentTransactionState; @@ -4182,7 +4182,7 @@ CommitSubTransaction(void) CommandCounterIncrement(); /* - * Prior to 8.4 we marked subcommit in clog at this point. We now only + * Prior to 8.4 we marked subcommit in clog at this point. We now only * perform that step, if required, as part of the atomic update of the * whole transaction tree at top level commit or abort. */ @@ -4641,7 +4641,7 @@ TransStateAsString(TransState state) /* * xactGetCommittedChildren * - * Gets the list of committed children of the current transaction. The return + * Gets the list of committed children of the current transaction. The return * value is the number of child transactions. *ptr is set to point to an * array of TransactionIds. The array is allocated in TopTransactionContext; * the caller should *not* pfree() it (this is a change from pre-8.4 code!). diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c index a636bb6d2b0..3406fa5a29d 100644 --- a/src/backend/access/transam/xlog.c +++ b/src/backend/access/transam/xlog.c @@ -101,7 +101,7 @@ bool XLOG_DEBUG = false; * future XLOG segment as long as there aren't already XLOGfileslop future * segments; else we'll delete it. This could be made a separate GUC * variable, but at present I think it's sufficient to hardwire it as - * 2*CheckPointSegments+1. Under normal conditions, a checkpoint will free + * 2*CheckPointSegments+1. Under normal conditions, a checkpoint will free * no more than 2*CheckPointSegments log segments, and we want to recycle all * of them; the +1 allows boundary cases to happen without wasting a * delete/create-segment cycle. @@ -190,7 +190,7 @@ static bool LocalHotStandbyActive = false; * 0: unconditionally not allowed to insert XLOG * -1: must check RecoveryInProgress(); disallow until it is false * Most processes start with -1 and transition to 1 after seeing that recovery - * is not in progress. But we can also force the value for special cases. + * is not in progress. But we can also force the value for special cases. * The coding in XLogInsertAllowed() depends on the first two of these states * being numerically the same as bool true and false. */ @@ -223,7 +223,7 @@ static bool recoveryPauseAtTarget = true; static TransactionId recoveryTargetXid; static TimestampTz recoveryTargetTime; static char *recoveryTargetName; -static int min_recovery_apply_delay = 0; +static int min_recovery_apply_delay = 0; static TimestampTz recoveryDelayUntilTime; /* options taken from recovery.conf for XLOG streaming */ @@ -261,7 +261,7 @@ static bool recoveryStopAfter; * * expectedTLEs: a list of TimeLineHistoryEntries for recoveryTargetTLI and the timelines of * its known parents, newest first (so recoveryTargetTLI is always the - * first list member). Only these TLIs are expected to be seen in the WAL + * first list member). Only these TLIs are expected to be seen in the WAL * segments we read, and indeed only these TLIs will be considered as * candidate WAL files to open at all. * @@ -290,7 +290,7 @@ XLogRecPtr XactLastRecEnd = InvalidXLogRecPtr; /* * RedoRecPtr is this backend's local copy of the REDO record pointer * (which is almost but not quite the same as a pointer to the most recent - * CHECKPOINT record). We update this from the shared-memory copy, + * CHECKPOINT record). We update this from the shared-memory copy, * XLogCtl->Insert.RedoRecPtr, whenever we can safely do so (ie, when we * hold an insertion lock). See XLogInsert for details. 
We are also allowed * to update from XLogCtl->RedoRecPtr if we hold the info_lck; @@ -418,11 +418,11 @@ typedef struct XLogCtlInsert slock_t insertpos_lck; /* protects CurrBytePos and PrevBytePos */ /* - * CurrBytePos is the end of reserved WAL. The next record will be inserted - * at that position. PrevBytePos is the start position of the previously - * inserted (or rather, reserved) record - it is copied to the prev-link - * of the next record. These are stored as "usable byte positions" rather - * than XLogRecPtrs (see XLogBytePosToRecPtr()). + * CurrBytePos is the end of reserved WAL. The next record will be + * inserted at that position. PrevBytePos is the start position of the + * previously inserted (or rather, reserved) record - it is copied to the + * prev-link of the next record. These are stored as "usable byte + * positions" rather than XLogRecPtrs (see XLogBytePosToRecPtr()). */ uint64 CurrBytePos; uint64 PrevBytePos; @@ -464,7 +464,7 @@ typedef struct XLogCtlInsert /* * WAL insertion locks. */ - WALInsertLockPadded *WALInsertLocks; + WALInsertLockPadded *WALInsertLocks; LWLockTranche WALInsertLockTranche; int WALInsertLockTrancheId; } XLogCtlInsert; @@ -504,10 +504,11 @@ typedef struct XLogCtlData * Latest initialized page in the cache (last byte position + 1). * * To change the identity of a buffer (and InitializedUpTo), you need to - * hold WALBufMappingLock. To change the identity of a buffer that's still - * dirty, the old page needs to be written out first, and for that you - * need WALWriteLock, and you need to ensure that there are no in-progress - * insertions to the page by calling WaitXLogInsertionsToFinish(). + * hold WALBufMappingLock. To change the identity of a buffer that's + * still dirty, the old page needs to be written out first, and for that + * you need WALWriteLock, and you need to ensure that there are no + * in-progress insertions to the page by calling + * WaitXLogInsertionsToFinish(). */ XLogRecPtr InitializedUpTo; @@ -799,8 +800,8 @@ static void rm_redo_error_callback(void *arg); static int get_sync_bit(int method); static void CopyXLogRecordToWAL(int write_len, bool isLogSwitch, - XLogRecData *rdata, - XLogRecPtr StartPos, XLogRecPtr EndPos); + XLogRecData *rdata, + XLogRecPtr StartPos, XLogRecPtr EndPos); static void ReserveXLogInsertLocation(int size, XLogRecPtr *StartPos, XLogRecPtr *EndPos, XLogRecPtr *PrevPtr); static bool ReserveXLogSwitch(XLogRecPtr *StartPos, XLogRecPtr *EndPos, @@ -860,6 +861,7 @@ XLogInsert(RmgrId rmid, uint8 info, XLogRecData *rdata) if (rechdr == NULL) { static char rechdrbuf[SizeOfXLogRecord + MAXIMUM_ALIGNOF]; + rechdr = (XLogRecord *) MAXALIGN(&rechdrbuf); MemSet(rechdr, 0, SizeOfXLogRecord); } @@ -1075,12 +1077,12 @@ begin:; * record to the shared WAL buffer cache is a two-step process: * * 1. Reserve the right amount of space from the WAL. The current head of - * reserved space is kept in Insert->CurrBytePos, and is protected by - * insertpos_lck. + * reserved space is kept in Insert->CurrBytePos, and is protected by + * insertpos_lck. * * 2. Copy the record to the reserved WAL space. This involves finding the - * correct WAL buffer containing the reserved space, and copying the - * record in place. This can be done concurrently in multiple processes. + * correct WAL buffer containing the reserved space, and copying the + * record in place. This can be done concurrently in multiple processes. * * To keep track of which insertions are still in-progress, each concurrent * inserter acquires an insertion lock. 
In addition to just indicating that @@ -1232,6 +1234,7 @@ begin:; { TRACE_POSTGRESQL_XLOG_SWITCH(); XLogFlush(EndPos); + /* * Even though we reserved the rest of the segment for us, which is * reflected in EndPos, we return a pointer to just the end of the @@ -1272,7 +1275,7 @@ begin:; rdt_lastnormal->next = NULL; initStringInfo(&recordbuf); - for (;rdata != NULL; rdata = rdata->next) + for (; rdata != NULL; rdata = rdata->next) appendBinaryStringInfo(&recordbuf, rdata->data, rdata->len); appendStringInfoString(&buf, " - "); @@ -1514,8 +1517,8 @@ CopyXLogRecordToWAL(int write_len, bool isLogSwitch, XLogRecData *rdata, /* * If this was an xlog-switch, it's not enough to write the switch record, - * we also have to consume all the remaining space in the WAL segment. - * We have already reserved it for us, but we still need to make sure it's + * we also have to consume all the remaining space in the WAL segment. We + * have already reserved it for us, but we still need to make sure it's * allocated and zeroed in the WAL buffers so that when the caller (or * someone else) does XLogWrite(), it can really write out all the zeros. */ @@ -1556,14 +1559,14 @@ WALInsertLockAcquire(void) /* * It doesn't matter which of the WAL insertion locks we acquire, so try - * the one we used last time. If the system isn't particularly busy, - * it's a good bet that it's still available, and it's good to have some + * the one we used last time. If the system isn't particularly busy, it's + * a good bet that it's still available, and it's good to have some * affinity to a particular lock so that you don't unnecessarily bounce * cache lines between processes when there's no contention. * * If this is the first time through in this backend, pick a lock - * (semi-)randomly. This allows the locks to be used evenly if you have - * a lot of very short connections. + * (semi-)randomly. This allows the locks to be used evenly if you have a + * lot of very short connections. */ static int lockToTry = -1; @@ -1583,10 +1586,10 @@ WALInsertLockAcquire(void) /* * If we couldn't get the lock immediately, try another lock next * time. On a system with more insertion locks than concurrent - * inserters, this causes all the inserters to eventually migrate - * to a lock that no-one else is using. On a system with more - * inserters than locks, it still helps to distribute the inserters - * evenly across the locks. + * inserters, this causes all the inserters to eventually migrate to a + * lock that no-one else is using. On a system with more inserters + * than locks, it still helps to distribute the inserters evenly + * across the locks. */ lockToTry = (lockToTry + 1) % num_xloginsert_locks; } @@ -1604,8 +1607,8 @@ WALInsertLockAcquireExclusive(void) /* * When holding all the locks, we only update the last lock's insertingAt * indicator. The others are set to 0xFFFFFFFFFFFFFFFF, which is higher - * than any real XLogRecPtr value, to make sure that no-one blocks - * waiting on those. + * than any real XLogRecPtr value, to make sure that no-one blocks waiting + * on those. */ for (i = 0; i < num_xloginsert_locks - 1; i++) { @@ -1655,7 +1658,7 @@ WALInsertLockUpdateInsertingAt(XLogRecPtr insertingAt) * WALInsertLockAcquireExclusive. 
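The WALInsertLockAcquire comment reflowed above describes a simple affinity policy: keep using the lock you used last time, and only shift to a different lock after a contended acquisition, so inserters spread out under load without bouncing cache lines when there is no contention. A standalone pthreads sketch of that policy follows; the fixed-size mutex array and the thread-local preference are stand-ins, not PostgreSQL's LWLock machinery.

#include <pthread.h>
#include <stdlib.h>

#define NUM_LOCKS 8

static pthread_mutex_t insert_locks[NUM_LOCKS] = {
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
};

/* Per-thread preference for which lock to try first. */
static _Thread_local int lock_to_try = -1;

/*
 * Acquire one of the insertion locks.  Prefer the one used last time; if it
 * is busy, still take it (blocking), but rotate the preference so the next
 * acquisition tries a different lock.  Returns the index of the lock held.
 */
static int
insert_lock_acquire(void)
{
    int idx;

    if (lock_to_try == -1)
        lock_to_try = rand() % NUM_LOCKS;       /* first time: pick (semi-)randomly */

    idx = lock_to_try;
    if (pthread_mutex_trylock(&insert_locks[idx]) != 0)
    {
        pthread_mutex_lock(&insert_locks[idx]);
        lock_to_try = (lock_to_try + 1) % NUM_LOCKS;
    }
    return idx;
}

static void
insert_lock_release(int idx)
{
    pthread_mutex_unlock(&insert_locks[idx]);
}

int
main(void)
{
    int idx = insert_lock_acquire();

    insert_lock_release(idx);
    return 0;
}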
*/ LWLockUpdateVar(&WALInsertLocks[num_xloginsert_locks - 1].l.lock, - &WALInsertLocks[num_xloginsert_locks - 1].l.insertingAt, + &WALInsertLocks[num_xloginsert_locks - 1].l.insertingAt, insertingAt); } else @@ -1716,15 +1719,16 @@ WaitXLogInsertionsToFinish(XLogRecPtr upto) * Loop through all the locks, sleeping on any in-progress insert older * than 'upto'. * - * finishedUpto is our return value, indicating the point upto which - * all the WAL insertions have been finished. Initialize it to the head - * of reserved WAL, and as we iterate through the insertion locks, back it + * finishedUpto is our return value, indicating the point upto which all + * the WAL insertions have been finished. Initialize it to the head of + * reserved WAL, and as we iterate through the insertion locks, back it * out for any insertion that's still in progress. */ finishedUpto = reservedUpto; for (i = 0; i < num_xloginsert_locks; i++) { - XLogRecPtr insertingat = InvalidXLogRecPtr; + XLogRecPtr insertingat = InvalidXLogRecPtr; + do { /* @@ -1797,9 +1801,9 @@ GetXLogBuffer(XLogRecPtr ptr) } /* - * The XLog buffer cache is organized so that a page is always loaded - * to a particular buffer. That way we can easily calculate the buffer - * a given page must be loaded into, from the XLogRecPtr alone. + * The XLog buffer cache is organized so that a page is always loaded to a + * particular buffer. That way we can easily calculate the buffer a given + * page must be loaded into, from the XLogRecPtr alone. */ idx = XLogRecPtrToBufIdx(ptr); @@ -1827,8 +1831,8 @@ GetXLogBuffer(XLogRecPtr ptr) if (expectedEndPtr != endptr) { /* - * Let others know that we're finished inserting the record up - * to the page boundary. + * Let others know that we're finished inserting the record up to the + * page boundary. */ WALInsertLockUpdateInsertingAt(expectedEndPtr - XLOG_BLCKSZ); @@ -1837,7 +1841,7 @@ GetXLogBuffer(XLogRecPtr ptr) if (expectedEndPtr != endptr) elog(PANIC, "could not find WAL buffer for %X/%X", - (uint32) (ptr >> 32) , (uint32) ptr); + (uint32) (ptr >> 32), (uint32) ptr); } else { @@ -1974,8 +1978,8 @@ XLogRecPtrToBytePos(XLogRecPtr ptr) else { result = fullsegs * UsableBytesInSegment + - (XLOG_BLCKSZ - SizeOfXLogLongPHD) + /* account for first page */ - (fullpages - 1) * UsableBytesInPage; /* full pages */ + (XLOG_BLCKSZ - SizeOfXLogLongPHD) + /* account for first page */ + (fullpages - 1) * UsableBytesInPage; /* full pages */ if (offset > 0) { Assert(offset >= SizeOfXLogShortPHD); @@ -2170,8 +2174,8 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, bool opportunistic) } /* - * Now the next buffer slot is free and we can set it up to be the next - * output page. + * Now the next buffer slot is free and we can set it up to be the + * next output page. */ NewPageBeginPtr = XLogCtl->InitializedUpTo; NewPageEndPtr = NewPageBeginPtr + XLOG_BLCKSZ; @@ -2194,7 +2198,8 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, bool opportunistic) /* NewPage->xlp_info = 0; */ /* done by memset */ NewPage ->xlp_tli = ThisTimeLineID; NewPage ->xlp_pageaddr = NewPageBeginPtr; - /* NewPage->xlp_rem_len = 0; */ /* done by memset */ + + /* NewPage->xlp_rem_len = 0; */ /* done by memset */ /* * If online backup is not in progress, mark the header to indicate @@ -2202,12 +2207,12 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, bool opportunistic) * blocks. This allows the WAL archiver to know whether it is safe to * compress archived WAL data by transforming full-block records into * the non-full-block format. 
It is sufficient to record this at the - * page level because we force a page switch (in fact a segment switch) - * when starting a backup, so the flag will be off before any records - * can be written during the backup. At the end of a backup, the last - * page will be marked as all unsafe when perhaps only part is unsafe, - * but at worst the archiver would miss the opportunity to compress a - * few records. + * page level because we force a page switch (in fact a segment + * switch) when starting a backup, so the flag will be off before any + * records can be written during the backup. At the end of a backup, + * the last page will be marked as all unsafe when perhaps only part + * is unsafe, but at worst the archiver would miss the opportunity to + * compress a few records. */ if (!Insert->forcePageWrites) NewPage ->xlp_info |= XLP_BKP_REMOVABLE; @@ -2329,7 +2334,8 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible) * if we're passed a bogus WriteRqst.Write that is past the end of the * last page that's been initialized by AdvanceXLInsertBuffer. */ - XLogRecPtr EndPtr = XLogCtl->xlblocks[curridx]; + XLogRecPtr EndPtr = XLogCtl->xlblocks[curridx]; + if (LogwrtResult.Write >= EndPtr) elog(PANIC, "xlog write request %X/%X is past end of log %X/%X", (uint32) (LogwrtResult.Write >> 32), @@ -2413,7 +2419,7 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible) do { errno = 0; - written = write(openLogFile, from, nleft); + written = write(openLogFile, from, nleft); if (written <= 0) { if (errno == EINTR) @@ -2422,7 +2428,7 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible) (errcode_for_file_access(), errmsg("could not write to log file %s " "at offset %u, length %zu: %m", - XLogFileNameP(ThisTimeLineID, openLogSegNo), + XLogFileNameP(ThisTimeLineID, openLogSegNo), openLogOff, nbytes))); } nleft -= written; @@ -2500,7 +2506,7 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible) { /* * Could get here without iterating above loop, in which case we might - * have no open file or the wrong one. However, we do not need to + * have no open file or the wrong one. However, we do not need to * fsync more than one file. */ if (sync_method != SYNC_METHOD_OPEN && @@ -2569,7 +2575,7 @@ XLogSetAsyncXactLSN(XLogRecPtr asyncXactLSN) /* * If the WALWriter is sleeping, we should kick it to make it come out of - * low-power mode. Otherwise, determine whether there's a full page of + * low-power mode. Otherwise, determine whether there's a full page of * WAL available to write. */ if (!sleeping) @@ -2616,7 +2622,8 @@ XLogGetReplicationSlotMinimumLSN(void) { /* use volatile pointer to prevent code rearrangement */ volatile XLogCtlData *xlogctl = XLogCtl; - XLogRecPtr retval; + XLogRecPtr retval; + SpinLockAcquire(&xlogctl->info_lck); retval = xlogctl->replicationSlotMinLSN; SpinLockRelease(&xlogctl->info_lck); @@ -2883,9 +2890,9 @@ XLogFlush(XLogRecPtr record) * We normally flush only completed blocks; but if there is nothing to do on * that basis, we check for unflushed async commits in the current incomplete * block, and flush through the latest one of those. Thus, if async commits - * are not being used, we will flush complete blocks only. We can guarantee + * are not being used, we will flush complete blocks only. We can guarantee * that async commits reach disk after at most three cycles; normally only - * one or two. (When flushing complete blocks, we allow XLogWrite to write + * one or two. 
(When flushing complete blocks, we allow XLogWrite to write * "flexibly", meaning it can stop at the end of the buffer ring; this makes a * difference only with very high load or long wal_writer_delay, but imposes * one extra cycle for the worst case for async commits.) @@ -3060,7 +3067,7 @@ XLogNeedsFlush(XLogRecPtr record) * log, seg: identify segment to be created/opened. * * *use_existent: if TRUE, OK to use a pre-existing file (else, any - * pre-existing file will be deleted). On return, TRUE if a pre-existing + * pre-existing file will be deleted). On return, TRUE if a pre-existing * file was used. * * use_lock: if TRUE, acquire ControlFileLock while moving file into @@ -3127,11 +3134,11 @@ XLogFileInit(XLogSegNo logsegno, bool *use_existent, bool use_lock) errmsg("could not create file \"%s\": %m", tmppath))); /* - * Zero-fill the file. We have to do this the hard way to ensure that all + * Zero-fill the file. We have to do this the hard way to ensure that all * the file space has really been allocated --- on platforms that allow * "holes" in files, just seeking to the end doesn't allocate intermediate * space. This way, we know that we have all the space and (after the - * fsync below) that all the indirect blocks are down on disk. Therefore, + * fsync below) that all the indirect blocks are down on disk. Therefore, * fdatasync(2) or O_DSYNC will be sufficient to sync future writes to the * log file. * @@ -3223,7 +3230,7 @@ XLogFileInit(XLogSegNo logsegno, bool *use_existent, bool use_lock) * a different timeline) * * Currently this is only used during recovery, and so there are no locking - * considerations. But we should be just as tense as XLogFileInit to avoid + * considerations. But we should be just as tense as XLogFileInit to avoid * emplacing a bogus file. */ static void @@ -3434,7 +3441,7 @@ XLogFileOpen(XLogSegNo segno) if (fd < 0) ereport(PANIC, (errcode_for_file_access(), - errmsg("could not open transaction log file \"%s\": %m", path))); + errmsg("could not open transaction log file \"%s\": %m", path))); return fd; } @@ -3541,13 +3548,13 @@ XLogFileReadAnyTLI(XLogSegNo segno, int emode, int source) * the timelines listed in expectedTLEs. * * We expect curFileTLI on entry to be the TLI of the preceding file in - * sequence, or 0 if there was no predecessor. We do not allow curFileTLI + * sequence, or 0 if there was no predecessor. We do not allow curFileTLI * to go backwards; this prevents us from picking up the wrong file when a * parent timeline extends to higher segment numbers than the child we * want to read. * * If we haven't read the timeline history file yet, read it now, so that - * we know which TLIs to scan. We don't save the list in expectedTLEs, + * we know which TLIs to scan. We don't save the list in expectedTLEs, * however, unless we actually find a valid segment. That way if there is * neither a timeline history file nor a WAL segment in the archive, and * streaming replication is set up, we'll read the timeline history file @@ -3611,7 +3618,7 @@ XLogFileClose(void) /* * WAL segment files will not be re-read in normal operation, so we advise - * the OS to release any cached pages. But do not do so if WAL archiving + * the OS to release any cached pages. But do not do so if WAL archiving * or streaming is active, because archiver and walsender process could * use the cache to read the WAL segment. 
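The XLogFileInit hunk above explains why a fresh segment is zero-filled block by block instead of just seeking to its end: on filesystems that support holes, only real writes guarantee that every block (and its indirect blocks) is allocated, which is what lets later fdatasync-style flushes be cheap. A minimal POSIX sketch of that preallocation pattern follows; the file name, segment size and block size are placeholders, and error handling is cut down to the essentials.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define SEG_SIZE (16 * 1024 * 1024)     /* placeholder segment size */
#define BLCKSZ   8192                   /* placeholder block size   */

int
main(void)
{
    char    zbuffer[BLCKSZ];
    size_t  written = 0;
    int     fd;

    memset(zbuffer, 0, sizeof(zbuffer));

    fd = open("segment.tmp", O_RDWR | O_CREAT | O_EXCL, 0600);
    if (fd < 0)
    {
        perror("open");
        return 1;
    }

    /* Write real zeroes so the filesystem allocates every block now. */
    while (written < SEG_SIZE)
    {
        ssize_t rc = write(fd, zbuffer, sizeof(zbuffer));

        if (rc < 0)
        {
            if (errno == EINTR)
                continue;               /* retry interrupted writes */
            perror("write");
            close(fd);
            return 1;
        }
        written += (size_t) rc;
    }

    /* Flush data and metadata; future syncs of this file can then be cheaper. */
    if (fsync(fd) != 0)
    {
        perror("fsync");
        close(fd);
        return 1;
    }
    close(fd);
    return 0;
}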
*/ @@ -3777,7 +3784,7 @@ RemoveOldXlogFiles(XLogSegNo segno, XLogRecPtr endptr) { /* * We ignore the timeline part of the XLOG segment identifiers in - * deciding whether a segment is still needed. This ensures that we + * deciding whether a segment is still needed. This ensures that we * won't prematurely remove a segment from a parent timeline. We could * probably be a little more proactive about removing segments of * non-parent timelines, but that would be a whole lot more @@ -3828,6 +3835,7 @@ RemoveOldXlogFiles(XLogSegNo segno, XLogRecPtr endptr) xlde->d_name))); #ifdef WIN32 + /* * On Windows, if another process (e.g another backend) * holds the file open in FILE_SHARE_DELETE mode, unlink @@ -4310,7 +4318,7 @@ rescanLatestTimeLine(void) * I/O routines for pg_control * * *ControlFile is a buffer in shared memory that holds an image of the - * contents of pg_control. WriteControlFile() initializes pg_control + * contents of pg_control. WriteControlFile() initializes pg_control * given a preloaded buffer, ReadControlFile() loads the buffer from * the pg_control file (during postmaster or standalone-backend startup), * and UpdateControlFile() rewrites pg_control after we modify xlog state. @@ -4715,7 +4723,7 @@ check_wal_buffers(int *newval, void **extra, GucSource source) { /* * If we haven't yet changed the boot_val default of -1, just let it - * be. We'll fix it when XLOGShmemSize is called. + * be. We'll fix it when XLOGShmemSize is called. */ if (XLOGbuffers == -1) return true; @@ -4815,7 +4823,7 @@ XLOGShmemInit(void) /* WAL insertion locks. Ensure they're aligned to the full padded size */ allocptr += sizeof(WALInsertLockPadded) - - ((uintptr_t) allocptr) % sizeof(WALInsertLockPadded); + ((uintptr_t) allocptr) %sizeof(WALInsertLockPadded); WALInsertLocks = XLogCtl->Insert.WALInsertLocks = (WALInsertLockPadded *) allocptr; allocptr += sizeof(WALInsertLockPadded) * num_xloginsert_locks; @@ -4836,8 +4844,8 @@ XLOGShmemInit(void) /* * Align the start of the page buffers to a full xlog block size boundary. - * This simplifies some calculations in XLOG insertion. It is also required - * for O_DIRECT. + * This simplifies some calculations in XLOG insertion. It is also + * required for O_DIRECT. */ allocptr = (char *) TYPEALIGN(XLOG_BLCKSZ, allocptr); XLogCtl->pages = allocptr; @@ -5233,7 +5241,7 @@ readRecoveryCommandFile(void) const char *hintmsg; if (!parse_int(item->value, &min_recovery_apply_delay, GUC_UNIT_MS, - &hintmsg)) + &hintmsg)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("parameter \"%s\" requires a temporal value", "min_recovery_apply_delay"), @@ -5271,7 +5279,7 @@ readRecoveryCommandFile(void) /* * If user specified recovery_target_timeline, validate it or compute the - * "latest" value. We can't do this until after we've gotten the restore + * "latest" value. We can't do this until after we've gotten the restore * command and set InArchiveRecovery, because we need to fetch timeline * history files from the archive. */ @@ -5464,8 +5472,8 @@ recoveryStopsBefore(XLogRecord *record) * * when testing for an xid, we MUST test for equality only, since * transactions are numbered in the order they start, not the order - * they complete. A higher numbered xid will complete before you - * about 50% of the time... + * they complete. A higher numbered xid will complete before you about + * 50% of the time... 
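The XLOGShmemInit hunk above advances an allocation pointer so that the insertion-lock array starts on a multiple of its padded size and the page buffers start on a full XLOG block boundary (the latter also being required for O_DIRECT). The arithmetic is the usual round-up-to-a-multiple trick; the tiny standalone sketch below shows it with made-up sizes.

#include <stdint.h>
#include <stdio.h>

/* Round ptr up to the next multiple of align (align need not be a power of two). */
static char *
align_up(char *ptr, uintptr_t align)
{
    uintptr_t rem = (uintptr_t) ptr % align;

    return rem ? ptr + (align - rem) : ptr;
}

int
main(void)
{
    static char shmem[1 << 16];         /* pretend shared-memory arena */
    char       *allocptr = shmem + 13;  /* deliberately misaligned start */
    char       *locks = align_up(allocptr, 128);        /* e.g. padded lock array */
    char       *pages = align_up(locks + 1000, 8192);   /* e.g. block-sized buffers */

    printf("locks at offset %td, pages at offset %td\n",
           locks - shmem, pages - shmem);
    return 0;
}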
*/ stopsHere = (record->xl_xid == recoveryTargetXid); } @@ -5525,8 +5533,8 @@ recoveryStopsAfter(XLogRecord *record) record_info = record->xl_info & ~XLR_INFO_MASK; /* - * There can be many restore points that share the same name; we stop - * at the first one. + * There can be many restore points that share the same name; we stop at + * the first one. */ if (recoveryTarget == RECOVERY_TARGET_NAME && record->xl_rmid == RM_XLOG_ID && record_info == XLOG_RESTORE_POINT) @@ -5543,9 +5551,9 @@ recoveryStopsAfter(XLogRecord *record) strlcpy(recoveryStopName, recordRestorePointData->rp_name, MAXFNAMELEN); ereport(LOG, - (errmsg("recovery stopping at restore point \"%s\", time %s", - recoveryStopName, - timestamptz_to_str(recoveryStopTime)))); + (errmsg("recovery stopping at restore point \"%s\", time %s", + recoveryStopName, + timestamptz_to_str(recoveryStopTime)))); return true; } } @@ -5688,10 +5696,10 @@ recoveryApplyDelay(XLogRecord *record) /* * Is it a COMMIT record? * - * We deliberately choose not to delay aborts since they have no effect - * on MVCC. We already allow replay of records that don't have a - * timestamp, so there is already opportunity for issues caused by early - * conflicts on standbys. + * We deliberately choose not to delay aborts since they have no effect on + * MVCC. We already allow replay of records that don't have a timestamp, + * so there is already opportunity for issues caused by early conflicts on + * standbys. */ record_info = record->xl_info & ~XLR_INFO_MASK; if (!(record->xl_rmid == RM_XACT_ID && @@ -5711,7 +5719,7 @@ recoveryApplyDelay(XLogRecord *record) */ TimestampDifference(GetCurrentTimestamp(), recoveryDelayUntilTime, &secs, µsecs); - if (secs <= 0 && microsecs <=0) + if (secs <= 0 && microsecs <= 0) return false; while (true) @@ -5731,15 +5739,15 @@ recoveryApplyDelay(XLogRecord *record) TimestampDifference(GetCurrentTimestamp(), recoveryDelayUntilTime, &secs, µsecs); - if (secs <= 0 && microsecs <=0) + if (secs <= 0 && microsecs <= 0) break; elog(DEBUG2, "recovery apply delay %ld seconds, %d milliseconds", - secs, microsecs / 1000); + secs, microsecs / 1000); WaitLatch(&XLogCtl->recoveryWakeupLatch, - WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, - secs * 1000L + microsecs / 1000); + WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, + secs * 1000L + microsecs / 1000); } return true; } @@ -5978,7 +5986,7 @@ StartupXLOG(void) ValidateXLOGDirectoryStructure(); /* - * Clear out any old relcache cache files. This is *necessary* if we do + * Clear out any old relcache cache files. This is *necessary* if we do * any WAL replay, since that would probably result in the cache files * being out of sync with database reality. In theory we could leave them * in place if the database had been cleanly shut down, but it seems @@ -6050,7 +6058,7 @@ StartupXLOG(void) ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of memory"), - errdetail("Failed while allocating an XLog reading processor."))); + errdetail("Failed while allocating an XLog reading processor."))); xlogreader->system_identifier = ControlFile->system_identifier; if (read_backup_label(&checkPointLoc, &backupEndRequired, @@ -6261,9 +6269,9 @@ StartupXLOG(void) StartupReorderBuffer(); /* - * Startup MultiXact. We need to do this early for two reasons: one - * is that we might try to access multixacts when we do tuple freezing, - * and the other is we need its state initialized because we attempt + * Startup MultiXact. 
We need to do this early for two reasons: one is + * that we might try to access multixacts when we do tuple freezing, and + * the other is we need its state initialized because we attempt * truncation during restartpoints. */ StartupMultiXact(); @@ -6517,9 +6525,9 @@ StartupXLOG(void) } /* - * Initialize shared variables for tracking progress of WAL replay, - * as if we had just replayed the record before the REDO location - * (or the checkpoint record itself, if it's a shutdown checkpoint). + * Initialize shared variables for tracking progress of WAL replay, as + * if we had just replayed the record before the REDO location (or the + * checkpoint record itself, if it's a shutdown checkpoint). */ SpinLockAcquire(&xlogctl->info_lck); if (checkPoint.redo < RecPtr) @@ -6646,17 +6654,17 @@ StartupXLOG(void) } /* - * If we've been asked to lag the master, wait on - * latch until enough time has passed. + * If we've been asked to lag the master, wait on latch until + * enough time has passed. */ if (recoveryApplyDelay(record)) { /* - * We test for paused recovery again here. If - * user sets delayed apply, it may be because - * they expect to pause recovery in case of - * problems, so we must test again here otherwise - * pausing during the delay-wait wouldn't work. + * We test for paused recovery again here. If user sets + * delayed apply, it may be because they expect to pause + * recovery in case of problems, so we must test again + * here otherwise pausing during the delay-wait wouldn't + * work. */ if (xlogctl->recoveryPause) recoveryPausesHere(); @@ -6893,8 +6901,8 @@ StartupXLOG(void) /* * Consider whether we need to assign a new timeline ID. * - * If we are doing an archive recovery, we always assign a new ID. This - * handles a couple of issues. If we stopped short of the end of WAL + * If we are doing an archive recovery, we always assign a new ID. This + * handles a couple of issues. If we stopped short of the end of WAL * during recovery, then we are clearly generating a new timeline and must * assign it a unique new ID. Even if we ran to the end, modifying the * current last segment is problematic because it may result in trying to @@ -6969,7 +6977,7 @@ StartupXLOG(void) /* * Tricky point here: readBuf contains the *last* block that the LastRec - * record spans, not the one it starts in. The last block is indeed the + * record spans, not the one it starts in. The last block is indeed the * one we want to use. */ if (EndOfLog % XLOG_BLCKSZ != 0) @@ -6996,9 +7004,9 @@ StartupXLOG(void) else { /* - * There is no partial block to copy. Just set InitializedUpTo, - * and let the first attempt to insert a log record to initialize - * the next buffer. + * There is no partial block to copy. Just set InitializedUpTo, and + * let the first attempt to insert a log record to initialize the next + * buffer. */ XLogCtl->InitializedUpTo = EndOfLog; } @@ -7162,7 +7170,7 @@ StartupXLOG(void) XLogReportParameters(); /* - * All done. Allow backends to write WAL. (Although the bool flag is + * All done. Allow backends to write WAL. (Although the bool flag is * probably atomic in itself, we use the info_lck here to ensure that * there are no race conditions concerning visibility of other recent * updates to shared memory.) 
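The recoveryApplyDelay() hunks above are whitespace-only, but the loop they touch follows a simple pattern: compute the time remaining until the delayed apply target with TimestampDifference(), stop once both the seconds and microseconds parts have reached zero, and otherwise wait on the startup process latch with the remainder as the timeout so that a wakeup can cut the sleep short. A minimal standalone C sketch of that wait pattern is shown below; it is an illustration only, not backend code, and the names wait_until, apply_target and recovery_wakeup are hypothetical stand-ins for the latch machinery.

/*
 * Illustrative sketch only (not PostgreSQL backend code): wait until a
 * target wall-clock time, sleeping in bounded slices so that an external
 * wakeup flag is noticed promptly, similar in spirit to the
 * TimestampDifference() + WaitLatch() loop in recoveryApplyDelay().
 * The names used here are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static void
wait_until(struct timespec apply_target, volatile bool *recovery_wakeup)
{
	for (;;)
	{
		struct timespec now;
		struct timespec slice;
		double		remaining;

		clock_gettime(CLOCK_REALTIME, &now);
		remaining = (apply_target.tv_sec - now.tv_sec) +
			(apply_target.tv_nsec - now.tv_nsec) / 1e9;

		/* target reached: stop delaying, as when secs/microsecs <= 0 */
		if (remaining <= 0.0)
			break;

		/* an external wakeup would be re-evaluated here */
		if (*recovery_wakeup)
			*recovery_wakeup = false;

		/* sleep at most 100 ms so wakeups are noticed quickly */
		slice.tv_sec = 0;
		slice.tv_nsec = remaining < 0.1 ? (long) (remaining * 1e9) : 100000000L;
		nanosleep(&slice, NULL);
	}
}

int
main(void)
{
	volatile bool wakeup = false;
	struct timespec target;

	clock_gettime(CLOCK_REALTIME, &target);
	target.tv_sec += 1;			/* delay apply by one second */
	wait_until(target, &wakeup);
	puts("delay elapsed; record would be applied now");
	return 0;
}

Sleeping in bounded slices mirrors what the bounded WaitLatch() timeout achieves in the real loop: a wakeup (promotion, pause request, postmaster death) is noticed within one slice rather than only after the full delay has expired.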
@@ -7200,7 +7208,7 @@ StartupXLOG(void) static void CheckRecoveryConsistency(void) { - XLogRecPtr lastReplayedEndRecPtr; + XLogRecPtr lastReplayedEndRecPtr; /* * During crash recovery, we don't reach a consistent state until we've @@ -7322,7 +7330,7 @@ RecoveryInProgress(void) /* * Initialize TimeLineID and RedoRecPtr when we discover that recovery * is finished. InitPostgres() relies upon this behaviour to ensure - * that InitXLOGAccess() is called at backend startup. (If you change + * that InitXLOGAccess() is called at backend startup. (If you change * this, see also LocalSetXLogInsertAllowed.) */ if (!LocalRecoveryInProgress) @@ -7335,6 +7343,7 @@ RecoveryInProgress(void) pg_memory_barrier(); InitXLOGAccess(); } + /* * Note: We don't need a memory barrier when we're still in recovery. * We might exit recovery immediately after return, so the caller @@ -7594,7 +7603,7 @@ GetRedoRecPtr(void) { /* use volatile pointer to prevent code rearrangement */ volatile XLogCtlData *xlogctl = XLogCtl; - XLogRecPtr ptr; + XLogRecPtr ptr; /* * The possibly not up-to-date copy in XlogCtl is enough. Even if we @@ -7983,7 +7992,7 @@ CreateCheckPoint(int flags) /* * If this isn't a shutdown or forced checkpoint, and we have not inserted * any XLOG records since the start of the last checkpoint, skip the - * checkpoint. The idea here is to avoid inserting duplicate checkpoints + * checkpoint. The idea here is to avoid inserting duplicate checkpoints * when the system is idle. That wastes log space, and more importantly it * exposes us to possible loss of both current and previous checkpoint * records if the machine crashes just as we're writing the update. @@ -8120,7 +8129,7 @@ CreateCheckPoint(int flags) * performing those groups of actions. * * One example is end of transaction, so we must wait for any transactions - * that are currently in commit critical sections. If an xact inserted + * that are currently in commit critical sections. If an xact inserted * its commit record into XLOG just before the REDO point, then a crash * restart from the REDO point would not replay that record, which means * that our flushing had better include the xact's update of pg_clog. So @@ -8131,9 +8140,8 @@ CreateCheckPoint(int flags) * fuzzy: it is possible that we will wait for xacts we didn't really need * to wait for. But the delay should be short and it seems better to make * checkpoint take a bit longer than to hold off insertions longer than - * necessary. - * (In fact, the whole reason we have this issue is that xact.c does - * commit record XLOG insertion and clog update as two separate steps + * necessary. (In fact, the whole reason we have this issue is that xact.c + * does commit record XLOG insertion and clog update as two separate steps * protected by different locks, but again that seems best on grounds of * minimizing lock contention.) * @@ -8280,9 +8288,9 @@ CreateCheckPoint(int flags) /* * Truncate pg_subtrans if possible. We can throw away all data before - * the oldest XMIN of any running transaction. No future transaction will + * the oldest XMIN of any running transaction. No future transaction will * attempt to reference any pg_subtrans entry older than that (see Asserts - * in subtrans.c). During recovery, though, we mustn't do this because + * in subtrans.c). During recovery, though, we mustn't do this because * StartupSUBTRANS hasn't been called yet. */ if (!RecoveryInProgress()) @@ -8600,11 +8608,11 @@ CreateRestartPoint(int flags) _logSegNo--; /* - * Try to recycle segments on a useful timeline. 
If we've been promoted - * since the beginning of this restartpoint, use the new timeline - * chosen at end of recovery (RecoveryInProgress() sets ThisTimeLineID - * in that case). If we're still in recovery, use the timeline we're - * currently replaying. + * Try to recycle segments on a useful timeline. If we've been + * promoted since the beginning of this restartpoint, use the new + * timeline chosen at end of recovery (RecoveryInProgress() sets + * ThisTimeLineID in that case). If we're still in recovery, use the + * timeline we're currently replaying. * * There is no guarantee that the WAL segments will be useful on the * current timeline; if recovery proceeds to a new timeline right @@ -8636,9 +8644,9 @@ CreateRestartPoint(int flags) /* * Truncate pg_subtrans if possible. We can throw away all data before - * the oldest XMIN of any running transaction. No future transaction will + * the oldest XMIN of any running transaction. No future transaction will * attempt to reference any pg_subtrans entry older than that (see Asserts - * in subtrans.c). When hot standby is disabled, though, we mustn't do + * in subtrans.c). When hot standby is disabled, though, we mustn't do * this because StartupSUBTRANS hasn't been called yet. */ if (EnableHotStandby) @@ -8697,7 +8705,7 @@ KeepLogSeg(XLogRecPtr recptr, XLogSegNo *logSegNo) /* then check whether slots limit removal further */ if (max_replication_slots > 0 && keep != InvalidXLogRecPtr) { - XLogRecPtr slotSegNo; + XLogRecPtr slotSegNo; XLByteToSeg(keep, slotSegNo); @@ -8730,7 +8738,7 @@ XLogPutNextOid(Oid nextOid) * We need not flush the NEXTOID record immediately, because any of the * just-allocated OIDs could only reach disk as part of a tuple insert or * update that would have its own XLOG record that must follow the NEXTOID - * record. Therefore, the standard buffer LSN interlock applied to those + * record. Therefore, the standard buffer LSN interlock applied to those * records will ensure no such OID reaches disk before the NEXTOID record * does. * @@ -8859,8 +8867,9 @@ XLogSaveBufferForHint(Buffer buffer, bool buffer_std) * lsn updates. We assume pd_lower/upper cannot be changed without an * exclusive lock, so the contents bkp are not racy. * - * With buffer_std set to false, XLogCheckBuffer() sets hole_length and - * hole_offset to 0; so the following code is safe for either case. + * With buffer_std set to false, XLogCheckBuffer() sets hole_length + * and hole_offset to 0; so the following code is safe for either + * case. */ memcpy(copied_buffer, origdata, bkpb.hole_offset); memcpy(copied_buffer + bkpb.hole_offset, @@ -9072,7 +9081,7 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record) /* * We used to try to take the maximum of ShmemVariableCache->nextOid * and the recorded nextOid, but that fails if the OID counter wraps - * around. Since no OID allocation should be happening during replay + * around. Since no OID allocation should be happening during replay * anyway, better to just believe the record exactly. We still take * OidGenLock while setting the variable, just in case. */ @@ -9262,10 +9271,10 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record) BkpBlock bkpb; /* - * Full-page image (FPI) records contain a backup block stored "inline" - * in the normal data since the locking when writing hint records isn't - * sufficient to use the normal backup block mechanism, which assumes - * exclusive lock on the buffer supplied. 
+ * Full-page image (FPI) records contain a backup block stored + * "inline" in the normal data since the locking when writing hint + * records isn't sufficient to use the normal backup block mechanism, + * which assumes exclusive lock on the buffer supplied. * * Since the only change in these backup block are hint bits, there * are no recovery conflicts generated. @@ -9415,7 +9424,7 @@ get_sync_bit(int method) /* * Optimize writes by bypassing kernel cache with O_DIRECT when using - * O_SYNC/O_FSYNC and O_DSYNC. But only if archiving and streaming are + * O_SYNC/O_FSYNC and O_DSYNC. But only if archiving and streaming are * disabled, otherwise the archive command or walsender process will read * the WAL soon after writing it, which is guaranteed to cause a physical * read if we bypassed the kernel cache. We also skip the @@ -9619,7 +9628,7 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p, * during an on-line backup even if not doing so at other times, because * it's quite possible for the backup dump to obtain a "torn" (partially * written) copy of a database page if it reads the page concurrently with - * our write to the same page. This can be fixed as long as the first + * our write to the same page. This can be fixed as long as the first * write to the page in the WAL sequence is a full-page write. Hence, we * turn on forcePageWrites and then force a CHECKPOINT, to ensure there * are no dirty pages in shared memory that might get dumped while the @@ -9663,7 +9672,7 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p, * old timeline IDs. That would otherwise happen if you called * pg_start_backup() right after restoring from a PITR archive: the * first WAL segment containing the startup checkpoint has pages in - * the beginning with the old timeline ID. That can cause trouble at + * the beginning with the old timeline ID. That can cause trouble at * recovery: we won't have a history file covering the old timeline if * pg_xlog directory was not included in the base backup and the WAL * archive was cleared too before starting the backup. @@ -9686,7 +9695,7 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p, bool checkpointfpw; /* - * Force a CHECKPOINT. Aside from being necessary to prevent torn + * Force a CHECKPOINT. Aside from being necessary to prevent torn * page problems, this guarantees that two successive backup runs * will have different checkpoint positions and hence different * history file names, even if nothing happened in between. @@ -10339,7 +10348,7 @@ GetOldestRestartPoint(XLogRecPtr *oldrecptr, TimeLineID *oldtli) * * If we see a backup_label during recovery, we assume that we are recovering * from a backup dump file, and we therefore roll forward from the checkpoint - * identified by the label file, NOT what pg_control says. This avoids the + * identified by the label file, NOT what pg_control says. This avoids the * problem that pg_control might have been archived one or more checkpoints * later than the start of the dump, and so if we rely on it as the start * point, we will fail to restore a consistent database state. @@ -10686,7 +10695,7 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess, * Standby mode is implemented by a state machine: * * 1. Read from either archive or pg_xlog (XLOG_FROM_ARCHIVE), or just - * pg_xlog (XLOG_FROM_XLOG) + * pg_xlog (XLOG_FROM_XLOG) * 2. Check trigger file * 3. Read from primary server via walreceiver (XLOG_FROM_STREAM) * 4. 
Rescan timelines @@ -10887,8 +10896,8 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess, * file from pg_xlog. */ readFile = XLogFileReadAnyTLI(readSegNo, DEBUG2, - currentSource == XLOG_FROM_ARCHIVE ? XLOG_FROM_ANY : - currentSource); + currentSource == XLOG_FROM_ARCHIVE ? XLOG_FROM_ANY : + currentSource); if (readFile >= 0) return true; /* success! */ @@ -10945,11 +10954,11 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess, if (havedata) { /* - * Great, streamed far enough. Open the file if it's + * Great, streamed far enough. Open the file if it's * not open already. Also read the timeline history * file if we haven't initialized timeline history * yet; it should be streamed over and present in - * pg_xlog by now. Use XLOG_FROM_STREAM so that + * pg_xlog by now. Use XLOG_FROM_STREAM so that * source info is set correctly and XLogReceiptTime * isn't changed. */ @@ -11014,7 +11023,7 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess, HandleStartupProcInterrupts(); } - return false; /* not reached */ + return false; /* not reached */ } /* @@ -11022,9 +11031,9 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess, * in the current WAL page, previously read by XLogPageRead(). * * 'emode' is the error mode that would be used to report a file-not-found - * or legitimate end-of-WAL situation. Generally, we use it as-is, but if + * or legitimate end-of-WAL situation. Generally, we use it as-is, but if * we're retrying the exact same record that we've tried previously, only - * complain the first time to keep the noise down. However, we only do when + * complain the first time to keep the noise down. However, we only do when * reading from pg_xlog, because we don't expect any invalid records in archive * or in records streamed from master. Files in the archive should be complete, * and we should never hit the end of WAL because we stop and wait for more WAL diff --git a/src/backend/access/transam/xlogarchive.c b/src/backend/access/transam/xlogarchive.c index a43793382e4..37745dce890 100644 --- a/src/backend/access/transam/xlogarchive.c +++ b/src/backend/access/transam/xlogarchive.c @@ -300,8 +300,8 @@ RestoreArchivedFile(char *path, const char *xlogfname, signaled = WIFSIGNALED(rc) || WEXITSTATUS(rc) > 125; ereport(signaled ? FATAL : DEBUG2, - (errmsg("could not restore file \"%s\" from archive: %s", - xlogfname, wait_result_to_str(rc)))); + (errmsg("could not restore file \"%s\" from archive: %s", + xlogfname, wait_result_to_str(rc)))); not_available: diff --git a/src/backend/access/transam/xlogfuncs.c b/src/backend/access/transam/xlogfuncs.c index 5f8d65514c1..8a87581e79c 100644 --- a/src/backend/access/transam/xlogfuncs.c +++ b/src/backend/access/transam/xlogfuncs.c @@ -429,7 +429,7 @@ pg_is_in_recovery(PG_FUNCTION_ARGS) Datum pg_xlog_location_diff(PG_FUNCTION_ARGS) { - Datum result; + Datum result; result = DirectFunctionCall2(pg_lsn_mi, PG_GETARG_DATUM(0), diff --git a/src/backend/access/transam/xlogreader.c b/src/backend/access/transam/xlogreader.c index eff2081afe8..f06daa2638f 100644 --- a/src/backend/access/transam/xlogreader.c +++ b/src/backend/access/transam/xlogreader.c @@ -199,7 +199,7 @@ XLogReadRecord(XLogReaderState *state, XLogRecPtr RecPtr, char **errormsg) randAccess = true; /* - * RecPtr is pointing to end+1 of the previous WAL record. If we're + * RecPtr is pointing to end+1 of the previous WAL record. If we're * at a page boundary, no more records can fit on the current page. 
We * must skip over the page header, but we can't do that until we've * read in the page, since the header size is variable. @@ -277,7 +277,7 @@ XLogReadRecord(XLogReaderState *state, XLogRecPtr RecPtr, char **errormsg) /* * If the whole record header is on this page, validate it immediately. * Otherwise do just a basic sanity check on xl_tot_len, and validate the - * rest of the header after reading it from the next page. The xl_tot_len + * rest of the header after reading it from the next page. The xl_tot_len * check is necessary here to ensure that we enter the "Need to reassemble * record" code path below; otherwise we might fail to apply * ValidXLogRecordHeader at all. @@ -572,7 +572,7 @@ err: * Validate an XLOG record header. * * This is just a convenience subroutine to avoid duplicated code in - * XLogReadRecord. It's not intended for use from anywhere else. + * XLogReadRecord. It's not intended for use from anywhere else. */ static bool ValidXLogRecordHeader(XLogReaderState *state, XLogRecPtr RecPtr, @@ -661,7 +661,7 @@ ValidXLogRecordHeader(XLogReaderState *state, XLogRecPtr RecPtr, * data to read in) until we've checked the CRCs. * * We assume all of the record (that is, xl_tot_len bytes) has been read - * into memory at *record. Also, ValidXLogRecordHeader() has accepted the + * into memory at *record. Also, ValidXLogRecordHeader() has accepted the * record's header, which means in particular that xl_tot_len is at least * SizeOfXlogRecord, so it is safe to fetch xl_len. */ diff --git a/src/backend/bootstrap/bootstrap.c b/src/backend/bootstrap/bootstrap.c index c36e71d8066..4a542e65ca2 100644 --- a/src/backend/bootstrap/bootstrap.c +++ b/src/backend/bootstrap/bootstrap.c @@ -76,7 +76,7 @@ int numattr; /* number of attributes for cur. rel */ * in the core "bootstrapped" catalogs. * * XXX several of these input/output functions do catalog scans - * (e.g., F_REGPROCIN scans pg_proc). this obviously creates some + * (e.g., F_REGPROCIN scans pg_proc). this obviously creates some * order dependencies in the catalog creation process. */ struct typinfo @@ -374,9 +374,9 @@ AuxiliaryProcessMain(int argc, char *argv[]) #endif /* - * Assign the ProcSignalSlot for an auxiliary process. Since it + * Assign the ProcSignalSlot for an auxiliary process. Since it * doesn't have a BackendId, the slot is statically allocated based on - * the auxiliary process type (MyAuxProcType). Backends use slots + * the auxiliary process type (MyAuxProcType). Backends use slots * indexed in the range from 1 to MaxBackends (inclusive), so we use * MaxBackends + AuxProcType + 1 as the index of the slot for an * auxiliary process. @@ -561,7 +561,7 @@ bootstrap_signals(void) } /* - * Begin shutdown of an auxiliary process. This is approximately the equivalent + * Begin shutdown of an auxiliary process. This is approximately the equivalent * of ShutdownPostgres() in postinit.c. We can't run transactions in an * auxiliary process, so most of the work of AbortTransaction() is not needed, * but we do need to make sure we've released any LWLocks we are holding. @@ -876,7 +876,7 @@ cleanup(void) * and not an OID at all, until the first reference to a type not known in * TypInfo[]. At that point it will read and cache pg_type in the Typ array, * and subsequently return a real OID (and set the global pointer Ap to - * point at the found row in Typ). So caller must check whether Typ is + * point at the found row in Typ). So caller must check whether Typ is * still NULL to determine what the return value is! 
* ---------------- */ @@ -1073,9 +1073,9 @@ MapArrayTypeName(char *s) * * At bootstrap time, we define a bunch of indexes on system catalogs. * We postpone actually building the indexes until just before we're - * finished with initialization, however. This is because the indexes + * finished with initialization, however. This is because the indexes * themselves have catalog entries, and those have to be included in the - * indexes on those catalogs. Doing it in two phases is the simplest + * indexes on those catalogs. Doing it in two phases is the simplest * way of making sure the indexes have the right contents at the end. */ void @@ -1088,7 +1088,7 @@ index_register(Oid heap, /* * XXX mao 10/31/92 -- don't gc index reldescs, associated info at - * bootstrap time. we'll declare the indexes now, but want to create them + * bootstrap time. we'll declare the indexes now, but want to create them * later. */ diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c index f4fc12d83ac..d9745cabd24 100644 --- a/src/backend/catalog/aclchk.c +++ b/src/backend/catalog/aclchk.c @@ -313,7 +313,7 @@ restrict_and_check_grant(bool is_grant, AclMode avail_goptions, bool all_privs, /* * Restrict the operation to what we can actually grant or revoke, and - * issue a warning if appropriate. (For REVOKE this isn't quite what the + * issue a warning if appropriate. (For REVOKE this isn't quite what the * spec says to do: the spec seems to want a warning only if no privilege * bits actually change in the ACL. In practice that behavior seems much * too noisy, as well as inconsistent with the GRANT case.) @@ -1092,7 +1092,7 @@ SetDefaultACL(InternalDefaultACL *iacls) /* * The default for a global entry is the hard-wired default ACL for the - * particular object type. The default for non-global entries is an empty + * particular object type. The default for non-global entries is an empty * ACL. This must be so because global entries replace the hard-wired * defaults, while others are added on. */ @@ -1662,7 +1662,7 @@ ExecGrant_Attribute(InternalGrant *istmt, Oid relOid, const char *relname, * If the updated ACL is empty, we can set attacl to null, and maybe even * avoid an update of the pg_attribute row. This is worth testing because * we'll come through here multiple times for any relation-level REVOKE, - * even if there were never any column GRANTs. Note we are assuming that + * even if there were never any column GRANTs. Note we are assuming that * the "default" ACL state for columns is empty. */ if (ACL_NUM(new_acl) > 0) @@ -1787,7 +1787,7 @@ ExecGrant_Relation(InternalGrant *istmt) { /* * Mention the object name because the user needs to know - * which operations succeeded. This is required because + * which operations succeeded. This is required because * WARNING allows the command to continue. */ ereport(WARNING, @@ -1816,7 +1816,7 @@ ExecGrant_Relation(InternalGrant *istmt) /* * Set up array in which we'll accumulate any column privilege bits - * that need modification. The array is indexed such that entry [0] + * that need modification. The array is indexed such that entry [0] * corresponds to FirstLowInvalidHeapAttributeNumber. */ num_col_privileges = pg_class_tuple->relnatts - FirstLowInvalidHeapAttributeNumber + 1; @@ -3507,7 +3507,7 @@ pg_aclmask(AclObjectKind objkind, Oid table_oid, AttrNumber attnum, Oid roleid, * * Note: this considers only privileges granted specifically on the column. 
* It is caller's responsibility to take relation-level privileges into account - * as appropriate. (For the same reason, we have no special case for + * as appropriate. (For the same reason, we have no special case for * superuser-ness here.) */ AclMode @@ -3620,12 +3620,12 @@ pg_class_aclmask(Oid table_oid, Oid roleid, /* * Deny anyone permission to update a system catalog unless - * pg_authid.rolcatupdate is set. (This is to let superusers protect + * pg_authid.rolcatupdate is set. (This is to let superusers protect * themselves from themselves.) Also allow it if allowSystemTableMods. * * As of 7.4 we have some updatable system views; those shouldn't be * protected in this way. Assume the view rules can take care of - * themselves. ACL_USAGE is if we ever have system sequences. + * themselves. ACL_USAGE is if we ever have system sequences. */ if ((mask & (ACL_INSERT | ACL_UPDATE | ACL_DELETE | ACL_TRUNCATE | ACL_USAGE)) && IsSystemClass(table_oid, classForm) && @@ -4331,7 +4331,7 @@ pg_attribute_aclcheck_all(Oid table_oid, Oid roleid, AclMode mode, ReleaseSysCache(classTuple); /* - * Initialize result in case there are no non-dropped columns. We want to + * Initialize result in case there are no non-dropped columns. We want to * report failure in such cases for either value of 'how'. */ result = ACLCHECK_NO_PRIV; diff --git a/src/backend/catalog/catalog.c b/src/backend/catalog/catalog.c index 3ec360c2be5..2eb2c2fddf6 100644 --- a/src/backend/catalog/catalog.c +++ b/src/backend/catalog/catalog.c @@ -48,7 +48,7 @@ * IsSystemRelation * True iff the relation is either a system catalog or toast table. * By a system catalog, we mean one that created in the pg_catalog schema - * during initdb. User-created relations in pg_catalog don't count as + * during initdb. User-created relations in pg_catalog don't count as * system catalogs. * * NB: TOAST relations are considered system relations by this test @@ -100,7 +100,7 @@ IsCatalogRelation(Relation relation) bool IsCatalogClass(Oid relid, Form_pg_class reltuple) { - Oid relnamespace = reltuple->relnamespace; + Oid relnamespace = reltuple->relnamespace; /* * Never consider relations outside pg_catalog/pg_toast to be catalog @@ -268,7 +268,7 @@ IsSharedRelation(Oid relationId) * Since the OID is not immediately inserted into the table, there is a * race condition here; but a problem could occur only if someone else * managed to cycle through 2^32 OIDs and generate the same OID before we - * finish inserting our row. This seems unlikely to be a problem. Note + * finish inserting our row. This seems unlikely to be a problem. Note * that if we had to *commit* the row to end the race condition, the risk * would be rather higher; therefore we use SnapshotDirty in the test, * so that we will see uncommitted rows. @@ -314,7 +314,7 @@ GetNewOid(Relation relation) * This is exported separately because there are cases where we want to use * an index that will not be recognized by RelationGetOidIndex: TOAST tables * have indexes that are usable, but have multiple columns and are on - * ordinary columns rather than a true OID column. This code will work + * ordinary columns rather than a true OID column. This code will work * anyway, so long as the OID is the index's first column. The caller must * pass in the actual heap attnum of the OID column, however. 
* diff --git a/src/backend/catalog/dependency.c b/src/backend/catalog/dependency.c index e5116693cf7..d41ba49f877 100644 --- a/src/backend/catalog/dependency.c +++ b/src/backend/catalog/dependency.c @@ -261,7 +261,7 @@ performDeletion(const ObjectAddress *object, depRel = heap_open(DependRelationId, RowExclusiveLock); /* - * Acquire deletion lock on the target object. (Ideally the caller has + * Acquire deletion lock on the target object. (Ideally the caller has * done this already, but many places are sloppy about it.) */ AcquireDeletionLock(object, 0); @@ -373,7 +373,7 @@ performMultipleDeletions(const ObjectAddresses *objects, /* * deleteWhatDependsOn: attempt to drop everything that depends on the - * specified object, though not the object itself. Behavior is always + * specified object, though not the object itself. Behavior is always * CASCADE. * * This is currently used only to clean out the contents of a schema @@ -399,7 +399,7 @@ deleteWhatDependsOn(const ObjectAddress *object, depRel = heap_open(DependRelationId, RowExclusiveLock); /* - * Acquire deletion lock on the target object. (Ideally the caller has + * Acquire deletion lock on the target object. (Ideally the caller has * done this already, but many places are sloppy about it.) */ AcquireDeletionLock(object, 0); @@ -441,7 +441,7 @@ deleteWhatDependsOn(const ObjectAddress *object, * Since this function is currently only used to clean out temporary * schemas, we pass PERFORM_DELETION_INTERNAL here, indicating that * the operation is an automatic system operation rather than a user - * action. If, in the future, this function is used for other + * action. If, in the future, this function is used for other * purposes, we might need to revisit this. */ deleteOneObject(thisobj, &depRel, PERFORM_DELETION_INTERNAL); @@ -458,7 +458,7 @@ deleteWhatDependsOn(const ObjectAddress *object, * * For every object that depends on the starting object, acquire a deletion * lock on the object, add it to targetObjects (if not already there), - * and recursively find objects that depend on it. An object's dependencies + * and recursively find objects that depend on it. An object's dependencies * will be placed into targetObjects before the object itself; this means * that the finished list's order represents a safe deletion order. * @@ -510,7 +510,7 @@ findDependentObjects(const ObjectAddress *object, * will not break a loop at an internal dependency: if we enter the loop * at an "owned" object we will switch and start at the "owning" object * instead. We could probably hack something up to avoid breaking at an - * auto dependency, too, if we had to. However there are no known cases + * auto dependency, too, if we had to. However there are no known cases * where that would be necessary. */ if (stack_address_present_add_flags(object, flags, stack)) @@ -531,7 +531,7 @@ findDependentObjects(const ObjectAddress *object, /* * The target object might be internally dependent on some other object * (its "owner"), and/or be a member of an extension (also considered its - * owner). If so, and if we aren't recursing from the owning object, we + * owner). If so, and if we aren't recursing from the owning object, we * have to transform this deletion request into a deletion request of the * owning object. (We'll eventually recurse back to this object, but the * owning object has to be visited first so it will be deleted after.) 
The @@ -594,7 +594,7 @@ findDependentObjects(const ObjectAddress *object, /* * Exception 1a: if the owning object is listed in * pendingObjects, just release the caller's lock and - * return. We'll eventually complete the DROP when we + * return. We'll eventually complete the DROP when we * reach that entry in the pending list. */ if (pendingObjects && @@ -647,7 +647,7 @@ findDependentObjects(const ObjectAddress *object, * owning object. * * First, release caller's lock on this object and get - * deletion lock on the owning object. (We must release + * deletion lock on the owning object. (We must release * caller's lock to avoid deadlock against a concurrent * deletion of the owning object.) */ @@ -809,7 +809,7 @@ findDependentObjects(const ObjectAddress *object, systable_endscan(scan); /* - * Finally, we can add the target object to targetObjects. Be careful to + * Finally, we can add the target object to targetObjects. Be careful to * include any flags that were passed back down to us from inner recursion * levels. */ @@ -864,7 +864,7 @@ reportDependentObjects(const ObjectAddresses *targetObjects, /* * We limit the number of dependencies reported to the client to * MAX_REPORTED_DEPS, since client software may not deal well with - * enormous error strings. The server log always gets a full report. + * enormous error strings. The server log always gets a full report. */ #define MAX_REPORTED_DEPS 100 @@ -897,7 +897,7 @@ reportDependentObjects(const ObjectAddresses *targetObjects, DEPFLAG_EXTENSION)) { /* - * auto-cascades are reported at DEBUG2, not msglevel. We don't + * auto-cascades are reported at DEBUG2, not msglevel. We don't * try to combine them with the regular message because the * results are too confusing when client_min_messages and * log_min_messages are different. @@ -1079,7 +1079,7 @@ deleteOneObject(const ObjectAddress *object, Relation *depRel, int flags) systable_endscan(scan); /* - * Delete shared dependency references related to this object. Again, if + * Delete shared dependency references related to this object. Again, if * subId = 0, remove records for sub-objects too. */ deleteSharedDependencyRecordsFor(object->classId, object->objectId, @@ -1344,13 +1344,13 @@ recordDependencyOnExpr(const ObjectAddress *depender, * recordDependencyOnSingleRelExpr - find expression dependencies * * As above, but only one relation is expected to be referenced (with - * varno = 1 and varlevelsup = 0). Pass the relation OID instead of a + * varno = 1 and varlevelsup = 0). Pass the relation OID instead of a * range table. An additional frammish is that dependencies on that * relation (or its component columns) will be marked with 'self_behavior', * whereas 'behavior' is used for everything else. * * NOTE: the caller should ensure that a whole-table dependency on the - * specified relation is created separately, if one is needed. In particular, + * specified relation is created separately, if one is needed. In particular, * a whole-row Var "relation.*" will not cause this routine to emit any * dependency item. This is appropriate behavior for subexpressions of an * ordinary query, so other cases need to cope as necessary. @@ -1470,7 +1470,7 @@ find_expr_references_walker(Node *node, /* * A whole-row Var references no specific columns, so adds no new - * dependency. (We assume that there is a whole-table dependency + * dependency. (We assume that there is a whole-table dependency * arising from each underlying rangetable entry. 
While we could * record such a dependency when finding a whole-row Var that * references a relation directly, it's quite unclear how to extend @@ -1529,7 +1529,7 @@ find_expr_references_walker(Node *node, /* * If it's a regclass or similar literal referring to an existing - * object, add a reference to that object. (Currently, only the + * object, add a reference to that object. (Currently, only the * regclass and regconfig cases have any likely use, but we may as * well handle all the OID-alias datatypes consistently.) */ @@ -2130,7 +2130,7 @@ object_address_present_add_flags(const ObjectAddress *object, { /* * We get here if we find a need to delete a column after - * having already decided to drop its whole table. Obviously + * having already decided to drop its whole table. Obviously * we no longer need to drop the column. But don't plaster * its flags on the table. */ diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c index 2cf4bc033c8..33eef9f1caf 100644 --- a/src/backend/catalog/heap.c +++ b/src/backend/catalog/heap.c @@ -21,7 +21,7 @@ * the old heap_create_with_catalog, amcreate, and amdestroy. * those routines will soon call these routines using the function * manager, - * just like the poorly named "NewXXX" routines do. The + * just like the poorly named "NewXXX" routines do. The * "New" routines are all going to die soon, once and for all! * -cim 1/13/91 * @@ -199,7 +199,7 @@ SystemAttributeDefinition(AttrNumber attno, bool relhasoids) /* * If the given name is a system attribute name, return a Form_pg_attribute - * pointer for a prototype definition. If not, return NULL. + * pointer for a prototype definition. If not, return NULL. */ Form_pg_attribute SystemAttributeByName(const char *attname, bool relhasoids) @@ -527,7 +527,7 @@ CheckAttributeType(const char *attname, int i; /* - * Check for self-containment. Eventually we might be able to allow + * Check for self-containment. Eventually we might be able to allow * this (just return without complaint, if so) but it's not clear how * many other places would require anti-recursion defenses before it * would be safe to allow tables to contain their own rowtype. @@ -590,7 +590,7 @@ CheckAttributeType(const char *attname, * attribute to insert (but we ignore attacl and attoptions, which are always * initialized to NULL). * - * indstate is the index state for CatalogIndexInsert. It can be passed as + * indstate is the index state for CatalogIndexInsert. It can be passed as * NULL, in which case we'll fetch the necessary info. (Don't do this when * inserting multiple attributes, because it's a tad more expensive.) */ @@ -757,7 +757,7 @@ AddNewAttributeTuples(Oid new_rel_oid, * Tuple data is taken from new_rel_desc->rd_rel, except for the * variable-width fields which are not present in a cached reldesc. * relacl and reloptions are passed in Datum form (to avoid having - * to reference the data types in heap.h). Pass (Datum) 0 to set them + * to reference the data types in heap.h). Pass (Datum) 0 to set them * to NULL. * -------------------------------- */ @@ -816,7 +816,7 @@ InsertPgClassTuple(Relation pg_class_desc, tup = heap_form_tuple(RelationGetDescr(pg_class_desc), values, nulls); /* - * The new tuple must have the oid already chosen for the rel. Sure would + * The new tuple must have the oid already chosen for the rel. Sure would * be embarrassing to do this sort of thing in polite company. 
*/ HeapTupleSetOid(tup, new_rel_oid); @@ -1372,8 +1372,8 @@ heap_create_init_fork(Relation rel) * RelationRemoveInheritance * * Formerly, this routine checked for child relations and aborted the - * deletion if any were found. Now we rely on the dependency mechanism - * to check for or delete child relations. By the time we get here, + * deletion if any were found. Now we rely on the dependency mechanism + * to check for or delete child relations. By the time we get here, * there are no children and we need only remove any pg_inherits rows * linking this relation to its parent(s). */ @@ -1658,7 +1658,7 @@ RemoveAttrDefault(Oid relid, AttrNumber attnum, /* * RemoveAttrDefaultById * - * Remove a pg_attrdef entry specified by OID. This is the guts of + * Remove a pg_attrdef entry specified by OID. This is the guts of * attribute-default removal. Note it should be called via performDeletion, * not directly. */ @@ -2065,7 +2065,7 @@ StoreConstraints(Relation rel, List *cooked_constraints, bool is_internal) /* * Deparsing of constraint expressions will fail unless the just-created - * pg_attribute tuples for this relation are made visible. So, bump the + * pg_attribute tuples for this relation are made visible. So, bump the * command counter. CAUTION: this will cause a relcache entry rebuild. */ CommandCounterIncrement(); @@ -2117,7 +2117,7 @@ StoreConstraints(Relation rel, List *cooked_constraints, bool is_internal) * the default and constraint expressions added to the relation. * * NB: caller should have opened rel with AccessExclusiveLock, and should - * hold that lock till end of transaction. Also, we assume the caller has + * hold that lock till end of transaction. Also, we assume the caller has * done a CommandCounterIncrement if necessary to make the relation's catalog * tuples visible. */ @@ -2262,7 +2262,7 @@ AddRelationNewConstraints(Relation rel, checknames = lappend(checknames, ccname); /* - * Check against pre-existing constraints. If we are allowed to + * Check against pre-existing constraints. If we are allowed to * merge with an existing constraint, there's no more to do here. * (We omit the duplicate constraint from the result, which is * what ATAddCheckConstraint wants.) @@ -2279,7 +2279,7 @@ AddRelationNewConstraints(Relation rel, * column constraint and "tab_check" for a table constraint. We * no longer have any info about the syntactic positioning of the * constraint phrase, so we approximate this by seeing whether the - * expression references more than one column. (If the user + * expression references more than one column. (If the user * played by the rules, the result is the same...) * * Note: pull_var_clause() doesn't descend into sublinks, but we @@ -2664,7 +2664,7 @@ RemoveStatistics(Oid relid, AttrNumber attnum) * with the heap relation to zero tuples. * * The routine will truncate and then reconstruct the indexes on - * the specified relation. Caller must hold exclusive lock on rel. + * the specified relation. Caller must hold exclusive lock on rel. */ static void RelationTruncateIndexes(Relation heapRelation) @@ -2704,7 +2704,7 @@ RelationTruncateIndexes(Relation heapRelation) * This routine deletes all data within all the specified relations. * * This is not transaction-safe! There is another, transaction-safe - * implementation in commands/tablecmds.c. We now use this only for + * implementation in commands/tablecmds.c. We now use this only for * ON COMMIT truncation of temporary tables, where it doesn't matter. 
*/ void @@ -2813,7 +2813,7 @@ heap_truncate_check_FKs(List *relations, bool tempTables) return; /* - * Otherwise, must scan pg_constraint. We make one pass with all the + * Otherwise, must scan pg_constraint. We make one pass with all the * relations considered; if this finds nothing, then all is well. */ dependents = heap_truncate_find_FKs(oids); @@ -2874,7 +2874,7 @@ heap_truncate_check_FKs(List *relations, bool tempTables) * behavior to change depending on chance locations of rows in pg_constraint.) * * Note: caller should already have appropriate lock on all rels mentioned - * in relationIds. Since adding or dropping an FK requires exclusive lock + * in relationIds. Since adding or dropping an FK requires exclusive lock * on both rels, this ensures that the answer will be stable. */ List * diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c index c932c833421..80acc0ec27f 100644 --- a/src/backend/catalog/index.c +++ b/src/backend/catalog/index.c @@ -411,7 +411,7 @@ ConstructTupleDescriptor(Relation heapRelation, /* * We do not yet have the correct relation OID for the index, so just - * set it invalid for now. InitializeAttributeOids() will fix it + * set it invalid for now. InitializeAttributeOids() will fix it * later. */ to->attrelid = InvalidOid; @@ -651,7 +651,7 @@ UpdateIndexRelation(Oid indexoid, * heapRelation: table to build index on (suitably locked by caller) * indexRelationName: what it say * indexRelationId: normally, pass InvalidOid to let this routine - * generate an OID for the index. During bootstrap this may be + * generate an OID for the index. During bootstrap this may be * nonzero to specify a preselected OID. * relFileNode: normally, pass InvalidOid to get new storage. May be * nonzero to attach an existing valid build. @@ -670,7 +670,7 @@ UpdateIndexRelation(Oid indexoid, * allow_system_table_mods: allow table to be a system catalog * skip_build: true to skip the index_build() step for the moment; caller * must do it later (typically via reindex_index()) - * concurrent: if true, do not lock the table against writers. The index + * concurrent: if true, do not lock the table against writers. The index * will be marked "invalid" and the caller must take additional steps * to fix it up. * is_internal: if true, post creation hook for new index @@ -960,7 +960,7 @@ index_create(Relation heapRelation, /* * If there are no simply-referenced columns, give the index an - * auto dependency on the whole table. In most cases, this will + * auto dependency on the whole table. In most cases, this will * be redundant, but it might not be if the index expressions and * predicate contain no Vars or only whole-row Vars. */ @@ -1085,7 +1085,7 @@ index_create(Relation heapRelation, /* * Close the index; but we keep the lock that we acquired above until end - * of transaction. Closing the heap is caller's responsibility. + * of transaction. Closing the heap is caller's responsibility. */ index_close(indexRelation, NoLock); @@ -1243,7 +1243,7 @@ index_constraint_create(Relation heapRelation, * have been so marked already, so no need to clear the flag in the other * case. * - * Note: this might better be done by callers. We do it here to avoid + * Note: this might better be done by callers. We do it here to avoid * exposing index_update_stats() globally, but that wouldn't be necessary * if relhaspkey went away. */ @@ -1256,10 +1256,10 @@ index_constraint_create(Relation heapRelation, /* * If needed, mark the index as primary and/or deferred in pg_index. 
* - * Note: When making an existing index into a constraint, caller must - * have a table lock that prevents concurrent table updates; otherwise, - * there is a risk that concurrent readers of the table will miss seeing - * this index at all. + * Note: When making an existing index into a constraint, caller must have + * a table lock that prevents concurrent table updates; otherwise, there + * is a risk that concurrent readers of the table will miss seeing this + * index at all. */ if (update_pgindex && (mark_as_primary || deferrable)) { @@ -1336,7 +1336,7 @@ index_drop(Oid indexId, bool concurrent) * in multiple steps and waiting out any transactions that might be using * the index, so we don't need exclusive lock on the parent table. Instead * we take ShareUpdateExclusiveLock, to ensure that two sessions aren't - * doing CREATE/DROP INDEX CONCURRENTLY on the same index. (We will get + * doing CREATE/DROP INDEX CONCURRENTLY on the same index. (We will get * AccessExclusiveLock on the index below, once we're sure nobody else is * using it.) */ @@ -1376,7 +1376,7 @@ index_drop(Oid indexId, bool concurrent) * non-concurrent case we can just do that now. In the concurrent case * it's a bit trickier. The predicate locks must be moved when there are * no index scans in progress on the index and no more can subsequently - * start, so that no new predicate locks can be made on the index. Also, + * start, so that no new predicate locks can be made on the index. Also, * they must be moved before heap inserts stop maintaining the index, else * the conflict with the predicate lock on the index gap could be missed * before the lock on the heap relation is in place to detect a conflict @@ -1386,11 +1386,11 @@ index_drop(Oid indexId, bool concurrent) { /* * We must commit our transaction in order to make the first pg_index - * state update visible to other sessions. If the DROP machinery has + * state update visible to other sessions. If the DROP machinery has * already performed any other actions (removal of other objects, * pg_depend entries, etc), the commit would make those actions * permanent, which would leave us with inconsistent catalog state if - * we fail partway through the following sequence. Since DROP INDEX + * we fail partway through the following sequence. Since DROP INDEX * CONCURRENTLY is restricted to dropping just one index that has no * dependencies, we should get here before anything's been done --- * but let's check that to be sure. We can verify that the current @@ -1426,7 +1426,7 @@ index_drop(Oid indexId, bool concurrent) * We must commit our current transaction so that the indisvalid * update becomes visible to other transactions; then start another. * Note that any previously-built data structures are lost in the - * commit. The only data we keep past here are the relation IDs. + * commit. The only data we keep past here are the relation IDs. * * Before committing, get a session-level lock on the table, to ensure * that neither it nor the index can be dropped before we finish. This @@ -1443,10 +1443,10 @@ index_drop(Oid indexId, bool concurrent) /* * Now we must wait until no running transaction could be using the * index for a query. Use AccessExclusiveLock here to check for - * running transactions that hold locks of any kind on the table. - * Note we do not need to worry about xacts that open the table for - * reading after this point; they will see the index as invalid when - * they open the relation. + * running transactions that hold locks of any kind on the table. 
Note + * we do not need to worry about xacts that open the table for reading + * after this point; they will see the index as invalid when they open + * the relation. * * Note: the reason we use actual lock acquisition here, rather than * just checking the ProcArray and sleeping, is that deadlock is @@ -1468,7 +1468,7 @@ index_drop(Oid indexId, bool concurrent) /* * Now we are sure that nobody uses the index for queries; they just - * might have it open for updating it. So now we can unset indisready + * might have it open for updating it. So now we can unset indisready * and indislive, then wait till nobody could be using it at all * anymore. */ @@ -1599,7 +1599,7 @@ index_drop(Oid indexId, bool concurrent) * * IndexInfo stores the information about the index that's needed by * FormIndexDatum, which is used for both index_build() and later insertion - * of individual index tuples. Normally we build an IndexInfo for an index + * of individual index tuples. Normally we build an IndexInfo for an index * just once per command, and then use it for (potentially) many tuples. * ---------------- */ @@ -1669,7 +1669,7 @@ BuildIndexInfo(Relation index) * context must point to the heap tuple passed in. * * Notice we don't actually call index_form_tuple() here; we just prepare - * its input arrays values[] and isnull[]. This is because the index AM + * its input arrays values[] and isnull[]. This is because the index AM * may wish to alter the data before storage. * ---------------- */ @@ -1735,7 +1735,7 @@ FormIndexDatum(IndexInfo *indexInfo, * index_update_stats --- update pg_class entry after CREATE INDEX or REINDEX * * This routine updates the pg_class row of either an index or its parent - * relation after CREATE INDEX or REINDEX. Its rather bizarre API is designed + * relation after CREATE INDEX or REINDEX. Its rather bizarre API is designed * to ensure we can do all the necessary work in just one update. * * hasindex: set relhasindex to this value @@ -1747,7 +1747,7 @@ FormIndexDatum(IndexInfo *indexInfo, * * NOTE: an important side-effect of this operation is that an SI invalidation * message is sent out to all backends --- including me --- causing relcache - * entries to be flushed or updated with the new data. This must happen even + * entries to be flushed or updated with the new data. This must happen even * if we find that no change is needed in the pg_class row. When updating * a heap entry, this ensures that other backends find out about the new * index. When updating an index, it's important because some index AMs @@ -1786,13 +1786,13 @@ index_update_stats(Relation rel, * 4. Even with just a single CREATE INDEX, there's a risk factor because * someone else might be trying to open the rel while we commit, and this * creates a race condition as to whether he will see both or neither of - * the pg_class row versions as valid. Again, a non-transactional update + * the pg_class row versions as valid. Again, a non-transactional update * avoids the risk. It is indeterminate which state of the row the other * process will see, but it doesn't matter (if he's only taking * AccessShareLock, then it's not critical that he see relhasindex true). * * It is safe to use a non-transactional update even though our - * transaction could still fail before committing. Setting relhasindex + * transaction could still fail before committing. Setting relhasindex * true is safe even if there are no indexes (VACUUM will eventually fix * it), likewise for relhaspkey. 
And of course the new relpages and * reltuples counts are correct regardless. However, we don't want to @@ -1804,7 +1804,7 @@ index_update_stats(Relation rel, pg_class = heap_open(RelationRelationId, RowExclusiveLock); /* - * Make a copy of the tuple to update. Normally we use the syscache, but + * Make a copy of the tuple to update. Normally we use the syscache, but * we can't rely on that during bootstrap or while reindexing pg_class * itself. */ @@ -1903,7 +1903,7 @@ index_update_stats(Relation rel, * index_build - invoke access-method-specific index build procedure * * On entry, the index's catalog entries are valid, and its physical disk - * file has been created but is empty. We call the AM-specific build + * file has been created but is empty. We call the AM-specific build * procedure to fill in the index contents. We then update the pg_class * entries of the index and heap relation as needed, using statistics * returned by ambuild as well as data passed by the caller. @@ -2001,7 +2001,7 @@ index_build(Relation heapRelation, * Therefore, this code path can only be taken during non-concurrent * CREATE INDEX. Thus the fact that heap_update will set the pg_index * tuple's xmin doesn't matter, because that tuple was created in the - * current transaction anyway. That also means we don't need to worry + * current transaction anyway. That also means we don't need to worry * about any concurrent readers of the tuple; no other transaction can see * it yet. */ @@ -2050,7 +2050,7 @@ index_build(Relation heapRelation, /* * If it's for an exclusion constraint, make a second pass over the heap - * to verify that the constraint is satisfied. We must not do this until + * to verify that the constraint is satisfied. We must not do this until * the index is fully valid. (Broken HOT chains shouldn't matter, though; * see comments for IndexCheckExclusion.) */ @@ -2075,8 +2075,8 @@ index_build(Relation heapRelation, * things to add it to the new index. After we return, the AM's index * build procedure does whatever cleanup it needs. * - * The total count of heap tuples is returned. This is for updating pg_class - * statistics. (It's annoying not to be able to do that here, but we want + * The total count of heap tuples is returned. This is for updating pg_class + * statistics. (It's annoying not to be able to do that here, but we want * to merge that update with others; see index_update_stats.) Note that the * index AM itself must keep track of the number of index tuples; we don't do * so here because the AM might reject some of the tuples for its own reasons, @@ -2126,7 +2126,7 @@ IndexBuildHeapScan(Relation heapRelation, /* * Need an EState for evaluation of index expressions and partial-index - * predicates. Also a slot to hold the current tuple. + * predicates. Also a slot to hold the current tuple. */ estate = CreateExecutorState(); econtext = GetPerTupleExprContext(estate); @@ -2251,7 +2251,7 @@ IndexBuildHeapScan(Relation heapRelation, * building it, and may need to see such tuples.) * * However, if it was HOT-updated then we must only index - * the live tuple at the end of the HOT-chain. Since this + * the live tuple at the end of the HOT-chain. Since this * breaks semantics for pre-existing snapshots, mark the * index as unusable for them. */ @@ -2271,7 +2271,7 @@ IndexBuildHeapScan(Relation heapRelation, /* * Since caller should hold ShareLock or better, normally * the only way to see this is if it was inserted earlier - * in our own transaction. 
However, it can happen in + * in our own transaction. However, it can happen in * system catalogs, since we tend to release write lock * before commit there. Give a warning if neither case * applies. @@ -2426,7 +2426,7 @@ IndexBuildHeapScan(Relation heapRelation, /* * You'd think we should go ahead and build the index tuple here, but - * some index AMs want to do further processing on the data first. So + * some index AMs want to do further processing on the data first. So * pass the values[] and isnull[] arrays, instead. */ @@ -2517,7 +2517,7 @@ IndexCheckExclusion(Relation heapRelation, /* * Need an EState for evaluation of index expressions and partial-index - * predicates. Also a slot to hold the current tuple. + * predicates. Also a slot to hold the current tuple. */ estate = CreateExecutorState(); econtext = GetPerTupleExprContext(estate); @@ -2597,11 +2597,11 @@ IndexCheckExclusion(Relation heapRelation, * We do a concurrent index build by first inserting the catalog entry for the * index via index_create(), marking it not indisready and not indisvalid. * Then we commit our transaction and start a new one, then we wait for all - * transactions that could have been modifying the table to terminate. Now + * transactions that could have been modifying the table to terminate. Now * we know that any subsequently-started transactions will see the index and * honor its constraints on HOT updates; so while existing HOT-chains might * be broken with respect to the index, no currently live tuple will have an - * incompatible HOT update done to it. We now build the index normally via + * incompatible HOT update done to it. We now build the index normally via * index_build(), while holding a weak lock that allows concurrent * insert/update/delete. Also, we index only tuples that are valid * as of the start of the scan (see IndexBuildHeapScan), whereas a normal @@ -2615,13 +2615,13 @@ IndexCheckExclusion(Relation heapRelation, * * Next, we mark the index "indisready" (but still not "indisvalid") and * commit the second transaction and start a third. Again we wait for all - * transactions that could have been modifying the table to terminate. Now + * transactions that could have been modifying the table to terminate. Now * we know that any subsequently-started transactions will see the index and * insert their new tuples into it. We then take a new reference snapshot * which is passed to validate_index(). Any tuples that are valid according * to this snap, but are not in the index, must be added to the index. * (Any tuples committed live after the snap will be inserted into the - * index by their originating transaction. Any tuples committed dead before + * index by their originating transaction. Any tuples committed dead before * the snap need not be indexed, because we will wait out all transactions * that might care about them before we mark the index valid.) * @@ -2630,7 +2630,7 @@ IndexCheckExclusion(Relation heapRelation, * ever say "delete it". (This should be faster than a plain indexscan; * also, not all index AMs support full-index indexscan.) Then we sort the * TIDs, and finally scan the table doing a "merge join" against the TID list - * to see which tuples are missing from the index. Thus we will ensure that + * to see which tuples are missing from the index. Thus we will ensure that * all tuples valid according to the reference snapshot are in the index. 
* * Building a unique index this way is tricky: we might try to insert a @@ -2646,7 +2646,7 @@ IndexCheckExclusion(Relation heapRelation, * were alive at the time of the reference snapshot are gone; this is * necessary to be sure there are none left with a transaction snapshot * older than the reference (and hence possibly able to see tuples we did - * not index). Then we mark the index "indisvalid" and commit. Subsequent + * not index). Then we mark the index "indisvalid" and commit. Subsequent * transactions will be able to use it for queries. * * Doing two full table scans is a brute-force strategy. We could try to be @@ -2672,7 +2672,7 @@ validate_index(Oid heapId, Oid indexId, Snapshot snapshot) indexRelation = index_open(indexId, RowExclusiveLock); /* - * Fetch info needed for index_insert. (You might think this should be + * Fetch info needed for index_insert. (You might think this should be * passed in from DefineIndex, but its copy is long gone due to having * been built in a previous transaction.) */ @@ -2789,7 +2789,7 @@ validate_index_heapscan(Relation heapRelation, /* * Need an EState for evaluation of index expressions and partial-index - * predicates. Also a slot to hold the current tuple. + * predicates. Also a slot to hold the current tuple. */ estate = CreateExecutorState(); econtext = GetPerTupleExprContext(estate); @@ -2838,7 +2838,7 @@ validate_index_heapscan(Relation heapRelation, * visit the live tuples in order by their offsets, but the root * offsets that we need to compare against the index contents might be * ordered differently. So we might have to "look back" within the - * tuplesort output, but only within the current page. We handle that + * tuplesort output, but only within the current page. We handle that * by keeping a bool array in_index[] showing all the * already-passed-over tuplesort output TIDs of the current page. We * clear that array here, when advancing onto a new heap page. @@ -2923,7 +2923,7 @@ validate_index_heapscan(Relation heapRelation, /* * For the current heap tuple, extract all the attributes we use - * in this index, and note which are null. This also performs + * in this index, and note which are null. This also performs * evaluation of any expressions needed. */ FormIndexDatum(indexInfo, @@ -2945,7 +2945,7 @@ validate_index_heapscan(Relation heapRelation, * for a uniqueness check on the whole HOT-chain. That is, the * tuple we have here could be dead because it was already * HOT-updated, and if so the updating transaction will not have - * thought it should insert index entries. The index AM will + * thought it should insert index entries. The index AM will * check the whole HOT-chain and correctly detect a conflict if * there is one. */ @@ -3068,7 +3068,7 @@ index_set_state_flags(Oid indexId, IndexStateFlagsAction action) /* * IndexGetRelation: given an index's relation OID, get the OID of the - * relation it is an index on. Uses the system cache. + * relation it is an index on. Uses the system cache. */ Oid IndexGetRelation(Oid indexId, bool missing_ok) @@ -3105,7 +3105,7 @@ reindex_index(Oid indexId, bool skip_constraint_checks) volatile bool skipped_constraint = false; /* - * Open and lock the parent heap relation. ShareLock is sufficient since + * Open and lock the parent heap relation. ShareLock is sufficient since * we only need to be sure no schema or data changes are going on. 
*/ heapId = IndexGetRelation(indexId, false); @@ -3193,7 +3193,7 @@ reindex_index(Oid indexId, bool skip_constraint_checks) * chains, we had better force indcheckxmin true, because the normal * argument that the HOT chains couldn't conflict with the index is * suspect for an invalid index. (A conflict is definitely possible if - * the index was dead. It probably shouldn't happen otherwise, but let's + * the index was dead. It probably shouldn't happen otherwise, but let's * be conservative.) In this case advancing the usability horizon is * appropriate. * @@ -3277,7 +3277,7 @@ reindex_index(Oid indexId, bool skip_constraint_checks) * the data in a manner that risks a change in constraint validity. * * Returns true if any indexes were rebuilt (including toast table's index - * when relevant). Note that a CommandCounterIncrement will occur after each + * when relevant). Note that a CommandCounterIncrement will occur after each * index rebuild. */ bool @@ -3290,7 +3290,7 @@ reindex_relation(Oid relid, int flags) bool result; /* - * Open and lock the relation. ShareLock is sufficient since we only need + * Open and lock the relation. ShareLock is sufficient since we only need * to prevent schema and data changes in it. The lock level used here * should match ReindexTable(). */ @@ -3309,7 +3309,7 @@ reindex_relation(Oid relid, int flags) * reindex_index will attempt to update the pg_class rows for the relation * and index. If we are processing pg_class itself, we want to make sure * that the updates do not try to insert index entries into indexes we - * have not processed yet. (When we are trying to recover from corrupted + * have not processed yet. (When we are trying to recover from corrupted * indexes, that could easily cause a crash.) We can accomplish this * because CatalogUpdateIndexes will use the relcache's index list to know * which indexes to update. We just force the index list to be only the @@ -3318,7 +3318,7 @@ reindex_relation(Oid relid, int flags) * It is okay to not insert entries into the indexes we have not processed * yet because all of this is transaction-safe. If we fail partway * through, the updated rows are dead and it doesn't matter whether they - * have index entries. Also, a new pg_class index will be created with a + * have index entries. Also, a new pg_class index will be created with a * correct entry for its own pg_class row because we do * RelationSetNewRelfilenode() before we do index_build(). * diff --git a/src/backend/catalog/indexing.c b/src/backend/catalog/indexing.c index 4bf412fb0b6..05aa56e8593 100644 --- a/src/backend/catalog/indexing.c +++ b/src/backend/catalog/indexing.c @@ -149,7 +149,7 @@ CatalogIndexInsert(CatalogIndexState indstate, HeapTuple heapTuple) * CatalogUpdateIndexes - do all the indexing work for a new catalog tuple * * This is a convenience routine for the common case where we only need - * to insert or update a single tuple in a system catalog. Avoid using it for + * to insert or update a single tuple in a system catalog. Avoid using it for * multiple tuples, since opening the indexes and building the index info * structures is moderately expensive. */ diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c index 5bf6d289d84..89df585b870 100644 --- a/src/backend/catalog/namespace.c +++ b/src/backend/catalog/namespace.c @@ -66,10 +66,10 @@ * when we are obeying an override search path spec that says not to use the * temp namespace, or the temp namespace is included in the explicit list.) * - * 2. 
The system catalog namespace is always searched. If the system + * 2. The system catalog namespace is always searched. If the system * namespace is present in the explicit path then it will be searched in * the specified order; otherwise it will be searched after TEMP tables and - * *before* the explicit list. (It might seem that the system namespace + * *before* the explicit list. (It might seem that the system namespace * should be implicitly last, but this behavior appears to be required by * SQL99. Also, this provides a way to search the system namespace first * without thereby making it the default creation target namespace.) @@ -87,7 +87,7 @@ * to refer to the current backend's temp namespace. This is usually also * ignorable if the temp namespace hasn't been set up, but there's a special * case: if "pg_temp" appears first then it should be the default creation - * target. We kluge this case a little bit so that the temp namespace isn't + * target. We kluge this case a little bit so that the temp namespace isn't * set up until the first attempt to create something in it. (The reason for * klugery is that we can't create the temp namespace outside a transaction, * but initial GUC processing of search_path happens outside a transaction.) @@ -98,7 +98,7 @@ * In bootstrap mode, the search path is set equal to "pg_catalog", so that * the system namespace is the only one searched or inserted into. * initdb is also careful to set search_path to "pg_catalog" for its - * post-bootstrap standalone backend runs. Otherwise the default search + * post-bootstrap standalone backend runs. Otherwise the default search * path is determined by GUC. The factory default path contains the PUBLIC * namespace (if it exists), preceded by the user's personal namespace * (if one exists). @@ -162,13 +162,13 @@ static List *overrideStack = NIL; /* * myTempNamespace is InvalidOid until and unless a TEMP namespace is set up * in a particular backend session (this happens when a CREATE TEMP TABLE - * command is first executed). Thereafter it's the OID of the temp namespace. + * command is first executed). Thereafter it's the OID of the temp namespace. * * myTempToastNamespace is the OID of the namespace for my temp tables' toast - * tables. It is set when myTempNamespace is, and is InvalidOid before that. + * tables. It is set when myTempNamespace is, and is InvalidOid before that. * * myTempNamespaceSubID shows whether we've created the TEMP namespace in the - * current subtransaction. The flag propagates up the subtransaction tree, + * current subtransaction. The flag propagates up the subtransaction tree, * so the main transaction will correctly recognize the flag if all * intermediate subtransactions commit. When it is InvalidSubTransactionId, * we either haven't made the TEMP namespace yet, or have successfully @@ -250,7 +250,7 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode, } /* - * DDL operations can change the results of a name lookup. Since all such + * DDL operations can change the results of a name lookup. 
Since all such * operations will generate invalidation messages, we keep track of * whether any such messages show up while we're performing the operation, * and retry until either (1) no more invalidation messages show up or (2) @@ -259,7 +259,7 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode, * But if lockmode = NoLock, then we assume that either the caller is OK * with the answer changing under them, or that they already hold some * appropriate lock, and therefore return the first answer we get without - * checking for invalidation messages. Also, if the requested lock is + * checking for invalidation messages. Also, if the requested lock is * already held, no LockRelationOid will not AcceptInvalidationMessages, * so we may fail to notice a change. We could protect against that case * by calling AcceptInvalidationMessages() before beginning this loop, but @@ -396,7 +396,7 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode, break; /* - * Something may have changed. Let's repeat the name lookup, to make + * Something may have changed. Let's repeat the name lookup, to make * sure this name still references the same relation it did * previously. */ @@ -869,7 +869,7 @@ TypeIsVisible(Oid typid) * and the returned nvargs will always be zero. * * If expand_defaults is true, functions that could match after insertion of - * default argument values will also be retrieved. In this case the returned + * default argument values will also be retrieved. In this case the returned * structs could have nargs > passed-in nargs, and ndargs is set to the number * of additional args (which can be retrieved from the function's * proargdefaults entry). @@ -1032,7 +1032,7 @@ FuncnameGetCandidates(List *names, int nargs, List *argnames, * Call uses positional notation * * Check if function is variadic, and get variadic element type if - * so. If expand_variadic is false, we should just ignore + * so. If expand_variadic is false, we should just ignore * variadic-ness. */ if (pronargs <= nargs && expand_variadic) @@ -1162,7 +1162,7 @@ FuncnameGetCandidates(List *names, int nargs, List *argnames, if (prevResult) { /* - * We have a match with a previous result. Decide which one + * We have a match with a previous result. Decide which one * to keep, or mark it ambiguous if we can't decide. The * logic here is preference > 0 means prefer the old result, * preference < 0 means prefer the new, preference = 0 means @@ -1553,7 +1553,7 @@ OpernameGetOprid(List *names, Oid oprleft, Oid oprright) * identical entries in later namespaces. * * The returned items always have two args[] entries --- one or the other - * will be InvalidOid for a prefix or postfix oprkind. nargs is 2, too. + * will be InvalidOid for a prefix or postfix oprkind. nargs is 2, too. */ FuncCandidateList OpernameGetCandidates(List *names, char oprkind, bool missing_schema_ok) @@ -2536,7 +2536,7 @@ get_ts_config_oid(List *names, bool missing_ok) /* * TSConfigIsVisible * Determine whether a text search configuration (identified by OID) - * is visible in the current search path. Visible means "would be found + * is visible in the current search path. Visible means "would be found * by searching for the unqualified text search configuration name". */ bool @@ -2855,7 +2855,7 @@ QualifiedNameGetCreationNamespace(List *names, char **objname_p) /* * get_namespace_oid - given a namespace name, look up the OID * - * If missing_ok is false, throw an error if namespace name not found. 
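[Editorial illustration, not part of the diff.] The RangeVarGetRelidExtended comments above describe retrying a name lookup until no invalidation messages arrived while it ran (except under NoLock). A standalone sketch of that loop shape, with a fake counter standing in for the backend's invalidation machinery; lookup_name, lookup_with_retry, and the simulated race are all hypothetical, and the real code also acquires the requested lock between lookup and recheck.

#include <stdint.h>
#include <stdio.h>

/* Pretend shared state: bumped whenever a "DDL" invalidation arrives. */
static uint64_t inval_counter = 0;

/* Hypothetical catalog lookup; the first call races with a concurrent DDL. */
static int
lookup_name(const char *name)
{
    static int first_call = 1;

    (void) name;
    if (first_call)
    {
        first_call = 0;
        inval_counter++;    /* a concurrent DDL happened mid-lookup */
        return 41;          /* possibly stale answer */
    }
    return 42;              /* stable answer */
}

/* Retry until a lookup completes without any invalidation arriving. */
static int
lookup_with_retry(const char *name)
{
    for (;;)
    {
        uint64_t before = inval_counter;
        int      result = lookup_name(name);

        if (inval_counter == before)
            return result;  /* nothing changed under us: trust the answer */
        /* otherwise something changed; repeat with the new state */
    }
}

int
main(void)
{
    printf("resolved to %d\n", lookup_with_retry("some_table"));
    return 0;
}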
If + * If missing_ok is false, throw an error if namespace name not found. If * true, just return InvalidOid. */ Oid @@ -3070,7 +3070,7 @@ GetTempNamespaceBackendId(Oid namespaceId) /* * GetTempToastNamespace - get the OID of my temporary-toast-table namespace, - * which must already be assigned. (This is only used when creating a toast + * which must already be assigned. (This is only used when creating a toast * table for a temp table, so we must have already done InitTempTableNamespace) */ Oid @@ -3168,8 +3168,8 @@ OverrideSearchPathMatchesCurrent(OverrideSearchPath *path) * * It's possible that newpath->useTemp is set but there is no longer any * active temp namespace, if the path was saved during a transaction that - * created a temp namespace and was later rolled back. In that case we just - * ignore useTemp. A plausible alternative would be to create a new temp + * created a temp namespace and was later rolled back. In that case we just + * ignore useTemp. A plausible alternative would be to create a new temp * namespace, but for existing callers that's not necessary because an empty * temp namespace wouldn't affect their results anyway. * @@ -3202,7 +3202,7 @@ PushOverrideSearchPath(OverrideSearchPath *newpath) firstNS = linitial_oid(oidlist); /* - * Add any implicitly-searched namespaces to the list. Note these go on + * Add any implicitly-searched namespaces to the list. Note these go on * the front, not the back; also notice that we do not check USAGE * permissions for these. */ @@ -3525,7 +3525,7 @@ recomputeNamespacePath(void) } /* - * Remember the first member of the explicit list. (Note: this is + * Remember the first member of the explicit list. (Note: this is * nominally wrong if temp_missing, but we need it anyway to distinguish * explicit from implicit mention of pg_catalog.) */ @@ -3535,7 +3535,7 @@ recomputeNamespacePath(void) firstNS = linitial_oid(oidlist); /* - * Add any implicitly-searched namespaces to the list. Note these go on + * Add any implicitly-searched namespaces to the list. Note these go on * the front, not the back; also notice that we do not check USAGE * permissions for these. */ @@ -3590,7 +3590,7 @@ InitTempTableNamespace(void) /* * First, do permission check to see if we are authorized to make temp - * tables. We use a nonstandard error message here since "databasename: + * tables. We use a nonstandard error message here since "databasename: * permission denied" might be a tad cryptic. * * Note that ACL_CREATE_TEMP rights are rechecked in pg_namespace_aclmask; @@ -3609,9 +3609,9 @@ InitTempTableNamespace(void) * Do not allow a Hot Standby slave session to make temp tables. Aside * from problems with modifying the system catalogs, there is a naming * conflict: pg_temp_N belongs to the session with BackendId N on the - * master, not to a slave session with the same BackendId. We should not + * master, not to a slave session with the same BackendId. We should not * be able to get here anyway due to XactReadOnly checks, but let's just - * make real sure. Note that this also backstops various operations that + * make real sure. Note that this also backstops various operations that * allow XactReadOnly transactions to modify temp tables; they'd need * RecoveryInProgress checks if not for this. */ @@ -3967,7 +3967,7 @@ fetch_search_path(bool includeImplicit) /* * If the temp namespace should be first, force it to exist. This is so * that callers can trust the result to reflect the actual default - * creation namespace. 
It's a bit bogus to do this here, since + * creation namespace. It's a bit bogus to do this here, since * current_schema() is supposedly a stable function without side-effects, * but the alternatives seem worse. */ @@ -3989,7 +3989,7 @@ fetch_search_path(bool includeImplicit) /* * Fetch the active search path into a caller-allocated array of OIDs. - * Returns the number of path entries. (If this is more than sarray_len, + * Returns the number of path entries. (If this is more than sarray_len, * then the data didn't fit and is not all stored.) * * The returned list always includes the implicitly-prepended namespaces, diff --git a/src/backend/catalog/objectaddress.c b/src/backend/catalog/objectaddress.c index 2b837a99c10..c7c8f4b1a36 100644 --- a/src/backend/catalog/objectaddress.c +++ b/src/backend/catalog/objectaddress.c @@ -467,7 +467,7 @@ static void getRelationIdentity(StringInfo buffer, Oid relid); * drop operation. * * Note: If the object is not found, we don't give any indication of the - * reason. (It might have been a missing schema if the name was qualified, or + * reason. (It might have been a missing schema if the name was qualified, or * an inexistant type name in case of a cast, function or operator; etc). * Currently there is only one caller that might be interested in such info, so * we don't spend much effort here. If more callers start to care, it might be @@ -665,7 +665,7 @@ get_object_address(ObjectType objtype, List *objname, List *objargs, /* * If we're dealing with a relation or attribute, then the relation is - * already locked. Otherwise, we lock it now. + * already locked. Otherwise, we lock it now. */ if (address.classId != RelationRelationId) { diff --git a/src/backend/catalog/pg_aggregate.c b/src/backend/catalog/pg_aggregate.c index d99c2e5edae..1ad923ca6c5 100644 --- a/src/backend/catalog/pg_aggregate.c +++ b/src/backend/catalog/pg_aggregate.c @@ -152,10 +152,10 @@ AggregateCreate(const char *aggName, errdetail("An aggregate using a polymorphic transition type must have at least one polymorphic argument."))); /* - * An ordered-set aggregate that is VARIADIC must be VARIADIC ANY. In + * An ordered-set aggregate that is VARIADIC must be VARIADIC ANY. In * principle we could support regular variadic types, but it would make * things much more complicated because we'd have to assemble the correct - * subsets of arguments into array values. Since no standard aggregates + * subsets of arguments into array values. Since no standard aggregates * have use for such a case, we aren't bothering for now. */ if (AGGKIND_IS_ORDERED_SET(aggKind) && OidIsValid(variadicArgType) && @@ -167,7 +167,7 @@ AggregateCreate(const char *aggName, /* * If it's a hypothetical-set aggregate, there must be at least as many * direct arguments as aggregated ones, and the last N direct arguments - * must match the aggregated ones in type. (We have to check this again + * must match the aggregated ones in type. (We have to check this again * when the aggregate is called, in case ANY is involved, but it makes * sense to reject the aggregate definition now if the declared arg types * don't match up.) 
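[Editorial illustration, not part of the diff.] The pg_aggregate.c hunk above explains that a hypothetical-set aggregate needs at least as many direct arguments as aggregated ones, with the trailing direct arguments matching the aggregated argument types. A small standalone sketch of that shape of check; the type ids and the hypothetical_args_ok name are illustrative, and the real check additionally handles polymorphic (ANY) arguments as described above.

#include <stdbool.h>
#include <stdio.h>

/*
 * Return true if the declared signature is acceptable: numDirectArgs must be
 * at least numAggArgs, and the last numAggArgs direct argument types must
 * equal the aggregated argument types, position by position.
 */
static bool
hypothetical_args_ok(const int *directTypes, int numDirectArgs,
                     const int *aggTypes, int numAggArgs)
{
    if (numDirectArgs < numAggArgs)
        return false;
    for (int i = 0; i < numAggArgs; i++)
    {
        if (directTypes[numDirectArgs - numAggArgs + i] != aggTypes[i])
            return false;
    }
    return true;
}

int
main(void)
{
    int direct[] = {23, 25};    /* e.g. int4, text */
    int agg_ok[] = {25};        /* trailing direct arg matches */
    int agg_bad[] = {23};       /* mismatch against trailing text */

    printf("ok case: %d\n", hypothetical_args_ok(direct, 2, agg_ok, 1));
    printf("bad case: %d\n", hypothetical_args_ok(direct, 2, agg_bad, 1));
    return 0;
}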
It's unconditionally OK if numDirectArgs == numArgs, diff --git a/src/backend/catalog/pg_collation.c b/src/backend/catalog/pg_collation.c index fb947051214..434dbce97f9 100644 --- a/src/backend/catalog/pg_collation.c +++ b/src/backend/catalog/pg_collation.c @@ -78,7 +78,7 @@ CollationCreate(const char *collname, Oid collnamespace, collname, pg_encoding_to_char(collencoding)))); /* - * Also forbid matching an any-encoding entry. This test of course is not + * Also forbid matching an any-encoding entry. This test of course is not * backed up by the unique index, but it's not a problem since we don't * support adding any-encoding entries after initdb. */ diff --git a/src/backend/catalog/pg_constraint.c b/src/backend/catalog/pg_constraint.c index 5fd9822c6ed..041f5ad6865 100644 --- a/src/backend/catalog/pg_constraint.c +++ b/src/backend/catalog/pg_constraint.c @@ -38,7 +38,7 @@ * Create a constraint table entry. * * Subsidiary records (such as triggers or indexes to implement the - * constraint) are *not* created here. But we do make dependency links + * constraint) are *not* created here. But we do make dependency links * from the constraint to the things it depends on. */ Oid @@ -305,7 +305,7 @@ CreateConstraintEntry(const char *constraintName, { /* * Register normal dependency on the unique index that supports a - * foreign-key constraint. (Note: for indexes associated with unique + * foreign-key constraint. (Note: for indexes associated with unique * or primary-key constraints, the dependency runs the other way, and * is not made here.) */ @@ -759,7 +759,7 @@ void get_constraint_relation_oids(Oid constraint_oid, Oid *conrelid, Oid *confrelid) { HeapTuple tup; - Form_pg_constraint con; + Form_pg_constraint con; tup = SearchSysCache1(CONSTROID, ObjectIdGetDatum(constraint_oid)); if (!HeapTupleIsValid(tup)) /* should not happen */ @@ -895,10 +895,10 @@ get_domain_constraint_oid(Oid typid, const char *conname, bool missing_ok) * the rel of interest are Vars with the indicated varno/varlevelsup. * * Currently we only check to see if the rel has a primary key that is a - * subset of the grouping_columns. We could also use plain unique constraints + * subset of the grouping_columns. We could also use plain unique constraints * if all their columns are known not null, but there's a problem: we need * to be able to represent the not-null-ness as part of the constraints added - * to *constraintDeps. FIXME whenever not-null constraints get represented + * to *constraintDeps. FIXME whenever not-null constraints get represented * in pg_constraint. */ bool diff --git a/src/backend/catalog/pg_db_role_setting.c b/src/backend/catalog/pg_db_role_setting.c index 9f9bbe20742..3e73e0f45b8 100644 --- a/src/backend/catalog/pg_db_role_setting.c +++ b/src/backend/catalog/pg_db_role_setting.c @@ -172,7 +172,7 @@ AlterSetting(Oid databaseid, Oid roleid, VariableSetStmt *setstmt) /* * Drop some settings from the catalog. These can be for a particular - * database, or for a particular role. (It is of course possible to do both + * database, or for a particular role. (It is of course possible to do both * too, but it doesn't make sense for current uses.) */ void diff --git a/src/backend/catalog/pg_depend.c b/src/backend/catalog/pg_depend.c index fabc51c35c8..7b2d0a7649f 100644 --- a/src/backend/catalog/pg_depend.c +++ b/src/backend/catalog/pg_depend.c @@ -50,7 +50,7 @@ recordDependencyOn(const ObjectAddress *depender, /* * Record multiple dependencies (of the same kind) for a single dependent - * object. 
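[Editorial illustration, not part of the diff.] The pg_operator.c comments above describe forward declarations: when the commutator or negator named while defining "op" does not exist yet, a shell entry is created just to reserve its OID, and is filled in later. A toy standalone sketch of that lookup-or-create-shell pattern; the registry, OpEntry struct, and get_or_make_shell are invented for illustration.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define MAX_OPS 32

typedef struct OpEntry
{
    int  oid;
    char name[16];
    bool is_shell;      /* true until the operator is fully defined */
} OpEntry;

static OpEntry ops[MAX_OPS];
static int     nops = 0;
static int     next_oid = 100;

/* Find an operator by name, or create a shell entry to reserve an OID. */
static int
get_or_make_shell(const char *name)
{
    for (int i = 0; i < nops; i++)
        if (strcmp(ops[i].name, name) == 0)
            return ops[i].oid;

    OpEntry *e = &ops[nops++];

    e->oid = next_oid++;
    snprintf(e->name, sizeof(e->name), "%s", name);
    e->is_shell = true;
    return e->oid;
}

int
main(void)
{
    /* Defining "<" with commutator ">" that doesn't exist yet. */
    int lt = get_or_make_shell("<");
    int gt = get_or_make_shell(">");    /* shell, to be filled in later */

    ops[0].is_shell = false;            /* "<" is now fully defined */
    printf("< has oid %d, commutator > reserved oid %d (shell=%d)\n",
           lt, gt, ops[1].is_shell);
    return 0;
}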
This has a little less overhead than recording each separately. + * object. This has a little less overhead than recording each separately. */ void recordMultipleDependencies(const ObjectAddress *depender, @@ -127,7 +127,7 @@ recordMultipleDependencies(const ObjectAddress *depender, /* * If we are executing a CREATE EXTENSION operation, mark the given object - * as being a member of the extension. Otherwise, do nothing. + * as being a member of the extension. Otherwise, do nothing. * * This must be called during creation of any user-definable object type * that could be a member of an extension. @@ -186,7 +186,7 @@ recordDependencyOnCurrentExtension(const ObjectAddress *object, * (possibly with some differences from before). * * If skipExtensionDeps is true, we do not delete any dependencies that - * show that the given object is a member of an extension. This avoids + * show that the given object is a member of an extension. This avoids * needing a lot of extra logic to fetch and recreate that dependency. */ long @@ -492,7 +492,7 @@ getExtensionOfObject(Oid classId, Oid objectId) * Detect whether a sequence is marked as "owned" by a column * * An ownership marker is an AUTO dependency from the sequence to the - * column. If we find one, store the identity of the owning column + * column. If we find one, store the identity of the owning column * into *tableId and *colId and return TRUE; else return FALSE. * * Note: if there's more than one such pg_depend entry then you get diff --git a/src/backend/catalog/pg_enum.c b/src/backend/catalog/pg_enum.c index 4168c0e84af..b4f2051749d 100644 --- a/src/backend/catalog/pg_enum.c +++ b/src/backend/catalog/pg_enum.c @@ -465,7 +465,7 @@ restart: * We avoid doing this unless absolutely necessary; in most installations * it will never happen. The reason is that updating existing pg_enum * entries creates hazards for other backends that are concurrently reading - * pg_enum. Although system catalog scans now use MVCC semantics, the + * pg_enum. Although system catalog scans now use MVCC semantics, the * syscache machinery might read different pg_enum entries under different * snapshots, so some other backend might get confused about the proper * ordering if a concurrent renumbering occurs. diff --git a/src/backend/catalog/pg_largeobject.c b/src/backend/catalog/pg_largeobject.c index ed2a41bfd8c..a54bc1b1faa 100644 --- a/src/backend/catalog/pg_largeobject.c +++ b/src/backend/catalog/pg_largeobject.c @@ -76,7 +76,7 @@ LargeObjectCreate(Oid loid) } /* - * Drop a large object having the given LO identifier. Both the data pages + * Drop a large object having the given LO identifier. Both the data pages * and metadata must be dropped. */ void diff --git a/src/backend/catalog/pg_operator.c b/src/backend/catalog/pg_operator.c index 8faa0152768..9a3e20a7aed 100644 --- a/src/backend/catalog/pg_operator.c +++ b/src/backend/catalog/pg_operator.c @@ -315,7 +315,7 @@ OperatorShellMake(const char *operatorName, * specify operators that do not exist. For example, if operator * "op" is being defined, the negator operator "negop" and the * commutator "commop" can also be defined without specifying - * any information other than their names. Since in order to + * any information other than their names. Since in order to * add "op" to the PG_OPERATOR catalog, all the Oid's for these * operators must be placed in the fields of "op", a forward * declaration is done on the commutator and negator operators. 
@@ -433,7 +433,7 @@ OperatorCreate(const char *operatorName, operatorName); /* - * Set up the other operators. If they do not currently exist, create + * Set up the other operators. If they do not currently exist, create * shells in order to get ObjectId's. */ diff --git a/src/backend/catalog/pg_proc.c b/src/backend/catalog/pg_proc.c index abf2f497e41..0fa331ad18f 100644 --- a/src/backend/catalog/pg_proc.c +++ b/src/backend/catalog/pg_proc.c @@ -229,7 +229,7 @@ ProcedureCreate(const char *procedureName, /* * Do not allow polymorphic return type unless at least one input argument - * is polymorphic. ANYRANGE return type is even stricter: must have an + * is polymorphic. ANYRANGE return type is even stricter: must have an * ANYRANGE input (since we can't deduce the specific range type from * ANYELEMENT). Also, do not allow return type INTERNAL unless at least * one input argument is INTERNAL. @@ -676,7 +676,7 @@ ProcedureCreate(const char *procedureName, /* * Set per-function configuration parameters so that the validation is - * done with the environment the function expects. However, if + * done with the environment the function expects. However, if * check_function_bodies is off, we don't do this, because that would * create dump ordering hazards that pg_dump doesn't know how to deal * with. (For example, a SET clause might refer to a not-yet-created @@ -948,7 +948,7 @@ sql_function_parse_error_callback(void *arg) /* * Adjust a syntax error occurring inside the function body of a CREATE - * FUNCTION or DO command. This can be used by any function validator or + * FUNCTION or DO command. This can be used by any function validator or * anonymous-block handler, not only for SQL-language functions. * It is assumed that the syntax error position is initially relative to the * function body string (as passed in). If possible, we adjust the position @@ -1081,7 +1081,7 @@ match_prosrc_to_literal(const char *prosrc, const char *literal, /* * This implementation handles backslashes and doubled quotes in the - * string literal. It does not handle the SQL syntax for literals + * string literal. It does not handle the SQL syntax for literals * continued across line boundaries. * * We do the comparison a character at a time, not a byte at a time, so diff --git a/src/backend/catalog/pg_shdepend.c b/src/backend/catalog/pg_shdepend.c index 8942441dc50..7aa70fa3b2f 100644 --- a/src/backend/catalog/pg_shdepend.c +++ b/src/backend/catalog/pg_shdepend.c @@ -167,7 +167,7 @@ recordDependencyOnOwner(Oid classId, Oid objectId, Oid owner) * shdepChangeDep * * Update shared dependency records to account for an updated referenced - * object. This is an internal workhorse for operations such as changing + * object. This is an internal workhorse for operations such as changing * an object's owner. * * There must be no more than one existing entry for the given dependent @@ -316,7 +316,7 @@ changeDependencyOnOwner(Oid classId, Oid objectId, Oid newOwnerId) * was previously granted some rights to the object. * * This step is analogous to aclnewowner's removal of duplicate entries - * in the ACL. We have to do it to handle this scenario: + * in the ACL. We have to do it to handle this scenario: * A grants some rights on an object to B * ALTER OWNER changes the object's owner to B * ALTER OWNER changes the object's owner to C @@ -402,9 +402,9 @@ getOidListDiff(Oid *list1, int *nlist1, Oid *list2, int *nlist2) * and then insert or delete from pg_shdepend as appropriate. 
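[Editorial illustration, not part of the diff.] The getOidListDiff hunk above, together with the GRANT/REVOKE discussion that continues below, is about diffing the old and new sets of referenced role OIDs so that only the dependencies that actually changed are inserted or deleted. A standalone sketch of diffing two sorted id lists; the function name and types are illustrative, and the real routine instead strips common members from both lists in place.

#include <stdio.h>

/*
 * Both arrays are assumed sorted ascending with no duplicates.  Ids present
 * only in 'oldids' correspond to dependencies to delete; ids present only in
 * 'newids' correspond to dependencies to add.
 */
static void
diff_sorted_ids(const unsigned *oldids, int nold,
                const unsigned *newids, int nnew)
{
    int i = 0, j = 0;

    while (i < nold || j < nnew)
    {
        if (j >= nnew || (i < nold && oldids[i] < newids[j]))
            printf("delete dependency on role %u\n", oldids[i++]);
        else if (i >= nold || newids[j] < oldids[i])
            printf("add dependency on role %u\n", newids[j++]);
        else
        {
            i++;    /* present in both lists: nothing to do */
            j++;
        }
    }
}

int
main(void)
{
    unsigned oldids[] = {10, 20, 30};
    unsigned newids[] = {20, 40};

    diff_sorted_ids(oldids, 3, newids, 2);
    return 0;
}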
* * Note that we can't just insert all referenced roles blindly during GRANT, - * because we would end up with duplicate registered dependencies. We could + * because we would end up with duplicate registered dependencies. We could * check for existence of the tuples before inserting, but that seems to be - * more expensive than what we are doing here. Likewise we can't just delete + * more expensive than what we are doing here. Likewise we can't just delete * blindly during REVOKE, because the user may still have other privileges. * It is also possible that REVOKE actually adds dependencies, due to * instantiation of a formerly implicit default ACL (although at present, @@ -535,7 +535,7 @@ checkSharedDependencies(Oid classId, Oid objectId, /* * We limit the number of dependencies reported to the client to * MAX_REPORTED_DEPS, since client software may not deal well with - * enormous error strings. The server log always gets a full report. + * enormous error strings. The server log always gets a full report. */ #define MAX_REPORTED_DEPS 100 @@ -616,7 +616,7 @@ checkSharedDependencies(Oid classId, Oid objectId, bool stored = false; /* - * XXX this info is kept on a simple List. Maybe it's not good + * XXX this info is kept on a simple List. Maybe it's not good * for performance, but using a hash table seems needlessly * complex. The expected number of databases is not high anyway, * I suppose. @@ -853,7 +853,7 @@ shdepAddDependency(Relation sdepRel, /* * Make sure the object doesn't go away while we record the dependency on - * it. DROP routines should lock the object exclusively before they check + * it. DROP routines should lock the object exclusively before they check * shared dependencies. */ shdepLockAndCheckObject(refclassId, refobjId); @@ -1004,7 +1004,7 @@ shdepLockAndCheckObject(Oid classId, Oid objectId) /* * Currently, this routine need not support any other shared - * object types besides roles. If we wanted to record explicit + * object types besides roles. If we wanted to record explicit * dependencies on databases or tablespaces, we'd need code along * these lines: */ @@ -1150,7 +1150,7 @@ isSharedObjectPinned(Oid classId, Oid objectId, Relation sdepRel) /* * shdepDropOwned * - * Drop the objects owned by any one of the given RoleIds. If a role has + * Drop the objects owned by any one of the given RoleIds. If a role has * access to an object, the grant will be removed as well (but the object * will not, of course). * diff --git a/src/backend/catalog/pg_type.c b/src/backend/catalog/pg_type.c index 8e0e65b7219..f614915abfb 100644 --- a/src/backend/catalog/pg_type.c +++ b/src/backend/catalog/pg_type.c @@ -394,7 +394,7 @@ TypeCreate(Oid newTypeOid, if (HeapTupleIsValid(tup)) { /* - * check that the type is not already defined. It may exist as a + * check that the type is not already defined. It may exist as a * shell type, however. */ if (((Form_pg_type) GETSTRUCT(tup))->typisdefined) diff --git a/src/backend/catalog/storage.c b/src/backend/catalog/storage.c index 85df9a10929..c3b2f072e44 100644 --- a/src/backend/catalog/storage.c +++ b/src/backend/catalog/storage.c @@ -35,7 +35,7 @@ * that have been created or deleted in the current transaction. When * a relation is created, we create the physical file immediately, but * remember it so that we can delete the file again if the current - * transaction is aborted. Conversely, a deletion request is NOT + * transaction is aborted. Conversely, a deletion request is NOT * executed immediately, but is just entered in the list. 
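[Editorial illustration, not part of the diff.] The storage.c comment above (continuing below) explains the bookkeeping: files for newly created relations exist immediately but are remembered so they can be removed if the transaction aborts, while deletion requests are deferred and performed only at commit. A standalone sketch of that pending-actions list; PendingDelete, remember, and at_end_of_xact are invented names, and WAL and temp-relation details are ignored.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct PendingDelete
{
    int   relfilenode;  /* which physical file */
    bool  atCommit;     /* delete at commit (drop) or at abort (create)? */
    struct PendingDelete *next;
} PendingDelete;

static PendingDelete *pending = NULL;

static void
remember(int relfilenode, bool atCommit)
{
    PendingDelete *p = malloc(sizeof(PendingDelete));

    p->relfilenode = relfilenode;
    p->atCommit = atCommit;
    p->next = pending;
    pending = p;
}

/* CREATE: make the file now, but remember to remove it if we abort. */
static void
create_relation(int relfilenode)
{
    printf("create file %d\n", relfilenode);
    remember(relfilenode, false);
}

/* DROP: do NOT remove the file yet; only remember to do so at commit. */
static void
drop_relation(int relfilenode)
{
    remember(relfilenode, true);
}

/* At end of transaction, perform whichever deletions now apply. */
static void
at_end_of_xact(bool isCommit)
{
    for (PendingDelete *p = pending; p != NULL;)
    {
        PendingDelete *next = p->next;

        if (p->atCommit == isCommit)
            printf("unlink file %d\n", p->relfilenode);
        free(p);
        p = next;
    }
    pending = NULL;
}

int
main(void)
{
    create_relation(1001);
    drop_relation(555);
    at_end_of_xact(true);   /* commit: 555 unlinked, 1001 kept */
    return 0;
}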
When and if * the transaction commits, we can delete the physical file. * @@ -344,7 +344,7 @@ smgrDoPendingDeletes(bool isCommit) if (maxrels == 0) { maxrels = 8; - srels = palloc(sizeof(SMgrRelation) * maxrels ); + srels = palloc(sizeof(SMgrRelation) * maxrels); } else if (maxrels <= nrels) { @@ -378,7 +378,7 @@ smgrDoPendingDeletes(bool isCommit) * *ptr is set to point to a freshly-palloc'd array of RelFileNodes. * If there are no relations to be deleted, *ptr is set to NULL. * - * Only non-temporary relations are included in the returned list. This is OK + * Only non-temporary relations are included in the returned list. This is OK * because the list is used only in contexts where temporary relations don't * matter: we're either writing to the two-phase state file (and transactions * that have touched temp tables can't be prepared) or we're writing to xlog diff --git a/src/backend/catalog/toasting.c b/src/backend/catalog/toasting.c index 5275e4bfdb3..bdfeb90dd10 100644 --- a/src/backend/catalog/toasting.c +++ b/src/backend/catalog/toasting.c @@ -36,9 +36,9 @@ Oid binary_upgrade_next_toast_pg_type_oid = InvalidOid; static void CheckAndCreateToastTable(Oid relOid, Datum reloptions, - LOCKMODE lockmode, bool check); + LOCKMODE lockmode, bool check); static bool create_toast_table(Relation rel, Oid toastOid, Oid toastIndexOid, - Datum reloptions, LOCKMODE lockmode, bool check); + Datum reloptions, LOCKMODE lockmode, bool check); static bool needs_toast_table(Relation rel); @@ -106,7 +106,7 @@ BootstrapToastTable(char *relName, Oid toastOid, Oid toastIndexOid) /* create_toast_table does all the work */ if (!create_toast_table(rel, toastOid, toastIndexOid, (Datum) 0, - AccessExclusiveLock, false)) + AccessExclusiveLock, false)) elog(ERROR, "\"%s\" does not require a toast table", relName); @@ -177,8 +177,8 @@ create_toast_table(Relation rel, Oid toastOid, Oid toastIndexOid, return false; /* - * If requested check lockmode is sufficient. This is a cross check - * in case of errors or conflicting decisions in earlier code. + * If requested check lockmode is sufficient. This is a cross check in + * case of errors or conflicting decisions in earlier code. */ if (check && lockmode != AccessExclusiveLock) elog(ERROR, "AccessExclusiveLock required to add toast table."); @@ -362,7 +362,7 @@ create_toast_table(Relation rel, Oid toastOid, Oid toastIndexOid, } /* - * Check to see whether the table needs a TOAST table. It does only if + * Check to see whether the table needs a TOAST table. It does only if * (1) there are any toastable attributes, and (2) the maximum length * of a tuple could exceed TOAST_TUPLE_THRESHOLD. (We don't want to * create a toast table for something like "f1 varchar(20)".) diff --git a/src/backend/commands/aggregatecmds.c b/src/backend/commands/aggregatecmds.c index a73d7094376..fcf86dd0d93 100644 --- a/src/backend/commands/aggregatecmds.c +++ b/src/backend/commands/aggregatecmds.c @@ -296,7 +296,7 @@ DefineAggregate(List *name, List *args, bool oldstyle, List *parameters, * * transtype can't be a pseudo-type, since we need to be able to store * values of the transtype. However, we can allow polymorphic transtype - * in some cases (AggregateCreate will check). Also, we allow "internal" + * in some cases (AggregateCreate will check). 
Also, we allow "internal" * for functions that want to pass pointers to private data structures; * but allow that only to superusers, since you could crash the system (or * worse) by connecting up incompatible internal-using functions in an @@ -317,7 +317,7 @@ DefineAggregate(List *name, List *args, bool oldstyle, List *parameters, } /* - * If a moving-aggregate transtype is specified, look that up. Same + * If a moving-aggregate transtype is specified, look that up. Same * restrictions as for transtype. */ if (mtransType) diff --git a/src/backend/commands/alter.c b/src/backend/commands/alter.c index a43457bb575..80c9743a0d5 100644 --- a/src/backend/commands/alter.c +++ b/src/backend/commands/alter.c @@ -296,7 +296,7 @@ AlterObjectRename_internal(Relation rel, Oid objectId, const char *new_name) } /* - * Executes an ALTER OBJECT / RENAME TO statement. Based on the object + * Executes an ALTER OBJECT / RENAME TO statement. Based on the object * type, the function appropriate to that type is executed. */ Oid diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c index a04adeaac75..c09ca7e6db1 100644 --- a/src/backend/commands/analyze.c +++ b/src/backend/commands/analyze.c @@ -409,7 +409,7 @@ do_analyze_rel(Relation onerel, VacuumStmt *vacstmt, /* * Open all indexes of the relation, and see if there are any analyzable - * columns in the indexes. We do not analyze index columns if there was + * columns in the indexes. We do not analyze index columns if there was * an explicit column list in the ANALYZE command, however. If we are * doing a recursive scan, we don't want to touch the parent's indexes at * all. @@ -466,7 +466,7 @@ do_analyze_rel(Relation onerel, VacuumStmt *vacstmt, /* * Determine how many rows we need to sample, using the worst case from - * all analyzable columns. We use a lower bound of 100 rows to avoid + * all analyzable columns. We use a lower bound of 100 rows to avoid * possible overflow in Vitter's algorithm. (Note: that will also be the * target in the corner case where there are no analyzable columns.) */ @@ -501,7 +501,7 @@ do_analyze_rel(Relation onerel, VacuumStmt *vacstmt, &totalrows, &totaldeadrows); /* - * Compute the statistics. Temporary results during the calculations for + * Compute the statistics. Temporary results during the calculations for * each column are stored in a child context. The calc routines are * responsible to make sure that whatever they store into the VacAttrStats * structure is allocated in anl_context. @@ -558,7 +558,7 @@ do_analyze_rel(Relation onerel, VacuumStmt *vacstmt, /* * Emit the completed stats rows into pg_statistic, replacing any - * previous statistics for the target columns. (If there are stats in + * previous statistics for the target columns. (If there are stats in * pg_statistic for columns we didn't process, we leave them alone.) */ update_attstats(RelationGetRelid(onerel), inh, @@ -610,7 +610,7 @@ do_analyze_rel(Relation onerel, VacuumStmt *vacstmt, } /* - * Report ANALYZE to the stats collector, too. However, if doing + * Report ANALYZE to the stats collector, too. However, if doing * inherited stats we shouldn't report, because the stats collector only * tracks per-table stats. */ @@ -872,7 +872,7 @@ examine_attribute(Relation onerel, int attnum, Node *index_expr) return NULL; /* - * Create the VacAttrStats struct. Note that we only have a copy of the + * Create the VacAttrStats struct. Note that we only have a copy of the * fixed fields of the pg_attribute tuple. 
*/ stats = (VacAttrStats *) palloc0(sizeof(VacAttrStats)); @@ -882,7 +882,7 @@ examine_attribute(Relation onerel, int attnum, Node *index_expr) /* * When analyzing an expression index, believe the expression tree's type * not the column datatype --- the latter might be the opckeytype storage - * type of the opclass, which is not interesting for our purposes. (Note: + * type of the opclass, which is not interesting for our purposes. (Note: * if we did anything with non-expression index columns, we'd need to * figure out where to get the correct type info from, but for now that's * not a problem.) It's not clear whether anyone will care about the @@ -921,7 +921,7 @@ examine_attribute(Relation onerel, int attnum, Node *index_expr) } /* - * Call the type-specific typanalyze function. If none is specified, use + * Call the type-specific typanalyze function. If none is specified, use * std_typanalyze(). */ if (OidIsValid(stats->attrtype->typanalyze)) @@ -997,7 +997,7 @@ BlockSampler_Next(BlockSampler bs) * If we are to skip, we should advance t (hence decrease K), and * repeat the same probabilistic test for the next block. The naive * implementation thus requires an anl_random_fract() call for each block - * number. But we can reduce this to one anl_random_fract() call per + * number. But we can reduce this to one anl_random_fract() call per * selected block, by noting that each time the while-test succeeds, * we can reinterpret V as a uniform random number in the range 0 to p. * Therefore, instead of choosing a new V, we just adjust p to be @@ -1127,7 +1127,7 @@ acquire_sample_rows(Relation onerel, int elevel, /* * We ignore unused and redirect line pointers. DEAD line * pointers should be counted as dead, because we need vacuum to - * run to get rid of them. Note that this rule agrees with the + * run to get rid of them. Note that this rule agrees with the * way that heap_page_prune() counts things. */ if (!ItemIdIsNormal(itemid)) @@ -1173,7 +1173,7 @@ acquire_sample_rows(Relation onerel, int elevel, * is the safer option. * * A special case is that the inserting transaction might - * be our own. In this case we should count and sample + * be our own. In this case we should count and sample * the row, to accommodate users who load a table and * analyze it in one transaction. (pgstat_report_analyze * has to adjust the numbers we send to the stats @@ -1215,7 +1215,7 @@ acquire_sample_rows(Relation onerel, int elevel, /* * The first targrows sample rows are simply copied into the * reservoir. Then we start replacing tuples in the sample - * until we reach the end of the relation. This algorithm is + * until we reach the end of the relation. This algorithm is * from Jeff Vitter's paper (see full citation below). It * works by repeatedly computing the number of tuples to skip * before selecting a tuple, which replaces a randomly chosen @@ -1274,7 +1274,7 @@ acquire_sample_rows(Relation onerel, int elevel, qsort((void *) rows, numrows, sizeof(HeapTuple), compare_rows); /* - * Estimate total numbers of rows in relation. For live rows, use + * Estimate total numbers of rows in relation. For live rows, use * vac_estimate_reltuples; for dead rows, we have no source of old * information, so we have to assume the density is the same in unseen * pages as in the pages we scanned. 
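[Editorial illustration, not part of the diff.] The acquire_sample_rows comments above describe the reservoir approach: copy the first targrows rows into the sample, then replace random slots with decreasing probability for later rows (the real code uses Vitter's algorithm Z to compute skip distances rather than testing every row, and rand()%n below is a slightly biased stand-in). A simplified standalone sketch of the basic reservoir step.

#include <stdio.h>
#include <stdlib.h>

/*
 * Reservoir-sample 'targrows' items from a stream of 'nrows' row ids.
 * Simplified: decides for every row instead of computing skips.
 */
static void
sample_rows(int nrows, int targrows, int *sample)
{
    int seen = 0;

    for (int row = 0; row < nrows; row++)
    {
        seen++;
        if (seen <= targrows)
            sample[seen - 1] = row;     /* fill the reservoir first */
        else
        {
            /* Replace a random slot with probability targrows/seen. */
            int k = rand() % seen;

            if (k < targrows)
                sample[k] = row;
        }
    }
}

int
main(void)
{
    int sample[5];

    srand(42);
    sample_rows(1000, 5, sample);
    for (int i = 0; i < 5; i++)
        printf("sampled row %d\n", sample[i]);
    return 0;
}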
@@ -1597,7 +1597,7 @@ acquire_inherited_sample_rows(Relation onerel, int elevel, * Statistics are stored in several places: the pg_class row for the * relation has stats about the whole relation, and there is a * pg_statistic row for each (non-system) attribute that has ever - * been analyzed. The pg_class values are updated by VACUUM, not here. + * been analyzed. The pg_class values are updated by VACUUM, not here. * * pg_statistic rows are just added or updated normally. This means * that pg_statistic will probably contain some deleted rows at the @@ -2001,7 +2001,7 @@ compute_minimal_stats(VacAttrStatsP stats, /* * If the value is toasted, we want to detoast it just once to * avoid repeated detoastings and resultant excess memory usage - * during the comparisons. Also, check to see if the value is + * during the comparisons. Also, check to see if the value is * excessively wide, and if so don't detoast at all --- just * ignore the value. */ @@ -2121,7 +2121,7 @@ compute_minimal_stats(VacAttrStatsP stats, * We assume (not very reliably!) that all the multiply-occurring * values are reflected in the final track[] list, and the other * nonnull values all appeared but once. (XXX this usually - * results in a drastic overestimate of ndistinct. Can we do + * results in a drastic overestimate of ndistinct. Can we do * any better?) *---------- */ @@ -2158,7 +2158,7 @@ compute_minimal_stats(VacAttrStatsP stats, * Decide how many values are worth storing as most-common values. If * we are able to generate a complete MCV list (all the values in the * sample will fit, and we think these are all the ones in the table), - * then do so. Otherwise, store only those values that are + * then do so. Otherwise, store only those values that are * significantly more common than the (estimated) average. We set the * threshold rather arbitrarily at 25% more than average, with at * least 2 instances in the sample. @@ -2326,7 +2326,7 @@ compute_scalar_stats(VacAttrStatsP stats, /* * If the value is toasted, we want to detoast it just once to * avoid repeated detoastings and resultant excess memory usage - * during the comparisons. Also, check to see if the value is + * during the comparisons. Also, check to see if the value is * excessively wide, and if so don't detoast at all --- just * ignore the value. */ @@ -2371,7 +2371,7 @@ compute_scalar_stats(VacAttrStatsP stats, * accumulate ordering-correlation statistics. * * To determine which are most common, we first have to count the - * number of duplicates of each value. The duplicates are adjacent in + * number of duplicates of each value. The duplicates are adjacent in * the sorted list, so a brute-force approach is to compare successive * datum values until we find two that are not equal. However, that * requires N-1 invocations of the datum comparison routine, which are @@ -2380,7 +2380,7 @@ compute_scalar_stats(VacAttrStatsP stats, * that are adjacent in the sorted order; otherwise it could not know * that it's ordered the pair correctly.) We exploit this by having * compare_scalars remember the highest tupno index that each - * ScalarItem has been found equal to. At the end of the sort, a + * ScalarItem has been found equal to. At the end of the sort, a * ScalarItem's tupnoLink will still point to itself if and only if it * is the last item of its group of duplicates (since the group will * be ordered by tupno). @@ -2500,7 +2500,7 @@ compute_scalar_stats(VacAttrStatsP stats, * Decide how many values are worth storing as most-common values. 
If * we are able to generate a complete MCV list (all the values in the * sample will fit, and we think these are all the ones in the table), - * then do so. Otherwise, store only those values that are + * then do so. Otherwise, store only those values that are * significantly more common than the (estimated) average. We set the * threshold rather arbitrarily at 25% more than average, with at * least 2 instances in the sample. Also, we won't suppress values @@ -2655,7 +2655,7 @@ compute_scalar_stats(VacAttrStatsP stats, /* * The object of this loop is to copy the first and last values[] - * entries along with evenly-spaced values in between. So the + * entries along with evenly-spaced values in between. So the * i'th value is values[(i * (nvals - 1)) / (num_hist - 1)]. But * computing that subscript directly risks integer overflow when * the stats target is more than a couple thousand. Instead we @@ -2766,7 +2766,7 @@ compute_scalar_stats(VacAttrStatsP stats, * qsort_arg comparator for sorting ScalarItems * * Aside from sorting the items, we update the tupnoLink[] array - * whenever two ScalarItems are found to contain equal datums. The array + * whenever two ScalarItems are found to contain equal datums. The array * is indexed by tupno; for each ScalarItem, it contains the highest * tupno that that item's datum has been found to be equal to. This allows * us to avoid additional comparisons in compute_scalar_stats(). diff --git a/src/backend/commands/async.c b/src/backend/commands/async.c index 09fb99bb73e..92f2077d487 100644 --- a/src/backend/commands/async.c +++ b/src/backend/commands/async.c @@ -151,7 +151,7 @@ * * This struct declaration has the maximal length, but in a real queue entry * the data area is only big enough for the actual channel and payload strings - * (each null-terminated). AsyncQueueEntryEmptySize is the minimum possible + * (each null-terminated). AsyncQueueEntryEmptySize is the minimum possible * entry size, if both channel and payload strings are empty (but note it * doesn't include alignment padding). * @@ -265,7 +265,7 @@ static SlruCtlData AsyncCtlData; * * The most data we can have in the queue at a time is QUEUE_MAX_PAGE/2 * pages, because more than that would confuse slru.c into thinking there - * was a wraparound condition. With the default BLCKSZ this means there + * was a wraparound condition. With the default BLCKSZ this means there * can be up to 8GB of queued-and-not-read data. * * Note: it's possible to redefine QUEUE_MAX_PAGE with a smaller multiple of @@ -395,7 +395,7 @@ asyncQueuePagePrecedes(int p, int q) int diff; /* - * We have to compare modulo (QUEUE_MAX_PAGE+1)/2. Both inputs should be + * We have to compare modulo (QUEUE_MAX_PAGE+1)/2. Both inputs should be * in the range 0..QUEUE_MAX_PAGE. */ Assert(p >= 0 && p <= QUEUE_MAX_PAGE); @@ -826,7 +826,7 @@ PreCommit_Notify(void) while (nextNotify != NULL) { /* - * Add the pending notifications to the queue. We acquire and + * Add the pending notifications to the queue. We acquire and * release AsyncQueueLock once per page, which might be overkill * but it does allow readers to get in while we're doing this. * @@ -1042,12 +1042,12 @@ Exec_UnlistenAllCommit(void) * The reason that this is not done in AtCommit_Notify is that there is * a nonzero chance of errors here (for example, encoding conversion errors * while trying to format messages to our frontend). An error during - * AtCommit_Notify would be a PANIC condition. The timing is also arranged + * AtCommit_Notify would be a PANIC condition. 
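[Editorial illustration, not part of the diff.] The asyncQueuePagePrecedes hunk above notes that queue page numbers wrap around, so ordering must be judged modulo (QUEUE_MAX_PAGE+1)/2. A standalone sketch of that circular comparison with a small toy page space; MAX_PAGE here is an illustrative constant, not the backend's value.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_PAGE 15     /* toy wrap point: valid pages are 0..15 */

/*
 * Does page p logically precede page q?  Because numbers wrap, differences
 * are interpreted within half the cycle: p precedes q if going forward from
 * p reaches q in less than half a cycle.
 */
static bool
page_precedes(int p, int q)
{
    int diff = p - q;

    assert(p >= 0 && p <= MAX_PAGE);
    assert(q >= 0 && q <= MAX_PAGE);

    if (diff >= (MAX_PAGE + 1) / 2)
        diff -= MAX_PAGE + 1;
    else if (diff < -((MAX_PAGE + 1) / 2))
        diff += MAX_PAGE + 1;
    return diff < 0;
}

int
main(void)
{
    printf("%d\n", page_precedes(3, 5));    /* 1: 3 is behind 5 */
    printf("%d\n", page_precedes(14, 1));   /* 1: 14 is behind 1 after wrap */
    printf("%d\n", page_precedes(1, 14));   /* 0 */
    return 0;
}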
The timing is also arranged * to ensure that a transaction's self-notifies are delivered to the frontend * before it gets the terminating ReadyForQuery message. * * Note that we send signals and process the queue even if the transaction - * eventually aborted. This is because we need to clean out whatever got + * eventually aborted. This is because we need to clean out whatever got * added to the queue. * * NOTE: we are outside of any transaction here. @@ -1137,7 +1137,7 @@ IsListeningOn(const char *channel) /* * Remove our entry from the listeners array when we are no longer listening - * on any channel. NB: must not fail if we're already not listening. + * on any channel. NB: must not fail if we're already not listening. */ static void asyncQueueUnregister(void) @@ -1179,7 +1179,7 @@ asyncQueueIsFull(void) /* * The queue is full if creating a new head page would create a page that * logically precedes the current global tail pointer, ie, the head - * pointer would wrap around compared to the tail. We cannot create such + * pointer would wrap around compared to the tail. We cannot create such * a head page for fear of confusing slru.c. For safety we round the tail * pointer back to a segment boundary (compare the truncation logic in * asyncQueueAdvanceTail). @@ -1198,7 +1198,7 @@ asyncQueueIsFull(void) /* * Advance the QueuePosition to the next entry, assuming that the current - * entry is of length entryLength. If we jump to a new page the function + * entry is of length entryLength. If we jump to a new page the function * returns true, else false. */ static bool @@ -1267,7 +1267,7 @@ asyncQueueNotificationToEntry(Notification *n, AsyncQueueEntry *qe) * the last byte which simplifies reading the page later. * * We are passed the list cell containing the next notification to write - * and return the first still-unwritten cell back. Eventually we will return + * and return the first still-unwritten cell back. Eventually we will return * NULL indicating all is done. * * We are holding AsyncQueueLock already from the caller and grab AsyncCtlLock @@ -1344,7 +1344,7 @@ asyncQueueAddEntries(ListCell *nextNotify) * Page is full, so we're done here, but first fill the next page * with zeroes. The reason to do this is to ensure that slru.c's * idea of the head page is always the same as ours, which avoids - * boundary problems in SimpleLruTruncate. The test in + * boundary problems in SimpleLruTruncate. The test in * asyncQueueIsFull() ensured that there is room to create this * page without overrunning the queue. */ @@ -1518,7 +1518,7 @@ AtAbort_Notify(void) /* * If we LISTEN but then roll back the transaction after PreCommit_Notify, * we have registered as a listener but have not made any entry in - * listenChannels. In that case, deregister again. + * listenChannels. In that case, deregister again. */ if (amRegisteredListener && listenChannels == NIL) asyncQueueUnregister(); @@ -1771,7 +1771,7 @@ EnableNotifyInterrupt(void) * is disabled until the next EnableNotifyInterrupt call. * * The PROCSIG_CATCHUP_INTERRUPT signal handler also needs to call this, - * so as to prevent conflicts if one signal interrupts the other. So we + * so as to prevent conflicts if one signal interrupts the other. So we * must return the previous state of the flag. */ bool @@ -1866,7 +1866,7 @@ asyncQueueReadAllNotifications(void) /* * We copy the data from SLRU into a local buffer, so as to avoid * holding the AsyncCtlLock while we are examining the entries and - * possibly transmitting them to our frontend. 
Copy only the part + * possibly transmitting them to our frontend. Copy only the part * of the page we will actually inspect. */ slotno = SimpleLruReadPage_ReadOnly(AsyncCtl, curpage, @@ -1940,7 +1940,7 @@ asyncQueueReadAllNotifications(void) * and deliver relevant ones to my frontend. * * The current page must have been fetched into page_buffer from shared - * memory. (We could access the page right in shared memory, but that + * memory. (We could access the page right in shared memory, but that * would imply holding the AsyncCtlLock throughout this routine.) * * We stop if we reach the "stop" position, or reach a notification from an @@ -2146,7 +2146,7 @@ NotifyMyFrontEnd(const char *channel, const char *payload, int32 srcPid) pq_endmessage(&buf); /* - * NOTE: we do not do pq_flush() here. For a self-notify, it will + * NOTE: we do not do pq_flush() here. For a self-notify, it will * happen at the end of the transaction, and for incoming notifies * ProcessIncomingNotify will do it after finding all the notifies. */ diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c index 4ac1e0b864f..54a27531825 100644 --- a/src/backend/commands/cluster.c +++ b/src/backend/commands/cluster.c @@ -1,7 +1,7 @@ /*------------------------------------------------------------------------- * * cluster.c - * CLUSTER a table on an index. This is now also used for VACUUM FULL. + * CLUSTER a table on an index. This is now also used for VACUUM FULL. * * There is hardly anything left of Paul Brown's original implementation... * @@ -94,7 +94,7 @@ static void reform_and_rewrite_tuple(HeapTuple tuple, * * The single-relation case does not have any such overhead. * - * We also allow a relation to be specified without index. In that case, + * We also allow a relation to be specified without index. In that case, * the indisclustered bit will be looked up, and an ERROR will be thrown * if there is no index with the bit set. *--------------------------------------------------------------------------- @@ -206,7 +206,7 @@ cluster(ClusterStmt *stmt, bool isTopLevel) ALLOCSET_DEFAULT_MAXSIZE); /* - * Build the list of relations to cluster. Note that this lives in + * Build the list of relations to cluster. Note that this lives in * cluster_context. */ rvs = get_tables_to_cluster(cluster_context); @@ -243,7 +243,7 @@ cluster(ClusterStmt *stmt, bool isTopLevel) * * This clusters the table by creating a new, clustered table and * swapping the relfilenodes of the new table and the old table, so - * the OID of the original table is preserved. Thus we do not lose + * the OID of the original table is preserved. Thus we do not lose * GRANT, inheritance nor references to this table (this was a bug * in releases thru 7.3). * @@ -252,7 +252,7 @@ cluster(ClusterStmt *stmt, bool isTopLevel) * them incrementally while we load the table. * * If indexOid is InvalidOid, the table will be rewritten in physical order - * instead of index order. This is the new implementation of VACUUM FULL, + * instead of index order. This is the new implementation of VACUUM FULL, * and error messages should refer to the operation as VACUUM not CLUSTER. */ void @@ -265,7 +265,7 @@ cluster_rel(Oid tableOid, Oid indexOid, bool recheck, bool verbose) /* * We grab exclusive access to the target rel and index for the duration - * of the transaction. (This is redundant for the single-transaction + * of the transaction. (This is redundant for the single-transaction * case, since cluster() already did it.) 
The index lock is taken inside * check_index_is_clusterable. */ @@ -300,7 +300,7 @@ cluster_rel(Oid tableOid, Oid indexOid, bool recheck, bool verbose) * check in the "recheck" case is appropriate (which currently means * somebody is executing a database-wide CLUSTER), because there is * another check in cluster() which will stop any attempt to cluster - * remote temp tables by name. There is another check in cluster_rel + * remote temp tables by name. There is another check in cluster_rel * which is redundant, but we leave it for extra safety. */ if (RELATION_IS_OTHER_TEMP(OldHeap)) @@ -393,7 +393,7 @@ cluster_rel(Oid tableOid, Oid indexOid, bool recheck, bool verbose) /* * All predicate locks on the tuples or pages are about to be made - * invalid, because we move tuples around. Promote them to relation + * invalid, because we move tuples around. Promote them to relation * locks. Predicate locks on indexes will be promoted when they are * reindexed. */ @@ -440,7 +440,7 @@ check_index_is_clusterable(Relation OldHeap, Oid indexOid, bool recheck, LOCKMOD /* * Disallow clustering on incomplete indexes (those that might not index - * every row of the relation). We could relax this by making a separate + * every row of the relation). We could relax this by making a separate * seqscan pass over the table to copy the missing rows, but that seems * expensive and tedious. */ @@ -649,14 +649,14 @@ make_new_heap(Oid OIDOldHeap, Oid NewTableSpace, bool forcetemp, /* * Create the new heap, using a temporary name in the same namespace as - * the existing table. NOTE: there is some risk of collision with user + * the existing table. NOTE: there is some risk of collision with user * relnames. Working around this seems more trouble than it's worth; in * particular, we can't create the new heap in a different namespace from * the old, or we will have problems with the TEMP status of temp tables. * * Note: the new heap is not a shared relation, even if we are rebuilding * a shared rel. However, we do make the new heap mapped if the source is - * mapped. This simplifies swap_relation_files, and is absolutely + * mapped. This simplifies swap_relation_files, and is absolutely * necessary for rebuilding pg_class, for reasons explained there. */ snprintf(NewHeapName, sizeof(NewHeapName), "pg_temp_%u", OIDOldHeap); @@ -696,11 +696,11 @@ make_new_heap(Oid OIDOldHeap, Oid NewTableSpace, bool forcetemp, * * If the relation doesn't have a TOAST table already, we can't need one * for the new relation. The other way around is possible though: if some - * wide columns have been dropped, NewHeapCreateToastTable can decide - * that no TOAST table is needed for the new table. + * wide columns have been dropped, NewHeapCreateToastTable can decide that + * no TOAST table is needed for the new table. * - * Note that NewHeapCreateToastTable ends with CommandCounterIncrement, - * so that the TOAST table will be visible for insertion. + * Note that NewHeapCreateToastTable ends with CommandCounterIncrement, so + * that the TOAST table will be visible for insertion. */ toastid = OldHeap->rd_rel->reltoastrelid; if (OidIsValid(toastid)) @@ -788,12 +788,12 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex, bool verbose, /* * If the OldHeap has a toast table, get lock on the toast table to keep - * it from being vacuumed. This is needed because autovacuum processes + * it from being vacuumed. This is needed because autovacuum processes * toast tables independently of their main tables, with no lock on the - * latter. 
If an autovacuum were to start on the toast table after we + * latter. If an autovacuum were to start on the toast table after we * compute our OldestXmin below, it would use a later OldestXmin, and then * possibly remove as DEAD toast tuples belonging to main tuples we think - * are only RECENTLY_DEAD. Then we'd fail while trying to copy those + * are only RECENTLY_DEAD. Then we'd fail while trying to copy those * tuples. * * We don't need to open the toast relation here, just lock it. The lock @@ -814,7 +814,7 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex, bool verbose, /* * If both tables have TOAST tables, perform toast swap by content. It is * possible that the old table has a toast table but the new one doesn't, - * if toastable columns have been dropped. In that case we have to do + * if toastable columns have been dropped. In that case we have to do * swap by links. This is okay because swap by content is only essential * for system catalogs, and we don't support schema changes for them. */ @@ -833,7 +833,7 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex, bool verbose, * * Note that we must hold NewHeap open until we are done writing data, * since the relcache will not guarantee to remember this setting once - * the relation is closed. Also, this technique depends on the fact + * the relation is closed. Also, this technique depends on the fact * that no one will try to read from the NewHeap until after we've * finished writing it and swapping the rels --- otherwise they could * follow the toast pointers to the wrong place. (It would actually @@ -929,7 +929,7 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex, bool verbose, /* * Scan through the OldHeap, either in OldIndex order or sequentially; * copy each tuple into the NewHeap, or transiently to the tuplesort - * module. Note that we don't bother sorting dead tuples (they won't get + * module. Note that we don't bother sorting dead tuples (they won't get * to the new table anyway). */ for (;;) @@ -1217,7 +1217,7 @@ swap_relation_files(Oid r1, Oid r2, bool target_is_pg_class, NameStr(relform2->relname), r2); /* - * Send replacement mappings to relmapper. Note these won't actually + * Send replacement mappings to relmapper. Note these won't actually * take effect until CommandCounterIncrement. */ RelationMapUpdateMap(r1, relfilenode2, relform1->relisshared, false); @@ -1404,7 +1404,8 @@ swap_relation_files(Oid r1, Oid r2, bool target_is_pg_class, relform1->relkind == RELKIND_TOASTVALUE && relform2->relkind == RELKIND_TOASTVALUE) { - Oid toastIndex1, toastIndex2; + Oid toastIndex1, + toastIndex2; /* Get valid index for each relation */ toastIndex1 = toast_get_valid_index(r1, @@ -1440,7 +1441,7 @@ swap_relation_files(Oid r1, Oid r2, bool target_is_pg_class, * non-transient relation.) * * Caution: the placement of this step interacts with the decision to - * handle toast rels by recursion. When we are trying to rebuild pg_class + * handle toast rels by recursion. When we are trying to rebuild pg_class * itself, the smgr close on pg_class must happen after all accesses in * this function. */ @@ -1487,9 +1488,9 @@ finish_heap_swap(Oid OIDOldHeap, Oid OIDNewHeap, /* * Rebuild each index on the relation (but not the toast table, which is - * all-new at this point). It is important to do this before the DROP + * all-new at this point). 
It is important to do this before the DROP * step because if we are processing a system catalog that will be used - * during DROP, we want to have its indexes available. There is no + * during DROP, we want to have its indexes available. There is no * advantage to the other order anyway because this is all transactional, * so no chance to reclaim disk space before commit. We do not need a * final CommandCounterIncrement() because reindex_relation does it. @@ -1511,11 +1512,11 @@ finish_heap_swap(Oid OIDOldHeap, Oid OIDNewHeap, * swap_relation_files()), thus relfrozenxid was not updated. That's * annoying because a potential reason for doing a VACUUM FULL is a * imminent or actual anti-wraparound shutdown. So, now that we can - * access the new relation using it's indices, update - * relfrozenxid. pg_class doesn't have a toast relation, so we don't need - * to update the corresponding toast relation. Not that there's little - * point moving all relfrozenxid updates here since swap_relation_files() - * needs to write to pg_class for non-mapped relations anyway. + * access the new relation using it's indices, update relfrozenxid. + * pg_class doesn't have a toast relation, so we don't need to update the + * corresponding toast relation. Not that there's little point moving all + * relfrozenxid updates here since swap_relation_files() needs to write to + * pg_class for non-mapped relations anyway. */ if (OIDOldHeap == RelationRelationId) { diff --git a/src/backend/commands/constraint.c b/src/backend/commands/constraint.c index 751f89e514d..b0cad4634b2 100644 --- a/src/backend/commands/constraint.c +++ b/src/backend/commands/constraint.c @@ -50,7 +50,7 @@ unique_key_recheck(PG_FUNCTION_ARGS) bool isnull[INDEX_MAX_KEYS]; /* - * Make sure this is being called as an AFTER ROW trigger. Note: + * Make sure this is being called as an AFTER ROW trigger. Note: * translatable error strings are shared with ri_triggers.c, so resist the * temptation to fold the function name into them. */ @@ -87,7 +87,7 @@ unique_key_recheck(PG_FUNCTION_ARGS) * If the new_row is now dead (ie, inserted and then deleted within our * transaction), we can skip the check. However, we have to be careful, * because this trigger gets queued only in response to index insertions; - * which means it does not get queued for HOT updates. The row we are + * which means it does not get queued for HOT updates. The row we are * called for might now be dead, but have a live HOT child, in which case * we still need to make the check. Therefore we have to use * heap_hot_search, not just HeapTupleSatisfiesVisibility as is done in diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c index 70ee7e50486..fbd7492a73f 100644 --- a/src/backend/commands/copy.c +++ b/src/backend/commands/copy.c @@ -125,8 +125,8 @@ typedef struct CopyStateData bool *force_quote_flags; /* per-column CSV FQ flags */ List *force_notnull; /* list of column names */ bool *force_notnull_flags; /* per-column CSV FNN flags */ - List *force_null; /* list of column names */ - bool *force_null_flags; /* per-column CSV FN flags */ + List *force_null; /* list of column names */ + bool *force_null_flags; /* per-column CSV FN flags */ bool convert_selectively; /* do selective binary conversion? */ List *convert_select; /* list of column names (can be NIL) */ bool *convert_select_flags; /* per-column CSV/TEXT CS flags */ @@ -189,7 +189,7 @@ typedef struct CopyStateData /* * Finally, raw_buf holds raw data read from the data source (file or - * client connection). 
CopyReadLine parses this data sufficiently to + * client connection). CopyReadLine parses this data sufficiently to * locate line boundaries, then transfers the data to line_buf and * converts it. Note: we guarantee that there is a \0 at * raw_buf[raw_buf_len]. @@ -215,7 +215,7 @@ typedef struct * function call overhead in tight COPY loops. * * We must use "if (1)" because the usual "do {...} while(0)" wrapper would - * prevent the continue/break processing from working. We end the "if (1)" + * prevent the continue/break processing from working. We end the "if (1)" * with "else ((void) 0)" to ensure the "if" does not unintentionally match * any "else" in the calling code, and to avoid any compiler warnings about * empty statements. See http://www.cit.gu.edu.au/~anthony/info/C/C.macros. @@ -549,7 +549,7 @@ CopySendEndOfRow(CopyState cstate) * CopyGetData reads data from the source (file or frontend) * * We attempt to read at least minread, and at most maxread, bytes from - * the source. The actual number of bytes read is returned; if this is + * the source. The actual number of bytes read is returned; if this is * less than minread, EOF was detected. * * Note: when copying from the frontend, we expect a proper EOF mark per @@ -766,7 +766,7 @@ CopyLoadRawBuf(CopyState cstate) * we also support copying the output of an arbitrary SELECT query. * * If <pipe> is false, transfer is between the table and the file named - * <filename>. Otherwise, transfer is between the table and our regular + * <filename>. Otherwise, transfer is between the table and our regular * input/output stream. The latter could be either stdin/stdout or a * socket, depending on whether we're running under Postmaster control. * @@ -1203,7 +1203,7 @@ ProcessCopyOptions(CopyState cstate, if (cstate->force_null != NIL && !is_from) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("COPY force null only available using COPY FROM"))); + errmsg("COPY force null only available using COPY FROM"))); /* Don't allow the delimiter to appear in the null string. */ if (strchr(cstate->null_print, cstate->delim[0]) != NULL) @@ -1298,7 +1298,7 @@ BeginCopy(bool is_from, errmsg("COPY (SELECT) WITH OIDS is not supported"))); /* - * Run parse analysis and rewrite. Note this also acquires sufficient + * Run parse analysis and rewrite. Note this also acquires sufficient * locks on the source table(s). * * Because the parser and planner tend to scribble on their input, we @@ -1428,8 +1428,8 @@ BeginCopy(bool is_from, if (!list_member_int(cstate->attnumlist, attnum)) ereport(ERROR, (errcode(ERRCODE_INVALID_COLUMN_REFERENCE), - errmsg("FORCE NULL column \"%s\" not referenced by COPY", - NameStr(tupDesc->attrs[attnum - 1]->attname)))); + errmsg("FORCE NULL column \"%s\" not referenced by COPY", + NameStr(tupDesc->attrs[attnum - 1]->attname)))); cstate->force_null_flags[attnum - 1] = true; } } @@ -1730,7 +1730,7 @@ CopyTo(CopyState cstate) * Create a temporary memory context that we can reset once per row to * recover palloc'd memory. This avoids any problems with leaks inside * datatype output routines, and should be faster than retail pfree's - * anyway. (We don't need a whole econtext as CopyFrom does.) + * anyway. (We don't need a whole econtext as CopyFrom does.) */ cstate->rowcontext = AllocSetContextCreate(CurrentMemoryContext, "COPY TO", @@ -2248,8 +2248,8 @@ CopyFrom(CopyState cstate) { /* * Reset the per-tuple exprcontext. We can only do this if the - * tuple buffer is empty. 
(Calling the context the per-tuple memory - * context is a bit of a misnomer now.) + * tuple buffer is empty. (Calling the context the per-tuple + * memory context is a bit of a misnomer now.) */ ResetPerTupleExprContext(estate); } @@ -2569,19 +2569,20 @@ BeginCopyFrom(Relation rel, num_defaults++; /* - * If a default expression looks at the table being loaded, then - * it could give the wrong answer when using multi-insert. Since - * database access can be dynamic this is hard to test for - * exactly, so we use the much wider test of whether the - * default expression is volatile. We allow for the special case - * of when the default expression is the nextval() of a sequence - * which in this specific case is known to be safe for use with - * the multi-insert optimisation. Hence we use this special case - * function checker rather than the standard check for + * If a default expression looks at the table being loaded, + * then it could give the wrong answer when using + * multi-insert. Since database access can be dynamic this is + * hard to test for exactly, so we use the much wider test of + * whether the default expression is volatile. We allow for + * the special case of when the default expression is the + * nextval() of a sequence which in this specific case is + * known to be safe for use with the multi-insert + * optimisation. Hence we use this special case function + * checker rather than the standard check for * contain_volatile_functions(). */ if (!volatile_defexprs) - volatile_defexprs = contain_volatile_functions_not_nextval((Node *)defexpr); + volatile_defexprs = contain_volatile_functions_not_nextval((Node *) defexpr); } } } @@ -2861,8 +2862,8 @@ NextCopyFrom(CopyState cstate, ExprContext *econtext, if (cstate->csv_mode) { - if(string == NULL && - cstate->force_notnull_flags[m]) + if (string == NULL && + cstate->force_notnull_flags[m]) { /* * FORCE_NOT_NULL option is set and column is NULL - @@ -2870,14 +2871,14 @@ NextCopyFrom(CopyState cstate, ExprContext *econtext, */ string = cstate->null_print; } - else if(string != NULL && cstate->force_null_flags[m] - && strcmp(string,cstate->null_print) == 0 ) + else if (string != NULL && cstate->force_null_flags[m] + && strcmp(string, cstate->null_print) == 0) { /* - * FORCE_NULL option is set and column matches the NULL string. - * It must have been quoted, or otherwise the string would already - * have been set to NULL. - * Convert it to NULL as specified. + * FORCE_NULL option is set and column matches the NULL + * string. It must have been quoted, or otherwise the + * string would already have been set to NULL. Convert it + * to NULL as specified. */ string = NULL; } @@ -2920,7 +2921,7 @@ NextCopyFrom(CopyState cstate, ExprContext *econtext, * if client chooses to send that now. * * Note that we MUST NOT try to read more data in an old-protocol - * copy, since there is no protocol-level EOF marker then. We + * copy, since there is no protocol-level EOF marker then. We * could go either way for copy from file, but choose to throw * error if there's data after the EOF marker, for consistency * with the new-protocol case. @@ -2982,7 +2983,7 @@ NextCopyFrom(CopyState cstate, ExprContext *econtext, /* * Now compute and insert any defaults available for the columns not - * provided by the input data. Anything not processed here or above will + * provided by the input data. Anything not processed here or above will * remain NULL. 
*/ for (i = 0; i < num_defaults; i++) @@ -3017,7 +3018,7 @@ EndCopyFrom(CopyState cstate) * server encoding. * * Result is true if read was terminated by EOF, false if terminated - * by newline. The terminating newline or EOF marker is not included + * by newline. The terminating newline or EOF marker is not included * in the final value of line_buf. */ static bool @@ -3173,7 +3174,7 @@ CopyReadLineText(CopyState cstate) * of read-ahead and avoid the many calls to * IF_NEED_REFILL_AND_NOT_EOF_CONTINUE(), but the COPY_OLD_FE protocol * does not allow us to read too far ahead or we might read into the - * next data, so we read-ahead only as far we know we can. One + * next data, so we read-ahead only as far we know we can. One * optimization would be to read-ahead four byte here if * cstate->copy_dest != COPY_OLD_FE, but it hardly seems worth it, * considering the size of the buffer. @@ -3183,7 +3184,7 @@ CopyReadLineText(CopyState cstate) REFILL_LINEBUF; /* - * Try to read some more data. This will certainly reset + * Try to read some more data. This will certainly reset * raw_buf_index to zero, and raw_buf_ptr must go with it. */ if (!CopyLoadRawBuf(cstate)) @@ -3241,7 +3242,7 @@ CopyReadLineText(CopyState cstate) /* * Updating the line count for embedded CR and/or LF chars is * necessarily a little fragile - this test is probably about the - * best we can do. (XXX it's arguable whether we should do this + * best we can do. (XXX it's arguable whether we should do this * at all --- is cur_lineno a physical or logical count?) */ if (in_quote && c == (cstate->eol_type == EOL_NL ? '\n' : '\r')) @@ -3420,7 +3421,7 @@ CopyReadLineText(CopyState cstate) * after a backslash is special, so we skip over that second * character too. If we didn't do that \\. would be * considered an eof-of copy, while in non-CSV mode it is a - * literal backslash followed by a period. In CSV mode, + * literal backslash followed by a period. In CSV mode, * backslashes are not special, so we want to process the * character after the backslash just like a normal character, * so we don't increment in those cases. @@ -3523,7 +3524,7 @@ CopyReadAttributesText(CopyState cstate) /* * The de-escaped attributes will certainly not be longer than the input * data line, so we can just force attribute_buf to be large enough and - * then transfer data without any checks for enough space. We need to do + * then transfer data without any checks for enough space. We need to do * it this way because enlarging attribute_buf mid-stream would invalidate * pointers already stored into cstate->raw_fields[]. */ @@ -3753,7 +3754,7 @@ CopyReadAttributesCSV(CopyState cstate) /* * The de-escaped attributes will certainly not be longer than the input * data line, so we can just force attribute_buf to be large enough and - * then transfer data without any checks for enough space. We need to do + * then transfer data without any checks for enough space. We need to do * it this way because enlarging attribute_buf mid-stream would invalidate * pointers already stored into cstate->raw_fields[]. */ @@ -3968,7 +3969,7 @@ CopyAttributeOutText(CopyState cstate, char *string) /* * We have to grovel through the string searching for control characters * and instances of the delimiter character. In most cases, though, these - * are infrequent. To avoid overhead from calling CopySendData once per + * are infrequent. To avoid overhead from calling CopySendData once per * character, we dump out all characters between escaped characters in a * single call. 
The loop invariant is that the data from "start" to "ptr" * can be sent literally, but hasn't yet been. diff --git a/src/backend/commands/createas.c b/src/backend/commands/createas.c index e434d38702e..96806eed98b 100644 --- a/src/backend/commands/createas.c +++ b/src/backend/commands/createas.c @@ -104,7 +104,7 @@ ExecCreateTableAs(CreateTableAsStmt *stmt, const char *queryString, /* * For materialized views, lock down security-restricted operations and - * arrange to make GUC variable changes local to this command. This is + * arrange to make GUC variable changes local to this command. This is * not necessary for security, but this keeps the behavior similar to * REFRESH MATERIALIZED VIEW. Otherwise, one could create a materialized * view not possible to refresh. @@ -124,9 +124,9 @@ ExecCreateTableAs(CreateTableAsStmt *stmt, const char *queryString, * plancache.c. * * Because the rewriter and planner tend to scribble on the input, we make - * a preliminary copy of the source querytree. This prevents problems in + * a preliminary copy of the source querytree. This prevents problems in * the case that CTAS is in a portal or plpgsql function and is executed - * repeatedly. (See also the same hack in EXPLAIN and PREPARE.) + * repeatedly. (See also the same hack in EXPLAIN and PREPARE.) */ rewritten = QueryRewrite((Query *) copyObject(query)); @@ -141,7 +141,7 @@ ExecCreateTableAs(CreateTableAsStmt *stmt, const char *queryString, /* * Use a snapshot with an updated command ID to ensure this query sees - * results of any previously executed queries. (This could only matter if + * results of any previously executed queries. (This could only matter if * the planner executed an allegedly-stable function that changed the * database contents, but let's do it anyway to be parallel to the EXPLAIN * code path.) @@ -359,8 +359,8 @@ intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo) /* * If necessary, create a TOAST table for the target table. Note that - * NewRelationCreateToastTable ends with CommandCounterIncrement(), so that - * the TOAST table will be visible for insertion. + * NewRelationCreateToastTable ends with CommandCounterIncrement(), so + * that the TOAST table will be visible for insertion. */ CommandCounterIncrement(); diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c index 4996a2e7cd2..5705889f31d 100644 --- a/src/backend/commands/dbcommands.c +++ b/src/backend/commands/dbcommands.c @@ -265,7 +265,7 @@ createdb(const CreatedbStmt *stmt) * To create a database, must have createdb privilege and must be able to * become the target role (this does not imply that the target role itself * must have createdb privilege). The latter provision guards against - * "giveaway" attacks. Note that a superuser will always have both of + * "giveaway" attacks. Note that a superuser will always have both of * these privileges a fortiori. */ if (!have_createdb_privilege()) @@ -397,7 +397,7 @@ createdb(const CreatedbStmt *stmt) /* * If we are trying to change the default tablespace of the template, * we require that the template not have any files in the new default - * tablespace. This is necessary because otherwise the copied + * tablespace. This is necessary because otherwise the copied * database would contain pg_class rows that refer to its default * tablespace both explicitly (by OID) and implicitly (as zero), which * would cause problems. 
For example another CREATE DATABASE using @@ -433,7 +433,7 @@ createdb(const CreatedbStmt *stmt) } /* - * Check for db name conflict. This is just to give a more friendly error + * Check for db name conflict. This is just to give a more friendly error * message than "unique index violation". There's a race condition but * we're willing to accept the less friendly message in that case. */ @@ -498,7 +498,7 @@ createdb(const CreatedbStmt *stmt) /* * We deliberately set datacl to default (NULL), rather than copying it - * from the template database. Copying it would be a bad idea when the + * from the template database. Copying it would be a bad idea when the * owner is not the same as the template's owner. */ new_record_nulls[Anum_pg_database_datacl - 1] = true; @@ -751,7 +751,8 @@ dropdb(const char *dbname, bool missing_ok) HeapTuple tup; int notherbackends; int npreparedxacts; - int nslots, nslots_active; + int nslots, + nslots_active; /* * Look up the target database's OID, and get exclusive lock on it. We @@ -1160,7 +1161,7 @@ movedb(const char *dbname, const char *tblspcname) /* * Use an ENSURE block to make sure we remove the debris if the copy fails - * (eg, due to out-of-disk-space). This is not a 100% solution, because + * (eg, due to out-of-disk-space). This is not a 100% solution, because * of the possibility of failure during transaction commit, but it should * handle most scenarios. */ @@ -1647,7 +1648,7 @@ get_db_info(const char *name, LOCKMODE lockmode, LockSharedObject(DatabaseRelationId, dbOid, 0, lockmode); /* - * And now, re-fetch the tuple by OID. If it's still there and still + * And now, re-fetch the tuple by OID. If it's still there and still * the same name, we win; else, drop the lock and loop back to try * again. */ diff --git a/src/backend/commands/define.c b/src/backend/commands/define.c index f0cb4f544e0..dca6e952a52 100644 --- a/src/backend/commands/define.c +++ b/src/backend/commands/define.c @@ -202,7 +202,7 @@ defGetInt64(DefElem *def) /* * Values too large for int4 will be represented as Float - * constants by the lexer. Accept these if they are valid int8 + * constants by the lexer. Accept these if they are valid int8 * strings. */ return DatumGetInt64(DirectFunctionCall1(int8in, diff --git a/src/backend/commands/event_trigger.c b/src/backend/commands/event_trigger.c index 024a4778a94..96f926cbb2a 100644 --- a/src/backend/commands/event_trigger.c +++ b/src/backend/commands/event_trigger.c @@ -606,7 +606,7 @@ filter_event_trigger(const char **tag, EventTriggerCacheItem *item) } /* - * Setup for running triggers for the given event. Return value is an OID list + * Setup for running triggers for the given event. Return value is an OID list * of functions to run; if there are any, trigdata is filled with an * appropriate EventTriggerData for them to receive. */ @@ -625,7 +625,7 @@ EventTriggerCommonSetup(Node *parsetree, * invoked to match up exactly with the list that CREATE EVENT TRIGGER * accepts. This debugging cross-check will throw an error if this * function is invoked for a command tag that CREATE EVENT TRIGGER won't - * accept. (Unfortunately, there doesn't seem to be any simple, automated + * accept. (Unfortunately, there doesn't seem to be any simple, automated * way to verify that CREATE EVENT TRIGGER doesn't accept extra stuff that * never reaches this control point.) * @@ -655,7 +655,7 @@ EventTriggerCommonSetup(Node *parsetree, /* * Filter list of event triggers by command tag, and copy them into our - * memory context. 
Once we start running the command trigers, or indeed + * memory context. Once we start running the command trigers, or indeed * once we do anything at all that touches the catalogs, an invalidation * might leave cachelist pointing at garbage, so we must do this before we * can do much else. @@ -783,7 +783,7 @@ EventTriggerSQLDrop(Node *parsetree) return; /* - * Use current state to determine whether this event fires at all. If + * Use current state to determine whether this event fires at all. If * there are no triggers for the sql_drop event, then we don't have * anything to do here. Note that dropped object collection is disabled * if this is the case, so even if we were to try to run, the list would @@ -798,7 +798,7 @@ EventTriggerSQLDrop(Node *parsetree) &trigdata); /* - * Nothing to do if run list is empty. Note this shouldn't happen, + * Nothing to do if run list is empty. Note this shouldn't happen, * because if there are no sql_drop events, then objects-to-drop wouldn't * have been collected in the first place and we would have quitted above. */ @@ -813,7 +813,7 @@ EventTriggerSQLDrop(Node *parsetree) /* * Make sure pg_event_trigger_dropped_objects only works when running - * these triggers. Use PG_TRY to ensure in_sql_drop is reset even when + * these triggers. Use PG_TRY to ensure in_sql_drop is reset even when * one trigger fails. (This is perhaps not necessary, as the currentState * variable will be removed shortly by our caller, but it seems better to * play safe.) @@ -1053,7 +1053,7 @@ EventTriggerBeginCompleteQuery(void) * returned false previously. * * Note: this might be called in the PG_CATCH block of a failing transaction, - * so be wary of running anything unnecessary. (In particular, it's probably + * so be wary of running anything unnecessary. (In particular, it's probably * unwise to try to allocate memory.) */ void diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c index 1104cc36312..794042b5501 100644 --- a/src/backend/commands/explain.c +++ b/src/backend/commands/explain.c @@ -86,7 +86,7 @@ static void show_sort_group_keys(PlanState *planstate, const char *qlabel, static void show_sort_info(SortState *sortstate, ExplainState *es); static void show_hash_info(HashState *hashstate, ExplainState *es); static void show_tidbitmap_info(BitmapHeapScanState *planstate, - ExplainState *es); + ExplainState *es); static void show_instrumentation_count(const char *qlabel, int which, PlanState *planstate, ExplainState *es); static void show_foreignscan_info(ForeignScanState *fsstate, ExplainState *es); @@ -197,7 +197,7 @@ ExplainQuery(ExplainStmt *stmt, const char *queryString, * plancache.c. * * Because the rewriter and planner tend to scribble on the input, we make - * a preliminary copy of the source querytree. This prevents problems in + * a preliminary copy of the source querytree. This prevents problems in * the case that the EXPLAIN is in a portal or plpgsql function and is * executed repeatedly. (See also the same hack in DECLARE CURSOR and * PREPARE.) XXX FIXME someday. 
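/*
 * [Editor's illustration -- not part of the diff] Minimal sketch of the
 * "copy before rewrite" hack described in the ExplainQuery() comment above:
 * the rewriter and planner scribble on their input, so hand them a
 * throwaway copy and keep the original parse tree intact for repeated
 * execution from a portal or plpgsql function.  Uses the standard
 * copyObject()/QueryRewrite() interfaces; treat it as an illustration of
 * the pattern, not the committed code.
 */
static List *
rewrite_query_copy(Query *query)
{
	/* copyObject() deep-copies the node tree; the original stays pristine */
	return QueryRewrite((Query *) copyObject(query));
}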
@@ -320,8 +320,9 @@ ExplainOneQuery(Query *query, IntoClause *into, ExplainState *es, (*ExplainOneQuery_hook) (query, into, es, queryString, params); else { - PlannedStmt *plan; - instr_time planstart, planduration; + PlannedStmt *plan; + instr_time planstart, + planduration; INSTR_TIME_SET_CURRENT(planstart); @@ -493,7 +494,7 @@ ExplainOnePlan(PlannedStmt *plannedstmt, IntoClause *into, ExplainState *es, if (es->costs && planduration) { - double plantime = INSTR_TIME_GET_DOUBLE(*planduration); + double plantime = INSTR_TIME_GET_DOUBLE(*planduration); if (es->format == EXPLAIN_FORMAT_TEXT) appendStringInfo(es->str, "Planning time: %.3f ms\n", @@ -542,7 +543,7 @@ ExplainOnePlan(PlannedStmt *plannedstmt, IntoClause *into, ExplainState *es, * convert a QueryDesc's plan tree to text and append it to es->str * * The caller should have set up the options fields of *es, as well as - * initializing the output buffer es->str. Other fields in *es are + * initializing the output buffer es->str. Other fields in *es are * initialized here. * * NB: will not work on utility statements @@ -567,7 +568,7 @@ ExplainPrintPlan(ExplainState *es, QueryDesc *queryDesc) * es->str * * The caller should have set up the options fields of *es, as well as - * initializing the output buffer es->str. Other fields in *es are + * initializing the output buffer es->str. Other fields in *es are * initialized here. */ void @@ -2193,7 +2194,7 @@ show_modifytable_info(ModifyTableState *mtstate, ExplainState *es) /* * If the first target relation is a foreign table, call its FDW to - * display whatever additional fields it wants to. For now, we ignore the + * display whatever additional fields it wants to. For now, we ignore the * possibility of other targets being foreign tables, although the API for * ExplainForeignModify is designed to allow them to be processed. */ @@ -2692,7 +2693,7 @@ ExplainXMLTag(const char *tagname, int flags, ExplainState *es) /* * Emit a JSON line ending. * - * JSON requires a comma after each property but the last. To facilitate this, + * JSON requires a comma after each property but the last. To facilitate this, * in JSON format, the text emitted for each property begins just prior to the * preceding line-break (and comma, if applicable). */ @@ -2713,7 +2714,7 @@ ExplainJSONLineEnding(ExplainState *es) * YAML lines are ordinarily indented by two spaces per indentation level. * The text emitted for each property begins just prior to the preceding * line-break, except for the first property in an unlabelled group, for which - * it begins immediately after the "- " that introduces the group. The first + * it begins immediately after the "- " that introduces the group. The first * property of the group appears on the same line as the opening "- ". */ static void diff --git a/src/backend/commands/extension.c b/src/backend/commands/extension.c index 06bd90b9aa9..9a0afa4b5dc 100644 --- a/src/backend/commands/extension.c +++ b/src/backend/commands/extension.c @@ -108,7 +108,7 @@ static void ApplyExtensionUpdates(Oid extensionOid, /* * get_extension_oid - given an extension name, look up the OID * - * If missing_ok is false, throw an error if extension name not found. If + * If missing_ok is false, throw an error if extension name not found. If * true, just return InvalidOid. */ Oid @@ -257,9 +257,9 @@ check_valid_extension_name(const char *extensionname) errdetail("Extension names must not contain \"--\"."))); /* - * No leading or trailing dash either. 
(We could probably allow this, but + * No leading or trailing dash either. (We could probably allow this, but * it would require much care in filename parsing and would make filenames - * visually if not formally ambiguous. Since there's no real-world use + * visually if not formally ambiguous. Since there's no real-world use * case, let's just forbid it.) */ if (extensionname[0] == '-' || extensionname[namelen - 1] == '-') @@ -435,7 +435,7 @@ get_extension_script_filename(ExtensionControlFile *control, /* * Parse contents of primary or auxiliary control file, and fill in - * fields of *control. We parse primary file if version == NULL, + * fields of *control. We parse primary file if version == NULL, * else the optional auxiliary file for that version. * * Control files are supposed to be very short, half a dozen lines, @@ -673,7 +673,7 @@ read_extension_script_file(const ExtensionControlFile *control, * filename is used only to report errors. * * Note: it's tempting to just use SPI to execute the string, but that does - * not work very well. The really serious problem is that SPI will parse, + * not work very well. The really serious problem is that SPI will parse, * analyze, and plan the whole string before executing any of it; of course * this fails if there are any plannable statements referring to objects * created earlier in the script. A lesser annoyance is that SPI insists @@ -848,7 +848,7 @@ execute_extension_script(Oid extensionOid, ExtensionControlFile *control, /* * Set creating_extension and related variables so that * recordDependencyOnCurrentExtension and other functions do the right - * things. On failure, ensure we reset these variables. + * things. On failure, ensure we reset these variables. */ creating_extension = true; CurrentExtensionObject = extensionOid; @@ -1092,7 +1092,7 @@ identify_update_path(ExtensionControlFile *control, * is still good. * * Result is a List of names of versions to transition through (the initial - * version is *not* included). Returns NIL if no such path. + * version is *not* included). Returns NIL if no such path. */ static List * find_update_path(List *evi_list, @@ -1193,7 +1193,7 @@ CreateExtension(CreateExtensionStmt *stmt) check_valid_extension_name(stmt->extname); /* - * Check for duplicate extension name. The unique index on + * Check for duplicate extension name. The unique index on * pg_extension.extname would catch this anyway, and serves as a backstop * in case of race conditions; but this is a friendlier error message, and * besides we need a check to support IF NOT EXISTS. @@ -1360,7 +1360,7 @@ CreateExtension(CreateExtensionStmt *stmt) { /* * The extension is not relocatable and the author gave us a schema - * for it. We create the schema here if it does not already exist. + * for it. We create the schema here if it does not already exist. */ schemaName = control->schema; schemaOid = get_namespace_oid(schemaName, true); @@ -1390,7 +1390,7 @@ CreateExtension(CreateExtensionStmt *stmt) */ List *search_path = fetch_search_path(false); - if (search_path == NIL) /* nothing valid in search_path? */ + if (search_path == NIL) /* nothing valid in search_path? 
*/ ereport(ERROR, (errcode(ERRCODE_UNDEFINED_SCHEMA), errmsg("no schema has been selected to create in"))); @@ -1589,7 +1589,7 @@ RemoveExtensionById(Oid extId) * might write "DROP EXTENSION foo" in foo's own script files, as because * errors in dependency management in extension script files could give * rise to cases where an extension is dropped as a result of recursing - * from some contained object. Because of that, we must test for the case + * from some contained object. Because of that, we must test for the case * here, not at some higher level of the DROP EXTENSION command. */ if (extId == CurrentExtensionObject) @@ -1620,7 +1620,7 @@ RemoveExtensionById(Oid extId) /* * This function lists the available extensions (one row per primary control - * file in the control directory). We parse each control file and report the + * file in the control directory). We parse each control file and report the * interesting fields. * * The system view pg_available_extensions provides a user interface to this @@ -1729,7 +1729,7 @@ pg_available_extensions(PG_FUNCTION_ARGS) /* * This function lists the available extension versions (one row per - * extension installation script). For each version, we parse the related + * extension installation script). For each version, we parse the related * control file(s) and report the interesting fields. * * The system view pg_available_extension_versions provides a user interface @@ -2517,7 +2517,7 @@ AlterExtensionNamespace(List *names, const char *newschema) Oid dep_oldNspOid; /* - * Ignore non-membership dependencies. (Currently, the only other + * Ignore non-membership dependencies. (Currently, the only other * case we could see here is a normal dependency from another * extension.) */ @@ -2929,7 +2929,7 @@ ExecAlterExtensionContentsStmt(AlterExtensionContentsStmt *stmt) /* * Prevent a schema from being added to an extension if the schema - * contains the extension. That would create a dependency loop. + * contains the extension. That would create a dependency loop. */ if (object.classId == NamespaceRelationId && object.objectId == get_extension_schema(extension.objectId)) diff --git a/src/backend/commands/foreigncmds.c b/src/backend/commands/foreigncmds.c index 7f007d7854a..8ab9c439db2 100644 --- a/src/backend/commands/foreigncmds.c +++ b/src/backend/commands/foreigncmds.c @@ -81,7 +81,7 @@ optionListToArray(List *options) /* - * Transform a list of DefElem into text array format. This is substantially + * Transform a list of DefElem into text array format. This is substantially * the same thing as optionListToArray(), except we recognize SET/ADD/DROP * actions for modifying an existing list of options, which is passed in * Datum form as oldOptions. Also, if fdwvalidator isn't InvalidOid @@ -125,7 +125,7 @@ transformGenericOptions(Oid catalogId, /* * It is possible to perform multiple SET/DROP actions on the same - * option. The standard permits this, as long as the options to be + * option. The standard permits this, as long as the options to be * added are unique. Note that an unspecified action is taken to be * ADD. */ diff --git a/src/backend/commands/functioncmds.c b/src/backend/commands/functioncmds.c index 4c8119a474d..470db5705cc 100644 --- a/src/backend/commands/functioncmds.c +++ b/src/backend/commands/functioncmds.c @@ -74,7 +74,7 @@ * allow a shell type to be used, or even created if the specified return type * doesn't exist yet. (Without this, there's no way to define the I/O procs * for a new type.) 
But SQL function creation won't cope, so error out if - * the target language is SQL. (We do this here, not in the SQL-function + * the target language is SQL. (We do this here, not in the SQL-function * validator, so as not to produce a NOTICE and then an ERROR for the same * condition.) */ @@ -451,7 +451,7 @@ interpret_function_parameter_list(List *parameters, * FUNCTION and ALTER FUNCTION and return it via one of the out * parameters. Returns true if the passed option was recognized. If * the out parameter we were going to assign to points to non-NULL, - * raise a duplicate-clause error. (We don't try to detect duplicate + * raise a duplicate-clause error. (We don't try to detect duplicate * SET parameters though --- if you're redundant, the last one wins.) */ static bool @@ -760,7 +760,7 @@ interpret_AS_clause(Oid languageOid, const char *languageName, { /* * For "C" language, store the file name in probin and, when given, - * the link symbol name in prosrc. If link symbol is omitted, + * the link symbol name in prosrc. If link symbol is omitted, * substitute procedure name. We also allow link symbol to be * specified as "-", since that was the habit in PG versions before * 8.4, and there might be dump files out there that don't translate @@ -1394,7 +1394,7 @@ CreateCast(CreateCastStmt *stmt) /* * Restricting the volatility of a cast function may or may not be a * good idea in the abstract, but it definitely breaks many old - * user-defined types. Disable this check --- tgl 2/1/03 + * user-defined types. Disable this check --- tgl 2/1/03 */ #ifdef NOT_USED if (procstruct->provolatile == PROVOLATILE_VOLATILE) @@ -1458,7 +1458,7 @@ CreateCast(CreateCastStmt *stmt) /* * We know that composite, enum and array types are never binary- - * compatible with each other. They all have OIDs embedded in them. + * compatible with each other. They all have OIDs embedded in them. * * Theoretically you could build a user-defined base type that is * binary-compatible with a composite, enum, or array type. But we @@ -1487,7 +1487,7 @@ CreateCast(CreateCastStmt *stmt) * We also disallow creating binary-compatibility casts involving * domains. Casting from a domain to its base type is already * allowed, and casting the other way ought to go through domain - * coercion to permit constraint checking. Again, if you're intent on + * coercion to permit constraint checking. Again, if you're intent on * having your own semantics for that, create a no-op cast function. * * NOTE: if we were to relax this, the above checks for composites diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c index 38ce023a8a2..fdfa6ca4f5c 100644 --- a/src/backend/commands/indexcmds.c +++ b/src/backend/commands/indexcmds.c @@ -102,7 +102,7 @@ static void RangeVarCallbackForReindexIndex(const RangeVar *relation, * concrete benefit for core types. * When a comparison or exclusion operator has a polymorphic input type, the - * actual input types must also match. This defends against the possibility + * actual input types must also match. This defends against the possibility * that operators could vary behavior in response to get_fn_expr_argtype(). * At present, this hazard is theoretical: check_exclusion_constraint() and * all core index access methods decline to set fn_expr for such calls. @@ -349,11 +349,11 @@ DefineIndex(Oid relationId, * index build; but for concurrent builds we allow INSERT/UPDATE/DELETE * (but not VACUUM). 
* - * NB: Caller is responsible for making sure that relationId refers - * to the relation on which the index should be built; except in bootstrap - * mode, this will typically require the caller to have already locked - * the relation. To avoid lock upgrade hazards, that lock should be at - * least as strong as the one we take here. + * NB: Caller is responsible for making sure that relationId refers to the + * relation on which the index should be built; except in bootstrap mode, + * this will typically require the caller to have already locked the + * relation. To avoid lock upgrade hazards, that lock should be at least + * as strong as the one we take here. */ lockmode = stmt->concurrent ? ShareUpdateExclusiveLock : ShareLock; rel = heap_open(relationId, lockmode); @@ -433,7 +433,7 @@ DefineIndex(Oid relationId, } /* - * Force shared indexes into the pg_global tablespace. This is a bit of a + * Force shared indexes into the pg_global tablespace. This is a bit of a * hack but seems simpler than marking them in the BKI commands. On the * other hand, if it's not shared, don't allow it to be placed there. */ @@ -628,7 +628,7 @@ DefineIndex(Oid relationId, /* * For a concurrent build, it's important to make the catalog entries * visible to other transactions before we start to build the index. That - * will prevent them from making incompatible HOT updates. The new index + * will prevent them from making incompatible HOT updates. The new index * will be marked not indisready and not indisvalid, so that no one else * tries to either insert into it or use it for queries. * @@ -676,7 +676,7 @@ DefineIndex(Oid relationId, * indexes. We have waited out all the existing transactions and any new * transaction will have the new index in its list, but the index is still * marked as "not-ready-for-inserts". The index is consulted while - * deciding HOT-safety though. This arrangement ensures that no new HOT + * deciding HOT-safety though. This arrangement ensures that no new HOT * chains can be created where the new tuple and the old tuple in the * chain have different index keys. * @@ -736,7 +736,7 @@ DefineIndex(Oid relationId, /* * Now take the "reference snapshot" that will be used by validate_index() - * to filter candidate tuples. Beware! There might still be snapshots in + * to filter candidate tuples. Beware! There might still be snapshots in * use that treat some transaction as in-progress that our reference * snapshot treats as committed. If such a recently-committed transaction * deleted tuples in the table, we will not include them in the index; yet @@ -761,7 +761,7 @@ DefineIndex(Oid relationId, * Drop the reference snapshot. We must do this before waiting out other * snapshot holders, else we will deadlock against other processes also * doing CREATE INDEX CONCURRENTLY, which would see our snapshot as one - * they must wait for. But first, save the snapshot's xmin to use as + * they must wait for. But first, save the snapshot's xmin to use as * limitXmin for GetCurrentVirtualXIDs(). */ limitXmin = snapshot->xmin; @@ -771,7 +771,7 @@ DefineIndex(Oid relationId, /* * The index is now valid in the sense that it contains all currently - * interesting tuples. But since it might not contain tuples deleted just + * interesting tuples. But since it might not contain tuples deleted just * before the reference snap was taken, we have to wait out any * transactions that might have older snapshots. Obtain a list of VXIDs * of such transactions, and wait for them individually. 
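/*
 * [Editor's illustration -- not part of the diff] Rough shape of the
 * "wait out older snapshots" step the DefineIndex() comments above
 * describe for CREATE INDEX CONCURRENTLY: collect the virtual transaction
 * ids of backends still running with a snapshot older than limitXmin,
 * then block on each one in turn.  Function and flag names follow
 * procarray.c/lmgr.c of this era, but this is a sketch, not the exact
 * committed loop.
 */
VirtualTransactionId *old_snapshots;
int			n_old_snapshots;
int			i;

old_snapshots = GetCurrentVirtualXIDs(limitXmin, true, false,
									  PROC_IS_AUTOVACUUM | PROC_IN_VACUUM,
									  &n_old_snapshots);
for (i = 0; i < n_old_snapshots; i++)
	VirtualXactLock(old_snapshots[i], true);	/* true = wait it out */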
@@ -786,7 +786,7 @@ DefineIndex(Oid relationId, * * We can also exclude autovacuum processes and processes running manual * lazy VACUUMs, because they won't be fazed by missing index entries - * either. (Manual ANALYZEs, however, can't be excluded because they + * either. (Manual ANALYZEs, however, can't be excluded because they * might be within transactions that are going to do arbitrary operations * later.) * @@ -875,7 +875,7 @@ CheckMutability(Expr *expr) { /* * First run the expression through the planner. This has a couple of - * important consequences. First, function default arguments will get + * important consequences. First, function default arguments will get * inserted, which may affect volatility (consider "default now()"). * Second, inline-able functions will get inlined, which may allow us to * conclude that the function is really less volatile than it's marked. As @@ -898,7 +898,7 @@ CheckMutability(Expr *expr) * Checks that the given partial-index predicate is valid. * * This used to also constrain the form of the predicate to forms that - * indxpath.c could do something with. However, that seems overly + * indxpath.c could do something with. However, that seems overly * restrictive. One useful application of partial indexes is to apply * a UNIQUE constraint across a subset of a table, and in that scenario * any evaluatable predicate will work. So accept any predicate here @@ -1009,7 +1009,7 @@ ComputeIndexAttrs(IndexInfo *indexInfo, attcollation = exprCollation(expr); /* - * Strip any top-level COLLATE clause. This ensures that we treat + * Strip any top-level COLLATE clause. This ensures that we treat * "x COLLATE y" and "(x COLLATE y)" alike. */ while (IsA(expr, CollateExpr)) @@ -1215,7 +1215,7 @@ GetIndexOpClass(List *opclass, Oid attrType, * 2000/07/30 * * Release 7.2 renames timestamp_ops to timestamptz_ops, so suppress that - * too for awhile. I'm starting to think we need a better approach. tgl + * too for awhile. I'm starting to think we need a better approach. tgl * 2000/10/01 * * Release 8.0 removes bigbox_ops (which was dead code for a long while @@ -1284,7 +1284,7 @@ GetIndexOpClass(List *opclass, Oid attrType, NameListToString(opclass), accessMethodName))); /* - * Verify that the index operator class accepts this datatype. Note we + * Verify that the index operator class accepts this datatype. Note we * will accept binary compatibility. */ opClassId = HeapTupleGetOid(tuple); @@ -1305,7 +1305,7 @@ GetIndexOpClass(List *opclass, Oid attrType, * GetDefaultOpClass * * Given the OIDs of a datatype and an access method, find the default - * operator class, if any. Returns InvalidOid if there is none. + * operator class, if any. Returns InvalidOid if there is none. */ Oid GetDefaultOpClass(Oid type_id, Oid am_id) @@ -1400,7 +1400,7 @@ GetDefaultOpClass(Oid type_id, Oid am_id) * Create a name for an implicitly created index, sequence, constraint, etc. * * The parameters are typically: the original table name, the original field - * name, and a "type" string (such as "seq" or "pkey"). The field name + * name, and a "type" string (such as "seq" or "pkey"). The field name * and/or type can be NULL if not relevant. * * The result is a palloc'd string. @@ -1408,7 +1408,7 @@ GetDefaultOpClass(Oid type_id, Oid am_id) * The basic result we want is "name1_name2_label", omitting "_name2" or * "_label" when those parameters are NULL. However, we must generate * a name with less than NAMEDATALEN characters! 
So, we truncate one or - * both names if necessary to make a short-enough string. The label part + * both names if necessary to make a short-enough string. The label part * is never truncated (so it had better be reasonably short). * * The caller is responsible for checking uniqueness of the generated @@ -1603,7 +1603,7 @@ ChooseIndexNameAddition(List *colnames) /* * Select the actual names to be used for the columns of an index, given the - * list of IndexElems for the columns. This is mostly about ensuring the + * list of IndexElems for the columns. This is mostly about ensuring the * names are unique so we don't get a conflicting-attribute-names error. * * Returns a List of plain strings (char *, not String nodes). @@ -1714,7 +1714,7 @@ RangeVarCallbackForReindexIndex(const RangeVar *relation, /* * If the relation does exist, check whether it's an index. But note that * the relation might have been dropped between the time we did the name - * lookup and now. In that case, there's nothing to do. + * lookup and now. In that case, there's nothing to do. */ relkind = get_rel_relkind(relId); if (!relkind) diff --git a/src/backend/commands/matview.c b/src/backend/commands/matview.c index a301d65b60e..5130d512a6a 100644 --- a/src/backend/commands/matview.c +++ b/src/backend/commands/matview.c @@ -240,9 +240,9 @@ ExecRefreshMatView(RefreshMatViewStmt *stmt, const char *queryString, owner = matviewRel->rd_rel->relowner; /* - * Create the transient table that will receive the regenerated data. - * Lock it against access by any other process until commit (by which time - * it will be gone). + * Create the transient table that will receive the regenerated data. Lock + * it against access by any other process until commit (by which time it + * will be gone). */ OIDNewHeap = make_new_heap(matviewOid, tableSpace, concurrent, ExclusiveLock); @@ -319,7 +319,7 @@ refresh_matview_datafill(DestReceiver *dest, Query *query, /* * Use a snapshot with an updated command ID to ensure this query sees - * results of any previously executed queries. (This could only matter if + * results of any previously executed queries. (This could only matter if * the planner executed an allegedly-stable function that changed the * database contents, but let's do it anyway to be safe.) */ @@ -495,9 +495,9 @@ mv_GenerateOper(StringInfo buf, Oid opoid) * * This is called after a new version of the data has been created in a * temporary table. It performs a full outer join against the old version of - * the data, producing "diff" results. This join cannot work if there are any + * the data, producing "diff" results. This join cannot work if there are any * duplicated rows in either the old or new versions, in the sense that every - * column would compare as equal between the two rows. It does work correctly + * column would compare as equal between the two rows. It does work correctly * in the face of rows which have at least one NULL value, with all non-NULL * columns equal. The behavior of NULLs on equality tests and on UNIQUE * indexes turns out to be quite convenient here; the tests we need to make @@ -561,7 +561,7 @@ refresh_by_match_merge(Oid matviewOid, Oid tempOid) /* * We need to ensure that there are not duplicate rows without NULLs in - * the new data set before we can count on the "diff" results. Check for + * the new data set before we can count on the "diff" results. Check for * that in a way that allows showing the first duplicated row found. 
Even * after we pass this test, a unique index on the materialized view may * find a duplicate key problem. @@ -707,7 +707,7 @@ refresh_by_match_merge(Oid matviewOid, Oid tempOid) /* Deletes must come before inserts; do them first. */ resetStringInfo(&querybuf); appendStringInfo(&querybuf, - "DELETE FROM %s mv WHERE ctid OPERATOR(pg_catalog.=) ANY " + "DELETE FROM %s mv WHERE ctid OPERATOR(pg_catalog.=) ANY " "(SELECT diff.tid FROM %s diff " "WHERE diff.tid IS NOT NULL " "AND diff.newdata IS NULL)", diff --git a/src/backend/commands/opclasscmds.c b/src/backend/commands/opclasscmds.c index 5d7b37c674a..4b2baaceff0 100644 --- a/src/backend/commands/opclasscmds.c +++ b/src/backend/commands/opclasscmds.c @@ -391,7 +391,7 @@ DefineOpClass(CreateOpClassStmt *stmt) * A minimum expectation therefore is that the caller have execute * privilege with grant option. Since we don't have a way to make the * opclass go away if the grant option is revoked, we choose instead to - * require ownership of the functions. It's also not entirely clear what + * require ownership of the functions. It's also not entirely clear what * permissions should be required on the datatype, but ownership seems * like a safe choice. * @@ -673,7 +673,7 @@ DefineOpClass(CreateOpClassStmt *stmt) opclassoid, procedures, false); /* - * Create dependencies for the opclass proper. Note: we do not create a + * Create dependencies for the opclass proper. Note: we do not create a * dependency link to the AM, because we don't currently support DROP * ACCESS METHOD. */ @@ -1090,7 +1090,7 @@ assignOperTypes(OpFamilyMember *member, Oid amoid, Oid typeoid) if (OidIsValid(member->sortfamily)) { /* - * Ordering op, check index supports that. (We could perhaps also + * Ordering op, check index supports that. (We could perhaps also * check that the operator returns a type supported by the sortfamily, * but that seems more trouble than it's worth here. If it does not, * the operator will never be matchable to any ORDER BY clause, but no @@ -1219,7 +1219,7 @@ assignProcTypes(OpFamilyMember *member, Oid amoid, Oid typeoid) /* * The default in CREATE OPERATOR CLASS is to use the class' opcintype as - * lefttype and righttype. In CREATE or ALTER OPERATOR FAMILY, opcintype + * lefttype and righttype. In CREATE or ALTER OPERATOR FAMILY, opcintype * isn't available, so make the user specify the types. */ if (!OidIsValid(member->lefttype)) diff --git a/src/backend/commands/operatorcmds.c b/src/backend/commands/operatorcmds.c index c2560cbce38..85b81b7928f 100644 --- a/src/backend/commands/operatorcmds.c +++ b/src/backend/commands/operatorcmds.c @@ -211,7 +211,7 @@ DefineOperator(List *names, List *parameters) functionOid = LookupFuncName(functionName, nargs, typeId, false); /* - * We require EXECUTE rights for the function. This isn't strictly + * We require EXECUTE rights for the function. This isn't strictly * necessary, since EXECUTE will be checked at any attempted use of the * operator, but it seems like a good idea anyway. */ diff --git a/src/backend/commands/portalcmds.c b/src/backend/commands/portalcmds.c index e7c681ab7f4..28e785afb84 100644 --- a/src/backend/commands/portalcmds.c +++ b/src/backend/commands/portalcmds.c @@ -4,7 +4,7 @@ * Utility commands affecting portals (that is, SQL cursor commands) * * Note: see also tcop/pquery.c, which implements portal operations for - * the FE/BE protocol. This module uses pquery.c for some operations. + * the FE/BE protocol. This module uses pquery.c for some operations. 
* And both modules depend on utils/mmgr/portalmem.c, which controls * storage management for portals (but doesn't run any queries in them). * @@ -89,7 +89,7 @@ PerformCursorOpen(PlannedStmt *stmt, ParamListInfo params, /*---------- * Also copy the outer portal's parameter list into the inner portal's - * memory context. We want to pass down the parameter values in case we + * memory context. We want to pass down the parameter values in case we * had a command like * DECLARE c CURSOR FOR SELECT ... WHERE foo = $1 * This will have been parsed using the outer parameter set and the @@ -106,7 +106,7 @@ PerformCursorOpen(PlannedStmt *stmt, ParamListInfo params, * * If the user didn't specify a SCROLL type, allow or disallow scrolling * based on whether it would require any additional runtime overhead to do - * so. Also, we disallow scrolling for FOR UPDATE cursors. + * so. Also, we disallow scrolling for FOR UPDATE cursors. */ portal->cursorOptions = cstmt->options; if (!(portal->cursorOptions & (CURSOR_OPT_SCROLL | CURSOR_OPT_NO_SCROLL))) @@ -365,7 +365,7 @@ PersistHoldablePortal(Portal portal) ExecutorRewind(queryDesc); /* - * Change the destination to output to the tuplestore. Note we tell + * Change the destination to output to the tuplestore. Note we tell * the tuplestore receiver to detoast all data passed through it. */ queryDesc->dest = CreateDestReceiver(DestTuplestore); diff --git a/src/backend/commands/prepare.c b/src/backend/commands/prepare.c index 65431b713d0..10168e3e801 100644 --- a/src/backend/commands/prepare.c +++ b/src/backend/commands/prepare.c @@ -174,7 +174,7 @@ PrepareQuery(PrepareStmt *stmt, const char *queryString) * ExecuteQuery --- implement the 'EXECUTE' utility statement. * * This code also supports CREATE TABLE ... AS EXECUTE. That case is - * indicated by passing a non-null intoClause. The DestReceiver is already + * indicated by passing a non-null intoClause. The DestReceiver is already * set up correctly for CREATE TABLE AS, but we still have to make a few * other adjustments here. * @@ -211,7 +211,7 @@ ExecuteQuery(ExecuteStmt *stmt, IntoClause *intoClause, { /* * Need an EState to evaluate parameters; must not delete it till end - * of query, in case parameters are pass-by-reference. Note that the + * of query, in case parameters are pass-by-reference. Note that the * passed-in "params" could possibly be referenced in the parameter * expressions. */ @@ -237,7 +237,7 @@ ExecuteQuery(ExecuteStmt *stmt, IntoClause *intoClause, /* * For CREATE TABLE ... AS EXECUTE, we must verify that the prepared * statement is one that produces tuples. Currently we insist that it be - * a plain old SELECT. In future we might consider supporting other + * a plain old SELECT. In future we might consider supporting other * things such as INSERT ... RETURNING, but there are a couple of issues * to be settled first, notably how WITH NO DATA should be handled in such * a case (do we really want to suppress execution?) and how to pass down @@ -529,7 +529,7 @@ FetchPreparedStatementResultDesc(PreparedStatement *stmt) /* * Given a prepared statement that returns tuples, extract the query - * targetlist. Returns NIL if the statement doesn't have a determinable + * targetlist. Returns NIL if the statement doesn't have a determinable * targetlist. 
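PerformCursorOpen(), quoted above, defaults the SCROLL behaviour when the user gave neither SCROLL nor NO_SCROLL: scrolling is allowed only when it adds no runtime overhead, and never for FOR UPDATE cursors. Below is a hedged standalone sketch of that decision. CURSOR_OPT_SCROLL and CURSOR_OPT_NO_SCROLL appear in the excerpt; the CURSOR_OPT_FOR_UPDATE bit and the backward_scan_is_free parameter are invented stand-ins for the backend's actual checks.

#include <stdbool.h>
#include <stdio.h>

/* SCROLL/NO_SCROLL bits appear in the excerpt; the third is a stand-in. */
#define CURSOR_OPT_SCROLL      0x0001
#define CURSOR_OPT_NO_SCROLL   0x0002
#define CURSOR_OPT_FOR_UPDATE  0x0004   /* stand-in for "cursor has FOR UPDATE" */

/*
 * If neither SCROLL nor NO_SCROLL was specified, allow scrolling only when
 * it would cost nothing extra at runtime and the cursor is not FOR UPDATE.
 */
static int
default_cursor_options(int options, bool backward_scan_is_free)
{
    if (!(options & (CURSOR_OPT_SCROLL | CURSOR_OPT_NO_SCROLL)))
    {
        if (backward_scan_is_free && !(options & CURSOR_OPT_FOR_UPDATE))
            options |= CURSOR_OPT_SCROLL;
        else
            options |= CURSOR_OPT_NO_SCROLL;
    }
    return options;
}

int
main(void)
{
    printf("0x%x\n", default_cursor_options(0, true));                     /* gets SCROLL */
    printf("0x%x\n", default_cursor_options(CURSOR_OPT_FOR_UPDATE, true)); /* gets NO_SCROLL */
    return 0;
}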
* * Note: this is pretty ugly, but since it's only used in corner cases like @@ -644,7 +644,7 @@ ExplainExecuteQuery(ExecuteStmt *execstmt, IntoClause *into, ExplainState *es, { /* * Need an EState to evaluate parameters; must not delete it till end - * of query, in case parameters are pass-by-reference. Note that the + * of query, in case parameters are pass-by-reference. Note that the * passed-in "params" could possibly be referenced in the parameter * expressions. */ diff --git a/src/backend/commands/proclang.c b/src/backend/commands/proclang.c index 75b4ce56ae8..6fb34637f88 100644 --- a/src/backend/commands/proclang.c +++ b/src/backend/commands/proclang.c @@ -260,7 +260,7 @@ CreateProceduralLanguage(CreatePLangStmt *stmt) if (funcrettype != LANGUAGE_HANDLEROID) { /* - * We allow OPAQUE just so we can load old dump files. When we + * We allow OPAQUE just so we can load old dump files. When we * see a handler function declared OPAQUE, change it to * LANGUAGE_HANDLER. (This is probably obsolete and removable?) */ diff --git a/src/backend/commands/schemacmds.c b/src/backend/commands/schemacmds.c index 2599e28cc45..03f5514d39b 100644 --- a/src/backend/commands/schemacmds.c +++ b/src/backend/commands/schemacmds.c @@ -67,7 +67,7 @@ CreateSchemaCommand(CreateSchemaStmt *stmt, const char *queryString) * To create a schema, must have schema-create privilege on the current * database and must be able to become the target role (this does not * imply that the target role itself must have create-schema privilege). - * The latter provision guards against "giveaway" attacks. Note that a + * The latter provision guards against "giveaway" attacks. Note that a * superuser will always have both of these privileges a fortiori. */ aclresult = pg_database_aclcheck(MyDatabaseId, saved_uid, ACL_CREATE); @@ -132,7 +132,7 @@ CreateSchemaCommand(CreateSchemaStmt *stmt, const char *queryString) /* * Examine the list of commands embedded in the CREATE SCHEMA command, and * reorganize them into a sequentially executable order with no forward - * references. Note that the result is still a list of raw parsetrees --- + * references. Note that the result is still a list of raw parsetrees --- * we cannot, in general, run parse analysis on one statement until we * have actually executed the prior ones. */ diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c index 2829b1e3044..e6084203a88 100644 --- a/src/backend/commands/sequence.c +++ b/src/backend/commands/sequence.c @@ -279,7 +279,7 @@ ResetSequence(Oid seq_relid) seq->log_cnt = 0; /* - * Create a new storage file for the sequence. We want to keep the + * Create a new storage file for the sequence. We want to keep the * sequence's relfrozenxid at 0, since it won't contain any unfrozen XIDs. * Same with relminmxid, since a sequence will never contain multixacts. */ @@ -325,9 +325,9 @@ fill_seq_with_data(Relation rel, HeapTuple tuple) LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE); /* - * Since VACUUM does not process sequences, we have to force the tuple - * to have xmin = FrozenTransactionId now. Otherwise it would become - * invisible to SELECTs after 2G transactions. It is okay to do this + * Since VACUUM does not process sequences, we have to force the tuple to + * have xmin = FrozenTransactionId now. Otherwise it would become + * invisible to SELECTs after 2G transactions. It is okay to do this * because if the current transaction aborts, no other xact will ever * examine the sequence tuple anyway. 
*/ @@ -487,7 +487,7 @@ nextval(PG_FUNCTION_ARGS) * XXX: This is not safe in the presence of concurrent DDL, but acquiring * a lock here is more expensive than letting nextval_internal do it, * since the latter maintains a cache that keeps us from hitting the lock - * manager more than once per transaction. It's not clear whether the + * manager more than once per transaction. It's not clear whether the * performance penalty is material in practice, but for now, we do it this * way. */ @@ -567,7 +567,7 @@ nextval_internal(Oid relid) } /* - * Decide whether we should emit a WAL log record. If so, force up the + * Decide whether we should emit a WAL log record. If so, force up the * fetch count to grab SEQ_LOG_VALS more values than we actually need to * cache. (These will then be usable without logging.) * @@ -674,7 +674,7 @@ nextval_internal(Oid relid) * We must mark the buffer dirty before doing XLogInsert(); see notes in * SyncOneBuffer(). However, we don't apply the desired changes just yet. * This looks like a violation of the buffer update protocol, but it is in - * fact safe because we hold exclusive lock on the buffer. Any other + * fact safe because we hold exclusive lock on the buffer. Any other * process, including a checkpoint, that tries to examine the buffer * contents will block until we release the lock, and then will see the * final state that we install below. @@ -936,7 +936,7 @@ setval3_oid(PG_FUNCTION_ARGS) * Open the sequence and acquire AccessShareLock if needed * * If we haven't touched the sequence already in this transaction, - * we need to acquire AccessShareLock. We arrange for the lock to + * we need to acquire AccessShareLock. We arrange for the lock to * be owned by the top transaction, so that we don't need to do it * more than once per xact. */ @@ -1037,7 +1037,7 @@ init_sequence(Oid relid, SeqTable *p_elm, Relation *p_rel) /* * If the sequence has been transactionally replaced since we last saw it, - * discard any cached-but-unissued values. We do not touch the currval() + * discard any cached-but-unissued values. We do not touch the currval() * state, however. */ if (seqrel->rd_rel->relfilenode != elm->filenode) @@ -1554,13 +1554,13 @@ seq_redo(XLogRecPtr lsn, XLogRecord *record) page = (Page) BufferGetPage(buffer); /* - * We always reinit the page. However, since this WAL record type is - * also used for updating sequences, it's possible that a hot-standby - * backend is examining the page concurrently; so we mustn't transiently - * trash the buffer. The solution is to build the correct new page - * contents in local workspace and then memcpy into the buffer. Then only - * bytes that are supposed to change will change, even transiently. We - * must palloc the local page for alignment reasons. + * We always reinit the page. However, since this WAL record type is also + * used for updating sequences, it's possible that a hot-standby backend + * is examining the page concurrently; so we mustn't transiently trash the + * buffer. The solution is to build the correct new page contents in + * local workspace and then memcpy into the buffer. Then only bytes that + * are supposed to change will change, even transiently. We must palloc + * the local page for alignment reasons. 
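nextval_internal(), quoted above, avoids writing WAL for every nextval() call: when a WAL record has to be emitted anyway, it logs SEQ_LOG_VALS values beyond what it actually needs, so the following calls are served from cache without logging. A minimal standalone sketch of that bookkeeping follows; the SEQ_LOG_VALS value and the simplified "values still covered by the last WAL record" counter are assumptions for the example, not the real constants or fields in sequence.c.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SEQ_LOG_VALS 32         /* illustrative; the real constant lives in sequence.c */

/*
 * Given how many values the caller wants to hand out/cache and how many
 * values are still covered by the last WAL record (log_cnt), decide whether
 * a new WAL record is needed and, if so, how far ahead to fetch/log.
 */
static int64_t
plan_fetch(int64_t wanted, int64_t log_cnt, bool *need_wal)
{
    if (log_cnt >= wanted)
    {
        *need_wal = false;      /* still covered; no WAL record required */
        return wanted;
    }
    *need_wal = true;           /* must log: grab SEQ_LOG_VALS extra values */
    return wanted + SEQ_LOG_VALS;
}

int
main(void)
{
    bool    need_wal;
    int64_t fetch = plan_fetch(1, 0, &need_wal);

    printf("fetch %lld values, WAL needed: %s\n",
           (long long) fetch, need_wal ? "yes" : "no");
    return 0;
}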
*/ localpage = (Page) palloc(BufferGetPageSize(buffer)); diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c index 619aa78d809..341262b6fc8 100644 --- a/src/backend/commands/tablecmds.c +++ b/src/backend/commands/tablecmds.c @@ -276,7 +276,7 @@ static void AlterSeqNamespaces(Relation classRel, Relation rel, Oid oldNspOid, Oid newNspOid, ObjectAddresses *objsMoved, LOCKMODE lockmode); static void ATExecAlterConstraint(Relation rel, AlterTableCmd *cmd, - bool recurse, bool recursing, LOCKMODE lockmode); + bool recurse, bool recursing, LOCKMODE lockmode); static void ATExecValidateConstraint(Relation rel, char *constrName, bool recurse, bool recursing, LOCKMODE lockmode); static int transformColumnNameList(Oid relId, List *colList, @@ -557,7 +557,7 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId) &inheritOids, &old_constraints, &parentOidCount); /* - * Create a tuple descriptor from the relation schema. Note that this + * Create a tuple descriptor from the relation schema. Note that this * deals with column names, types, and NOT NULL constraints, but not * default values or CHECK constraints; we handle those below. */ @@ -657,7 +657,7 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId) CommandCounterIncrement(); /* - * Open the new relation and acquire exclusive lock on it. This isn't + * Open the new relation and acquire exclusive lock on it. This isn't * really necessary for locking out other backends (since they can't see * the new rel anyway until we commit), but it keeps the lock manager from * complaining about deadlock risks. @@ -702,7 +702,7 @@ DropErrorMsgNonExistent(RangeVar *rel, char rightkind, bool missing_ok) { ereport(ERROR, (errcode(ERRCODE_UNDEFINED_SCHEMA), - errmsg("schema \"%s\" does not exist", rel->schemaname))); + errmsg("schema \"%s\" does not exist", rel->schemaname))); } else { @@ -1022,10 +1022,10 @@ ExecuteTruncate(TruncateStmt *stmt) } /* - * In CASCADE mode, suck in all referencing relations as well. This + * In CASCADE mode, suck in all referencing relations as well. This * requires multiple iterations to find indirectly-dependent relations. At * each phase, we need to exclusive-lock new rels before looking for their - * dependencies, else we might miss something. Also, we check each rel as + * dependencies, else we might miss something. Also, we check each rel as * soon as we open it, to avoid a faux pas such as holding lock for a long * time on a rel we have no permissions for. */ @@ -1246,7 +1246,7 @@ ExecuteTruncate(TruncateStmt *stmt) } /* - * Check that a given rel is safe to truncate. Subroutine for ExecuteTruncate + * Check that a given rel is safe to truncate. Subroutine for ExecuteTruncate */ static void truncate_check_rel(Relation rel) @@ -1674,7 +1674,7 @@ MergeAttributes(List *schema, List *supers, char relpersistence, /* * Now copy the CHECK constraints of this parent, adjusting attnos - * using the completed newattno[] map. Identically named constraints + * using the completed newattno[] map. Identically named constraints * are merged if possible, else we throw error. */ if (constr && constr->num_check > 0) @@ -1735,7 +1735,7 @@ MergeAttributes(List *schema, List *supers, char relpersistence, /* * Close the parent rel, but keep our AccessShareLock on it until xact - * commit. That will prevent someone else from deleting or ALTERing + * commit. That will prevent someone else from deleting or ALTERing * the parent before the child is committed. 
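MergeAttributes(), quoted above, copies each parent's CHECK constraints while translating parent attribute numbers through the completed newattno[] map. The backend does this inside the constraint's expression tree; the standalone sketch below shows only the remapping idea, applied to a plain array of 1-based attribute numbers.

#include <stdio.h>

/*
 * Remap 1-based parent attnos to child attnos via newattno[]:
 * newattno[parent_attno - 1] holds the child attno, or 0 for a column the
 * child does not have.  Returns the count remapped, or -1 if a referenced
 * column is missing in the child.
 */
static int
remap_attnos(int *attnos, int nattnos, const int *newattno, int nparentatts)
{
    for (int i = 0; i < nattnos; i++)
    {
        int parent_attno = attnos[i];

        if (parent_attno < 1 || parent_attno > nparentatts ||
            newattno[parent_attno - 1] == 0)
            return -1;          /* constraint references an unknown column */
        attnos[i] = newattno[parent_attno - 1];
    }
    return nattnos;
}

int
main(void)
{
    int newattno[] = {2, 0, 1}; /* parent col 1 -> child 2, col 2 absent, col 3 -> 1 */
    int check_cols[] = {1, 3};  /* columns used by one inherited CHECK constraint */

    if (remap_attnos(check_cols, 2, newattno, 3) > 0)
        printf("remapped to child attnos %d and %d\n", check_cols[0], check_cols[1]);
    return 0;
}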
*/ heap_close(relation, NoLock); @@ -2243,7 +2243,7 @@ renameatt_internal(Oid myrelid, oldattname))); /* - * if the attribute is inherited, forbid the renaming. if this is a + * if the attribute is inherited, forbid the renaming. if this is a * top-level call to renameatt(), then expected_parents will be 0, so the * effect of this code will be to prohibit the renaming if the attribute * is inherited at all. if this is a recursive call to renameatt(), @@ -2547,7 +2547,7 @@ RenameRelationInternal(Oid myrelid, const char *newrelname, bool is_internal) newrelname))); /* - * Update pg_class tuple with new relname. (Scribbling on reltup is OK + * Update pg_class tuple with new relname. (Scribbling on reltup is OK * because it's a copy...) */ namestrcpy(&(relform->relname), newrelname); @@ -2603,7 +2603,7 @@ RenameRelationInternal(Oid myrelid, const char *newrelname, bool is_internal) * We also reject these commands if there are any pending AFTER trigger events * for the rel. This is certainly necessary for the rewriting variants of * ALTER TABLE, because they don't preserve tuple TIDs and so the pending - * events would try to fetch the wrong tuples. It might be overly cautious + * events would try to fetch the wrong tuples. It might be overly cautious * in other cases, but again it seems better to err on the side of paranoia. * * REINDEX calls this with "rel" referencing the index to be rebuilt; here @@ -2659,23 +2659,23 @@ AlterTableLookupRelation(AlterTableStmt *stmt, LOCKMODE lockmode) * 3. Scan table(s) to check new constraints, and optionally recopy * the data into new table(s). * Phase 3 is not performed unless one or more of the subcommands requires - * it. The intention of this design is to allow multiple independent + * it. The intention of this design is to allow multiple independent * updates of the table schema to be performed with only one pass over the * data. * - * ATPrepCmd performs phase 1. A "work queue" entry is created for + * ATPrepCmd performs phase 1. A "work queue" entry is created for * each table to be affected (there may be multiple affected tables if the * commands traverse a table inheritance hierarchy). Also we do preliminary * validation of the subcommands, including parse transformation of those * expressions that need to be evaluated with respect to the old table * schema. * - * ATRewriteCatalogs performs phase 2 for each affected table. (Note that + * ATRewriteCatalogs performs phase 2 for each affected table. (Note that * phases 2 and 3 normally do no explicit recursion, since phase 1 already * did it --- although some subcommands have to recurse in phase 2 instead.) * Certain subcommands need to be performed before others to avoid * unnecessary conflicts; for example, DROP COLUMN should come before - * ADD COLUMN. Therefore phase 1 divides the subcommands into multiple + * ADD COLUMN. Therefore phase 1 divides the subcommands into multiple * lists, one for each logical "pass" of phase 2. * * ATRewriteTables performs phase 3 for those tables that need it. @@ -2782,17 +2782,18 @@ AlterTableGetLockLevel(List *cmds) * to SELECT */ case AT_SetTableSpace: /* must rewrite heap */ case AT_AlterColumnType: /* must rewrite heap */ - case AT_AddOids: /* must rewrite heap */ + case AT_AddOids: /* must rewrite heap */ cmd_lockmode = AccessExclusiveLock; break; /* - * These subcommands may require addition of toast tables. If we - * add a toast table to a table currently being scanned, we + * These subcommands may require addition of toast tables. 
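The ALTER TABLE overview quoted above describes phase 1 building one "work queue" entry per affected table and sorting the subcommands into per-pass lists so phase 2 can run them in a safe order (for example, DROP COLUMN before ADD COLUMN). The compact standalone sketch below shows that data layout and dispatch loop; the pass names, struct layout, and linked list here are simplified stand-ins for the backend's AT_PASS_* constants and List nodes.

#include <stdio.h>

/* Simplified stand-ins for the backend's AT_PASS_* phases. */
enum at_pass { PASS_DROP, PASS_ADD_COL, PASS_ADD_CONSTR, PASS_MISC, NUM_PASSES };

struct subcmd
{
    const char    *description;
    struct subcmd *next;
};

/* One work-queue entry per affected table: a list of subcommands per pass. */
struct work_entry
{
    const char    *relname;
    struct subcmd *subcmds[NUM_PASSES];
};

/* Phase 1: file a subcommand under the pass it must run in. */
static void
queue_subcmd(struct work_entry *entry, enum at_pass pass, struct subcmd *cmd)
{
    cmd->next = entry->subcmds[pass];
    entry->subcmds[pass] = cmd;
}

/* Phase 2: execute the passes in order, so drops happen before adds. */
static void
run_passes(const struct work_entry *entry)
{
    for (int pass = 0; pass < NUM_PASSES; pass++)
        for (const struct subcmd *cmd = entry->subcmds[pass]; cmd; cmd = cmd->next)
            printf("%s: %s\n", entry->relname, cmd->description);
}

int
main(void)
{
    struct work_entry t = {"my_table", {0}};
    struct subcmd add = {"ADD COLUMN b int", 0};
    struct subcmd drop = {"DROP COLUMN a", 0};

    queue_subcmd(&t, PASS_ADD_COL, &add);
    queue_subcmd(&t, PASS_DROP, &drop);
    run_passes(&t);             /* DROP COLUMN runs before ADD COLUMN */
    return 0;
}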
If + * we add a toast table to a table currently being scanned, we * might miss data added to the new toast table by concurrent * insert transactions. */ - case AT_SetStorage: /* may add toast tables, see ATRewriteCatalogs() */ + case AT_SetStorage:/* may add toast tables, see + * ATRewriteCatalogs() */ cmd_lockmode = AccessExclusiveLock; break; @@ -2808,12 +2809,12 @@ AlterTableGetLockLevel(List *cmds) /* * Subcommands that may be visible to concurrent SELECTs */ - case AT_DropColumn: /* change visible to SELECT */ + case AT_DropColumn: /* change visible to SELECT */ case AT_AddColumnToView: /* CREATE VIEW */ - case AT_DropOids: /* calls AT_DropColumn */ + case AT_DropOids: /* calls AT_DropColumn */ case AT_EnableAlwaysRule: /* may change SELECT rules */ case AT_EnableReplicaRule: /* may change SELECT rules */ - case AT_EnableRule: /* may change SELECT rules */ + case AT_EnableRule: /* may change SELECT rules */ case AT_DisableRule: /* may change SELECT rules */ cmd_lockmode = AccessExclusiveLock; break; @@ -2834,8 +2835,8 @@ AlterTableGetLockLevel(List *cmds) break; /* - * These subcommands affect write operations only. - * XXX Theoretically, these could be ShareRowExclusiveLock. + * These subcommands affect write operations only. XXX + * Theoretically, these could be ShareRowExclusiveLock. */ case AT_ColumnDefault: case AT_ProcessedConstraint: /* becomes AT_AddConstraint */ @@ -2872,9 +2873,9 @@ AlterTableGetLockLevel(List *cmds) * Cases essentially the same as CREATE INDEX. We * could reduce the lock strength to ShareLock if * we can work out how to allow concurrent catalog - * updates. - * XXX Might be set down to ShareRowExclusiveLock - * but requires further analysis. + * updates. XXX Might be set down to + * ShareRowExclusiveLock but requires further + * analysis. */ cmd_lockmode = AccessExclusiveLock; break; @@ -2883,10 +2884,9 @@ AlterTableGetLockLevel(List *cmds) /* * We add triggers to both tables when we add a * Foreign Key, so the lock level must be at least - * as strong as CREATE TRIGGER. - * XXX Might be set down to ShareRowExclusiveLock - * though trigger info is accessed by - * pg_get_triggerdef + * as strong as CREATE TRIGGER. XXX Might be set + * down to ShareRowExclusiveLock though trigger + * info is accessed by pg_get_triggerdef */ cmd_lockmode = AccessExclusiveLock; break; @@ -2902,8 +2902,8 @@ AlterTableGetLockLevel(List *cmds) * started before us will continue to see the old inheritance * behaviour, while queries started after we commit will see * new behaviour. No need to prevent reads or writes to the - * subtable while we hook it up though. - * Changing the TupDesc may be a problem, so keep highest lock. + * subtable while we hook it up though. Changing the TupDesc + * may be a problem, so keep highest lock. */ case AT_AddInherit: case AT_DropInherit: @@ -2912,9 +2912,9 @@ AlterTableGetLockLevel(List *cmds) /* * These subcommands affect implicit row type conversion. They - * have affects similar to CREATE/DROP CAST on queries. - * don't provide for invalidating parse trees as a result of - * such changes, so we keep these at AccessExclusiveLock. + * have affects similar to CREATE/DROP CAST on queries. don't + * provide for invalidating parse trees as a result of such + * changes, so we keep these at AccessExclusiveLock. */ case AT_AddOf: case AT_DropOf: @@ -2940,29 +2940,32 @@ AlterTableGetLockLevel(List *cmds) * updates. 
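AlterTableGetLockLevel(), whose per-subcommand cases are quoted above, assigns a cmd_lockmode to each subcommand; the whole ALTER TABLE then presumably needs the strongest of those locks. A hedged sketch of that max-over-subcommands pattern, using an invented numeric ordering in place of the backend's LOCKMODE values:

#include <stdio.h>

/* Invented ordering: higher value = stronger lock (stand-in for LOCKMODE). */
enum lockmode
{
    SHARE_UPDATE_EXCLUSIVE = 1,
    SHARE_ROW_EXCLUSIVE = 2,
    ACCESS_EXCLUSIVE = 3
};

/* Pick the strongest lock required by any subcommand in the list. */
static enum lockmode
strongest_lock(const enum lockmode *cmd_lockmodes, int ncmds)
{
    enum lockmode lockmode = SHARE_UPDATE_EXCLUSIVE;

    for (int i = 0; i < ncmds; i++)
        if (cmd_lockmodes[i] > lockmode)
            lockmode = cmd_lockmodes[i];
    return lockmode;
}

int
main(void)
{
    enum lockmode cmds[] = {SHARE_UPDATE_EXCLUSIVE, ACCESS_EXCLUSIVE};

    printf("%d\n", strongest_lock(cmds, 2)); /* 3: one rewriting subcommand forces the strongest lock */
    return 0;
}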
*/ case AT_SetStatistics: /* Uses MVCC in getTableAttrs() */ - case AT_ClusterOn: /* Uses MVCC in getIndexes() */ + case AT_ClusterOn: /* Uses MVCC in getIndexes() */ case AT_DropCluster: /* Uses MVCC in getIndexes() */ - case AT_SetOptions: /* Uses MVCC in getTableAttrs() */ + case AT_SetOptions: /* Uses MVCC in getTableAttrs() */ case AT_ResetOptions: /* Uses MVCC in getTableAttrs() */ cmd_lockmode = ShareUpdateExclusiveLock; break; - case AT_ValidateConstraint: /* Uses MVCC in getConstraints() */ + case AT_ValidateConstraint: /* Uses MVCC in + * getConstraints() */ cmd_lockmode = ShareUpdateExclusiveLock; break; /* * Rel options are more complex than first appears. Options * are set here for tables, views and indexes; for historical - * reasons these can all be used with ALTER TABLE, so we - * can't decide between them using the basic grammar. + * reasons these can all be used with ALTER TABLE, so we can't + * decide between them using the basic grammar. * * XXX Look in detail at each option to determine lock level, - * e.g. - * cmd_lockmode = GetRelOptionsLockLevel((List *) cmd->def); + * e.g. cmd_lockmode = GetRelOptionsLockLevel((List *) + * cmd->def); */ - case AT_SetRelOptions: /* Uses MVCC in getIndexes() and getTables() */ - case AT_ResetRelOptions: /* Uses MVCC in getIndexes() and getTables() */ + case AT_SetRelOptions: /* Uses MVCC in getIndexes() and + * getTables() */ + case AT_ResetRelOptions: /* Uses MVCC in getIndexes() and + * getTables() */ cmd_lockmode = AccessExclusiveLock; break; @@ -3209,7 +3212,7 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd, cmd->subtype = AT_ValidateConstraintRecurse; pass = AT_PASS_MISC; break; - case AT_ReplicaIdentity: /* REPLICA IDENTITY ... */ + case AT_ReplicaIdentity: /* REPLICA IDENTITY ... */ ATSimplePermissions(rel, ATT_TABLE | ATT_MATVIEW); pass = AT_PASS_MISC; /* This command never recurses */ @@ -3258,7 +3261,7 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd, /* * ATRewriteCatalogs * - * Traffic cop for ALTER TABLE Phase 2 operations. Subcommands are + * Traffic cop for ALTER TABLE Phase 2 operations. Subcommands are * dispatched in a "safe" execution order (designed to avoid unnecessary * conflicts). */ @@ -3604,8 +3607,8 @@ ATRewriteTables(List **wqueue, LOCKMODE lockmode) if (RelationIsUsedAsCatalogTable(OldHeap)) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot rewrite table \"%s\" used as a catalog table", - RelationGetRelationName(OldHeap)))); + errmsg("cannot rewrite table \"%s\" used as a catalog table", + RelationGetRelationName(OldHeap)))); /* * Don't allow rewrite on temp tables of other backends ... their @@ -3856,7 +3859,7 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap, LOCKMODE lockmode) { /* * All predicate locks on the tuples or pages are about to be made - * invalid, because we move tuples around. Promote them to + * invalid, because we move tuples around. Promote them to * relation locks. */ TransferPredicateLocksToHeapRelation(oldrel); @@ -3946,8 +3949,8 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap, LOCKMODE lockmode) HeapTupleSetOid(tuple, tupOid); /* - * Constraints might reference the tableoid column, so initialize - * t_tableOid before evaluating them. + * Constraints might reference the tableoid column, so + * initialize t_tableOid before evaluating them. 
*/ tuple->t_tableOid = RelationGetRelid(oldrel); } @@ -4404,7 +4407,7 @@ find_typed_table_dependencies(Oid typeOid, const char *typeName, DropBehavior be * * Check whether a type is suitable for CREATE TABLE OF/ALTER TABLE OF. If it * isn't suitable, throw an error. Currently, we require that the type - * originated with CREATE TYPE AS. We could support any row type, but doing so + * originated with CREATE TYPE AS. We could support any row type, but doing so * would require handling a number of extra corner cases in the DDL commands. */ void @@ -4423,7 +4426,7 @@ check_of_type(HeapTuple typetuple) /* * Close the parent rel, but keep our AccessShareLock on it until xact - * commit. That will prevent someone else from deleting or ALTERing + * commit. That will prevent someone else from deleting or ALTERing * the type before the typed table creation/conversion commits. */ relation_close(typeRelation, NoLock); @@ -4882,7 +4885,7 @@ add_column_collation_dependency(Oid relid, int32 attnum, Oid collid) /* * ALTER TABLE SET WITH OIDS * - * Basically this is an ADD COLUMN for the special OID column. We have + * Basically this is an ADD COLUMN for the special OID column. We have * to cons up a ColumnDef node because the ADD COLUMN code needs one. */ static void @@ -5352,7 +5355,7 @@ ATExecSetStorage(Relation rel, const char *colName, Node *newValue, LOCKMODE loc * * DROP COLUMN cannot use the normal ALTER TABLE recursion mechanism, * because we have to decide at runtime whether to recurse or not depending - * on whether attinhcount goes to zero or not. (We can't check this in a + * on whether attinhcount goes to zero or not. (We can't check this in a * static pre-pass because it won't handle multiple inheritance situations * correctly.) */ @@ -5600,7 +5603,7 @@ ATExecAddIndex(AlteredTableInfo *tab, Relation rel, /* * If TryReuseIndex() stashed a relfilenode for us, we used it for the new - * index instead of building from scratch. The DROP of the old edition of + * index instead of building from scratch. The DROP of the old edition of * this index will have scheduled the storage for deletion at commit, so * cancel that pending deletion. */ @@ -5642,7 +5645,7 @@ ATExecAddIndexConstraint(AlteredTableInfo *tab, Relation rel, elog(ERROR, "index \"%s\" is not unique", indexName); /* - * Determine name to assign to constraint. We require a constraint to + * Determine name to assign to constraint. We require a constraint to * have the same name as the underlying index; therefore, use the index's * existing name as the default constraint name, and if the user * explicitly gives some other name for the constraint, rename the index @@ -5851,7 +5854,7 @@ ATAddCheckConstraint(List **wqueue, AlteredTableInfo *tab, Relation rel, /* * Check if ONLY was specified with ALTER TABLE. If so, allow the - * contraint creation only if there are no children currently. Error out + * contraint creation only if there are no children currently. Error out * otherwise. */ if (!recurse && children != NIL) @@ -5883,7 +5886,7 @@ ATAddCheckConstraint(List **wqueue, AlteredTableInfo *tab, Relation rel, /* * Add a foreign-key constraint to a single table * - * Subroutine for ATExecAddConstraint. Must already hold exclusive + * Subroutine for ATExecAddConstraint. Must already hold exclusive * lock on the rel, and have done appropriate validity checks for it. * We do permissions checks here, however. 
*/ @@ -6022,7 +6025,7 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel, * * Note that we have to be careful about the difference between the actual * PK column type and the opclass' declared input type, which might be - * only binary-compatible with it. The declared opcintype is the right + * only binary-compatible with it. The declared opcintype is the right * thing to probe pg_amop with. */ if (numfks != numpks) @@ -6179,7 +6182,7 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel, /* * Upon a change to the cast from the FK column to its pfeqop - * operand, revalidate the constraint. For this evaluation, a + * operand, revalidate the constraint. For this evaluation, a * binary coercion cast is equivalent to no cast at all. While * type implementors should design implicit casts with an eye * toward consistency of operations like equality, we cannot @@ -6197,7 +6200,7 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel, * Necessarily, the primary key column must then be of the domain * type. Since the constraint was previously valid, all values on * the foreign side necessarily exist on the primary side and in - * turn conform to the domain. Consequently, we need not treat + * turn conform to the domain. Consequently, we need not treat * domains specially here. * * Since we require that all collations share the same notion of @@ -6207,7 +6210,7 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel, * We need not directly consider the PK type. It's necessarily * binary coercible to the opcintype of the unique index column, * and ri_triggers.c will only deal with PK datums in terms of - * that opcintype. Changing the opcintype also changes pfeqop. + * that opcintype. Changing the opcintype also changes pfeqop. */ old_check_ok = (new_pathtype == old_pathtype && new_castfunc == old_castfunc && @@ -6300,14 +6303,14 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel, */ static void ATExecAlterConstraint(Relation rel, AlterTableCmd *cmd, - bool recurse, bool recursing, LOCKMODE lockmode) + bool recurse, bool recursing, LOCKMODE lockmode) { Relation conrel; SysScanDesc scan; ScanKeyData key; HeapTuple contuple; Form_pg_constraint currcon = NULL; - Constraint *cmdcon = NULL; + Constraint *cmdcon = NULL; bool found = false; Assert(IsA(cmd->def, Constraint)); @@ -6374,8 +6377,8 @@ ATExecAlterConstraint(Relation rel, AlterTableCmd *cmd, heap_freetuple(copyTuple); /* - * Now we need to update the multiple entries in pg_trigger - * that implement the constraint. + * Now we need to update the multiple entries in pg_trigger that + * implement the constraint. */ tgrel = heap_open(TriggerRelationId, RowExclusiveLock); @@ -6397,7 +6400,7 @@ ATExecAlterConstraint(Relation rel, AlterTableCmd *cmd, CatalogUpdateIndexes(tgrel, copyTuple); InvokeObjectPostAlterHook(TriggerRelationId, - HeapTupleGetOid(tgtuple), 0); + HeapTupleGetOid(tgtuple), 0); heap_freetuple(copyTuple); } @@ -6619,10 +6622,10 @@ transformColumnNameList(Oid relId, List *colList, * transformFkeyGetPrimaryKey - * * Look up the names, attnums, and types of the primary key attributes - * for the pkrel. Also return the index OID and index opclasses of the + * for the pkrel. Also return the index OID and index opclasses of the * index supporting the primary key. * - * All parameters except pkrel are output parameters. Also, the function + * All parameters except pkrel are output parameters. Also, the function * return value is the number of attributes in the primary key. 
* * Used when the column list in the REFERENCES specification is omitted. @@ -6662,7 +6665,7 @@ transformFkeyGetPrimaryKey(Relation pkrel, Oid *indexOid, if (indexStruct->indisprimary && IndexIsValid(indexStruct)) { /* - * Refuse to use a deferrable primary key. This is per SQL spec, + * Refuse to use a deferrable primary key. This is per SQL spec, * and there would be a lot of interesting semantic problems if we * tried to allow it. */ @@ -7592,7 +7595,7 @@ ATPrepAlterColumnType(List **wqueue, tab->relkind == RELKIND_FOREIGN_TABLE) { /* - * For composite types, do this check now. Tables will check it later + * For composite types, do this check now. Tables will check it later * when the table is being rewritten. */ find_composite_type_dependencies(rel->rd_rel->reltype, rel, NULL); @@ -7601,7 +7604,7 @@ ATPrepAlterColumnType(List **wqueue, ReleaseSysCache(tuple); /* - * The recursion case is handled by ATSimpleRecursion. However, if we are + * The recursion case is handled by ATSimpleRecursion. However, if we are * told not to recurse, there had better not be any child tables; else the * alter would put them out of step. */ @@ -7710,7 +7713,7 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, * * We remove any implicit coercion steps at the top level of the old * default expression; this has been agreed to satisfy the principle of - * least surprise. (The conversion to the new column type should act like + * least surprise. (The conversion to the new column type should act like * it started from what the user sees as the stored expression, and the * implicit coercions aren't going to be shown.) */ @@ -7739,7 +7742,7 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, * and record enough information to let us recreate the objects. * * The actual recreation does not happen here, but only after we have - * performed all the individual ALTER TYPE operations. We have to save + * performed all the individual ALTER TYPE operations. We have to save * the info before executing ALTER TYPE, though, else the deparser will * get confused. * @@ -7868,7 +7871,7 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, * used in the trigger's WHEN condition. The first case would * not require any extra work, but the second case would * require updating the WHEN expression, which will take a - * significant amount of new code. Since we can't easily tell + * significant amount of new code. Since we can't easily tell * which case applies, we punt for both. FIXME someday. */ ereport(ERROR, @@ -8144,24 +8147,24 @@ ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab, LOCKMODE lockmode) /* * Re-parse the index and constraint definitions, and attach them to the - * appropriate work queue entries. We do this before dropping because in + * appropriate work queue entries. We do this before dropping because in * the case of a FOREIGN KEY constraint, we might not yet have exclusive * lock on the table the constraint is attached to, and we need to get * that before dropping. It's safe because the parser won't actually look * at the catalogs to detect the existing entry. * - * We can't rely on the output of deparsing to tell us which relation - * to operate on, because concurrent activity might have made the name + * We can't rely on the output of deparsing to tell us which relation to + * operate on, because concurrent activity might have made the name * resolve differently. 
Instead, we've got to use the OID of the - * constraint or index we're processing to figure out which relation - * to operate on. + * constraint or index we're processing to figure out which relation to + * operate on. */ forboth(oid_item, tab->changedConstraintOids, def_item, tab->changedConstraintDefs) { - Oid oldId = lfirst_oid(oid_item); - Oid relid; - Oid confrelid; + Oid oldId = lfirst_oid(oid_item); + Oid relid; + Oid confrelid; get_constraint_relation_oids(oldId, &relid, &confrelid); ATPostAlterTypeParse(oldId, relid, confrelid, @@ -8171,8 +8174,8 @@ ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab, LOCKMODE lockmode) forboth(oid_item, tab->changedIndexOids, def_item, tab->changedIndexDefs) { - Oid oldId = lfirst_oid(oid_item); - Oid relid; + Oid oldId = lfirst_oid(oid_item); + Oid relid; relid = IndexGetRelation(oldId, false); ATPostAlterTypeParse(oldId, relid, InvalidOid, @@ -8238,9 +8241,9 @@ ATPostAlterTypeParse(Oid oldId, Oid oldRelId, Oid refRelId, char *cmd, cmd)); else if (IsA(stmt, AlterTableStmt)) querytree_list = list_concat(querytree_list, - transformAlterTableStmt(oldRelId, + transformAlterTableStmt(oldRelId, (AlterTableStmt *) stmt, - cmd)); + cmd)); else querytree_list = lappend(querytree_list, stmt); } @@ -8925,13 +8928,13 @@ ATExecSetRelOptions(Relation rel, List *defList, AlterTableType operation, if (check_option) { const char *view_updatable_error = - view_query_is_auto_updatable(view_query, true); + view_query_is_auto_updatable(view_query, true); if (view_updatable_error) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("WITH CHECK OPTION is supported only on auto-updatable views"), - errhint("%s", view_updatable_error))); + errmsg("WITH CHECK OPTION is supported only on auto-updatable views"), + errhint("%s", view_updatable_error))); } } @@ -9098,7 +9101,8 @@ ATExecSetTableSpace(Oid tableOid, Oid newTableSpace, LOCKMODE lockmode) /* Fetch the list of indexes on toast relation if necessary */ if (OidIsValid(reltoastrelid)) { - Relation toastRel = relation_open(reltoastrelid, lockmode); + Relation toastRel = relation_open(reltoastrelid, lockmode); + reltoastidxids = RelationGetIndexList(toastRel); relation_close(toastRel, lockmode); } @@ -9120,8 +9124,8 @@ ATExecSetTableSpace(Oid tableOid, Oid newTableSpace, LOCKMODE lockmode) FlushRelationBuffers(rel); /* - * Relfilenodes are not unique in databases across tablespaces, so we - * need to allocate a new one in the new tablespace. + * Relfilenodes are not unique in databases across tablespaces, so we need + * to allocate a new one in the new tablespace. */ newrelfilenode = GetNewRelFileNode(newTableSpace, NULL, rel->rd_rel->relpersistence); @@ -9236,9 +9240,9 @@ copy_relation_data(SMgrRelation src, SMgrRelation dst, forkNum)))); /* - * WAL-log the copied page. Unfortunately we don't know what kind of - * a page this is, so we have to log the full page including any - * unused space. + * WAL-log the copied page. Unfortunately we don't know what kind of a + * page this is, so we have to log the full page including any unused + * space. */ if (use_wal) log_newpage(&dst->smgr_rnode.node, forkNum, blkno, page, false); @@ -9246,7 +9250,7 @@ copy_relation_data(SMgrRelation src, SMgrRelation dst, PageSetChecksumInplace(page, blkno); /* - * Now write the page. We say isTemp = true even if it's not a temp + * Now write the page. We say isTemp = true even if it's not a temp * rel, because there's no need for smgr to schedule an fsync for this * write; we'll do it ourselves below. 
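copy_relation_data(), quoted above, copies a relation block by block, WAL-logs each page, and deliberately skips a per-page fsync request because a single sync at the end covers all of the writes. The standalone sketch below shows only that "copy now, sync once at the end" shape, using POSIX file I/O rather than the backend's smgr layer; BLCKSZ and the file paths are illustrative.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define BLCKSZ 8192             /* illustrative block size */

/* Copy src to dst one block at a time; fsync once at the end, not per block. */
static int
copy_blocks(const char *src_path, const char *dst_path)
{
    char    buf[BLCKSZ];
    ssize_t nread;
    int     src = open(src_path, O_RDONLY);
    int     dst = open(dst_path, O_WRONLY | O_CREAT | O_TRUNC, 0600);

    if (src < 0 || dst < 0)
        return -1;

    while ((nread = read(src, buf, sizeof(buf))) > 0)
    {
        /* the backend would also WAL-log and checksum the page here */
        if (write(dst, buf, (size_t) nread) != nread)
            return -1;
    }

    /* One sync covers all the writes above (the backend uses heap_sync). */
    if (fsync(dst) != 0)
        return -1;
    close(src);
    close(dst);
    return 0;
}

int
main(int argc, char **argv)
{
    if (argc == 3 && copy_blocks(argv[1], argv[2]) == 0)
        puts("copied and synced");
    return 0;
}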
*/ @@ -9256,7 +9260,7 @@ copy_relation_data(SMgrRelation src, SMgrRelation dst, pfree(buf); /* - * If the rel is WAL-logged, must fsync before commit. We use heap_sync + * If the rel is WAL-logged, must fsync before commit. We use heap_sync * to ensure that the toast table gets fsync'd too. (For a temp or * unlogged rel we don't care since the data will be gone after a crash * anyway.) @@ -9431,7 +9435,7 @@ ATExecAddInherit(Relation child_rel, RangeVar *parent, LOCKMODE lockmode) MergeConstraintsIntoExisting(child_rel, parent_rel); /* - * OK, it looks valid. Make the catalog entries that show inheritance. + * OK, it looks valid. Make the catalog entries that show inheritance. */ StoreCatalogInheritance1(RelationGetRelid(child_rel), RelationGetRelid(parent_rel), @@ -9907,7 +9911,7 @@ ATExecDropInherit(Relation rel, RangeVar *parent, LOCKMODE lockmode) * Drop the dependency created by StoreCatalogInheritance1 (CREATE TABLE * INHERITS/ALTER TABLE INHERIT -- refclassid will be RelationRelationId) or * heap_create_with_catalog (CREATE TABLE OF/ALTER TABLE OF -- refclassid will - * be TypeRelationId). There's no convenient way to do this, so go trawling + * be TypeRelationId). There's no convenient way to do this, so go trawling * through pg_depend. */ static void @@ -10093,7 +10097,7 @@ ATExecAddOf(Relation rel, const TypeName *ofTypename, LOCKMODE lockmode) /* * ALTER TABLE NOT OF * - * Detach a typed table from its originating type. Just clear reloftype and + * Detach a typed table from its originating type. Just clear reloftype and * remove the dependency. */ static void @@ -10155,7 +10159,7 @@ relation_mark_replica_identity(Relation rel, char ri_type, Oid indexOid, */ pg_class = heap_open(RelationRelationId, RowExclusiveLock); pg_class_tuple = SearchSysCacheCopy1(RELOID, - ObjectIdGetDatum(RelationGetRelid(rel))); + ObjectIdGetDatum(RelationGetRelid(rel))); if (!HeapTupleIsValid(pg_class_tuple)) elog(ERROR, "cache lookup failed for relation \"%s\"", RelationGetRelationName(rel)); @@ -10191,8 +10195,8 @@ relation_mark_replica_identity(Relation rel, char ri_type, Oid indexOid, } /* - * Clear the indisreplident flag from any index that had it previously, and - * set it for any index that should have it now. + * Clear the indisreplident flag from any index that had it previously, + * and set it for any index that should have it now. 
*/ pg_index = heap_open(IndexRelationId, RowExclusiveLock); foreach(index, RelationGetIndexList(rel)) @@ -10201,7 +10205,7 @@ relation_mark_replica_identity(Relation rel, char ri_type, Oid indexOid, bool dirty = false; pg_index_tuple = SearchSysCacheCopy1(INDEXRELID, - ObjectIdGetDatum(thisIndexOid)); + ObjectIdGetDatum(thisIndexOid)); if (!HeapTupleIsValid(pg_index_tuple)) elog(ERROR, "cache lookup failed for index %u", thisIndexOid); pg_index_form = (Form_pg_index) GETSTRUCT(pg_index_tuple); @@ -10261,7 +10265,7 @@ ATExecReplicaIdentity(Relation rel, ReplicaIdentityStmt *stmt, LOCKMODE lockmode } else if (stmt->identity_type == REPLICA_IDENTITY_INDEX) { - /* fallthrough */; + /* fallthrough */ ; } else elog(ERROR, "unexpected identity type %u", stmt->identity_type); @@ -10289,20 +10293,20 @@ ATExecReplicaIdentity(Relation rel, ReplicaIdentityStmt *stmt, LOCKMODE lockmode if (!indexRel->rd_am->amcanunique || !indexRel->rd_index->indisunique) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("cannot use non-unique index \"%s\" as replica identity", - RelationGetRelationName(indexRel)))); + errmsg("cannot use non-unique index \"%s\" as replica identity", + RelationGetRelationName(indexRel)))); /* Deferred indexes are not guaranteed to be always unique. */ if (!indexRel->rd_index->indimmediate) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot use non-immediate index \"%s\" as replica identity", - RelationGetRelationName(indexRel)))); + errmsg("cannot use non-immediate index \"%s\" as replica identity", + RelationGetRelationName(indexRel)))); /* Expression indexes aren't supported. */ if (RelationGetIndexExpressions(indexRel) != NIL) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot use expression index \"%s\" as replica identity", - RelationGetRelationName(indexRel)))); + errmsg("cannot use expression index \"%s\" as replica identity", + RelationGetRelationName(indexRel)))); /* Predicate indexes aren't supported. */ if (RelationGetIndexPredicate(indexRel) != NIL) ereport(ERROR, @@ -10319,7 +10323,7 @@ ATExecReplicaIdentity(Relation rel, ReplicaIdentityStmt *stmt, LOCKMODE lockmode /* Check index for nullable columns. */ for (key = 0; key < indexRel->rd_index->indnatts; key++) { - int16 attno = indexRel->rd_index->indkey.values[key]; + int16 attno = indexRel->rd_index->indkey.values[key]; Form_pg_attribute attr; /* Of the system columns, only oid is indexable. */ @@ -10878,7 +10882,7 @@ AtEOXact_on_commit_actions(bool isCommit) * Post-subcommit or post-subabort cleanup for ON COMMIT management. * * During subabort, we can immediately remove entries created during this - * subtransaction. During subcommit, just relabel entries marked during + * subtransaction. During subcommit, just relabel entries marked during * this subtransaction as being the parent's responsibility. */ void @@ -10922,7 +10926,7 @@ AtEOSubXact_on_commit_actions(bool isCommit, SubTransactionId mySubid, * This is intended as a callback for RangeVarGetRelidExtended(). It allows * the relation to be locked only if (1) it's a plain table, materialized * view, or TOAST table and (2) the current user is the owner (or the - * superuser). This meets the permission-checking needs of CLUSTER, REINDEX + * superuser). This meets the permission-checking needs of CLUSTER, REINDEX * TABLE, and REFRESH MATERIALIZED VIEW; we expose it here so that it can be * used by all. 
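ATExecReplicaIdentity(), quoted above, refuses to use an index as REPLICA IDENTITY unless it is unique, immediate (not deferrable), has no expression columns, no predicate, and indexes no nullable columns and no system column other than oid. The standalone sketch below runs those checks over a simplified index descriptor; the struct and its field names are invented for the example and are not the backend's Form_pg_index layout.

#include <stdbool.h>
#include <stdio.h>

/* Simplified index descriptor, invented for this sketch. */
struct index_info
{
    bool is_unique;
    bool is_immediate;          /* false for DEFERRABLE constraints */
    bool has_expressions;
    bool has_predicate;
    int  natts;
    int  attnos[8];             /* >0 user column, <0 system column */
    bool attnotnull[8];         /* NOT NULL flag per indexed user column */
};

/* Return NULL if usable as replica identity, else the reason it is not. */
static const char *
check_replica_identity_index(const struct index_info *idx)
{
    if (!idx->is_unique)
        return "index is not unique";
    if (!idx->is_immediate)
        return "deferrable index is not guaranteed to be always unique";
    if (idx->has_expressions)
        return "expression indexes are not supported";
    if (idx->has_predicate)
        return "partial (predicate) indexes are not supported";
    for (int i = 0; i < idx->natts; i++)
    {
        if (idx->attnos[i] < 0)
            return "indexes a system column";   /* the excerpt allows only oid */
        if (!idx->attnotnull[i])
            return "indexes a nullable column";
    }
    return NULL;
}

int
main(void)
{
    struct index_info idx = {true, true, false, false, 1, {1}, {true}};
    const char *why = check_replica_identity_index(&idx);

    puts(why ? why : "usable as replica identity");
    return 0;
}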
*/ @@ -10939,7 +10943,7 @@ RangeVarCallbackOwnsTable(const RangeVar *relation, /* * If the relation does exist, check whether it's an index. But note that * the relation might have been dropped between the time we did the name - * lookup and now. In that case, there's nothing to do. + * lookup and now. In that case, there's nothing to do. */ relkind = get_rel_relkind(relId); if (!relkind) @@ -11105,8 +11109,8 @@ RangeVarCallbackForAlterRelation(const RangeVar *rv, Oid relid, Oid oldrelid, relkind != RELKIND_FOREIGN_TABLE) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("\"%s\" is not a table, view, materialized view, sequence, or foreign table", - rv->relname))); + errmsg("\"%s\" is not a table, view, materialized view, sequence, or foreign table", + rv->relname))); ReleaseSysCache(tuple); } diff --git a/src/backend/commands/tablespace.c b/src/backend/commands/tablespace.c index 357e6e19741..031be37a1e7 100644 --- a/src/backend/commands/tablespace.c +++ b/src/backend/commands/tablespace.c @@ -31,7 +31,7 @@ * To allow CREATE DATABASE to give a new database a default tablespace * that's different from the template database's default, we make the * provision that a zero in pg_class.reltablespace means the database's - * default tablespace. Without this, CREATE DATABASE would have to go in + * default tablespace. Without this, CREATE DATABASE would have to go in * and munge the system catalogs of the new database. * * @@ -281,7 +281,7 @@ CreateTableSpace(CreateTableSpaceStmt *stmt) * reference the whole path here, but mkdir() uses the first two parts. */ if (strlen(location) + 1 + strlen(TABLESPACE_VERSION_DIRECTORY) + 1 + - OIDCHARS + 1 + OIDCHARS + 1 + FORKNAMECHARS + 1 + OIDCHARS > MAXPGPATH) + OIDCHARS + 1 + OIDCHARS + 1 + FORKNAMECHARS + 1 + OIDCHARS > MAXPGPATH) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), errmsg("tablespace location \"%s\" is too long", @@ -488,7 +488,7 @@ DropTableSpace(DropTableSpaceStmt *stmt) * Not all files deleted? However, there can be lingering empty files * in the directories, left behind by for example DROP TABLE, that * have been scheduled for deletion at next checkpoint (see comments - * in mdunlink() for details). We could just delete them immediately, + * in mdunlink() for details). We could just delete them immediately, * but we can't tell them apart from important data files that we * mustn't delete. So instead, we force a checkpoint which will clean * out any lingering files, and try again. @@ -562,10 +562,10 @@ create_tablespace_directories(const char *location, const Oid tablespaceoid) linkloc = psprintf("pg_tblspc/%u", tablespaceoid); location_with_version_dir = psprintf("%s/%s", location, - TABLESPACE_VERSION_DIRECTORY); + TABLESPACE_VERSION_DIRECTORY); /* - * Attempt to coerce target directory to safe permissions. If this fails, + * Attempt to coerce target directory to safe permissions. If this fails, * it doesn't exist or has the wrong owner. */ if (chmod(location, S_IRWXU) != 0) @@ -666,7 +666,7 @@ create_tablespace_directories(const char *location, const Oid tablespaceoid) * Attempt to remove filesystem infrastructure for the tablespace. * * 'redo' indicates we are redoing a drop from XLOG; in that case we should - * not throw an ERROR for problems, just LOG them. The worst consequence of + * not throw an ERROR for problems, just LOG them. 
The worst consequence of * not removing files here would be failure to release some disk space, which * does not justify throwing an error that would require manual intervention * to get the database running again. @@ -684,7 +684,7 @@ destroy_tablespace_directories(Oid tablespaceoid, bool redo) struct stat st; linkloc_with_version_dir = psprintf("pg_tblspc/%u/%s", tablespaceoid, - TABLESPACE_VERSION_DIRECTORY); + TABLESPACE_VERSION_DIRECTORY); /* * Check if the tablespace still contains any files. We try to rmdir each @@ -701,10 +701,10 @@ destroy_tablespace_directories(Oid tablespaceoid, bool redo) * * If redo is true then ENOENT is a likely outcome here, and we allow it * to pass without comment. In normal operation we still allow it, but - * with a warning. This is because even though ProcessUtility disallows + * with a warning. This is because even though ProcessUtility disallows * DROP TABLESPACE in a transaction block, it's possible that a previous * DROP failed and rolled back after removing the tablespace directories - * and/or symlink. We want to allow a new DROP attempt to succeed at + * and/or symlink. We want to allow a new DROP attempt to succeed at * removing the catalog entries (and symlink if still present), so we * should not give a hard error here. */ @@ -1119,8 +1119,8 @@ AlterTableSpaceMove(AlterTableSpaceMoveStmt *stmt) /* * Handle permissions-checking here since we are locking the tables - * and also to avoid doing a bunch of work only to fail part-way. - * Note that permissions will also be checked by AlterTableInternal(). + * and also to avoid doing a bunch of work only to fail part-way. Note + * that permissions will also be checked by AlterTableInternal(). * * Caller must be considered an owner on the table to move it. */ @@ -1179,7 +1179,7 @@ check_default_tablespace(char **newval, void **extra, GucSource source) { /* * If we aren't inside a transaction, we cannot do database access so - * cannot verify the name. Must accept the value on faith. + * cannot verify the name. Must accept the value on faith. */ if (IsTransactionState()) { @@ -1290,7 +1290,7 @@ check_temp_tablespaces(char **newval, void **extra, GucSource source) /* * If we aren't inside a transaction, we cannot do database access so - * cannot verify the individual names. Must accept the list on faith. + * cannot verify the individual names. Must accept the list on faith. * Fortunately, there's then also no need to pass the data to fd.c. */ if (IsTransactionState()) diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c index 5f1ccf02c27..9bf0098b6cb 100644 --- a/src/backend/commands/trigger.c +++ b/src/backend/commands/trigger.c @@ -107,7 +107,7 @@ static void AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo, * * constraintOid, if nonzero, says that this trigger is being created * internally to implement that constraint. A suitable pg_depend entry will - * be made to link the trigger to that constraint. constraintOid is zero when + * be made to link the trigger to that constraint. constraintOid is zero when * executing a user-entered CREATE TRIGGER command. (For CREATE CONSTRAINT * TRIGGER, we build a pg_constraint entry internally.) * @@ -418,7 +418,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString, if (funcrettype != TRIGGEROID) { /* - * We allow OPAQUE just so we can load old dump files. When we see a + * We allow OPAQUE just so we can load old dump files. When we see a * trigger function declared OPAQUE, change it to TRIGGER. 
*/ if (funcrettype == OPAQUEOID) @@ -440,7 +440,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString, * references one of the built-in RI_FKey trigger functions, assume it is * from a dump of a pre-7.3 foreign key constraint, and take steps to * convert this legacy representation into a regular foreign key - * constraint. Ugly, but necessary for loading old dump files. + * constraint. Ugly, but necessary for loading old dump files. */ if (stmt->isconstraint && !isInternal && list_length(stmt->args) >= 6 && @@ -503,7 +503,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString, /* * If trigger is internally generated, modify the provided trigger name to - * ensure uniqueness by appending the trigger OID. (Callers will usually + * ensure uniqueness by appending the trigger OID. (Callers will usually * supply a simple constant trigger name in these cases.) */ if (isInternal) @@ -627,7 +627,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString, int16 attnum; int j; - /* Lookup column name. System columns are not allowed */ + /* Lookup column name. System columns are not allowed */ attnum = attnameAttNum(rel, name, false); if (attnum == InvalidAttrNumber) ereport(ERROR, @@ -732,7 +732,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString, else { /* - * User CREATE TRIGGER, so place dependencies. We make trigger be + * User CREATE TRIGGER, so place dependencies. We make trigger be * auto-dropped if its relation is dropped or if the FK relation is * dropped. (Auto drop is compatible with our pre-7.3 behavior.) */ @@ -801,7 +801,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString, * full-fledged foreign key constraints. * * The conversion is complex because a pre-7.3 foreign key involved three - * separate triggers, which were reported separately in dumps. While the + * separate triggers, which were reported separately in dumps. While the * single trigger on the referencing table adds no new information, we need * to know the trigger functions of both of the triggers on the referenced * table to build the constraint declaration. Also, due to lack of proper @@ -2038,7 +2038,7 @@ ExecBRInsertTriggers(EState *estate, ResultRelInfo *relinfo, if (newtuple != slottuple) { /* - * Return the modified tuple using the es_trig_tuple_slot. We assume + * Return the modified tuple using the es_trig_tuple_slot. We assume * the tuple was allocated in per-tuple memory context, and therefore * will go away by itself. The tuple table slot should not try to * clear it. @@ -2113,7 +2113,7 @@ ExecIRInsertTriggers(EState *estate, ResultRelInfo *relinfo, if (newtuple != slottuple) { /* - * Return the modified tuple using the es_trig_tuple_slot. We assume + * Return the modified tuple using the es_trig_tuple_slot. We assume * the tuple was allocated in per-tuple memory context, and therefore * will go away by itself. The tuple table slot should not try to * clear it. @@ -2503,7 +2503,7 @@ ExecBRUpdateTriggers(EState *estate, EPQState *epqstate, if (newtuple != slottuple) { /* - * Return the modified tuple using the es_trig_tuple_slot. We assume + * Return the modified tuple using the es_trig_tuple_slot. We assume * the tuple was allocated in per-tuple memory context, and therefore * will go away by itself. The tuple table slot should not try to * clear it. @@ -2599,7 +2599,7 @@ ExecIRUpdateTriggers(EState *estate, ResultRelInfo *relinfo, if (newtuple != slottuple) { /* - * Return the modified tuple using the es_trig_tuple_slot. 
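CreateTrigger(), quoted above, makes internally generated trigger names unique by appending the trigger's OID to the (usually constant) name the caller supplied. A one-function sketch of that convention; the 64-byte buffer and the example base name are assumptions for the illustration, not values from the commit.

#include <stdio.h>

typedef unsigned int Oid;

/* Build the internal trigger name "<base>_<oid>". */
static void
internal_trigger_name(char *dst, size_t dstlen, const char *base, Oid trigoid)
{
    snprintf(dst, dstlen, "%s_%u", base, trigoid);
}

int
main(void)
{
    char name[64];

    internal_trigger_name(name, sizeof(name), "my_fk_trigger", 16384);
    puts(name);                 /* prints my_fk_trigger_16384 */
    return 0;
}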
We assume + * Return the modified tuple using the es_trig_tuple_slot. We assume * the tuple was allocated in per-tuple memory context, and therefore * will go away by itself. The tuple table slot should not try to * clear it. @@ -3031,7 +3031,7 @@ typedef SetConstraintStateData *SetConstraintState; * Although this is mutable state, we can keep it in AfterTriggerSharedData * because all instances of the same type of event in a given event list will * be fired at the same time, if they were queued between the same firing - * cycles. So we need only ensure that ats_firing_id is zero when attaching + * cycles. So we need only ensure that ats_firing_id is zero when attaching * a new event to an existing AfterTriggerSharedData record. */ typedef uint32 TriggerFlags; @@ -3077,7 +3077,7 @@ typedef struct AfterTriggerEventDataOneCtid typedef struct AfterTriggerEventDataZeroCtids { TriggerFlags ate_flags; /* status bits and offset to shared data */ -} AfterTriggerEventDataZeroCtids; +} AfterTriggerEventDataZeroCtids; #define SizeofTriggerEvent(evt) \ (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_2CTID ? \ @@ -3092,7 +3092,7 @@ typedef struct AfterTriggerEventDataZeroCtids /* * To avoid palloc overhead, we keep trigger events in arrays in successively- * larger chunks (a slightly more sophisticated version of an expansible - * array). The space between CHUNK_DATA_START and freeptr is occupied by + * array). The space between CHUNK_DATA_START and freeptr is occupied by * AfterTriggerEventData records; the space between endfree and endptr is * occupied by AfterTriggerSharedData records. */ @@ -3134,7 +3134,7 @@ typedef struct AfterTriggerEventList * * firing_counter is incremented for each call of afterTriggerInvokeEvents. * We mark firable events with the current firing cycle's ID so that we can - * tell which ones to work on. This ensures sane behavior if a trigger + * tell which ones to work on. This ensures sane behavior if a trigger * function chooses to do SET CONSTRAINTS: the inner SET CONSTRAINTS will * only fire those events that weren't already scheduled for firing. * @@ -3142,7 +3142,7 @@ typedef struct AfterTriggerEventList * This is saved and restored across failed subtransactions. * * events is the current list of deferred events. This is global across - * all subtransactions of the current transaction. In a subtransaction + * all subtransactions of the current transaction. In a subtransaction * abort, we know that the events added by the subtransaction are at the * end of the list, so it is relatively easy to discard them. The event * list chunks themselves are stored in event_cxt. @@ -3174,12 +3174,12 @@ typedef struct AfterTriggerEventList * which we similarly use to clean up at subtransaction abort. * * firing_stack is a stack of copies of subtransaction-start-time - * firing_counter. We use this to recognize which deferred triggers were + * firing_counter. We use this to recognize which deferred triggers were * fired (or marked for firing) within an aborted subtransaction. * * We use GetCurrentTransactionNestLevel() to determine the correct array * index in these stacks. maxtransdepth is the number of allocated entries in - * each stack. (By not keeping our own stack pointer, we can avoid trouble + * each stack. (By not keeping our own stack pointer, we can avoid trouble * in cases where errors during subxact abort cause multiple invocations * of AfterTriggerEndSubXact() at the same nesting depth.) 
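The AfterTriggerEventList comments quoted above keep trigger events in successively larger chunks, with per-event records growing forward from CHUNK_DATA_START to freeptr and shared records growing backward from endptr to endfree. The standalone sketch below shows that two-ended allocation within a single chunk; the chunk size and struct names are invented for the illustration.

#include <stdio.h>

/* One chunk: per-event data is carved from the front, shared data from the back. */
struct chunk
{
    char *freeptr;              /* next free byte at the front */
    char *endfree;              /* first used byte of the back region */
    char *endptr;               /* end of the chunk */
    char  data[4096];           /* illustrative chunk size */
};

static void
chunk_init(struct chunk *c)
{
    c->freeptr = c->data;
    c->endfree = c->endptr = c->data + sizeof(c->data);
}

/* Allocate event bytes from the front; NULL means switch to a bigger chunk. */
static void *
alloc_event(struct chunk *c, size_t size)
{
    if (c->freeptr + size > c->endfree)
        return NULL;
    void *p = c->freeptr;
    c->freeptr += size;
    return p;
}

/* Allocate shared-data bytes from the back of the same chunk. */
static void *
alloc_shared(struct chunk *c, size_t size)
{
    if (c->endfree - size < c->freeptr)
        return NULL;
    c->endfree -= size;
    return c->endfree;
}

int
main(void)
{
    struct chunk c;

    chunk_init(&c);
    alloc_event(&c, 16);
    alloc_shared(&c, 32);
    printf("%zu bytes left between the two ends\n",
           (size_t) (c.endfree - c.freeptr));
    return 0;
}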
*/ @@ -3490,7 +3490,7 @@ afterTriggerRestoreEventList(AfterTriggerEventList *events, * single trigger function. * * Frequently, this will be fired many times in a row for triggers of - * a single relation. Therefore, we cache the open relation and provide + * a single relation. Therefore, we cache the open relation and provide * fmgr lookup cache space at the caller level. (For triggers fired at * the end of a query, we can even piggyback on the executor's state.) * @@ -3566,6 +3566,7 @@ AfterTriggerExecute(AfterTriggerEvent event, } /* fall through */ case AFTER_TRIGGER_FDW_REUSE: + /* * Using ExecMaterializeSlot() rather than ExecFetchSlotTuple() * ensures that tg_trigtuple does not reference tuplestore memory. @@ -4093,7 +4094,7 @@ AfterTriggerFireDeferred(void) } /* - * Run all the remaining triggers. Loop until they are all gone, in case + * Run all the remaining triggers. Loop until they are all gone, in case * some trigger queues more for us to do. */ while (afterTriggerMarkEvents(events, NULL, false)) @@ -4156,7 +4157,7 @@ AfterTriggerBeginSubXact(void) int my_level = GetCurrentTransactionNestLevel(); /* - * Ignore call if the transaction is in aborted state. (Probably + * Ignore call if the transaction is in aborted state. (Probably * shouldn't happen?) */ if (afterTriggers == NULL) @@ -4235,7 +4236,7 @@ AfterTriggerEndSubXact(bool isCommit) CommandId subxact_firing_id; /* - * Ignore call if the transaction is in aborted state. (Probably + * Ignore call if the transaction is in aborted state. (Probably * unneeded) */ if (afterTriggers == NULL) @@ -4378,7 +4379,7 @@ SetConstraintStateCopy(SetConstraintState origstate) } /* - * Add a per-trigger item to a SetConstraintState. Returns possibly-changed + * Add a per-trigger item to a SetConstraintState. Returns possibly-changed * pointer to the state object (it will change if we have to repalloc). */ static SetConstraintState @@ -4463,7 +4464,7 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt) * First, identify all the named constraints and make a list of their * OIDs. Since, unlike the SQL spec, we allow multiple constraints of * the same name within a schema, the specifications are not - * necessarily unique. Our strategy is to target all matching + * necessarily unique. Our strategy is to target all matching * constraints within the first search-path schema that has any * matches, but disregard matches in schemas beyond the first match. * (This is a bit odd but it's the historical behavior.) @@ -4489,7 +4490,7 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt) /* * If we're given the schema name with the constraint, look only - * in that schema. If given a bare constraint name, use the + * in that schema. If given a bare constraint name, use the * search path to find the first matching constraint. */ if (constraint->schemaname) @@ -4593,7 +4594,7 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt) /* * Silently skip triggers that are marked as non-deferrable in - * pg_trigger. This is not an error condition, since a + * pg_trigger. This is not an error condition, since a * deferrable RI constraint may have some non-deferrable * actions. */ @@ -4664,7 +4665,7 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt) /* * Make sure a snapshot has been established in case trigger - * functions need one. Note that we avoid setting a snapshot if + * functions need one. Note that we avoid setting a snapshot if * we don't find at least one trigger that has to be fired now. 
* This is so that BEGIN; SET CONSTRAINTS ...; SET TRANSACTION * ISOLATION LEVEL SERIALIZABLE; ... works properly. (If we are @@ -4724,7 +4725,7 @@ AfterTriggerPendingOnRel(Oid relid) AfterTriggerShared evtshared = GetTriggerSharedData(event); /* - * We can ignore completed events. (Even if a DONE flag is rolled + * We can ignore completed events. (Even if a DONE flag is rolled * back by subxact abort, it's OK because the effects of the TRUNCATE * or whatever must get rolled back too.) */ @@ -4765,7 +4766,7 @@ AfterTriggerPendingOnRel(Oid relid) * be fired for an event. * * NOTE: this is called whenever there are any triggers associated with - * the event (even if they are disabled). This function decides which + * the event (even if they are disabled). This function decides which * triggers actually need to be queued. * ---------- */ diff --git a/src/backend/commands/typecmds.c b/src/backend/commands/typecmds.c index c1ee69b3233..f377c193719 100644 --- a/src/backend/commands/typecmds.c +++ b/src/backend/commands/typecmds.c @@ -514,8 +514,8 @@ DefineType(List *names, List *parameters) analyzeOid = findTypeAnalyzeFunction(analyzeName, typoid); /* - * Check permissions on functions. We choose to require the creator/owner - * of a type to also own the underlying functions. Since creating a type + * Check permissions on functions. We choose to require the creator/owner + * of a type to also own the underlying functions. Since creating a type * is tantamount to granting public execute access on the functions, the * minimum sane check would be for execute-with-grant-option. But we * don't have a way to make the type go away if the grant option is @@ -552,7 +552,7 @@ DefineType(List *names, List *parameters) * now have TypeCreate do all the real work. * * Note: the pg_type.oid is stored in user tables as array elements (base - * types) in ArrayType and in composite types in DatumTupleFields. This + * types) in ArrayType and in composite types in DatumTupleFields. This * oid must be preserved by binary upgrades. */ typoid = @@ -725,7 +725,7 @@ DefineDomain(CreateDomainStmt *stmt) get_namespace_name(domainNamespace)); /* - * Check for collision with an existing type name. If there is one and + * Check for collision with an existing type name. If there is one and * it's an autogenerated array, we can rename it out of the way. */ old_type_oid = GetSysCacheOid2(TYPENAMENSP, @@ -1076,7 +1076,7 @@ DefineEnum(CreateEnumStmt *stmt) get_namespace_name(enumNamespace)); /* - * Check for collision with an existing type name. If there is one and + * Check for collision with an existing type name. If there is one and * it's an autogenerated array, we can rename it out of the way. */ old_type_oid = GetSysCacheOid2(TYPENAMENSP, @@ -1193,7 +1193,7 @@ AlterEnum(AlterEnumStmt *stmt, bool isTopLevel) /* * Ordinarily we disallow adding values within transaction blocks, because * we can't cope with enum OID values getting into indexes and then having - * their defining pg_enum entries go away. However, it's okay if the enum + * their defining pg_enum entries go away. However, it's okay if the enum * type was created in the current transaction, since then there can be no * such indexes that wouldn't themselves go away on rollback. (We support * this case because pg_dump --binary-upgrade needs it.) We test this by @@ -1515,7 +1515,7 @@ DefineRange(CreateRangeStmt *stmt) * impossible to define a polymorphic constructor; we have to generate new * constructor functions explicitly for each range type. 
* - * We actually define 4 functions, with 0 through 3 arguments. This is just + * We actually define 4 functions, with 0 through 3 arguments. This is just * to offer more convenience for the user. */ static void @@ -2277,7 +2277,7 @@ AlterDomainNotNull(List *names, bool notNull) /* * In principle the auxiliary information for this * error should be errdatatype(), but errtablecol() - * seems considerably more useful in practice. Since + * seems considerably more useful in practice. Since * this code only executes in an ALTER DOMAIN command, * the client should already know which domain is in * question. @@ -2300,7 +2300,7 @@ AlterDomainNotNull(List *names, bool notNull) } /* - * Okay to update pg_type row. We can scribble on typTup because it's a + * Okay to update pg_type row. We can scribble on typTup because it's a * copy. */ typTup->typnotnull = notNull; @@ -2488,7 +2488,7 @@ AlterDomainAddConstraint(List *names, Node *newConstraint) /* * Since all other constraint types throw errors, this must be a check - * constraint. First, process the constraint expression and add an entry + * constraint. First, process the constraint expression and add an entry * to pg_constraint. */ @@ -2674,7 +2674,7 @@ validateDomainConstraint(Oid domainoid, char *ccbin) /* * In principle the auxiliary information for this error * should be errdomainconstraint(), but errtablecol() - * seems considerably more useful in practice. Since this + * seems considerably more useful in practice. Since this * code only executes in an ALTER DOMAIN command, the * client should already know which domain is in question, * and which constraint too. @@ -2857,7 +2857,7 @@ get_rels_with_domain(Oid domainOid, LOCKMODE lockmode) continue; /* - * Okay, add column to result. We store the columns in column-number + * Okay, add column to result. We store the columns in column-number * order; this is just a hack to improve predictability of regression * test output ... */ @@ -2944,7 +2944,7 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid, /* * Set up a CoerceToDomainValue to represent the occurrence of VALUE in - * the expression. Note that it will appear to have the type of the base + * the expression. Note that it will appear to have the type of the base * type, not the domain. This seems correct since within the check * expression, we should not assume the input value can be considered a * member of the domain. @@ -3317,7 +3317,7 @@ AlterTypeOwner(List *names, Oid newOwnerId, ObjectType objecttype) /* * If it's a composite type, invoke ATExecChangeOwner so that we fix - * up the pg_class entry properly. That will call back to + * up the pg_class entry properly. That will call back to * AlterTypeOwnerInternal to take care of the pg_type entry(s). */ if (typTup->typtype == TYPTYPE_COMPOSITE) @@ -3464,7 +3464,7 @@ AlterTypeNamespace_oid(Oid typeOid, Oid nspOid, ObjectAddresses *objsMoved) * Caller must have already checked privileges. * * The function automatically recurses to process the type's array type, - * if any. isImplicitArray should be TRUE only when doing this internal + * if any. isImplicitArray should be TRUE only when doing this internal * recursion (outside callers must never try to move an array type directly). 
* * If errorOnTableType is TRUE, the function errors out if the type is diff --git a/src/backend/commands/user.c b/src/backend/commands/user.c index 7f5b8473d81..d3a2044191b 100644 --- a/src/backend/commands/user.c +++ b/src/backend/commands/user.c @@ -995,7 +995,7 @@ DropRole(DropRoleStmt *stmt) ReleaseSysCache(tuple); /* - * Remove role from the pg_auth_members table. We have to remove all + * Remove role from the pg_auth_members table. We have to remove all * tuples that show it as either a role or a member. * * XXX what about grantor entries? Maybe we should do one heap scan. @@ -1091,7 +1091,7 @@ RenameRole(const char *oldname, const char *newname) * XXX Client applications probably store the session user somewhere, so * renaming it could cause confusion. On the other hand, there may not be * an actual problem besides a little confusion, so think about this and - * decide. Same for SET ROLE ... we don't restrict renaming the current + * decide. Same for SET ROLE ... we don't restrict renaming the current * effective userid, though. */ @@ -1347,7 +1347,7 @@ AddRoleMems(const char *rolename, Oid roleid, /* * Check permissions: must have createrole or admin option on the role to - * be changed. To mess with a superuser role, you gotta be superuser. + * be changed. To mess with a superuser role, you gotta be superuser. */ if (superuser_arg(roleid)) { @@ -1493,7 +1493,7 @@ DelRoleMems(const char *rolename, Oid roleid, /* * Check permissions: must have createrole or admin option on the role to - * be changed. To mess with a superuser role, you gotta be superuser. + * be changed. To mess with a superuser role, you gotta be superuser. */ if (superuser_arg(roleid)) { diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c index ded1841dc65..3d2c73902c6 100644 --- a/src/backend/commands/vacuum.c +++ b/src/backend/commands/vacuum.c @@ -381,18 +381,18 @@ get_rel_oids(Oid relid, const RangeVar *vacrel) * * The output parameters are: * - oldestXmin is the cutoff value used to distinguish whether tuples are - * DEAD or RECENTLY_DEAD (see HeapTupleSatisfiesVacuum). + * DEAD or RECENTLY_DEAD (see HeapTupleSatisfiesVacuum). * - freezeLimit is the Xid below which all Xids are replaced by - * FrozenTransactionId during vacuum. + * FrozenTransactionId during vacuum. * - xidFullScanLimit (computed from table_freeze_age parameter) - * represents a minimum Xid value; a table whose relfrozenxid is older than - * this will have a full-table vacuum applied to it, to freeze tuples across - * the whole table. Vacuuming a table younger than this value can use a - * partial scan. + * represents a minimum Xid value; a table whose relfrozenxid is older than + * this will have a full-table vacuum applied to it, to freeze tuples across + * the whole table. Vacuuming a table younger than this value can use a + * partial scan. * - multiXactCutoff is the value below which all MultiXactIds are removed from - * Xmax. + * Xmax. * - mxactFullScanLimit is a value against which a table's relminmxid value is - * compared to produce a full-table vacuum, as with xidFullScanLimit. + * compared to produce a full-table vacuum, as with xidFullScanLimit. * * xidFullScanLimit and mxactFullScanLimit can be passed as NULL if caller is * not interested. @@ -417,9 +417,9 @@ vacuum_set_xid_limits(Relation rel, MultiXactId safeMxactLimit; /* - * We can always ignore processes running lazy vacuum. This is because we + * We can always ignore processes running lazy vacuum. 
This is because we * use these values only for deciding which tuples we must keep in the - * tables. Since lazy vacuum doesn't write its XID anywhere, it's safe to + * tables. Since lazy vacuum doesn't write its XID anywhere, it's safe to * ignore it. In theory it could be problematic to ignore lazy vacuums in * a full vacuum, but keep in mind that only one vacuum process can be * working on a particular table at any time, and that each vacuum is @@ -566,7 +566,7 @@ vacuum_set_xid_limits(Relation rel, * If we scanned the whole relation then we should just use the count of * live tuples seen; but if we did not, we should not trust the count * unreservedly, especially not in VACUUM, which may have scanned a quite - * nonrandom subset of the table. When we have only partial information, + * nonrandom subset of the table. When we have only partial information, * we take the old value of pg_class.reltuples as a measurement of the * tuple density in the unscanned pages. * @@ -712,7 +712,7 @@ vac_update_relstats(Relation relation, /* * If we have discovered that there are no indexes, then there's no - * primary key either. This could be done more thoroughly... + * primary key either. This could be done more thoroughly... */ if (pgcform->relhaspkey && !hasindex) { @@ -772,7 +772,7 @@ vac_update_relstats(Relation relation, * truncate pg_clog and pg_multixact. * * We violate transaction semantics here by overwriting the database's - * existing pg_database tuple with the new value. This is reasonably + * existing pg_database tuple with the new value. This is reasonably * safe since the new value is correct whether or not this transaction * commits. As with vac_update_relstats, this avoids leaving dead tuples * behind after a VACUUM. @@ -892,7 +892,7 @@ vac_update_datfrozenxid(void) * Also update the XID wrap limit info maintained by varsup.c. * * The passed XID is simply the one I just wrote into my pg_database - * entry. It's used to initialize the "min" calculation. + * entry. It's used to initialize the "min" calculation. * * This routine is only invoked when we've managed to change our * DB's datfrozenxid entry, or we found that the shared XID-wrap-limit @@ -976,7 +976,7 @@ vac_truncate_clog(TransactionId frozenXID, MultiXactId minMulti) /* * Update the wrap limit for GetNewTransactionId and creation of new * MultiXactIds. Note: these functions will also signal the postmaster - * for an(other) autovac cycle if needed. XXX should we avoid possibly + * for an(other) autovac cycle if needed. XXX should we avoid possibly * signalling twice? */ SetTransactionIdLimit(frozenXID, oldestxid_datoid); @@ -988,7 +988,7 @@ vac_truncate_clog(TransactionId frozenXID, MultiXactId minMulti) * vacuum_rel() -- vacuum one heap relation * * Doing one heap at a time incurs extra overhead, since we need to - * check that the heap exists again just before we vacuum it. The + * check that the heap exists again just before we vacuum it. The * reason that we do this is so that vacuuming can be spread across * many small transactions. Otherwise, two-phase locking would require * us to lock the entire database during one pass of the vacuum cleaner. @@ -1045,7 +1045,7 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, bool do_toast, bool for_wraparound) } /* - * Check for user-requested abort. Note we want this to be inside a + * Check for user-requested abort. Note we want this to be inside a * transaction, so xact.c doesn't issue useless WARNING. 
*/ CHECK_FOR_INTERRUPTS(); @@ -1092,7 +1092,7 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, bool do_toast, bool for_wraparound) * * We allow the user to vacuum a table if he is superuser, the table * owner, or the database owner (but in the latter case, only if it's not - * a shared relation). pg_class_ownercheck includes the superuser case. + * a shared relation). pg_class_ownercheck includes the superuser case. * * Note we choose to treat permissions failure as a WARNING and keep * trying to vacuum the rest of the DB --- is this appropriate? @@ -1220,7 +1220,7 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, bool do_toast, bool for_wraparound) /* * If the relation has a secondary toast rel, vacuum that too while we * still hold the session lock on the master table. Note however that - * "analyze" will not get done on the toast table. This is good, because + * "analyze" will not get done on the toast table. This is good, because * the toaster always uses hardcoded index access and statistics are * totally unimportant for toast relations. */ @@ -1239,7 +1239,7 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, bool do_toast, bool for_wraparound) /* * Open all the vacuumable indexes of the given relation, obtaining the - * specified kind of lock on each. Return an array of Relation pointers for + * specified kind of lock on each. Return an array of Relation pointers for * the indexes into *Irel, and the number of indexes into *nindexes. * * We consider an index vacuumable if it is marked insertable (IndexIsReady). @@ -1289,7 +1289,7 @@ vac_open_indexes(Relation relation, LOCKMODE lockmode, } /* - * Release the resources acquired by vac_open_indexes. Optionally release + * Release the resources acquired by vac_open_indexes. Optionally release * the locks (say NoLock to keep 'em). */ void diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c index 3870df606b7..b4abeed5ac9 100644 --- a/src/backend/commands/vacuumlazy.c +++ b/src/backend/commands/vacuumlazy.c @@ -473,7 +473,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats, * Before entering the main loop, establish the invariant that * next_not_all_visible_block is the next block number >= blkno that's not * all-visible according to the visibility map, or nblocks if there's no - * such block. Also, we set up the skipping_all_visible_blocks flag, + * such block. Also, we set up the skipping_all_visible_blocks flag, * which is needed because we need hysteresis in the decision: once we've * started skipping blocks, we may as well skip everything up to the next * not-all-visible block. @@ -706,10 +706,10 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats, * It's possible that another backend has extended the heap, * initialized the page, and then failed to WAL-log the page * due to an ERROR. Since heap extension is not WAL-logged, - * recovery might try to replay our record setting the - * page all-visible and find that the page isn't initialized, - * which will cause a PANIC. To prevent that, check whether - * the page has been previously WAL-logged, and if not, do that + * recovery might try to replay our record setting the page + * all-visible and find that the page isn't initialized, which + * will cause a PANIC. To prevent that, check whether the + * page has been previously WAL-logged, and if not, do that * now. 
*/ if (RelationNeedsWAL(onerel) && @@ -834,8 +834,8 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats, * NB: Like with per-tuple hint bits, we can't set the * PD_ALL_VISIBLE flag if the inserter committed * asynchronously. See SetHintBits for more info. Check - * that the tuple is hinted xmin-committed because - * of that. + * that the tuple is hinted xmin-committed because of + * that. */ if (all_visible) { @@ -972,7 +972,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats, /* * It should never be the case that the visibility map page is set * while the page-level bit is clear, but the reverse is allowed - * (if checksums are not enabled). Regardless, set the both bits + * (if checksums are not enabled). Regardless, set the both bits * so that we get back in sync. * * NB: If the heap page is all-visible but the VM bit is not set, @@ -1034,8 +1034,8 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats, /* * If we remembered any tuples for deletion, then the page will be * visited again by lazy_vacuum_heap, which will compute and record - * its post-compaction free space. If not, then we're done with this - * page, so remember its free space as-is. (This path will always be + * its post-compaction free space. If not, then we're done with this + * page, so remember its free space as-is. (This path will always be * taken if there are no indexes.) */ if (vacrelstats->num_dead_tuples == prev_dead_count) @@ -1635,9 +1635,9 @@ static void lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks) { long maxtuples; - int vac_work_mem = IsAutoVacuumWorkerProcess() && - autovacuum_work_mem != -1 ? - autovacuum_work_mem : maintenance_work_mem; + int vac_work_mem = IsAutoVacuumWorkerProcess() && + autovacuum_work_mem != -1 ? + autovacuum_work_mem : maintenance_work_mem; if (vacrelstats->hasindex) { diff --git a/src/backend/commands/variable.c b/src/backend/commands/variable.c index 18133242f73..f299738d66b 100644 --- a/src/backend/commands/variable.c +++ b/src/backend/commands/variable.c @@ -176,7 +176,7 @@ check_datestyle(char **newval, void **extra, GucSource source) } /* - * Prepare the canonical string to return. GUC wants it malloc'd. + * Prepare the canonical string to return. GUC wants it malloc'd. */ result = (char *) malloc(32); if (!result) @@ -257,7 +257,7 @@ check_timezone(char **newval, void **extra, GucSource source) if (pg_strncasecmp(*newval, "interval", 8) == 0) { /* - * Support INTERVAL 'foo'. This is for SQL spec compliance, not + * Support INTERVAL 'foo'. This is for SQL spec compliance, not * because it has any actual real-world usefulness. */ const char *valueptr = *newval; @@ -281,7 +281,7 @@ check_timezone(char **newval, void **extra, GucSource source) /* * Try to parse it. XXX an invalid interval format will result in - * ereport(ERROR), which is not desirable for GUC. We did what we + * ereport(ERROR), which is not desirable for GUC. We did what we * could to guard against this in flatten_set_variable_args, but a * string coming in from postgresql.conf might contain anything. */ @@ -466,7 +466,7 @@ show_log_timezone(void) * We allow idempotent changes (r/w -> r/w and r/o -> r/o) at any time, and * we also always allow changes from read-write to read-only. However, * read-only may be changed to read-write only when in a top-level transaction - * that has not yet taken an initial snapshot. Can't do it in a hot standby + * that has not yet taken an initial snapshot. Can't do it in a hot standby * slave, either. 
* * If we are not in a transaction at all, just allow the change; it means @@ -627,7 +627,7 @@ check_transaction_deferrable(bool *newval, void **extra, GucSource source) * * We can't roll back the random sequence on error, and we don't want * config file reloads to affect it, so we only want interactive SET SEED - * commands to set it. We use the "extra" storage to ensure that rollbacks + * commands to set it. We use the "extra" storage to ensure that rollbacks * don't try to do the operation again. */ @@ -903,7 +903,7 @@ const char * show_role(void) { /* - * Check whether SET ROLE is active; if not return "none". This is a + * Check whether SET ROLE is active; if not return "none". This is a * kluge to deal with the fact that SET SESSION AUTHORIZATION logically * resets SET ROLE to NONE, but we cannot set the GUC role variable from * assign_session_authorization (because we haven't got enough info to diff --git a/src/backend/commands/view.c b/src/backend/commands/view.c index bc085666fbd..683621c35e5 100644 --- a/src/backend/commands/view.c +++ b/src/backend/commands/view.c @@ -52,7 +52,7 @@ validateWithCheckOption(char *value) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("invalid value for \"check_option\" option"), - errdetail("Valid values are \"local\", and \"cascaded\"."))); + errdetail("Valid values are \"local\", and \"cascaded\"."))); } } @@ -344,11 +344,11 @@ UpdateRangeTableOfViewParse(Oid viewOid, Query *viewParse) *rt_entry2; /* - * Make a copy of the given parsetree. It's not so much that we don't + * Make a copy of the given parsetree. It's not so much that we don't * want to scribble on our input, it's that the parser has a bad habit of * outputting multiple links to the same subtree for constructs like * BETWEEN, and we mustn't have OffsetVarNodes increment the varno of a - * Var node twice. copyObject will expand any multiply-referenced subtree + * Var node twice. copyObject will expand any multiply-referenced subtree * into multiple copies. */ viewParse = (Query *) copyObject(viewParse); @@ -460,13 +460,13 @@ DefineView(ViewStmt *stmt, const char *queryString) } /* - * If the check option is specified, look to see if the view is - * actually auto-updatable or not. + * If the check option is specified, look to see if the view is actually + * auto-updatable or not. */ if (check_option) { const char *view_updatable_error = - view_query_is_auto_updatable(viewParse, true); + view_query_is_auto_updatable(viewParse, true); if (view_updatable_error) ereport(ERROR, @@ -513,7 +513,7 @@ DefineView(ViewStmt *stmt, const char *queryString) /* * If the user didn't explicitly ask for a temporary view, check whether - * we need one implicitly. We allow TEMP to be inserted automatically as + * we need one implicitly. We allow TEMP to be inserted automatically as * long as the CREATE command is consistent with that --- no explicit * schema name. */ diff --git a/src/backend/executor/execAmi.c b/src/backend/executor/execAmi.c index 8c01a63500d..640964c5b7c 100644 --- a/src/backend/executor/execAmi.c +++ b/src/backend/executor/execAmi.c @@ -317,7 +317,7 @@ ExecMarkPos(PlanState *node) * * NOTE: the semantics of this are that the first ExecProcNode following * the restore operation will yield the same tuple as the first one following - * the mark operation. It is unspecified what happens to the plan node's + * the mark operation. It is unspecified what happens to the plan node's * result TupleTableSlot. 
(In most cases the result slot is unchanged by * a restore, but the node may choose to clear it or to load it with the * restored-to tuple.) Hence the caller should discard any previously @@ -397,7 +397,7 @@ ExecSupportsMarkRestore(NodeTag plantype) /* * T_Result only supports mark/restore if it has a child plan that * does, so we do not have enough information to give a really - * correct answer. However, for current uses it's enough to + * correct answer. However, for current uses it's enough to * always say "false", because this routine is not asked about * gating Result plans, only base-case Results. */ diff --git a/src/backend/executor/execCurrent.c b/src/backend/executor/execCurrent.c index 32d0718ec59..7ff3e1ece1a 100644 --- a/src/backend/executor/execCurrent.c +++ b/src/backend/executor/execCurrent.c @@ -142,7 +142,7 @@ execCurrentOf(CurrentOfExpr *cexpr, /* * This table didn't produce the cursor's current row; some other - * inheritance child of the same parent must have. Signal caller to + * inheritance child of the same parent must have. Signal caller to * do nothing on this table. */ return false; diff --git a/src/backend/executor/execJunk.c b/src/backend/executor/execJunk.c index a9acd5b535d..45d6477c2e7 100644 --- a/src/backend/executor/execJunk.c +++ b/src/backend/executor/execJunk.c @@ -52,7 +52,7 @@ * * Initialize the Junk filter. * - * The source targetlist is passed in. The output tuple descriptor is + * The source targetlist is passed in. The output tuple descriptor is * built from the non-junk tlist entries, plus the passed specification * of whether to include room for an OID or not. * An optional resultSlot can be passed as well. diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c index 886c75125d2..072c7df0ada 100644 --- a/src/backend/executor/execMain.c +++ b/src/backend/executor/execMain.c @@ -19,7 +19,7 @@ * ExecutorRun accepts direction and count arguments that specify whether * the plan is to be executed forwards, backwards, and for how many tuples. * In some cases ExecutorRun may be called multiple times to process all - * the tuples for a plan. It is also acceptable to stop short of executing + * the tuples for a plan. It is also acceptable to stop short of executing * the whole plan (but only if it is a SELECT). * * ExecutorFinish must be called after the final ExecutorRun call and @@ -329,12 +329,12 @@ standard_ExecutorRun(QueryDesc *queryDesc, * ExecutorFinish * * This routine must be called after the last ExecutorRun call. - * It performs cleanup such as firing AFTER triggers. It is + * It performs cleanup such as firing AFTER triggers. It is * separate from ExecutorEnd because EXPLAIN ANALYZE needs to * include these actions in the total runtime. * * We provide a function hook variable that lets loadable plugins - * get control when ExecutorFinish is called. Such a plugin would + * get control when ExecutorFinish is called. Such a plugin would * normally call standard_ExecutorFinish(). * * ---------------------------------------------------------------- @@ -565,7 +565,7 @@ ExecCheckRTEPerms(RangeTblEntry *rte) * userid to check as: current user unless we have a setuid indication. * * Note: GetUserId() is presently fast enough that there's no harm in - * calling it separately for each RTE. If that stops being true, we could + * calling it separately for each RTE. If that stops being true, we could * call it once in ExecCheckRTPerms and pass the userid down from there. * But for now, no need for the extra clutter. 
*/ @@ -1184,7 +1184,7 @@ InitResultRelInfo(ResultRelInfo *resultRelInfo, * if so it doesn't matter which one we pick.) However, it is sometimes * necessary to fire triggers on other relations; this happens mainly when an * RI update trigger queues additional triggers on other relations, which will - * be processed in the context of the outer query. For efficiency's sake, + * be processed in the context of the outer query. For efficiency's sake, * we want to have a ResultRelInfo for those triggers too; that can avoid * repeated re-opening of the relation. (It also provides a way for EXPLAIN * ANALYZE to report the runtimes of such triggers.) So we make additional @@ -1221,7 +1221,7 @@ ExecGetTriggerResultRel(EState *estate, Oid relid) /* * Open the target relation's relcache entry. We assume that an * appropriate lock is still held by the backend from whenever the trigger - * event got queued, so we need take no new lock here. Also, we need not + * event got queued, so we need take no new lock here. Also, we need not * recheck the relkind, so no need for CheckValidResultRel. */ rel = heap_open(relid, NoLock); @@ -1327,7 +1327,7 @@ ExecPostprocessPlan(EState *estate) /* * Run any secondary ModifyTable nodes to completion, in case the main - * query did not fetch all rows from them. (We do this to ensure that + * query did not fetch all rows from them. (We do this to ensure that * such nodes have predictable results.) */ foreach(lc, estate->es_auxmodifytables) @@ -1639,7 +1639,8 @@ ExecWithCheckOptions(ResultRelInfo *resultRelInfo, TupleTableSlot *slot, EState *estate) { ExprContext *econtext; - ListCell *l1, *l2; + ListCell *l1, + *l2; /* * We will use the EState's per-tuple context for evaluating constraint @@ -1655,7 +1656,7 @@ ExecWithCheckOptions(ResultRelInfo *resultRelInfo, l2, resultRelInfo->ri_WithCheckOptionExprs) { WithCheckOption *wco = (WithCheckOption *) lfirst(l1); - ExprState *wcoExpr = (ExprState *) lfirst(l2); + ExprState *wcoExpr = (ExprState *) lfirst(l2); /* * WITH CHECK OPTION checks are intended to ensure that the new tuple @@ -1667,8 +1668,8 @@ ExecWithCheckOptions(ResultRelInfo *resultRelInfo, if (!ExecQual((List *) wcoExpr, econtext, false)) ereport(ERROR, (errcode(ERRCODE_WITH_CHECK_OPTION_VIOLATION), - errmsg("new row violates WITH CHECK OPTION for view \"%s\"", - wco->viewname), + errmsg("new row violates WITH CHECK OPTION for view \"%s\"", + wco->viewname), errdetail("Failing row contains %s.", ExecBuildSlotValueDescription(slot, RelationGetDescr(resultRelInfo->ri_RelationDesc), @@ -1681,7 +1682,7 @@ ExecWithCheckOptions(ResultRelInfo *resultRelInfo, * * This is intentionally very similar to BuildIndexValueDescription, but * unlike that function, we truncate long field values (to at most maxfieldlen - * bytes). That seems necessary here since heap field values could be very + * bytes). That seems necessary here since heap field values could be very * long, whereas index entries typically aren't so wide. * * Also, unlike the case with index entries, we need to be prepared to ignore @@ -1875,7 +1876,7 @@ EvalPlanQual(EState *estate, EPQState *epqstate, *tid = copyTuple->t_self; /* - * Need to run a recheck subquery. Initialize or reinitialize EPQ state. + * Need to run a recheck subquery. Initialize or reinitialize EPQ state. */ EvalPlanQualBegin(epqstate, estate); @@ -1958,7 +1959,7 @@ EvalPlanQualFetch(EState *estate, Relation relation, int lockmode, /* * If xmin isn't what we're expecting, the slot must have been - * recycled and reused for an unrelated tuple. 
This implies that + * recycled and reused for an unrelated tuple. This implies that * the latest version of the row was deleted, so we need do * nothing. (Should be safe to examine xmin without getting * buffer's content lock, since xmin never changes in an existing @@ -2199,7 +2200,7 @@ EvalPlanQualGetTuple(EPQState *epqstate, Index rti) /* * Fetch the current row values for any non-locked relations that need - * to be scanned by an EvalPlanQual operation. origslot must have been set + * to be scanned by an EvalPlanQual operation. origslot must have been set * to contain the current result row (top-level row) that we need to recheck. */ void @@ -2428,7 +2429,7 @@ EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree) /* * Each EState must have its own es_epqScanDone state, but if we have - * nested EPQ checks they should share es_epqTuple arrays. This allows + * nested EPQ checks they should share es_epqTuple arrays. This allows * sub-rechecks to inherit the values being examined by an outer recheck. */ estate->es_epqScanDone = (bool *) palloc0(rtsize * sizeof(bool)); @@ -2485,7 +2486,7 @@ EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree) * * This is a cut-down version of ExecutorEnd(); basically we want to do most * of the normal cleanup, but *not* close result relations (which we are - * just sharing from the outer query). We do, however, have to close any + * just sharing from the outer query). We do, however, have to close any * trigger target relations that got opened, since those are not shared. * (There probably shouldn't be any of the latter, but just in case...) */ diff --git a/src/backend/executor/execProcnode.c b/src/backend/executor/execProcnode.c index c5ecd185b8c..c0189eb5a12 100644 --- a/src/backend/executor/execProcnode.c +++ b/src/backend/executor/execProcnode.c @@ -52,7 +52,7 @@ * * ExecInitNode() notices that it is looking at a nest loop and * as the code below demonstrates, it calls ExecInitNestLoop(). * Eventually this calls ExecInitNode() on the right and left subplans - * and so forth until the entire plan is initialized. The result + * and so forth until the entire plan is initialized. The result * of ExecInitNode() is a plan state tree built with the same structure * as the underlying plan tree. * @@ -575,7 +575,7 @@ MultiExecProcNode(PlanState *node) * at 'node'. * * After this operation, the query plan will not be able to be - * processed any further. This should be called only after + * processed any further. This should be called only after * the query plan has been fully executed. * ---------------------------------------------------------------- */ diff --git a/src/backend/executor/execQual.c b/src/backend/executor/execQual.c index 833c4ed6a4e..f162e92fc71 100644 --- a/src/backend/executor/execQual.c +++ b/src/backend/executor/execQual.c @@ -26,7 +26,7 @@ * ExecProject() is used to make tuple projections. Rather then * trying to speed it up, the execution plan should be pre-processed * to facilitate attribute sharing between nodes wherever possible, - * instead of doing needless copying. -cim 5/31/91 + * instead of doing needless copying. -cim 5/31/91 * * During expression evaluation, we check_stack_depth only in * ExecMakeFunctionResult (and substitute routines) rather than at every @@ -201,7 +201,7 @@ static Datum ExecEvalCurrentOfExpr(ExprState *exprstate, ExprContext *econtext, * * Note: for notational simplicity we declare these functions as taking the * specific type of ExprState that they work on. 
This requires casting when - * assigning the function pointer in ExecInitExpr. Be careful that the + * assigning the function pointer in ExecInitExpr. Be careful that the * function signature is declared correctly, because the cast suppresses * automatic checking! * @@ -236,7 +236,7 @@ static Datum ExecEvalCurrentOfExpr(ExprState *exprstate, ExprContext *econtext, * The caller should already have switched into the temporary memory * context econtext->ecxt_per_tuple_memory. The convenience entry point * ExecEvalExprSwitchContext() is provided for callers who don't prefer to - * do the switch in an outer loop. We do not do the switch in these routines + * do the switch in an outer loop. We do not do the switch in these routines * because it'd be a waste of cycles during nested expression evaluation. * ---------------------------------------------------------------- */ @@ -366,7 +366,7 @@ ExecEvalArrayRef(ArrayRefExprState *astate, * We might have a nested-assignment situation, in which the * refassgnexpr is itself a FieldStore or ArrayRef that needs to * obtain and modify the previous value of the array element or slice - * being replaced. If so, we have to extract that value from the + * being replaced. If so, we have to extract that value from the * array and pass it down via the econtext's caseValue. It's safe to * reuse the CASE mechanism because there cannot be a CASE between * here and where the value would be needed, and an array assignment @@ -439,7 +439,7 @@ ExecEvalArrayRef(ArrayRefExprState *astate, /* * For assignment to varlena arrays, we handle a NULL original array * by substituting an empty (zero-dimensional) array; insertion of the - * new element will result in a singleton array value. It does not + * new element will result in a singleton array value. It does not * matter whether the new element is NULL. */ if (*isNull) @@ -829,11 +829,11 @@ ExecEvalWholeRowVar(WholeRowVarExprState *wrvstate, ExprContext *econtext, * We really only care about numbers of attributes and data types. * Also, we can ignore type mismatch on columns that are dropped in * the destination type, so long as (1) the physical storage matches - * or (2) the actual column value is NULL. Case (1) is helpful in + * or (2) the actual column value is NULL. Case (1) is helpful in * some cases involving out-of-date cached plans, while case (2) is * expected behavior in situations such as an INSERT into a table with * dropped columns (the planner typically generates an INT4 NULL - * regardless of the dropped column type). If we find a dropped + * regardless of the dropped column type). If we find a dropped * column and cannot verify that case (1) holds, we have to use * ExecEvalWholeRowSlow to check (2) for each row. */ @@ -1491,7 +1491,7 @@ ExecEvalFuncArgs(FunctionCallInfo fcinfo, * ExecPrepareTuplestoreResult * * Subroutine for ExecMakeFunctionResult: prepare to extract rows from a - * tuplestore function result. We must set up a funcResultSlot (unless + * tuplestore function result. We must set up a funcResultSlot (unless * already done in a previous call cycle) and verify that the function * returned the expected tuple descriptor. */ @@ -1536,7 +1536,7 @@ ExecPrepareTuplestoreResult(FuncExprState *fcache, } /* - * If function provided a tupdesc, cross-check it. We only really need to + * If function provided a tupdesc, cross-check it. We only really need to * do this for functions returning RECORD, but might as well do it always. 
*/ if (resultDesc) @@ -1719,7 +1719,7 @@ restart: if (fcache->func.fn_retset || hasSetArg) { /* - * We need to return a set result. Complain if caller not ready to + * We need to return a set result. Complain if caller not ready to * accept one. */ if (isDone == NULL) @@ -2046,7 +2046,7 @@ ExecMakeTableFunctionResult(ExprState *funcexpr, /* * Normally the passed expression tree will be a FuncExprState, since the * grammar only allows a function call at the top level of a table - * function reference. However, if the function doesn't return set then + * function reference. However, if the function doesn't return set then * the planner might have replaced the function call via constant-folding * or inlining. So if we see any other kind of expression node, execute * it via the general ExecEvalExpr() code; the only difference is that we @@ -2085,7 +2085,7 @@ ExecMakeTableFunctionResult(ExprState *funcexpr, * * Note: ideally, we'd do this in the per-tuple context, but then the * argument values would disappear when we reset the context in the - * inner loop. So do it in caller context. Perhaps we should make a + * inner loop. So do it in caller context. Perhaps we should make a * separate context just to hold the evaluated arguments? */ argDone = ExecEvalFuncArgs(&fcinfo, fcache->args, econtext); @@ -2171,7 +2171,7 @@ ExecMakeTableFunctionResult(ExprState *funcexpr, * Can't do anything very useful with NULL rowtype values. For a * function returning set, we consider this a protocol violation * (but another alternative would be to just ignore the result and - * "continue" to get another row). For a function not returning + * "continue" to get another row). For a function not returning * set, we fall out of the loop; we'll cons up an all-nulls result * row below. */ @@ -2305,7 +2305,7 @@ no_function_result: } /* - * If function provided a tupdesc, cross-check it. We only really need to + * If function provided a tupdesc, cross-check it. We only really need to * do this for functions returning RECORD, but might as well do it always. */ if (rsinfo.setDesc) @@ -2483,7 +2483,7 @@ ExecEvalDistinct(FuncExprState *fcache, * * Evaluate "scalar op ANY/ALL (array)". The operator always yields boolean, * and we combine the results across all array elements using OR and AND - * (for ANY and ALL respectively). Of course we short-circuit as soon as + * (for ANY and ALL respectively). Of course we short-circuit as soon as * the result is known. */ static Datum @@ -2670,7 +2670,7 @@ ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate, * qualification to conjunctive normal form. If we ever get * an AND to evaluate, we can be sure that it's not a top-level * clause in the qualification, but appears lower (as a function - * argument, for example), or in the target list. Not that you + * argument, for example), or in the target list. Not that you * need to know this, mind you... * ---------------------------------------------------------------- */ @@ -2801,7 +2801,7 @@ ExecEvalAnd(BoolExprState *andExpr, ExprContext *econtext, /* ---------------------------------------------------------------- * ExecEvalConvertRowtype * - * Evaluate a rowtype coercion operation. This may require + * Evaluate a rowtype coercion operation. This may require * rearranging field positions. * ---------------------------------------------------------------- */ @@ -2930,7 +2930,7 @@ ExecEvalCase(CaseExprState *caseExpr, ExprContext *econtext, /* * if we have a true test, then we return the result, since the case - * statement is satisfied. 
A NULL result from the test is not + * statement is satisfied. A NULL result from the test is not * considered true. */ if (DatumGetBool(clause_value) && !*isNull) @@ -3144,7 +3144,7 @@ ExecEvalArray(ArrayExprState *astate, ExprContext *econtext, * If all items were null or empty arrays, return an empty array; * otherwise, if some were and some weren't, raise error. (Note: we * must special-case this somehow to avoid trying to generate a 1-D - * array formed from empty arrays. It's not ideal...) + * array formed from empty arrays. It's not ideal...) */ if (haveempty) { @@ -4315,7 +4315,7 @@ ExecEvalExprSwitchContext(ExprState *expression, * ExecInitExpr: prepare an expression tree for execution * * This function builds and returns an ExprState tree paralleling the given - * Expr node tree. The ExprState tree can then be handed to ExecEvalExpr + * Expr node tree. The ExprState tree can then be handed to ExecEvalExpr * for execution. Because the Expr tree itself is read-only as far as * ExecInitExpr and ExecEvalExpr are concerned, several different executions * of the same plan tree can occur concurrently. @@ -4326,9 +4326,9 @@ ExecEvalExprSwitchContext(ExprState *expression, * * Any Aggref, WindowFunc, or SubPlan nodes found in the tree are added to the * lists of such nodes held by the parent PlanState. Otherwise, we do very - * little initialization here other than building the state-node tree. Any + * little initialization here other than building the state-node tree. Any * nontrivial work associated with initializing runtime info for a node should - * happen during the first actual evaluation of that node. (This policy lets + * happen during the first actual evaluation of that node. (This policy lets * us avoid work if the node is never actually evaluated.) * * Note: there is no ExecEndExpr function; we assume that any resource @@ -5133,7 +5133,7 @@ ExecQual(List *qual, ExprContext *econtext, bool resultForNull) oldContext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory); /* - * Evaluate the qual conditions one at a time. If we find a FALSE result, + * Evaluate the qual conditions one at a time. If we find a FALSE result, * we can stop evaluating and return FALSE --- the AND result must be * FALSE. Also, if we find a NULL result when resultForNull is FALSE, we * can stop and return FALSE --- the AND result must be FALSE or NULL in @@ -5292,7 +5292,7 @@ ExecTargetList(List *targetlist, else { /* - * We have some done and some undone sets. Restart the done ones + * We have some done and some undone sets. Restart the done ones * so that we can deliver a tuple (if possible). */ foreach(tl, targetlist) diff --git a/src/backend/executor/execScan.c b/src/backend/executor/execScan.c index 5e4538fa5e9..869abbecbd8 100644 --- a/src/backend/executor/execScan.c +++ b/src/backend/executor/execScan.c @@ -30,7 +30,7 @@ static bool tlist_matches_tupdesc(PlanState *ps, List *tlist, Index varno, Tuple * ExecScanFetch -- fetch next potential tuple * * This routine is concerned with substituting a test tuple if we are - * inside an EvalPlanQual recheck. If we aren't, just execute + * inside an EvalPlanQual recheck. If we aren't, just execute * the access method's next-tuple routine. */ static inline TupleTableSlot * @@ -155,7 +155,7 @@ ExecScan(ScanState *node, ResetExprContext(econtext); /* - * get a tuple from the access method. Loop until we obtain a tuple that + * get a tuple from the access method. Loop until we obtain a tuple that * passes the qualification. 
*/ for (;;) diff --git a/src/backend/executor/execTuples.c b/src/backend/executor/execTuples.c index 928b5e3178a..66515f71a25 100644 --- a/src/backend/executor/execTuples.c +++ b/src/backend/executor/execTuples.c @@ -4,7 +4,7 @@ * Routines dealing with TupleTableSlots. These are used for resource * management associated with tuples (eg, releasing buffer pins for * tuples in disk buffers, or freeing the memory occupied by transient - * tuples). Slots also provide access abstraction that lets us implement + * tuples). Slots also provide access abstraction that lets us implement * "virtual" tuples to reduce data-copying overhead. * * Routines dealing with the type information for tuples. Currently, @@ -261,7 +261,7 @@ ExecSetSlotDescriptor(TupleTableSlot *slot, /* slot to change */ ExecClearTuple(slot); /* - * Release any old descriptor. Also release old Datum/isnull arrays if + * Release any old descriptor. Also release old Datum/isnull arrays if * present (we don't bother to check if they could be re-used). */ if (slot->tts_tupleDescriptor) @@ -311,7 +311,7 @@ ExecSetSlotDescriptor(TupleTableSlot *slot, /* slot to change */ * Another case where it is 'false' is when the referenced tuple is held * in a tuple table slot belonging to a lower-level executor Proc node. * In this case the lower-level slot retains ownership and responsibility - * for eventually releasing the tuple. When this method is used, we must + * for eventually releasing the tuple. When this method is used, we must * be certain that the upper-level Proc node will lose interest in the tuple * sooner than the lower-level one does! If you're not certain, copy the * lower-level tuple with heap_copytuple and let the upper-level table @@ -650,7 +650,7 @@ ExecFetchSlotTuple(TupleTableSlot *slot) * Fetch the slot's minimal physical tuple. * * If the slot contains a virtual tuple, we convert it to minimal - * physical form. The slot retains ownership of the minimal tuple. + * physical form. The slot retains ownership of the minimal tuple. * If it contains a regular tuple we convert to minimal form and store * that in addition to the regular tuple (not instead of, because * callers may hold pointers to Datums within the regular tuple). @@ -829,7 +829,7 @@ ExecCopySlot(TupleTableSlot *dstslot, TupleTableSlot *srcslot) * ExecInit{Result,Scan,Extra}TupleSlot * * These are convenience routines to initialize the specified slot - * in nodes inheriting the appropriate state. ExecInitExtraTupleSlot + * in nodes inheriting the appropriate state. ExecInitExtraTupleSlot * is used for initializing special-purpose slots. * -------------------------------- */ @@ -1147,7 +1147,7 @@ BuildTupleFromCStrings(AttInMetadata *attinmeta, char **values) * code would have no way to obtain a tupledesc for the tuple. * * Note that if we do build a new tuple, it's palloc'd in the current - * memory context. Beware of code that changes context between the initial + * memory context. Beware of code that changes context between the initial * heap_form_tuple/etc call and calling HeapTuple(Header)GetDatum. 
* * For performance-critical callers, it could be worthwhile to take extra diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c index fc71d852bed..d5e1273e91c 100644 --- a/src/backend/executor/execUtils.c +++ b/src/backend/executor/execUtils.c @@ -105,7 +105,7 @@ CreateExecutorState(void) * Initialize all fields of the Executor State structure */ estate->es_direction = ForwardScanDirection; - estate->es_snapshot = InvalidSnapshot; /* caller must initialize this */ + estate->es_snapshot = InvalidSnapshot; /* caller must initialize this */ estate->es_crosscheck_snapshot = InvalidSnapshot; /* no crosscheck */ estate->es_range_table = NIL; estate->es_plannedstmt = NULL; @@ -342,7 +342,7 @@ CreateStandaloneExprContext(void) * any previously computed pass-by-reference expression result will go away! * * If isCommit is false, we are being called in error cleanup, and should - * not call callbacks but only release memory. (It might be better to call + * not call callbacks but only release memory. (It might be better to call * the callbacks and pass the isCommit flag to them, but that would require * more invasive code changes than currently seems justified.) * @@ -371,7 +371,7 @@ FreeExprContext(ExprContext *econtext, bool isCommit) * ReScanExprContext * * Reset an expression context in preparation for a rescan of its - * plan node. This requires calling any registered shutdown callbacks, + * plan node. This requires calling any registered shutdown callbacks, * since any partially complete set-returning-functions must be canceled. * * Note we make no assumption about the caller's memory context. @@ -412,7 +412,7 @@ MakePerTupleExprContext(EState *estate) /* ---------------- * ExecAssignExprContext * - * This initializes the ps_ExprContext field. It is only necessary + * This initializes the ps_ExprContext field. It is only necessary * to do this for nodes which use ExecQual or ExecProject * because those routines require an econtext. Other nodes that * don't have to evaluate expressions don't need to do this. @@ -458,7 +458,7 @@ ExecAssignResultTypeFromTL(PlanState *planstate) /* * ExecTypeFromTL needs the parse-time representation of the tlist, not a - * list of ExprStates. This is good because some plan nodes don't bother + * list of ExprStates. This is good because some plan nodes don't bother * to set up planstate->targetlist ... */ tupDesc = ExecTypeFromTL(planstate->plan->targetlist, hasoid); @@ -486,7 +486,7 @@ ExecGetResultType(PlanState *planstate) * the given tlist should be a list of ExprState nodes, not Expr nodes. * * inputDesc can be NULL, but if it is not, we check to see whether simple - * Vars in the tlist match the descriptor. It is important to provide + * Vars in the tlist match the descriptor. It is important to provide * inputDesc for relation-scan plan nodes, as a cross check that the relation * hasn't been changed since the plan was made. At higher levels of a plan, * there is no need to recheck. @@ -692,7 +692,7 @@ ExecAssignProjectionInfo(PlanState *planstate, * * However ... there is no particular need to do it during ExecEndNode, * because FreeExecutorState will free any remaining ExprContexts within - * the EState. Letting FreeExecutorState do it allows the ExprContexts to + * the EState. Letting FreeExecutorState do it allows the ExprContexts to * be freed in reverse order of creation, rather than order of creation as * will happen if we delete them here, which saves O(N^2) work in the list * cleanup inside FreeExprContext. 
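The execUtils.c comment above argues that letting FreeExecutorState release the remaining ExprContexts in reverse order of creation avoids O(N^2) work in the list cleanup done by FreeExprContext. A minimal, self-contained sketch of why removal order matters for a singly linked list; this is illustrative only, not PostgreSQL code, and the names (Node, ctx_register, ctx_remove) and the newest-at-head layout are assumptions made just for the demonstration:

#include <stdio.h>
#include <stdlib.h>

/* Toy registry of "contexts"; the most recently registered entry sits at
 * the head, and every removal walks the list from the head. */
typedef struct Node { int id; struct Node *next; } Node;

static Node *head = NULL;

static void ctx_register(int id)
{
    Node *n = malloc(sizeof(Node));
    n->id = id;
    n->next = head;                /* newest entry goes to the head */
    head = n;
}

static int ctx_remove(int id)      /* returns number of nodes visited */
{
    int visited = 0;
    Node **p = &head;
    while (*p)
    {
        visited++;
        if ((*p)->id == id)
        {
            Node *dead = *p;
            *p = dead->next;
            free(dead);
            return visited;
        }
        p = &(*p)->next;
    }
    return visited;
}

int main(void)
{
    int N = 1000, creation = 0, reverse = 0, i;

    for (i = 0; i < N; i++) ctx_register(i);
    for (i = 0; i < N; i++) creation += ctx_remove(i);      /* creation order: ~N^2/2 visits */

    for (i = 0; i < N; i++) ctx_register(i);
    for (i = N - 1; i >= 0; i--) reverse += ctx_remove(i);  /* reverse order: ~N visits */

    printf("creation-order visits: %d, reverse-order visits: %d\n",
           creation, reverse);
    return 0;
}

With N = 1000 the creation-order pass visits roughly 500,000 nodes while the reverse-order pass visits 1,000, which is the asymmetry the comment is relying on; the real executor keeps its ExprContexts in an es_exprcontexts list rather than this toy structure.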
@@ -712,7 +712,7 @@ ExecFreeExprContext(PlanState *planstate) * the following scan type support functions are for * those nodes which are stubborn and return tuples in * their Scan tuple slot instead of their Result tuple - * slot.. luck fur us, these nodes do not do projections + * slot.. luck fur us, these nodes do not do projections * so we don't have to worry about getting the ProjectionInfo * right for them... -cim 6/3/91 * ---------------------------------------------------------------- @@ -1111,7 +1111,7 @@ ExecInsertIndexTuples(TupleTableSlot *slot, /* * If the index has an associated exclusion constraint, check that. * This is simpler than the process for uniqueness checks since we - * always insert first and then check. If the constraint is deferred, + * always insert first and then check. If the constraint is deferred, * we check now anyway, but don't throw error on violation; instead * we'll queue a recheck event. * @@ -1295,7 +1295,7 @@ retry: /* * If an in-progress transaction is affecting the visibility of this - * tuple, we need to wait for it to complete and then recheck. For + * tuple, we need to wait for it to complete and then recheck. For * simplicity we do rechecking by just restarting the whole scan --- * this case probably doesn't happen often enough to be worth trying * harder, and anyway we don't want to hold any index internal locks @@ -1357,7 +1357,7 @@ retry: /* * Check existing tuple's index values to see if it really matches the - * exclusion condition against the new_values. Returns true if conflict. + * exclusion condition against the new_values. Returns true if conflict. */ static bool index_recheck_constraint(Relation index, Oid *constr_procs, diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c index f0a89d23b87..4d112604bb7 100644 --- a/src/backend/executor/functions.c +++ b/src/backend/executor/functions.c @@ -47,7 +47,7 @@ typedef struct } DR_sqlfunction; /* - * We have an execution_state record for each query in a function. Each + * We have an execution_state record for each query in a function. Each * record contains a plantree for its query. If the query is currently in * F_EXEC_RUN state then there's a QueryDesc too. * @@ -466,7 +466,7 @@ sql_fn_resolve_param_name(SQLFunctionParseInfoPtr pinfo, * Set up the per-query execution_state records for a SQL function. * * The input is a List of Lists of parsed and rewritten, but not planned, - * querytrees. The sublist structure denotes the original query boundaries. + * querytrees. The sublist structure denotes the original query boundaries. */ static List * init_execution_state(List *queryTree_list, @@ -590,7 +590,7 @@ init_sql_fcache(FmgrInfo *finfo, Oid collation, bool lazyEvalOK) bool isNull; /* - * Create memory context that holds all the SQLFunctionCache data. It + * Create memory context that holds all the SQLFunctionCache data. It * must be a child of whatever context holds the FmgrInfo. */ fcontext = AllocSetContextCreate(finfo->fn_mcxt, @@ -602,7 +602,7 @@ init_sql_fcache(FmgrInfo *finfo, Oid collation, bool lazyEvalOK) oldcontext = MemoryContextSwitchTo(fcontext); /* - * Create the struct proper, link it to fcontext and fn_extra. Once this + * Create the struct proper, link it to fcontext and fn_extra. Once this * is done, we'll be able to recover the memory after failure, even if the * FmgrInfo is long-lived. 
*/ @@ -672,7 +672,7 @@ init_sql_fcache(FmgrInfo *finfo, Oid collation, bool lazyEvalOK) fcache->src = TextDatumGetCString(tmp); /* - * Parse and rewrite the queries in the function text. Use sublists to + * Parse and rewrite the queries in the function text. Use sublists to * keep track of the original query boundaries. But we also build a * "flat" list of the rewritten queries to pass to check_sql_fn_retval. * This is because the last canSetTag query determines the result type @@ -712,7 +712,7 @@ init_sql_fcache(FmgrInfo *finfo, Oid collation, bool lazyEvalOK) * any polymorphic arguments. * * Note: we set fcache->returnsTuple according to whether we are returning - * the whole tuple result or just a single column. In the latter case we + * the whole tuple result or just a single column. In the latter case we * clear returnsTuple because we need not act different from the scalar * result case, even if it's a rowtype column. (However, we have to force * lazy eval mode in that case; otherwise we'd need extra code to expand @@ -944,7 +944,7 @@ postquel_get_single_result(TupleTableSlot *slot, /* * Set up to return the function value. For pass-by-reference datatypes, * be sure to allocate the result in resultcontext, not the current memory - * context (which has query lifespan). We can't leave the data in the + * context (which has query lifespan). We can't leave the data in the * TupleTableSlot because we intend to clear the slot before returning. */ oldcontext = MemoryContextSwitchTo(resultcontext); @@ -1052,7 +1052,7 @@ fmgr_sql(PG_FUNCTION_ARGS) /* * Switch to context in which the fcache lives. This ensures that our * tuplestore etc will have sufficient lifetime. The sub-executor is - * responsible for deleting per-tuple information. (XXX in the case of a + * responsible for deleting per-tuple information. (XXX in the case of a * long-lived FmgrInfo, this policy represents more memory leakage, but * it's not entirely clear where to keep stuff instead.) */ @@ -1106,7 +1106,7 @@ fmgr_sql(PG_FUNCTION_ARGS) * suspend execution before completion is if we are returning a row from a * lazily-evaluated SELECT. So, when first entering this loop, we'll * either start a new query (and push a fresh snapshot) or re-establish - * the active snapshot from the existing query descriptor. If we need to + * the active snapshot from the existing query descriptor. If we need to * start a new query in a subsequent execution of the loop, either we need * a fresh snapshot (and pushed_snapshot is false) or the existing * snapshot is on the active stack and we can just bump its command ID. @@ -1162,7 +1162,7 @@ fmgr_sql(PG_FUNCTION_ARGS) * Break from loop if we didn't shut down (implying we got a * lazily-evaluated row). Otherwise we'll press on till the whole * function is done, relying on the tuplestore to keep hold of the - * data to eventually be returned. This is necessary since an + * data to eventually be returned. This is necessary since an * INSERT/UPDATE/DELETE RETURNING that sets the result might be * followed by additional rule-inserted commands, and we want to * finish doing all those commands before we return anything. @@ -1184,7 +1184,7 @@ fmgr_sql(PG_FUNCTION_ARGS) /* * Flush the current snapshot so that we will take a new one for - * the new query list. This ensures that new snaps are taken at + * the new query list. This ensures that new snaps are taken at * original-query boundaries, matching the behavior of interactive * execution. 
*/ @@ -1242,7 +1242,7 @@ fmgr_sql(PG_FUNCTION_ARGS) else if (fcache->lazyEval) { /* - * We are done with a lazy evaluation. Clean up. + * We are done with a lazy evaluation. Clean up. */ tuplestore_clear(fcache->tstore); @@ -1266,8 +1266,8 @@ fmgr_sql(PG_FUNCTION_ARGS) else { /* - * We are done with a non-lazy evaluation. Return whatever is in - * the tuplestore. (It is now caller's responsibility to free the + * We are done with a non-lazy evaluation. Return whatever is in + * the tuplestore. (It is now caller's responsibility to free the * tuplestore when done.) */ rsi->returnMode = SFRM_Materialize; @@ -1379,7 +1379,7 @@ sql_exec_error_callback(void *arg) /* * Try to determine where in the function we failed. If there is a query - * with non-null QueryDesc, finger it. (We check this rather than looking + * with non-null QueryDesc, finger it. (We check this rather than looking * for F_EXEC_RUN state, so that errors during ExecutorStart or * ExecutorEnd are blamed on the appropriate query; see postquel_start and * postquel_end.) @@ -1671,7 +1671,7 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList, * the function that's calling it. * * XXX Note that if rettype is RECORD, the IsBinaryCoercible check - * will succeed for any composite restype. For the moment we rely on + * will succeed for any composite restype. For the moment we rely on * runtime type checking to catch any discrepancy, but it'd be nice to * do better at parse time. */ @@ -1717,7 +1717,7 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList, /* * Verify that the targetlist matches the return tuple type. We scan * the non-deleted attributes to ensure that they match the datatypes - * of the non-resjunk columns. For deleted attributes, insert NULL + * of the non-resjunk columns. For deleted attributes, insert NULL * result columns if the caller asked for that. */ tupnatts = tupdesc->natts; diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c index 186c319a3a2..09ff03543df 100644 --- a/src/backend/executor/nodeAgg.c +++ b/src/backend/executor/nodeAgg.c @@ -25,7 +25,7 @@ * The agg's first input type and transtype must be the same in this case! * * If transfunc is marked "strict" then NULL input_values are skipped, - * keeping the previous transvalue. If transfunc is not strict then it + * keeping the previous transvalue. If transfunc is not strict then it * is called for every input tuple and must deal with NULL initcond * or NULL input_values for itself. * @@ -66,7 +66,7 @@ * it is completely forbidden for functions to modify pass-by-ref inputs, * but in the aggregate case we know the left input is either the initial * transition value or a previous function result, and in either case its - * value need not be preserved. See int8inc() for an example. Notice that + * value need not be preserved. See int8inc() for an example. Notice that * advance_transition_function() is coded to avoid a data copy step when * the previous transition value pointer is returned. Also, some * transition functions want to store working state in addition to the @@ -132,14 +132,14 @@ typedef struct AggStatePerAggData Aggref *aggref; /* - * Nominal number of arguments for aggregate function. For plain aggs, - * this excludes any ORDER BY expressions. For ordered-set aggs, this + * Nominal number of arguments for aggregate function. For plain aggs, + * this excludes any ORDER BY expressions. For ordered-set aggs, this * counts both the direct and aggregated (ORDER BY) arguments. 
*/ int numArguments; /* - * Number of aggregated input columns. This includes ORDER BY expressions + * Number of aggregated input columns. This includes ORDER BY expressions * in both the plain-agg and ordered-set cases. Ordered-set direct args * are not counted, though. */ @@ -153,7 +153,7 @@ typedef struct AggStatePerAggData int numTransInputs; /* - * Number of arguments to pass to the finalfn. This is always at least 1 + * Number of arguments to pass to the finalfn. This is always at least 1 * (the transition state value) plus any ordered-set direct args. If the * finalfn wants extra args then we pass nulls corresponding to the * aggregated input columns. @@ -216,7 +216,7 @@ typedef struct AggStatePerAggData transtypeByVal; /* - * Stuff for evaluation of inputs. We used to just use ExecEvalExpr, but + * Stuff for evaluation of inputs. We used to just use ExecEvalExpr, but * with the addition of ORDER BY we now need at least a slot for passing * data to the sort object, which requires a tupledesc, so we might as * well go whole hog and use ExecProject too. @@ -236,7 +236,7 @@ typedef struct AggStatePerAggData * input tuple group and updated for each input tuple. * * For a simple (non DISTINCT/ORDER BY) aggregate, we just feed the input - * values straight to the transition function. If it's DISTINCT or + * values straight to the transition function. If it's DISTINCT or * requires ORDER BY, we pass the input values into a Tuplesort object; * then at completion of the input tuple group, we scan the sorted values, * eliminate duplicates if needed, and run the transition function on the @@ -279,7 +279,7 @@ typedef struct AggStatePerGroupData /* * Note: noTransValue initially has the same value as transValueIsNull, - * and if true both are cleared to false at the same time. They are not + * and if true both are cleared to false at the same time. They are not * the same though: if transfn later returns a NULL, we want to keep that * NULL and not auto-replace it with a later input value. Only the first * non-NULL input will be auto-substituted. @@ -289,7 +289,7 @@ typedef struct AggStatePerGroupData /* * To implement hashed aggregation, we need a hashtable that stores a * representative tuple and an array of AggStatePerGroup structs for each - * distinct set of GROUP BY column values. We compute the hash key from + * distinct set of GROUP BY column values. We compute the hash key from * the GROUP BY columns. */ typedef struct AggHashEntryData *AggHashEntry; @@ -416,7 +416,7 @@ initialize_aggregates(AggState *aggstate, * * The new values (and null flags) have been preloaded into argument positions * 1 and up in peraggstate->transfn_fcinfo, so that we needn't copy them again - * to pass to the transition function. We also expect that the static fields + * to pass to the transition function. We also expect that the static fields * of the fcinfo are already initialized; that was done by ExecInitAgg(). * * It doesn't matter which memory context this is called in. @@ -495,7 +495,7 @@ advance_transition_function(AggState *aggstate, /* * If pass-by-ref datatype, must copy the new value into aggcontext and - * pfree the prior transValue. But if transfn returned a pointer to its + * pfree the prior transValue. But if transfn returned a pointer to its * first input, we don't need to do anything. */ if (!peraggstate->transtypeByVal && @@ -519,7 +519,7 @@ advance_transition_function(AggState *aggstate, } /* - * Advance all the aggregates for one input tuple. 
The input tuple + * Advance all the aggregates for one input tuple. The input tuple * has been stored in tmpcontext->ecxt_outertuple, so that it is accessible * to ExecEvalExpr. pergroup is the array of per-group structs to use * (this might be in a hashtable entry). @@ -609,7 +609,7 @@ advance_aggregates(AggState *aggstate, AggStatePerGroup pergroup) /* * Run the transition function for a DISTINCT or ORDER BY aggregate * with only one input. This is called after we have completed - * entering all the input values into the sort object. We complete the + * entering all the input values into the sort object. We complete the * sort, read out the values in sorted order, and run the transition * function on each value (applying DISTINCT if appropriate). * @@ -705,7 +705,7 @@ process_ordered_aggregate_single(AggState *aggstate, /* * Run the transition function for a DISTINCT or ORDER BY aggregate * with more than one input. This is called after we have completed - * entering all the input values into the sort object. We complete the + * entering all the input values into the sort object. We complete the * sort, read out the values in sorted order, and run the transition * function on each value (applying DISTINCT if appropriate). * @@ -1070,9 +1070,9 @@ lookup_hash_entry(AggState *aggstate, TupleTableSlot *inputslot) * the appropriate attribute for each aggregate function use (Aggref * node) appearing in the targetlist or qual of the node. The number * of tuples to aggregate over depends on whether grouped or plain - * aggregation is selected. In grouped aggregation, we produce a result + * aggregation is selected. In grouped aggregation, we produce a result * row for each group; in plain aggregation there's a single result row - * for the whole query. In either case, the value of each aggregate is + * for the whole query. In either case, the value of each aggregate is * stored in the expression context to be used when ExecProject evaluates * the result tuple. */ @@ -1097,7 +1097,7 @@ ExecAgg(AggState *node) } /* - * Exit if nothing left to do. (We must do the ps_TupFromTlist check + * Exit if nothing left to do. (We must do the ps_TupFromTlist check * first, because in some cases agg_done gets set before we emit the final * aggregate tuple, and we have to finish running SRFs for it.) */ @@ -1181,11 +1181,11 @@ agg_retrieve_direct(AggState *aggstate) /* * Clear the per-output-tuple context for each group, as well as * aggcontext (which contains any pass-by-ref transvalues of the old - * group). We also clear any child contexts of the aggcontext; some + * group). We also clear any child contexts of the aggcontext; some * aggregate functions store working state in such contexts. * * We use ReScanExprContext not just ResetExprContext because we want - * any registered shutdown callbacks to be called. That allows + * any registered shutdown callbacks to be called. That allows * aggregate functions to ensure they've cleaned up any non-memory * resources. */ @@ -1518,8 +1518,8 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) aggstate->hashtable = NULL; /* - * Create expression contexts. We need two, one for per-input-tuple - * processing and one for per-output-tuple processing. We cheat a little + * Create expression contexts. We need two, one for per-input-tuple + * processing and one for per-output-tuple processing. We cheat a little * by using ExecAssignExprContext() to build both. 
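The nodeAgg.c hunks above describe how DISTINCT/ORDER BY aggregates first feed every input value into a sort object and only afterwards run the transition function over the sorted, de-duplicated stream (process_ordered_aggregate_single). A minimal standalone sketch of that flow, using qsort over plain ints and a running sum standing in for a real transition function; none of these names are backend functions:

#include <stdio.h>
#include <stdlib.h>

/* A trivially simple transition function: a running sum. */
static long
sum_trans(long trans, int value)
{
    return trans + value;
}

static int
cmp_int(const void *a, const void *b)
{
    int     ia = *(const int *) a;
    int     ib = *(const int *) b;

    return (ia > ib) - (ia < ib);
}

/*
 * Feed all inputs to a sort, read them back in order, drop adjacent
 * duplicates when DISTINCT was requested, and run the transition function
 * on what remains -- the same overall shape the comments above describe
 * for a single-input DISTINCT/ORDER BY aggregate.
 */
static long
ordered_agg(int *values, size_t n, int distinct)
{
    long    trans = 0;

    qsort(values, n, sizeof(int), cmp_int);
    for (size_t i = 0; i < n; i++)
    {
        if (distinct && i > 0 && values[i] == values[i - 1])
            continue;           /* duplicate of the previous sorted value */
        trans = sum_trans(trans, values[i]);
    }
    return trans;
}

int
main(void)
{
    int     values[] = {3, 1, 3, 2, 2};

    printf("sum(ORDER BY x)  = %ld\n", ordered_agg(values, 5, 0));
    printf("sum(DISTINCT x)  = %ld\n", ordered_agg(values, 5, 1));
    return 0;
}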
*/ ExecAssignExprContext(estate, &aggstate->ss.ps); @@ -1552,7 +1552,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) * initialize child expressions * * Note: ExecInitExpr finds Aggrefs for us, and also checks that no aggs - * contain other agg calls in their arguments. This would make no sense + * contain other agg calls in their arguments. This would make no sense * under SQL semantics anyway (and it's forbidden by the spec). Because * that is true, we don't need to worry about evaluating the aggs in any * particular order. @@ -1599,7 +1599,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) * This is not an error condition: we might be using the Agg node just * to do hash-based grouping. Even in the regular case, * constant-expression simplification could optimize away all of the - * Aggrefs in the targetlist and qual. So keep going, but force local + * Aggrefs in the targetlist and qual. So keep going, but force local * copy of numaggs positive so that palloc()s below don't choke. */ numaggs = 1; @@ -1760,7 +1760,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) } /* - * Get actual datatypes of the (nominal) aggregate inputs. These + * Get actual datatypes of the (nominal) aggregate inputs. These * could be different from the agg's declared input types, when the * agg accepts ANY or a polymorphic type. */ @@ -1852,7 +1852,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) * If the transfn is strict and the initval is NULL, make sure input * type and transtype are the same (or at least binary-compatible), so * that it's OK to use the first aggregated input value as the initial - * transValue. This should have been checked at agg definition time, + * transValue. This should have been checked at agg definition time, * but we must check again in case the transfn's strictness property * has been changed. */ @@ -1885,7 +1885,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) /* * If we're doing either DISTINCT or ORDER BY for a plain agg, then we * have a list of SortGroupClause nodes; fish out the data in them and - * stick them into arrays. We ignore ORDER BY for an ordered-set agg, + * stick them into arrays. We ignore ORDER BY for an ordered-set agg, * however; the agg's transfn and finalfn are responsible for that. * * Note that by construction, if there is a DISTINCT clause then the @@ -2144,8 +2144,8 @@ ExecReScanAgg(AggState *node) * * The transition and/or final functions of an aggregate may want to verify * that they are being called as aggregates, rather than as plain SQL - * functions. They should use this function to do so. The return value - * is nonzero if being called as an aggregate, or zero if not. (Specific + * functions. They should use this function to do so. The return value + * is nonzero if being called as an aggregate, or zero if not. (Specific * nonzero values are AGG_CONTEXT_AGGREGATE or AGG_CONTEXT_WINDOW, but more * values could conceivably appear in future.) * diff --git a/src/backend/executor/nodeAppend.c b/src/backend/executor/nodeAppend.c index 6185c1d0d14..ef121c420de 100644 --- a/src/backend/executor/nodeAppend.c +++ b/src/backend/executor/nodeAppend.c @@ -33,7 +33,7 @@ * / * Append -------+------+------+--- nil * / \ | | | - * nil nil ... ... ... + * nil nil ... ... ... 
* subplans * * Append nodes are currently used for unions, and to support diff --git a/src/backend/executor/nodeBitmapHeapscan.c b/src/backend/executor/nodeBitmapHeapscan.c index 7d8a3f2c248..9b1e97578d0 100644 --- a/src/backend/executor/nodeBitmapHeapscan.c +++ b/src/backend/executor/nodeBitmapHeapscan.c @@ -5,7 +5,7 @@ * * NOTE: it is critical that this plan type only be used with MVCC-compliant * snapshots (ie, regular snapshots, not SnapshotAny or one of the other - * special snapshots). The reason is that since index and heap scans are + * special snapshots). The reason is that since index and heap scans are * decoupled, there can be no assurance that the index tuple prompting a * visit to a particular heap TID still exists when the visit is made. * Therefore the tuple might not exist anymore either (which is OK because @@ -340,7 +340,7 @@ bitgetpage(HeapScanDesc scan, TBMIterateResult *tbmres) /* * We must hold share lock on the buffer content while examining tuple - * visibility. Afterwards, however, the tuples we have found to be + * visibility. Afterwards, however, the tuples we have found to be * visible are guaranteed good as long as we hold the buffer pin. */ LockBuffer(buffer, BUFFER_LOCK_SHARE); diff --git a/src/backend/executor/nodeForeignscan.c b/src/backend/executor/nodeForeignscan.c index ac135d9ba86..9cc53459527 100644 --- a/src/backend/executor/nodeForeignscan.c +++ b/src/backend/executor/nodeForeignscan.c @@ -147,7 +147,7 @@ ExecInitForeignScan(ForeignScan *node, EState *estate, int eflags) scanstate->ss.ss_currentRelation = currentRelation; /* - * get the scan type from the relation descriptor. (XXX at some point we + * get the scan type from the relation descriptor. (XXX at some point we * might want to let the FDW editorialize on the scan tupdesc.) */ ExecAssignScanType(&scanstate->ss, RelationGetDescr(currentRelation)); diff --git a/src/backend/executor/nodeFunctionscan.c b/src/backend/executor/nodeFunctionscan.c index 0300941a525..da5d8c114db 100644 --- a/src/backend/executor/nodeFunctionscan.c +++ b/src/backend/executor/nodeFunctionscan.c @@ -232,7 +232,7 @@ FunctionNext(FunctionScanState *node) } /* - * If alldone, we just return the previously-cleared scanslot. Otherwise, + * If alldone, we just return the previously-cleared scanslot. Otherwise, * finish creating the virtual tuple. */ if (!alldone) @@ -449,8 +449,8 @@ ExecInitFunctionScan(FunctionScan *node, EState *estate, int eflags) * Create the combined TupleDesc * * If there is just one function without ordinality, the scan result - * tupdesc is the same as the function result tupdesc --- except that - * we may stuff new names into it below, so drop any rowtype label. + * tupdesc is the same as the function result tupdesc --- except that we + * may stuff new names into it below, so drop any rowtype label. */ if (scanstate->simple) { diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c index 95ed9bd9d0d..589b2f15099 100644 --- a/src/backend/executor/nodeHash.c +++ b/src/backend/executor/nodeHash.c @@ -365,7 +365,7 @@ ExecHashTableCreate(Hash *node, List *hashOperators, bool keepNulls) /* * Set up for skew optimization, if possible and there's a need for more - * than one batch. (In a one-batch join, there's no point in it.) + * than one batch. (In a one-batch join, there's no point in it.) 
*/ if (nbatch > 1) ExecHashBuildSkewHash(hashtable, node, num_skew_mcvs); @@ -407,7 +407,7 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew, /* * Estimate tupsize based on footprint of tuple in hashtable... note this - * does not allow for any palloc overhead. The manipulations of spaceUsed + * does not allow for any palloc overhead. The manipulations of spaceUsed * don't count palloc overhead either. */ tupsize = HJTUPLE_OVERHEAD + @@ -459,7 +459,7 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew, /* * Set nbuckets to achieve an average bucket load of NTUP_PER_BUCKET when * memory is filled. Set nbatch to the smallest power of 2 that appears - * sufficient. The Min() steps limit the results so that the pointer + * sufficient. The Min() steps limit the results so that the pointer * arrays we'll try to allocate do not exceed work_mem. */ max_pointers = (work_mem * 1024L) / sizeof(void *); @@ -498,8 +498,8 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew, /* * Both nbuckets and nbatch must be powers of 2 to make - * ExecHashGetBucketAndBatch fast. We already fixed nbatch; now inflate - * nbuckets to the next larger power of 2. We also force nbuckets to not + * ExecHashGetBucketAndBatch fast. We already fixed nbatch; now inflate + * nbuckets to the next larger power of 2. We also force nbuckets to not * be real small, by starting the search at 2^10. (Note: above we made * sure that nbuckets is not more than INT_MAX / 2, so this loop cannot * overflow, nor can the final shift to recalculate nbuckets.) @@ -817,7 +817,7 @@ ExecHashGetHashValue(HashJoinTable hashtable, * the hash support function as strict even if the operator is not. * * Note: currently, all hashjoinable operators must be strict since - * the hash index AM assumes that. However, it takes so little extra + * the hash index AM assumes that. However, it takes so little extra * code here to allow non-strict that we may as well do it. */ if (isNull) @@ -1237,7 +1237,7 @@ ExecHashBuildSkewHash(HashJoinTable hashtable, Hash *node, int mcvsToUse) /* * While we have not hit a hole in the hashtable and have not hit * the desired bucket, we have collided with some previous hash - * value, so try the next bucket location. NB: this code must + * value, so try the next bucket location. NB: this code must * match ExecHashGetSkewBucket. */ bucket = hashvalue & (nbuckets - 1); @@ -1435,7 +1435,7 @@ ExecHashRemoveNextSkewBucket(HashJoinTable hashtable) * NOTE: this is not nearly as simple as it looks on the surface, because * of the possibility of collisions in the hashtable. Suppose that hash * values A and B collide at a particular hashtable entry, and that A was - * entered first so B gets shifted to a different table entry. If we were + * entered first so B gets shifted to a different table entry. If we were * to remove A first then ExecHashGetSkewBucket would mistakenly start * reporting that B is not in the hashtable, because it would hit the NULL * before finding B. However, we always remove entries in the reverse diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c index 9c8398a9cf5..7eec3f333de 100644 --- a/src/backend/executor/nodeHashjoin.c +++ b/src/backend/executor/nodeHashjoin.c @@ -126,7 +126,7 @@ ExecHashJoin(HashJoinState *node) * check this when the outer relation's startup cost is less * than the projected cost of building the hash table. * Otherwise it's best to build the hash table first and see - * if the inner relation is empty. 
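The nodeHash.c hunks above note that nbuckets and nbatch are forced to powers of 2 so that ExecHashGetBucketAndBatch stays cheap. A simplified standalone sketch of the idea: with power-of-2 sizes, the bucket number comes from the low bits of the hash value and the batch number from the bits just above them, one mask and one shift with no division. The exact bit layout and helper names here are illustrative, not copied from the backend:

#include <stdint.h>
#include <stdio.h>

/*
 * Round a requested count up to the next power of 2, as the
 * ExecChooseHashTableSize comment describes (the real code also clamps the
 * result and starts its bucket search at 2^10; this is just the core idea).
 */
static int
next_pow2(int n)
{
    int     p = 1;

    while (p < n)
        p <<= 1;
    return p;
}

/*
 * Pick bucket and batch from disjoint bit ranges of the hash value.
 */
static void
get_bucket_and_batch(uint32_t hashvalue, int nbuckets, int log2_nbuckets,
                     int nbatch, int *bucketno, int *batchno)
{
    *bucketno = (int) (hashvalue & (uint32_t) (nbuckets - 1));
    if (nbatch > 1)
        *batchno = (int) ((hashvalue >> log2_nbuckets) & (uint32_t) (nbatch - 1));
    else
        *batchno = 0;
}

int
main(void)
{
    int     nbuckets = next_pow2(1000);     /* -> 1024 */
    int     nbatch = next_pow2(3);          /* -> 4 */
    int     log2_nbuckets = 0;
    int     bucketno;
    int     batchno;

    while ((1 << log2_nbuckets) < nbuckets)
        log2_nbuckets++;

    get_bucket_and_batch(0xDEADBEEFu, nbuckets, log2_nbuckets, nbatch,
                         &bucketno, &batchno);
    printf("hash 0xDEADBEEF -> bucket %d, batch %d\n", bucketno, batchno);
    return 0;
}

The payoff of the power-of-2 constraint is that this computation runs once per tuple on the hash-join inner and outer sides, so avoiding an integer division there is worth the small amount of wasted memory from rounding up.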
(When it's a left join, we + * if the inner relation is empty. (When it's a left join, we * should always make this check, since we aren't going to be * able to skip the join on the strength of an empty inner * relation anyway.) @@ -530,7 +530,7 @@ ExecInitHashJoin(HashJoin *node, EState *estate, int eflags) * tuple slot of the Hash node (which is our inner plan). we can do this * because Hash nodes don't return tuples via ExecProcNode() -- instead * the hash join node uses ExecScanHashBucket() to get at the contents of - * the hash table. -cim 6/9/91 + * the hash table. -cim 6/9/91 */ { HashState *hashstate = (HashState *) innerPlanState(hjstate); @@ -896,7 +896,7 @@ ExecHashJoinSaveTuple(MinimalTuple tuple, uint32 hashvalue, /* * ExecHashJoinGetSavedTuple - * read the next tuple from a batch file. Return NULL if no more. + * read the next tuple from a batch file. Return NULL if no more. * * On success, *hashvalue is set to the tuple's hash value, and the tuple * itself is stored in the given slot. diff --git a/src/backend/executor/nodeIndexonlyscan.c b/src/backend/executor/nodeIndexonlyscan.c index 8d5c3544d56..c55723608d6 100644 --- a/src/backend/executor/nodeIndexonlyscan.c +++ b/src/backend/executor/nodeIndexonlyscan.c @@ -88,7 +88,7 @@ IndexOnlyNext(IndexOnlyScanState *node) * Note on Memory Ordering Effects: visibilitymap_test does not lock * the visibility map buffer, and therefore the result we read here * could be slightly stale. However, it can't be stale enough to - * matter. It suffices to show that (1) there is a read barrier + * matter. It suffices to show that (1) there is a read barrier * between the time we read the index TID and the time we test the * visibility map; and (2) there is a write barrier between the time * some other concurrent process clears the visibility map bit and the @@ -113,7 +113,7 @@ IndexOnlyNext(IndexOnlyScanState *node) /* * Only MVCC snapshots are supported here, so there should be no * need to keep following the HOT chain once a visible entry has - * been found. If we did want to allow that, we'd need to keep + * been found. If we did want to allow that, we'd need to keep * more state to remember not to call index_getnext_tid next time. */ if (scandesc->xs_continue_hot) @@ -122,7 +122,7 @@ IndexOnlyNext(IndexOnlyScanState *node) /* * Note: at this point we are holding a pin on the heap page, as * recorded in scandesc->xs_cbuf. We could release that pin now, - * but it's not clear whether it's a win to do so. The next index + * but it's not clear whether it's a win to do so. The next index * entry might require a visit to the same heap page. */ } diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c index 4bde1e3afe1..2b89dc60f67 100644 --- a/src/backend/executor/nodeIndexscan.c +++ b/src/backend/executor/nodeIndexscan.c @@ -216,7 +216,7 @@ ExecIndexEvalRuntimeKeys(ExprContext *econtext, /* * For each run-time key, extract the run-time expression and evaluate - * it with respect to the current context. We then stick the result + * it with respect to the current context. We then stick the result * into the proper scan key. * * Note: the result of the eval could be a pass-by-ref value that's @@ -349,7 +349,7 @@ ExecIndexAdvanceArrayKeys(IndexArrayKeyInfo *arrayKeys, int numArrayKeys) /* * Note we advance the rightmost array key most quickly, since it will * correspond to the lowest-order index column among the available - * qualifications. This is hypothesized to result in better locality of + * qualifications. 
This is hypothesized to result in better locality of * access in the index. */ for (j = numArrayKeys - 1; j >= 0; j--) diff --git a/src/backend/executor/nodeLimit.c b/src/backend/executor/nodeLimit.c index ba650471030..0c723ac224d 100644 --- a/src/backend/executor/nodeLimit.c +++ b/src/backend/executor/nodeLimit.c @@ -113,7 +113,7 @@ ExecLimit(LimitState *node) /* * The subplan is known to return no tuples (or not more than - * OFFSET tuples, in general). So we return no tuples. + * OFFSET tuples, in general). So we return no tuples. */ return NULL; diff --git a/src/backend/executor/nodeLockRows.c b/src/backend/executor/nodeLockRows.c index ae107961ba3..298d4b4d017 100644 --- a/src/backend/executor/nodeLockRows.c +++ b/src/backend/executor/nodeLockRows.c @@ -182,7 +182,7 @@ lnext: tuple.t_self = copyTuple->t_self; /* - * Need to run a recheck subquery. Initialize EPQ state if we + * Need to run a recheck subquery. Initialize EPQ state if we * didn't do so already. */ if (!epq_started) @@ -213,7 +213,7 @@ lnext: { /* * First, fetch a copy of any rows that were successfully locked - * without any update having occurred. (We do this in a separate pass + * without any update having occurred. (We do this in a separate pass * so as to avoid overhead in the common case where there are no * concurrent updates.) */ @@ -318,7 +318,7 @@ ExecInitLockRows(LockRows *node, EState *estate, int eflags) /* * Locate the ExecRowMark(s) that this node is responsible for, and - * construct ExecAuxRowMarks for them. (InitPlan should already have + * construct ExecAuxRowMarks for them. (InitPlan should already have * built the global list of ExecRowMarks.) */ lrstate->lr_arowMarks = NIL; @@ -340,7 +340,7 @@ ExecInitLockRows(LockRows *node, EState *estate, int eflags) aerm = ExecBuildAuxRowMark(erm, outerPlan->targetlist); /* - * Only locking rowmarks go into our own list. Non-locking marks are + * Only locking rowmarks go into our own list. Non-locking marks are * passed off to the EvalPlanQual machinery. This is because we don't * want to bother fetching non-locked rows unless we actually have to * do an EPQ recheck. diff --git a/src/backend/executor/nodeMaterial.c b/src/backend/executor/nodeMaterial.c index 13002bf9b46..4a632ee686f 100644 --- a/src/backend/executor/nodeMaterial.c +++ b/src/backend/executor/nodeMaterial.c @@ -185,7 +185,7 @@ ExecInitMaterial(Material *node, EState *estate, int eflags) /* * Tuplestore's interpretation of the flag bits is subtly different from * the general executor meaning: it doesn't think BACKWARD necessarily - * means "backwards all the way to start". If told to support BACKWARD we + * means "backwards all the way to start". If told to support BACKWARD we * must include REWIND in the tuplestore eflags, else tuplestore_trim * might throw away too much. */ diff --git a/src/backend/executor/nodeMergeAppend.c b/src/backend/executor/nodeMergeAppend.c index 74fa40da74c..47ed068c7b7 100644 --- a/src/backend/executor/nodeMergeAppend.c +++ b/src/backend/executor/nodeMergeAppend.c @@ -32,7 +32,7 @@ * / * MergeAppend---+------+------+--- nil * / \ | | | - * nil nil ... ... ... + * nil nil ... ... ... 
* subplans */ diff --git a/src/backend/executor/nodeMergejoin.c b/src/backend/executor/nodeMergejoin.c index 2a1b4ed8b66..bc036a30b0d 100644 --- a/src/backend/executor/nodeMergejoin.c +++ b/src/backend/executor/nodeMergejoin.c @@ -41,7 +41,7 @@ * * Therefore, rather than directly executing the merge join clauses, * we evaluate the left and right key expressions separately and then - * compare the columns one at a time (see MJCompare). The planner + * compare the columns one at a time (see MJCompare). The planner * passes us enough information about the sort ordering of the inputs * to allow us to determine how to make the comparison. We may use the * appropriate btree comparison function, since Postgres' only notion @@ -269,7 +269,7 @@ MJExamineQuals(List *mergeclauses, * input, since we assume mergejoin operators are strict. If the NULL * is in the first join column, and that column sorts nulls last, then * we can further conclude that no following tuple can match anything - * either, since they must all have nulls in the first column. However, + * either, since they must all have nulls in the first column. However, * that case is only interesting if we're not in FillOuter mode, else * we have to visit all the tuples anyway. * @@ -325,7 +325,7 @@ MJEvalOuterValues(MergeJoinState *mergestate) /* * MJEvalInnerValues * - * Same as above, but for the inner tuple. Here, we have to be prepared + * Same as above, but for the inner tuple. Here, we have to be prepared * to load data from either the true current inner, or the marked inner, * so caller must tell us which slot to load from. */ @@ -736,7 +736,7 @@ ExecMergeJoin(MergeJoinState *node) case MJEVAL_MATCHABLE: /* - * OK, we have the initial tuples. Begin by skipping + * OK, we have the initial tuples. Begin by skipping * non-matching tuples. */ node->mj_JoinState = EXEC_MJ_SKIP_TEST; @@ -1131,7 +1131,7 @@ ExecMergeJoin(MergeJoinState *node) * which means that all subsequent outer tuples will be * larger than our marked inner tuples. So we need not * revisit any of the marked tuples but can proceed to - * look for a match to the current inner. If there's + * look for a match to the current inner. If there's * no more inners, no more matches are possible. * ---------------- */ @@ -1522,7 +1522,7 @@ ExecInitMergeJoin(MergeJoin *node, EState *estate, int eflags) * For certain types of inner child nodes, it is advantageous to issue * MARK every time we advance past an inner tuple we will never return to. * For other types, MARK on a tuple we cannot return to is a waste of - * cycles. Detect which case applies and set mj_ExtraMarks if we want to + * cycles. Detect which case applies and set mj_ExtraMarks if we want to * issue "unnecessary" MARK calls. * * Currently, only Material wants the extra MARKs, and it will be helpful diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c index fca7a2581f3..8ac60477fb8 100644 --- a/src/backend/executor/nodeModifyTable.c +++ b/src/backend/executor/nodeModifyTable.c @@ -30,7 +30,7 @@ * * If the query specifies RETURNING, then the ModifyTable returns a * RETURNING tuple after completing each row insert, update, or delete. - * It must be called again to continue the operation. Without RETURNING, + * It must be called again to continue the operation. Without RETURNING, * we just loop within the node until all the work is done, then * return NULL. This avoids useless call/return overhead. */ @@ -419,7 +419,7 @@ ldelete:; * proceed. 
We don't want to discard the original DELETE * while keeping the triggered actions based on its deletion; * and it would be no better to allow the original DELETE - * while discarding updates that it triggered. The row update + * while discarding updates that it triggered. The row update * carries some information that might be important according * to business rules; so throwing an error is the only safe * course. @@ -491,7 +491,7 @@ ldelete:; { /* * We have to put the target tuple into a slot, which means first we - * gotta fetch it. We can use the trigger tuple slot. + * gotta fetch it. We can use the trigger tuple slot. */ TupleTableSlot *rslot; HeapTupleData deltuple; @@ -549,7 +549,7 @@ ldelete:; * note: we can't run UPDATE queries with transactions * off because UPDATEs are actually INSERTs and our * scan will mistakenly loop forever, updating the tuple - * it just inserted.. This should be fixed but until it + * it just inserted.. This should be fixed but until it * is, we don't want to get stuck in an infinite loop * which corrupts your database.. * @@ -657,7 +657,7 @@ ExecUpdate(ItemPointer tupleid, * * If we generate a new candidate tuple after EvalPlanQual testing, we * must loop back here and recheck constraints. (We don't need to - * redo triggers, however. If there are any BEFORE triggers then + * redo triggers, however. If there are any BEFORE triggers then * trigger.c will have done heap_lock_tuple to lock the correct tuple, * so there's no need to do them again.) */ @@ -900,7 +900,7 @@ ExecModifyTable(ModifyTableState *node) /* * es_result_relation_info must point to the currently active result - * relation while we are within this ModifyTable node. Even though + * relation while we are within this ModifyTable node. Even though * ModifyTable nodes can't be nested statically, they can be nested * dynamically (since our subplan could include a reference to a modifying * CTE). So we have to save and restore the caller's value. @@ -916,7 +916,7 @@ ExecModifyTable(ModifyTableState *node) for (;;) { /* - * Reset the per-output-tuple exprcontext. This is needed because + * Reset the per-output-tuple exprcontext. This is needed because * triggers expect to use that context as workspace. It's a bit ugly * to do this below the top level of the plan, however. We might need * to rethink this later. @@ -973,6 +973,7 @@ ExecModifyTable(ModifyTableState *node) * ctid!! */ tupleid = &tuple_ctid; } + /* * Use the wholerow attribute, when available, to reconstruct * the old relation tuple. @@ -1105,7 +1106,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) * call ExecInitNode on each of the plans to be executed and save the * results into the array "mt_plans". This is also a convenient place to * verify that the proposed target relations are valid and open their - * indexes for insertion of new index entries. Note we *must* set + * indexes for insertion of new index entries. Note we *must* set * estate->es_result_relation_info correctly while we initialize each * sub-plan; ExecContextForcesOids depends on that! */ @@ -1125,7 +1126,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) /* * If there are indices on the result relation, open them and save * descriptors in the result relation info, so that we can add new - * index entries for the tuples we add/update. We need not do this + * index entries for the tuples we add/update. We need not do this * for a DELETE, however, since deletion doesn't affect indexes. 
Also, * inside an EvalPlanQual operation, the indexes might be open * already, since we share the resultrel state with the original @@ -1175,6 +1176,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) WithCheckOption *wco = (WithCheckOption *) lfirst(ll); ExprState *wcoExpr = ExecInitExpr((Expr *) wco->qual, mtstate->mt_plans[i]); + wcoExprs = lappend(wcoExprs, wcoExpr); } @@ -1194,7 +1196,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) /* * Initialize result tuple slot and assign its rowtype using the first - * RETURNING list. We assume the rest will look the same. + * RETURNING list. We assume the rest will look the same. */ tupDesc = ExecTypeFromTL((List *) linitial(node->returningLists), false); @@ -1240,7 +1242,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) /* * If we have any secondary relations in an UPDATE or DELETE, they need to * be treated like non-locked relations in SELECT FOR UPDATE, ie, the - * EvalPlanQual mechanism needs to be told about them. Locate the + * EvalPlanQual mechanism needs to be told about them. Locate the * relevant ExecRowMarks. */ foreach(l, node->rowMarks) @@ -1281,7 +1283,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) * attribute present --- no need to look first. * * If there are multiple result relations, each one needs its own junk - * filter. Note multiple rels are only possible for UPDATE/DELETE, so we + * filter. Note multiple rels are only possible for UPDATE/DELETE, so we * can't be fooled by some needing a filter and some not. * * This section of code is also a convenient place to verify that the diff --git a/src/backend/executor/nodeRecursiveunion.c b/src/backend/executor/nodeRecursiveunion.c index 32c859c1a20..de3d87a5d67 100644 --- a/src/backend/executor/nodeRecursiveunion.c +++ b/src/backend/executor/nodeRecursiveunion.c @@ -316,7 +316,7 @@ ExecReScanRecursiveUnion(RecursiveUnionState *node) /* * if chgParam of subnode is not null then plan will be re-scanned by - * first ExecProcNode. Because of above, we only have to do this to the + * first ExecProcNode. Because of above, we only have to do this to the * non-recursive term. */ if (outerPlan->chgParam == NULL) diff --git a/src/backend/executor/nodeSetOp.c b/src/backend/executor/nodeSetOp.c index 1f32c938489..75f6ed98837 100644 --- a/src/backend/executor/nodeSetOp.c +++ b/src/backend/executor/nodeSetOp.c @@ -5,7 +5,7 @@ * * The input of a SetOp node consists of tuples from two relations, * which have been combined into one dataset, with a junk attribute added - * that shows which relation each tuple came from. In SETOP_SORTED mode, + * that shows which relation each tuple came from. In SETOP_SORTED mode, * the input has furthermore been sorted according to all the grouping * columns (ie, all the non-junk attributes). The SetOp node scans each * group of identical tuples to determine how many came from each input @@ -18,7 +18,7 @@ * relation is the left-hand one for EXCEPT, and tries to make the smaller * input relation come first for INTERSECT. We build a hash table in memory * with one entry for each group of identical tuples, and count the number of - * tuples in the group from each relation. After seeing all the input, we + * tuples in the group from each relation. After seeing all the input, we * scan the hashtable and generate the correct output using those counts. 
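The nodeSetOp.c hunk above describes hashed set-op processing: one hashtable entry per group of identical tuples, a count of how many group members came from each input relation, and output generated from those counts once all input has been seen. A small standalone sketch of that final step, assuming the usual SQL INTERSECT/EXCEPT [ALL] semantics; the types and names are invented for the example:

#include <stdio.h>

/* Per-group counters: how many identical tuples came from each input. */
typedef struct
{
    long    numLeft;
    long    numRight;
} SetOpCounts;

typedef enum
{
    OP_INTERSECT,
    OP_INTERSECT_ALL,
    OP_EXCEPT,
    OP_EXCEPT_ALL
} SetOpMode;

/*
 * How many copies of the group's representative tuple to emit, derived
 * purely from the counts accumulated while scanning the combined input.
 */
static long
output_copies(const SetOpCounts *c, SetOpMode mode)
{
    switch (mode)
    {
        case OP_INTERSECT:
            return (c->numLeft > 0 && c->numRight > 0) ? 1 : 0;
        case OP_INTERSECT_ALL:
            return (c->numLeft < c->numRight) ? c->numLeft : c->numRight;
        case OP_EXCEPT:
            return (c->numLeft > 0 && c->numRight == 0) ? 1 : 0;
        case OP_EXCEPT_ALL:
            return (c->numLeft > c->numRight) ? c->numLeft - c->numRight : 0;
    }
    return 0;
}

int
main(void)
{
    SetOpCounts c = {3, 1};

    printf("INTERSECT: %ld, INTERSECT ALL: %ld, EXCEPT: %ld, EXCEPT ALL: %ld\n",
           output_copies(&c, OP_INTERSECT),
           output_copies(&c, OP_INTERSECT_ALL),
           output_copies(&c, OP_EXCEPT),
           output_copies(&c, OP_EXCEPT_ALL));
    return 0;
}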
* We can avoid making hashtable entries for any tuples appearing only in the * second input relation, since they cannot result in any output. @@ -268,7 +268,7 @@ setop_retrieve_direct(SetOpState *setopstate) /* * Store the copied first input tuple in the tuple table slot reserved - * for it. The tuple will be deleted when it is cleared from the + * for it. The tuple will be deleted when it is cleared from the * slot. */ ExecStoreTuple(setopstate->grp_firstTuple, diff --git a/src/backend/executor/nodeSubplan.c b/src/backend/executor/nodeSubplan.c index 49d193bbae9..5d02d9420b1 100644 --- a/src/backend/executor/nodeSubplan.c +++ b/src/backend/executor/nodeSubplan.c @@ -261,12 +261,12 @@ ExecScanSubPlan(SubPlanState *node, * semantics for ANY_SUBLINK or AND semantics for ALL_SUBLINK. * (ROWCOMPARE_SUBLINK doesn't allow multiple tuples from the subplan.) * NULL results from the combining operators are handled according to the - * usual SQL semantics for OR and AND. The result for no input tuples is + * usual SQL semantics for OR and AND. The result for no input tuples is * FALSE for ANY_SUBLINK, TRUE for ALL_SUBLINK, NULL for * ROWCOMPARE_SUBLINK. * * For EXPR_SUBLINK we require the subplan to produce no more than one - * tuple, else an error is raised. If zero tuples are produced, we return + * tuple, else an error is raised. If zero tuples are produced, we return * NULL. Assuming we get a tuple, we just use its first column (there can * be only one non-junk column in this case). * @@ -409,7 +409,7 @@ ExecScanSubPlan(SubPlanState *node, else if (!found) { /* - * deal with empty subplan result. result/isNull were previously + * deal with empty subplan result. result/isNull were previously * initialized correctly for all sublink types except EXPR and * ROWCOMPARE; for those, return NULL. */ @@ -894,7 +894,7 @@ ExecInitSubPlan(SubPlan *subplan, PlanState *parent) * * This is called from ExecEvalParamExec() when the value of a PARAM_EXEC * parameter is requested and the param's execPlan field is set (indicating - * that the param has not yet been evaluated). This allows lazy evaluation + * that the param has not yet been evaluated). This allows lazy evaluation * of initplans: we don't run the subplan until/unless we need its output. * Note that this routine MUST clear the execPlan fields of the plan's * output parameters after evaluating them! @@ -1122,7 +1122,7 @@ ExecInitAlternativeSubPlan(AlternativeSubPlan *asplan, PlanState *parent) /* * Select the one to be used. For this, we need an estimate of the number * of executions of the subplan. We use the number of output rows - * expected from the parent plan node. This is a good estimate if we are + * expected from the parent plan node. This is a good estimate if we are * in the parent's targetlist, and an underestimate (but probably not by * more than a factor of 2) if we are in the qual. */ diff --git a/src/backend/executor/nodeSubqueryscan.c b/src/backend/executor/nodeSubqueryscan.c index c69534da770..3d7cce2c9ea 100644 --- a/src/backend/executor/nodeSubqueryscan.c +++ b/src/backend/executor/nodeSubqueryscan.c @@ -194,7 +194,7 @@ ExecReScanSubqueryScan(SubqueryScanState *node) /* * ExecReScan doesn't know about my subplan, so I have to do - * changed-parameter signaling myself. This is just as well, because the + * changed-parameter signaling myself. This is just as well, because the * subplan has its own memory context in which its chgParam state lives. 
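The nodeSubplan.c hunk above spells out the combining rules for ANY/ALL sublinks: OR semantics for ANY, AND semantics for ALL, SQL three-valued handling of NULL comparison results, and FALSE/TRUE respectively when the subplan produces no rows at all. A standalone sketch of just that combining step, using a toy three-valued enum rather than the executor's Datum/isnull representation:

#include <stdio.h>

/* Three-valued truth values, as in SQL. */
typedef enum
{
    B_FALSE,
    B_TRUE,
    B_NULL
} Bool3;

/* ANY sublink: OR semantics; zero rows yields FALSE. */
static Bool3
combine_any(const Bool3 *rows, int n)
{
    Bool3   result = B_FALSE;

    for (int i = 0; i < n; i++)
    {
        if (rows[i] == B_TRUE)
            return B_TRUE;      /* any TRUE settles it immediately */
        if (rows[i] == B_NULL)
            result = B_NULL;    /* remember we saw an unknown */
    }
    return result;
}

/* ALL sublink: AND semantics; zero rows yields TRUE. */
static Bool3
combine_all(const Bool3 *rows, int n)
{
    Bool3   result = B_TRUE;

    for (int i = 0; i < n; i++)
    {
        if (rows[i] == B_FALSE)
            return B_FALSE;     /* any FALSE settles it immediately */
        if (rows[i] == B_NULL)
            result = B_NULL;
    }
    return result;
}

int
main(void)
{
    const char *names[] = {"false", "true", "null"};
    Bool3   rows[] = {B_FALSE, B_NULL, B_FALSE};

    printf("ANY -> %s, ALL -> %s\n",
           names[combine_any(rows, 3)], names[combine_all(rows, 3)]);
    return 0;
}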
*/ if (node->ss.ps.chgParam != NULL) diff --git a/src/backend/executor/nodeUnique.c b/src/backend/executor/nodeUnique.c index 597a26018ad..ab3ec9735f4 100644 --- a/src/backend/executor/nodeUnique.c +++ b/src/backend/executor/nodeUnique.c @@ -4,7 +4,7 @@ * Routines to handle unique'ing of queries where appropriate * * Unique is a very simple node type that just filters out duplicate - * tuples from a stream of sorted tuples from its subplan. It's essentially + * tuples from a stream of sorted tuples from its subplan. It's essentially * a dumbed-down form of Group: the duplicate-removal functionality is * identical. However, Unique doesn't do projection nor qual checking, * so it's marginally more efficient for cases where neither is needed. diff --git a/src/backend/executor/nodeValuesscan.c b/src/backend/executor/nodeValuesscan.c index 3016a6b072a..83b1324abc5 100644 --- a/src/backend/executor/nodeValuesscan.c +++ b/src/backend/executor/nodeValuesscan.c @@ -215,7 +215,7 @@ ExecInitValuesScan(ValuesScan *node, EState *estate, int eflags) planstate = &scanstate->ss.ps; /* - * Create expression contexts. We need two, one for per-sublist + * Create expression contexts. We need two, one for per-sublist * processing and one for execScan.c to use for quals and projections. We * cheat a little by using ExecAssignExprContext() to build both. */ diff --git a/src/backend/executor/nodeWindowAgg.c b/src/backend/executor/nodeWindowAgg.c index 40a925331c9..a0470d3eab2 100644 --- a/src/backend/executor/nodeWindowAgg.c +++ b/src/backend/executor/nodeWindowAgg.c @@ -4,7 +4,7 @@ * routines to handle WindowAgg nodes. * * A WindowAgg node evaluates "window functions" across suitable partitions - * of the input tuple set. Any one WindowAgg works for just a single window + * of the input tuple set. Any one WindowAgg works for just a single window * specification, though it can evaluate multiple window functions sharing * identical window specifications. The input tuples are required to be * delivered in sorted order, with the PARTITION BY columns (if any) as @@ -14,7 +14,7 @@ * * Since window functions can require access to any or all of the rows in * the current partition, we accumulate rows of the partition into a - * tuplestore. The window functions are called using the WindowObject API + * tuplestore. The window functions are called using the WindowObject API * so that they can access those rows as needed. * * We also support using plain aggregate functions as window functions. @@ -280,7 +280,7 @@ advance_windowaggregate(WindowAggState *winstate, { /* * For a strict transfn, nothing happens when there's a NULL input; we - * just keep the prior transValue. Note transValueCount doesn't + * just keep the prior transValue. Note transValueCount doesn't * change either. */ for (i = 1; i <= numArguments; i++) @@ -330,7 +330,7 @@ advance_windowaggregate(WindowAggState *winstate, } /* - * OK to call the transition function. Set winstate->curaggcontext while + * OK to call the transition function. Set winstate->curaggcontext while * calling it, for possible use by AggCheckCallContext. */ InitFunctionCallInfoData(*fcinfo, &(peraggstate->transfn), @@ -362,7 +362,7 @@ advance_windowaggregate(WindowAggState *winstate, /* * If pass-by-ref datatype, must copy the new value into aggcontext and - * pfree the prior transValue. But if transfn returned a pointer to its + * pfree the prior transValue. But if transfn returned a pointer to its * first input, we don't need to do anything. 
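The nodeWindowAgg.c hunk above (like the matching nodeAgg.c comments earlier in this diff) describes the copy-avoidance rule for pass-by-reference transition values: the caller only copies the result into long-lived aggregate memory, and frees the old value, when the transition function hands back a different allocation than it was given. A standalone sketch of that caller-side rule, with plain malloc/free standing in for memory contexts and both styles of transition function shown:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* A pass-by-reference transition value: a heap-allocated running sum. */
typedef struct
{
    long    sum;
} TransVal;

/*
 * A transition function that scribbles on its first argument and returns
 * the same pointer, so the caller has nothing to copy or free.
 */
static TransVal *
sum_trans_inplace(TransVal *state, long input)
{
    state->sum += input;
    return state;
}

/*
 * A transition function that instead returns a freshly allocated result
 * (think: memory in a short-lived per-tuple context).
 */
static TransVal *
sum_trans_fresh(TransVal *state, long input)
{
    TransVal   *result = malloc(sizeof(TransVal));

    result->sum = state->sum + input;
    return result;
}

int
main(void)
{
    long        inputs[] = {1, 2, 3, 4};
    TransVal   *trans = calloc(1, sizeof(TransVal));    /* long-lived state */

    for (int i = 0; i < 4; i++)
    {
        TransVal   *result = (i % 2 == 0)
            ? sum_trans_inplace(trans, inputs[i])
            : sum_trans_fresh(trans, inputs[i]);

        /*
         * Only when the function handed back a different allocation do we
         * copy it into the long-lived state and release both the old value
         * and the temporary result.
         */
        if (result != trans)
        {
            TransVal   *copy = malloc(sizeof(TransVal));

            memcpy(copy, result, sizeof(TransVal));
            free(result);
            free(trans);
            trans = copy;
        }
    }
    printf("sum = %ld\n", trans->sum);
    free(trans);
    return 0;
}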
*/ if (!peraggstate->transtypeByVal && @@ -485,7 +485,7 @@ advance_windowaggregate_base(WindowAggState *winstate, } /* - * OK to call the inverse transition function. Set + * OK to call the inverse transition function. Set * winstate->curaggcontext while calling it, for possible use by * AggCheckCallContext. */ @@ -513,7 +513,7 @@ advance_windowaggregate_base(WindowAggState *winstate, /* * If pass-by-ref datatype, must copy the new value into aggcontext and - * pfree the prior transValue. But if invtransfn returned a pointer to + * pfree the prior transValue. But if invtransfn returned a pointer to * its first input, we don't need to do anything. * * Note: the checks for null values here will never fire, but it seems @@ -827,7 +827,7 @@ eval_windowaggregates(WindowAggState *winstate) * * We assume that aggregates using the shared context always restart if * *any* aggregate restarts, and we may thus clean up the shared - * aggcontext if that is the case. Private aggcontexts are reset by + * aggcontext if that is the case. Private aggcontexts are reset by * initialize_windowaggregate() if their owning aggregate restarts. If we * aren't restarting an aggregate, we need to free any previously saved * result for it, else we'll leak memory. @@ -864,9 +864,9 @@ eval_windowaggregates(WindowAggState *winstate) * (i.e., frameheadpos) and aggregatedupto, while restarted aggregates * contain no rows. If there are any restarted aggregates, we must thus * begin aggregating anew at frameheadpos, otherwise we may simply - * continue at aggregatedupto. We must remember the old value of + * continue at aggregatedupto. We must remember the old value of * aggregatedupto to know how long to skip advancing non-restarted - * aggregates. If we modify aggregatedupto, we must also clear + * aggregates. If we modify aggregatedupto, we must also clear * agg_row_slot, per the loop invariant below. */ aggregatedupto_nonrestarted = winstate->aggregatedupto; @@ -881,7 +881,7 @@ eval_windowaggregates(WindowAggState *winstate) * Advance until we reach a row not in frame (or end of partition). * * Note the loop invariant: agg_row_slot is either empty or holds the row - * at position aggregatedupto. We advance aggregatedupto after processing + * at position aggregatedupto. We advance aggregatedupto after processing * a row. */ for (;;) @@ -1142,7 +1142,7 @@ spool_tuples(WindowAggState *winstate, int64 pos) /* * If the tuplestore has spilled to disk, alternate reading and writing - * becomes quite expensive due to frequent buffer flushes. It's cheaper + * becomes quite expensive due to frequent buffer flushes. It's cheaper * to force the entire partition to get spooled in one go. * * XXX this is a horrid kluge --- it'd be better to fix the performance @@ -1239,7 +1239,7 @@ release_partition(WindowAggState *winstate) * to our window framing rule * * The caller must have already determined that the row is in the partition - * and fetched it into a slot. This function just encapsulates the framing + * and fetched it into a slot. This function just encapsulates the framing * rules. */ static bool @@ -1341,7 +1341,7 @@ row_is_in_frame(WindowAggState *winstate, int64 pos, TupleTableSlot *slot) * * Uses the winobj's read pointer for any required fetches; hence, if the * frame mode is one that requires row comparisons, the winobj's mark must - * not be past the currently known frame head. Also uses the specified slot + * not be past the currently known frame head. Also uses the specified slot * for any required fetches. 
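The surrounding nodeWindowAgg.c hunks discuss moving-frame aggregation: rows entering the frame are folded in with the forward transition function, rows leaving it are removed with the inverse transition function (advance_windowaggregate_base), so the aggregate is only recomputed from scratch when it has to restart. A toy standalone sketch of a moving sum over a fixed-size frame, with invented variable names loosely echoing the comments above:

#include <stdio.h>

/*
 * Forward and inverse transition functions for a moving sum.  Toy
 * functions, not the WindowAgg machinery.
 */
static long
sum_trans(long state, int value)
{
    return state + value;
}

static long
sum_invtrans(long state, int value)
{
    return state - value;
}

int
main(void)
{
    int     rows[] = {4, 7, 1, 9, 3};
    int     nrows = 5;
    int     frame_size = 3;
    long    state = 0;
    int     aggregated_from = 0;    /* first row still included in state */
    int     aggregated_upto = 0;    /* next row to add into state */

    for (int frame_head = 0; frame_head + frame_size <= nrows; frame_head++)
    {
        /* Remove rows that fell off the front of the frame. */
        while (aggregated_from < frame_head)
            state = sum_invtrans(state, rows[aggregated_from++]);

        /* Add rows newly inside the frame. */
        while (aggregated_upto < frame_head + frame_size)
            state = sum_trans(state, rows[aggregated_upto++]);

        printf("sum over rows %d..%d = %ld\n",
               frame_head, frame_head + frame_size - 1, state);
    }
    return 0;
}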
*/ static void @@ -1446,7 +1446,7 @@ update_frameheadpos(WindowObject winobj, TupleTableSlot *slot) * * Uses the winobj's read pointer for any required fetches; hence, if the * frame mode is one that requires row comparisons, the winobj's mark must - * not be past the currently known frame tail. Also uses the specified slot + * not be past the currently known frame tail. Also uses the specified slot * for any required fetches. */ static void @@ -1789,8 +1789,8 @@ ExecInitWindowAgg(WindowAgg *node, EState *estate, int eflags) winstate->ss.ps.state = estate; /* - * Create expression contexts. We need two, one for per-input-tuple - * processing and one for per-output-tuple processing. We cheat a little + * Create expression contexts. We need two, one for per-input-tuple + * processing and one for per-output-tuple processing. We cheat a little * by using ExecAssignExprContext() to build both. */ ExecAssignExprContext(estate, &winstate->ss.ps); @@ -2288,7 +2288,7 @@ initialize_peragg(WindowAggState *winstate, WindowFunc *wfunc, /* * Insist that forward and inverse transition functions have the same - * strictness setting. Allowing them to differ would require handling + * strictness setting. Allowing them to differ would require handling * more special cases in advance_windowaggregate and * advance_windowaggregate_base, for no discernible benefit. This should * have been checked at agg definition time, but we must check again in @@ -2467,7 +2467,7 @@ window_gettupleslot(WindowObject winobj, int64 pos, TupleTableSlot *slot) * requested amount of space. Subsequent calls just return the same chunk. * * Memory obtained this way is normally used to hold state that should be - * automatically reset for each new partition. If a window function wants + * automatically reset for each new partition. If a window function wants * to hold state across the whole query, fcinfo->fn_extra can be used in the * usual way for that. */ diff --git a/src/backend/executor/nodeWorktablescan.c b/src/backend/executor/nodeWorktablescan.c index 2138ce78cf2..94ecf754fb1 100644 --- a/src/backend/executor/nodeWorktablescan.c +++ b/src/backend/executor/nodeWorktablescan.c @@ -82,7 +82,7 @@ ExecWorkTableScan(WorkTableScanState *node) { /* * On the first call, find the ancestor RecursiveUnion's state via the - * Param slot reserved for it. (We can't do this during node init because + * Param slot reserved for it. (We can't do this during node init because * there are corner cases where we'll get the init call before the * RecursiveUnion does.) */ diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c index e0325c4a7de..7ba1fd90663 100644 --- a/src/backend/executor/spi.c +++ b/src/backend/executor/spi.c @@ -256,7 +256,7 @@ AtEOSubXact_SPI(bool isCommit, SubTransactionId mySubid) } /* - * Pop the stack entry and reset global variables. Unlike + * Pop the stack entry and reset global variables. Unlike * SPI_finish(), we don't risk switching to memory contexts that might * be already gone. */ @@ -1306,7 +1306,7 @@ SPI_cursor_open_internal(const char *name, SPIPlanPtr plan, } /* - * Disallow SCROLL with SELECT FOR UPDATE. This is not redundant with the + * Disallow SCROLL with SELECT FOR UPDATE. This is not redundant with the * check in transformDeclareCursorStmt because the cursor options might * not have come through there. */ @@ -1560,7 +1560,7 @@ SPI_plan_is_valid(SPIPlanPtr plan) /* * SPI_result_code_string --- convert any SPI return code to a string * - * This is often useful in error messages. 
Most callers will probably + * This is often useful in error messages. Most callers will probably * only pass negative (error-case) codes, but for generality we recognize * the success codes too. */ @@ -1630,7 +1630,7 @@ SPI_result_code_string(int code) * CachedPlanSources. * * This is exported so that pl/pgsql can use it (this beats letting pl/pgsql - * look directly into the SPIPlan for itself). It's not documented in + * look directly into the SPIPlan for itself). It's not documented in * spi.sgml because we'd just as soon not have too many places using this. */ List * @@ -1646,7 +1646,7 @@ SPI_plan_get_plan_sources(SPIPlanPtr plan) * return NULL. Caller is responsible for doing ReleaseCachedPlan(). * * This is exported so that pl/pgsql can use it (this beats letting pl/pgsql - * look directly into the SPIPlan for itself). It's not documented in + * look directly into the SPIPlan for itself). It's not documented in * spi.sgml because we'd just as soon not have too many places using this. */ CachedPlan * @@ -2204,7 +2204,7 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI, /* * The last canSetTag query sets the status values returned to the - * caller. Be careful to free any tuptables not returned, to + * caller. Be careful to free any tuptables not returned, to * avoid intratransaction memory leak. */ if (canSetTag) diff --git a/src/backend/executor/tstoreReceiver.c b/src/backend/executor/tstoreReceiver.c index 23f11360c3a..c15c99a1f4e 100644 --- a/src/backend/executor/tstoreReceiver.c +++ b/src/backend/executor/tstoreReceiver.c @@ -5,7 +5,7 @@ * a Tuplestore. * * Optionally, we can force detoasting (but not decompression) of out-of-line - * toasted values. This is to support cursors WITH HOLD, which must retain + * toasted values. This is to support cursors WITH HOLD, which must retain * data even if the underlying table is dropped. * * diff --git a/src/backend/lib/stringinfo.c b/src/backend/lib/stringinfo.c index 9b9ba0a22e0..7d0309079da 100644 --- a/src/backend/lib/stringinfo.c +++ b/src/backend/lib/stringinfo.c @@ -99,7 +99,7 @@ appendStringInfo(StringInfo str, const char *fmt,...) * appendStringInfoVA * * Attempt to format text data under the control of fmt (an sprintf-style - * format string) and append it to whatever is already in str. If successful + * format string) and append it to whatever is already in str. If successful * return zero; if not (because there's not enough space), return an estimate * of the space needed, without modifying str. Typically the caller should * pass the return value to enlargeStringInfo() before trying again; see @@ -247,7 +247,7 @@ enlargeStringInfo(StringInfo str, int needed) int newlen; /* - * Guard against out-of-range "needed" values. Without this, we can get + * Guard against out-of-range "needed" values. Without this, we can get * an overflow or infinite loop in the following. */ if (needed < 0) /* should not happen */ diff --git a/src/backend/libpq/auth.c b/src/backend/libpq/auth.c index 8fa9aa797f4..70b0b939823 100644 --- a/src/backend/libpq/auth.c +++ b/src/backend/libpq/auth.c @@ -49,7 +49,7 @@ static int recv_and_check_password_packet(Port *port, char **logdetail); /* Max size of username ident server can return */ #define IDENT_USERNAME_MAX 512 -/* Standard TCP port number for Ident service. Assigned by IANA */ +/* Standard TCP port number for Ident service. 
Assigned by IANA */ #define IDENT_PORT 113 static int ident_inet(hbaPort *port); @@ -677,7 +677,7 @@ recv_password_packet(Port *port) (errmsg("received password packet"))); /* - * Return the received string. Note we do not attempt to do any + * Return the received string. Note we do not attempt to do any * character-set conversion on it; since we don't yet know the client's * encoding, there wouldn't be much point. */ @@ -1387,7 +1387,7 @@ interpret_ident_response(const char *ident_response, /* * Talk to the ident server on host "remote_ip_addr" and find out who * owns the tcp connection from his port "remote_port" to port - * "local_port_addr" on host "local_ip_addr". Return the user name the + * "local_port_addr" on host "local_ip_addr". Return the user name the * ident server gives as "*ident_user". * * IP addresses and port numbers are in network byte order. @@ -1591,7 +1591,7 @@ auth_peer(hbaPort *port) { ereport(LOG, (errmsg("failed to look up local user id %ld: %s", - (long) uid, errno ? strerror(errno) : _("user does not exist")))); + (long) uid, errno ? strerror(errno) : _("user does not exist")))); return STATUS_ERROR; } @@ -2006,8 +2006,8 @@ CheckLDAPAuth(Port *port) attributes[1] = NULL; filter = psprintf("(%s=%s)", - attributes[0], - port->user_name); + attributes[0], + port->user_name); r = ldap_search_s(ldap, port->hba->ldapbasedn, @@ -2095,9 +2095,9 @@ CheckLDAPAuth(Port *port) } else fulluser = psprintf("%s%s%s", - port->hba->ldapprefix ? port->hba->ldapprefix : "", - port->user_name, - port->hba->ldapsuffix ? port->hba->ldapsuffix : ""); + port->hba->ldapprefix ? port->hba->ldapprefix : "", + port->user_name, + port->hba->ldapsuffix ? port->hba->ldapsuffix : ""); r = ldap_simple_bind_s(ldap, fulluser, passwd); ldap_unbind(ldap); diff --git a/src/backend/libpq/be-fsstubs.c b/src/backend/libpq/be-fsstubs.c index 827d4c58886..4a6bcf5598f 100644 --- a/src/backend/libpq/be-fsstubs.c +++ b/src/backend/libpq/be-fsstubs.c @@ -768,7 +768,7 @@ lo_get_fragment_internal(Oid loOid, int64 offset, int32 nbytes) LargeObjectDesc *loDesc; int64 loSize; int64 result_length; - int total_read PG_USED_FOR_ASSERTS_ONLY; + int total_read PG_USED_FOR_ASSERTS_ONLY; bytea *result = NULL; /* @@ -870,7 +870,7 @@ lo_create_bytea(PG_FUNCTION_ARGS) Oid loOid = PG_GETARG_OID(0); bytea *str = PG_GETARG_BYTEA_PP(1); LargeObjectDesc *loDesc; - int written PG_USED_FOR_ASSERTS_ONLY; + int written PG_USED_FOR_ASSERTS_ONLY; CreateFSContext(); @@ -893,7 +893,7 @@ lo_put(PG_FUNCTION_ARGS) int64 offset = PG_GETARG_INT64(1); bytea *str = PG_GETARG_BYTEA_PP(2); LargeObjectDesc *loDesc; - int written PG_USED_FOR_ASSERTS_ONLY; + int written PG_USED_FOR_ASSERTS_ONLY; CreateFSContext(); diff --git a/src/backend/libpq/be-secure.c b/src/backend/libpq/be-secure.c index 56ad6ab4247..59204cfe801 100644 --- a/src/backend/libpq/be-secure.c +++ b/src/backend/libpq/be-secure.c @@ -30,13 +30,13 @@ * impersonations. * * Another benefit of EDH is that it allows the backend and - * clients to use DSA keys. DSA keys can only provide digital + * clients to use DSA keys. DSA keys can only provide digital * signatures, not encryption, and are often acceptable in * jurisdictions where RSA keys are unacceptable. * * The downside to EDH is that it makes it impossible to * use ssldump(1) if there's a problem establishing an SSL - * session. In this case you'll need to temporarily disable + * session. In this case you'll need to temporarily disable * EDH by commenting out the callback. * * ... 
@@ -119,7 +119,7 @@ char *SSLCipherSuites = NULL; char *SSLECDHCurve; /* GUC variable: if false, prefer client ciphers */ -bool SSLPreferServerCiphers; +bool SSLPreferServerCiphers; /* ------------------------------------------------------------ */ /* Hardcoded values */ diff --git a/src/backend/libpq/hba.c b/src/backend/libpq/hba.c index b070bfeda35..fd98c60ddb0 100644 --- a/src/backend/libpq/hba.c +++ b/src/backend/libpq/hba.c @@ -1758,7 +1758,7 @@ check_hba(hbaPort *port) * Read the config file and create a List of HbaLine records for the contents. * * The configuration is read into a temporary list, and if any parse error - * occurs the old list is kept in place and false is returned. Only if the + * occurs the old list is kept in place and false is returned. Only if the * whole file parses OK is the list replaced, and the function returns true. * * On a false result, caller will take care of reporting a FATAL error in case @@ -2244,7 +2244,7 @@ load_ident(void) /* * Determine what authentication method should be used when accessing database - * "database" from frontend "raddr", user "user". Return the method and + * "database" from frontend "raddr", user "user". Return the method and * an optional argument (stored in fields of *port), and STATUS_OK. * * If the file does not contain any entry matching the request, we return diff --git a/src/backend/libpq/md5.c b/src/backend/libpq/md5.c index e2c929fb526..90bc113681c 100644 --- a/src/backend/libpq/md5.c +++ b/src/backend/libpq/md5.c @@ -2,7 +2,7 @@ * md5.c * * Implements the MD5 Message-Digest Algorithm as specified in - * RFC 1321. This implementation is a simple one, in that it + * RFC 1321. This implementation is a simple one, in that it * needs every input byte to be buffered before doing any * calculations. I do not expect this file to be used for * general purpose MD5'ing of large amounts of data, only for diff --git a/src/backend/libpq/pqcomm.c b/src/backend/libpq/pqcomm.c index 0179451f080..605d8913b16 100644 --- a/src/backend/libpq/pqcomm.c +++ b/src/backend/libpq/pqcomm.c @@ -447,7 +447,7 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber, /* * Note: This might fail on some OS's, like Linux older than * 2.4.21-pre3, that don't have the IPV6_V6ONLY socket option, and map - * ipv4 addresses to ipv6. It will show ::ffff:ipv4 for all ipv4 + * ipv4 addresses to ipv6. It will show ::ffff:ipv4 for all ipv4 * connections. */ err = bind(fd, addr->ai_addr, addr->ai_addrlen); @@ -692,6 +692,7 @@ StreamConnection(pgsocket server_fd, Port *port) } #ifdef WIN32 + /* * This is a Win32 socket optimization. The ideal size is 32k. * http://support.microsoft.com/kb/823764/EN-US/ @@ -1126,7 +1127,7 @@ pq_getmessage(StringInfo s, int maxlen) if (len > 0) { /* - * Allocate space for message. If we run out of room (ridiculously + * Allocate space for message. If we run out of room (ridiculously * large message), we will elog(ERROR), but we want to discard the * message body so as not to lose communication sync. */ diff --git a/src/backend/libpq/pqformat.c b/src/backend/libpq/pqformat.c index ba9598a8c63..dfe3a646a1b 100644 --- a/src/backend/libpq/pqformat.c +++ b/src/backend/libpq/pqformat.c @@ -120,7 +120,7 @@ pq_sendbytes(StringInfo buf, const char *data, int datalen) * pq_sendcountedtext - append a counted text string (with character set conversion) * * The data sent to the frontend by this routine is a 4-byte count field - * followed by the string. 
The count includes itself or not, as per the + * followed by the string. The count includes itself or not, as per the * countincludesself flag (pre-3.0 protocol requires it to include itself). * The passed text string need not be null-terminated, and the data sent * to the frontend isn't either. diff --git a/src/backend/main/main.c b/src/backend/main/main.c index 1b9cbd1de36..4a563741e91 100644 --- a/src/backend/main/main.c +++ b/src/backend/main/main.c @@ -69,7 +69,7 @@ main(int argc, char *argv[]) /* * Remember the physical location of the initially given argv[] array for - * possible use by ps display. On some platforms, the argv[] storage must + * possible use by ps display. On some platforms, the argv[] storage must * be overwritten in order to set the process title for ps. In such cases * save_ps_display_args makes and returns a new copy of the argv[] array. * @@ -98,10 +98,10 @@ main(int argc, char *argv[]) MemoryContextInit(); /* - * Set up locale information from environment. Note that LC_CTYPE and + * Set up locale information from environment. Note that LC_CTYPE and * LC_COLLATE will be overridden later from pg_control if we are in an * already-initialized database. We set them here so that they will be - * available to fill pg_control during initdb. LC_MESSAGES will get set + * available to fill pg_control during initdb. LC_MESSAGES will get set * later during GUC option processing, but we set it here to allow startup * error messages to be localized. */ @@ -109,6 +109,7 @@ main(int argc, char *argv[]) set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("postgres")); #ifdef WIN32 + /* * Windows uses codepages rather than the environment, so we work around * that by querying the environment explicitly first for LC_COLLATE and @@ -202,6 +203,7 @@ main(int argc, char *argv[]) #endif #ifdef WIN32 + /* * Start our win32 signal implementation * @@ -227,9 +229,9 @@ main(int argc, char *argv[]) /* - * Place platform-specific startup hacks here. This is the right + * Place platform-specific startup hacks here. This is the right * place to put code that must be executed early in the launch of any new - * server process. Note that this code will NOT be executed when a backend + * server process. Note that this code will NOT be executed when a backend * or sub-bootstrap process is forked, unless we are in a fork/exec * environment (ie EXEC_BACKEND is defined). * diff --git a/src/backend/nodes/bitmapset.c b/src/backend/nodes/bitmapset.c index 3a6d0fb236b..c927b7891f5 100644 --- a/src/backend/nodes/bitmapset.c +++ b/src/backend/nodes/bitmapset.c @@ -38,7 +38,7 @@ * where x's are unspecified bits. The two's complement negative is formed * by inverting all the bits and adding one. Inversion gives * yyyyyy01111 - * where each y is the inverse of the corresponding x. Incrementing gives + * where each y is the inverse of the corresponding x. Incrementing gives * yyyyyy10000 * and then ANDing with the original value gives * 00000010000 @@ -796,7 +796,7 @@ bms_join(Bitmapset *a, Bitmapset *b) /*---------- * bms_first_member - find and remove first member of a set * - * Returns -1 if set is empty. NB: set is destructively modified! + * Returns -1 if set is empty. NB: set is destructively modified! * * This is intended as support for iterating through the members of a set. 
* The typical pattern is diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c index 98ad91078ed..43530aa24a8 100644 --- a/src/backend/nodes/copyfuncs.c +++ b/src/backend/nodes/copyfuncs.c @@ -4,7 +4,7 @@ * Copy functions for Postgres tree nodes. * * NOTE: we currently support copying all node types found in parse and - * plan trees. We do not support copying executor state trees; there + * plan trees. We do not support copying executor state trees; there * is no need for that, and no point in maintaining all the code that * would be needed. We also do not support copying Path trees, mainly * because the circular linkages between RelOptInfo and Path nodes can't @@ -30,7 +30,7 @@ /* * Macros to simplify copying of different kinds of fields. Use these - * wherever possible to reduce the chance for silly typos. Note that these + * wherever possible to reduce the chance for silly typos. Note that these * hard-wire the convention that the local variables in a Copy routine are * named 'newnode' and 'from'. */ @@ -1038,7 +1038,7 @@ _copyIntoClause(const IntoClause *from) /* * We don't need a _copyExpr because Expr is an abstract supertype which - * should never actually get instantiated. Also, since it has no common + * should never actually get instantiated. Also, since it has no common * fields except NodeTag, there's no need for a helper routine to factor * out copying the common fields... */ @@ -3300,7 +3300,7 @@ _copyReplicaIdentityStmt(const ReplicaIdentityStmt *from) } static AlterSystemStmt * -_copyAlterSystemStmt(const AlterSystemStmt * from) +_copyAlterSystemStmt(const AlterSystemStmt *from) { AlterSystemStmt *newnode = makeNode(AlterSystemStmt); diff --git a/src/backend/nodes/equalfuncs.c b/src/backend/nodes/equalfuncs.c index 9901d231cdb..2407cb73a38 100644 --- a/src/backend/nodes/equalfuncs.c +++ b/src/backend/nodes/equalfuncs.c @@ -11,7 +11,7 @@ * be handled easily in a simple depth-first traversal. * * Currently, in fact, equal() doesn't know how to compare Plan trees - * either. This might need to be fixed someday. + * either. This might need to be fixed someday. * * NOTE: it is intentional that parse location fields (in nodes that have * one) are not compared. This is because we want, for example, a variable @@ -34,8 +34,8 @@ /* - * Macros to simplify comparison of different kinds of fields. Use these - * wherever possible to reduce the chance for silly typos. Note that these + * Macros to simplify comparison of different kinds of fields. Use these + * wherever possible to reduce the chance for silly typos. Note that these * hard-wire the convention that the local variables in an Equal routine are * named 'a' and 'b'. */ @@ -131,7 +131,7 @@ _equalIntoClause(const IntoClause *a, const IntoClause *b) /* * We don't need an _equalExpr because Expr is an abstract supertype which - * should never actually get instantiated. Also, since it has no common + * should never actually get instantiated. Also, since it has no common * fields except NodeTag, there's no need for a helper routine to factor * out comparing the common fields... */ @@ -764,9 +764,9 @@ static bool _equalPlaceHolderVar(const PlaceHolderVar *a, const PlaceHolderVar *b) { /* - * We intentionally do not compare phexpr. Two PlaceHolderVars with the + * We intentionally do not compare phexpr. Two PlaceHolderVars with the * same ID and levelsup should be considered equal even if the contained - * expressions have managed to mutate to different states. 
This will + * expressions have managed to mutate to different states. This will * happen during final plan construction when there are nested PHVs, since * the inner PHV will get replaced by a Param in some copies of the outer * PHV. Another way in which it can happen is that initplan sublinks @@ -1551,7 +1551,7 @@ _equalReplicaIdentityStmt(const ReplicaIdentityStmt *a, const ReplicaIdentityStm } static bool -_equalAlterSystemStmt(const AlterSystemStmt * a, const AlterSystemStmt * b) +_equalAlterSystemStmt(const AlterSystemStmt *a, const AlterSystemStmt *b) { COMPARE_NODE_FIELD(setstmt); diff --git a/src/backend/nodes/list.c b/src/backend/nodes/list.c index aebc5b60c29..f32124bedff 100644 --- a/src/backend/nodes/list.c +++ b/src/backend/nodes/list.c @@ -796,7 +796,7 @@ list_union_oid(const List *list1, const List *list2) * "intersection" if list1 is known unique beforehand. * * This variant works on lists of pointers, and determines list - * membership via equal(). Note that the list1 member will be pointed + * membership via equal(). Note that the list1 member will be pointed * to in the result. */ List * @@ -988,7 +988,7 @@ list_append_unique_oid(List *list, Oid datum) * via equal(). * * This is almost the same functionality as list_union(), but list1 is - * modified in-place rather than being copied. Note also that list2's cells + * modified in-place rather than being copied. Note also that list2's cells * are not inserted in list1, so the analogy to list_concat() isn't perfect. */ List * diff --git a/src/backend/nodes/makefuncs.c b/src/backend/nodes/makefuncs.c index 664670d82a8..da59c580b0e 100644 --- a/src/backend/nodes/makefuncs.c +++ b/src/backend/nodes/makefuncs.c @@ -535,7 +535,7 @@ makeDefElemExtended(char *nameSpace, char *name, Node *arg, * makeFuncCall - * * Initialize a FuncCall struct with the information every caller must - * supply. Any non-default parameters have to be inserted by the caller. + * supply. Any non-default parameters have to be inserted by the caller. */ FuncCall * makeFuncCall(List *name, List *args, int location) diff --git a/src/backend/nodes/nodeFuncs.c b/src/backend/nodes/nodeFuncs.c index 1e48a7f8890..5a98bfbc11e 100644 --- a/src/backend/nodes/nodeFuncs.c +++ b/src/backend/nodes/nodeFuncs.c @@ -239,7 +239,7 @@ exprType(const Node *expr) /* * exprTypmod - * returns the type-specific modifier of the expression's result type, - * if it can be determined. In many cases, it can't and we return -1. + * if it can be determined. In many cases, it can't and we return -1. */ int32 exprTypmod(const Node *expr) @@ -1543,8 +1543,8 @@ leftmostLoc(int loc1, int loc2) * * The walker routine should return "false" to continue the tree walk, or * "true" to abort the walk and immediately return "true" to the top-level - * caller. This can be used to short-circuit the traversal if the walker - * has found what it came for. "false" is returned to the top-level caller + * caller. This can be used to short-circuit the traversal if the walker + * has found what it came for. "false" is returned to the top-level caller * iff no invocation of the walker returned "true". * * The node types handled by expression_tree_walker include all those @@ -1582,7 +1582,7 @@ leftmostLoc(int loc1, int loc2) * * expression_tree_walker will handle SubPlan nodes by recursing normally * into the "testexpr" and the "args" list (which are expressions belonging to - * the outer plan). It will not touch the completed subplan, however. Since + * the outer plan). 
It will not touch the completed subplan, however. Since * there is no link to the original Query, it is not possible to recurse into * subselects of an already-planned expression tree. This is OK for current * uses, but may need to be revisited in future. @@ -2154,8 +2154,8 @@ expression_tree_mutator(Node *node, return (Node *) copyObject(node); case T_WithCheckOption: { - WithCheckOption *wco = (WithCheckOption *) node; - WithCheckOption *newnode; + WithCheckOption *wco = (WithCheckOption *) node; + WithCheckOption *newnode; FLATCOPY(newnode, wco, WithCheckOption); MUTATE(newnode->qual, wco->qual, Node *); @@ -2658,7 +2658,7 @@ expression_tree_mutator(Node *node, * This routine exists just to reduce the number of places that need to know * where all the expression subtrees of a Query are. Note it can be used * for starting a walk at top level of a Query regardless of whether the - * mutator intends to descend into subqueries. It is also useful for + * mutator intends to descend into subqueries. It is also useful for * descending into subqueries within a mutator. * * Some callers want to suppress mutating of certain items in the Query, @@ -2668,7 +2668,7 @@ expression_tree_mutator(Node *node, * indicated items. (More flag bits may be added as needed.) * * Normally the Query node itself is copied, but some callers want it to be - * modified in-place; they must pass QTW_DONT_COPY_QUERY in flags. All + * modified in-place; they must pass QTW_DONT_COPY_QUERY in flags. All * modified substructure is safely copied in any case. */ Query * diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c index 10e81391b13..11c74860070 100644 --- a/src/backend/nodes/outfuncs.c +++ b/src/backend/nodes/outfuncs.c @@ -13,7 +13,7 @@ * NOTES * Every node type that can appear in stored rules' parsetrees *must* * have an output function defined here (as well as an input function - * in readfuncs.c). For use in debugging, we also provide output + * in readfuncs.c). For use in debugging, we also provide output * functions for nodes that appear in raw parsetrees, path, and plan trees. * These nodes however need not have input functions. * @@ -30,8 +30,8 @@ /* - * Macros to simplify output of different kinds of fields. Use these - * wherever possible to reduce the chance for silly typos. Note that these + * Macros to simplify output of different kinds of fields. Use these + * wherever possible to reduce the chance for silly typos. Note that these * hard-wire conventions about the names of the local variables in an Out * routine. */ diff --git a/src/backend/nodes/params.c b/src/backend/nodes/params.c index 3916412dd16..b21d651f95b 100644 --- a/src/backend/nodes/params.c +++ b/src/backend/nodes/params.c @@ -27,7 +27,7 @@ * * Note: the intent of this function is to make a static, self-contained * set of parameter values. If dynamic parameter hooks are present, we - * intentionally do not copy them into the result. Rather, we forcibly + * intentionally do not copy them into the result. Rather, we forcibly * instantiate all available parameter values and copy the datum values. */ ParamListInfo diff --git a/src/backend/nodes/read.c b/src/backend/nodes/read.c index 7a88a0d46a3..2c0edff6c1b 100644 --- a/src/backend/nodes/read.c +++ b/src/backend/nodes/read.c @@ -85,21 +85,21 @@ stringToNode(char *str) * Backslashes themselves must also be backslashed for consistency. * Any other character can be, but need not be, backslashed as well. 
* * If the resulting token is '<>' (with no backslash), it is returned - * as a non-NULL pointer to the token but with length == 0. Note that + * as a non-NULL pointer to the token but with length == 0. Note that * there is no other way to get a zero-length token. * * Returns a pointer to the start of the next token, and the length of the - * token (including any embedded backslashes!) in *length. If there are + * token (including any embedded backslashes!) in *length. If there are * no more tokens, NULL and 0 are returned. * * NOTE: this routine doesn't remove backslashes; the caller must do so * if necessary (see "debackslash"). * * NOTE: prior to release 7.0, this routine also had a special case to treat - * a token starting with '"' as extending to the next '"'. This code was + * a token starting with '"' as extending to the next '"'. This code was * broken, however, since it would fail to cope with a string containing an * embedded '"'. I have therefore removed this special case, and instead - * introduced rules for using backslashes to quote characters. Higher-level + * introduced rules for using backslashes to quote characters. Higher-level * code should add backslashes to a string constant to ensure it is treated * as a single token. */ @@ -259,7 +259,7 @@ nodeTokenType(char *token, int length) * Slightly higher-level reader. * * This routine applies some semantic knowledge on top of the purely - * lexical tokenizer pg_strtok(). It can read + * lexical tokenizer pg_strtok(). It can read * * Value token nodes (integers, floats, or strings); * * General nodes (via parseNodeString() from readfuncs.c); * * Lists of the above; diff --git a/src/backend/nodes/readfuncs.c b/src/backend/nodes/readfuncs.c index ef1eae91bf7..1ec4f3c6956 100644 --- a/src/backend/nodes/readfuncs.c +++ b/src/backend/nodes/readfuncs.c @@ -12,7 +12,7 @@ * * NOTES * Path and Plan nodes do not have any readfuncs support, because we - * never have occasion to read them in. (There was once code here that + * never have occasion to read them in. (There was once code here that * claimed to read them, but it was broken as well as unused.) We * never read executor state trees, either. * @@ -34,7 +34,7 @@ /* * Macros to simplify reading of different kinds of fields. Use these - * wherever possible to reduce the chance for silly typos. Note that these + * wherever possible to reduce the chance for silly typos. Note that these * hard-wire conventions about the names of the local variables in a Read * routine. */ @@ -130,7 +130,7 @@ /* * NOTE: use atoi() to read values written with %d, or atoui() to read * values written with %u in outfuncs.c. An exception is OID values, - * for which use atooid(). (As of 7.1, outfuncs.c writes OIDs as %u, + * for which use atooid(). (As of 7.1, outfuncs.c writes OIDs as %u, * but this will probably change in the future.) */ #define atoui(x) ((unsigned int) strtoul((x), NULL, 10)) @@ -601,7 +601,7 @@ _readOpExpr(void) /* * The opfuncid is stored in the textual format primarily for debugging * and documentation reasons. We want to always read it as zero to force - * it to be re-looked-up in the pg_operator entry. This ensures that + * it to be re-looked-up in the pg_operator entry. This ensures that * stored rules don't have hidden dependencies on operators' functions. * (We don't currently support an ALTER OPERATOR command, but might * someday.) @@ -632,7 +632,7 @@ _readDistinctExpr(void) /* * The opfuncid is stored in the textual format primarily for debugging * and documentation reasons. 
We want to always read it as zero to force - * it to be re-looked-up in the pg_operator entry. This ensures that + * it to be re-looked-up in the pg_operator entry. This ensures that * stored rules don't have hidden dependencies on operators' functions. * (We don't currently support an ALTER OPERATOR command, but might * someday.) @@ -663,7 +663,7 @@ _readNullIfExpr(void) /* * The opfuncid is stored in the textual format primarily for debugging * and documentation reasons. We want to always read it as zero to force - * it to be re-looked-up in the pg_operator entry. This ensures that + * it to be re-looked-up in the pg_operator entry. This ensures that * stored rules don't have hidden dependencies on operators' functions. * (We don't currently support an ALTER OPERATOR command, but might * someday.) @@ -694,7 +694,7 @@ _readScalarArrayOpExpr(void) /* * The opfuncid is stored in the textual format primarily for debugging * and documentation reasons. We want to always read it as zero to force - * it to be re-looked-up in the pg_operator entry. This ensures that + * it to be re-looked-up in the pg_operator entry. This ensures that * stored rules don't have hidden dependencies on operators' functions. * (We don't currently support an ALTER OPERATOR command, but might * someday.) diff --git a/src/backend/nodes/tidbitmap.c b/src/backend/nodes/tidbitmap.c index df3ae93b1dc..a880c81cf1c 100644 --- a/src/backend/nodes/tidbitmap.c +++ b/src/backend/nodes/tidbitmap.c @@ -19,7 +19,7 @@ * of lossiness. In theory we could fall back to page ranges at some * point, but for now that seems useless complexity. * - * We also support the notion of candidate matches, or rechecking. This + * We also support the notion of candidate matches, or rechecking. This * means we know that a search need visit only some tuples on a page, * but we are not certain that all of those tuples are real matches. * So the eventual heap scan must recheck the quals for these tuples only, @@ -48,7 +48,7 @@ /* * The maximum number of tuples per page is not large (typically 256 with * 8K pages, or 1024 with 32K pages). So there's not much point in making - * the per-page bitmaps variable size. We just legislate that the size + * the per-page bitmaps variable size. We just legislate that the size * is this: */ #define MAX_TUPLES_PER_PAGE MaxHeapTuplesPerPage @@ -61,10 +61,10 @@ * for that page in the page table. * * We actually store both exact pages and lossy chunks in the same hash - * table, using identical data structures. (This is because dynahash.c's + * table, using identical data structures. (This is because dynahash.c's * memory management doesn't allow space to be transferred easily from one * hashtable to another.) Therefore it's best if PAGES_PER_CHUNK is the - * same as MAX_TUPLES_PER_PAGE, or at least not too different. But we + * same as MAX_TUPLES_PER_PAGE, or at least not too different. But we * also want PAGES_PER_CHUNK to be a power of 2 to avoid expensive integer * remainder operations. So, define it like this: */ @@ -142,7 +142,7 @@ struct TIDBitmap /* * When iterating over a bitmap in sorted order, a TBMIterator is used to - * track our progress. There can be several iterators scanning the same + * track our progress. There can be several iterators scanning the same * bitmap concurrently. Note that the bitmap becomes read-only as soon as * any iterator is created. */ @@ -790,7 +790,7 @@ tbm_find_pageentry(const TIDBitmap *tbm, BlockNumber pageno) * * If new, the entry is marked as an exact (non-chunk) entry. 
* - * This may cause the table to exceed the desired memory size. It is + * This may cause the table to exceed the desired memory size. It is * up to the caller to call tbm_lossify() at the next safe point if so. */ static PagetableEntry * @@ -870,7 +870,7 @@ tbm_page_is_lossy(const TIDBitmap *tbm, BlockNumber pageno) /* * tbm_mark_page_lossy - mark the page number as lossily stored * - * This may cause the table to exceed the desired memory size. It is + * This may cause the table to exceed the desired memory size. It is * up to the caller to call tbm_lossify() at the next safe point if so. */ static void @@ -891,7 +891,7 @@ tbm_mark_page_lossy(TIDBitmap *tbm, BlockNumber pageno) chunk_pageno = pageno - bitno; /* - * Remove any extant non-lossy entry for the page. If the page is its own + * Remove any extant non-lossy entry for the page. If the page is its own * chunk header, however, we skip this and handle the case below. */ if (bitno != 0) @@ -956,7 +956,7 @@ tbm_lossify(TIDBitmap *tbm) * * Since we are called as soon as nentries exceeds maxentries, we should * push nentries down to significantly less than maxentries, or else we'll - * just end up doing this again very soon. We shoot for maxentries/2. + * just end up doing this again very soon. We shoot for maxentries/2. */ Assert(!tbm->iterating); Assert(tbm->status == TBM_HASH); diff --git a/src/backend/optimizer/geqo/geqo_eval.c b/src/backend/optimizer/geqo/geqo_eval.c index 6ceb090e855..de2a6709dd3 100644 --- a/src/backend/optimizer/geqo/geqo_eval.c +++ b/src/backend/optimizer/geqo/geqo_eval.c @@ -82,11 +82,11 @@ geqo_eval(PlannerInfo *root, Gene *tour, int num_gene) * not already contain some entries. The newly added entries will be * recycled by the MemoryContextDelete below, so we must ensure that the * list is restored to its former state before exiting. We can do this by - * truncating the list to its original length. NOTE this assumes that any + * truncating the list to its original length. NOTE this assumes that any * added entries are appended at the end! * * We also must take care not to mess up the outer join_rel_hash, if there - * is one. We can do this by just temporarily setting the link to NULL. + * is one. We can do this by just temporarily setting the link to NULL. * (If we are dealing with enough join rels, which we very likely are, a * new hash table will get built and used locally.) * @@ -217,7 +217,7 @@ gimme_tree(PlannerInfo *root, Gene *tour, int num_gene) * Merge a "clump" into the list of existing clumps for gimme_tree. * * We try to merge the clump into some existing clump, and repeat if - * successful. When no more merging is possible, insert the clump + * successful. When no more merging is possible, insert the clump * into the list, preserving the list ordering rule (namely, that * clumps of larger size appear earlier). * @@ -268,7 +268,7 @@ merge_clump(PlannerInfo *root, List *clumps, Clump *new_clump, bool force) /* * Recursively try to merge the enlarged old_clump with - * others. When no further merge is possible, we'll reinsert + * others. When no further merge is possible, we'll reinsert * it into the list. */ return merge_clump(root, clumps, old_clump, force); @@ -279,7 +279,7 @@ merge_clump(PlannerInfo *root, List *clumps, Clump *new_clump, bool force) /* * No merging is possible, so add new_clump as an independent clump, in - * proper order according to size. We can be fast for the common case + * proper order according to size. 
We can be fast for the common case * where it has size 1 --- it should always go at the end. */ if (clumps == NIL || new_clump->size == 1) diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c index 5777cb2ff0c..41eaa2653ac 100644 --- a/src/backend/optimizer/path/allpaths.c +++ b/src/backend/optimizer/path/allpaths.c @@ -425,7 +425,7 @@ set_foreign_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte) * set_append_rel_size * Set size estimates for an "append relation" * - * The passed-in rel and RTE represent the entire append relation. The + * The passed-in rel and RTE represent the entire append relation. The * relation's contents are computed by appending together the output of * the individual member relations. Note that in the inheritance case, * the first member relation is actually the same table as is mentioned in @@ -489,7 +489,7 @@ set_append_rel_size(PlannerInfo *root, RelOptInfo *rel, /* * We have to copy the parent's targetlist and quals to the child, - * with appropriate substitution of variables. However, only the + * with appropriate substitution of variables. However, only the * baserestrictinfo quals are needed before we can check for * constraint exclusion; so do that first and then check to see if we * can disregard this child. @@ -553,7 +553,7 @@ set_append_rel_size(PlannerInfo *root, RelOptInfo *rel, /* * We have to make child entries in the EquivalenceClass data - * structures as well. This is needed either if the parent + * structures as well. This is needed either if the parent * participates in some eclass joins (because we will want to consider * inner-indexscan joins on the individual children) or if the parent * has useful pathkeys (because we should try to build MergeAppend @@ -594,7 +594,7 @@ set_append_rel_size(PlannerInfo *root, RelOptInfo *rel, /* * Accumulate per-column estimates too. We need not do anything - * for PlaceHolderVars in the parent list. If child expression + * for PlaceHolderVars in the parent list. If child expression * isn't a Var, or we didn't record a width estimate for it, we * have to fall back on a datatype-based estimate. * @@ -670,7 +670,7 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, /* * Generate access paths for each member relation, and remember the - * cheapest path for each one. Also, identify all pathkeys (orderings) + * cheapest path for each one. Also, identify all pathkeys (orderings) * and parameterizations (required_outer sets) available for the member * relations. */ @@ -720,7 +720,7 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, /* * Collect lists of all the available path orderings and - * parameterizations for all the children. We use these as a + * parameterizations for all the children. We use these as a * heuristic to indicate which sort orderings and parameterizations we * should build Append and MergeAppend paths for. */ @@ -806,7 +806,7 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, * so that not that many cases actually get considered here.) * * The Append node itself cannot enforce quals, so all qual checking must - * be done in the child paths. This means that to have a parameterized + * be done in the child paths. This means that to have a parameterized * Append path, we must have the exact same parameterization for each * child path; otherwise some children might be failing to check the * moved-down quals. 
To make them match up, we can try to increase the @@ -977,7 +977,7 @@ get_cheapest_parameterized_child_path(PlannerInfo *root, RelOptInfo *rel, * joinquals to be checked within the path's scan. However, some existing * paths might check the available joinquals already while others don't; * therefore, it's not clear which existing path will be cheapest after - * reparameterization. We have to go through them all and find out. + * reparameterization. We have to go through them all and find out. */ cheapest = NULL; foreach(lc, rel->pathlist) @@ -1103,7 +1103,7 @@ has_multiple_baserels(PlannerInfo *root) * * We don't currently support generating parameterized paths for subqueries * by pushing join clauses down into them; it seems too expensive to re-plan - * the subquery multiple times to consider different alternatives. So the + * the subquery multiple times to consider different alternatives. So the * subquery will have exactly one path. (The path will be parameterized * if the subquery contains LATERAL references, otherwise not.) Since there's * no freedom of action here, there's no need for a separate set_subquery_size @@ -1560,7 +1560,7 @@ make_rel_from_joinlist(PlannerInfo *root, List *joinlist) * independent jointree items in the query. This is > 1. * * 'initial_rels' is a list of RelOptInfo nodes for each independent - * jointree item. These are the components to be joined together. + * jointree item. These are the components to be joined together. * Note that levels_needed == list_length(initial_rels). * * Returns the final level of join relations, i.e., the relation that is @@ -1576,7 +1576,7 @@ make_rel_from_joinlist(PlannerInfo *root, List *joinlist) * needed for these paths need have been instantiated. * * Note to plugin authors: the functions invoked during standard_join_search() - * modify root->join_rel_list and root->join_rel_hash. If you want to do more + * modify root->join_rel_list and root->join_rel_hash. If you want to do more * than one join-order search, you'll probably need to save and restore the * original states of those data structures. See geqo_eval() for an example. */ @@ -1675,7 +1675,7 @@ standard_join_search(PlannerInfo *root, int levels_needed, List *initial_rels) * column k is found to be unsafe to reference, we set unsafeColumns[k] to * TRUE, but we don't reject the subquery overall since column k might * not be referenced by some/all quals. The unsafeColumns[] array will be - * consulted later by qual_is_pushdown_safe(). It's better to do it this + * consulted later by qual_is_pushdown_safe(). It's better to do it this * way than to make the checks directly in qual_is_pushdown_safe(), because * when the subquery involves set operations we have to check the output * expressions in each arm of the set op. @@ -1768,7 +1768,7 @@ recurse_pushdown_safe(Node *setOp, Query *topquery, * check_output_expressions - check subquery's output expressions for safety * * There are several cases in which it's unsafe to push down an upper-level - * qual if it references a particular output column of a subquery. We check + * qual if it references a particular output column of a subquery. We check * each output column of the subquery and set unsafeColumns[k] to TRUE if * that column is unsafe for a pushed-down qual to reference. The conditions * checked here are: @@ -1786,7 +1786,7 @@ recurse_pushdown_safe(Node *setOp, Query *topquery, * of rows returned. (This condition is vacuous for DISTINCT, because then * there are no non-DISTINCT output columns, so we needn't check. 
But note * we are assuming that the qual can't distinguish values that the DISTINCT - * operator sees as equal. This is a bit shaky but we have no way to test + * operator sees as equal. This is a bit shaky but we have no way to test * for the case, and it's unlikely enough that we shouldn't refuse the * optimization just because it could theoretically happen.) */ @@ -1903,7 +1903,7 @@ qual_is_pushdown_safe(Query *subquery, Index rti, Node *qual, /* * It would be unsafe to push down window function calls, but at least for - * the moment we could never see any in a qual anyhow. (The same applies + * the moment we could never see any in a qual anyhow. (The same applies * to aggregates, which we check for in pull_var_clause below.) */ Assert(!contain_window_function(qual)); diff --git a/src/backend/optimizer/path/clausesel.c b/src/backend/optimizer/path/clausesel.c index efeea374c27..9b657fb21fd 100644 --- a/src/backend/optimizer/path/clausesel.c +++ b/src/backend/optimizer/path/clausesel.c @@ -58,7 +58,7 @@ static void addRangeClause(RangeQueryClause **rqlist, Node *clause, * See clause_selectivity() for the meaning of the additional parameters. * * Our basic approach is to take the product of the selectivities of the - * subclauses. However, that's only right if the subclauses have independent + * subclauses. However, that's only right if the subclauses have independent * probabilities, and in reality they are often NOT independent. So, * we want to be smarter where we can. @@ -75,12 +75,12 @@ static void addRangeClause(RangeQueryClause **rqlist, Node *clause, * see that hisel is the fraction of the range below the high bound, while * losel is the fraction above the low bound; so hisel can be interpreted * directly as a 0..1 value but we need to convert losel to 1-losel before - * interpreting it as a value. Then the available range is 1-losel to hisel. + * interpreting it as a value. Then the available range is 1-losel to hisel. * However, this calculation double-excludes nulls, so really we need * hisel + losel + null_frac - 1.) * * If either selectivity is exactly DEFAULT_INEQ_SEL, we forget this equation - * and instead use DEFAULT_RANGE_INEQ_SEL. The same applies if the equation + * and instead use DEFAULT_RANGE_INEQ_SEL. The same applies if the equation * yields an impossible (negative) result. * * A free side-effect is that we can recognize redundant inequalities such @@ -174,7 +174,7 @@ clauselist_selectivity(PlannerInfo *root, { /* * If it's not a "<" or ">" operator, just merge the - * selectivity in generically. But if it's the right oprrest, + * selectivity in generically. But if it's the right oprrest, * add the clause to rqlist for later processing. */ switch (get_oprrest(expr->opno)) @@ -459,14 +459,14 @@ treat_as_join_clause(Node *clause, RestrictInfo *rinfo, * nestloop join's inner relation --- varRelid should then be the ID of the * inner relation. * - * When varRelid is 0, all variables are treated as variables. This + * When varRelid is 0, all variables are treated as variables. This * is appropriate for ordinary join clauses and restriction clauses. * * jointype is the join type, if the clause is a join clause. Pass JOIN_INNER * if the clause isn't a join clause. * * sjinfo is NULL for a non-join clause, otherwise it provides additional - * context information about the join being performed. There are some + * context information about the join being performed. There are some * special cases: * 1. 
For a special (not INNER) join, sjinfo is always a member of * root->join_info_list. @@ -501,7 +501,7 @@ clause_selectivity(PlannerInfo *root, /* * If the clause is marked pseudoconstant, then it will be used as a * gating qual and should not affect selectivity estimates; hence - * return 1.0. The only exception is that a constant FALSE may be + * return 1.0. The only exception is that a constant FALSE may be * taken as having selectivity 0.0, since it will surely mean no rows * out of the plan. This case is simple enough that we need not * bother caching the result. @@ -520,11 +520,11 @@ clause_selectivity(PlannerInfo *root, /* * If possible, cache the result of the selectivity calculation for - * the clause. We can cache if varRelid is zero or the clause + * the clause. We can cache if varRelid is zero or the clause * contains only vars of that relid --- otherwise varRelid will affect * the result, so mustn't cache. Outer join quals might be examined * with either their join's actual jointype or JOIN_INNER, so we need - * two cache variables to remember both cases. Note: we assume the + * two cache variables to remember both cases. Note: we assume the * result won't change if we are switching the input relations or * considering a unique-ified case, so we only need one cache variable * for all non-JOIN_INNER cases. @@ -685,7 +685,7 @@ clause_selectivity(PlannerInfo *root, /* * This is not an operator, so we guess at the selectivity. THIS IS A * HACK TO GET V4 OUT THE DOOR. FUNCS SHOULD BE ABLE TO HAVE - * SELECTIVITIES THEMSELVES. -- JMH 7/9/92 + * SELECTIVITIES THEMSELVES. -- JMH 7/9/92 */ s1 = (Selectivity) 0.3333333; } diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c index 326794acb85..848065ee7b2 100644 --- a/src/backend/optimizer/path/costsize.c +++ b/src/backend/optimizer/path/costsize.c @@ -24,7 +24,7 @@ * * Obviously, taking constants for these values is an oversimplification, * but it's tough enough to get any useful estimates even at this level of - * detail. Note that all of these parameters are user-settable, in case + * detail. Note that all of these parameters are user-settable, in case * the default values are drastically off for a particular platform. * * seq_page_cost and random_page_cost can also be overridden for an individual @@ -493,7 +493,7 @@ cost_index(IndexPath *path, PlannerInfo *root, double loop_count) * computed for us by query_planner. * * Caller is expected to have ensured that tuples_fetched is greater than zero - * and rounded to integer (see clamp_row_est). The result will likewise be + * and rounded to integer (see clamp_row_est). The result will likewise be * greater than zero and integral. */ double @@ -694,7 +694,7 @@ cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel, /* * For small numbers of pages we should charge spc_random_page_cost * apiece, while if nearly all the table's pages are being read, it's more - * appropriate to charge spc_seq_page_cost apiece. The effect is + * appropriate to charge spc_seq_page_cost apiece. The effect is * nonlinear, too. For lack of a better idea, interpolate like this to * determine the cost per page. */ @@ -769,7 +769,7 @@ cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec) * Estimate the cost of a BitmapAnd node * * Note that this considers only the costs of index scanning and bitmap - * creation, not the eventual heap access. In that sense the object isn't + * creation, not the eventual heap access. 
In that sense the object isn't * truly a Path, but it has enough path-like properties (costs in particular) * to warrant treating it as one. We don't bother to set the path rows field, * however. @@ -828,7 +828,7 @@ cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root) /* * We estimate OR selectivity on the assumption that the inputs are * non-overlapping, since that's often the case in "x IN (list)" type - * situations. Of course, we clamp to 1.0 at the end. + * situations. Of course, we clamp to 1.0 at the end. * * The runtime cost of the BitmapOr itself is estimated at 100x * cpu_operator_cost for each tbm_union needed. Probably too small, @@ -917,7 +917,7 @@ cost_tidscan(Path *path, PlannerInfo *root, /* * We must force TID scan for WHERE CURRENT OF, because only nodeTidscan.c - * understands how to do it correctly. Therefore, honor enable_tidscan + * understands how to do it correctly. Therefore, honor enable_tidscan * only when CURRENT OF isn't present. Also note that cost_qual_eval * counts a CurrentOfExpr as having startup cost disable_cost, which we * subtract off here; that's to prevent other plan types such as seqscan @@ -1036,7 +1036,7 @@ cost_functionscan(Path *path, PlannerInfo *root, * * Currently, nodeFunctionscan.c always executes the functions to * completion before returning any rows, and caches the results in a - * tuplestore. So the function eval cost is all startup cost, and per-row + * tuplestore. So the function eval cost is all startup cost, and per-row * costs are minimal. * * XXX in principle we ought to charge tuplestore spill costs if the @@ -1108,7 +1108,7 @@ cost_valuesscan(Path *path, PlannerInfo *root, * * Note: this is used for both self-reference and regular CTEs; the * possible cost differences are below the threshold of what we could - * estimate accurately anyway. Note that the costs of evaluating the + * estimate accurately anyway. Note that the costs of evaluating the * referenced CTE query are added into the final plan as initplan costs, * and should NOT be counted here. */ @@ -1202,7 +1202,7 @@ cost_recursive_union(Plan *runion, Plan *nrterm, Plan *rterm) * If the total volume exceeds sort_mem, we switch to a tape-style merge * algorithm. There will still be about t*log2(t) tuple comparisons in * total, but we will also need to write and read each tuple once per - * merge pass. We expect about ceil(logM(r)) merge passes where r is the + * merge pass. We expect about ceil(logM(r)) merge passes where r is the * number of initial runs formed and M is the merge order used by tuplesort.c. * Since the average initial run should be about twice sort_mem, we have * disk traffic = 2 * relsize * ceil(logM(p / (2*sort_mem))) @@ -1216,7 +1216,7 @@ cost_recursive_union(Plan *runion, Plan *nrterm, Plan *rterm) * accesses (XXX can't we refine that guess?) * * By default, we charge two operator evals per tuple comparison, which should - * be in the right ballpark in most cases. The caller can tweak this by + * be in the right ballpark in most cases. The caller can tweak this by * specifying nonzero comparison_cost; typically that's used for any extra * work that has to be done to prepare the inputs to the comparison operators. * @@ -1340,7 +1340,7 @@ cost_sort(Path *path, PlannerInfo *root, * Determines and returns the cost of a MergeAppend node. * * MergeAppend merges several pre-sorted input streams, using a heap that - * at any given instant holds the next tuple from each stream. If there + * at any given instant holds the next tuple from each stream. 
If there * are N streams, we need about N*log2(N) tuple comparisons to construct * the heap at startup, and then for each output tuple, about log2(N) * comparisons to delete the top heap entry and another log2(N) comparisons @@ -1499,7 +1499,7 @@ cost_agg(Path *path, PlannerInfo *root, * group otherwise. We charge cpu_tuple_cost for each output tuple. * * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the - * same total CPU cost, but AGG_SORTED has lower startup cost. If the + * same total CPU cost, but AGG_SORTED has lower startup cost. If the * input path is already sorted appropriately, AGG_SORTED should be * preferred (since it has no risk of memory overflow). This will happen * as long as the computed total costs are indeed exactly equal --- but if @@ -2107,10 +2107,10 @@ initial_cost_mergejoin(PlannerInfo *root, JoinCostWorkspace *workspace, * Unlike other costsize functions, this routine makes one actual decision: * whether we should materialize the inner path. We do that either because * the inner path can't support mark/restore, or because it's cheaper to - * use an interposed Material node to handle mark/restore. When the decision + * use an interposed Material node to handle mark/restore. When the decision * is cost-based it would be logically cleaner to build and cost two separate * paths with and without that flag set; but that would require repeating most - * of the cost calculations, which are not all that cheap. Since the choice + * of the cost calculations, which are not all that cheap. Since the choice * will not affect output pathkeys or startup cost, only total cost, there is * no possibility of wanting to keep both paths. So it seems best to make * the decision here and record it in the path's materialize_inner field. @@ -2174,7 +2174,7 @@ final_cost_mergejoin(PlannerInfo *root, MergePath *path, qp_qual_cost.per_tuple -= merge_qual_cost.per_tuple; /* - * Get approx # tuples passing the mergequals. We use approx_tuple_count + * Get approx # tuples passing the mergequals. We use approx_tuple_count * here because we need an estimate done with JOIN_INNER semantics. */ mergejointuples = approx_tuple_count(root, &path->jpath, mergeclauses); @@ -2188,7 +2188,7 @@ final_cost_mergejoin(PlannerInfo *root, MergePath *path, * estimated approximately as size of merge join output minus size of * inner relation. Assume that the distinct key values are 1, 2, ..., and * denote the number of values of each key in the outer relation as m1, - * m2, ...; in the inner relation, n1, n2, ... Then we have + * m2, ...; in the inner relation, n1, n2, ... Then we have * * size of join = m1 * n1 + m2 * n2 + ... * @@ -2199,7 +2199,7 @@ final_cost_mergejoin(PlannerInfo *root, MergePath *path, * This equation works correctly for outer tuples having no inner match * (nk = 0), but not for inner tuples having no outer match (mk = 0); we * are effectively subtracting those from the number of rescanned tuples, - * when we should not. Can we do better without expensive selectivity + * when we should not. Can we do better without expensive selectivity * computations? * * The whole issue is moot if we are working from a unique-ified outer @@ -2219,7 +2219,7 @@ final_cost_mergejoin(PlannerInfo *root, MergePath *path, /* * Decide whether we want to materialize the inner input to shield it from - * mark/restore and performing re-fetches. Our cost model for regular + * mark/restore and performing re-fetches. 
Our cost model for regular * re-fetches is that a re-fetch costs the same as an original fetch, * which is probably an overestimate; but on the other hand we ignore the * bookkeeping costs of mark/restore. Not clear if it's worth developing @@ -2312,7 +2312,7 @@ final_cost_mergejoin(PlannerInfo *root, MergePath *path, /* * For each tuple that gets through the mergejoin proper, we charge * cpu_tuple_cost plus the cost of evaluating additional restriction - * clauses that are to be applied at the join. (This is pessimistic since + * clauses that are to be applied at the join. (This is pessimistic since * not all of the quals may get evaluated at each tuple.) * * Note: we could adjust for SEMI/ANTI joins skipping some qual @@ -2464,7 +2464,7 @@ initial_cost_hashjoin(PlannerInfo *root, JoinCostWorkspace *workspace, * If inner relation is too big then we will need to "batch" the join, * which implies writing and reading most of the tuples to disk an extra * time. Charge seq_page_cost per page, since the I/O should be nice and - * sequential. Writing the inner rel counts as startup cost, all the rest + * sequential. Writing the inner rel counts as startup cost, all the rest * as run cost. */ if (numbatches > 1) @@ -2695,7 +2695,7 @@ final_cost_hashjoin(PlannerInfo *root, HashPath *path, /* * For each tuple that gets through the hashjoin proper, we charge * cpu_tuple_cost plus the cost of evaluating additional restriction - * clauses that are to be applied at the join. (This is pessimistic since + * clauses that are to be applied at the join. (This is pessimistic since * not all of the quals may get evaluated at each tuple.) */ startup_cost += qp_qual_cost.startup; @@ -2748,7 +2748,7 @@ cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan) { /* * Otherwise we will be rescanning the subplan output on each - * evaluation. We need to estimate how much of the output we will + * evaluation. We need to estimate how much of the output we will * actually need to scan. NOTE: this logic should agree with the * tuple_fraction estimates used by make_subplan() in * plan/subselect.c. @@ -2796,10 +2796,10 @@ cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan) /* * cost_rescan * Given a finished Path, estimate the costs of rescanning it after - * having done so the first time. For some Path types a rescan is + * having done so the first time. For some Path types a rescan is * cheaper than an original scan (if no parameters change), and this * function embodies knowledge about that. The default is to return - * the same costs stored in the Path. (Note that the cost estimates + * the same costs stored in the Path. (Note that the cost estimates * actually stored in Paths are always for first scans.) * * This function is not currently intended to model effects such as rescans @@ -2840,7 +2840,7 @@ cost_rescan(PlannerInfo *root, Path *path, { /* * These plan types materialize their final result in a - * tuplestore or tuplesort object. So the rescan cost is only + * tuplestore or tuplesort object. So the rescan cost is only * cpu_tuple_cost per tuple, unless the result is large enough * to spill to disk. */ @@ -2865,8 +2865,8 @@ cost_rescan(PlannerInfo *root, Path *path, { /* * These plan types not only materialize their results, but do - * not implement qual filtering or projection. So they are - * even cheaper to rescan than the ones above. We charge only + * not implement qual filtering or projection. So they are + * even cheaper to rescan than the ones above. 
We charge only * cpu_operator_cost per tuple. (Note: keep that in sync with * the run_cost charge in cost_sort, and also see comments in * cost_material before you change it.) @@ -3007,7 +3007,7 @@ cost_qual_eval_walker(Node *node, cost_qual_eval_context *context) * evaluation of AND/OR? Probably *not*, because that would make the * results depend on the clause ordering, and we are not in any position * to expect that the current ordering of the clauses is the one that's - * going to end up being used. The above per-RestrictInfo caching would + * going to end up being used. The above per-RestrictInfo caching would * not mix well with trying to re-order clauses anyway. * * Another issue that is entirely ignored here is that if a set-returning @@ -3129,7 +3129,7 @@ cost_qual_eval_walker(Node *node, cost_qual_eval_context *context) else if (IsA(node, AlternativeSubPlan)) { /* - * Arbitrarily use the first alternative plan for costing. (We should + * Arbitrarily use the first alternative plan for costing. (We should * certainly only include one alternative, and we don't yet have * enough information to know which one the executor is most likely to * use.) @@ -3273,13 +3273,13 @@ compute_semi_anti_join_factors(PlannerInfo *root, /* * jselec can be interpreted as the fraction of outer-rel rows that have * any matches (this is true for both SEMI and ANTI cases). And nselec is - * the fraction of the Cartesian product that matches. So, the average + * the fraction of the Cartesian product that matches. So, the average * number of matches for each outer-rel row that has at least one match is * nselec * inner_rows / jselec. * * Note: it is correct to use the inner rel's "rows" count here, even * though we might later be considering a parameterized inner path with - * fewer rows. This is because we have included all the join clauses in + * fewer rows. This is because we have included all the join clauses in * the selectivity estimate. */ if (jselec > 0) /* protect against zero divide */ @@ -3607,7 +3607,7 @@ calc_joinrel_size_estimate(PlannerInfo *root, double nrows; /* - * Compute joinclause selectivity. Note that we are only considering + * Compute joinclause selectivity. Note that we are only considering * clauses that become restriction clauses at this join level; we are not * double-counting them because they were not considered in estimating the * sizes of the component rels. @@ -3665,7 +3665,7 @@ calc_joinrel_size_estimate(PlannerInfo *root, * * If we are doing an outer join, take that into account: the joinqual * selectivity has to be clamped using the knowledge that the output must - * be at least as large as the non-nullable input. However, any + * be at least as large as the non-nullable input. However, any * pushed-down quals are applied after the outer join, so their * selectivity applies fully. * @@ -3736,7 +3736,7 @@ set_subquery_size_estimates(PlannerInfo *root, RelOptInfo *rel) /* * Compute per-output-column width estimates by examining the subquery's - * targetlist. For any output that is a plain Var, get the width estimate + * targetlist. For any output that is a plain Var, get the width estimate * that was made while planning the subquery. Otherwise, we leave it to * set_rel_width to fill in a datatype-based default estimate. 
*/ @@ -3755,7 +3755,7 @@ set_subquery_size_estimates(PlannerInfo *root, RelOptInfo *rel) * The subquery could be an expansion of a view that's had columns * added to it since the current query was parsed, so that there are * non-junk tlist columns in it that don't correspond to any column - * visible at our query level. Ignore such columns. + * visible at our query level. Ignore such columns. */ if (te->resno < rel->min_attr || te->resno > rel->max_attr) continue; @@ -3904,7 +3904,7 @@ set_cte_size_estimates(PlannerInfo *root, RelOptInfo *rel, Plan *cteplan) * of estimating baserestrictcost, so we set that, and we also set up width * using what will be purely datatype-driven estimates from the targetlist. * There is no way to do anything sane with the rows value, so we just put - * a default estimate and hope that the wrapper can improve on it. The + * a default estimate and hope that the wrapper can improve on it. The * wrapper's GetForeignRelSize function will be called momentarily. * * The rel's targetlist and restrictinfo list must have been constructed @@ -4025,7 +4025,7 @@ set_rel_width(PlannerInfo *root, RelOptInfo *rel) { /* * We could be looking at an expression pulled up from a subquery, - * or a ROW() representing a whole-row child Var, etc. Do what we + * or a ROW() representing a whole-row child Var, etc. Do what we * can using the expression type information. */ int32 item_width; @@ -4132,7 +4132,7 @@ void set_default_effective_cache_size(void) { /* - * We let check_effective_cache_size() compute the actual setting. Note + * We let check_effective_cache_size() compute the actual setting. Note * that this call is a no-op if the user has supplied a setting (since * that will have a higher priority than PGC_S_DYNAMIC_DEFAULT). */ diff --git a/src/backend/optimizer/path/equivclass.c b/src/backend/optimizer/path/equivclass.c index ac12f84fd5e..b7aff3775ee 100644 --- a/src/backend/optimizer/path/equivclass.c +++ b/src/backend/optimizer/path/equivclass.c @@ -74,7 +74,7 @@ static bool reconsider_full_join_clause(PlannerInfo *root, * * If below_outer_join is true, then the clause was found below the nullable * side of an outer join, so its sides might validly be both NULL rather than - * strictly equal. We can still deduce equalities in such cases, but we take + * strictly equal. We can still deduce equalities in such cases, but we take * care to mark an EquivalenceClass if it came from any such clauses. Also, * we have to check that both sides are either pseudo-constants or strict * functions of Vars, else they might not both go to NULL above the outer @@ -141,9 +141,9 @@ process_equivalence(PlannerInfo *root, RestrictInfo *restrictinfo, collation); /* - * Reject clauses of the form X=X. These are not as redundant as they + * Reject clauses of the form X=X. These are not as redundant as they * might seem at first glance: assuming the operator is strict, this is - * really an expensive way to write X IS NOT NULL. So we must not risk + * really an expensive way to write X IS NOT NULL. So we must not risk * just losing the clause, which would be possible if there is already a * single-element EquivalenceClass containing X. The case is not common * enough to be worth contorting the EC machinery for, so just reject the @@ -187,14 +187,14 @@ process_equivalence(PlannerInfo *root, RestrictInfo *restrictinfo, * Sweep through the existing EquivalenceClasses looking for matches to * item1 and item2. These are the possible outcomes: * - * 1. We find both in the same EC. 
The equivalence is already known, so + * 1. We find both in the same EC. The equivalence is already known, so * there's nothing to do. * * 2. We find both in different ECs. Merge the two ECs together. * * 3. We find just one. Add the other to its EC. * - * 4. We find neither. Make a new, two-entry EC. + * 4. We find neither. Make a new, two-entry EC. * * Note: since all ECs are built through this process or the similar * search in get_eclass_for_sort_expr(), it's impossible that we'd match @@ -294,7 +294,7 @@ process_equivalence(PlannerInfo *root, RestrictInfo *restrictinfo, /* * We add ec2's items to ec1, then set ec2's ec_merged link to point - * to ec1 and remove ec2 from the eq_classes list. We cannot simply + * to ec1 and remove ec2 from the eq_classes list. We cannot simply * delete ec2 because that could leave dangling pointers in existing * PathKeys. We leave it behind with a link so that the merged EC can * be found. @@ -406,7 +406,7 @@ process_equivalence(PlannerInfo *root, RestrictInfo *restrictinfo, * Also, the expression's exposed collation must match the EC's collation. * This is important because in comparisons like "foo < bar COLLATE baz", * only one of the expressions has the correct exposed collation as we receive - * it from the parser. Forcing both of them to have it ensures that all + * it from the parser. Forcing both of them to have it ensures that all * variant spellings of such a construct behave the same. Again, we can * stick on a RelabelType to force the right exposed collation. (It might * work to not label the collation at all in EC members, but this is risky @@ -511,22 +511,22 @@ add_eq_member(EquivalenceClass *ec, Expr *expr, Relids relids, * single-member EquivalenceClass for it. * * expr is the expression, and nullable_relids is the set of base relids - * that are potentially nullable below it. We actually only care about + * that are potentially nullable below it. We actually only care about * the set of such relids that are used in the expression; but for caller * convenience, we perform that intersection step here. The caller need * only be sure that nullable_relids doesn't omit any nullable rels that * might appear in the expr. * * sortref is the SortGroupRef of the originating SortGroupClause, if any, - * or zero if not. (It should never be zero if the expression is volatile!) + * or zero if not. (It should never be zero if the expression is volatile!) * * If rel is not NULL, it identifies a specific relation we're considering * a path for, and indicates that child EC members for that relation can be - * considered. Otherwise child members are ignored. (Note: since child EC + * considered. Otherwise child members are ignored. (Note: since child EC * members aren't guaranteed unique, a non-NULL value means that there could * be more than one EC that matches the expression; if so it's order-dependent * which one you get. This is annoying but it only happens in corner cases, - * so for now we live with just reporting the first match. See also + * so for now we live with just reporting the first match. See also * generate_implied_equalities_for_column and match_pathkeys_to_index.) * * If create_it is TRUE, we'll build a new EquivalenceClass when there is no @@ -680,7 +680,7 @@ get_eclass_for_sort_expr(PlannerInfo *root, * * When an EC contains pseudoconstants, our strategy is to generate * "member = const1" clauses where const1 is the first constant member, for - * every other member (including other constants). 
If we are able to do this + * every other member (including other constants). If we are able to do this * then we don't need any "var = var" comparisons because we've successfully * constrained all the vars at their points of creation. If we fail to * generate any of these clauses due to lack of cross-type operators, we fall @@ -705,7 +705,7 @@ get_eclass_for_sort_expr(PlannerInfo *root, * "WHERE a.x = b.y AND b.y = a.z", the scheme breaks down if we cannot * generate "a.x = a.z" as a restriction clause for A.) In this case we mark * the EC "ec_broken" and fall back to regurgitating its original source - * RestrictInfos at appropriate times. We do not try to retract any derived + * RestrictInfos at appropriate times. We do not try to retract any derived * clauses already generated from the broken EC, so the resulting plan could * be poor due to bad selectivity estimates caused by redundant clauses. But * the correct solution to that is to fix the opfamilies ... @@ -968,8 +968,8 @@ generate_base_implied_equalities_broken(PlannerInfo *root, * built any join RelOptInfos. * * An annoying special case for parameterized scans is that the inner rel can - * be an appendrel child (an "other rel"). In this case we must generate - * appropriate clauses using child EC members. add_child_rel_equivalences + * be an appendrel child (an "other rel"). In this case we must generate + * appropriate clauses using child EC members. add_child_rel_equivalences * must already have been done for the child rel. * * The results are sufficient for use in merge, hash, and plain nestloop join @@ -983,7 +983,7 @@ generate_base_implied_equalities_broken(PlannerInfo *root, * we consider different join paths, we avoid generating multiple copies: * whenever we select a particular pair of EquivalenceMembers to join, * we check to see if the pair matches any original clause (in ec_sources) - * or previously-built clause (in ec_derives). This saves memory and allows + * or previously-built clause (in ec_derives). This saves memory and allows * re-use of information cached in RestrictInfos. * * join_relids should always equal bms_union(outer_relids, inner_rel->relids). @@ -1079,7 +1079,7 @@ generate_join_implied_equalities_normal(PlannerInfo *root, * First, scan the EC to identify member values that are computable at the * outer rel, at the inner rel, or at this relation but not in either * input rel. The outer-rel members should already be enforced equal, - * likewise for the inner-rel members. We'll need to create clauses to + * likewise for the inner-rel members. We'll need to create clauses to * enforce that any newly computable members are all equal to each other * as well as to at least one input member, plus enforce at least one * outer-rel member equal to at least one inner-rel member. @@ -1105,7 +1105,7 @@ generate_join_implied_equalities_normal(PlannerInfo *root, } /* - * First, select the joinclause if needed. We can equate any one outer + * First, select the joinclause if needed. We can equate any one outer * member to any one inner member, but we have to find a datatype * combination for which an opfamily member operator exists. If we have * choices, we prefer simple Var members (possibly with RelabelType) since @@ -1323,8 +1323,8 @@ create_join_clause(PlannerInfo *root, /* * Search to see if we already built a RestrictInfo for this pair of - * EquivalenceMembers. We can use either original source clauses or - * previously-derived clauses. The check on opno is probably redundant, + * EquivalenceMembers. 
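
The "member = const1" strategy described just above for EquivalenceClasses that contain pseudoconstants is easy to see with a toy member list. Everything below is invented for illustration; in particular the "no dot means constant" rule is only a stand-in for the planner's real pseudoconstant test.

    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
        /* invented EC members: two Vars and two pseudoconstants */
        const char *members[] = {"a.x", "b.y", "42", "43"};
        int         nmembers = (int) (sizeof(members) / sizeof(members[0]));
        const char *const1 = NULL;
        int         i;

        /* pick the first constant member (toy rule: no "rel.col" dot) */
        for (i = 0; i < nmembers; i++)
        {
            if (strchr(members[i], '.') == NULL)
            {
                const1 = members[i];
                break;
            }
        }

        /* equate every other member, even the other constant, to const1 */
        for (i = 0; i < nmembers; i++)
        {
            if (members[i] != const1)
                printf("%s = %s\n", members[i], const1);
        }
        return 0;
    }

This prints "a.x = 42", "b.y = 42" and "43 = 42", which is why no "var = var" clauses are needed when the strategy succeeds.
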
We can use either original source clauses or + * previously-derived clauses. The check on opno is probably redundant, * but be safe ... */ foreach(lc, ec->ec_sources) @@ -1455,7 +1455,7 @@ create_join_clause(PlannerInfo *root, * * Outer join clauses that are marked outerjoin_delayed are special: this * condition means that one or both VARs might go to null due to a lower - * outer join. We can still push a constant through the clause, but only + * outer join. We can still push a constant through the clause, but only * if its operator is strict; and we *have to* throw the clause back into * regular joinclause processing. By keeping the strict join clause, * we ensure that any null-extended rows that are mistakenly generated due @@ -1649,7 +1649,7 @@ reconsider_outer_join_clause(PlannerInfo *root, RestrictInfo *rinfo, /* * Yes it does! Try to generate a clause INNERVAR = CONSTANT for each - * CONSTANT in the EC. Note that we must succeed with at least one + * CONSTANT in the EC. Note that we must succeed with at least one * constant before we can decide to throw away the outer-join clause. */ match = false; @@ -1938,8 +1938,8 @@ add_child_rel_equivalences(PlannerInfo *root, continue; /* - * No point in searching if parent rel not mentioned in eclass; but - * we can't tell that for sure if parent rel is itself a child. + * No point in searching if parent rel not mentioned in eclass; but we + * can't tell that for sure if parent rel is itself a child. */ if (parent_rel->reloptkind == RELOPT_BASEREL && !bms_is_subset(parent_rel->relids, cur_ec->ec_relids)) @@ -2055,7 +2055,7 @@ mutate_eclass_expressions(PlannerInfo *root, * is a redundant list of clauses equating the table/index column to each of * the other-relation values it is known to be equal to. Any one of * these clauses can be used to create a parameterized path, and there - * is no value in using more than one. (But it *is* worthwhile to create + * is no value in using more than one. (But it *is* worthwhile to create * a separate parameterized path for each one, since that leads to different * join orders.) * @@ -2102,12 +2102,12 @@ generate_implied_equalities_for_column(PlannerInfo *root, continue; /* - * Scan members, looking for a match to the target column. Note that + * Scan members, looking for a match to the target column. Note that * child EC members are considered, but only when they belong to the * target relation. (Unlike regular members, the same expression * could be a child member of more than one EC. Therefore, it's * potentially order-dependent which EC a child relation's target - * column gets matched to. This is annoying but it only happens in + * column gets matched to. This is annoying but it only happens in * corner cases, so for now we live with just reporting the first * match. See also get_eclass_for_sort_expr.) */ @@ -2186,7 +2186,7 @@ generate_implied_equalities_for_column(PlannerInfo *root, * a joinclause involving the two given relations. * * This is essentially a very cut-down version of - * generate_join_implied_equalities(). Note it's OK to occasionally say "yes" + * generate_join_implied_equalities(). Note it's OK to occasionally say "yes" * incorrectly. Hence we don't bother with details like whether the lack of a * cross-type operator might prevent the clause from actually being generated. */ @@ -2222,7 +2222,7 @@ have_relevant_eclass_joinclause(PlannerInfo *root, * OK as a possibly-overoptimistic heuristic. 
* * We don't test ec_has_const either, even though a const eclass won't - * generate real join clauses. This is because if we had "WHERE a.x = + * generate real join clauses. This is because if we had "WHERE a.x = * b.y and a.x = 42", it is worth considering a join between a and b, * since the join result is likely to be small even though it'll end * up being an unqualified nestloop. @@ -2279,7 +2279,7 @@ has_relevant_eclass_joinclause(PlannerInfo *root, RelOptInfo *rel1) * against the specified relation. * * This is just a heuristic test and doesn't have to be exact; it's better - * to say "yes" incorrectly than "no". Hence we don't bother with details + * to say "yes" incorrectly than "no". Hence we don't bother with details * like whether the lack of a cross-type operator might prevent the clause * from actually being generated. */ @@ -2300,7 +2300,7 @@ eclass_useful_for_merging(EquivalenceClass *eclass, /* * Note we don't test ec_broken; if we did, we'd need a separate code path - * to look through ec_sources. Checking the members anyway is OK as a + * to look through ec_sources. Checking the members anyway is OK as a * possibly-overoptimistic heuristic. */ diff --git a/src/backend/optimizer/path/indxpath.c b/src/backend/optimizer/path/indxpath.c index a912174fb00..42dcb111aeb 100644 --- a/src/backend/optimizer/path/indxpath.c +++ b/src/backend/optimizer/path/indxpath.c @@ -222,7 +222,7 @@ static Const *string_to_const(const char *str, Oid datatype); * Note: in cases involving LATERAL references in the relation's tlist, it's * possible that rel->lateral_relids is nonempty. Currently, we include * lateral_relids into the parameterization reported for each path, but don't - * take it into account otherwise. The fact that any such rels *must* be + * take it into account otherwise. The fact that any such rels *must* be * available as parameter sources perhaps should influence our choices of * index quals ... but for now, it doesn't seem worth troubling over. * In particular, comments below about "unparameterized" paths should be read @@ -270,7 +270,7 @@ create_index_paths(PlannerInfo *root, RelOptInfo *rel) match_restriction_clauses_to_index(rel, index, &rclauseset); /* - * Build index paths from the restriction clauses. These will be + * Build index paths from the restriction clauses. These will be * non-parameterized paths. Plain paths go directly to add_path(), * bitmap paths are added to bitindexpaths to be handled below. */ @@ -278,10 +278,10 @@ create_index_paths(PlannerInfo *root, RelOptInfo *rel) &bitindexpaths); /* - * Identify the join clauses that can match the index. For the moment - * we keep them separate from the restriction clauses. Note that this + * Identify the join clauses that can match the index. For the moment + * we keep them separate from the restriction clauses. Note that this * step finds only "loose" join clauses that have not been merged into - * EquivalenceClasses. Also, collect join OR clauses for later. + * EquivalenceClasses. Also, collect join OR clauses for later. */ MemSet(&jclauseset, 0, sizeof(jclauseset)); match_join_clauses_to_index(root, rel, index, @@ -343,9 +343,9 @@ create_index_paths(PlannerInfo *root, RelOptInfo *rel) /* * Likewise, if we found anything usable, generate BitmapHeapPaths for the - * most promising combinations of join bitmap index paths. Our strategy + * most promising combinations of join bitmap index paths. 
Our strategy * is to generate one such path for each distinct parameterization seen - * among the available bitmap index paths. This may look pretty + * among the available bitmap index paths. This may look pretty * expensive, but usually there won't be very many distinct * parameterizations. (This logic is quite similar to that in * consider_index_join_clauses, but we're working with whole paths not @@ -461,7 +461,7 @@ consider_index_join_clauses(PlannerInfo *root, RelOptInfo *rel, * * For simplicity in selecting relevant clauses, we represent each set of * outer rels as a maximum set of clause_relids --- that is, the indexed - * relation itself is also included in the relids set. considered_relids + * relation itself is also included in the relids set. considered_relids * lists all relids sets we've already tried. */ for (indexcol = 0; indexcol < index->ncolumns; indexcol++) @@ -550,7 +550,7 @@ consider_index_join_outer_rels(PlannerInfo *root, RelOptInfo *rel, /* * If this clause was derived from an equivalence class, the * clause list may contain other clauses derived from the same - * eclass. We should not consider that combining this clause with + * eclass. We should not consider that combining this clause with * one of those clauses generates a usefully different * parameterization; so skip if any clause derived from the same * eclass would already have been included when using oldrelids. @@ -633,9 +633,9 @@ get_join_index_paths(PlannerInfo *root, RelOptInfo *rel, } /* - * Add applicable eclass join clauses. The clauses generated for each + * Add applicable eclass join clauses. The clauses generated for each * column are redundant (cf generate_implied_equalities_for_column), - * so we need at most one. This is the only exception to the general + * so we need at most one. This is the only exception to the general * rule of using all available index clauses. */ foreach(lc, eclauseset->indexclauses[indexcol]) @@ -722,7 +722,7 @@ bms_equal_any(Relids relids, List *relids_list) * bitmap indexpaths are added to *bitindexpaths for later processing. * * This is a fairly simple frontend to build_index_paths(). Its reason for - * existence is mainly to handle ScalarArrayOpExpr quals properly. If the + * existence is mainly to handle ScalarArrayOpExpr quals properly. If the * index AM supports them natively, we should just include them in simple * index paths. If not, we should exclude them while building simple index * paths, and then make a separate attempt to include them in bitmap paths. @@ -736,7 +736,7 @@ get_index_paths(PlannerInfo *root, RelOptInfo *rel, ListCell *lc; /* - * Build simple index paths using the clauses. Allow ScalarArrayOpExpr + * Build simple index paths using the clauses. Allow ScalarArrayOpExpr * clauses only if the index AM supports them natively. */ indexpaths = build_index_paths(root, rel, @@ -748,7 +748,7 @@ get_index_paths(PlannerInfo *root, RelOptInfo *rel, * Submit all the ones that can form plain IndexScan plans to add_path. (A * plain IndexPath can represent either a plain IndexScan or an * IndexOnlyScan, but for our purposes here that distinction does not - * matter. However, some of the indexes might support only bitmap scans, + * matter. However, some of the indexes might support only bitmap scans, * and those we mustn't submit to add_path here.) * * Also, pick out the ones that are usable as bitmap scans. 
For that, we @@ -792,7 +792,7 @@ get_index_paths(PlannerInfo *root, RelOptInfo *rel, * We return a list of paths because (1) this routine checks some cases * that should cause us to not generate any IndexPath, and (2) in some * cases we want to consider both a forward and a backward scan, so as - * to obtain both sort orders. Note that the paths are just returned + * to obtain both sort orders. Note that the paths are just returned * to the caller and not immediately fed to add_path(). * * At top level, useful_predicate should be exactly the index's predOK flag @@ -975,7 +975,7 @@ build_index_paths(PlannerInfo *root, RelOptInfo *rel, } /* - * 3. Check if an index-only scan is possible. If we're not building + * 3. Check if an index-only scan is possible. If we're not building * plain indexscans, this isn't relevant since bitmap scans don't support * index data retrieval anyway. */ @@ -1080,13 +1080,13 @@ build_paths_for_OR(PlannerInfo *root, RelOptInfo *rel, continue; /* - * Ignore partial indexes that do not match the query. If a partial + * Ignore partial indexes that do not match the query. If a partial * index is marked predOK then we know it's OK. Otherwise, we have to * test whether the added clauses are sufficient to imply the * predicate. If so, we can use the index in the current context. * * We set useful_predicate to true iff the predicate was proven using - * the current set of clauses. This is needed to prevent matching a + * the current set of clauses. This is needed to prevent matching a * predOK index to an arm of an OR, which would be a legal but * pointlessly inefficient plan. (A better plan will be generated by * just scanning the predOK index alone, no OR.) @@ -1256,7 +1256,7 @@ generate_bitmap_or_paths(PlannerInfo *root, RelOptInfo *rel, * Given a nonempty list of bitmap paths, AND them into one path. * * This is a nontrivial decision since we can legally use any subset of the - * given path set. We want to choose a good tradeoff between selectivity + * given path set. We want to choose a good tradeoff between selectivity * and cost of computing the bitmap. * * The result is either a single one of the inputs, or a BitmapAndPath @@ -1283,12 +1283,12 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel, List *paths) * In theory we should consider every nonempty subset of the given paths. * In practice that seems like overkill, given the crude nature of the * estimates, not to mention the possible effects of higher-level AND and - * OR clauses. Moreover, it's completely impractical if there are a large + * OR clauses. Moreover, it's completely impractical if there are a large * number of paths, since the work would grow as O(2^N). * * As a heuristic, we first check for paths using exactly the same sets of * WHERE clauses + index predicate conditions, and reject all but the - * cheapest-to-scan in any such group. This primarily gets rid of indexes + * cheapest-to-scan in any such group. This primarily gets rid of indexes * that include the interesting columns but also irrelevant columns. (In * situations where the DBA has gone overboard on creating variant * indexes, this can make for a very large reduction in the number of @@ -1308,7 +1308,7 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel, List *paths) * costsize.c and clausesel.c aren't very smart about redundant clauses. * They will usually double-count the redundant clauses, producing a * too-small selectivity that makes a redundant AND step look like it - * reduces the total cost. 
Perhaps someday that code will be smarter and + * reduces the total cost. Perhaps someday that code will be smarter and * we can remove this limitation. (But note that this also defends * against flat-out duplicate input paths, which can happen because * match_join_clauses_to_index will find the same OR join clauses that @@ -1316,7 +1316,7 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel, List *paths) * of.) * * For the same reason, we reject AND combinations in which an index - * predicate clause duplicates another clause. Here we find it necessary + * predicate clause duplicates another clause. Here we find it necessary * to be even stricter: we'll reject a partial index if any of its * predicate clauses are implied by the set of WHERE clauses and predicate * clauses used so far. This covers cases such as a condition "x = 42" @@ -1379,7 +1379,7 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel, List *paths) /* * For each surviving index, consider it as an "AND group leader", and see * whether adding on any of the later indexes results in an AND path with - * cheaper total cost than before. Then take the cheapest AND group. + * cheaper total cost than before. Then take the cheapest AND group. */ for (i = 0; i < npaths; i++) { @@ -1711,7 +1711,7 @@ find_indexpath_quals(Path *bitmapqual, List **quals, List **preds) /* * find_list_position * Return the given node's position (counting from 0) in the given - * list of nodes. If it's not equal() to any existing list member, + * list of nodes. If it's not equal() to any existing list member, * add it at the end, and return that position. */ static int @@ -1817,7 +1817,7 @@ check_index_only(RelOptInfo *rel, IndexOptInfo *index) * Since we produce parameterized paths before we've begun to generate join * relations, it's impossible to predict exactly how many times a parameterized * path will be iterated; we don't know the size of the relation that will be - * on the outside of the nestloop. However, we should try to account for + * on the outside of the nestloop. However, we should try to account for * multiple iterations somehow in costing the path. The heuristic embodied * here is to use the rowcount of the smallest other base relation needed in * the join clauses used by the path. (We could alternatively consider the @@ -2032,7 +2032,7 @@ match_clause_to_index(IndexOptInfo *index, * doesn't involve a volatile function or a Var of the index's relation. * In particular, Vars belonging to other relations of the query are * accepted here, since a clause of that form can be used in a - * parameterized indexscan. It's the responsibility of higher code levels + * parameterized indexscan. It's the responsibility of higher code levels * to manage restriction and join clauses appropriately. * * Note: we do need to check for Vars of the index's relation on the @@ -2056,7 +2056,7 @@ match_clause_to_index(IndexOptInfo *index, * It is also possible to match RowCompareExpr clauses to indexes (but * currently, only btree indexes handle this). In this routine we will * report a match if the first column of the row comparison matches the - * target index column. This is sufficient to guarantee that some index + * target index column. This is sufficient to guarantee that some index * condition can be constructed from the RowCompareExpr --- whether the * remaining columns match the index too is considered in * adjust_rowcompare_for_index(). 
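
The "AND group leader" search described in the choose_bitmap_and() comments above boils down to a simple greedy loop. The sketch below uses a toy cost model (index scan costs plus a heap-visit charge proportional to the product of the selectivities); the real code builds and costs candidate BitmapAndPaths and also rejects redundant clause sets, so every number and helper here is a stand-in.

    #include <stdio.h>

    #define NPATHS    4
    #define HEAP_COST 1000.0

    /* invented per-path scan costs and selectivities */
    static const double scan_cost[NPATHS]   = {25.0, 40.0, 60.0, 80.0};
    static const double selectivity[NPATHS] = {0.20, 0.10, 0.05, 0.50};

    /* toy cost model: sum of scans plus heap visits for the ANDed selectivity */
    static double
    group_cost(const int *members, int n)
    {
        double  scans = 0.0;
        double  sel = 1.0;
        int     i;

        for (i = 0; i < n; i++)
        {
            scans += scan_cost[members[i]];
            sel *= selectivity[members[i]];
        }
        return scans + HEAP_COST * sel;
    }

    int
    main(void)
    {
        double  best = -1.0;
        int     i, j;

        for (i = 0; i < NPATHS; i++)    /* each path is a potential group leader */
        {
            int     group[NPATHS];
            int     n = 0;

            group[n++] = i;
            for (j = i + 1; j < NPATHS; j++)    /* try adding each later path */
            {
                double  before = group_cost(group, n);

                group[n] = j;
                if (group_cost(group, n + 1) < before)
                    n++;                /* keep it: the AND got cheaper */
            }
            if (best < 0.0 || group_cost(group, n) < best)
                best = group_cost(group, n);
        }
        printf("cheapest AND group cost = %.1f\n", best);
        return 0;
    }
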
@@ -2094,7 +2094,7 @@ match_clause_to_indexcol(IndexOptInfo *index, bool plain_op; /* - * Never match pseudoconstants to indexes. (Normally this could not + * Never match pseudoconstants to indexes. (Normally this could not * happen anyway, since a pseudoconstant clause couldn't contain a Var, * but what if someone builds an expression index on a constant? It's not * totally unreasonable to do so with a partial index, either.) @@ -2378,7 +2378,7 @@ match_pathkeys_to_index(IndexOptInfo *index, List *pathkeys, * We allow any column of the index to match each pathkey; they * don't have to match left-to-right as you might expect. This is * correct for GiST, which is the sole existing AM supporting - * amcanorderbyop. We might need different logic in future for + * amcanorderbyop. We might need different logic in future for * other implementations. */ for (indexcol = 0; indexcol < index->ncolumns; indexcol++) @@ -2429,7 +2429,7 @@ match_pathkeys_to_index(IndexOptInfo *index, List *pathkeys, * Note that we currently do not consider the collation of the ordering * operator's result. In practical cases the result type will be numeric * and thus have no collation, and it's not very clear what to match to - * if it did have a collation. The index's collation should match the + * if it did have a collation. The index's collation should match the * ordering operator's input collation, not its result. * * If successful, return 'clause' as-is if the indexkey is on the left, @@ -2679,7 +2679,7 @@ ec_member_matches_indexcol(PlannerInfo *root, RelOptInfo *rel, * if it is true. * 2. A list of expressions in this relation, and a corresponding list of * equality operators. The caller must have already checked that the operators - * represent equality. (Note: the operators could be cross-type; the + * represent equality. (Note: the operators could be cross-type; the * expressions should correspond to their RHS inputs.) * * The caller need only supply equality conditions arising from joins; @@ -2868,7 +2868,7 @@ match_index_to_operand(Node *operand, int indkey; /* - * Ignore any RelabelType node above the operand. This is needed to be + * Ignore any RelabelType node above the operand. This is needed to be * able to apply indexscanning in binary-compatible-operator cases. Note: * we can assume there is at most one RelabelType node; * eval_const_expressions() will have simplified if more than one. @@ -2935,10 +2935,10 @@ match_index_to_operand(Node *operand, * indexscan machinery. The key idea is that these operators allow us * to derive approximate indexscan qual clauses, such that any tuples * that pass the operator clause itself must also satisfy the simpler - * indexscan condition(s). Then we can use the indexscan machinery + * indexscan condition(s). Then we can use the indexscan machinery * to avoid scanning as much of the table as we'd otherwise have to, * while applying the original operator as a qpqual condition to ensure - * we deliver only the tuples we want. (In essence, we're using a regular + * we deliver only the tuples we want. (In essence, we're using a regular * index as if it were a lossy index.) * * An example of what we're doing is @@ -2952,7 +2952,7 @@ match_index_to_operand(Node *operand, * * Another thing that we do with this machinery is to provide special * smarts for "boolean" indexes (that is, indexes on boolean columns - * that support boolean equality). We can transform a plain reference + * that support boolean equality). 
We can transform a plain reference * to the indexkey into "indexkey = true", or "NOT indexkey" into * "indexkey = false", so as to make the expression indexable using the * regular index operators. (As of Postgres 8.1, we must do this here @@ -3374,7 +3374,7 @@ expand_indexqual_opclause(RestrictInfo *rinfo, Oid opfamily, Oid idxcollation) /* * LIKE and regex operators are not members of any btree index opfamily, * but they can be members of opfamilies for more exotic index types such - * as GIN. Therefore, we should only do expansion if the operator is + * as GIN. Therefore, we should only do expansion if the operator is * actually not in the opfamily. But checking that requires a syscache * lookup, so it's best to first see if the operator is one we are * interested in. @@ -3492,7 +3492,7 @@ expand_indexqual_rowcompare(RestrictInfo *rinfo, * column matches) or a simple OpExpr (if the first-column match is all * there is). In these cases the modified clause is always "<=" or ">=" * even when the original was "<" or ">" --- this is necessary to match all - * the rows that could match the original. (We are essentially building a + * the rows that could match the original. (We are essentially building a * lossy version of the row comparison when we do this.) * * *indexcolnos receives an integer list of the index column numbers (zero diff --git a/src/backend/optimizer/path/joinpath.c b/src/backend/optimizer/path/joinpath.c index a9961161dbc..be54f3de0ba 100644 --- a/src/backend/optimizer/path/joinpath.c +++ b/src/backend/optimizer/path/joinpath.c @@ -107,7 +107,7 @@ add_paths_to_joinrel(PlannerInfo *root, /* * If it's SEMI or ANTI join, compute correction factors for cost - * estimation. These will be the same for all paths. + * estimation. These will be the same for all paths. */ if (jointype == JOIN_SEMI || jointype == JOIN_ANTI) compute_semi_anti_join_factors(root, outerrel, innerrel, @@ -122,7 +122,7 @@ add_paths_to_joinrel(PlannerInfo *root, * to the parameter source rel instead of joining to the other input rel. * This restriction reduces the number of parameterized paths we have to * deal with at higher join levels, without compromising the quality of - * the resulting plan. We express the restriction as a Relids set that + * the resulting plan. We express the restriction as a Relids set that * must overlap the parameterization of any proposed join path. */ foreach(lc, root->join_info_list) @@ -155,7 +155,7 @@ add_paths_to_joinrel(PlannerInfo *root, * However, when a LATERAL subquery is involved, we have to be a bit * laxer, because there will simply not be any paths for the joinrel that * aren't parameterized by whatever the subquery is parameterized by, - * unless its parameterization is resolved within the joinrel. Hence, add + * unless its parameterization is resolved within the joinrel. Hence, add * to param_source_rels anything that is laterally referenced in either * input and is not in the join already. */ @@ -208,7 +208,7 @@ add_paths_to_joinrel(PlannerInfo *root, /* * 1. Consider mergejoin paths where both relations must be explicitly - * sorted. Skip this if we can't mergejoin. + * sorted. Skip this if we can't mergejoin. */ if (mergejoin_allowed) sort_inner_and_outer(root, joinrel, outerrel, innerrel, @@ -233,7 +233,7 @@ add_paths_to_joinrel(PlannerInfo *root, /* * 3. Consider paths where the inner relation need not be explicitly - * sorted. This includes mergejoins only (nestloops were already built in + * sorted. 
This includes mergejoins only (nestloops were already built in * match_unsorted_outer). * * Diked out as redundant 2/13/2000 -- tgl. There isn't any really @@ -507,7 +507,7 @@ try_hashjoin_path(PlannerInfo *root, * We already know that the clause is a binary opclause referencing only the * rels in the current join. The point here is to check whether it has the * form "outerrel_expr op innerrel_expr" or "innerrel_expr op outerrel_expr", - * rather than mixing outer and inner vars on either side. If it matches, + * rather than mixing outer and inner vars on either side. If it matches, * we set the transient flag outer_is_left to identify which side is which. */ static inline bool @@ -572,7 +572,7 @@ sort_inner_and_outer(PlannerInfo *root, * sort. * * This function intentionally does not consider parameterized input - * paths, except when the cheapest-total is parameterized. If we did so, + * paths, except when the cheapest-total is parameterized. If we did so, * we'd have a combinatorial explosion of mergejoin paths of dubious * value. This interacts with decisions elsewhere that also discriminate * against mergejoins with parameterized inputs; see comments in @@ -619,7 +619,7 @@ sort_inner_and_outer(PlannerInfo *root, * * Actually, it's not quite true that every mergeclause ordering will * generate a different path order, because some of the clauses may be - * partially redundant (refer to the same EquivalenceClasses). Therefore, + * partially redundant (refer to the same EquivalenceClasses). Therefore, * what we do is convert the mergeclause list to a list of canonical * pathkeys, and then consider different orderings of the pathkeys. * @@ -713,7 +713,7 @@ sort_inner_and_outer(PlannerInfo *root, * cheapest-total inner-indexscan path (if any), and one on the * cheapest-startup inner-indexscan path (if different). * - * We also consider mergejoins if mergejoin clauses are available. We have + * We also consider mergejoins if mergejoin clauses are available. We have * two ways to generate the inner path for a mergejoin: sort the cheapest * inner path, or use an inner path that is already suitably ordered for the * merge. If we have several mergeclauses, it could be that there is no inner @@ -845,8 +845,8 @@ match_unsorted_outer(PlannerInfo *root, /* * If we need to unique-ify the outer path, it's pointless to consider - * any but the cheapest outer. (XXX we don't consider parameterized - * outers, nor inners, for unique-ified cases. Should we?) + * any but the cheapest outer. (XXX we don't consider parameterized + * outers, nor inners, for unique-ified cases. Should we?) */ if (save_jointype == JOIN_UNIQUE_OUTER) { @@ -887,7 +887,7 @@ match_unsorted_outer(PlannerInfo *root, { /* * Consider nestloop joins using this outer path and various - * available paths for the inner relation. We consider the + * available paths for the inner relation. We consider the * cheapest-total paths for each available parameterization of the * inner relation, including the unparameterized case. */ @@ -1042,7 +1042,7 @@ match_unsorted_outer(PlannerInfo *root, /* * Look for an inner path ordered well enough for the first - * 'sortkeycnt' innersortkeys. NB: trialsortkeys list is modified + * 'sortkeycnt' innersortkeys. NB: trialsortkeys list is modified * destructively, which is why we made a copy... 
*/ trialsortkeys = list_truncate(trialsortkeys, sortkeycnt); diff --git a/src/backend/optimizer/path/joinrels.c b/src/backend/optimizer/path/joinrels.c index 05eaef525d5..610892890f5 100644 --- a/src/backend/optimizer/path/joinrels.c +++ b/src/backend/optimizer/path/joinrels.c @@ -213,7 +213,7 @@ join_search_one_level(PlannerInfo *root, int level) /*---------- * When special joins are involved, there may be no legal way - * to make an N-way join for some values of N. For example consider + * to make an N-way join for some values of N. For example consider * * SELECT ... FROM t1 WHERE * x IN (SELECT ... FROM t2,t3 WHERE ...) AND @@ -337,7 +337,7 @@ join_is_legal(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2, ListCell *l; /* - * Ensure output params are set on failure return. This is just to + * Ensure output params are set on failure return. This is just to * suppress uninitialized-variable warnings from overly anal compilers. */ *sjinfo_p = NULL; @@ -345,7 +345,7 @@ join_is_legal(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2, /* * If we have any special joins, the proposed join might be illegal; and - * in any case we have to determine its join type. Scan the join info + * in any case we have to determine its join type. Scan the join info * list for conflicts. */ match_sjinfo = NULL; @@ -609,7 +609,7 @@ make_join_rel(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2) /* * If it's a plain inner join, then we won't have found anything in - * join_info_list. Make up a SpecialJoinInfo so that selectivity + * join_info_list. Make up a SpecialJoinInfo so that selectivity * estimation functions will know what's being joined. */ if (sjinfo == NULL) @@ -916,7 +916,7 @@ have_join_order_restriction(PlannerInfo *root, * * Essentially, this tests whether have_join_order_restriction() could * succeed with this rel and some other one. It's OK if we sometimes - * say "true" incorrectly. (Therefore, we don't bother with the relatively + * say "true" incorrectly. (Therefore, we don't bother with the relatively * expensive has_legal_joinclause test.) */ static bool @@ -1027,7 +1027,7 @@ is_dummy_rel(RelOptInfo *rel) * dummy. * * Also, when called during GEQO join planning, we are in a short-lived - * memory context. We must make sure that the dummy path attached to a + * memory context. We must make sure that the dummy path attached to a * baserel survives the GEQO cycle, else the baserel is trashed for future * GEQO cycles. On the other hand, when we are marking a joinrel during GEQO, * we don't want the dummy path to clutter the main planning context. Upshot diff --git a/src/backend/optimizer/path/pathkeys.c b/src/backend/optimizer/path/pathkeys.c index 9179c61cbdb..5d953dfb45a 100644 --- a/src/backend/optimizer/path/pathkeys.c +++ b/src/backend/optimizer/path/pathkeys.c @@ -46,7 +46,7 @@ static bool right_merge_direction(PlannerInfo *root, PathKey *pathkey); * entry if there's not one already. * * Note that this function must not be used until after we have completed - * merging EquivalenceClasses. (We don't try to enforce that here; instead, + * merging EquivalenceClasses. (We don't try to enforce that here; instead, * equivclass.c will complain if a merge occurs after root->canon_pathkeys * has become nonempty.) */ @@ -120,7 +120,7 @@ make_canonical_pathkey(PlannerInfo *root, * * Both the given pathkey and the list members must be canonical for this * to work properly, but that's okay since we no longer ever construct any - * non-canonical pathkeys. 
(Note: the notion of a pathkey *list* being + * non-canonical pathkeys. (Note: the notion of a pathkey *list* being * canonical includes the additional requirement of no redundant entries, * which is exactly what we are checking for here.) * @@ -162,7 +162,7 @@ pathkey_is_redundant(PathKey *new_pathkey, List *pathkeys) * * If rel is not NULL, it identifies a specific relation we're considering * a path for, and indicates that child EC members for that relation can be - * considered. Otherwise child members are ignored. (See the comments for + * considered. Otherwise child members are ignored. (See the comments for * get_eclass_for_sort_expr.) * * create_it is TRUE if we should create any missing EquivalenceClass @@ -192,7 +192,7 @@ make_pathkey_from_sortinfo(PlannerInfo *root, /* * EquivalenceClasses need to contain opfamily lists based on the family * membership of mergejoinable equality operators, which could belong to - * more than one opfamily. So we have to look up the opfamily's equality + * more than one opfamily. So we have to look up the opfamily's equality * operator and get its membership. */ equality_op = get_opfamily_member(opfamily, @@ -355,7 +355,7 @@ get_cheapest_path_for_pathkeys(List *paths, List *pathkeys, /* * Since cost comparison is a lot cheaper than pathkey comparison, do - * that first. (XXX is that still true?) + * that first. (XXX is that still true?) */ if (matched_path != NULL && compare_path_costs(matched_path, path, cost_criterion) <= 0) @@ -397,7 +397,7 @@ get_cheapest_fractional_path_for_pathkeys(List *paths, /* * Since cost comparison is a lot cheaper than pathkey comparison, do - * that first. (XXX is that still true?) + * that first. (XXX is that still true?) */ if (matched_path != NULL && compare_fractional_path_costs(matched_path, path, fraction) <= 0) @@ -555,7 +555,7 @@ build_expression_pathkey(PlannerInfo *root, /* * convert_subquery_pathkeys * Build a pathkeys list that describes the ordering of a subquery's - * result, in the terms of the outer query. This is essentially a + * result, in the terms of the outer query. This is essentially a * task of conversion. * * 'rel': outer query's RelOptInfo for the subquery relation. @@ -608,7 +608,7 @@ convert_subquery_pathkeys(PlannerInfo *root, RelOptInfo *rel, /* * Note: it might look funny to be setting sortref = 0 for a - * reference to a volatile sub_eclass. However, the + * reference to a volatile sub_eclass. However, the * expression is *not* volatile in the outer query: it's just * a Var referencing whatever the subquery emitted. (IOW, the * outer query isn't going to re-execute the volatile @@ -645,7 +645,7 @@ convert_subquery_pathkeys(PlannerInfo *root, RelOptInfo *rel, /* * Otherwise, the sub_pathkey's EquivalenceClass could contain * multiple elements (representing knowledge that multiple items - * are effectively equal). Each element might match none, one, or + * are effectively equal). Each element might match none, one, or * more of the output columns that are visible to the outer query. * This means we may have multiple possible representations of the * sub_pathkey in the context of the outer query. Ideally we @@ -873,7 +873,7 @@ make_pathkeys_for_sortclauses(PlannerInfo *root, * right sides. * * Note this is called before EC merging is complete, so the links won't - * necessarily point to canonical ECs. Before they are actually used for + * necessarily point to canonical ECs. 
Before they are actually used for * anything, update_mergeclause_eclasses must be called to ensure that * they've been updated to point to canonical ECs. */ @@ -1007,7 +1007,7 @@ find_mergeclauses_for_pathkeys(PlannerInfo *root, * It's possible that multiple matching clauses might have different * ECs on the other side, in which case the order we put them into our * result makes a difference in the pathkeys required for the other - * input path. However this routine hasn't got any info about which + * input path. However this routine hasn't got any info about which * order would be best, so we don't worry about that. * * It's also possible that the selected mergejoin clauses produce @@ -1038,7 +1038,7 @@ find_mergeclauses_for_pathkeys(PlannerInfo *root, /* * If we didn't find a mergeclause, we're done --- any additional - * sort-key positions in the pathkeys are useless. (But we can still + * sort-key positions in the pathkeys are useless. (But we can still * mergejoin if we found at least one mergeclause.) */ if (matched_restrictinfos == NIL) @@ -1070,7 +1070,7 @@ find_mergeclauses_for_pathkeys(PlannerInfo *root, * Returns a pathkeys list that can be applied to the outer relation. * * Since we assume here that a sort is required, there is no particular use - * in matching any available ordering of the outerrel. (joinpath.c has an + * in matching any available ordering of the outerrel. (joinpath.c has an * entirely separate code path for considering sort-free mergejoins.) Rather, * it's interesting to try to match the requested query_pathkeys so that a * second output sort may be avoided; and failing that, we try to list "more @@ -1401,7 +1401,7 @@ pathkeys_useful_for_merging(PlannerInfo *root, RelOptInfo *rel, List *pathkeys) /* * If we didn't find a mergeclause, we're done --- any additional - * sort-key positions in the pathkeys are useless. (But we can still + * sort-key positions in the pathkeys are useless. (But we can still * mergejoin if we found at least one mergeclause.) */ if (matched) @@ -1431,7 +1431,7 @@ right_merge_direction(PlannerInfo *root, PathKey *pathkey) pathkey->pk_opfamily == query_pathkey->pk_opfamily) { /* - * Found a matching query sort column. Prefer this pathkey's + * Found a matching query sort column. Prefer this pathkey's * direction iff it matches. Note that we ignore pk_nulls_first, * which means that a sort might be needed anyway ... but we still * want to prefer only one of the two possible directions, and we @@ -1507,13 +1507,13 @@ truncate_useless_pathkeys(PlannerInfo *root, * useful according to truncate_useless_pathkeys(). * * This is a cheap test that lets us skip building pathkeys at all in very - * simple queries. It's OK to err in the direction of returning "true" when + * simple queries. It's OK to err in the direction of returning "true" when * there really aren't any usable pathkeys, but erring in the other direction * is bad --- so keep this in sync with the routines above! * * We could make the test more complex, for example checking to see if any of * the joinclauses are really mergejoinable, but that likely wouldn't win - * often enough to repay the extra cycles. Queries with neither a join nor + * often enough to repay the extra cycles. Queries with neither a join nor * a sort are reasonably common, though, so this much work seems worthwhile. 
*/ bool diff --git a/src/backend/optimizer/path/tidpath.c b/src/backend/optimizer/path/tidpath.c index a751a7d36cd..a31d67493bb 100644 --- a/src/backend/optimizer/path/tidpath.c +++ b/src/backend/optimizer/path/tidpath.c @@ -19,7 +19,7 @@ * representation all the way through to execution. * * There is currently no special support for joins involving CTID; in - * particular nothing corresponding to best_inner_indexscan(). Since it's + * particular nothing corresponding to best_inner_indexscan(). Since it's * not very useful to store TIDs of one table in another table, there * doesn't seem to be enough use-case to justify adding a lot of code * for that. @@ -57,7 +57,7 @@ static List *TidQualFromRestrictinfo(List *restrictinfo, int varno); * or * pseudoconstant = CTID * - * We check that the CTID Var belongs to relation "varno". That is probably + * We check that the CTID Var belongs to relation "varno". That is probably * redundant considering this is only applied to restriction clauses, but * let's be safe. */ diff --git a/src/backend/optimizer/plan/analyzejoins.c b/src/backend/optimizer/plan/analyzejoins.c index 523a1e75f89..129fc3dfae6 100644 --- a/src/backend/optimizer/plan/analyzejoins.c +++ b/src/backend/optimizer/plan/analyzejoins.c @@ -40,7 +40,7 @@ static List *remove_rel_from_joinlist(List *joinlist, int relid, int *nremoved); * Check for relations that don't actually need to be joined at all, * and remove them from the query. * - * We are passed the current joinlist and return the updated list. Other + * We are passed the current joinlist and return the updated list. Other * data structures that have to be updated are accessible via "root". */ List * @@ -90,7 +90,7 @@ restart: * Restart the scan. This is necessary to ensure we find all * removable joins independently of ordering of the join_info_list * (note that removal of attr_needed bits may make a join appear - * removable that did not before). Also, since we just deleted the + * removable that did not before). Also, since we just deleted the * current list cell, we'd have to have some kluge to continue the * list scan anyway. */ @@ -107,7 +107,7 @@ restart: * We already know that the clause is a binary opclause referencing only the * rels in the current join. The point here is to check whether it has the * form "outerrel_expr op innerrel_expr" or "innerrel_expr op outerrel_expr", - * rather than mixing outer and inner vars on either side. If it matches, + * rather than mixing outer and inner vars on either side. If it matches, * we set the transient flag outer_is_left to identify which side is which. */ static inline bool @@ -154,7 +154,7 @@ join_is_removable(PlannerInfo *root, SpecialJoinInfo *sjinfo) /* * Currently, we only know how to remove left joins to a baserel with - * unique indexes. We can check most of these criteria pretty trivially + * unique indexes. We can check most of these criteria pretty trivially * to avoid doing useless extra work. But checking whether any of the * indexes are unique would require iterating over the indexlist, so for * now we just make sure there are indexes of some sort or other. If none @@ -203,7 +203,7 @@ join_is_removable(PlannerInfo *root, SpecialJoinInfo *sjinfo) * actually references some inner-rel attributes; but the correct check * for that is relatively expensive, so we first check against ph_eval_at, * which must mention the inner rel if the PHV uses any inner-rel attrs as - * non-lateral references. Note that if the PHV's syntactic scope is just + * non-lateral references. 
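
The restart-the-scan behaviour described in the analyzejoins.c hunk above is the usual pattern of starting over from the top whenever a removal may have turned an earlier, previously rejected candidate into a removable one. A self-contained toy version (plain arrays instead of PostgreSQL Lists, and an invented removability rule) looks like this:

    #include <stdio.h>

    #define N 5

    int
    main(void)
    {
        int     vals[N] = {3, 1, 4, 2, 5};
        int     removed[N] = {0, 0, 0, 0, 0};
        int     i, j;

    restart:
        for (i = 0; i < N; i++)
        {
            int     removable = 1;

            if (removed[i])
                continue;
            /* toy rule: removable only once no larger element survives */
            for (j = 0; j < N; j++)
            {
                if (!removed[j] && vals[j] > vals[i])
                    removable = 0;
            }
            if (removable)
            {
                printf("removing %d\n", vals[i]);
                removed[i] = 1;
                goto restart;   /* a removal can expose earlier candidates */
            }
        }
        return 0;
    }

Here every element ends up removed, but an earlier element only becomes removable after a later one has been deleted, which is why the scan starts over after each removal.
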
Note that if the PHV's syntactic scope is just * the inner rel, we can't drop the rel even if the PHV is variable-free. */ foreach(l, root->placeholder_list) diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c index 784805fbf43..4b641a2ca1f 100644 --- a/src/backend/optimizer/plan/createplan.c +++ b/src/backend/optimizer/plan/createplan.c @@ -171,7 +171,7 @@ static Material *make_material(Plan *lefttree); /* * create_plan * Creates the access plan for a query by recursively processing the - * desired tree of pathnodes, starting at the node 'best_path'. For + * desired tree of pathnodes, starting at the node 'best_path'. For * every pathnode found, we create a corresponding plan node containing * appropriate id, target list, and qualification information. * @@ -286,7 +286,7 @@ create_scan_plan(PlannerInfo *root, Path *best_path) /* * For table scans, rather than using the relation targetlist (which is * only those Vars actually needed by the query), we prefer to generate a - * tlist containing all Vars in order. This will allow the executor to + * tlist containing all Vars in order. This will allow the executor to * optimize away projection of the table tuples, if possible. (Note that * planner.c may replace the tlist we generate here, forcing projection to * occur.) @@ -523,7 +523,7 @@ use_physical_tlist(PlannerInfo *root, RelOptInfo *rel) * * If the plan node immediately above a scan would prefer to get only * needed Vars and not a physical tlist, it must call this routine to - * undo the decision made by use_physical_tlist(). Currently, Hash, Sort, + * undo the decision made by use_physical_tlist(). Currently, Hash, Sort, * and Material nodes want this, so they don't have to store useless columns. */ static void @@ -654,7 +654,7 @@ create_join_plan(PlannerInfo *root, JoinPath *best_path) /* * * Expensive function pullups may have pulled local predicates * into - * this path node. Put them in the qpqual of the plan node. * JMH, + * this path node. Put them in the qpqual of the plan node. * JMH, * 6/15/92 */ if (get_loc_restrictinfo(best_path) != NIL) @@ -1170,10 +1170,10 @@ create_indexscan_plan(PlannerInfo *root, /* * The qpqual list must contain all restrictions not automatically handled * by the index, other than pseudoconstant clauses which will be handled - * by a separate gating plan node. All the predicates in the indexquals + * by a separate gating plan node. All the predicates in the indexquals * will be checked (either by the index itself, or by nodeIndexscan.c), * but if there are any "special" operators involved then they must be - * included in qpqual. The upshot is that qpqual must contain + * included in qpqual. The upshot is that qpqual must contain * scan_clauses minus whatever appears in indexquals. * * In normal cases simple pointer equality checks will be enough to spot @@ -1310,15 +1310,15 @@ create_bitmap_scan_plan(PlannerInfo *root, /* * The qpqual list must contain all restrictions not automatically handled * by the index, other than pseudoconstant clauses which will be handled - * by a separate gating plan node. All the predicates in the indexquals + * by a separate gating plan node. All the predicates in the indexquals * will be checked (either by the index itself, or by * nodeBitmapHeapscan.c), but if there are any "special" operators - * involved then they must be added to qpqual. The upshot is that qpqual + * involved then they must be added to qpqual. 
The upshot is that qpqual * must contain scan_clauses minus whatever appears in indexquals. * * This loop is similar to the comparable code in create_indexscan_plan(), * but with some differences because it has to compare the scan clauses to - * stripped (no RestrictInfos) indexquals. See comments there for more + * stripped (no RestrictInfos) indexquals. See comments there for more * info. * * In normal cases simple equal() checks will be enough to spot duplicate @@ -1363,7 +1363,7 @@ create_bitmap_scan_plan(PlannerInfo *root, /* * When dealing with special operators, we will at this point have - * duplicate clauses in qpqual and bitmapqualorig. We may as well drop + * duplicate clauses in qpqual and bitmapqualorig. We may as well drop * 'em from bitmapqualorig, since there's no point in making the tests * twice. */ @@ -1475,7 +1475,7 @@ create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual, /* * Here, we only detect qual-free subplans. A qual-free subplan would * cause us to generate "... OR true ..." which we may as well reduce - * to just "true". We do not try to eliminate redundant subclauses + * to just "true". We do not try to eliminate redundant subclauses * because (a) it's not as likely as in the AND case, and (b) we might * well be working with hundreds or even thousands of OR conditions, * perhaps from a long IN list. The performance of list_append_unique @@ -1571,7 +1571,7 @@ create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual, /* * We know that the index predicate must have been implied by the * query condition as a whole, but it may or may not be implied by - * the conditions that got pushed into the bitmapqual. Avoid + * the conditions that got pushed into the bitmapqual. Avoid * generating redundant conditions. */ if (!predicate_implied_by(list_make1(pred), ipath->indexclauses)) @@ -1954,14 +1954,14 @@ create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path, Assert(rte->rtekind == RTE_RELATION); /* - * Sort clauses into best execution order. We do this first since the FDW + * Sort clauses into best execution order. We do this first since the FDW * might have more info than we do and wish to adjust the ordering. */ scan_clauses = order_qual_clauses(root, scan_clauses); /* * Let the FDW perform its processing on the restriction clauses and - * generate the plan node. Note that the FDW might remove restriction + * generate the plan node. Note that the FDW might remove restriction * clauses that it intends to execute remotely, or even add more (if it * has selected some join clauses for remote use but also wants them * rechecked locally). @@ -2615,7 +2615,7 @@ replace_nestloop_params_mutator(Node *node, PlannerInfo *root) * * Note that after doing this, we might have different * representations of the contents of the same PHV in different - * parts of the plan tree. This is OK because equal() will just + * parts of the plan tree. This is OK because equal() will just * match on phid/phlevelsup, so setrefs.c will still recognize an * upper-level reference to a lower-level copy of the same PHV. */ @@ -2793,7 +2793,7 @@ fix_indexqual_references(PlannerInfo *root, IndexPath *index_path) /* * Check to see if the indexkey is on the right; if so, commute - * the clause. The indexkey should be the side that refers to + * the clause. The indexkey should be the side that refers to * (only) the base relation. 
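
Commuting a clause so that the index key ends up on the left, as described just above, amounts to swapping the two operands and replacing the operator with its commutator. The struct, the name-based "is the indexkey on the right?" test, and the tiny commutator table below are simplified stand-ins for OpExpr, the RestrictInfo relids check, and the pg_operator lookup.

    #include <stdio.h>
    #include <string.h>

    /* toy stand-in for a binary OpExpr */
    typedef struct
    {
        const char *lhs;
        const char *op;
        const char *rhs;
    } Clause;

    static const char *
    commutator(const char *op)
    {
        if (strcmp(op, "<") == 0)  return ">";
        if (strcmp(op, ">") == 0)  return "<";
        if (strcmp(op, "<=") == 0) return ">=";
        if (strcmp(op, ">=") == 0) return "<=";
        return op;              /* "=" is its own commutator */
    }

    int
    main(void)
    {
        Clause  c = {"42", "<", "t.indexed_col"};   /* constant op indexkey */

        if (strcmp(c.lhs, "t.indexed_col") != 0)    /* indexkey on the right? */
        {
            const char *tmp = c.lhs;

            c.lhs = c.rhs;                  /* swap the operands ... */
            c.rhs = tmp;
            c.op = commutator(c.op);        /* ... and flip the operator */
        }
        printf("index qual: %s %s %s\n", c.lhs, c.op, c.rhs);
        return 0;
    }

This turns "42 < t.indexed_col" into "t.indexed_col > 42", which is the form with the index key on the left that the index machinery expects.
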
*/ if (!bms_equal(rinfo->left_relids, index->rel->relids)) @@ -2887,7 +2887,7 @@ fix_indexqual_references(PlannerInfo *root, IndexPath *index_path) * * This is a simplified version of fix_indexqual_references. The input does * not have RestrictInfo nodes, and we assume that indxpath.c already - * commuted the clauses to put the index keys on the left. Also, we don't + * commuted the clauses to put the index keys on the left. Also, we don't * bother to support any cases except simple OpExprs, since nothing else * is allowed for ordering operators. */ @@ -3126,7 +3126,7 @@ order_qual_clauses(PlannerInfo *root, List *clauses) /* * Sort. We don't use qsort() because it's not guaranteed stable for - * equal keys. The expected number of entries is small enough that a + * equal keys. The expected number of entries is small enough that a * simple insertion sort should be good enough. */ for (i = 1; i < nitems; i++) @@ -3771,7 +3771,7 @@ make_sort(PlannerInfo *root, Plan *lefttree, int numCols, * prepare_sort_from_pathkeys * Prepare to sort according to given pathkeys * - * This is used to set up for both Sort and MergeAppend nodes. It calculates + * This is used to set up for both Sort and MergeAppend nodes. It calculates * the executor's representation of the sort key information, and adjusts the * plan targetlist if needed to add resjunk sort columns. * @@ -3784,7 +3784,7 @@ make_sort(PlannerInfo *root, Plan *lefttree, int numCols, * * We must convert the pathkey information into arrays of sort key column * numbers, sort operator OIDs, collation OIDs, and nulls-first flags, - * which is the representation the executor wants. These are returned into + * which is the representation the executor wants. These are returned into * the output parameters *p_numsortkeys etc. * * When looking for matches to an EquivalenceClass's members, we will only @@ -4229,7 +4229,7 @@ make_material(Plan *lefttree) * materialize_finished_plan: stick a Material node atop a completed plan * * There are a couple of places where we want to attach a Material node - * after completion of subquery_planner(). This currently requires hackery. + * after completion of subquery_planner(). This currently requires hackery. * Since subquery_planner has already run SS_finalize_plan on the subplan * tree, we have to kluge up parameter lists for the Material node. * Possibly this could be fixed by postponing SS_finalize_plan processing @@ -4435,7 +4435,7 @@ make_group(PlannerInfo *root, /* * distinctList is a list of SortGroupClauses, identifying the targetlist items - * that should be considered by the Unique filter. The input path must + * that should be considered by the Unique filter. The input path must * already be sorted accordingly. */ Unique * @@ -4453,7 +4453,7 @@ make_unique(Plan *lefttree, List *distinctList) /* * Charge one cpu_operator_cost per comparison per input tuple. We assume - * all columns get compared at most of the tuples. (XXX probably this is + * all columns get compared at most of the tuples. (XXX probably this is * an overestimate.) */ plan->total_cost += cpu_operator_cost * plan->plan_rows * numCols; diff --git a/src/backend/optimizer/plan/initsplan.c b/src/backend/optimizer/plan/initsplan.c index b57bfd21760..f88e493edb8 100644 --- a/src/backend/optimizer/plan/initsplan.c +++ b/src/backend/optimizer/plan/initsplan.c @@ -87,12 +87,12 @@ static void check_hashjoinable(RestrictInfo *restrictinfo); * appearing in the jointree. 
* * The initial invocation must pass root->parse->jointree as the value of - * jtnode. Internally, the function recurses through the jointree. + * jtnode. Internally, the function recurses through the jointree. * * At the end of this process, there should be one baserel RelOptInfo for * every non-join RTE that is used in the query. Therefore, this routine * is the only place that should call build_simple_rel with reloptkind - * RELOPT_BASEREL. (Note: build_simple_rel recurses internally to build + * RELOPT_BASEREL. (Note: build_simple_rel recurses internally to build * "other rel" RelOptInfos for the members of any appendrels we find here.) */ void @@ -234,10 +234,10 @@ add_vars_to_targetlist(PlannerInfo *root, List *vars, * means setting suitable where_needed values for them. * * Note that this only deals with lateral references in unflattened LATERAL - * subqueries. When we flatten a LATERAL subquery, its lateral references + * subqueries. When we flatten a LATERAL subquery, its lateral references * become plain Vars in the parent query, but they may have to be wrapped in * PlaceHolderVars if they need to be forced NULL by outer joins that don't - * also null the LATERAL subquery. That's all handled elsewhere. + * also null the LATERAL subquery. That's all handled elsewhere. * * This has to run before deconstruct_jointree, since it might result in * creation of PlaceHolderInfos. @@ -360,7 +360,7 @@ extract_lateral_references(PlannerInfo *root, RelOptInfo *brel, Index rtindex) /* * We mark the Vars as being "needed" at the LATERAL RTE. This is a bit * of a cheat: a more formal approach would be to mark each one as needed - * at the join of the LATERAL RTE with its source RTE. But it will work, + * at the join of the LATERAL RTE with its source RTE. But it will work, * and it's much less tedious than computing a separate where_needed for * each Var. */ @@ -568,7 +568,7 @@ create_lateral_join_info(PlannerInfo *root) * add_lateral_info * Add a LateralJoinInfo to root->lateral_info_list, if needed * - * We suppress redundant list entries. The passed Relids are copied if saved. + * We suppress redundant list entries. The passed Relids are copied if saved. */ static void add_lateral_info(PlannerInfo *root, Relids lhs, Relids rhs) @@ -615,7 +615,7 @@ add_lateral_info(PlannerInfo *root, Relids lhs, Relids rhs) * deconstruct_jointree * Recursively scan the query's join tree for WHERE and JOIN/ON qual * clauses, and add these to the appropriate restrictinfo and joininfo - * lists belonging to base RelOptInfos. Also, add SpecialJoinInfo nodes + * lists belonging to base RelOptInfos. Also, add SpecialJoinInfo nodes * to root->join_info_list for any outer joins appearing in the query tree. * Return a "joinlist" data structure showing the join order decisions * that need to be made by make_one_rel(). @@ -632,9 +632,9 @@ add_lateral_info(PlannerInfo *root, Relids lhs, Relids rhs) * be evaluated at the lowest level where all the variables it mentions are * available. However, we cannot push a qual down into the nullable side(s) * of an outer join since the qual might eliminate matching rows and cause a - * NULL row to be incorrectly emitted by the join. Therefore, we artificially + * NULL row to be incorrectly emitted by the join. Therefore, we artificially * OR the minimum-relids of such an outer join into the required_relids of - * clauses appearing above it. This forces those clauses to be delayed until + * clauses appearing above it. 
This forces those clauses to be delayed until * application of the outer join (or maybe even higher in the join tree). */ List * @@ -755,7 +755,7 @@ deconstruct_recurse(PlannerInfo *root, Node *jtnode, bool below_outer_join, *inner_join_rels = *qualscope; /* - * Try to process any quals postponed by children. If they need + * Try to process any quals postponed by children. If they need * further postponement, add them to my output postponed_qual_list. */ foreach(l, child_postponed_quals) @@ -807,7 +807,7 @@ deconstruct_recurse(PlannerInfo *root, Node *jtnode, bool below_outer_join, * regard for whether this level is an outer join, which is correct. * Then we place our own join quals, which are restricted by lower * outer joins in any case, and are forced to this level if this is an - * outer join and they mention the outer side. Finally, if this is an + * outer join and they mention the outer side. Finally, if this is an * outer join, we create a join_info_list entry for the join. This * will prevent quals above us in the join tree that use those rels * from being pushed down below this level. (It's okay for upper @@ -897,7 +897,7 @@ deconstruct_recurse(PlannerInfo *root, Node *jtnode, bool below_outer_join, nullable_rels); /* - * Try to process any quals postponed by children. If they need + * Try to process any quals postponed by children. If they need * further postponement, add them to my output postponed_qual_list. * Quals that can be processed now must be included in my_quals, so * that they'll be handled properly in make_outerjoininfo. @@ -1059,7 +1059,7 @@ make_outerjoininfo(PlannerInfo *root, * complain if any nullable rel is FOR [KEY] UPDATE/SHARE. * * You might be wondering why this test isn't made far upstream in the - * parser. It's because the parser hasn't got enough info --- consider + * parser. It's because the parser hasn't got enough info --- consider * FOR UPDATE applied to a view. Only after rewriting and flattening do * we know whether the view contains an outer join. * @@ -1074,8 +1074,8 @@ make_outerjoininfo(PlannerInfo *root, (jointype == JOIN_FULL && bms_is_member(rc->rti, left_rels))) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - /*------ - translator: %s is a SQL row locking clause such as FOR UPDATE */ + /*------ + translator: %s is a SQL row locking clause such as FOR UPDATE */ errmsg("%s cannot be applied to the nullable side of an outer join", LCS_asString(rc->strength)))); } @@ -1117,7 +1117,7 @@ make_outerjoininfo(PlannerInfo *root, min_lefthand = bms_intersect(clause_relids, left_rels); /* - * Similarly for required RHS. But here, we must also include any lower + * Similarly for required RHS. But here, we must also include any lower * inner joins, to ensure we don't try to commute with any of them. */ min_righthand = bms_int_members(bms_union(clause_relids, inner_join_rels), @@ -1169,7 +1169,7 @@ make_outerjoininfo(PlannerInfo *root, * Here, we have to consider that "our join condition" includes any * clauses that syntactically appeared above the lower OJ and below * ours; those are equivalent to degenerate clauses in our OJ and must - * be treated as such. Such clauses obviously can't reference our + * be treated as such. Such clauses obviously can't reference our * LHS, and they must be non-strict for the lower OJ's RHS (else * reduce_outer_joins would have reduced the lower OJ to a plain * join). 
Hence the other ways in which we handle clauses within our @@ -1248,7 +1248,7 @@ make_outerjoininfo(PlannerInfo *root, * distribute_qual_to_rels * Add clause information to either the baserestrictinfo or joininfo list * (depending on whether the clause is a join) of each base relation - * mentioned in the clause. A RestrictInfo node is created and added to + * mentioned in the clause. A RestrictInfo node is created and added to * the appropriate list for each rel. Alternatively, if the clause uses a * mergejoinable operator and is not delayed by outer-join rules, enter * the left- and right-side expressions into the query's list of @@ -1313,7 +1313,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause, * In ordinary SQL, a WHERE or JOIN/ON clause can't reference any rels * that aren't within its syntactic scope; however, if we pulled up a * LATERAL subquery then we might find such references in quals that have - * been pulled up. We need to treat such quals as belonging to the join + * been pulled up. We need to treat such quals as belonging to the join * level that includes every rel they reference. Although we could make * pull_up_subqueries() place such quals correctly to begin with, it's * easier to handle it here. When we find a clause that contains Vars @@ -1357,10 +1357,10 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause, * gating Result plan node. We put such a clause into the regular * RestrictInfo lists for the moment, but eventually createplan.c will * pull it out and make a gating Result node immediately above whatever - * plan node the pseudoconstant clause is assigned to. It's usually best + * plan node the pseudoconstant clause is assigned to. It's usually best * to put a gating node as high in the plan tree as possible. If we are * not below an outer join, we can actually push the pseudoconstant qual - * all the way to the top of the tree. If we are below an outer join, we + * all the way to the top of the tree. If we are below an outer join, we * leave the qual at its original syntactic level (we could push it up to * just below the outer join, but that seems more complex than it's * worth). @@ -1414,7 +1414,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause, * Note: it is not immediately obvious that a simple boolean is enough * for this: if for some reason we were to attach a degenerate qual to * its original join level, it would need to be treated as an outer join - * qual there. However, this cannot happen, because all the rels the + * qual there. However, this cannot happen, because all the rels the * clause mentions must be in the outer join's min_righthand, therefore * the join it needs must be formed before the outer join; and we always * attach quals to the lowest level where they can be evaluated. But @@ -1448,7 +1448,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause, * We can't use such a clause to deduce equivalence (the left and * right sides might be unequal above the join because one of them has * gone to NULL) ... but we might be able to use it for more limited - * deductions, if it is mergejoinable. So consider adding it to the + * deductions, if it is mergejoinable. So consider adding it to the * lists of set-aside outer-join clauses. */ is_pushed_down = false; @@ -1478,7 +1478,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause, else { /* - * Normal qual clause or degenerate outer-join clause. Either way, we + * Normal qual clause or degenerate outer-join clause. Either way, we * can mark it as pushed-down. 
*/ is_pushed_down = true; @@ -1598,7 +1598,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause, * * In all cases, it's important to initialize the left_ec and right_ec * fields of a mergejoinable clause, so that all possibly mergejoinable - * expressions have representations in EquivalenceClasses. If + * expressions have representations in EquivalenceClasses. If * process_equivalence is successful, it will take care of that; * otherwise, we have to call initialize_mergeclause_eclasses to do it. */ @@ -1674,7 +1674,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause, * For an is_pushed_down qual, we can evaluate the qual as soon as (1) we have * all the rels it mentions, and (2) we are at or above any outer joins that * can null any of these rels and are below the syntactic location of the - * given qual. We must enforce (2) because pushing down such a clause below + * given qual. We must enforce (2) because pushing down such a clause below * the OJ might cause the OJ to emit null-extended rows that should not have * been formed, or that should have been rejected by the clause. (This is * only an issue for non-strict quals, since if we can prove a qual mentioning @@ -1700,7 +1700,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause, * required relids overlap the LHS too) causes that OJ's delay_upper_joins * flag to be set TRUE. This will prevent any higher-level OJs from * being interchanged with that OJ, which would result in not having any - * correct place to evaluate the qual. (The case we care about here is a + * correct place to evaluate the qual. (The case we care about here is a * sub-select WHERE clause within the RHS of some outer join. The WHERE * clause must effectively be treated as a degenerate clause of that outer * join's condition. Rather than trying to match such clauses with joins @@ -1928,7 +1928,7 @@ distribute_restrictinfo_to_rels(PlannerInfo *root, * that provides all its variables. * * "nullable_relids" is the set of relids used in the expressions that are - * potentially nullable below the expressions. (This has to be supplied by + * potentially nullable below the expressions. (This has to be supplied by * caller because this function is used after deconstruct_jointree, so we * don't have knowledge of where the clause items came from.) * @@ -2098,7 +2098,7 @@ check_mergejoinable(RestrictInfo *restrictinfo) * info fields in the restrictinfo. * * Currently, we support hashjoin for binary opclauses where - * the operator is a hashjoinable operator. The arguments can be + * the operator is a hashjoinable operator. The arguments can be * anything --- as long as there are no volatile functions in them. */ static void diff --git a/src/backend/optimizer/plan/planagg.c b/src/backend/optimizer/plan/planagg.c index 7937ff00e05..94ca92d78e7 100644 --- a/src/backend/optimizer/plan/planagg.c +++ b/src/backend/optimizer/plan/planagg.c @@ -10,9 +10,9 @@ * ORDER BY col ASC/DESC * LIMIT 1) * Given a suitable index on tab.col, this can be much faster than the - * generic scan-all-the-rows aggregation plan. We can handle multiple + * generic scan-all-the-rows aggregation plan. We can handle multiple * MIN/MAX aggregates by generating multiple subqueries, and their - * orderings can be different. However, if the query contains any + * orderings can be different. However, if the query contains any * non-optimizable aggregates, there's no point since we'll have to * scan all the rows anyway. 
* @@ -128,7 +128,7 @@ preprocess_minmax_aggregates(PlannerInfo *root, List *tlist) /* * Scan the tlist and HAVING qual to find all the aggregates and verify - * all are MIN/MAX aggregates. Stop as soon as we find one that isn't. + * all are MIN/MAX aggregates. Stop as soon as we find one that isn't. */ aggs_list = NIL; if (find_minmax_aggs_walker((Node *) tlist, &aggs_list)) @@ -163,7 +163,7 @@ preprocess_minmax_aggregates(PlannerInfo *root, List *tlist) * We can use either an ordering that gives NULLS FIRST or one that * gives NULLS LAST; furthermore there's unlikely to be much * performance difference between them, so it doesn't seem worth - * costing out both ways if we get a hit on the first one. NULLS + * costing out both ways if we get a hit on the first one. NULLS * FIRST is more likely to be available if the operator is a * reverse-sort operator, so try that first if reverse. */ diff --git a/src/backend/optimizer/plan/planmain.c b/src/backend/optimizer/plan/planmain.c index 3ea916f1661..93484a0cd59 100644 --- a/src/backend/optimizer/plan/planmain.c +++ b/src/backend/optimizer/plan/planmain.c @@ -33,7 +33,7 @@ * which may involve joins but not any fancier features. * * Since query_planner does not handle the toplevel processing (grouping, - * sorting, etc) it cannot select the best path by itself. Instead, it + * sorting, etc) it cannot select the best path by itself. Instead, it * returns the RelOptInfo for the top level of joining, and the caller * (grouping_planner) can choose one of the surviving paths for the rel. * Normally it would choose either the rel's cheapest path, or the cheapest @@ -63,7 +63,7 @@ query_planner(PlannerInfo *root, List *tlist, /* * If the query has an empty join tree, then it's something easy like - * "SELECT 2+2;" or "INSERT ... VALUES()". Fall through quickly. + * "SELECT 2+2;" or "INSERT ... VALUES()". Fall through quickly. */ if (parse->jointree->fromlist == NIL) { @@ -129,7 +129,7 @@ query_planner(PlannerInfo *root, List *tlist, /* * Examine the targetlist and join tree, adding entries to baserel * targetlists for all referenced Vars, and generating PlaceHolderInfo - * entries for all referenced PlaceHolderVars. Restrict and join clauses + * entries for all referenced PlaceHolderVars. Restrict and join clauses * are added to appropriate lists belonging to the mentioned relations. We * also build EquivalenceClasses for provably equivalent expressions. The * SpecialJoinInfo list is also built to hold information about join order @@ -153,7 +153,7 @@ query_planner(PlannerInfo *root, List *tlist, /* * If we formed any equivalence classes, generate additional restriction - * clauses as appropriate. (Implied join clauses are formed on-the-fly + * clauses as appropriate. (Implied join clauses are formed on-the-fly * later.) */ generate_base_implied_equalities(root); @@ -168,14 +168,14 @@ query_planner(PlannerInfo *root, List *tlist, /* * Examine any "placeholder" expressions generated during subquery pullup. * Make sure that the Vars they need are marked as needed at the relevant - * join level. This must be done before join removal because it might + * join level. This must be done before join removal because it might * cause Vars or placeholders to be needed above a join when they weren't * so marked before. */ fix_placeholder_input_needed_levels(root); /* - * Remove any useless outer joins. Ideally this would be done during + * Remove any useless outer joins. 
Ideally this would be done during * jointree preprocessing, but the necessary information isn't available * until we've built baserel data structures and classified qual clauses. */ diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c index 0508d16902b..0f1e2e46802 100644 --- a/src/backend/optimizer/plan/planner.c +++ b/src/backend/optimizer/plan/planner.c @@ -192,7 +192,7 @@ standard_planner(Query *parse, int cursorOptions, ParamListInfo boundParams) /* * We document cursor_tuple_fraction as simply being a fraction, which - * means the edge cases 0 and 1 have to be treated specially here. We + * means the edge cases 0 and 1 have to be treated specially here. We * convert 1 to 0 ("all the tuples") and 0 to a very small fraction. */ if (tuple_fraction >= 1.0) @@ -386,7 +386,7 @@ subquery_planner(PlannerGlobal *glob, Query *parse, } /* - * Preprocess RowMark information. We need to do this after subquery + * Preprocess RowMark information. We need to do this after subquery * pullup (so that all non-inherited RTEs are present) and before * inheritance expansion (so that the info is available for * expand_inherited_tables to examine and modify). @@ -506,7 +506,7 @@ subquery_planner(PlannerGlobal *glob, Query *parse, * to execute that we're better off doing it only once per group, despite * the loss of selectivity. This is hard to estimate short of doing the * entire planning process twice, so we use a heuristic: clauses - * containing subplans are left in HAVING. Otherwise, we move or copy the + * containing subplans are left in HAVING. Otherwise, we move or copy the * HAVING clause into WHERE, in hopes of eliminating tuples before * aggregation instead of after. * @@ -916,8 +916,8 @@ inheritance_planner(PlannerInfo *root) subplan = grouping_planner(&subroot, 0.0 /* retrieve all tuples */ ); /* - * Planning may have modified the query result relation (if there - * were security barrier quals on the result RTE). + * Planning may have modified the query result relation (if there were + * security barrier quals on the result RTE). */ appinfo->child_relid = subroot.parse->resultRelation; @@ -940,7 +940,8 @@ inheritance_planner(PlannerInfo *root) else { List *tmp_rtable = NIL; - ListCell *cell1, *cell2; + ListCell *cell1, + *cell2; /* * Check to see if any of the original RTEs were turned into @@ -1108,7 +1109,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) /* * If there's a top-level ORDER BY, assume we have to fetch all the - * tuples. This might be too simplistic given all the hackery below + * tuples. This might be too simplistic given all the hackery below * to possibly avoid the sort; but the odds of accurate estimates here * are pretty low anyway. */ @@ -1135,7 +1136,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) /* * We should not need to call preprocess_targetlist, since we must be - * in a SELECT query node. Instead, use the targetlist returned by + * in a SELECT query node. Instead, use the targetlist returned by * plan_set_operations (since this tells whether it returned any * resjunk columns!), and transfer any sort key information from the * original tlist. 
@@ -1152,11 +1153,11 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) if (parse->rowMarks) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - /*------ - translator: %s is a SQL row locking clause such as FOR UPDATE */ + /*------ + translator: %s is a SQL row locking clause such as FOR UPDATE */ errmsg("%s is not allowed with UNION/INTERSECT/EXCEPT", LCS_asString(((RowMarkClause *) - linitial(parse->rowMarks))->strength)))); + linitial(parse->rowMarks))->strength)))); /* * Calculate pathkeys that represent result ordering requirements @@ -1279,7 +1280,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) /* * Generate the best unsorted and presorted paths for this Query (but - * note there may not be any presorted paths). We also generate (in + * note there may not be any presorted paths). We also generate (in * standard_qp_callback) pathkey representations of the query's sort * clause, distinct clause, etc. */ @@ -1314,7 +1315,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) /* * In GROUP BY mode, an absolute LIMIT is relative to the number - * of groups not the number of tuples. If the caller gave us a + * of groups not the number of tuples. If the caller gave us a * fraction, keep it as-is. (In both cases, we are effectively * assuming that all the groups are about the same size.) */ @@ -1673,7 +1674,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) * Furthermore, there cannot be any variables in either HAVING * or the targetlist, so we actually do not need the FROM * table at all! We can just throw away the plan-so-far and - * generate a Result node. This is a sufficiently unusual + * generate a Result node. This is a sufficiently unusual * corner case that it's not worth contorting the structure of * this routine to avoid having to generate the plan in the * first place. @@ -1717,14 +1718,14 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) /* * The "base" targetlist for all steps of the windowing process is - * a flat tlist of all Vars and Aggs needed in the result. (In + * a flat tlist of all Vars and Aggs needed in the result. (In * some cases we wouldn't need to propagate all of these all the * way to the top, since they might only be needed as inputs to * WindowFuncs. It's probably not worth trying to optimize that * though.) We also add window partitioning and sorting * expressions to the base tlist, to ensure they're computed only * once at the bottom of the stack (that's critical for volatile - * functions). As we climb up the stack, we'll add outputs for + * functions). As we climb up the stack, we'll add outputs for * the WindowFuncs computed at each level. */ window_tlist = make_windowInputTargetList(root, @@ -1733,7 +1734,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) /* * The copyObject steps here are needed to ensure that each plan - * node has a separately modifiable tlist. (XXX wouldn't a + * node has a separately modifiable tlist. (XXX wouldn't a * shallow list copy do for that?) */ result_plan->targetlist = (List *) copyObject(window_tlist); @@ -2018,7 +2019,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) * * Once grouping_planner() has applied a general tlist to the topmost * scan/join plan node, any tlist eval cost for added-on nodes should be - * accounted for as we create those nodes. Presently, of the node types we + * accounted for as we create those nodes. 
Presently, of the node types we * can add on later, only Agg, WindowAgg, and Group project new tlists (the * rest just copy their input tuples) --- so make_agg(), make_windowagg() and * make_group() are responsible for calling this function to account for their @@ -2150,7 +2151,7 @@ preprocess_rowmarks(PlannerInfo *root) * insufficient because of rule substitution, query pullup, etc. */ CheckSelectLocking(parse, ((RowMarkClause *) - linitial(parse->rowMarks))->strength); + linitial(parse->rowMarks))->strength); } else { @@ -2184,7 +2185,7 @@ preprocess_rowmarks(PlannerInfo *root) /* * Currently, it is syntactically impossible to have FOR UPDATE et al - * applied to an update/delete target rel. If that ever becomes + * applied to an update/delete target rel. If that ever becomes * possible, we should drop the target from the PlanRowMark list. */ Assert(rc->rti != parse->resultRelation); @@ -2268,7 +2269,7 @@ preprocess_rowmarks(PlannerInfo *root) * preprocess_limit - do pre-estimation for LIMIT and/or OFFSET clauses * * We try to estimate the values of the LIMIT/OFFSET clauses, and pass the - * results back in *count_est and *offset_est. These variables are set to + * results back in *count_est and *offset_est. These variables are set to * 0 if the corresponding clause is not present, and -1 if it's present * but we couldn't estimate the value for it. (The "0" convention is OK * for OFFSET but a little bit bogus for LIMIT: effectively we estimate @@ -2277,7 +2278,7 @@ preprocess_rowmarks(PlannerInfo *root) * be passed to make_limit, which see if you change this code. * * The return value is the suitably adjusted tuple_fraction to use for - * planning the query. This adjustment is not overridable, since it reflects + * planning the query. This adjustment is not overridable, since it reflects * plan actions that grouping_planner() will certainly take, not assumptions * about context. */ @@ -2401,7 +2402,7 @@ preprocess_limit(PlannerInfo *root, double tuple_fraction, else if (*offset_est != 0 && tuple_fraction > 0.0) { /* - * We have an OFFSET but no LIMIT. This acts entirely differently + * We have an OFFSET but no LIMIT. This acts entirely differently * from the LIMIT case: here, we need to increase rather than decrease * the caller's tuple_fraction, because the OFFSET acts to cause more * tuples to be fetched instead of fewer. This only matters if we got @@ -2416,7 +2417,7 @@ preprocess_limit(PlannerInfo *root, double tuple_fraction, /* * If we have absolute counts from both caller and OFFSET, add them - * together; likewise if they are both fractional. If one is + * together; likewise if they are both fractional. If one is * fractional and the other absolute, we want to take the larger, and * we heuristically assume that's the fractional one. */ @@ -2457,7 +2458,7 @@ preprocess_limit(PlannerInfo *root, double tuple_fraction, * * If we have constant-zero OFFSET and constant-null LIMIT, we can skip adding * a Limit node. This is worth checking for because "OFFSET 0" is a common - * locution for an optimization fence. (Because other places in the planner + * locution for an optimization fence. (Because other places in the planner * merely check whether parse->limitOffset isn't NULL, it will still work as * an optimization fence --- we're just suppressing unnecessary run-time * overhead.) @@ -2700,7 +2701,7 @@ choose_hashed_grouping(PlannerInfo *root, /* * Executor doesn't support hashed aggregation with DISTINCT or ORDER BY - * aggregates. 
(Doing so would imply storing *all* the input values in + * aggregates. (Doing so would imply storing *all* the input values in * the hash table, and/or running many sorts in parallel, either of which * seems like a certain loser.) We similarly don't support ordered-set * aggregates in hashed aggregation, but that case is included in the @@ -2840,7 +2841,7 @@ choose_hashed_grouping(PlannerInfo *root, * pass in the costs as individual variables.) * * But note that making the two choices independently is a bit bogus in - * itself. If the two could be combined into a single choice operation + * itself. If the two could be combined into a single choice operation * it'd probably be better, but that seems far too unwieldy to be practical, * especially considering that the combination of GROUP BY and DISTINCT * isn't very common in real queries. By separating them, we are giving @@ -2937,7 +2938,7 @@ choose_hashed_distinct(PlannerInfo *root, 0.0, work_mem, limit_tuples); /* - * Now for the GROUP case. See comments in grouping_planner about the + * Now for the GROUP case. See comments in grouping_planner about the * sorting choices here --- this code should match that code. */ sorted_p.startup_cost = sorted_startup_cost; @@ -3127,7 +3128,7 @@ make_subplanTargetList(PlannerInfo *root, * add them to the result tlist if not already present. (A Var used * directly as a GROUP BY item will be present already.) Note this * includes Vars used in resjunk items, so we are covering the needs of - * ORDER BY and window specifications. Vars used within Aggrefs will be + * ORDER BY and window specifications. Vars used within Aggrefs will be * pulled out here, too. */ non_group_vars = pull_var_clause((Node *) non_group_cols, @@ -3178,7 +3179,7 @@ get_grouping_column_index(Query *parse, TargetEntry *tle) * Locate grouping columns in the tlist chosen by create_plan. * * This is only needed if we don't use the sub_tlist chosen by - * make_subplanTargetList. We have to forget the column indexes found + * make_subplanTargetList. We have to forget the column indexes found * by that routine and re-locate the grouping exprs in the real sub_tlist. * We assume the grouping exprs are just Vars (see make_subplanTargetList). */ @@ -3209,11 +3210,11 @@ locate_grouping_columns(PlannerInfo *root, /* * The grouping column returned by create_plan might not have the same - * typmod as the original Var. (This can happen in cases where a + * typmod as the original Var. (This can happen in cases where a * set-returning function has been inlined, so that we now have more * knowledge about what it returns than we did when the original Var * was created.) So we can't use tlist_member() to search the tlist; - * instead use tlist_member_match_var. For safety, still check that + * instead use tlist_member_match_var. For safety, still check that * the vartype matches. */ if (!(groupexpr && IsA(groupexpr, Var))) @@ -3339,7 +3340,7 @@ select_active_windows(PlannerInfo *root, WindowFuncLists *wflists) * * When grouping_planner inserts one or more WindowAgg nodes into the plan, * this function computes the initial target list to be computed by the node - * just below the first WindowAgg. This list must contain all values needed + * just below the first WindowAgg. This list must contain all values needed * to evaluate the window functions, compute the final target list, and * perform any required final sort step. 
If multiple WindowAggs are needed, * each intermediate one adds its window function results onto this tlist; @@ -3347,7 +3348,7 @@ select_active_windows(PlannerInfo *root, WindowFuncLists *wflists) * * This function is much like make_subplanTargetList, though not quite enough * like it to share code. As in that function, we flatten most expressions - * into their component variables. But we do not want to flatten window + * into their component variables. But we do not want to flatten window * PARTITION BY/ORDER BY clauses, since that might result in multiple * evaluations of them, which would be bad (possibly even resulting in * inconsistent answers, if they contain volatile functions). Also, we must @@ -3520,7 +3521,7 @@ make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc, * This depends on the behavior of make_pathkeys_for_window()! * * We are given the target WindowClause and an array of the input column - * numbers associated with the resulting pathkeys. In the easy case, there + * numbers associated with the resulting pathkeys. In the easy case, there * are the same number of pathkey columns as partitioning + ordering columns * and we just have to copy some data around. However, it's possible that * some of the original partitioning + ordering columns were eliminated as @@ -3532,7 +3533,7 @@ make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc, * determine which keys are significant. * * The method used here is a bit brute-force: add the sort columns to a list - * one at a time and note when the resulting pathkey list gets longer. But + * one at a time and note when the resulting pathkey list gets longer. But * it's a sufficiently uncommon case that a faster way doesn't seem worth * the amount of code refactoring that'd be needed. *---------- diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c index 46affe7dad0..768c5c76704 100644 --- a/src/backend/optimizer/plan/setrefs.c +++ b/src/backend/optimizer/plan/setrefs.c @@ -145,7 +145,7 @@ static bool extract_query_dependencies_walker(Node *node, /* * set_plan_references * - * This is the final processing pass of the planner/optimizer. The plan + * This is the final processing pass of the planner/optimizer. The plan * tree is complete; we just have to adjust some representational details * for the convenience of the executor: * @@ -189,7 +189,7 @@ static bool extract_query_dependencies_walker(Node *node, * and root->glob->invalItems (for everything else). * * Notice that we modify Plan nodes in-place, but use expression_tree_mutator - * to process targetlist and qual expressions. We can assume that the Plan + * to process targetlist and qual expressions. We can assume that the Plan * nodes were just built by the planner and are not multiply referenced, but * it's not so safe to assume that for expression tree nodes. */ @@ -262,7 +262,7 @@ add_rtes_to_flat_rtable(PlannerInfo *root, bool recursing) /* * If there are any dead subqueries, they are not referenced in the Plan * tree, so we must add RTEs contained in them to the flattened rtable - * separately. (If we failed to do this, the executor would not perform + * separately. (If we failed to do this, the executor would not perform * expected permission checks for tables mentioned in such subqueries.) 
* * Note: this pass over the rangetable can't be combined with the previous @@ -292,7 +292,7 @@ add_rtes_to_flat_rtable(PlannerInfo *root, bool recursing) /* * The subquery might never have been planned at all, if it * was excluded on the basis of self-contradictory constraints - * in our query level. In this case apply + * in our query level. In this case apply * flatten_unplanned_rtes. * * If it was planned but the plan is dummy, we assume that it @@ -591,7 +591,7 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset) /* * These plan types don't actually bother to evaluate their * targetlists, because they just return their unmodified input - * tuples. Even though the targetlist won't be used by the + * tuples. Even though the targetlist won't be used by the * executor, we fix it up for possible use by EXPLAIN (not to * mention ease of debugging --- wrong varnos are very confusing). */ @@ -609,7 +609,7 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset) /* * Like the plan types above, LockRows doesn't evaluate its - * tlist or quals. But we have to fix up the RT indexes in + * tlist or quals. But we have to fix up the RT indexes in * its rowmarks. */ set_dummy_tlist_references(plan, rtoffset); @@ -727,7 +727,7 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset) * Set up the visible plan targetlist as being the same as * the first RETURNING list. This is for the use of * EXPLAIN; the executor won't pay any attention to the - * targetlist. We postpone this step until here so that + * targetlist. We postpone this step until here so that * we don't have to do set_returning_clause_references() * twice on identical targetlists. */ @@ -953,7 +953,7 @@ set_subqueryscan_references(PlannerInfo *root, else { /* - * Keep the SubqueryScan node. We have to do the processing that + * Keep the SubqueryScan node. We have to do the processing that * set_plan_references would otherwise have done on it. Notice we do * not do set_upper_references() here, because a SubqueryScan will * always have been created with correct references to its subplan's @@ -1425,7 +1425,7 @@ set_dummy_tlist_references(Plan *plan, int rtoffset) * * In most cases, subplan tlists will be "flat" tlists with only Vars, * so we try to optimize that case by extracting information about Vars - * in advance. Matching a parent tlist to a child is still an O(N^2) + * in advance. Matching a parent tlist to a child is still an O(N^2) * operation, but at least with a much smaller constant factor than plain * tlist_member() searches. * @@ -1870,7 +1870,7 @@ fix_upper_expr_mutator(Node *node, fix_upper_expr_context *context) * adjust any Vars that refer to other tables to reference junk tlist * entries in the top subplan's targetlist. Vars referencing the result * table should be left alone, however (the executor will evaluate them - * using the actual heap tuple, after firing triggers if any). In the + * using the actual heap tuple, after firing triggers if any). In the * adjusted RETURNING list, result-table Vars will have their original * varno (plus rtoffset), but Vars for other rels will have varno OUTER_VAR. * diff --git a/src/backend/optimizer/plan/subselect.c b/src/backend/optimizer/plan/subselect.c index a3f358377da..be92049ec4d 100644 --- a/src/backend/optimizer/plan/subselect.c +++ b/src/backend/optimizer/plan/subselect.c @@ -434,7 +434,7 @@ make_subplan(PlannerInfo *root, Query *orig_subquery, SubLinkType subLinkType, Node *result; /* - * Copy the source Query node. 
This is a quick and dirty kluge to resolve + * Copy the source Query node. This is a quick and dirty kluge to resolve * the fact that the parser can generate trees with multiple links to the * same sub-Query node, but the planner wants to scribble on the Query. * Try to clean this up when we do querytree redesign... @@ -459,7 +459,7 @@ make_subplan(PlannerInfo *root, Query *orig_subquery, SubLinkType subLinkType, * path/costsize.c. * * XXX If an ANY subplan is uncorrelated, build_subplan may decide to hash - * its output. In that case it would've been better to specify full + * its output. In that case it would've been better to specify full * retrieval. At present, however, we can only check hashability after * we've made the subplan :-(. (Determining whether it'll fit in work_mem * is the really hard part.) Therefore, we don't want to be too @@ -496,7 +496,7 @@ make_subplan(PlannerInfo *root, Query *orig_subquery, SubLinkType subLinkType, /* * If it's a correlated EXISTS with an unimportant targetlist, we might be * able to transform it to the equivalent of an IN and then implement it - * by hashing. We don't have enough information yet to tell which way is + * by hashing. We don't have enough information yet to tell which way is * likely to be better (it depends on the expected number of executions of * the EXISTS qual, and we are much too early in planning the outer query * to be able to guess that). So we generate both plans, if possible, and @@ -724,7 +724,7 @@ build_subplan(PlannerInfo *root, Plan *plan, PlannerInfo *subroot, * Otherwise, we have the option to tack a Material node onto the top * of the subplan, to reduce the cost of reading it repeatedly. This * is pointless for a direct-correlated subplan, since we'd have to - * recompute its results each time anyway. For uncorrelated/undirect + * recompute its results each time anyway. For uncorrelated/undirect * correlated subplans, we add Material unless the subplan's top plan * node would materialize its output anyway. Also, if enable_material * is false, then the user does not want us to materialize anything @@ -750,10 +750,10 @@ build_subplan(PlannerInfo *root, Plan *plan, PlannerInfo *subroot, /* * A parameterless subplan (not initplan) should be prepared to handle - * REWIND efficiently. If it has direct parameters then there's no point + * REWIND efficiently. If it has direct parameters then there's no point * since it'll be reset on each scan anyway; and if it's an initplan then * there's no point since it won't get re-run without parameter changes - * anyway. The input of a hashed subplan doesn't need REWIND either. + * anyway. The input of a hashed subplan doesn't need REWIND either. */ if (splan->parParam == NIL && !isInitPlan && !splan->useHashTable) root->glob->rewindPlanIDs = bms_add_member(root->glob->rewindPlanIDs, @@ -850,7 +850,7 @@ generate_subquery_vars(PlannerInfo *root, List *tlist, Index varno) /* * convert_testexpr: convert the testexpr given by the parser into * actually executable form. This entails replacing PARAM_SUBLINK Params - * with Params or Vars representing the results of the sub-select. The + * with Params or Vars representing the results of the sub-select. The * nodes to be substituted are passed in as the List result from * generate_subquery_params or generate_subquery_vars. */ @@ -952,7 +952,7 @@ testexpr_is_hashable(Node *testexpr) * * The combining operators must be hashable and strict. The need for * hashability is obvious, since we want to use hashing. 
Without - * strictness, behavior in the presence of nulls is too unpredictable. We + * strictness, behavior in the presence of nulls is too unpredictable. We * actually must assume even more than plain strictness: they can't yield * NULL for non-null inputs, either (see nodeSubplan.c). However, hash * indexes and hash joins assume that too. @@ -1060,7 +1060,7 @@ SS_process_ctes(PlannerInfo *root) } /* - * Copy the source Query node. Probably not necessary, but let's keep + * Copy the source Query node. Probably not necessary, but let's keep * this similar to make_subplan. */ subquery = (Query *) copyObject(cte->ctequery); @@ -1086,7 +1086,7 @@ SS_process_ctes(PlannerInfo *root) elog(ERROR, "unexpected outer reference in CTE query"); /* - * Make a SubPlan node for it. This is just enough unlike + * Make a SubPlan node for it. This is just enough unlike * build_subplan that we can't share code. * * Note plan_id, plan_name, and cost fields are set further down. @@ -1309,7 +1309,7 @@ convert_EXISTS_sublink_to_join(PlannerInfo *root, SubLink *sublink, /* * See if the subquery can be simplified based on the knowledge that it's - * being used in EXISTS(). If we aren't able to get rid of its + * being used in EXISTS(). If we aren't able to get rid of its * targetlist, we have to fail, because the pullup operation leaves us * with noplace to evaluate the targetlist. */ @@ -1358,9 +1358,9 @@ convert_EXISTS_sublink_to_join(PlannerInfo *root, SubLink *sublink, * what pull_up_subqueries has to go through. * * In fact, it's even easier than what convert_ANY_sublink_to_join has to - * do. The machinations of simplify_EXISTS_query ensured that there is + * do. The machinations of simplify_EXISTS_query ensured that there is * nothing interesting in the subquery except an rtable and jointree, and - * even the jointree FromExpr no longer has quals. So we can just append + * even the jointree FromExpr no longer has quals. So we can just append * the rtable to our own and use the FromExpr in our jointree. But first, * adjust all level-zero varnos in the subquery to account for the rtable * merger. @@ -1491,7 +1491,7 @@ simplify_EXISTS_query(Query *query) * * On success, the modified subselect is returned, and we store a suitable * upper-level test expression at *testexpr, plus a list of the subselect's - * output Params at *paramIds. (The test expression is already Param-ified + * output Params at *paramIds. (The test expression is already Param-ified * and hence need not go through convert_testexpr, which is why we have to * deal with the Param IDs specially.) * @@ -1654,7 +1654,7 @@ convert_EXISTS_to_ANY(PlannerInfo *root, Query *subselect, return NULL; /* - * Also reject sublinks in the stuff we intend to pull up. (It might be + * Also reject sublinks in the stuff we intend to pull up. (It might be * possible to support this, but doesn't seem worth the complication.) */ if (contain_subplans((Node *) leftargs)) @@ -1856,7 +1856,7 @@ process_sublinks_mutator(Node *node, process_sublinks_context *context) * is needed for a bare List.) * * Anywhere within the top-level AND/OR clause structure, we can tell - * make_subplan() that NULL and FALSE are interchangeable. So isTopQual + * make_subplan() that NULL and FALSE are interchangeable. So isTopQual * propagates down in both cases. (Note that this is unlike the meaning * of "top level qual" used in most other places in Postgres.) 
*/ @@ -1962,7 +1962,7 @@ SS_finalize_plan(PlannerInfo *root, Plan *plan, bool attach_initplans) * Now determine the set of params that are validly referenceable in this * query level; to wit, those available from outer query levels plus the * output parameters of any local initPlans. (We do not include output - * parameters of regular subplans. Those should only appear within the + * parameters of regular subplans. Those should only appear within the * testexpr of SubPlan nodes, and are taken care of locally within * finalize_primnode. Likewise, special parameters that are generated by * nodes such as ModifyTable are handled within finalize_plan.) @@ -2138,7 +2138,7 @@ finalize_plan(PlannerInfo *root, Plan *plan, Bitmapset *valid_params, /* * In a SubqueryScan, SS_finalize_plan has already been run on the * subplan by the inner invocation of subquery_planner, so there's - * no need to do it again. Instead, just pull out the subplan's + * no need to do it again. Instead, just pull out the subplan's * extParams list, which represents the params it needs from my * level and higher levels. */ @@ -2500,7 +2500,7 @@ finalize_primnode(Node *node, finalize_primnode_context *context) /* * Remove any param IDs of output parameters of the subplan that were - * referenced in the testexpr. These are not interesting for + * referenced in the testexpr. These are not interesting for * parameter change signaling since we always re-evaluate the subplan. * Note that this wouldn't work too well if there might be uses of the * same param IDs elsewhere in the plan, but that can't happen because @@ -2598,7 +2598,7 @@ SS_make_initplan_from_plan(PlannerInfo *root, Plan *plan, /* Label the subplan for EXPLAIN purposes */ node->plan_name = psprintf("InitPlan %d (returns $%d)", - node->plan_id, prm->paramid); + node->plan_id, prm->paramid); return prm; } diff --git a/src/backend/optimizer/prep/prepjointree.c b/src/backend/optimizer/prep/prepjointree.c index 812e56d4c19..776fe426c3e 100644 --- a/src/backend/optimizer/prep/prepjointree.c +++ b/src/backend/optimizer/prep/prepjointree.c @@ -116,7 +116,7 @@ static Node *find_jointree_node_for_rel(Node *jtnode, int relid); * * A clause "foo op ANY (sub-SELECT)" can be processed by pulling the * sub-SELECT up to become a rangetable entry and treating the implied - * comparisons as quals of a semijoin. However, this optimization *only* + * comparisons as quals of a semijoin. However, this optimization *only* * works at the top level of WHERE or a JOIN/ON clause, because we cannot * distinguish whether the ANY ought to return FALSE or NULL in cases * involving NULL inputs. Also, in an outer join's ON clause we can only @@ -133,7 +133,7 @@ static Node *find_jointree_node_for_rel(Node *jtnode, int relid); * transformations if any are found. * * This routine has to run before preprocess_expression(), so the quals - * clauses are not yet reduced to implicit-AND format. That means we need + * clauses are not yet reduced to implicit-AND format. That means we need * to recursively search through explicit AND clauses, which are * probably only binary ANDs. We stop as soon as we hit a non-AND item. */ @@ -287,7 +287,7 @@ pull_up_sublinks_jointree_recurse(PlannerInfo *root, Node *jtnode, /* * Although we could include the pulled-up subqueries in the returned * relids, there's no need since upper quals couldn't refer to their - * outputs anyway. But we *do* need to include the join's own rtindex + * outputs anyway. 
But we *do* need to include the join's own rtindex * because we haven't yet collapsed join alias variables, so upper * levels would mistakenly think they couldn't use references to this * join. @@ -609,7 +609,7 @@ pull_up_subqueries(PlannerInfo *root, Node *jtnode) * * If this jointree node is within either side of an outer join, then * lowest_outer_join references the lowest such JoinExpr node; otherwise - * it is NULL. We use this to constrain the effects of LATERAL subqueries. + * it is NULL. We use this to constrain the effects of LATERAL subqueries. * * If this jointree node is within the nullable side of an outer join, then * lowest_nulling_outer_join references the lowest such JoinExpr node; @@ -759,7 +759,7 @@ pull_up_subqueries_recurse(PlannerInfo *root, Node *jtnode, * Attempt to pull up a single simple subquery. * * jtnode is a RangeTblRef that has been tentatively identified as a simple - * subquery by pull_up_subqueries. We return the replacement jointree node, + * subquery by pull_up_subqueries. We return the replacement jointree node, * or jtnode itself if we determine that the subquery can't be pulled up after * all. * @@ -792,7 +792,7 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte, * Create a PlannerInfo data structure for this subquery. * * NOTE: the next few steps should match the first processing in - * subquery_planner(). Can we refactor to avoid code duplication, or + * subquery_planner(). Can we refactor to avoid code duplication, or * would that just make things uglier? */ subroot = makeNode(PlannerInfo); @@ -842,7 +842,7 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte, /* * Now we must recheck whether the subquery is still simple enough to pull - * up. If not, abandon processing it. + * up. If not, abandon processing it. * * We don't really need to recheck all the conditions involved, but it's * easier just to keep this "if" looking the same as the one in @@ -859,7 +859,7 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte, * Give up, return unmodified RangeTblRef. * * Note: The work we just did will be redone when the subquery gets - * planned on its own. Perhaps we could avoid that by storing the + * planned on its own. Perhaps we could avoid that by storing the * modified subquery back into the rangetable, but I'm not gonna risk * it now. */ @@ -900,7 +900,7 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte, * non-nullable items and lateral references may have to be turned into * PlaceHolderVars. If we are dealing with an appendrel member then * anything that's not a simple Var has to be turned into a - * PlaceHolderVar. Set up required context data for pullup_replace_vars. + * PlaceHolderVar. Set up required context data for pullup_replace_vars. */ rvcontext.root = root; rvcontext.targetlist = subquery->targetList; @@ -925,7 +925,7 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte, * replace any of the jointree structure. (This'd be a lot cleaner if we * could use query_tree_mutator.) We have to use PHVs in the targetList, * returningList, and havingQual, since those are certainly above any - * outer join. replace_vars_in_jointree tracks its location in the + * outer join. replace_vars_in_jointree tracks its location in the * jointree and uses PHVs or not appropriately. 
*/ parse->targetList = (List *) @@ -1084,7 +1084,7 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte, * Pull up a single simple UNION ALL subquery. * * jtnode is a RangeTblRef that has been identified as a simple UNION ALL - * subquery by pull_up_subqueries. We pull up the leaf subqueries and + * subquery by pull_up_subqueries. We pull up the leaf subqueries and * build an "append relation" for the union set. The result value is just * jtnode, since we don't actually need to change the query jointree. */ @@ -1098,7 +1098,7 @@ pull_up_simple_union_all(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte) /* * Make a modifiable copy of the subquery's rtable, so we can adjust - * upper-level Vars in it. There are no such Vars in the setOperations + * upper-level Vars in it. There are no such Vars in the setOperations * tree proper, so fixing the rtable should be sufficient. */ rtable = copyObject(subquery->rtable); @@ -1370,7 +1370,7 @@ is_simple_subquery(Query *subquery, RangeTblEntry *rte, /* * Don't pull up a subquery that has any set-returning functions in its - * targetlist. Otherwise we might well wind up inserting set-returning + * targetlist. Otherwise we might well wind up inserting set-returning * functions into places where they mustn't go, such as quals of higher * queries. */ @@ -1379,7 +1379,7 @@ is_simple_subquery(Query *subquery, RangeTblEntry *rte, /* * Don't pull up a subquery that has any volatile functions in its - * targetlist. Otherwise we might introduce multiple evaluations of these + * targetlist. Otherwise we might introduce multiple evaluations of these * functions, if they get copied to multiple places in the upper query, * leading to surprising results. (Note: the PlaceHolderVar mechanism * doesn't quite guarantee single evaluation; else we could pull up anyway @@ -1609,7 +1609,7 @@ replace_vars_in_jointree(Node *jtnode, /* * If the RangeTblRef refers to a LATERAL subquery (that isn't the * same subquery we're pulling up), it might contain references to the - * target subquery, which we must replace. We drive this from the + * target subquery, which we must replace. We drive this from the * jointree scan, rather than a scan of the rtable, for a couple of * reasons: we can avoid processing no-longer-referenced RTEs, and we * can use the appropriate setting of need_phvs depending on whether @@ -1770,7 +1770,7 @@ pullup_replace_vars_callback(Var *var, /* * Insert PlaceHolderVar if needed. Notice that we are wrapping one * PlaceHolderVar around the whole RowExpr, rather than putting one - * around each element of the row. This is because we need the + * around each element of the row. This is because we need the * expression to yield NULL, not ROW(NULL,NULL,...) when it is forced * to null by an outer join. */ @@ -1872,7 +1872,7 @@ pullup_replace_vars_callback(Var *var, /* * Cache it if possible (ie, if the attno is in range, which it - * probably always should be). We can cache the value even if we + * probably always should be). We can cache the value even if we * decided we didn't need a PHV, since this result will be * suitable for any request that has need_phvs. */ @@ -1915,7 +1915,7 @@ pullup_replace_vars_subquery(Query *query, * * If a query's setOperations tree consists entirely of simple UNION ALL * operations, flatten it into an append relation, which we can process more - * intelligently than the general setops case. Otherwise, do nothing. + * intelligently than the general setops case. Otherwise, do nothing. 
* * In most cases, this can succeed only for a top-level query, because for a * subquery in FROM, the parent query's invocation of pull_up_subqueries would @@ -2027,7 +2027,7 @@ flatten_simple_union_all(PlannerInfo *root) * SELECT ... FROM a LEFT JOIN b ON (a.x = b.y) WHERE b.y IS NULL; * If the join clause is strict for b.y, then only null-extended rows could * pass the upper WHERE, and we can conclude that what the query is really - * specifying is an anti-semijoin. We change the join type from JOIN_LEFT + * specifying is an anti-semijoin. We change the join type from JOIN_LEFT * to JOIN_ANTI. The IS NULL clause then becomes redundant, and must be * removed to prevent bogus selectivity calculations, but we leave it to * distribute_qual_to_rels to get rid of such clauses. @@ -2267,7 +2267,7 @@ reduce_outer_joins_pass2(Node *jtnode, /* * See if we can reduce JOIN_LEFT to JOIN_ANTI. This is the case if * the join's own quals are strict for any var that was forced null by - * higher qual levels. NOTE: there are other ways that we could + * higher qual levels. NOTE: there are other ways that we could * detect an anti-join, in particular if we were to check whether Vars * coming from the RHS must be non-null because of table constraints. * That seems complicated and expensive though (in particular, one @@ -2425,7 +2425,7 @@ reduce_outer_joins_pass2(Node *jtnode, * pulled-up relid, and change them to reference the replacement relid(s). * * NOTE: although this has the form of a walker, we cheat and modify the - * nodes in-place. This should be OK since the tree was copied by + * nodes in-place. This should be OK since the tree was copied by * pullup_replace_vars earlier. Avoid scribbling on the original values of * the bitmapsets, though, because expression_tree_mutator doesn't copy those. */ diff --git a/src/backend/optimizer/prep/prepqual.c b/src/backend/optimizer/prep/prepqual.c index 812fbaddba9..2a24938d843 100644 --- a/src/backend/optimizer/prep/prepqual.c +++ b/src/backend/optimizer/prep/prepqual.c @@ -54,12 +54,12 @@ static Expr *process_duplicate_ors(List *orlist); * Although this can be invoked on its own, it's mainly intended as a helper * for eval_const_expressions(), and that context drives several design * decisions. In particular, if the input is already AND/OR flat, we must - * preserve that property. We also don't bother to recurse in situations + * preserve that property. We also don't bother to recurse in situations * where we can assume that lower-level executions of eval_const_expressions * would already have simplified sub-clauses of the input. * * The difference between this and a simple make_notclause() is that this - * tries to get rid of the NOT node by logical simplification. It's clearly + * tries to get rid of the NOT node by logical simplification. It's clearly * always a win if the NOT node can be eliminated altogether. However, our * use of DeMorgan's laws could result in having more NOT nodes rather than * fewer. We do that unconditionally anyway, because in WHERE clauses it's @@ -152,7 +152,7 @@ negate_clause(Node *node) * those properties. For example, if no direct child of * the given AND clause is an AND or a NOT-above-OR, then * the recursive calls of negate_clause() can't return any - * OR clauses. So we needn't call pull_ors() before + * OR clauses. So we needn't call pull_ors() before * building a new OR clause. Similarly for the OR case. 
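The negate_clause() comments in the prepqual.c hunk just above describe pushing NOT down through AND/OR with De Morgan's laws. As a rough standalone illustration of that transformation (not planner code -- the Node type and the mk(), negate() and eval() helpers below are invented for the example), a toy boolean tree can be negated and checked against a plain NOT over every truth assignment:

/* Illustrative sketch only: a toy boolean tree showing how NOT is pushed
 * down with De Morgan's laws, as negate_clause() does for real quals.
 * Types and names here are invented, not PostgreSQL's. */
#include <stdio.h>
#include <stdlib.h>

typedef enum { N_VAR, N_AND, N_OR, N_NOT } NodeKind;

typedef struct Node
{
    NodeKind    kind;
    int         varno;          /* for N_VAR: index into the assignment */
    struct Node *left;
    struct Node *right;         /* unused for N_NOT */
} Node;

static Node *
mk(NodeKind kind, int varno, Node *l, Node *r)
{
    Node *n = malloc(sizeof(Node));

    n->kind = kind;
    n->varno = varno;
    n->left = l;
    n->right = r;
    return n;
}

/* Return the negation of "n", pushing NOT down as far as possible. */
static Node *
negate(Node *n)
{
    switch (n->kind)
    {
        case N_VAR:
            return mk(N_NOT, 0, n, NULL);
        case N_NOT:
            return n->left;                              /* NOT NOT x => x */
        case N_AND:                                      /* De Morgan */
            return mk(N_OR, 0, negate(n->left), negate(n->right));
        case N_OR:
            return mk(N_AND, 0, negate(n->left), negate(n->right));
    }
    return NULL;
}

static int
eval(const Node *n, const int *assign)
{
    switch (n->kind)
    {
        case N_VAR: return assign[n->varno];
        case N_NOT: return !eval(n->left, assign);
        case N_AND: return eval(n->left, assign) && eval(n->right, assign);
        case N_OR:  return eval(n->left, assign) || eval(n->right, assign);
    }
    return 0;
}

int
main(void)
{
    /* expr = A AND B; compare NOT(expr) with negate(expr) for all inputs */
    Node *expr = mk(N_AND, 0, mk(N_VAR, 0, NULL, NULL), mk(N_VAR, 1, NULL, NULL));
    Node *neg = negate(expr);

    for (int a = 0; a <= 1; a++)
        for (int b = 0; b <= 1; b++)
        {
            int assign[2] = {a, b};

            printf("A=%d B=%d  NOT(expr)=%d  negate(expr)=%d\n",
                   a, b, !eval(expr, assign), eval(neg, assign));
        }
    return 0;
}

The two result columns agree on every assignment, which is the equivalence the comment relies on when it applies De Morgan's laws unconditionally.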
*-------------------- */ @@ -293,7 +293,7 @@ canonicalize_qual(Expr *qual) /* * Pull up redundant subclauses in OR-of-AND trees. We do this only * within the top-level AND/OR structure; there's no point in looking - * deeper. Also remove any NULL constants in the top-level structure. + * deeper. Also remove any NULL constants in the top-level structure. */ newqual = find_duplicate_ors(qual); @@ -374,7 +374,7 @@ pull_ors(List *orlist) * * This may seem like a fairly useless activity, but it turns out to be * applicable to many machine-generated queries, and there are also queries - * in some of the TPC benchmarks that need it. This was in fact almost the + * in some of the TPC benchmarks that need it. This was in fact almost the * sole useful side-effect of the old prepqual code that tried to force * the query into canonical AND-of-ORs form: the canonical equivalent of * ((A AND B) OR (A AND C)) @@ -400,7 +400,7 @@ pull_ors(List *orlist) * results, so it's valid to treat NULL::boolean the same as FALSE and then * simplify AND/OR accordingly. * - * Returns the modified qualification. AND/OR flatness is preserved. + * Returns the modified qualification. AND/OR flatness is preserved. */ static Expr * find_duplicate_ors(Expr *qual) diff --git a/src/backend/optimizer/prep/prepsecurity.c b/src/backend/optimizer/prep/prepsecurity.c index 7daaa3349ed..dd7f9003a28 100644 --- a/src/backend/optimizer/prep/prepsecurity.c +++ b/src/backend/optimizer/prep/prepsecurity.c @@ -33,7 +33,7 @@ typedef struct Relation rel; /* RTE relation at rt_index */ List *targetlist; /* Targetlist for new subquery RTE */ List *colnames; /* Column names in subquery RTE */ - List *vars_processed; /* List of Vars already processed */ + List *vars_processed; /* List of Vars already processed */ } security_barrier_replace_vars_context; static void expand_security_qual(PlannerInfo *root, List *tlist, int rt_index, @@ -43,7 +43,7 @@ static void security_barrier_replace_vars(Node *node, security_barrier_replace_vars_context *context); static bool security_barrier_replace_vars_walker(Node *node, - security_barrier_replace_vars_context *context); + security_barrier_replace_vars_context *context); /* @@ -97,6 +97,7 @@ expand_security_quals(PlannerInfo *root, List *tlist) if (rt_index == parse->resultRelation) { RangeTblEntry *newrte = copyObject(rte); + parse->rtable = lappend(parse->rtable, newrte); parse->resultRelation = list_length(parse->rtable); @@ -117,11 +118,11 @@ expand_security_quals(PlannerInfo *root, List *tlist) rte->modifiedCols = NULL; /* - * For the most part, Vars referencing the original relation should - * remain as they are, meaning that they pull OLD values from the - * expanded RTE. But in the RETURNING list and in any WITH CHECK - * OPTION quals, we want such Vars to represent NEW values, so - * change them to reference the new RTE. + * For the most part, Vars referencing the original relation + * should remain as they are, meaning that they pull OLD values + * from the expanded RTE. But in the RETURNING list and in any + * WITH CHECK OPTION quals, we want such Vars to represent NEW + * values, so change them to reference the new RTE. 
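Earlier in the same prepqual.c hunk, find_duplicate_ors() is described as rewriting ((A AND B) OR (A AND C)) into the canonical equivalent A AND (B OR C). A quick standalone check of that identity over all truth assignments (plain C, nothing from the planner):

/* Quick equivalence check for the rewrite that find_duplicate_ors() performs:
 * ((A AND B) OR (A AND C))  ==  A AND (B OR C).
 * Standalone illustration only; not planner code. */
#include <stdio.h>

int
main(void)
{
    for (int a = 0; a <= 1; a++)
        for (int b = 0; b <= 1; b++)
            for (int c = 0; c <= 1; c++)
            {
                int orig = (a && b) || (a && c);
                int factored = a && (b || c);

                printf("A=%d B=%d C=%d  original=%d  factored=%d%s\n",
                       a, b, c, orig, factored,
                       orig == factored ? "" : "  MISMATCH");
            }
    return 0;
}

The factored-out A can then be used on its own, which is roughly what makes the rewrite worthwhile for the machine-generated and TPC-style queries the comment mentions.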
*/ ChangeVarNodes((Node *) parse->returningList, rt_index, parse->resultRelation, 0); @@ -141,7 +142,8 @@ expand_security_quals(PlannerInfo *root, List *tlist) */ while (rte->securityQuals != NIL) { - Node *qual = (Node *) linitial(rte->securityQuals); + Node *qual = (Node *) linitial(rte->securityQuals); + rte->securityQuals = list_delete_first(rte->securityQuals); ChangeVarNodes(qual, rt_index, 1, 0); @@ -160,14 +162,14 @@ static void expand_security_qual(PlannerInfo *root, List *tlist, int rt_index, RangeTblEntry *rte, Node *qual) { - Query *parse = root->parse; - Oid relid = rte->relid; - Query *subquery; - RangeTblEntry *subrte; - RangeTblRef *subrtr; - PlanRowMark *rc; + Query *parse = root->parse; + Oid relid = rte->relid; + Query *subquery; + RangeTblEntry *subrte; + RangeTblRef *subrtr; + PlanRowMark *rc; security_barrier_replace_vars_context context; - ListCell *cell; + ListCell *cell; /* * There should only be 2 possible cases: @@ -182,6 +184,7 @@ expand_security_qual(PlannerInfo *root, List *tlist, int rt_index, switch (rte->rtekind) { case RTE_RELATION: + /* * Turn the relation RTE into a security barrier subquery RTE, * moving all permissions checks down into the subquery. @@ -204,7 +207,7 @@ expand_security_qual(PlannerInfo *root, List *tlist, int rt_index, rte->relid = InvalidOid; rte->subquery = subquery; rte->security_barrier = true; - rte->inh = false; /* must not be set for a subquery */ + rte->inh = false; /* must not be set for a subquery */ /* the permissions checks have now been moved down */ rte->requiredPerms = 0; @@ -219,9 +222,9 @@ expand_security_qual(PlannerInfo *root, List *tlist, int rt_index, * Note that we can't push the user-defined quals down since they * may included untrusted functions and that means that we will * end up locking all rows which pass the securityQuals, even if - * those rows don't pass the user-defined quals. This is currently - * documented behavior, but it'd be nice to come up with a better - * solution some day. + * those rows don't pass the user-defined quals. This is + * currently documented behavior, but it'd be nice to come up with + * a better solution some day. */ rc = get_plan_rowmark(root->rowMarks, rt_index); if (rc != NULL) @@ -277,6 +280,7 @@ expand_security_qual(PlannerInfo *root, List *tlist, int rt_index, break; case RTE_SUBQUERY: + /* * Build a new subquery that includes all the same columns as the * original subquery. @@ -288,8 +292,8 @@ expand_security_qual(PlannerInfo *root, List *tlist, int rt_index, foreach(cell, rte->subquery->targetList) { - TargetEntry *tle; - Var *var; + TargetEntry *tle; + Var *var; tle = (TargetEntry *) lfirst(cell); var = makeVarFromTargetEntry(1, tle); @@ -333,7 +337,7 @@ expand_security_qual(PlannerInfo *root, List *tlist, int rt_index, * variable that needs to be exposed by the security barrier subquery RTE. * * NOTE: although this has the form of a walker, we cheat and modify the - * nodes in-place. The given expression tree should have been copied + * nodes in-place. The given expression tree should have been copied * earlier to ensure that no unwanted side-effects occur! 
*/ static void @@ -355,7 +359,7 @@ security_barrier_replace_vars(Node *node, static bool security_barrier_replace_vars_walker(Node *node, - security_barrier_replace_vars_context *context) + security_barrier_replace_vars_context *context) { if (node == NULL) return false; @@ -405,7 +409,7 @@ security_barrier_replace_vars_walker(Node *node, Form_pg_attribute att_tup; att_tup = SystemAttributeDefinition(var->varattno, - context->rel->rd_rel->relhasoids); + context->rel->rd_rel->relhasoids); attname = NameStr(att_tup->attname); } else if (var->varattno == InvalidAttrNumber) diff --git a/src/backend/optimizer/prep/preptlist.c b/src/backend/optimizer/prep/preptlist.c index ee773b834e9..4ab12e51df7 100644 --- a/src/backend/optimizer/prep/preptlist.c +++ b/src/backend/optimizer/prep/preptlist.c @@ -4,7 +4,7 @@ * Routines to preprocess the parse tree target list * * For INSERT and UPDATE queries, the targetlist must contain an entry for - * each attribute of the target relation in the correct order. For all query + * each attribute of the target relation in the correct order. For all query * types, we may need to add junk tlist entries for Vars used in the RETURNING * list and row ID information needed for SELECT FOR UPDATE locking and/or * EvalPlanQual checking. @@ -79,7 +79,7 @@ preprocess_targetlist(PlannerInfo *root, List *tlist) /* * Add necessary junk columns for rowmarked rels. These values are needed * for locking of rels selected FOR UPDATE/SHARE, and to do EvalPlanQual - * rechecking. See comments for PlanRowMark in plannodes.h. + * rechecking. See comments for PlanRowMark in plannodes.h. */ foreach(lc, root->rowMarks) { @@ -144,7 +144,7 @@ preprocess_targetlist(PlannerInfo *root, List *tlist) /* * If the query has a RETURNING list, add resjunk entries for any Vars * used in RETURNING that belong to other relations. We need to do this - * to make these Vars available for the RETURNING calculation. Vars that + * to make these Vars available for the RETURNING calculation. Vars that * belong to the result rel don't need to be added, because they will be * made to refer to the actual heap tuple. */ @@ -252,9 +252,9 @@ expand_targetlist(List *tlist, int command_type, * When generating a NULL constant for a dropped column, we label * it INT4 (any other guaranteed-to-exist datatype would do as * well). We can't label it with the dropped column's datatype - * since that might not exist anymore. It does not really matter + * since that might not exist anymore. It does not really matter * what we claim the type is, since NULL is NULL --- its - * representation is datatype-independent. This could perhaps + * representation is datatype-independent. This could perhaps * confuse code comparing the finished plan to the target * relation, however. */ @@ -336,7 +336,7 @@ expand_targetlist(List *tlist, int command_type, /* * The remaining tlist entries should be resjunk; append them all to the * end of the new tlist, making sure they have resnos higher than the last - * real attribute. (Note: although the rewriter already did such + * real attribute. (Note: although the rewriter already did such * renumbering, we have to do it again here in case we are doing an UPDATE * in a table with dropped columns, or an inheritance child table with * extra columns.) 
diff --git a/src/backend/optimizer/prep/prepunion.c b/src/backend/optimizer/prep/prepunion.c index cdf541d34d5..0410fddc546 100644 --- a/src/backend/optimizer/prep/prepunion.c +++ b/src/backend/optimizer/prep/prepunion.c @@ -6,14 +6,14 @@ * * There are two code paths in the planner for set-operation queries. * If a subquery consists entirely of simple UNION ALL operations, it - * is converted into an "append relation". Otherwise, it is handled + * is converted into an "append relation". Otherwise, it is handled * by the general code in this module (plan_set_operations and its * subroutines). There is some support code here for the append-relation * case, but most of the heavy lifting for that is done elsewhere, * notably in prepjointree.c and allpaths.c. * * There is also some code here to support planning of queries that use - * inheritance (SELECT FROM foo*). Inheritance trees are converted into + * inheritance (SELECT FROM foo*). Inheritance trees are converted into * append relations, and thenceforth share code with the UNION ALL case. * * @@ -577,7 +577,7 @@ generate_nonunion_plan(SetOperationStmt *op, PlannerInfo *root, * * The tlist for an Append plan isn't important as far as the Append is * concerned, but we must make it look real anyway for the benefit of the - * next plan level up. In fact, it has to be real enough that the flag + * next plan level up. In fact, it has to be real enough that the flag * column is shown as a variable not a constant, else setrefs.c will get * confused. */ @@ -970,7 +970,7 @@ generate_setop_tlist(List *colTypes, List *colCollations, * Ensure the tlist entry's exposed collation matches the set-op. This * is necessary because plan_set_operations() reports the result * ordering as a list of SortGroupClauses, which don't carry collation - * themselves but just refer to tlist entries. If we don't show the + * themselves but just refer to tlist entries. If we don't show the * right collation then planner.c might do the wrong thing in * higher-level queries. * @@ -1184,7 +1184,7 @@ generate_setop_grouplist(SetOperationStmt *op, List *targetlist) /* * expand_inherited_tables * Expand each rangetable entry that represents an inheritance set - * into an "append relation". At the conclusion of this process, + * into an "append relation". At the conclusion of this process, * the "inh" flag is set in all and only those RTEs that are append * relation parents. */ @@ -1216,7 +1216,7 @@ expand_inherited_tables(PlannerInfo *root) * Check whether a rangetable entry represents an inheritance set. * If so, add entries for all the child tables to the query's * rangetable, and build AppendRelInfo nodes for all the child tables - * and add them to root->append_rel_list. If not, clear the entry's + * and add them to root->append_rel_list. If not, clear the entry's * "inh" flag to prevent later code from looking for AppendRelInfos. * * Note that the original RTE is considered to represent the whole @@ -1527,7 +1527,7 @@ make_inh_translation_list(Relation oldrelation, Relation newrelation, * parent rel's attribute numbering to the child's. * * The only surprise here is that we don't translate a parent whole-row - * reference into a child whole-row reference. That would mean requiring + * reference into a child whole-row reference. That would mean requiring * permissions on all child columns, which is overly strict, since the * query is really only going to reference the inherited columns. Instead * we set the per-column bits for all inherited columns. 
@@ -1708,6 +1708,7 @@ adjust_appendrel_attrs_mutator(Node *node, foreach(lc, fields) { Var *field = (Var *) lfirst(lc); + field->varlevelsup += context->sublevels_up; } rowexpr = makeNode(RowExpr); @@ -1887,7 +1888,7 @@ adjust_relid_set(Relids relids, Index oldrelid, Index newrelid) * * The expressions have already been fixed, but we have to make sure that * the target resnos match the child table (they may not, in the case of - * a column that was added after-the-fact by ALTER TABLE). In some cases + * a column that was added after-the-fact by ALTER TABLE). In some cases * this can force us to re-order the tlist to preserve resno ordering. * (We do all this work in special cases so that preptlist.c is fast for * the typical case.) diff --git a/src/backend/optimizer/util/clauses.c b/src/backend/optimizer/util/clauses.c index 3f307e6464c..97dacaaac19 100644 --- a/src/backend/optimizer/util/clauses.c +++ b/src/backend/optimizer/util/clauses.c @@ -540,7 +540,7 @@ count_agg_clauses_walker(Node *node, count_agg_clauses_context *context) /* * If the transition type is pass-by-value then it doesn't add - * anything to the required size of the hashtable. If it is + * anything to the required size of the hashtable. If it is * pass-by-reference then we have to add the estimated size of the * value itself, plus palloc overhead. */ @@ -835,7 +835,7 @@ contain_subplans_walker(Node *node, void *context) * Recursively search for mutable functions within a clause. * * Returns true if any mutable function (or operator implemented by a - * mutable function) is found. This test is needed so that we don't + * mutable function) is found. This test is needed so that we don't * mistakenly think that something like "WHERE random() < 0.5" can be treated * as a constant qualification. * @@ -962,7 +962,7 @@ contain_mutable_functions_walker(Node *node, void *context) * invalid conversions of volatile expressions into indexscan quals. * * We will recursively look into Query nodes (i.e., SubLink sub-selects) - * but not into SubPlans. This is a bit odd, but intentional. If we are + * but not into SubPlans. This is a bit odd, but intentional. If we are * looking at a SubLink, we are probably deciding whether a query tree * transformation is safe, and a contained sub-select should affect that; * for example, duplicating a sub-select containing a volatile function @@ -1207,7 +1207,7 @@ contain_volatile_functions_not_nextval_walker(Node *node, void *context) * The idea here is that the caller has verified that the expression contains * one or more Var or Param nodes (as appropriate for the caller's need), and * now wishes to prove that the expression result will be NULL if any of these - * inputs is NULL. If we return false, then the proof succeeded. + * inputs is NULL. If we return false, then the proof succeeded. */ bool contain_nonstrict_functions(Node *clause) @@ -1326,7 +1326,7 @@ contain_nonstrict_functions_walker(Node *node, void *context) * Recursively search for leaky functions within a clause. * * Returns true if any function call with side-effect may be present in the - * clause. Qualifiers from outside the a security_barrier view should not + * clause. Qualifiers from outside the a security_barrier view should not * be pushed down into the view, lest the contents of tuples intended to be * filtered out be revealed via side effects. 
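Several of the clauses.c helpers shown above (contain_mutable_functions(), contain_volatile_functions(), contain_leaky_functions()) share the same shape: walk the expression tree and return true at the first node with the property of interest. Below is a minimal sketch of that pattern for the mutable-function case the comment uses as its example, "WHERE random() < 0.5"; the Expr struct and volatility flags are invented stand-ins, not PostgreSQL's node types:

/* Sketch of the contain_mutable_functions() idea: recursively walk an
 * expression tree and report whether any function in it is not immutable.
 * The Expr type and volatility flags are invented for this example. */
#include <stdbool.h>
#include <stdio.h>

typedef enum { E_CONST, E_VAR, E_FUNC } ExprKind;
typedef enum { VOL_IMMUTABLE, VOL_STABLE, VOL_VOLATILE } Volatility;

typedef struct Expr
{
    ExprKind    kind;
    Volatility  volatility;     /* meaningful only for E_FUNC */
    int         nargs;
    struct Expr *args[4];
} Expr;

static bool
contain_mutable(const Expr *e)
{
    if (e == NULL)
        return false;
    if (e->kind == E_FUNC && e->volatility != VOL_IMMUTABLE)
        return true;            /* stable or volatile => not constant-foldable */
    for (int i = 0; i < e->nargs; i++)
        if (contain_mutable(e->args[i]))
            return true;
    return false;
}

int
main(void)
{
    /* Roughly "random() < 0.5": a comparison over a volatile call and a const */
    Expr randfn = {E_FUNC, VOL_VOLATILE, 0, {NULL}};
    Expr half = {E_CONST, VOL_IMMUTABLE, 0, {NULL}};
    Expr lt = {E_FUNC, VOL_IMMUTABLE, 2, {&randfn, &half}};

    printf("contains mutable function: %s\n", contain_mutable(&lt) ? "yes" : "no");
    return 0;
}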
*/ @@ -1465,7 +1465,7 @@ contain_leaky_functions_walker(Node *node, void *context) * * Returns the set of all Relids that are referenced in the clause in such * a way that the clause cannot possibly return TRUE if any of these Relids - * is an all-NULL row. (It is OK to err on the side of conservatism; hence + * is an all-NULL row. (It is OK to err on the side of conservatism; hence * the analysis here is simplistic.) * * The semantics here are subtly different from contain_nonstrict_functions: @@ -1571,7 +1571,7 @@ find_nonnullable_rels_walker(Node *node, bool top_level) * could be FALSE (hence not NULL). However, if *all* the * arms produce NULL then the result is NULL, so we can take * the intersection of the sets of nonnullable rels, just as - * for OR. Fall through to share code. + * for OR. Fall through to share code. */ /* FALL THRU */ case OR_EXPR: @@ -1779,7 +1779,7 @@ find_nonnullable_vars_walker(Node *node, bool top_level) * could be FALSE (hence not NULL). However, if *all* the * arms produce NULL then the result is NULL, so we can take * the intersection of the sets of nonnullable vars, just as - * for OR. Fall through to share code. + * for OR. Fall through to share code. */ /* FALL THRU */ case OR_EXPR: @@ -2049,7 +2049,7 @@ is_strict_saop(ScalarArrayOpExpr *expr, bool falseOK) * variables of the current query level and no uses of volatile functions. * Such an expr is not necessarily a true constant: it can still contain * Params and outer-level Vars, not to mention functions whose results - * may vary from one statement to the next. However, the expr's value + * may vary from one statement to the next. However, the expr's value * will be constant over any one scan of the current query, so it can be * used as, eg, an indexscan key. * @@ -2255,7 +2255,7 @@ rowtype_field_matches(Oid rowtypeid, int fieldnum, * expression tree, for example "2 + 2" => "4". More interestingly, * we can reduce certain boolean expressions even when they contain * non-constant subexpressions: "x OR true" => "true" no matter what - * the subexpression x is. (XXX We assume that no such subexpression + * the subexpression x is. (XXX We assume that no such subexpression * will have important side-effects, which is not necessarily a good * assumption in the presence of user-defined functions; do we need a * pg_proc flag that prevents discarding the execution of a function?) @@ -2268,7 +2268,7 @@ rowtype_field_matches(Oid rowtypeid, int fieldnum, * * Whenever a function is eliminated from the expression by means of * constant-expression evaluation or inlining, we add the function to - * root->glob->invalItems. This ensures the plan is known to depend on + * root->glob->invalItems. This ensures the plan is known to depend on * such functions, even though they aren't referenced anymore. * * We assume that the tree has already been type-checked and contains @@ -2451,7 +2451,7 @@ eval_const_expressions_mutator(Node *node, /* * Code for op/func reduction is pretty bulky, so split it out - * as a separate function. Note: exprTypmod normally returns + * as a separate function. Note: exprTypmod normally returns * -1 for a FuncExpr, but not when the node is recognizably a * length coercion; we want to preserve the typmod in the * eventual Const if so. @@ -2495,7 +2495,7 @@ eval_const_expressions_mutator(Node *node, OpExpr *newexpr; /* - * Need to get OID of underlying function. Okay to scribble + * Need to get OID of underlying function. Okay to scribble * on input to this extent. 
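The eval_const_expressions() header comment above gives "x OR true" => "true" as an example of a simplification that works even when the subexpression x is not constant. A small sketch of that boolean shortcut, using an invented BoolArg representation rather than the planner's node types:

/* Sketch of the boolean shortcut described for eval_const_expressions():
 * "x OR true" simplifies to "true" even when x is not a constant, and
 * constant FALSE arguments contribute nothing.  Invented types; not the
 * planner's implementation. */
#include <stdbool.h>
#include <stdio.h>

typedef enum { B_CONST, B_OPAQUE } BoolArgKind;

typedef struct
{
    BoolArgKind kind;
    bool        value;          /* for B_CONST */
    const char *name;           /* for B_OPAQUE, e.g. a column reference */
} BoolArg;

/* Simplify an OR list.  Returns true and sets *result if the whole OR
 * collapses to a constant; otherwise prints the surviving arguments. */
static bool
simplify_or(const BoolArg *args, int nargs, bool *result)
{
    int     survivors = 0;

    /* A constant TRUE anywhere decides the whole OR. */
    for (int i = 0; i < nargs; i++)
        if (args[i].kind == B_CONST && args[i].value)
        {
            *result = true;
            return true;
        }

    /* Constant FALSE arguments are dropped; the rest survive. */
    for (int i = 0; i < nargs; i++)
        if (args[i].kind == B_OPAQUE)
        {
            printf("keep argument: %s\n", args[i].name);
            survivors++;
        }

    if (survivors == 0)
    {
        *result = false;        /* everything was constant FALSE */
        return true;
    }
    return false;
}

int
main(void)
{
    BoolArg with_true[] = {{B_OPAQUE, false, "x > 10"}, {B_CONST, true, NULL}};
    BoolArg no_const[] = {{B_OPAQUE, false, "x > 10"}, {B_CONST, false, NULL},
                          {B_OPAQUE, false, "y = 1"}};
    bool    result;

    if (simplify_or(with_true, 2, &result))
        printf("first OR is constant: %s\n", result ? "true" : "false");

    if (!simplify_or(no_const, 3, &result))
        printf("second OR keeps only the arguments listed above\n");
    return 0;
}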
*/ set_opfuncid(expr); @@ -2598,7 +2598,7 @@ eval_const_expressions_mutator(Node *node, /* (NOT okay to try to inline it, though!) */ /* - * Need to get OID of underlying function. Okay to + * Need to get OID of underlying function. Okay to * scribble on input to this extent. */ set_opfuncid((OpExpr *) expr); /* rely on struct @@ -2963,13 +2963,13 @@ eval_const_expressions_mutator(Node *node, * TRUE: drop all remaining alternatives * If the first non-FALSE alternative is a constant TRUE, * we can simplify the entire CASE to that alternative's - * expression. If there are no non-FALSE alternatives, + * expression. If there are no non-FALSE alternatives, * we simplify the entire CASE to the default result (ELSE). * * If we have a simple-form CASE with constant test * expression, we substitute the constant value for contained * CaseTestExpr placeholder nodes, so that we have the - * opportunity to reduce constant test conditions. For + * opportunity to reduce constant test conditions. For * example this allows * CASE 0 WHEN 0 THEN 1 ELSE 1/0 END * to reduce to 1 rather than drawing a divide-by-0 error. @@ -3191,7 +3191,7 @@ eval_const_expressions_mutator(Node *node, { /* * We can optimize field selection from a whole-row Var into a - * simple Var. (This case won't be generated directly by the + * simple Var. (This case won't be generated directly by the * parser, because ParseComplexProjection short-circuits it. * But it can arise while simplifying functions.) Also, we * can optimize field selection from a RowExpr construct. @@ -3449,7 +3449,7 @@ simplify_or_arguments(List *args, /* * Since the parser considers OR to be a binary operator, long OR lists * become deeply nested expressions. We must flatten these into long - * argument lists of a single OR operator. To avoid blowing out the stack + * argument lists of a single OR operator. To avoid blowing out the stack * with recursion of eval_const_expressions, we resort to some tenseness * here: we keep a list of not-yet-processed inputs, and handle flattening * of nested ORs by prepending to the to-do list instead of recursing. @@ -3497,7 +3497,7 @@ simplify_or_arguments(List *args, } /* - * OK, we have a const-simplified non-OR argument. Process it per + * OK, we have a const-simplified non-OR argument. Process it per * comments above. */ if (IsA(arg, Const)) @@ -3732,7 +3732,7 @@ simplify_function(Oid funcid, Oid result_type, int32 result_typmod, * deliver a constant result, use a transform function to generate a * substitute node tree, or expand in-line the body of the function * definition (which only works for simple SQL-language functions, but - * that is a common case). Each case needs access to the function's + * that is a common case). Each case needs access to the function's * pg_proc tuple, so fetch it just once. * * Note: the allow_non_const flag suppresses both the second and third @@ -3770,7 +3770,7 @@ simplify_function(Oid funcid, Oid result_type, int32 result_typmod, if (!newexpr && allow_non_const && OidIsValid(func_form->protransform)) { /* - * Build a dummy FuncExpr node containing the simplified arg list. We + * Build a dummy FuncExpr node containing the simplified arg list. We * use this approach to present a uniform interface to the transform * function regardless of how the function is actually being invoked. 
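simplify_or_arguments(), whose comment appears earlier in this hunk, avoids deep C recursion by keeping a list of not-yet-processed inputs and prepending nested OR arms to it instead of recursing. A standalone sketch of that flattening strategy with an explicit work stack (the Tree type and the fixed-size stack are invented for brevity):

/* Sketch of the flattening strategy described for simplify_or_arguments():
 * a deeply nested binary OR tree is turned into one flat argument list by
 * keeping an explicit to-do stack rather than recursing. */
#include <stdio.h>
#include <stdlib.h>

typedef enum { T_OR, T_LEAF } TreeKind;

typedef struct Tree
{
    TreeKind    kind;
    int         leafval;        /* for T_LEAF */
    struct Tree *left;
    struct Tree *right;
} Tree;

static Tree *
leaf(int v)
{
    Tree *t = calloc(1, sizeof(Tree));

    t->kind = T_LEAF;
    t->leafval = v;
    return t;
}

static Tree *
or2(Tree *l, Tree *r)
{
    Tree *t = calloc(1, sizeof(Tree));

    t->kind = T_OR;
    t->left = l;
    t->right = r;
    return t;
}

int
main(void)
{
    /* ((1 OR 2) OR 3) OR 4 -- parser-style left-deep nesting */
    Tree *expr = or2(or2(or2(leaf(1), leaf(2)), leaf(3)), leaf(4));

    Tree *todo[64];             /* explicit work stack, no C recursion */
    int   ntodo = 0;

    todo[ntodo++] = expr;
    printf("flat OR arguments:");
    while (ntodo > 0)
    {
        Tree *t = todo[--ntodo];

        if (t->kind == T_OR)
        {
            /* push the children back onto the to-do stack instead of recursing */
            todo[ntodo++] = t->right;
            todo[ntodo++] = t->left;
        }
        else
            printf(" %d", t->leafval);
    }
    printf("\n");
    return 0;
}

The same trick is what keeps a long, parser-generated chain of binary ORs from consuming call stack proportional to its length.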
*/ @@ -3978,7 +3978,7 @@ fetch_function_defaults(HeapTuple func_tuple) * * It is possible for some of the defaulted arguments to be polymorphic; * therefore we can't assume that the default expressions have the correct - * data types already. We have to re-resolve polymorphics and do coercion + * data types already. We have to re-resolve polymorphics and do coercion * just like the parser did. * * This should be a no-op if there are no polymorphic arguments, @@ -4141,7 +4141,7 @@ evaluate_function(Oid funcid, Oid result_type, int32 result_typmod, * do not re-expand them. Also, if a parameter is used more than once * in the SQL-function body, we require it not to contain any volatile * functions (volatiles might deliver inconsistent answers) nor to be - * unreasonably expensive to evaluate. The expensiveness check not only + * unreasonably expensive to evaluate. The expensiveness check not only * prevents us from doing multiple evaluations of an expensive parameter * at runtime, but is a safety value to limit growth of an expression due * to repeated inlining. @@ -4184,7 +4184,7 @@ inline_function(Oid funcid, Oid result_type, Oid result_collid, /* * Forget it if the function is not SQL-language or has other showstopper - * properties. (The nargs check is just paranoia.) + * properties. (The nargs check is just paranoia.) */ if (funcform->prolang != SQLlanguageId || funcform->prosecdef || @@ -4262,7 +4262,7 @@ inline_function(Oid funcid, Oid result_type, Oid result_collid, /* * We just do parsing and parse analysis, not rewriting, because rewriting * will not affect table-free-SELECT-only queries, which is all that we - * care about. Also, we can punt as soon as we detect more than one + * care about. Also, we can punt as soon as we detect more than one * command in the function body. */ raw_parsetree_list = pg_parse_query(src); @@ -4304,7 +4304,7 @@ inline_function(Oid funcid, Oid result_type, Oid result_collid, /* * Make sure the function (still) returns what it's declared to. This * will raise an error if wrong, but that's okay since the function would - * fail at runtime anyway. Note that check_sql_fn_retval will also insert + * fail at runtime anyway. Note that check_sql_fn_retval will also insert * a RelabelType if needed to make the tlist expression match the declared * type of the function. * @@ -4349,7 +4349,7 @@ inline_function(Oid funcid, Oid result_type, Oid result_collid, /* * We may be able to do it; there are still checks on parameter usage to * make, but those are most easily done in combination with the actual - * substitution of the inputs. So start building expression with inputs + * substitution of the inputs. So start building expression with inputs * substituted. */ usecounts = (int *) palloc0(funcform->pronargs * sizeof(int)); @@ -4549,7 +4549,7 @@ evaluate_expr(Expr *expr, Oid result_type, int32 result_typmod, fix_opfuncids((Node *) expr); /* - * Prepare expr for execution. (Note: we can't use ExecPrepareExpr + * Prepare expr for execution. (Note: we can't use ExecPrepareExpr * because it'd result in recursively invoking eval_const_expressions.) */ exprstate = ExecInitExpr(expr, NULL); @@ -4671,7 +4671,7 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte) * Refuse to inline if the arguments contain any volatile functions or * sub-selects. Volatile functions are rejected because inlining may * result in the arguments being evaluated multiple times, risking a - * change in behavior. 
Sub-selects are rejected partly for implementation + * change in behavior. Sub-selects are rejected partly for implementation * reasons (pushing them down another level might change their behavior) * and partly because they're likely to be expensive and so multiple * evaluation would be bad. @@ -4698,7 +4698,7 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte) /* * Forget it if the function is not SQL-language or has other showstopper - * properties. In particular it mustn't be declared STRICT, since we + * properties. In particular it mustn't be declared STRICT, since we * couldn't enforce that. It also mustn't be VOLATILE, because that is * supposed to cause it to be executed with its own snapshot, rather than * sharing the snapshot of the calling query. (Rechecking proretset is @@ -4728,9 +4728,9 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte) /* * When we call eval_const_expressions below, it might try to add items to - * root->glob->invalItems. Since it is running in the temp context, those + * root->glob->invalItems. Since it is running in the temp context, those * items will be in that context, and will need to be copied out if we're - * successful. Temporarily reset the list so that we can keep those items + * successful. Temporarily reset the list so that we can keep those items * separate from the pre-existing list contents. */ saveInvalItems = root->glob->invalItems; @@ -4760,7 +4760,7 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte) /* * Run eval_const_expressions on the function call. This is necessary to * ensure that named-argument notation is converted to positional notation - * and any default arguments are inserted. It's a bit of overkill for the + * and any default arguments are inserted. It's a bit of overkill for the * arguments, since they'll get processed again later, but no harm will be * done. */ @@ -4812,7 +4812,7 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte) /* * Make sure the function (still) returns what it's declared to. This * will raise an error if wrong, but that's okay since the function would - * fail at runtime anyway. Note that check_sql_fn_retval will also insert + * fail at runtime anyway. Note that check_sql_fn_retval will also insert * RelabelType(s) and/or NULL columns if needed to make the tlist * expression(s) match the declared type of the function. * diff --git a/src/backend/optimizer/util/joininfo.c b/src/backend/optimizer/util/joininfo.c index a6421580f9b..0418946d714 100644 --- a/src/backend/optimizer/util/joininfo.c +++ b/src/backend/optimizer/util/joininfo.c @@ -83,7 +83,7 @@ have_relevant_joinclause(PlannerInfo *root, * Add 'restrictinfo' to the joininfo list of each relation it requires. * * Note that the same copy of the restrictinfo node is linked to by all the - * lists it is in. This allows us to exploit caching of information about + * lists it is in. This allows us to exploit caching of information about * the restriction clause (but we must be careful that the information does * not depend on context). 
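The joininfo.c comment just above notes that one and the same restrictinfo node is linked into the joininfo list of every relation it mentions, so cached, context-independent information is computed once and then visible from every list. A sketch of that pointer-sharing idea with invented Clause/ListCell types:

/* Sketch of the sharing described for joininfo lists: the same clause node
 * is linked (by pointer) into the lists of every relation it mentions, so a
 * lazily computed, context-independent cache fills in once for all of them.
 * All types here are invented for illustration. */
#include <stdio.h>

typedef struct Clause
{
    const char *text;
    double      cached_selectivity;     /* < 0 means "not yet computed" */
} Clause;

typedef struct ListCell
{
    Clause          *clause;
    struct ListCell *next;
} ListCell;

static double
get_selectivity(Clause *c)
{
    if (c->cached_selectivity < 0)
    {
        /* pretend this is an expensive estimate */
        c->cached_selectivity = 0.1;
        printf("computed selectivity for \"%s\"\n", c->text);
    }
    return c->cached_selectivity;
}

int
main(void)
{
    Clause   join_clause = {"a.x = b.y", -1.0};

    /* the same node appears in both relations' joininfo lists */
    ListCell cell_for_a = {&join_clause, NULL};
    ListCell cell_for_b = {&join_clause, NULL};

    printf("via rel a: %.2f\n", get_selectivity(cell_for_a.clause));
    printf("via rel b: %.2f\n", get_selectivity(cell_for_b.clause));   /* cache hit */
    return 0;
}

This is also why the comment insists the cached information must not depend on context: whichever relation's list the clause is reached through, the same cached value comes back.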
* diff --git a/src/backend/optimizer/util/orclauses.c b/src/backend/optimizer/util/orclauses.c index e9fd47bffbe..9e954d0d35f 100644 --- a/src/backend/optimizer/util/orclauses.c +++ b/src/backend/optimizer/util/orclauses.c @@ -50,7 +50,7 @@ static void consider_new_or_clause(PlannerInfo *root, RelOptInfo *rel, * * The added quals are partially redundant with the original OR, and therefore * would cause the size of the joinrel to be underestimated when it is finally - * formed. (This would be true of a full transformation to CNF as well; the + * formed. (This would be true of a full transformation to CNF as well; the * fault is not really in the transformation, but in clauselist_selectivity's * inability to recognize redundant conditions.) We can compensate for this * redundancy by changing the cached selectivity of the original OR clause, @@ -60,10 +60,10 @@ static void consider_new_or_clause(PlannerInfo *root, RelOptInfo *rel, * and on the fact that the same RestrictInfo node will appear in every * joininfo list that might be used when the joinrel is formed. * And it doesn't work in cases where the size estimation is nonlinear - * (i.e., outer and IN joins). But it beats not doing anything. + * (i.e., outer and IN joins). But it beats not doing anything. * * We examine each base relation to see if join clauses associated with it - * contain extractable restriction conditions. If so, add those conditions + * contain extractable restriction conditions. If so, add those conditions * to the rel's baserestrictinfo and update the cached selectivities of the * join clauses. Note that the same join clause will be examined afresh * from the point of view of each baserel that participates in it, so its @@ -129,7 +129,7 @@ static bool is_safe_restriction_clause_for(RestrictInfo *rinfo, RelOptInfo *rel) { /* - * We want clauses that mention the rel, and only the rel. So in + * We want clauses that mention the rel, and only the rel. So in * particular pseudoconstant clauses can be rejected quickly. Then check * the clause's Var membership. */ @@ -168,7 +168,7 @@ extract_or_clause(RestrictInfo *or_rinfo, RelOptInfo *rel) * in those nodes to make is_safe_restriction_clause_for()'s checks * cheaper. We'll strip those nodes from the returned tree, though, * meaning that fresh ones will be built if the clause is accepted as a - * restriction clause. This might seem wasteful --- couldn't we re-use + * restriction clause. This might seem wasteful --- couldn't we re-use * the existing RestrictInfos? But that'd require assuming that * selectivity and other cached data is computed exactly the same way for * a restriction clause as for a join clause, which seems undesirable. @@ -193,7 +193,7 @@ extract_or_clause(RestrictInfo *or_rinfo, RelOptInfo *rel) if (restriction_is_or_clause(rinfo)) { /* - * Recurse to deal with nested OR. Note we *must* recurse + * Recurse to deal with nested OR. Note we *must* recurse * here, this isn't just overly-tense optimization: we * have to descend far enough to find and strip all * RestrictInfos in the expression. @@ -314,7 +314,7 @@ consider_new_or_clause(PlannerInfo *root, RelOptInfo *rel, SpecialJoinInfo sjinfo; /* - * Make up a SpecialJoinInfo for JOIN_INNER semantics. (Compare + * Make up a SpecialJoinInfo for JOIN_INNER semantics. (Compare * approx_tuple_count() in costsize.c.) 
*/ sjinfo.type = T_SpecialJoinInfo; diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c index b79af7af4e0..4e05dcd2463 100644 --- a/src/backend/optimizer/util/pathnode.c +++ b/src/backend/optimizer/util/pathnode.c @@ -127,11 +127,11 @@ compare_fractional_path_costs(Path *path1, Path *path2, * * The fuzz_factor argument must be 1.0 plus delta, where delta is the * fraction of the smaller cost that is considered to be a significant - * difference. For example, fuzz_factor = 1.01 makes the fuzziness limit + * difference. For example, fuzz_factor = 1.01 makes the fuzziness limit * be 1% of the smaller cost. * * The two paths are said to have "equal" costs if both startup and total - * costs are fuzzily the same. Path1 is said to be better than path2 if + * costs are fuzzily the same. Path1 is said to be better than path2 if * it has fuzzily better startup cost and fuzzily no worse total cost, * or if it has fuzzily better total cost and fuzzily no worse startup cost. * Path2 is better than path1 if the reverse holds. Finally, if one path @@ -207,12 +207,12 @@ compare_path_costs_fuzzily(Path *path1, Path *path2, double fuzz_factor, * * cheapest_total_path is normally the cheapest-total-cost unparameterized * path; but if there are no unparameterized paths, we assign it to be the - * best (cheapest least-parameterized) parameterized path. However, only + * best (cheapest least-parameterized) parameterized path. However, only * unparameterized paths are considered candidates for cheapest_startup_path, * so that will be NULL if there are no unparameterized paths. * * The cheapest_parameterized_paths list collects all parameterized paths - * that have survived the add_path() tournament for this relation. (Since + * that have survived the add_path() tournament for this relation. (Since * add_path ignores pathkeys and startup cost for a parameterized path, * these will be paths that have best total cost or best row count for their * parameterization.) cheapest_parameterized_paths always includes the @@ -431,7 +431,7 @@ add_path(RelOptInfo *parent_rel, Path *new_path) p1_next = lnext(p1); /* - * Do a fuzzy cost comparison with 1% fuzziness limit. (XXX does this + * Do a fuzzy cost comparison with 1% fuzziness limit. (XXX does this * percentage need to be user-configurable?) */ costcmp = compare_path_costs_fuzzily(new_path, old_path, 1.01, @@ -607,7 +607,7 @@ add_path(RelOptInfo *parent_rel, Path *new_path) * and have lower bounds for its costs. * * Note that we do not know the path's rowcount, since getting an estimate for - * that is too expensive to do before prechecking. We assume here that paths + * that is too expensive to do before prechecking. We assume here that paths * of a superset parameterization will generate fewer rows; if that holds, * then paths with different parameterizations cannot dominate each other * and so we can simply ignore existing paths of another parameterization. @@ -907,7 +907,7 @@ create_append_path(RelOptInfo *rel, List *subpaths, Relids required_outer) * Compute rows and costs as sums of subplan rows and costs. We charge * nothing extra for the Append itself, which perhaps is too optimistic, * but since it doesn't do any selection or projection, it is a pretty - * cheap node. If you change this, see also make_append(). + * cheap node. If you change this, see also make_append(). 
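The compare_path_costs_fuzzily() comments near the top of this pathnode.c hunk define "fuzzily equal" via a fuzz_factor of 1.0 plus a delta (1.01 for a 1% margin) and say one path is better only if it is fuzzily better on one cost measure and fuzzily no worse on the other. A standalone sketch of that decision rule (not the add_path() implementation; the function names and result enum below are invented):

/* Sketch of the fuzzy cost-comparison rule described above.  A path "wins"
 * a measure only if it beats the other path by more than the fuzz margin. */
#include <stdbool.h>
#include <stdio.h>

typedef enum { COSTS_EQUAL, PATH1_BETTER, PATH2_BETTER, COSTS_DIFFERENT } CostCmp;

static bool
fuzzily_cheaper(double a, double b, double fuzz)
{
    return a * fuzz < b;        /* a must win by more than the margin */
}

static CostCmp
compare_fuzzily(double startup1, double total1,
                double startup2, double total2, double fuzz)
{
    bool s1 = fuzzily_cheaper(startup1, startup2, fuzz);
    bool s2 = fuzzily_cheaper(startup2, startup1, fuzz);
    bool t1 = fuzzily_cheaper(total1, total2, fuzz);
    bool t2 = fuzzily_cheaper(total2, total1, fuzz);

    if (!s1 && !s2 && !t1 && !t2)
        return COSTS_EQUAL;     /* both measures within the fuzz margin */
    if ((s1 && !t2) || (t1 && !s2))
        return PATH1_BETTER;    /* better on one measure, fuzzily no worse on the other */
    if ((s2 && !t1) || (t2 && !s1))
        return PATH2_BETTER;
    return COSTS_DIFFERENT;     /* each path wins on one measure */
}

int
main(void)
{
    const char *names[] = {"equal", "path1 better", "path2 better", "different"};

    /* within 1% on both measures: treated as equal */
    printf("%s\n", names[compare_fuzzily(100.0, 1000.0, 100.5, 1004.0, 1.01)]);
    /* clearly cheaper startup, same total: path1 better */
    printf("%s\n", names[compare_fuzzily(50.0, 1000.0, 100.0, 1000.0, 1.01)]);
    /* path1 cheaper startup but path2 cheaper total: neither dominates */
    printf("%s\n", names[compare_fuzzily(50.0, 2000.0, 100.0, 1000.0, 1.01)]);
    return 0;
}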
*/ pathnode->path.rows = 0; pathnode->path.startup_cost = 0; @@ -1456,7 +1456,7 @@ translate_sub_tlist(List *tlist, int relid) * * colnos is an integer list of output column numbers (resno's). We are * interested in whether rows consisting of just these columns are certain - * to be distinct. "Distinctness" is defined according to whether the + * to be distinct. "Distinctness" is defined according to whether the * corresponding upper-level equality operators listed in opids would think * the values are distinct. (Note: the opids entries could be cross-type * operators, and thus not exactly the equality operators that the subquery @@ -1577,7 +1577,7 @@ query_is_distinct_for(Query *query, List *colnos, List *opids) * distinct_col_search - subroutine for query_is_distinct_for * * If colno is in colnos, return the corresponding element of opids, - * else return InvalidOid. (We expect colnos does not contain duplicates, + * else return InvalidOid. (We expect colnos does not contain duplicates, * so the result is well-defined.) */ static Oid @@ -1977,10 +1977,10 @@ create_hashjoin_path(PlannerInfo *root, /* * A hashjoin never has pathkeys, since its output ordering is - * unpredictable due to possible batching. XXX If the inner relation is + * unpredictable due to possible batching. XXX If the inner relation is * small enough, we could instruct the executor that it must not batch, * and then we could assume that the output inherits the outer relation's - * ordering, which might save a sort step. However there is considerable + * ordering, which might save a sort step. However there is considerable * downside if our estimate of the inner relation size is badly off. For * the moment we don't risk it. (Note also that if we wanted to take this * seriously, joinpath.c would have to consider many more paths for the @@ -2007,7 +2007,7 @@ create_hashjoin_path(PlannerInfo *root, * same parameterization level, ensuring that they all enforce the same set * of join quals (and thus that that parameterization can be attributed to * an append path built from such paths). Currently, only a few path types - * are supported here, though more could be added at need. We return NULL + * are supported here, though more could be added at need. We return NULL * if we can't reparameterize the given path. * * Note: we intentionally do not pass created paths to add_path(); it would @@ -2039,7 +2039,7 @@ reparameterize_path(PlannerInfo *root, Path *path, /* * We can't use create_index_path directly, and would not want * to because it would re-compute the indexqual conditions - * which is wasted effort. Instead we hack things a bit: + * which is wasted effort. Instead we hack things a bit: * flat-copy the path node, revise its param_info, and redo * the cost estimate. */ diff --git a/src/backend/optimizer/util/placeholder.c b/src/backend/optimizer/util/placeholder.c index 1172d24b9a1..8d7c4feca46 100644 --- a/src/backend/optimizer/util/placeholder.c +++ b/src/backend/optimizer/util/placeholder.c @@ -60,7 +60,7 @@ make_placeholder_expr(PlannerInfo *root, Expr *expr, Relids phrels) * We build PlaceHolderInfos only for PHVs that are still present in the * simplified query passed to query_planner(). * - * Note: this should only be called after query_planner() has started. Also, + * Note: this should only be called after query_planner() has started. Also, * create_new_ph must not be TRUE after deconstruct_jointree begins, because * make_outerjoininfo assumes that we already know about all placeholders. 
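Among the helpers in this hunk, distinct_col_search() is described as a parallel-list lookup: find colno in colnos and return the matching element of opids, or InvalidOid if it is absent (the comment notes colnos is expected to have no duplicates). A minimal sketch with plain arrays standing in for the planner's List type; the OID values below are arbitrary:

/* Minimal sketch of the distinct_col_search() lookup described earlier in
 * this hunk: colnos and opids are parallel lists, and we return the opid
 * matching a given column number, or an "invalid" marker if it is absent. */
#include <stdio.h>

#define InvalidOid 0

static unsigned int
distinct_col_search(int colno, int ncols, const int *colnos,
                    const unsigned int *opids)
{
    for (int i = 0; i < ncols; i++)
        if (colnos[i] == colno)
            return opids[i];    /* assumes colnos has no duplicates */
    return InvalidOid;
}

int
main(void)
{
    int          colnos[] = {1, 3, 4};
    unsigned int opids[] = {96, 410, 1054};     /* arbitrary example values */

    printf("colno 3 -> opid %u\n", distinct_col_search(3, 3, colnos, opids));
    printf("colno 2 -> opid %u\n", distinct_col_search(2, 3, colnos, opids));
    return 0;
}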
*/ @@ -94,7 +94,7 @@ find_placeholder_info(PlannerInfo *root, PlaceHolderVar *phv, /* * Any referenced rels that are outside the PHV's syntactic scope are * LATERAL references, which should be included in ph_lateral but not in - * ph_eval_at. If no referenced rels are within the syntactic scope, + * ph_eval_at. If no referenced rels are within the syntactic scope, * force evaluation at the syntactic location. */ rels_used = pull_varnos((Node *) phv->phexpr); diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c index 73ba2f60b2d..b2becfa6765 100644 --- a/src/backend/optimizer/util/plancat.c +++ b/src/backend/optimizer/util/plancat.c @@ -427,12 +427,12 @@ estimate_rel_size(Relation rel, int32 *attr_widths, * minimum size estimate of 10 pages. The idea here is to avoid * assuming a newly-created table is really small, even if it * currently is, because that may not be true once some data gets - * loaded into it. Once a vacuum or analyze cycle has been done + * loaded into it. Once a vacuum or analyze cycle has been done * on it, it's more reasonable to believe the size is somewhat * stable. * * (Note that this is only an issue if the plan gets cached and - * used again after the table has been filled. What we're trying + * used again after the table has been filled. What we're trying * to avoid is using a nestloop-type plan on a table that has * grown substantially since the plan was made. Normally, * autovacuum/autoanalyze will occur once enough inserts have @@ -441,7 +441,7 @@ estimate_rel_size(Relation rel, int32 *attr_widths, * such as temporary tables.) * * We approximate "never vacuumed" by "has relpages = 0", which - * means this will also fire on genuinely empty relations. Not + * means this will also fire on genuinely empty relations. Not * great, but fortunately that's a seldom-seen case in the real * world, and it shouldn't degrade the quality of the plan too * much anyway to err in this direction. @@ -786,7 +786,7 @@ relation_excluded_by_constraints(PlannerInfo *root, return false; /* - * OK to fetch the constraint expressions. Include "col IS NOT NULL" + * OK to fetch the constraint expressions. Include "col IS NOT NULL" * expressions for attnotnull columns, in case we can refute those. */ constraint_pred = get_relation_constraints(root, rte->relid, rel, true); @@ -834,7 +834,7 @@ relation_excluded_by_constraints(PlannerInfo *root, * Exception: if there are any dropped columns, we punt and return NIL. * Ideally we would like to handle the dropped-column case too. However this * creates problems for ExecTypeFromTL, which may be asked to build a tupdesc - * for a tlist that includes vars of no-longer-existent types. In theory we + * for a tlist that includes vars of no-longer-existent types. In theory we * could dig out the required info from the pg_attribute entries of the * relation, but that data is not readily available to ExecTypeFromTL. * For now, we don't apply the physical-tlist optimization when there are diff --git a/src/backend/optimizer/util/predtest.c b/src/backend/optimizer/util/predtest.c index eadd2d5104a..9d61a4d71c2 100644 --- a/src/backend/optimizer/util/predtest.c +++ b/src/backend/optimizer/util/predtest.c @@ -133,7 +133,7 @@ predicate_implied_by(List *predicate_list, List *restrictinfo_list) /* * If either input is a single-element list, replace it with its lone - * member; this avoids one useless level of AND-recursion. We only need + * member; this avoids one useless level of AND-recursion. 
We only need * to worry about this at top level, since eval_const_expressions should * have gotten rid of any trivial ANDs or ORs below that. */ @@ -191,7 +191,7 @@ predicate_refuted_by(List *predicate_list, List *restrictinfo_list) /* * If either input is a single-element list, replace it with its lone - * member; this avoids one useless level of AND-recursion. We only need + * member; this avoids one useless level of AND-recursion. We only need * to worry about this at top level, since eval_const_expressions should * have gotten rid of any trivial ANDs or ORs below that. */ @@ -225,7 +225,7 @@ predicate_refuted_by(List *predicate_list, List *restrictinfo_list) * OR-expr A => AND-expr B iff: A => each of B's components * OR-expr A => OR-expr B iff: each of A's components => any of B's * - * An "atom" is anything other than an AND or OR node. Notice that we don't + * An "atom" is anything other than an AND or OR node. Notice that we don't * have any special logic to handle NOT nodes; these should have been pushed * down or eliminated where feasible by prepqual.c. * @@ -658,7 +658,7 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate) * We cannot make the stronger conclusion that B is refuted if B * implies A's arg; that would only prove that B is not-TRUE, not * that it's not NULL either. Hence use equal() rather than - * predicate_implied_by_recurse(). We could do the latter if we + * predicate_implied_by_recurse(). We could do the latter if we * ever had a need for the weak form of refutation. */ not_arg = extract_strong_not_arg(clause); @@ -820,7 +820,7 @@ predicate_classify(Node *clause, PredIterInfo info) } /* - * PredIterInfo routines for iterating over regular Lists. The iteration + * PredIterInfo routines for iterating over regular Lists. The iteration * state variable is the next ListCell to visit. */ static void @@ -1014,13 +1014,13 @@ arrayexpr_cleanup_fn(PredIterInfo info) * implies another: * * A simple and general way is to see if they are equal(); this works for any - * kind of expression. (Actually, there is an implied assumption that the + * kind of expression. (Actually, there is an implied assumption that the * functions in the expression are immutable, ie dependent only on their input * arguments --- but this was checked for the predicate by the caller.) * * When the predicate is of the form "foo IS NOT NULL", we can conclude that * the predicate is implied if the clause is a strict operator or function - * that has "foo" as an input. In this case the clause must yield NULL when + * that has "foo" as an input. In this case the clause must yield NULL when * "foo" is NULL, which we can take as equivalent to FALSE because we know * we are within an AND/OR subtree of a WHERE clause. (Again, "foo" is * already known immutable, so the clause will certainly always fail.) @@ -1244,7 +1244,7 @@ list_member_strip(List *list, Expr *datum) * * The strategy numbers defined by btree indexes (see access/skey.h) are: * (1) < (2) <= (3) = (4) >= (5) > - * and in addition we use (6) to represent <>. <> is not a btree-indexable + * and in addition we use (6) to represent <>. <> is not a btree-indexable * operator, but we assume here that if an equality operator of a btree * opfamily has a negator operator, the negator behaves as <> for the opfamily. * (This convention is also known to get_op_btree_interpretation().) @@ -1328,7 +1328,7 @@ static const StrategyNumber BT_refute_table[6][6] = { * if not able to prove it. 
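The predtest.c comments above introduce the btree strategy numbers (1) < through (5) >, plus (6) for <>, and lookup tables keyed by the clause operator and the predicate operator. The sketch below reconstructs a small implication table in that style for an ordered type -- it is derived for illustration from the definitions above, not copied from BT_implic_table -- and uses it to test implications such as "x < 5 implies x <= 7":

/* Sketch of the implication-table idea: to decide whether "x CLAUSE_OP c"
 * implies "x PRED_OP p", look up which comparison must hold between the two
 * constants.  Table reconstructed for illustration over integers. */
#include <stdbool.h>
#include <stdio.h>

typedef enum { LT, LE, EQ, GE, GT, NE, NONE } Strategy;

/* implic[clause_op][pred_op] = comparison "p ? c" that proves the implication */
static const Strategy implic[6][6] = {
    /* pred:   LT    LE    EQ    GE    GT    NE  */
    /* LT */ { GE,   GE,   NONE, NONE, NONE, GE  },
    /* LE */ { GT,   GE,   NONE, NONE, NONE, GT  },
    /* EQ */ { GT,   GE,   EQ,   LE,   LT,   NE  },
    /* GE */ { NONE, NONE, NONE, LE,   LT,   LT  },
    /* GT */ { NONE, NONE, NONE, LE,   LE,   LE  },
    /* NE */ { NONE, NONE, NONE, NONE, NONE, EQ  },
};

static bool
compare(int a, Strategy op, int b)
{
    switch (op)
    {
        case LT: return a < b;
        case LE: return a <= b;
        case EQ: return a == b;
        case GE: return a >= b;
        case GT: return a > b;
        case NE: return a != b;
        default: return false;
    }
}

/* Does "x clause_op c" imply "x pred_op p" for every x? */
static bool
implies(Strategy clause_op, int c, Strategy pred_op, int p)
{
    Strategy test = implic[clause_op][pred_op];

    return test != NONE && compare(p, test, c);
}

int
main(void)
{
    printf("x < 5  implies x <= 7 : %s\n", implies(LT, 5, LE, 7) ? "yes" : "no");
    printf("x > 4  implies x <> 2 : %s\n", implies(GT, 4, NE, 2) ? "yes" : "no");
    printf("x <= 9 implies x < 9  : %s\n", implies(LE, 9, LT, 9) ? "yes" : "no");
    return 0;
}

Each table entry names the comparison that must hold between the predicate's constant and the clause's constant for the implication to be provable, which is roughly the role the test operator looked up in predtest.c's tables plays.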
* * What we look for here is binary boolean opclauses of the form - * "foo op constant", where "foo" is the same in both clauses. The operators + * "foo op constant", where "foo" is the same in both clauses. The operators * and constants can be different but the operators must be in the same btree * operator family. We use the above operator implication tables to * derive implications between nonidentical clauses. (Note: "foo" is known @@ -1418,7 +1418,7 @@ btree_predicate_proof(Expr *predicate, Node *clause, bool refute_it) /* * Check for matching subexpressions on the non-Const sides. We used to * only allow a simple Var, but it's about as easy to allow any - * expression. Remember we already know that the pred expression does not + * expression. Remember we already know that the pred expression does not * contain any non-immutable functions, so identical expressions should * yield identical results. */ @@ -1690,7 +1690,7 @@ get_btree_test_op(Oid pred_op, Oid clause_op, bool refute_it) * Last check: test_op must be immutable. * * Note that we require only the test_op to be immutable, not the - * original clause_op. (pred_op is assumed to have been checked + * original clause_op. (pred_op is assumed to have been checked * immutable by the caller.) Essentially we are assuming that the * opfamily is consistent even if it contains operators that are * merely stable. diff --git a/src/backend/optimizer/util/relnode.c b/src/backend/optimizer/util/relnode.c index 8ae8f551212..c938c2700f9 100644 --- a/src/backend/optimizer/util/relnode.c +++ b/src/backend/optimizer/util/relnode.c @@ -262,7 +262,7 @@ RelOptInfo * find_join_rel(PlannerInfo *root, Relids relids) { /* - * Switch to using hash lookup when list grows "too long". The threshold + * Switch to using hash lookup when list grows "too long". The threshold * is arbitrary and is known only here. */ if (!root->join_rel_hash && list_length(root->join_rel_list) > 32) @@ -448,7 +448,7 @@ build_join_rel(PlannerInfo *root, /* * Also, if dynamic-programming join search is active, add the new joinrel - * to the appropriate sublist. Note: you might think the Assert on number + * to the appropriate sublist. Note: you might think the Assert on number * of members should be for equality, but some of the level 1 rels might * have been joinrels already, so we can only assert <=. */ @@ -529,7 +529,7 @@ build_joinrel_tlist(PlannerInfo *root, RelOptInfo *joinrel, * the join list need only be computed once for any join RelOptInfo. * The join list is fully determined by the set of rels making up the * joinrel, so we should get the same results (up to ordering) from any - * candidate pair of sub-relations. But the restriction list is whatever + * candidate pair of sub-relations. But the restriction list is whatever * is not handled in the sub-relations, so it depends on which * sub-relations are considered. * @@ -538,7 +538,7 @@ build_joinrel_tlist(PlannerInfo *root, RelOptInfo *joinrel, * we put it into the joininfo list for the joinrel. Otherwise, * the clause is now a restrict clause for the joined relation, and we * return it to the caller of build_joinrel_restrictlist() to be stored in - * join paths made from this pair of sub-relations. (It will not need to + * join paths made from this pair of sub-relations. (It will not need to * be considered further up the join tree.) * * In many case we will find the same RestrictInfos in both input @@ -557,7 +557,7 @@ build_joinrel_tlist(PlannerInfo *root, RelOptInfo *joinrel, * * NB: Formerly, we made deep(!) 
copies of each input RestrictInfo to pass * up to the join relation. I believe this is no longer necessary, because - * RestrictInfo nodes are no longer context-dependent. Instead, just include + * RestrictInfo nodes are no longer context-dependent. Instead, just include * the original nodes in the lists made for the join relation. */ static List * @@ -577,7 +577,7 @@ build_joinrel_restrictlist(PlannerInfo *root, result = subbuild_joinrel_restrictlist(joinrel, inner_rel->joininfo, result); /* - * Add on any clauses derived from EquivalenceClasses. These cannot be + * Add on any clauses derived from EquivalenceClasses. These cannot be * redundant with the clauses in the joininfo lists, so don't bother * checking. */ @@ -945,7 +945,7 @@ get_joinrel_parampathinfo(PlannerInfo *root, RelOptInfo *joinrel, *restrict_clauses); /* - * And now we can build the ParamPathInfo. No point in saving the + * And now we can build the ParamPathInfo. No point in saving the * input-pair-dependent clause list, though. * * Note: in GEQO mode, we'll be called in a temporary memory context, but @@ -965,8 +965,8 @@ get_joinrel_parampathinfo(PlannerInfo *root, RelOptInfo *joinrel, * Get the ParamPathInfo for a parameterized path for an append relation. * * For an append relation, the rowcount estimate will just be the sum of - * the estimates for its children. However, we still need a ParamPathInfo - * to flag the fact that the path requires parameters. So this just creates + * the estimates for its children. However, we still need a ParamPathInfo + * to flag the fact that the path requires parameters. So this just creates * a suitable struct with zero ppi_rows (and no ppi_clauses either, since * the Append node isn't responsible for checking quals). */ diff --git a/src/backend/optimizer/util/restrictinfo.c b/src/backend/optimizer/util/restrictinfo.c index 62de5905232..e861ce66576 100644 --- a/src/backend/optimizer/util/restrictinfo.c +++ b/src/backend/optimizer/util/restrictinfo.c @@ -210,7 +210,7 @@ make_restrictinfo_internal(Expr *clause, /* * Fill in all the cacheable fields with "not yet set" markers. None of - * these will be computed until/unless needed. Note in particular that we + * these will be computed until/unless needed. Note in particular that we * don't mark a binary opclause as mergejoinable or hashjoinable here; * that happens only if it appears in the right context (top level of a * joinclause list). diff --git a/src/backend/optimizer/util/tlist.c b/src/backend/optimizer/util/tlist.c index 5e26f3b57e3..f1f1be1b7fe 100644 --- a/src/backend/optimizer/util/tlist.c +++ b/src/backend/optimizer/util/tlist.c @@ -26,7 +26,7 @@ /* * tlist_member * Finds the (first) member of the given tlist whose expression is - * equal() to the given expression. Result is NULL if no such member. + * equal() to the given expression. Result is NULL if no such member. */ TargetEntry * tlist_member(Node *node, List *targetlist) diff --git a/src/backend/optimizer/util/var.c b/src/backend/optimizer/util/var.c index d629fcd90d2..d4f46b8d461 100644 --- a/src/backend/optimizer/util/var.c +++ b/src/backend/optimizer/util/var.c @@ -165,7 +165,7 @@ pull_varnos_walker(Node *node, pull_varnos_context *context) * lower than that if it references only a subset of the rels in its * syntactic scope. It might also contain lateral references, but we * should ignore such references when computing the set of varnos in - * an expression tree. Also, if the PHV contains no variables within + * an expression tree. 
Also, if the PHV contains no variables within * its syntactic scope, it will be forced to be evaluated exactly at * the syntactic scope, so take that as the relid set. */ @@ -364,7 +364,7 @@ contain_var_clause_walker(Node *node, void *context) * * Returns true if any such Var found. * - * Will recurse into sublinks. Also, may be invoked directly on a Query. + * Will recurse into sublinks. Also, may be invoked directly on a Query. */ bool contain_vars_of_level(Node *node, int levelsup) @@ -424,10 +424,10 @@ contain_vars_of_level_walker(Node *node, int *sublevels_up) * Find the parse location of any Var of the specified query level. * * Returns -1 if no such Var is in the querytree, or if they all have - * unknown parse location. (The former case is probably caller error, + * unknown parse location. (The former case is probably caller error, * but we don't bother to distinguish it from the latter case.) * - * Will recurse into sublinks. Also, may be invoked directly on a Query. + * Will recurse into sublinks. Also, may be invoked directly on a Query. * * Note: it might seem appropriate to merge this functionality into * contain_vars_of_level, but that would complicate that function's API. @@ -514,7 +514,7 @@ locate_var_of_level_walker(Node *node, * Upper-level vars (with varlevelsup > 0) should not be seen here, * likewise for upper-level Aggrefs and PlaceHolderVars. * - * Returns list of nodes found. Note the nodes themselves are not + * Returns list of nodes found. Note the nodes themselves are not * copied, only referenced. * * Does not examine subqueries, therefore must only be used after reduction @@ -591,7 +591,7 @@ pull_var_clause_walker(Node *node, pull_var_clause_context *context) * flatten_join_alias_vars * Replace Vars that reference JOIN outputs with references to the original * relation variables instead. This allows quals involving such vars to be - * pushed down. Whole-row Vars that reference JOIN relations are expanded + * pushed down. Whole-row Vars that reference JOIN relations are expanded * into RowExpr constructs that name the individual output Vars. This * is necessary since we will not scan the JOIN as a base relation, which * is the only way that the executor can directly handle whole-row Vars. @@ -603,7 +603,7 @@ pull_var_clause_walker(Node *node, pull_var_clause_context *context) * entries might now be arbitrary expressions, not just Vars. This affects * this function in one important way: we might find ourselves inserting * SubLink expressions into subqueries, and we must make sure that their - * Query.hasSubLinks fields get set to TRUE if so. If there are any + * Query.hasSubLinks fields get set to TRUE if so. If there are any * SubLinks in the join alias lists, the outer Query should already have * hasSubLinks = TRUE, so this is only relevant to un-flattened subqueries. * diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c index 7225bb62ab0..fb6c44c11c8 100644 --- a/src/backend/parser/analyze.c +++ b/src/backend/parser/analyze.c @@ -79,7 +79,7 @@ static void transformLockingClause(ParseState *pstate, Query *qry, * Optionally, information about $n parameter types can be supplied. * References to $n indexes not defined by paramTypes[] are disallowed. * - * The result is a Query node. Optimizable statements require considerable + * The result is a Query node. Optimizable statements require considerable * transformation, while utility-type statements are simply hung off * a dummy CMD_UTILITY Query node. 
*/ @@ -457,7 +457,7 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt) /* * If a non-nil rangetable/namespace was passed in, and we are doing * INSERT/SELECT, arrange to pass the rangetable/namespace down to the - * SELECT. This can only happen if we are inside a CREATE RULE, and in + * SELECT. This can only happen if we are inside a CREATE RULE, and in * that case we want the rule's OLD and NEW rtable entries to appear as * part of the SELECT's rtable, not as outer references for it. (Kluge!) * The SELECT's joinlist is not affected however. We must do this before @@ -642,7 +642,7 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt) * We must assign collations now because assign_query_collations * doesn't process rangetable entries. We just assign all the * collations independently in each row, and don't worry about - * whether they are consistent vertically. The outer INSERT query + * whether they are consistent vertically. The outer INSERT query * isn't going to care about the collations of the VALUES columns, * so it's not worth the effort to identify a common collation for * each one here. (But note this does have one user-visible @@ -691,7 +691,7 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt) else { /* - * Process INSERT ... VALUES with a single VALUES sublist. We treat + * Process INSERT ... VALUES with a single VALUES sublist. We treat * this case separately for efficiency. The sublist is just computed * directly as the Query's targetlist, with no VALUES RTE. So it * works just like a SELECT without any FROM. @@ -789,7 +789,7 @@ transformInsertRow(ParseState *pstate, List *exprlist, * Check length of expr list. It must not have more expressions than * there are target columns. We allow fewer, but only if no explicit * columns list was given (the remaining columns are implicitly - * defaulted). Note we must check this *after* transformation because + * defaulted). Note we must check this *after* transformation because * that could expand '*' into multiple items. */ if (list_length(exprlist) > list_length(icolumns)) @@ -859,7 +859,7 @@ transformInsertRow(ParseState *pstate, List *exprlist, * return -1 if expression isn't a RowExpr or a Var referencing one. * * This is currently used only for hint purposes, so we aren't terribly - * tense about recognizing all possible cases. The Var case is interesting + * tense about recognizing all possible cases. The Var case is interesting * because that's what we'll get in the INSERT ... SELECT (...) case. */ static int @@ -1191,7 +1191,7 @@ transformValuesClause(ParseState *pstate, SelectStmt *stmt) /* * Ordinarily there can't be any current-level Vars in the expression * lists, because the namespace was empty ... but if we're inside CREATE - * RULE, then NEW/OLD references might appear. In that case we have to + * RULE, then NEW/OLD references might appear. In that case we have to * mark the VALUES RTE as LATERAL. 
*/ if (pstate->p_rtable != NIL && @@ -1234,11 +1234,11 @@ transformValuesClause(ParseState *pstate, SelectStmt *stmt) if (stmt->lockingClause) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - /*------ - translator: %s is a SQL row locking clause such as FOR UPDATE */ + /*------ + translator: %s is a SQL row locking clause such as FOR UPDATE */ errmsg("%s cannot be applied to VALUES", LCS_asString(((LockingClause *) - linitial(stmt->lockingClause))->strength)))); + linitial(stmt->lockingClause))->strength)))); qry->rtable = pstate->p_rtable; qry->jointree = makeFromExpr(pstate->p_joinlist, NULL); @@ -1329,8 +1329,8 @@ transformSetOperationStmt(ParseState *pstate, SelectStmt *stmt) if (lockingClause) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - /*------ - translator: %s is a SQL row locking clause such as FOR UPDATE */ + /*------ + translator: %s is a SQL row locking clause such as FOR UPDATE */ errmsg("%s is not allowed with UNION/INTERSECT/EXCEPT", LCS_asString(((LockingClause *) linitial(lockingClause))->strength)))); @@ -1413,7 +1413,7 @@ transformSetOperationStmt(ParseState *pstate, SelectStmt *stmt) /* * As a first step towards supporting sort clauses that are expressions * using the output columns, generate a namespace entry that makes the - * output columns visible. A Join RTE node is handy for this, since we + * output columns visible. A Join RTE node is handy for this, since we * can easily control the Vars generated upon matches. * * Note: we don't yet do anything useful with such cases, but at least @@ -1493,7 +1493,7 @@ transformSetOperationStmt(ParseState *pstate, SelectStmt *stmt) * Recursively transform leaves and internal nodes of a set-op tree * * In addition to returning the transformed node, if targetlist isn't NULL - * then we return a list of its non-resjunk TargetEntry nodes. For a leaf + * then we return a list of its non-resjunk TargetEntry nodes. For a leaf * set-op node these are the actual targetlist entries; otherwise they are * dummy entries created to carry the type, typmod, collation, and location * (for error messages) of each output column of the set-op node. This info @@ -1527,16 +1527,16 @@ transformSetOperationTree(ParseState *pstate, SelectStmt *stmt, if (stmt->lockingClause) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - /*------ - translator: %s is a SQL row locking clause such as FOR UPDATE */ + /*------ + translator: %s is a SQL row locking clause such as FOR UPDATE */ errmsg("%s is not allowed with UNION/INTERSECT/EXCEPT", LCS_asString(((LockingClause *) - linitial(stmt->lockingClause))->strength)))); + linitial(stmt->lockingClause))->strength)))); /* * If an internal node of a set-op tree has ORDER BY, LIMIT, FOR UPDATE, * or WITH clauses attached, we need to treat it like a leaf node to - * generate an independent sub-Query tree. Otherwise, it can be + * generate an independent sub-Query tree. Otherwise, it can be * represented by a SetOperationStmt node underneath the parent Query. */ if (stmt->op == SETOP_NONE) @@ -1712,7 +1712,7 @@ transformSetOperationTree(ParseState *pstate, SelectStmt *stmt, rescoltypmod = -1; /* - * Verify the coercions are actually possible. If not, we'd fail + * Verify the coercions are actually possible. If not, we'd fail * later anyway, but we want to fail now while we have sufficient * context to produce an error cursor position. * @@ -1721,7 +1721,7 @@ transformSetOperationTree(ParseState *pstate, SelectStmt *stmt, * child query's semantics. 
* * If a child expression is an UNKNOWN-type Const or Param, we - * want to replace it with the coerced expression. This can only + * want to replace it with the coerced expression. This can only * happen when the child is a leaf set-op node. It's safe to * replace the expression because if the child query's semantics * depended on the type of this output column, it'd have already @@ -2113,8 +2113,8 @@ transformDeclareCursorStmt(ParseState *pstate, DeclareCursorStmt *stmt) if (result->rowMarks != NIL && (stmt->options & CURSOR_OPT_HOLD)) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - /*------ - translator: %s is a SQL row locking clause such as FOR UPDATE */ + /*------ + translator: %s is a SQL row locking clause such as FOR UPDATE */ errmsg("DECLARE CURSOR WITH HOLD ... %s is not supported", LCS_asString(((RowMarkClause *) linitial(result->rowMarks))->strength)), @@ -2124,8 +2124,8 @@ transformDeclareCursorStmt(ParseState *pstate, DeclareCursorStmt *stmt) if (result->rowMarks != NIL && (stmt->options & CURSOR_OPT_SCROLL)) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - /*------ - translator: %s is a SQL row locking clause such as FOR UPDATE */ + /*------ + translator: %s is a SQL row locking clause such as FOR UPDATE */ errmsg("DECLARE SCROLL CURSOR ... %s is not supported", LCS_asString(((RowMarkClause *) linitial(result->rowMarks))->strength)), @@ -2135,8 +2135,8 @@ transformDeclareCursorStmt(ParseState *pstate, DeclareCursorStmt *stmt) if (result->rowMarks != NIL && (stmt->options & CURSOR_OPT_INSENSITIVE)) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - /*------ - translator: %s is a SQL row locking clause such as FOR UPDATE */ + /*------ + translator: %s is a SQL row locking clause such as FOR UPDATE */ errmsg("DECLARE INSENSITIVE CURSOR ... %s is not supported", LCS_asString(((RowMarkClause *) linitial(result->rowMarks))->strength)), @@ -2220,7 +2220,7 @@ transformCreateTableAsStmt(ParseState *pstate, CreateTableAsStmt *stmt) /* * A materialized view would either need to save parameters for use in - * maintaining/loading the data or prohibit them entirely. The latter + * maintaining/loading the data or prohibit them entirely. The latter * seems safer and more sane. 
*/ if (query_contains_extern_params(query)) @@ -2272,7 +2272,7 @@ LCS_asString(LockClauseStrength strength) case LCS_FORUPDATE: return "FOR UPDATE"; } - return "FOR some"; /* shouldn't happen */ + return "FOR some"; /* shouldn't happen */ } /* @@ -2286,50 +2286,50 @@ CheckSelectLocking(Query *qry, LockClauseStrength strength) if (qry->setOperations) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - /*------ - translator: %s is a SQL row locking clause such as FOR UPDATE */ + /*------ + translator: %s is a SQL row locking clause such as FOR UPDATE */ errmsg("%s is not allowed with UNION/INTERSECT/EXCEPT", LCS_asString(strength)))); if (qry->distinctClause != NIL) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - /*------ - translator: %s is a SQL row locking clause such as FOR UPDATE */ + /*------ + translator: %s is a SQL row locking clause such as FOR UPDATE */ errmsg("%s is not allowed with DISTINCT clause", LCS_asString(strength)))); if (qry->groupClause != NIL) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - /*------ - translator: %s is a SQL row locking clause such as FOR UPDATE */ + /*------ + translator: %s is a SQL row locking clause such as FOR UPDATE */ errmsg("%s is not allowed with GROUP BY clause", LCS_asString(strength)))); if (qry->havingQual != NULL) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - /*------ - translator: %s is a SQL row locking clause such as FOR UPDATE */ + /*------ + translator: %s is a SQL row locking clause such as FOR UPDATE */ errmsg("%s is not allowed with HAVING clause", LCS_asString(strength)))); if (qry->hasAggs) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - /*------ - translator: %s is a SQL row locking clause such as FOR UPDATE */ + /*------ + translator: %s is a SQL row locking clause such as FOR UPDATE */ errmsg("%s is not allowed with aggregate functions", LCS_asString(strength)))); if (qry->hasWindowFuncs) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - /*------ - translator: %s is a SQL row locking clause such as FOR UPDATE */ + /*------ + translator: %s is a SQL row locking clause such as FOR UPDATE */ errmsg("%s is not allowed with window functions", LCS_asString(strength)))); if (expression_returns_set((Node *) qry->targetList)) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - /*------ - translator: %s is a SQL row locking clause such as FOR UPDATE */ + /*------ + translator: %s is a SQL row locking clause such as FOR UPDATE */ errmsg("%s is not allowed with set-returning functions in the target list", LCS_asString(strength)))); } @@ -2407,8 +2407,8 @@ transformLockingClause(ParseState *pstate, Query *qry, LockingClause *lc, if (thisrel->catalogname || thisrel->schemaname) ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), - /*------ - translator: %s is a SQL row locking clause such as FOR UPDATE */ + /*------ + translator: %s is a SQL row locking clause such as FOR UPDATE */ errmsg("%s must specify unqualified relation names", LCS_asString(lc->strength)), parser_errposition(pstate, thisrel->location))); @@ -2440,8 +2440,8 @@ transformLockingClause(ParseState *pstate, Query *qry, LockingClause *lc, case RTE_JOIN: ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - /*------ - translator: %s is a SQL row locking clause such as FOR UPDATE */ + /*------ + translator: %s is a SQL row locking clause such as FOR UPDATE */ errmsg("%s cannot be applied to a join", LCS_asString(lc->strength)), parser_errposition(pstate, thisrel->location))); @@ -2449,17 +2449,17 @@ 
transformLockingClause(ParseState *pstate, Query *qry, LockingClause *lc, case RTE_FUNCTION: ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - /*------ - translator: %s is a SQL row locking clause such as FOR UPDATE */ - errmsg("%s cannot be applied to a function", - LCS_asString(lc->strength)), + /*------ + translator: %s is a SQL row locking clause such as FOR UPDATE */ + errmsg("%s cannot be applied to a function", + LCS_asString(lc->strength)), parser_errposition(pstate, thisrel->location))); break; case RTE_VALUES: ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - /*------ - translator: %s is a SQL row locking clause such as FOR UPDATE */ + /*------ + translator: %s is a SQL row locking clause such as FOR UPDATE */ errmsg("%s cannot be applied to VALUES", LCS_asString(lc->strength)), parser_errposition(pstate, thisrel->location))); @@ -2467,10 +2467,10 @@ transformLockingClause(ParseState *pstate, Query *qry, LockingClause *lc, case RTE_CTE: ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - /*------ - translator: %s is a SQL row locking clause such as FOR UPDATE */ - errmsg("%s cannot be applied to a WITH query", - LCS_asString(lc->strength)), + /*------ + translator: %s is a SQL row locking clause such as FOR UPDATE */ + errmsg("%s cannot be applied to a WITH query", + LCS_asString(lc->strength)), parser_errposition(pstate, thisrel->location))); break; default: @@ -2484,8 +2484,8 @@ transformLockingClause(ParseState *pstate, Query *qry, LockingClause *lc, if (rt == NULL) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_TABLE), - /*------ - translator: %s is a SQL row locking clause such as FOR UPDATE */ + /*------ + translator: %s is a SQL row locking clause such as FOR UPDATE */ errmsg("relation \"%s\" in %s clause not found in FROM clause", thisrel->relname, LCS_asString(lc->strength)), diff --git a/src/backend/parser/kwlookup.c b/src/backend/parser/kwlookup.c index 5b28ddecced..af05aa70eac 100644 --- a/src/backend/parser/kwlookup.c +++ b/src/backend/parser/kwlookup.c @@ -52,7 +52,7 @@ ScanKeywordLookup(const char *text, return NULL; /* - * Apply an ASCII-only downcasing. We must not use tolower() since it may + * Apply an ASCII-only downcasing. We must not use tolower() since it may * produce the wrong translation in some locales (eg, Turkish). */ for (i = 0; i < len; i++) diff --git a/src/backend/parser/parse_agg.c b/src/backend/parser/parse_agg.c index 9af43d2a328..c984b7d5e46 100644 --- a/src/backend/parser/parse_agg.c +++ b/src/backend/parser/parse_agg.c @@ -437,7 +437,7 @@ check_agg_arguments(ParseState *pstate, /* * Now check for vars/aggs in the direct arguments, and throw error if - * needed. Note that we allow a Var of the agg's semantic level, but not + * needed. Note that we allow a Var of the agg's semantic level, but not * an Agg of that level. In principle such Aggs could probably be * supported, but it would create an ordering dependency among the * aggregates at execution time. Since the case appears neither to be @@ -815,7 +815,7 @@ parseCheckAggregates(ParseState *pstate, Query *qry) /* * If there are join alias vars involved, we have to flatten them to the * underlying vars, so that aliased and unaliased vars will be correctly - * taken as equal. We can skip the expense of doing this if no rangetable + * taken as equal. We can skip the expense of doing this if no rangetable * entries are RTE_JOIN kind. 
We use the planner's flatten_join_alias_vars * routine to do the flattening; it wants a PlannerInfo root node, which * fortunately can be mostly dummy. @@ -853,7 +853,7 @@ parseCheckAggregates(ParseState *pstate, Query *qry) * * Note: because we check resjunk tlist elements as well as regular ones, * this will also find ungrouped variables that came from ORDER BY and - * WINDOW clauses. For that matter, it's also going to examine the + * WINDOW clauses. For that matter, it's also going to examine the * grouping expressions themselves --- but they'll all pass the test ... */ clause = (Node *) qry->targetList; @@ -984,7 +984,7 @@ check_ungrouped_columns_walker(Node *node, /* * If we have an ungrouped Var of the original query level, we have a * failure. Vars below the original query level are not a problem, and - * neither are Vars from above it. (If such Vars are ungrouped as far as + * neither are Vars from above it. (If such Vars are ungrouped as far as * their own query level is concerned, that's someone else's problem...) */ if (IsA(node, Var)) @@ -1015,7 +1015,7 @@ check_ungrouped_columns_walker(Node *node, /* * Check whether the Var is known functionally dependent on the GROUP - * BY columns. If so, we can allow the Var to be used, because the + * BY columns. If so, we can allow the Var to be used, because the * grouping is really a no-op for this table. However, this deduction * depends on one or more constraints of the table, so we have to add * those constraints to the query's constraintDeps list, because it's diff --git a/src/backend/parser/parse_clause.c b/src/backend/parser/parse_clause.c index aa704bb4412..fcee1379c0c 100644 --- a/src/backend/parser/parse_clause.c +++ b/src/backend/parser/parse_clause.c @@ -149,7 +149,7 @@ transformFromClause(ParseState *pstate, List *frmList) * * If alsoSource is true, add the target to the query's joinlist and * namespace. For INSERT, we don't want the target to be joined to; - * it's a destination of tuples, not a source. For UPDATE/DELETE, + * it's a destination of tuples, not a source. For UPDATE/DELETE, * we do need to scan or join the target. (NOTE: we do not bother * to check for namespace conflict; we assume that the namespace was * initially empty in these cases.) @@ -219,7 +219,7 @@ setTargetTable(ParseState *pstate, RangeVar *relation, * Simplify InhOption (yes/no/default) into boolean yes/no. * * The reason we do things this way is that we don't want to examine the - * SQL_inheritance option flag until parse_analyze() is run. Otherwise, + * SQL_inheritance option flag until parse_analyze() is run. Otherwise, * we'd do the wrong thing with query strings that intermix SET commands * with queries. */ @@ -396,7 +396,7 @@ transformJoinOnClause(ParseState *pstate, JoinExpr *j, List *namespace) /* * The namespace that the join expression should see is just the two * subtrees of the JOIN plus any outer references from upper pstate - * levels. Temporarily set this pstate's namespace accordingly. (We need + * levels. Temporarily set this pstate's namespace accordingly. (We need * not check for refname conflicts, because transformFromClauseItem() * already did.) All namespace items are marked visible regardless of * LATERAL state. @@ -490,7 +490,7 @@ transformRangeSubselect(ParseState *pstate, RangeSubselect *r) pstate->p_expr_kind = EXPR_KIND_NONE; /* - * Check that we got something reasonable. Many of these conditions are + * Check that we got something reasonable. 
Many of these conditions are * impossible given restrictions of the grammar, but check 'em anyway. */ if (!IsA(query, Query) || @@ -526,7 +526,7 @@ transformRangeFunction(ParseState *pstate, RangeFunction *r) /* * We make lateral_only names of this level visible, whether or not the - * RangeFunction is explicitly marked LATERAL. This is needed for SQL + * RangeFunction is explicitly marked LATERAL. This is needed for SQL * spec compliance in the case of UNNEST(), and seems useful on * convenience grounds for all functions in FROM. * @@ -546,7 +546,7 @@ transformRangeFunction(ParseState *pstate, RangeFunction *r) * node types. * * We have to get this info now, because FigureColname only works on raw - * parsetrees. Actually deciding what to do with the names is left up to + * parsetrees. Actually deciding what to do with the names is left up to * addRangeTableEntryForFunction. * * Likewise, collect column definition lists if there were any. But @@ -570,7 +570,7 @@ transformRangeFunction(ParseState *pstate, RangeFunction *r) * other ways of implementing the SQL-standard UNNEST() syntax. * * If there is any decoration (including a coldeflist), we don't - * transform, which probably means a no-such-function error later. We + * transform, which probably means a no-such-function error later. We * could alternatively throw an error right now, but that doesn't seem * tremendously helpful. If someone is using any such decoration, * then they're not using the SQL-standard syntax, and they're more @@ -682,7 +682,7 @@ transformRangeFunction(ParseState *pstate, RangeFunction *r) ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("WITH ORDINALITY cannot be used with a column definition list"), - errhint("Put the column definition list inside ROWS FROM()."), + errhint("Put the column definition list inside ROWS FROM()."), parser_errposition(pstate, exprLocation((Node *) r->coldeflist)))); @@ -721,10 +721,10 @@ transformRangeFunction(ParseState *pstate, RangeFunction *r) * (We could extract this from the function return node, but it saves cycles * to pass it back separately.) * - * *top_rti: receives the rangetable index of top_rte. (Ditto.) + * *top_rti: receives the rangetable index of top_rte. (Ditto.) * * *namespace: receives a List of ParseNamespaceItems for the RTEs exposed - * as table/column names by this item. (The lateral_only flags in these items + * as table/column names by this item. (The lateral_only flags in these items * are indeterminate and should be explicitly set by the caller before use.) */ static Node * @@ -837,7 +837,7 @@ transformFromClauseItem(ParseState *pstate, Node *n, * right side, by temporarily adding them to the pstate's namespace * list. Per SQL:2008, if the join type is not INNER or LEFT then the * left-side names must still be exposed, but it's an error to - * reference them. (Stupid design, but that's what it says.) Hence, + * reference them. (Stupid design, but that's what it says.) Hence, * we always push them into the namespace, but mark them as not * lateral_ok if the jointype is wrong. * @@ -1101,7 +1101,7 @@ transformFromClauseItem(ParseState *pstate, Node *n, * * Note: if there are nested alias-less JOINs, the lower-level ones * will remain in the list although they have neither p_rel_visible - * nor p_cols_visible set. We could delete such list items, but it's + * nor p_cols_visible set. We could delete such list items, but it's * unclear that it's worth expending cycles to do so. 
*/ if (j->alias != NULL) @@ -1438,9 +1438,9 @@ checkTargetlistEntrySQL92(ParseState *pstate, TargetEntry *tle, * * This function supports the old SQL92 ORDER BY interpretation, where the * expression is an output column name or number. If we fail to find a - * match of that sort, we fall through to the SQL99 rules. For historical + * match of that sort, we fall through to the SQL99 rules. For historical * reasons, Postgres also allows this interpretation for GROUP BY, though - * the standard never did. However, for GROUP BY we prefer a SQL99 match. + * the standard never did. However, for GROUP BY we prefer a SQL99 match. * This function is *not* used for WINDOW definitions. * * node the ORDER BY, GROUP BY, or DISTINCT ON expression to be matched @@ -1458,7 +1458,7 @@ findTargetlistEntrySQL92(ParseState *pstate, Node *node, List **tlist, * * 1. Bare ColumnName (no qualifier or subscripts) * For a bare identifier, we search for a matching column name - * in the existing target list. Multiple matches are an error + * in the existing target list. Multiple matches are an error * unless they refer to identical values; for example, * we allow SELECT a, a FROM table ORDER BY a * but not SELECT a AS b, b FROM table ORDER BY b @@ -1467,7 +1467,7 @@ findTargetlistEntrySQL92(ParseState *pstate, Node *node, List **tlist, * For GROUP BY, it is incorrect to match the grouping item against * targetlist entries: according to SQL92, an identifier in GROUP BY * is a reference to a column name exposed by FROM, not to a target - * list column. However, many implementations (including pre-7.0 + * list column. However, many implementations (including pre-7.0 * PostgreSQL) accept this anyway. So for GROUP BY, we look first * to see if the identifier matches any FROM column name, and only * try for a targetlist name if it doesn't. This ensures that we @@ -1625,7 +1625,7 @@ findTargetlistEntrySQL99(ParseState *pstate, Node *node, List **tlist, /* * Convert the untransformed node to a transformed expression, and search * for a match in the tlist. NOTE: it doesn't really matter whether there - * is more than one match. Also, we are willing to match an existing + * is more than one match. Also, we are willing to match an existing * resjunk target here, though the SQL92 cases above must ignore resjunk * targets. */ @@ -1653,7 +1653,7 @@ findTargetlistEntrySQL99(ParseState *pstate, Node *node, List **tlist, /* * If no matches, construct a new target entry which is appended to the - * end of the target list. This target is given resjunk = TRUE so that it + * end of the target list. This target is given resjunk = TRUE so that it * will not be projected into the final tuple. */ target_result = transformTargetEntry(pstate, node, expr, exprKind, @@ -1864,7 +1864,7 @@ transformWindowDefinitions(ParseState *pstate, * <window clause> syntax rule 10 and general rule 1. The frame * clause rule is especially bizarre because it makes "OVER foo" * different from "OVER (foo)", and requires the latter to throw an - * error if foo has a nondefault frame clause. Well, ours not to + * error if foo has a nondefault frame clause. Well, ours not to * reason why, but we do go out of our way to throw a useful error * message for such cases. */ @@ -1967,7 +1967,7 @@ transformDistinctClause(ParseState *pstate, /* * The distinctClause should consist of all ORDER BY items followed by all - * other non-resjunk targetlist items. There must not be any resjunk + * other non-resjunk targetlist items. 
There must not be any resjunk * ORDER BY items --- that would imply that we are sorting by a value that * isn't necessarily unique within a DISTINCT group, so the results * wouldn't be well-defined. This construction ensures we follow the rule @@ -2023,7 +2023,7 @@ transformDistinctClause(ParseState *pstate, ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), is_agg ? - errmsg("an aggregate with DISTINCT must have at least one argument") : + errmsg("an aggregate with DISTINCT must have at least one argument") : errmsg("SELECT DISTINCT must have at least one column"))); return result; @@ -2104,7 +2104,7 @@ transformDistinctOnClause(ParseState *pstate, List *distinctlist, /* * Now add any remaining DISTINCT ON items, using default sort/group - * semantics for their data types. (Note: this is pretty questionable; if + * semantics for their data types. (Note: this is pretty questionable; if * the ORDER BY list doesn't include all the DISTINCT ON items and more * besides, you certainly aren't using DISTINCT ON in the intended way, * and you probably aren't going to get consistent results. It might be @@ -2131,7 +2131,8 @@ transformDistinctOnClause(ParseState *pstate, List *distinctlist, } /* - * An empty result list is impossible here because of grammar restrictions. + * An empty result list is impossible here because of grammar + * restrictions. */ Assert(result != NIL); diff --git a/src/backend/parser/parse_coerce.c b/src/backend/parser/parse_coerce.c index 854d7232216..8416d3675b1 100644 --- a/src/backend/parser/parse_coerce.c +++ b/src/backend/parser/parse_coerce.c @@ -56,12 +56,12 @@ static bool typeIsOfTypedTable(Oid reltypeId, Oid reloftypeId); * Convert an expression to a target type and typmod. * * This is the general-purpose entry point for arbitrary type coercion - * operations. Direct use of the component operations can_coerce_type, + * operations. Direct use of the component operations can_coerce_type, * coerce_type, and coerce_type_typmod should be restricted to special * cases (eg, when the conversion is expected to succeed). * * Returns the possibly-transformed expression tree, or NULL if the type - * conversion is not possible. (We do this, rather than ereport'ing directly, + * conversion is not possible. (We do this, rather than ereport'ing directly, * so that callers can generate custom error messages indicating context.) * * pstate - parse state (can be NULL, see coerce_type) @@ -145,7 +145,7 @@ coerce_to_target_type(ParseState *pstate, Node *expr, Oid exprtype, * already be properly coerced to the specified typmod. * * pstate is only used in the case that we are able to resolve the type of - * a previously UNKNOWN Param. It is okay to pass pstate = NULL if the + * a previously UNKNOWN Param. It is okay to pass pstate = NULL if the * caller does not want type information updated for Params. * * Note: this function must not modify the given expression tree, only add @@ -175,7 +175,7 @@ coerce_type(ParseState *pstate, Node *node, * * Note: by returning the unmodified node here, we are saying that * it's OK to treat an UNKNOWN constant as a valid input for a - * function accepting ANY, ANYELEMENT, or ANYNONARRAY. This should be + * function accepting ANY, ANYELEMENT, or ANYNONARRAY. This should be * all right, since an UNKNOWN value is still a perfectly valid Datum. 
* * NB: we do NOT want a RelabelType here: the exposed type of the @@ -250,7 +250,7 @@ coerce_type(ParseState *pstate, Node *node, /* * If the target type is a domain, we want to call its base type's - * input routine, not domain_in(). This is to avoid premature failure + * input routine, not domain_in(). This is to avoid premature failure * when the domain applies a typmod: existing input routines follow * implicit-coercion semantics for length checks, which is not always * what we want here. The needed check will be applied properly @@ -263,7 +263,7 @@ coerce_type(ParseState *pstate, Node *node, * For most types we pass typmod -1 to the input routine, because * existing input routines follow implicit-coercion semantics for * length checks, which is not always what we want here. Any length - * constraint will be applied later by our caller. An exception + * constraint will be applied later by our caller. An exception * however is the INTERVAL type, for which we *must* pass the typmod * or it won't be able to obey the bizarre SQL-spec input rules. (Ugly * as sin, but so is this part of the spec...) @@ -343,7 +343,7 @@ coerce_type(ParseState *pstate, Node *node, { /* * If we have a COLLATE clause, we have to push the coercion - * underneath the COLLATE. This is really ugly, but there is little + * underneath the COLLATE. This is really ugly, but there is little * choice because the above hacks on Consts and Params wouldn't happen * otherwise. This kluge has consequences in coerce_to_target_type. */ @@ -366,7 +366,7 @@ coerce_type(ParseState *pstate, Node *node, { /* * Generate an expression tree representing run-time application - * of the conversion function. If we are dealing with a domain + * of the conversion function. If we are dealing with a domain * target type, the conversion function will yield the base type, * and we need to extract the correct typmod to use from the * domain's typtypmod. @@ -402,7 +402,7 @@ coerce_type(ParseState *pstate, Node *node, * to have the intended type when inspected by higher-level code. * * Also, domains may have value restrictions beyond the base type - * that must be accounted for. If the destination is a domain + * that must be accounted for. If the destination is a domain * then we won't need a RelabelType node. */ result = coerce_to_domain(node, InvalidOid, -1, targetTypeId, @@ -649,7 +649,7 @@ coerce_to_domain(Node *arg, Oid baseTypeId, int32 baseTypeMod, Oid typeId, } /* - * Now build the domain coercion node. This represents run-time checking + * Now build the domain coercion node. This represents run-time checking * of any constraints currently attached to the domain. This also ensures * that the expression is properly labeled as to result type. */ @@ -722,7 +722,7 @@ coerce_type_typmod(Node *node, Oid targetTypeId, int32 targetTypMod, * Mark a coercion node as IMPLICIT so it will never be displayed by * ruleutils.c. We use this when we generate a nest of coercion nodes * to implement what is logically one conversion; the inner nodes are - * forced to IMPLICIT_CAST format. This does not change their semantics, + * forced to IMPLICIT_CAST format. This does not change their semantics, * only display behavior. * * It is caller error to call this on something that doesn't have a @@ -1181,7 +1181,7 @@ select_common_type(ParseState *pstate, List *exprs, const char *context, } /* - * Nope, so set up for the full algorithm. Note that at this point, lc + * Nope, so set up for the full algorithm. 
Note that at this point, lc * points to the first list item with type different from pexpr's; we need * not re-examine any items the previous loop advanced over. */ @@ -1476,7 +1476,7 @@ check_generic_type_consistency(Oid *actual_arg_types, * * If any polymorphic pseudotype is used in a function's arguments or * return type, we make sure the actual data types are consistent with - * each other. The argument consistency rules are shown above for + * each other. The argument consistency rules are shown above for * check_generic_type_consistency(). * * If we have UNKNOWN input (ie, an untyped literal) for any polymorphic @@ -1498,7 +1498,7 @@ check_generic_type_consistency(Oid *actual_arg_types, * impossible to determine the range type from the subtype alone.) * 4) If return type is ANYARRAY, but no argument is ANYARRAY or ANYELEMENT, * generate an error. Similarly, if return type is ANYRANGE, but no - * argument is ANYRANGE, generate an error. (These conditions are + * argument is ANYRANGE, generate an error. (These conditions are * prevented by CREATE FUNCTION and therefore are not expected here.) * 5) If return type is ANYELEMENT, and any argument is ANYELEMENT, use the * argument's actual type as the function's return type. @@ -1508,7 +1508,7 @@ check_generic_type_consistency(Oid *actual_arg_types, * type or the range type's corresponding subtype (or both, in which case * they must match). * 7) If return type is ANYELEMENT, no argument is ANYELEMENT, ANYARRAY, or - * ANYRANGE, generate an error. (This condition is prevented by CREATE + * ANYRANGE, generate an error. (This condition is prevented by CREATE * FUNCTION and therefore is not expected here.) * 8) ANYENUM is treated the same as ANYELEMENT except that if it is used * (alone or in combination with plain ANYELEMENT), we add the extra @@ -1525,14 +1525,14 @@ check_generic_type_consistency(Oid *actual_arg_types, * * When allow_poly is false, we are not expecting any of the actual_arg_types * to be polymorphic, and we should not return a polymorphic result type - * either. When allow_poly is true, it is okay to have polymorphic "actual" + * either. When allow_poly is true, it is okay to have polymorphic "actual" * arg types, and we can return ANYARRAY, ANYRANGE, or ANYELEMENT as the - * result. (This case is currently used only to check compatibility of an + * result. (This case is currently used only to check compatibility of an * aggregate's declaration with the underlying transfn.) * * A special case is that we could see ANYARRAY as an actual_arg_type even * when allow_poly is false (this is possible only because pg_statistic has - * columns shown as anyarray in the catalogs). We allow this to match a + * columns shown as anyarray in the catalogs). We allow this to match a * declared ANYARRAY argument, but only if there is no ANYELEMENT argument * or result (since we can't determine a specific element type to match to * ANYELEMENT). Note this means that functions taking ANYARRAY had better @@ -1638,7 +1638,7 @@ enforce_generic_type_consistency(Oid *actual_arg_types, /* * Fast Track: if none of the arguments are polymorphic, return the - * unmodified rettype. We assume it can't be polymorphic either. + * unmodified rettype. We assume it can't be polymorphic either. */ if (!have_generics) return rettype; @@ -1981,8 +1981,8 @@ IsPreferredType(TYPCATEGORY category, Oid type) * Check if srctype is binary-coercible to targettype. 
* * This notion allows us to cheat and directly exchange values without - * going through the trouble of calling a conversion function. Note that - * in general, this should only be an implementation shortcut. Before 7.4, + * going through the trouble of calling a conversion function. Note that + * in general, this should only be an implementation shortcut. Before 7.4, * this was also used as a heuristic for resolving overloaded functions and * operators, but that's basically a bad idea. * @@ -1995,7 +1995,7 @@ IsPreferredType(TYPCATEGORY category, Oid type) * types. * * This function replaces IsBinaryCompatible(), which was an inherently - * symmetric test. Since the pg_cast entries aren't necessarily symmetric, + * symmetric test. Since the pg_cast entries aren't necessarily symmetric, * the order of the operands is now significant. */ bool @@ -2181,7 +2181,7 @@ find_coercion_pathway(Oid targetTypeId, Oid sourceTypeId, * Hack: disallow coercions to oidvector and int2vector, which * otherwise tend to capture coercions that should go to "real" array * types. We want those types to be considered "real" arrays for many - * purposes, but not this one. (Also, ArrayCoerceExpr isn't + * purposes, but not this one. (Also, ArrayCoerceExpr isn't * guaranteed to produce an output that meets the restrictions of * these datatypes, such as being 1-dimensional.) */ diff --git a/src/backend/parser/parse_collate.c b/src/backend/parser/parse_collate.c index aa30864fc24..bbd10304cca 100644 --- a/src/backend/parser/parse_collate.c +++ b/src/backend/parser/parse_collate.c @@ -14,19 +14,19 @@ * 1. The output collation of each expression node, or InvalidOid if it * returns a noncollatable data type. This can also be InvalidOid if the * result type is collatable but the collation is indeterminate. - * 2. The collation to be used in executing each function. InvalidOid means + * 2. The collation to be used in executing each function. InvalidOid means * that there are no collatable inputs or their collation is indeterminate. * This value is only stored in node types that might call collation-using * functions. * * You might think we could get away with storing only one collation per - * node, but the two concepts really need to be kept distinct. Otherwise + * node, but the two concepts really need to be kept distinct. Otherwise * it's too confusing when a function produces a collatable output type but * has no collatable inputs or produces noncollatable output from collatable * inputs. * * Cases with indeterminate collation might result in an error being thrown - * at runtime. If we knew exactly which functions require collation + * at runtime. If we knew exactly which functions require collation * information, we could throw those errors at parse time instead. * * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group @@ -245,7 +245,7 @@ select_common_collation(ParseState *pstate, List *exprs, bool none_ok) * Recursive guts of collation processing. * * Nodes with no children (eg, Vars, Consts, Params) must have been marked - * when built. All upper-level nodes are marked here. + * when built. All upper-level nodes are marked here. * * Note: if this is invoked directly on a List, it will attempt to infer a * common collation for all the list members. In particular, it will throw @@ -448,7 +448,7 @@ assign_collations_walker(Node *node, assign_collations_context *context) /* * TargetEntry can have only one child, and should bubble that - * state up to its parent. 
We can't use the general-case code + * state up to its parent. We can't use the general-case code * below because exprType and friends don't work on TargetEntry. */ collation = loccontext.collation; @@ -463,7 +463,7 @@ assign_collations_walker(Node *node, assign_collations_context *context) * There are some cases where there might not be a failure, for * example if the planner chooses to use hash aggregation instead * of sorting for grouping; but it seems better to predictably - * throw an error. (Compare transformSetOperationTree, which will + * throw an error. (Compare transformSetOperationTree, which will * throw error for indeterminate collation of set-op columns, even * though the planner might be able to implement the set-op * without sorting.) @@ -501,7 +501,7 @@ assign_collations_walker(Node *node, assign_collations_context *context) * SubLink. Act as though the Query returns its first output * column, which indeed is what it does for EXPR_SUBLINK and * ARRAY_SUBLINK cases. In the cases where the SubLink - * returns boolean, this info will be ignored. Special case: + * returns boolean, this info will be ignored. Special case: * in EXISTS, the Query might return no columns, in which case * we need do nothing. * @@ -961,7 +961,7 @@ assign_hypothetical_collations(Aggref *aggref, /* * Assign collations internally in this pair of expressions, then - * choose a common collation for them. This should match + * choose a common collation for them. This should match * select_common_collation(), but we can't use that function as-is * because we need access to the whole collation state so we can * bubble it up to the aggregate function's level. diff --git a/src/backend/parser/parse_cte.c b/src/backend/parser/parse_cte.c index 76eb418d1bf..04b585d1e20 100644 --- a/src/backend/parser/parse_cte.c +++ b/src/backend/parser/parse_cte.c @@ -181,7 +181,7 @@ transformWithClause(ParseState *pstate, WithClause *withClause) checkWellFormedRecursion(&cstate); /* - * Set up the ctenamespace for parse analysis. Per spec, all the WITH + * Set up the ctenamespace for parse analysis. Per spec, all the WITH * items are visible to all others, so stuff them all in before parse * analysis. We build the list in safe processing order so that the * planner can process the queries in sequence. @@ -207,7 +207,7 @@ transformWithClause(ParseState *pstate, WithClause *withClause) { /* * For non-recursive WITH, just analyze each CTE in sequence and then - * add it to the ctenamespace. This corresponds to the spec's + * add it to the ctenamespace. This corresponds to the spec's * definition of the scope of each WITH name. However, to allow error * reports to be aware of the possibility of an erroneous reference, * we maintain a list in p_future_ctes of the not-yet-visible CTEs. @@ -245,7 +245,7 @@ analyzeCTE(ParseState *pstate, CommonTableExpr *cte) cte->ctequery = (Node *) query; /* - * Check that we got something reasonable. These first two cases should + * Check that we got something reasonable. These first two cases should * be prevented by the grammar. */ if (!IsA(query, Query)) @@ -393,7 +393,7 @@ analyzeCTETargetList(ParseState *pstate, CommonTableExpr *cte, List *tlist) /* * If the CTE is recursive, force the exposed column type of any - * "unknown" column to "text". This corresponds to the fact that + * "unknown" column to "text". This corresponds to the fact that * SELECT 'foo' UNION SELECT 'bar' will ultimately produce text. 
We * might see "unknown" as a result of an untyped literal in the * non-recursive term's select list, and if we don't convert to text diff --git a/src/backend/parser/parse_expr.c b/src/backend/parser/parse_expr.c index 81c9338054e..088224573f3 100644 --- a/src/backend/parser/parse_expr.c +++ b/src/backend/parser/parse_expr.c @@ -506,7 +506,7 @@ transformColumnRef(ParseState *pstate, ColumnRef *cref) } crerr = CRERR_NO_COLUMN; /* - * Give the PreParseColumnRefHook, if any, first shot. If it returns + * Give the PreParseColumnRefHook, if any, first shot. If it returns * non-null then that's all, folks. */ if (pstate->p_pre_columnref_hook != NULL) @@ -577,7 +577,7 @@ transformColumnRef(ParseState *pstate, ColumnRef *cref) } /* - * Try to find the name as a relation. Note that only + * Try to find the name as a relation. Note that only * relations already entered into the rangetable will be * recognized. * @@ -808,7 +808,7 @@ transformParamRef(ParseState *pstate, ParamRef *pref) Node *result; /* - * The core parser knows nothing about Params. If a hook is supplied, + * The core parser knows nothing about Params. If a hook is supplied, * call it. If not, or if the hook returns NULL, throw a generic error. */ if (pstate->p_paramref_hook != NULL) @@ -1108,7 +1108,7 @@ transformAExprIn(ParseState *pstate, A_Expr *a) * We try to generate a ScalarArrayOpExpr from IN/NOT IN, but this is only * possible if there is a suitable array type available. If not, we fall * back to a boolean condition tree with multiple copies of the lefthand - * expression. Also, any IN-list items that contain Vars are handled as + * expression. Also, any IN-list items that contain Vars are handled as * separate boolean conditions, because that gives the planner more scope * for optimization on such clauses. * @@ -1139,7 +1139,7 @@ transformAExprIn(ParseState *pstate, A_Expr *a) Oid array_type; /* - * Try to select a common type for the array elements. Note that + * Try to select a common type for the array elements. Note that * since the LHS' type is first in the list, it will be preferred when * there is doubt (eg, when all the RHS items are unknown literals). * @@ -1254,8 +1254,8 @@ transformFuncCall(ParseState *pstate, FuncCall *fn) /* * When WITHIN GROUP is used, we treat its ORDER BY expressions as * additional arguments to the function, for purposes of function lookup - * and argument type coercion. So, transform each such expression and add - * them to the targs list. We don't explicitly mark where each argument + * and argument type coercion. So, transform each such expression and add + * them to the targs list. We don't explicitly mark where each argument * came from, but ParseFuncOrColumn can tell what's what by reference to * list_length(fn->agg_order). */ @@ -1510,7 +1510,7 @@ transformSubLink(ParseState *pstate, SubLink *sublink) qtree = parse_sub_analyze(sublink->subselect, pstate, NULL, false); /* - * Check that we got something reasonable. Many of these conditions are + * Check that we got something reasonable. Many of these conditions are * impossible given restrictions of the grammar, but check 'em anyway. */ if (!IsA(qtree, Query) || @@ -1925,7 +1925,7 @@ transformXmlExpr(ParseState *pstate, XmlExpr *x) newx->location = x->location; /* - * gram.y built the named args as a list of ResTarget. Transform each, + * gram.y built the named args as a list of ResTarget. Transform each, * and break the names out as a separate list. 
*/ newx->named_args = NIL; @@ -2188,9 +2188,9 @@ transformWholeRowRef(ParseState *pstate, RangeTblEntry *rte, int location) vnum = RTERangeTablePosn(pstate, rte, &sublevels_up); /* - * Build the appropriate referencing node. Note that if the RTE is a + * Build the appropriate referencing node. Note that if the RTE is a * function returning scalar, we create just a plain reference to the - * function value, not a composite containing a single column. This is + * function value, not a composite containing a single column. This is * pretty inconsistent at first sight, but it's what we've done * historically. One argument for it is that "rel" and "rel.*" mean the * same thing for composite relations, so why not for scalar functions... @@ -2374,7 +2374,7 @@ make_row_comparison_op(ParseState *pstate, List *opname, /* * Now we must determine which row comparison semantics (= <> < <= > >=) - * apply to this set of operators. We look for btree opfamilies + * apply to this set of operators. We look for btree opfamilies * containing the operators, and see which interpretations (strategy * numbers) exist for each operator. */ diff --git a/src/backend/parser/parse_func.c b/src/backend/parser/parse_func.c index cc4608417b2..9ebd3fd43bd 100644 --- a/src/backend/parser/parse_func.c +++ b/src/backend/parser/parse_func.c @@ -104,7 +104,7 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs, /* * Most of the rest of the parser just assumes that functions do not have - * more than FUNC_MAX_ARGS parameters. We have to test here to protect + * more than FUNC_MAX_ARGS parameters. We have to test here to protect * against array overruns, etc. Of course, this may not be a function, * but the test doesn't hurt. */ @@ -520,7 +520,7 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs, * If there are default arguments, we have to include their types in * actual_arg_types for the purpose of checking generic type consistency. * However, we do NOT put them into the generated parse node, because - * their actual values might change before the query gets run. The + * their actual values might change before the query gets run. The * planner has to insert the up-to-date values at plan time. */ nargsplusdefs = nargs; @@ -653,7 +653,7 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs, /* * Reject attempt to call a parameterless aggregate without (*) - * syntax. This is mere pedantry but some folks insisted ... + * syntax. This is mere pedantry but some folks insisted ... */ if (fargs == NIL && !agg_star && !agg_within_group) ereport(ERROR, @@ -672,7 +672,7 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs, * We might want to support named arguments later, but disallow it for * now. We'd need to figure out the parsed representation (should the * NamedArgExprs go above or below the TargetEntry nodes?) and then - * teach the planner to reorder the list properly. Or maybe we could + * teach the planner to reorder the list properly. Or maybe we could * make transformAggregateCall do that? However, if you'd also like * to allow default arguments for aggregates, we'd need to do it in * planning to avoid semantic problems. @@ -717,7 +717,7 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs, /* * Reject attempt to call a parameterless aggregate without (*) - * syntax. This is mere pedantry but some folks insisted ... + * syntax. This is mere pedantry but some folks insisted ... 
*/ if (wfunc->winagg && fargs == NIL && !agg_star) ereport(ERROR, @@ -895,7 +895,7 @@ func_select_candidate(int nargs, * matches" in the exact-match heuristic; it also makes it possible to do * something useful with the type-category heuristics. Note that this * makes it difficult, but not impossible, to use functions declared to - * take a domain as an input datatype. Such a function will be selected + * take a domain as an input datatype. Such a function will be selected * over the base-type function only if it is an exact match at all * argument positions, and so was already chosen by our caller. * @@ -1019,7 +1019,7 @@ func_select_candidate(int nargs, /* * The next step examines each unknown argument position to see if we can - * determine a "type category" for it. If any candidate has an input + * determine a "type category" for it. If any candidate has an input * datatype of STRING category, use STRING category (this bias towards * STRING is appropriate since unknown-type literals look like strings). * Otherwise, if all the candidates agree on the type category of this @@ -1030,7 +1030,7 @@ func_select_candidate(int nargs, * the candidates takes a preferred datatype within the category. * * Having completed this examination, remove candidates that accept the - * wrong category at any unknown position. Also, if at least one + * wrong category at any unknown position. Also, if at least one * candidate accepted a preferred type at a position, remove candidates * that accept non-preferred types. If just one candidate remains, return * that one. However, if this rule turns out to reject all candidates, @@ -1159,7 +1159,7 @@ func_select_candidate(int nargs, * type, and see if that gives us a unique match. If so, use that match. * * NOTE: for a binary operator with one unknown and one non-unknown input, - * we already tried this heuristic in binary_oper_exact(). However, that + * we already tried this heuristic in binary_oper_exact(). However, that * code only finds exact matches, whereas here we will handle matches that * involve coercion, polymorphic type resolution, etc. */ @@ -1328,7 +1328,7 @@ func_get_detail(List *funcname, * * NB: it's important that this code does not exceed what coerce_type * can do, because the caller will try to apply coerce_type if we - * return FUNCDETAIL_COERCION. If we return that result for something + * return FUNCDETAIL_COERCION. If we return that result for something * coerce_type can't handle, we'll cause infinite recursion between * this module and coerce_type! */ @@ -1506,7 +1506,7 @@ func_get_detail(List *funcname, { /* * This is a bit tricky in named notation, since the supplied - * arguments could replace any subset of the defaults. We + * arguments could replace any subset of the defaults. We * work by making a bitmapset of the argnumbers of defaulted * arguments, then scanning the defaults list and selecting * the needed items. (This assumes that defaulted arguments @@ -1751,7 +1751,7 @@ FuncNameAsType(List *funcname) * ParseComplexProjection - * handles function calls with a single argument that is of complex type. * If the function call is actually a column projection, return a suitably - * transformed expression tree. If not, return NULL. + * transformed expression tree. If not, return NULL. */ static Node * ParseComplexProjection(ParseState *pstate, char *funcname, Node *first_arg, @@ -1825,7 +1825,7 @@ ParseComplexProjection(ParseState *pstate, char *funcname, Node *first_arg, * The result is something like "foo(integer)". 
* * If argnames isn't NIL, it is a list of C strings representing the actual - * arg names for the last N arguments. This must be considered part of the + * arg names for the last N arguments. This must be considered part of the * function signature too, when dealing with named-notation function calls. * * This is typically used in the construction of function-not-found error diff --git a/src/backend/parser/parse_node.c b/src/backend/parser/parse_node.c index fc9e53a41d3..1e3d1f68fab 100644 --- a/src/backend/parser/parse_node.c +++ b/src/backend/parser/parse_node.c @@ -99,8 +99,8 @@ free_parsestate(ParseState *pstate) * is a dummy (always 0, in fact). * * The locations stored in raw parsetrees are byte offsets into the source - * string. We have to convert them to 1-based character indexes for reporting - * to clients. (We do things this way to avoid unnecessary overhead in the + * string. We have to convert them to 1-based character indexes for reporting + * to clients. (We do things this way to avoid unnecessary overhead in the * normal non-error case: computing character indexes would be much more * expensive than storing token offsets.) */ @@ -129,7 +129,7 @@ parser_errposition(ParseState *pstate, int location) * Sometimes the parser calls functions that aren't part of the parser * subsystem and can't reasonably be passed a ParseState; yet we would * like any errors thrown in those functions to be tagged with a parse - * error location. Use this function to set up an error context stack + * error location. Use this function to set up an error context stack * entry that will accomplish that. Usage pattern: * * declare a local variable "ParseCallbackState pcbstate" @@ -221,7 +221,7 @@ transformArrayType(Oid *arrayType, int32 *arrayTypmod) * If the input is a domain, smash to base type, and extract the actual * typmod to be applied to the base type. Subscripting a domain is an * operation that necessarily works on the base array type, not the domain - * itself. (Note that we provide no method whereby the creator of a + * itself. (Note that we provide no method whereby the creator of a * domain over an array type could hide its ability to be subscripted.) */ *arrayType = getBaseTypeAndTypmod(*arrayType, arrayTypmod); @@ -269,7 +269,7 @@ transformArrayType(Oid *arrayType, int32 *arrayTypmod) * * In an array assignment, we are given a destination array value plus a * source value that is to be assigned to a single element or a slice of - * that array. We produce an expression that represents the new array value + * that array. We produce an expression that represents the new array value * with the source data inserted into the right part of the array. * * For both cases, if the source array is of a domain-over-array type, diff --git a/src/backend/parser/parse_oper.c b/src/backend/parser/parse_oper.c index a2b712d5161..b65b632f17e 100644 --- a/src/backend/parser/parse_oper.c +++ b/src/backend/parser/parse_oper.c @@ -447,7 +447,7 @@ oper(ParseState *pstate, List *opname, Oid ltypeId, Oid rtypeId, * * This is tighter than oper() because it will not return an operator that * requires coercion of the input datatypes (but binary-compatible operators - * are accepted). Otherwise, the semantics are the same. + * are accepted). Otherwise, the semantics are the same. 
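The parse_node.c hunk above notes that raw parse trees carry byte offsets into the source string, which are converted to 1-based character indexes only when an error is actually reported to the client. Below is a minimal standalone sketch of that conversion for UTF-8 input; the function name and the UTF-8 assumption are mine, and PostgreSQL's own conversion goes through its multibyte routines rather than a hand-rolled loop like this.

#include <stdio.h>
#include <stddef.h>

/*
 * Convert a 0-based byte offset into a UTF-8 string to the 1-based
 * character index used for an error cursor.  UTF-8 continuation bytes
 * (10xxxxxx) do not start a new character, so they are not counted.
 */
static int
byte_offset_to_char_index(const char *src, size_t byte_offset)
{
    int charindex = 0;

    for (size_t i = 0; i <= byte_offset && src[i] != '\0'; i++)
    {
        if (((unsigned char) src[i] & 0xC0) != 0x80)
            charindex++;            /* this byte starts a character */
    }
    return charindex;
}

int
main(void)
{
    const char *query = "SELECT 'caf\xc3\xa9' + 1";     /* the e-acute is two bytes */

    /* '+' sits at byte offset 15 but is the 15th character, not the 16th */
    printf("%d\n", byte_offset_to_char_index(query, 15));
    return 0;
}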
*/ Operator compatible_oper(ParseState *pstate, List *op, Oid arg1, Oid arg2, @@ -980,7 +980,7 @@ make_scalar_array_op(ParseState *pstate, List *opname, * mapping is pretty expensive to compute, especially for ambiguous operators; * this is mainly because there are a *lot* of instances of popular operator * names such as "=", and we have to check each one to see which is the - * best match. So once we have identified the correct mapping, we save it + * best match. So once we have identified the correct mapping, we save it * in a cache that need only be flushed on pg_operator or pg_cast change. * (pg_cast must be considered because changes in the set of implicit casts * affect the set of applicable operators for any given input datatype.) diff --git a/src/backend/parser/parse_param.c b/src/backend/parser/parse_param.c index c5c034b2d86..41b755a1fa9 100644 --- a/src/backend/parser/parse_param.c +++ b/src/backend/parser/parse_param.c @@ -256,7 +256,7 @@ variable_coerce_param_hook(ParseState *pstate, Param *param, * of parsing with parse_variable_parameters. * * Note: this code intentionally does not check that all parameter positions - * were used, nor that all got non-UNKNOWN types assigned. Caller of parser + * were used, nor that all got non-UNKNOWN types assigned. Caller of parser * should enforce that if it's important. */ void diff --git a/src/backend/parser/parse_relation.c b/src/backend/parser/parse_relation.c index 8760952dfee..478584d946e 100644 --- a/src/backend/parser/parse_relation.c +++ b/src/backend/parser/parse_relation.c @@ -71,7 +71,7 @@ static bool isQueryUsingTempRelation_walker(Node *node, void *context); * * A qualified refname (schemaname != NULL) can only match a relation RTE * that (a) has no alias and (b) is for the same relation identified by - * schemaname.refname. In this case we convert schemaname.refname to a + * schemaname.refname. In this case we convert schemaname.refname to a * relation OID and search by relid, rather than by alias name. This is * peculiar, but it's what SQL says to do. */ @@ -181,7 +181,7 @@ scanNameSpaceForRefname(ParseState *pstate, const char *refname, int location) /* * Search the query's table namespace for a relation RTE matching the - * given relation OID. Return the RTE if a unique match, or NULL + * given relation OID. Return the RTE if a unique match, or NULL * if no match. Raise error if multiple matches. * * See the comments for refnameRangeTblEntry to understand why this @@ -285,7 +285,7 @@ isFutureCTE(ParseState *pstate, const char *refname) * * This is different from refnameRangeTblEntry in that it considers every * entry in the ParseState's rangetable(s), not only those that are currently - * visible in the p_namespace list(s). This behavior is invalid per the SQL + * visible in the p_namespace list(s). This behavior is invalid per the SQL * spec, and it may give ambiguous results (there might be multiple equally * valid matches, but only one will be returned). This must be used ONLY * as a heuristic in giving suitable error messages. See errorMissingRTE. @@ -308,8 +308,8 @@ searchRangeTableForRel(ParseState *pstate, RangeVar *relation) * relation. * * NB: It's not critical that RangeVarGetRelid return the correct answer - * here in the face of concurrent DDL. If it doesn't, the worst case - * scenario is a less-clear error message. Also, the tables involved in + * here in the face of concurrent DDL. If it doesn't, the worst case + * scenario is a less-clear error message. 
Also, the tables involved in * the query are already locked, which reduces the number of cases in * which surprising behavior can occur. So we do the name lookup * unlocked. @@ -431,7 +431,7 @@ check_lateral_ref_ok(ParseState *pstate, ParseNamespaceItem *nsitem, /* * given an RTE, return RT index (starting with 1) of the entry, - * and optionally get its nesting depth (0 = current). If sublevels_up + * and optionally get its nesting depth (0 = current). If sublevels_up * is NULL, only consider rels at the current nesting level. * Raises error if RTE not found. */ @@ -585,11 +585,11 @@ scanRTEForColumn(ParseState *pstate, RangeTblEntry *rte, char *colname, /* In constraint check, no system column is allowed except tableOid */ if (pstate->p_expr_kind == EXPR_KIND_CHECK_CONSTRAINT && - attnum < InvalidAttrNumber && attnum != TableOidAttributeNumber) + attnum < InvalidAttrNumber && attnum != TableOidAttributeNumber) ereport(ERROR, (errcode(ERRCODE_INVALID_COLUMN_REFERENCE), errmsg("system column \"%s\" reference in check constraint is invalid", - colname), + colname), parser_errposition(pstate, location))); if (attnum != InvalidAttrNumber) @@ -673,7 +673,7 @@ colNameToVar(ParseState *pstate, char *colname, bool localonly, * * This is different from colNameToVar in that it considers every entry in * the ParseState's rangetable(s), not only those that are currently visible - * in the p_namespace list(s). This behavior is invalid per the SQL spec, + * in the p_namespace list(s). This behavior is invalid per the SQL spec, * and it may give ambiguous results (there might be multiple equally valid * matches, but only one will be returned). This must be used ONLY as a * heuristic in giving suitable error messages. See errorMissingColumn. @@ -1016,7 +1016,7 @@ addRangeTableEntry(ParseState *pstate, /* * Get the rel's OID. This access also ensures that we have an up-to-date - * relcache entry for the rel. Since this is typically the first access + * relcache entry for the rel. Since this is typically the first access * to a rel in a statement, be careful to get the right access level * depending on whether we're doing SELECT FOR UPDATE/SHARE. */ @@ -2580,7 +2580,7 @@ get_rte_attribute_is_dropped(RangeTblEntry *rte, AttrNumber attnum) * Dropped attributes are only possible with functions that * return named composite types. In such a case we have to * look up the result type to see if it currently has this - * column dropped. So first, loop over the funcs until we + * column dropped. So first, loop over the funcs until we * find the one that covers the requested column. */ foreach(lc, rte->functions) @@ -2811,7 +2811,7 @@ errorMissingRTE(ParseState *pstate, RangeVar *relation) /* * Check to see if there are any potential matches in the query's - * rangetable. (Note: cases involving a bad schema name in the RangeVar + * rangetable. (Note: cases involving a bad schema name in the RangeVar * will throw error immediately here. That seems OK.) */ rte = searchRangeTableForRel(pstate, relation); @@ -2865,7 +2865,7 @@ errorMissingColumn(ParseState *pstate, RangeTblEntry *rte; /* - * If relname was given, just play dumb and report it. (In practice, a + * If relname was given, just play dumb and report it. (In practice, a * bad qualification name should end up at errorMissingRTE, not here, so * no need to work hard on this case.) 
*/ diff --git a/src/backend/parser/parse_target.c b/src/backend/parser/parse_target.c index f971c71a92b..2ee1270ec5d 100644 --- a/src/backend/parser/parse_target.c +++ b/src/backend/parser/parse_target.c @@ -182,7 +182,7 @@ transformTargetList(ParseState *pstate, List *targetlist, * This is the identical transformation to transformTargetList, except that * the input list elements are bare expressions without ResTarget decoration, * and the output elements are likewise just expressions without TargetEntry - * decoration. We use this for ROW() and VALUES() constructs. + * decoration. We use this for ROW() and VALUES() constructs. */ List * transformExpressionList(ParseState *pstate, List *exprlist, @@ -348,7 +348,7 @@ markTargetListOrigin(ParseState *pstate, TargetEntry *tle, /* * transformAssignedExpr() - * This is used in INSERT and UPDATE statements only. It prepares an + * This is used in INSERT and UPDATE statements only. It prepares an * expression for assignment to a column of the target table. * This includes coercing the given value to the target column's type * (if necessary), and dealing with any subfield names or subscripts @@ -367,7 +367,7 @@ markTargetListOrigin(ParseState *pstate, TargetEntry *tle, * * Note: location points at the target column name (SET target or INSERT * column name list entry), and must therefore be -1 in an INSERT that - * omits the column name list. So we should usually prefer to use + * omits the column name list. So we should usually prefer to use * exprLocation(expr) for errors that can happen in a default INSERT. */ Expr * @@ -442,7 +442,7 @@ transformAssignedExpr(ParseState *pstate, /* * If there is indirection on the target column, prepare an array or - * subfield assignment expression. This will generate a new column value + * subfield assignment expression. This will generate a new column value * that the source value has been inserted into, which can then be placed * in the new tuple constructed by INSERT or UPDATE. */ @@ -550,7 +550,7 @@ updateTargetListEntry(ParseState *pstate, /* * Set the resno to identify the target column --- the rewriter and - * planner depend on this. We also set the resname to identify the target + * planner depend on this. We also set the resname to identify the target * column, but this is only for debugging purposes; it should not be * relied on. (In particular, it might be out of date in a stored rule.) */ @@ -998,7 +998,7 @@ ExpandColumnRefStar(ParseState *pstate, ColumnRef *cref, * * Note: this code is a lot like transformColumnRef; it's tempting to * call that instead and then replace the resulting whole-row Var with - * a list of Vars. However, that would leave us with the RTE's + * a list of Vars. However, that would leave us with the RTE's * selectedCols bitmap showing the whole row as needing select * permission, as well as the individual columns. That would be * incorrect (since columns added later shouldn't need select @@ -1017,7 +1017,7 @@ ExpandColumnRefStar(ParseState *pstate, ColumnRef *cref, } crserr = CRSERR_NO_RTE; /* - * Give the PreParseColumnRefHook, if any, first shot. If it returns + * Give the PreParseColumnRefHook, if any, first shot. If it returns * non-null then we should use that expression. */ if (pstate->p_pre_columnref_hook != NULL) @@ -1133,7 +1133,7 @@ ExpandColumnRefStar(ParseState *pstate, ColumnRef *cref, * Transforms '*' (in the target list) into a list of targetlist entries. * * tlist entries are generated for each relation visible for unqualified - * column name access. 
We do not consider qualified-name-only entries because + * column name access. We do not consider qualified-name-only entries because * that would include input tables of aliasless JOINs, NEW/OLD pseudo-entries, * etc. * @@ -1280,7 +1280,7 @@ ExpandRowReference(ParseState *pstate, Node *expr, /* * If the rowtype expression is a whole-row Var, we can expand the fields - * as simple Vars. Note: if the RTE is a relation, this case leaves us + * as simple Vars. Note: if the RTE is a relation, this case leaves us * with the RTE's selectedCols bitmap showing the whole row as needing * select permission, as well as the individual columns. However, we can * only get here for weird notations like (table.*).*, so it's not worth @@ -1362,7 +1362,7 @@ ExpandRowReference(ParseState *pstate, Node *expr, * Get the tuple descriptor for a Var of type RECORD, if possible. * * Since no actual table or view column is allowed to have type RECORD, such - * a Var must refer to a JOIN or FUNCTION RTE or to a subquery output. We + * a Var must refer to a JOIN or FUNCTION RTE or to a subquery output. We * drill down to find the ultimate defining expression and attempt to infer * the tupdesc from it. We ereport if we can't determine the tupdesc. * @@ -1445,7 +1445,7 @@ expandRecordVariable(ParseState *pstate, Var *var, int levelsup) { /* * Recurse into the sub-select to see what its Var refers - * to. We have to build an additional level of ParseState + * to. We have to build an additional level of ParseState * to keep in step with varlevelsup in the subselect. */ ParseState mypstate; @@ -1519,7 +1519,7 @@ expandRecordVariable(ParseState *pstate, Var *var, int levelsup) /* * We now have an expression we can't expand any more, so see if - * get_expr_result_type() can do anything with it. If not, pass to + * get_expr_result_type() can do anything with it. If not, pass to * lookup_rowtype_tupdesc() which will probably fail, but will give an * appropriate error message while failing. */ diff --git a/src/backend/parser/parse_type.c b/src/backend/parser/parse_type.c index b8c10e11c9f..d0803dfafd1 100644 --- a/src/backend/parser/parse_type.c +++ b/src/backend/parser/parse_type.c @@ -35,7 +35,7 @@ static int32 typenameTypeMod(ParseState *pstate, const TypeName *typeName, /* * LookupTypeName * Given a TypeName object, lookup the pg_type syscache entry of the type. - * Returns NULL if no such type can be found. If the type is found, + * Returns NULL if no such type can be found. If the type is found, * the typmod value represented in the TypeName struct is computed and * stored into *typmod_p. * @@ -48,7 +48,7 @@ static int32 typenameTypeMod(ParseState *pstate, const TypeName *typeName, * * typmod_p can be passed as NULL if the caller does not care to know the * typmod value, but the typmod decoration (if any) will be validated anyway, - * except in the case where the type is not found. Note that if the type is + * except in the case where the type is not found. Note that if the type is * found but is a shell, and there is typmod decoration, an error will be * thrown --- this is intentional. * @@ -113,7 +113,7 @@ LookupTypeName(ParseState *pstate, const TypeName *typeName, * Look up the field. * * XXX: As no lock is taken here, this might fail in the presence of - * concurrent DDL. But taking a lock would carry a performance + * concurrent DDL. But taking a lock would carry a performance * penalty and would also require a permissions check. 
*/ relid = RangeVarGetRelid(rel, NoLock, missing_ok); @@ -625,7 +625,7 @@ typeTypeCollation(Type typ) /* * Given a type structure and a string, returns the internal representation - * of that string. The "string" can be NULL to perform conversion of a NULL + * of that string. The "string" can be NULL to perform conversion of a NULL * (which might result in failure, if the input function rejects NULLs). */ Datum @@ -649,7 +649,7 @@ stringTypeDatum(Type tp, char *string, int32 atttypmod) * instability in the input function is that comparison of Const nodes * relies on bytewise comparison of the datums, so if the input function * leaves garbage then subexpressions that should be identical may not get - * recognized as such. See pgsql-hackers discussion of 2008-04-04. + * recognized as such. See pgsql-hackers discussion of 2008-04-04. */ if (string && !typform->typbyval) { @@ -696,7 +696,7 @@ pts_error_callback(void *arg) /* * Currently we just suppress any syntax error position report, rather - * than transforming to an "internal query" error. It's unlikely that a + * than transforming to an "internal query" error. It's unlikely that a * type name is complex enough to need positioning. */ errposition(0); @@ -792,9 +792,9 @@ parseTypeString(const char *str, Oid *typeid_p, int32 *typmod_p, if (!missing_ok) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("type \"%s\" does not exist", + errmsg("type \"%s\" does not exist", TypeNameToString(typeName)), - parser_errposition(NULL, typeName->location))); + parser_errposition(NULL, typeName->location))); *typeid_p = InvalidOid; } else @@ -802,9 +802,9 @@ parseTypeString(const char *str, Oid *typeid_p, int32 *typmod_p, if (!((Form_pg_type) GETSTRUCT(tup))->typisdefined) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("type \"%s\" is only a shell", + errmsg("type \"%s\" is only a shell", TypeNameToString(typeName)), - parser_errposition(NULL, typeName->location))); + parser_errposition(NULL, typeName->location))); *typeid_p = HeapTupleGetOid(tup); ReleaseSysCache(tup); } diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c index 1e071d7908b..7c1939f9c45 100644 --- a/src/backend/parser/parse_utilcmd.c +++ b/src/backend/parser/parse_utilcmd.c @@ -157,7 +157,7 @@ transformCreateStmt(CreateStmt *stmt, const char *queryString) stmt = (CreateStmt *) copyObject(stmt); /* - * Look up the creation namespace. This also checks permissions on the + * Look up the creation namespace. This also checks permissions on the * target namespace, locks it against concurrent drops, checks for a * preexisting relation in that namespace with the same name, and updates * stmt->relation->relpersistence if the select namespace is temporary. @@ -183,7 +183,7 @@ transformCreateStmt(CreateStmt *stmt, const char *queryString) * If the target relation name isn't schema-qualified, make it so. This * prevents some corner cases in which added-on rewritten commands might * think they should apply to other relations that have the same name and - * are earlier in the search path. But a local temp table is effectively + * are earlier in the search path. But a local temp table is effectively * specified to be in pg_temp, so no need for anything extra in that case. 
*/ if (stmt->relation->schemaname == NULL @@ -672,7 +672,7 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla if (cxt->isforeign) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("LIKE is not supported for creating foreign tables"))); + errmsg("LIKE is not supported for creating foreign tables"))); relation = relation_openrv(table_like_clause->relation, AccessShareLock); @@ -712,7 +712,7 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla constr = tupleDesc->constr; /* - * Initialize column number map for map_variable_attnos(). We need this + * Initialize column number map for map_variable_attnos(). We need this * since dropped columns in the source table aren't copied, so the new * table can have different column numbers. */ @@ -927,7 +927,7 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla /* * Close the parent rel, but keep our AccessShareLock on it until xact - * commit. That will prevent someone else from deleting or ALTERing the + * commit. That will prevent someone else from deleting or ALTERing the * parent before the child is committed. */ heap_close(relation, NoLock); @@ -1608,7 +1608,7 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt) parser_errposition(cxt->pstate, constraint->location))); /* - * Insist on it being a btree. That's the only kind that supports + * Insist on it being a btree. That's the only kind that supports * uniqueness at the moment anyway; but we must have an index that * exactly matches what you'd get from plain ADD CONSTRAINT syntax, * else dump and reload will produce a different index (breaking @@ -1635,7 +1635,7 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt) /* * We shouldn't see attnum == 0 here, since we already rejected - * expression indexes. If we do, SystemAttributeDefinition will + * expression indexes. If we do, SystemAttributeDefinition will * throw an error. */ if (attnum > 0) @@ -1649,7 +1649,7 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt) attname = pstrdup(NameStr(attform->attname)); /* - * Insist on default opclass and sort options. While the index + * Insist on default opclass and sort options. While the index * would still work as a constraint with non-default settings, it * might not provide exactly the same uniqueness semantics as * you'd get from a normally-created constraint; and there's also @@ -1900,7 +1900,7 @@ transformFKConstraints(CreateStmtContext *cxt, * transformIndexStmt - parse analysis for CREATE INDEX and ALTER TABLE * * Note: this is a no-op for an index not using either index expressions or - * a predicate expression. There are several code paths that create indexes + * a predicate expression. There are several code paths that create indexes * without bothering to call this, because they know they don't have any * such expressions to deal with. * @@ -2023,7 +2023,7 @@ transformRuleStmt(RuleStmt *stmt, const char *queryString, /* * To avoid deadlock, make sure the first thing we do is grab - * AccessExclusiveLock on the target relation. This will be needed by + * AccessExclusiveLock on the target relation. This will be needed by * DefineQueryRewrite(), and we don't want to grab a lesser lock * beforehand. 
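transformTableLikeClause, in the hunk above, initializes a column number map because dropped columns in the source table aren't copied, so attribute numbers shift in the new table. The standalone sketch below shows the shape of such an old-to-new attno map; the names are illustrative and this is not the actual map_variable_attnos machinery.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Build a map from old attribute numbers (1-based) to new ones,
 * skipping dropped columns.  Dropped entries map to 0, meaning
 * "no corresponding column in the new table".
 */
static int *
build_attno_map(const bool *attisdropped, int natts)
{
    int *map = calloc(natts, sizeof(int));
    int  newattno = 0;

    for (int i = 0; i < natts; i++)
    {
        if (!attisdropped[i])
            map[i] = ++newattno;    /* next surviving column */
    }
    return map;
}

int
main(void)
{
    bool dropped[5] = {false, true, false, true, false};
    int *map = build_attno_map(dropped, 5);

    for (int old = 1; old <= 5; old++)
        printf("old attno %d -> new attno %d\n", old, map[old - 1]);
    free(map);
    return 0;
}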
*/ diff --git a/src/backend/parser/parser.c b/src/backend/parser/parser.c index a439e8b1991..663296683a5 100644 --- a/src/backend/parser/parser.c +++ b/src/backend/parser/parser.c @@ -65,7 +65,7 @@ raw_parser(const char *str) * Intermediate filter between parser and core lexer (core_yylex in scan.l). * * The filter is needed because in some cases the standard SQL grammar - * requires more than one token lookahead. We reduce these cases to one-token + * requires more than one token lookahead. We reduce these cases to one-token * lookahead by combining tokens here, in order to keep the grammar LALR(1). * * Using a filter is simpler than trying to recognize multiword tokens diff --git a/src/backend/parser/scansup.c b/src/backend/parser/scansup.c index b9871bb297a..e9fa5dd0b02 100644 --- a/src/backend/parser/scansup.c +++ b/src/backend/parser/scansup.c @@ -132,7 +132,7 @@ downcase_truncate_identifier(const char *ident, int len, bool warn) { char *result; int i; - bool enc_is_single_byte; + bool enc_is_single_byte; result = palloc(len + 1); enc_is_single_byte = pg_database_encoding_max_length() == 1; @@ -143,8 +143,8 @@ downcase_truncate_identifier(const char *ident, int len, bool warn) * locale-aware translation. However, there are some locales where this * is not right either (eg, Turkish may do strange things with 'i' and * 'I'). Our current compromise is to use tolower() for characters with - * the high bit set, as long as they aren't part of a multi-byte character, - * and use an ASCII-only downcasing for 7-bit characters. + * the high bit set, as long as they aren't part of a multi-byte + * character, and use an ASCII-only downcasing for 7-bit characters. */ for (i = 0; i < len; i++) { diff --git a/src/backend/port/darwin/system.c b/src/backend/port/darwin/system.c index d571f26ef8c..1cd52669290 100644 --- a/src/backend/port/darwin/system.c +++ b/src/backend/port/darwin/system.c @@ -24,7 +24,7 @@ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) diff --git a/src/backend/port/dynloader/darwin.c b/src/backend/port/dynloader/darwin.c index 484eb43b5c3..ccd92c39d4b 100644 --- a/src/backend/port/dynloader/darwin.c +++ b/src/backend/port/dynloader/darwin.c @@ -47,7 +47,7 @@ pg_dlerror(void) /* * These routines were taken from the Apache source, but were made - * available with a PostgreSQL-compatible license. Kudos Wilfredo + * available with a PostgreSQL-compatible license. Kudos Wilfredo * Sánchez <[email protected]>. */ diff --git a/src/backend/port/dynloader/freebsd.c b/src/backend/port/dynloader/freebsd.c index 53af482f072..60d86548189 100644 --- a/src/backend/port/dynloader/freebsd.c +++ b/src/backend/port/dynloader/freebsd.c @@ -20,7 +20,7 @@ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) diff --git a/src/backend/port/dynloader/netbsd.c b/src/backend/port/dynloader/netbsd.c index 1333cbcc584..9af0467347c 100644 --- a/src/backend/port/dynloader/netbsd.c +++ b/src/backend/port/dynloader/netbsd.c @@ -20,7 +20,7 @@ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) diff --git a/src/backend/port/dynloader/openbsd.c b/src/backend/port/dynloader/openbsd.c index 4a04b12fcd6..41459db3880 100644 --- a/src/backend/port/dynloader/openbsd.c +++ b/src/backend/port/dynloader/openbsd.c @@ -20,7 +20,7 @@ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) diff --git a/src/backend/port/posix_sema.c b/src/backend/port/posix_sema.c index c6ac713bc5a..1aafd31e1fe 100644 --- a/src/backend/port/posix_sema.c +++ b/src/backend/port/posix_sema.c @@ -138,7 +138,7 @@ PosixSemaphoreKill(sem_t * sem) * * This is called during postmaster start or shared memory reinitialization. * It should do whatever is needed to be able to support up to maxSemas - * subsequent PGSemaphoreCreate calls. Also, if any system resources + * subsequent PGSemaphoreCreate calls. Also, if any system resources * are acquired here or in PGSemaphoreCreate, register an on_shmem_exit * callback to release them. * diff --git a/src/backend/port/sysv_sema.c b/src/backend/port/sysv_sema.c index d5d66edcd3b..9f72ed3115d 100644 --- a/src/backend/port/sysv_sema.c +++ b/src/backend/port/sysv_sema.c @@ -253,7 +253,7 @@ IpcSemaphoreCreate(int numSems) /* * Can only get here if some other process managed to create the same - * sema key before we did. Let him have that one, loop around to try + * sema key before we did. Let him have that one, loop around to try * next key. */ } @@ -278,12 +278,12 @@ IpcSemaphoreCreate(int numSems) * * This is called during postmaster start or shared memory reinitialization. * It should do whatever is needed to be able to support up to maxSemas - * subsequent PGSemaphoreCreate calls. Also, if any system resources + * subsequent PGSemaphoreCreate calls. Also, if any system resources * are acquired here or in PGSemaphoreCreate, register an on_shmem_exit * callback to release them. 
* * The port number is passed for possible use as a key (for SysV, we use - * it to generate the starting semaphore key). In a standalone backend, + * it to generate the starting semaphore key). In a standalone backend, * zero will be passed. * * In the SysV implementation, we acquire semaphore sets on-demand; the @@ -378,7 +378,7 @@ PGSemaphoreLock(PGSemaphore sema, bool interruptOK) * from the operation prematurely because we were sent a signal. So we * try and lock the semaphore again. * - * Each time around the loop, we check for a cancel/die interrupt. On + * Each time around the loop, we check for a cancel/die interrupt. On * some platforms, if such an interrupt comes in while we are waiting, it * will cause the semop() call to exit with errno == EINTR, allowing us to * service the interrupt (if not in a critical section already) during the @@ -396,7 +396,7 @@ PGSemaphoreLock(PGSemaphore sema, bool interruptOK) * do CHECK_FOR_INTERRUPTS; then, a die() interrupt in this interval will * execute directly. However, there is a huge pitfall: there is another * window of a few instructions after the semop() before we are able to - * reset ImmediateInterruptOK. If an interrupt occurs then, we'll lose + * reset ImmediateInterruptOK. If an interrupt occurs then, we'll lose * control, which means that the lock has been acquired but our caller did * not get a chance to record the fact. Therefore, we only set * ImmediateInterruptOK if the caller tells us it's OK to do so, ie, the @@ -409,9 +409,9 @@ PGSemaphoreLock(PGSemaphore sema, bool interruptOK) * On some platforms, signals marked SA_RESTART (which is most, for us) * will not interrupt the semop(); it will just keep waiting. Therefore * it's necessary for cancel/die interrupts to be serviced directly by the - * signal handler. On these platforms the behavior is really the same + * signal handler. On these platforms the behavior is really the same * whether the signal arrives just before the semop() begins, or while it - * is waiting. The loop on EINTR is thus important only for platforms + * is waiting. The loop on EINTR is thus important only for platforms * without SA_RESTART. */ do diff --git a/src/backend/port/sysv_shmem.c b/src/backend/port/sysv_shmem.c index 5e3850b024d..7430757c753 100644 --- a/src/backend/port/sysv_shmem.c +++ b/src/backend/port/sysv_shmem.c @@ -228,7 +228,7 @@ IpcMemoryDelete(int status, Datum shmId) * Is a previously-existing shmem segment still existing and in use? * * The point of this exercise is to detect the case where a prior postmaster - * crashed, but it left child backends that are still running. Therefore + * crashed, but it left child backends that are still running. Therefore * we only care about shmem segments that are associated with the intended * DataDir. This is an important consideration since accidental matches of * shmem segment IDs are reasonably common. @@ -374,8 +374,8 @@ CreateAnonymousSegment(Size *size) (huge_pages == HUGE_PAGES_TRY && ptr == MAP_FAILED)) { /* - * use the original size, not the rounded up value, when falling - * back to non-huge pages. + * use the original size, not the rounded up value, when falling back + * to non-huge pages. */ allocsize = *size; ptr = mmap(NULL, allocsize, PROT_READ | PROT_WRITE, @@ -411,14 +411,14 @@ CreateAnonymousSegment(Size *size) * the storage. * * Dead Postgres segments are recycled if found, but we do not fail upon - * collision with non-Postgres shmem segments. The idea here is to detect and + * collision with non-Postgres shmem segments. 
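The PGSemaphoreLock comments above revolve around the loop on semop(): on platforms without SA_RESTART a signal makes the call fail with EINTR, and the lock attempt is simply retried once the interrupt has been serviced. Below is a bare-bones standalone version of that loop, without any of the ImmediateInterruptOK or cancel/die handling.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ipc.h>
#include <sys/sem.h>

/*
 * Decrement (acquire) semaphore 0 of the set, retrying whenever a
 * signal interrupts the wait.  A real implementation would check for
 * pending cancel/die interrupts each time around the loop.
 */
static void
semaphore_lock(int semid)
{
    struct sembuf sops;
    int errStatus;

    sops.sem_num = 0;
    sops.sem_op = -1;
    sops.sem_flg = 0;

    do
    {
        errStatus = semop(semid, &sops, 1);
    } while (errStatus < 0 && errno == EINTR);

    if (errStatus < 0)
    {
        perror("semop");
        exit(1);
    }
}

int
main(void)
{
    int semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
    struct sembuf unlock;

    unlock.sem_num = 0;
    unlock.sem_op = 1;              /* make the semaphore available */
    unlock.sem_flg = 0;
    semop(semid, &unlock, 1);       /* Linux starts new SysV sems at 0 */

    semaphore_lock(semid);
    printf("acquired\n");
    semctl(semid, 0, IPC_RMID);
    return 0;
}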
The idea here is to detect and * re-use keys that may have been assigned by a crashed postmaster or backend. * * makePrivate means to always create a new segment, rather than attach to * or recycle any existing segment. * * The port number is passed for possible use as a key (for SysV, we use - * it to generate the starting shmem key). In a standalone backend, + * it to generate the starting shmem key). In a standalone backend, * zero will be passed. */ PGShmemHeader * @@ -512,9 +512,9 @@ PGSharedMemoryCreate(Size size, bool makePrivate, int port, /* * The segment appears to be from a dead Postgres process, or from a * previous cycle of life in this same process. Zap it, if possible, - * and any associated dynamic shared memory segments, as well. - * This probably shouldn't fail, but if it does, assume the segment - * belongs to someone else after all, and continue quietly. + * and any associated dynamic shared memory segments, as well. This + * probably shouldn't fail, but if it does, assume the segment belongs + * to someone else after all, and continue quietly. */ if (hdr->dsm_control != 0) dsm_cleanup_using_control_segment(hdr->dsm_control); @@ -583,7 +583,7 @@ PGSharedMemoryCreate(Size size, bool makePrivate, int port, /* * PGSharedMemoryReAttach * - * Re-attach to an already existing shared memory segment. In the non + * Re-attach to an already existing shared memory segment. In the non * EXEC_BACKEND case this is not used, because postmaster children inherit * the shared memory segment attachment via fork(). * @@ -626,7 +626,7 @@ PGSharedMemoryReAttach(void) * * Detach from the shared memory segment, if still attached. This is not * intended for use by the process that originally created the segment - * (it will have an on_shmem_exit callback registered to do that). Rather, + * (it will have an on_shmem_exit callback registered to do that). Rather, * this is for subprocesses that have inherited an attachment and want to * get rid of it. */ diff --git a/src/backend/port/unix_latch.c b/src/backend/port/unix_latch.c index 4d1a3051771..d0e928f8c49 100644 --- a/src/backend/port/unix_latch.c +++ b/src/backend/port/unix_latch.c @@ -239,7 +239,7 @@ WaitLatchOrSocket(volatile Latch *latch, int wakeEvents, pgsocket sock, /* * Initialize timeout if requested. We must record the current time so * that we can determine the remaining timeout if the poll() or select() - * is interrupted. (On some platforms, select() will update the contents + * is interrupted. (On some platforms, select() will update the contents * of "tv" for us, but unfortunately we can't rely on that.) */ if (wakeEvents & WL_TIMEOUT) @@ -500,7 +500,7 @@ SetLatch(volatile Latch *latch) /* * XXX there really ought to be a memory barrier operation right here, to * ensure that any flag variables we might have changed get flushed to - * main memory before we check/set is_set. Without that, we have to + * main memory before we check/set is_set. Without that, we have to * require that callers provide their own synchronization for machines * with weak memory ordering (see latch.h). */ @@ -559,7 +559,7 @@ ResetLatch(volatile Latch *latch) /* * XXX there really ought to be a memory barrier operation right here, to * ensure that the write to is_set gets flushed to main memory before we - * examine any flag variables. Otherwise a concurrent SetLatch might + * examine any flag variables. 
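The WaitLatchOrSocket hunk above records the wait's start time so the remaining timeout can be recomputed when poll() or select() is interrupted, since not every platform updates the timeout argument for you. A small standalone illustration of that pattern using poll() and CLOCK_MONOTONIC follows; the latch and postmaster-death plumbing is omitted.

#include <errno.h>
#include <poll.h>
#include <stdio.h>
#include <time.h>

/* Milliseconds elapsed since "start". */
static long
elapsed_ms(const struct timespec *start)
{
    struct timespec now;

    clock_gettime(CLOCK_MONOTONIC, &now);
    return (now.tv_sec - start->tv_sec) * 1000L +
        (now.tv_nsec - start->tv_nsec) / 1000000L;
}

/*
 * Wait for readability on fd for up to timeout_ms.  When a signal
 * interrupts poll(), resume waiting only for the time that remains.
 */
static int
wait_readable(int fd, long timeout_ms)
{
    struct pollfd pfd;
    struct timespec start;

    pfd.fd = fd;
    pfd.events = POLLIN;
    clock_gettime(CLOCK_MONOTONIC, &start);

    for (;;)
    {
        long remain = timeout_ms - elapsed_ms(&start);
        int rc;

        if (remain < 0)
            remain = 0;
        rc = poll(&pfd, 1, (int) remain);
        if (rc >= 0 || errno != EINTR)
            return rc;              /* ready, timed out, or real error */
        /* EINTR: loop around with the recomputed remaining timeout */
    }
}

int
main(void)
{
    printf("poll returned %d\n", wait_readable(0, 500));    /* watch stdin */
    return 0;
}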
Otherwise a concurrent SetLatch might * falsely conclude that it needn't signal us, even though we have missed * seeing some flag updates that SetLatch was supposed to inform us of. * For the moment, callers must supply their own synchronization of flag diff --git a/src/backend/port/win32/socket.c b/src/backend/port/win32/socket.c index adc0e02335f..7b0f71b65dd 100644 --- a/src/backend/port/win32/socket.c +++ b/src/backend/port/win32/socket.c @@ -151,7 +151,7 @@ pgwin32_waitforsinglesocket(SOCKET s, int what, int timeout) (errmsg_internal("could not reset socket waiting event: error code %lu", GetLastError()))); /* - * Track whether socket is UDP or not. (NB: most likely, this is both + * Track whether socket is UDP or not. (NB: most likely, this is both * useless and wrong; there is no reason to think that the behavior of * WSAEventSelect is different for TCP and UDP.) */ @@ -160,7 +160,7 @@ pgwin32_waitforsinglesocket(SOCKET s, int what, int timeout) current_socket = s; /* - * Attach event to socket. NOTE: we must detach it again before + * Attach event to socket. NOTE: we must detach it again before * returning, since other bits of code may try to attach other events to * the socket. */ diff --git a/src/backend/port/win32_latch.c b/src/backend/port/win32_latch.c index b5b7195d44c..6c50dbbe019 100644 --- a/src/backend/port/win32_latch.c +++ b/src/backend/port/win32_latch.c @@ -246,7 +246,7 @@ WaitLatchOrSocket(volatile Latch *latch, int wakeEvents, pgsocket sock, rc == WAIT_OBJECT_0 + pmdeath_eventno) { /* - * Postmaster apparently died. Since the consequences of falsely + * Postmaster apparently died. Since the consequences of falsely * returning WL_POSTMASTER_DEATH could be pretty unpleasant, we * take the trouble to positively verify this with * PostmasterIsAlive(), even though there is no known reason to diff --git a/src/backend/port/win32_shmem.c b/src/backend/port/win32_shmem.c index 8d0cc898406..d144edaa192 100644 --- a/src/backend/port/win32_shmem.c +++ b/src/backend/port/win32_shmem.c @@ -79,7 +79,7 @@ GetSharedMemName(void) * Is a previously-existing shmem segment still existing and in use? * * The point of this exercise is to detect the case where a prior postmaster - * crashed, but it left child backends that are still running. Therefore + * crashed, but it left child backends that are still running. Therefore * we only care about shmem segments that are associated with the intended * DataDir. This is an important consideration since accidental matches of * shmem segment IDs are reasonably common. diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c index 8926325faab..b53cfdbf6df 100644 --- a/src/backend/postmaster/autovacuum.c +++ b/src/backend/postmaster/autovacuum.c @@ -21,21 +21,21 @@ * There is an autovacuum shared memory area, where the launcher stores * information about the database it wants vacuumed. When it wants a new * worker to start, it sets a flag in shared memory and sends a signal to the - * postmaster. Then postmaster knows nothing more than it must start a worker; - * so it forks a new child, which turns into a worker. This new process + * postmaster. Then postmaster knows nothing more than it must start a worker; + * so it forks a new child, which turns into a worker. This new process * connects to shared memory, and there it can inspect the information that the * launcher has set up. * * If the fork() call fails in the postmaster, it sets a flag in the shared * memory area, and sends a signal to the launcher. 
The launcher, upon * noticing the flag, can try starting the worker again by resending the - * signal. Note that the failure can only be transient (fork failure due to + * signal. Note that the failure can only be transient (fork failure due to * high load, memory pressure, too many processes, etc); more permanent * problems, like failure to connect to a database, are detected later in the * worker and dealt with just by having the worker exit normally. The launcher * will launch a new worker again later, per schedule. * - * When the worker is done vacuuming it sends SIGUSR2 to the launcher. The + * When the worker is done vacuuming it sends SIGUSR2 to the launcher. The * launcher then wakes up and is able to launch another worker, if the schedule * is so tight that a new worker is needed immediately. At this time the * launcher can also balance the settings for the various remaining workers' @@ -244,7 +244,7 @@ typedef enum /*------------- * The main autovacuum shmem struct. On shared memory we store this main - * struct and the array of WorkerInfo structs. This struct keeps: + * struct and the array of WorkerInfo structs. This struct keeps: * * av_signal set by other processes to indicate various conditions * av_launcherpid the PID of the autovacuum launcher @@ -429,7 +429,7 @@ AutoVacLauncherMain(int argc, char *argv[]) /* * If possible, make this process a group leader, so that the postmaster - * can signal any child processes too. (autovacuum probably never has any + * can signal any child processes too. (autovacuum probably never has any * child processes, but for consistency we make all postmaster child * processes do this.) */ @@ -439,7 +439,7 @@ AutoVacLauncherMain(int argc, char *argv[]) #endif /* - * Set up signal handlers. We operate on databases much like a regular + * Set up signal handlers. We operate on databases much like a regular * backend, so we use the same signal handling. See equivalent code in * tcop/postgres.c. */ @@ -546,7 +546,7 @@ AutoVacLauncherMain(int argc, char *argv[]) /* * Force zero_damaged_pages OFF in the autovac process, even if it is set - * in postgresql.conf. We don't really want such a dangerous option being + * in postgresql.conf. We don't really want such a dangerous option being * applied non-interactively. */ SetConfigOption("zero_damaged_pages", "false", PGC_SUSET, PGC_S_OVERRIDE); @@ -869,7 +869,7 @@ launcher_determine_sleep(bool canlaunch, bool recursing, struct timeval * nap) * this the "new" database, because when the database was already present on * the list, we expect that this function is not called at all). The * preexisting list, if any, will be used to preserve the order of the - * databases in the autovacuum_naptime period. The new database is put at the + * databases in the autovacuum_naptime period. The new database is put at the * end of the interval. The actual values are not saved, which should not be * much of a problem. */ @@ -1073,7 +1073,7 @@ db_comparator(const void *a, const void *b) * * Bare-bones procedure for starting an autovacuum worker from the launcher. * It determines what database to work on, sets up shared memory stuff and - * signals postmaster to start the worker. It fails gracefully if invoked when + * signals postmaster to start the worker. It fails gracefully if invoked when * autovacuum_workers are already active. 
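The autovacuum comments above describe a launcher that sleeps until it is poked with SIGUSR2, either by the postmaster after a failed fork or by a worker that has finished. Here is a toy standalone version of that signal-as-wakeup pattern, with the handler doing nothing but setting a flag for the main loop to notice; the printed messages are only placeholders for what a launcher would actually do.

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static volatile sig_atomic_t got_SIGUSR2 = 0;

/* The handler only records the fact; the real work happens in the loop. */
static void
sigusr2_handler(int signo)
{
    (void) signo;
    got_SIGUSR2 = 1;
}

int
main(void)
{
    struct sigaction sa;

    memset(&sa, 0, sizeof(sa));
    sa.sa_handler = sigusr2_handler;
    sigemptyset(&sa.sa_mask);
    sigaction(SIGUSR2, &sa, NULL);

    printf("waiting for SIGUSR2, pid %ld\n", (long) getpid());
    for (;;)
    {
        pause();                    /* sleep until some signal arrives */
        if (got_SIGUSR2)
        {
            got_SIGUSR2 = 0;
            printf("woken up; could now launch another worker\n");
        }
    }
}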
* * Return value is the OID of the database that the worker is going to process, @@ -1345,7 +1345,7 @@ launch_worker(TimestampTz now) /* * Called from postmaster to signal a failure to fork a process to become - * worker. The postmaster should kill(SIGUSR2) the launcher shortly + * worker. The postmaster should kill(SIGUSR2) the launcher shortly * after calling this function. */ void @@ -1497,7 +1497,7 @@ AutoVacWorkerMain(int argc, char *argv[]) /* * If possible, make this process a group leader, so that the postmaster - * can signal any child processes too. (autovacuum probably never has any + * can signal any child processes too. (autovacuum probably never has any * child processes, but for consistency we make all postmaster child * processes do this.) */ @@ -1507,7 +1507,7 @@ AutoVacWorkerMain(int argc, char *argv[]) #endif /* - * Set up signal handlers. We operate on databases much like a regular + * Set up signal handlers. We operate on databases much like a regular * backend, so we use the same signal handling. See equivalent code in * tcop/postgres.c. * @@ -1558,7 +1558,7 @@ AutoVacWorkerMain(int argc, char *argv[]) EmitErrorReport(); /* - * We can now go away. Note that because we called InitProcess, a + * We can now go away. Note that because we called InitProcess, a * callback was registered to do ProcKill, which will clean up * necessary state. */ @@ -1572,7 +1572,7 @@ AutoVacWorkerMain(int argc, char *argv[]) /* * Force zero_damaged_pages OFF in the autovac process, even if it is set - * in postgresql.conf. We don't really want such a dangerous option being + * in postgresql.conf. We don't really want such a dangerous option being * applied non-interactively. */ SetConfigOption("zero_damaged_pages", "false", PGC_SUSET, PGC_S_OVERRIDE); @@ -1700,7 +1700,7 @@ FreeWorkerInfo(int code, Datum arg) /* * Wake the launcher up so that he can launch a new worker immediately * if required. We only save the launcher's PID in local memory here; - * the actual signal will be sent when the PGPROC is recycled. Note + * the actual signal will be sent when the PGPROC is recycled. Note * that we always do this, so that the launcher can rebalance the cost * limit setting of the remaining workers. * @@ -1808,7 +1808,7 @@ autovac_balance_cost(void) /* * We put a lower bound of 1 on the cost_limit, to avoid division- - * by-zero in the vacuum code. Also, in case of roundoff trouble + * by-zero in the vacuum code. Also, in case of roundoff trouble * in these calculations, let's be sure we don't ever set * cost_limit to more than the base value. */ @@ -1851,7 +1851,7 @@ get_database_list(void) /* * Start a transaction so we can access pg_database, and get a snapshot. * We don't have a use for the snapshot itself, but we're interested in - * the secondary effect that it sets RecentGlobalXmin. (This is critical + * the secondary effect that it sets RecentGlobalXmin. (This is critical * for anything that reads heap pages, because HOT may decide to prune * them even if the process doesn't attempt to modify any tuples.) */ @@ -2266,14 +2266,14 @@ do_autovacuum(void) } /* - * Ok, good to go. Store the table in shared memory before releasing + * Ok, good to go. Store the table in shared memory before releasing * the lock so that other workers don't vacuum it concurrently. */ MyWorkerInfo->wi_tableoid = relid; LWLockRelease(AutovacuumScheduleLock); /* - * Remember the prevailing values of the vacuum cost GUCs. We have to + * Remember the prevailing values of the vacuum cost GUCs. 
We have to * restore these at the bottom of the loop, else we'll compute wrong * values in the next iteration of autovac_balance_cost(). */ @@ -2302,7 +2302,7 @@ do_autovacuum(void) /* * Save the relation name for a possible error message, to avoid a - * catalog lookup in case of an error. If any of these return NULL, + * catalog lookup in case of an error. If any of these return NULL, * then the relation has been dropped since last we checked; skip it. * Note: they must live in a long-lived memory context because we call * vacuum and analyze in different transactions. @@ -2744,7 +2744,7 @@ relation_needs_vacanalyze(Oid relid, { /* * Skip a table not found in stat hash, unless we have to force vacuum - * for anti-wrap purposes. If it's not acted upon, there's no need to + * for anti-wrap purposes. If it's not acted upon, there's no need to * vacuum it. */ *dovacuum = force_vacuum; @@ -2946,7 +2946,7 @@ AutoVacuumShmemInit(void) * Refresh pgstats data for an autovacuum process * * Cause the next pgstats read operation to obtain fresh data, but throttle - * such refreshing in the autovacuum launcher. This is mostly to avoid + * such refreshing in the autovacuum launcher. This is mostly to avoid * rereading the pgstats files too many times in quick succession when there * are many databases. * diff --git a/src/backend/postmaster/bgworker.c b/src/backend/postmaster/bgworker.c index f65a80374c2..a6b25d8494a 100644 --- a/src/backend/postmaster/bgworker.c +++ b/src/backend/postmaster/bgworker.c @@ -35,7 +35,7 @@ /* * The postmaster's list of registered background workers, in private memory. */ -slist_head BackgroundWorkerList = SLIST_STATIC_INIT(BackgroundWorkerList); +slist_head BackgroundWorkerList = SLIST_STATIC_INIT(BackgroundWorkerList); /* * BackgroundWorkerSlots exist in shared memory and can be accessed (via @@ -71,23 +71,23 @@ slist_head BackgroundWorkerList = SLIST_STATIC_INIT(BackgroundWorkerList); */ typedef struct BackgroundWorkerSlot { - bool in_use; - bool terminate; - pid_t pid; /* InvalidPid = not started yet; 0 = dead */ - uint64 generation; /* incremented when slot is recycled */ + bool in_use; + bool terminate; + pid_t pid; /* InvalidPid = not started yet; 0 = dead */ + uint64 generation; /* incremented when slot is recycled */ BackgroundWorker worker; } BackgroundWorkerSlot; typedef struct BackgroundWorkerArray { - int total_slots; + int total_slots; BackgroundWorkerSlot slot[FLEXIBLE_ARRAY_MEMBER]; } BackgroundWorkerArray; struct BackgroundWorkerHandle { - int slot; - uint64 generation; + int slot; + uint64 generation; }; static BackgroundWorkerArray *BackgroundWorkerData; @@ -127,10 +127,10 @@ BackgroundWorkerShmemInit(void) BackgroundWorkerData->total_slots = max_worker_processes; /* - * Copy contents of worker list into shared memory. Record the - * shared memory slot assigned to each worker. This ensures - * a 1-to-1 correspondence betwen the postmaster's private list and - * the array in shared memory. + * Copy contents of worker list into shared memory. Record the shared + * memory slot assigned to each worker. This ensures a 1-to-1 + * correspondence betwen the postmaster's private list and the array + * in shared memory. 
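The BackgroundWorkerSlot fields reindented above (in_use, pid, and a generation that is "incremented when slot is recycled") are what let a BackgroundWorkerHandle notice that its slot now belongs to someone else. The standalone sketch below shows just that slot-plus-generation idea, single-process and without the locking or shared memory of the real thing.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NSLOTS 4

typedef struct
{
    bool     in_use;
    uint64_t generation;        /* incremented when the slot is recycled */
    int      payload;
} Slot;

typedef struct
{
    int      slot;
    uint64_t generation;
} Handle;

static Slot slots[NSLOTS];

/* Claim a free slot and hand back {index, generation} as the handle. */
static bool
register_entry(int payload, Handle *handle)
{
    for (int i = 0; i < NSLOTS; i++)
    {
        if (!slots[i].in_use)
        {
            slots[i].in_use = true;
            slots[i].payload = payload;
            handle->slot = i;
            handle->generation = slots[i].generation;
            return true;
        }
    }
    return false;               /* no free slot */
}

/* Releasing a slot bumps its generation, invalidating old handles. */
static void
release_entry(int slot)
{
    slots[slot].in_use = false;
    slots[slot].generation++;
}

/* A handle is valid only while the slot still carries its generation. */
static bool
lookup_entry(const Handle *handle, int *payload)
{
    Slot *s = &slots[handle->slot];

    if (!s->in_use || s->generation != handle->generation)
        return false;           /* slot was recycled for someone else */
    *payload = s->payload;
    return true;
}

int
main(void)
{
    Handle  ha, hb;
    int     value = 0;
    bool    ok;

    register_entry(42, &ha);
    release_entry(ha.slot);             /* the first registrant goes away */
    register_entry(99, &hb);            /* the slot gets reused */

    printf("old handle valid? %s\n", lookup_entry(&ha, &value) ? "yes" : "no");
    ok = lookup_entry(&hb, &value);
    printf("new handle valid? %s, payload %d\n", ok ? "yes" : "no", value);
    return 0;
}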
*/ slist_foreach(siter, &BackgroundWorkerList) { @@ -144,7 +144,7 @@ BackgroundWorkerShmemInit(void) slot->pid = InvalidPid; slot->generation = 0; rw->rw_shmem_slot = slotno; - rw->rw_worker.bgw_notify_pid = 0; /* might be reinit after crash */ + rw->rw_worker.bgw_notify_pid = 0; /* might be reinit after crash */ memcpy(&slot->worker, &rw->rw_worker, sizeof(BackgroundWorker)); ++slotno; } @@ -194,27 +194,27 @@ FindRegisteredWorkerBySlotNumber(int slotno) void BackgroundWorkerStateChange(void) { - int slotno; + int slotno; /* * The total number of slots stored in shared memory should match our * notion of max_worker_processes. If it does not, something is very * wrong. Further down, we always refer to this value as - * max_worker_processes, in case shared memory gets corrupted while - * we're looping. + * max_worker_processes, in case shared memory gets corrupted while we're + * looping. */ if (max_worker_processes != BackgroundWorkerData->total_slots) { elog(LOG, "inconsistent background worker state (max_worker_processes=%d, total_slots=%d", - max_worker_processes, - BackgroundWorkerData->total_slots); + max_worker_processes, + BackgroundWorkerData->total_slots); return; } /* - * Iterate through slots, looking for newly-registered workers or - * workers who must die. + * Iterate through slots, looking for newly-registered workers or workers + * who must die. */ for (slotno = 0; slotno < max_worker_processes; ++slotno) { @@ -267,8 +267,8 @@ BackgroundWorkerStateChange(void) } /* - * Copy strings in a paranoid way. If shared memory is corrupted, - * the source data might not even be NUL-terminated. + * Copy strings in a paranoid way. If shared memory is corrupted, the + * source data might not even be NUL-terminated. */ ascii_safe_strlcpy(rw->rw_worker.bgw_name, slot->worker.bgw_name, BGW_MAXLEN); @@ -280,10 +280,10 @@ BackgroundWorkerStateChange(void) /* * Copy various fixed-size fields. * - * flags, start_time, and restart_time are examined by the - * postmaster, but nothing too bad will happen if they are - * corrupted. The remaining fields will only be examined by the - * child process. It might crash, but we won't. + * flags, start_time, and restart_time are examined by the postmaster, + * but nothing too bad will happen if they are corrupted. The + * remaining fields will only be examined by the child process. It + * might crash, but we won't. */ rw->rw_worker.bgw_flags = slot->worker.bgw_flags; rw->rw_worker.bgw_start_time = slot->worker.bgw_start_time; @@ -292,13 +292,13 @@ BackgroundWorkerStateChange(void) rw->rw_worker.bgw_main_arg = slot->worker.bgw_main_arg; /* - * Copy the PID to be notified about state changes, but only if - * the postmaster knows about a backend with that PID. It isn't - * an error if the postmaster doesn't know about the PID, because - * the backend that requested the worker could have died (or been - * killed) just after doing so. Nonetheless, at least until we get - * some experience with how this plays out in the wild, log a message - * at a relative high debug level. + * Copy the PID to be notified about state changes, but only if the + * postmaster knows about a backend with that PID. It isn't an error + * if the postmaster doesn't know about the PID, because the backend + * that requested the worker could have died (or been killed) just + * after doing so. Nonetheless, at least until we get some experience + * with how this plays out in the wild, log a message at a relative + * high debug level. 
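BackgroundWorkerStateChange, above, copies strings out of shared memory "in a paranoid way" because a corrupted segment might not even be NUL-terminated. The standalone helper below approximates that defensive shape (bounded length, forced termination, non-ASCII bytes squashed), though the real ascii_safe_strlcpy may differ in its details.

#include <stdio.h>
#include <string.h>

/*
 * Copy at most maxlen-1 bytes from src, stopping early at a NUL if one
 * shows up, replacing bytes outside printable ASCII with '?', and
 * always NUL-terminating the destination.
 */
static void
paranoid_strlcpy(char *dst, const char *src, size_t maxlen)
{
    size_t i;

    for (i = 0; i < maxlen - 1; i++)
    {
        unsigned char ch = (unsigned char) src[i];

        if (ch == '\0')
            break;
        dst[i] = (ch < 32 || ch > 126) ? '?' : (char) ch;
    }
    dst[i] = '\0';
}

int
main(void)
{
    char bogus[8];
    char out[8];

    memset(bogus, 0xFF, sizeof(bogus));     /* "corrupted": no NUL anywhere */
    paranoid_strlcpy(out, bogus, sizeof(out));
    printf("recovered: \"%s\"\n", out);     /* prints seven '?' characters */
    return 0;
}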
*/ rw->rw_worker.bgw_notify_pid = slot->worker.bgw_notify_pid; if (!PostmasterMarkPIDForWorkerNotify(rw->rw_worker.bgw_notify_pid)) @@ -319,7 +319,7 @@ BackgroundWorkerStateChange(void) /* Log it! */ ereport(LOG, (errmsg("registering background worker \"%s\"", - rw->rw_worker.bgw_name))); + rw->rw_worker.bgw_name))); slist_push_head(&BackgroundWorkerList, &rw->rw_lnode); } @@ -348,7 +348,7 @@ ForgetBackgroundWorker(slist_mutable_iter *cur) ereport(LOG, (errmsg("unregistering background worker \"%s\"", - rw->rw_worker.bgw_name))); + rw->rw_worker.bgw_name))); slist_delete_current(cur); free(rw); @@ -458,7 +458,7 @@ SanityCheckBackgroundWorker(BackgroundWorker *worker, int elevel) static void bgworker_quickdie(SIGNAL_ARGS) { - sigaddset(&BlockSig, SIGQUIT); /* prevent nested calls */ + sigaddset(&BlockSig, SIGQUIT); /* prevent nested calls */ PG_SETMASK(&BlockSig); /* @@ -504,7 +504,7 @@ bgworker_die(SIGNAL_ARGS) static void bgworker_sigusr1_handler(SIGNAL_ARGS) { - int save_errno = errno; + int save_errno = errno; latch_sigusr1_handler(); @@ -581,7 +581,7 @@ StartBackgroundWorker(void) pqsignal(SIGHUP, SIG_IGN); pqsignal(SIGQUIT, bgworker_quickdie); - InitializeTimeouts(); /* establishes SIGALRM handler */ + InitializeTimeouts(); /* establishes SIGALRM handler */ pqsignal(SIGPIPE, SIG_IGN); pqsignal(SIGUSR2, SIG_IGN); @@ -633,11 +633,11 @@ StartBackgroundWorker(void) /* * If bgw_main is set, we use that value as the initial entrypoint. * However, if the library containing the entrypoint wasn't loaded at - * postmaster startup time, passing it as a direct function pointer is - * not possible. To work around that, we allow callers for whom a - * function pointer is not available to pass a library name (which will - * be loaded, if necessary) and a function name (which will be looked up - * in the named library). + * postmaster startup time, passing it as a direct function pointer is not + * possible. To work around that, we allow callers for whom a function + * pointer is not available to pass a library name (which will be loaded, + * if necessary) and a function name (which will be looked up in the named + * library). */ if (worker->bgw_main != NULL) entrypt = worker->bgw_main; @@ -677,7 +677,7 @@ RegisterBackgroundWorker(BackgroundWorker *worker) if (!IsUnderPostmaster) ereport(LOG, - (errmsg("registering background worker \"%s\"", worker->bgw_name))); + (errmsg("registering background worker \"%s\"", worker->bgw_name))); if (!process_shared_preload_libraries_in_progress) { @@ -697,7 +697,7 @@ RegisterBackgroundWorker(BackgroundWorker *worker) ereport(LOG, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("background worker \"%s\": only dynamic background workers can request notification", - worker->bgw_name))); + worker->bgw_name))); return; } @@ -756,17 +756,17 @@ bool RegisterDynamicBackgroundWorker(BackgroundWorker *worker, BackgroundWorkerHandle **handle) { - int slotno; - bool success = false; - uint64 generation = 0; + int slotno; + bool success = false; + uint64 generation = 0; /* - * We can't register dynamic background workers from the postmaster. - * If this is a standalone backend, we're the only process and can't - * start any more. In a multi-process environement, it might be - * theoretically possible, but we don't currently support it due to - * locking considerations; see comments on the BackgroundWorkerSlot - * data structure. + * We can't register dynamic background workers from the postmaster. 
If + * this is a standalone backend, we're the only process and can't start + * any more. In a multi-process environement, it might be theoretically + * possible, but we don't currently support it due to locking + * considerations; see comments on the BackgroundWorkerSlot data + * structure. */ if (!IsUnderPostmaster) return false; @@ -792,8 +792,8 @@ RegisterDynamicBackgroundWorker(BackgroundWorker *worker, generation = slot->generation; /* - * Make sure postmaster doesn't see the slot as in use before - * it sees the new contents. + * Make sure postmaster doesn't see the slot as in use before it + * sees the new contents. */ pg_write_barrier(); @@ -839,16 +839,16 @@ BgwHandleStatus GetBackgroundWorkerPid(BackgroundWorkerHandle *handle, pid_t *pidp) { BackgroundWorkerSlot *slot; - pid_t pid; + pid_t pid; Assert(handle->slot < max_worker_processes); slot = &BackgroundWorkerData->slot[handle->slot]; /* - * We could probably arrange to synchronize access to data using - * memory barriers only, but for now, let's just keep it simple and - * grab the lock. It seems unlikely that there will be enough traffic - * here to result in meaningful contention. + * We could probably arrange to synchronize access to data using memory + * barriers only, but for now, let's just keep it simple and grab the + * lock. It seems unlikely that there will be enough traffic here to + * result in meaningful contention. */ LWLockAcquire(BackgroundWorkerLock, LW_SHARED); @@ -887,9 +887,9 @@ GetBackgroundWorkerPid(BackgroundWorkerHandle *handle, pid_t *pidp) BgwHandleStatus WaitForBackgroundWorkerStartup(BackgroundWorkerHandle *handle, pid_t *pidp) { - BgwHandleStatus status; - int rc; - bool save_set_latch_on_sigusr1; + BgwHandleStatus status; + int rc; + bool save_set_latch_on_sigusr1; save_set_latch_on_sigusr1 = set_latch_on_sigusr1; set_latch_on_sigusr1 = true; @@ -898,7 +898,7 @@ WaitForBackgroundWorkerStartup(BackgroundWorkerHandle *handle, pid_t *pidp) { for (;;) { - pid_t pid; + pid_t pid; CHECK_FOR_INTERRUPTS(); @@ -942,7 +942,7 @@ void TerminateBackgroundWorker(BackgroundWorkerHandle *handle) { BackgroundWorkerSlot *slot; - bool signal_postmaster = false; + bool signal_postmaster = false; Assert(handle->slot < max_worker_processes); slot = &BackgroundWorkerData->slot[handle->slot]; diff --git a/src/backend/postmaster/bgwriter.c b/src/backend/postmaster/bgwriter.c index 1ec66c221fb..780ee3bdcb0 100644 --- a/src/backend/postmaster/bgwriter.c +++ b/src/backend/postmaster/bgwriter.c @@ -2,11 +2,11 @@ * * bgwriter.c * - * The background writer (bgwriter) is new as of Postgres 8.0. It attempts + * The background writer (bgwriter) is new as of Postgres 8.0. It attempts * to keep regular backends from having to write out dirty shared buffers * (which they would only do when needing to free a shared buffer to read in * another page). In the best scenario all writes from shared buffers will - * be issued by the background writer process. However, regular backends are + * be issued by the background writer process. However, regular backends are * still empowered to issue writes if the bgwriter fails to maintain enough * clean shared buffers. * @@ -115,7 +115,7 @@ BackgroundWriterMain(void) /* * If possible, make this process a group leader, so that the postmaster - * can signal any child processes too. (bgwriter probably never has any + * can signal any child processes too. (bgwriter probably never has any * child processes, but for consistency we make all postmaster child * processes do this.) 
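/*
 * Illustrative sketch (not part of this patch): the publication order that
 * the pg_write_barrier() call in RegisterDynamicBackgroundWorker above
 * enforces.  The slot payload is written first, then the barrier, and only
 * then is the slot made visible to the postmaster.  The in_use flag name is
 * an assumption for illustration only.
 */
static void
publish_worker_slot(BackgroundWorkerSlot *slot,
					const BackgroundWorker *worker,
					uint64 generation)
{
	/* Write the payload first ... */
	memcpy(&slot->worker, worker, sizeof(BackgroundWorker));
	slot->generation = generation;

	/* ... force those stores to become visible ... */
	pg_write_barrier();

	/* ... and only then let the postmaster see the slot as in use. */
	slot->in_use = true;		/* assumed field name */
}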
*/ @@ -194,7 +194,7 @@ BackgroundWriterMain(void) /* * These operations are really just a minimal subset of - * AbortTransaction(). We don't have very many resources to worry + * AbortTransaction(). We don't have very many resources to worry * about in bgwriter, but we do have LWLocks, buffers, and temp files. */ LWLockReleaseAll(); @@ -291,18 +291,18 @@ BackgroundWriterMain(void) if (FirstCallSinceLastCheckpoint()) { /* - * After any checkpoint, close all smgr files. This is so we + * After any checkpoint, close all smgr files. This is so we * won't hang onto smgr references to deleted files indefinitely. */ smgrcloseall(); } /* - * Log a new xl_running_xacts every now and then so replication can get - * into a consistent state faster (think of suboverflowed snapshots) - * and clean up resources (locks, KnownXids*) more frequently. The - * costs of this are relatively low, so doing it 4 times - * (LOG_SNAPSHOT_INTERVAL_MS) a minute seems fine. + * Log a new xl_running_xacts every now and then so replication can + * get into a consistent state faster (think of suboverflowed + * snapshots) and clean up resources (locks, KnownXids*) more + * frequently. The costs of this are relatively low, so doing it 4 + * times (LOG_SNAPSHOT_INTERVAL_MS) a minute seems fine. * * We assume the interval for writing xl_running_xacts is * significantly bigger than BgWriterDelay, so we don't complicate the @@ -314,20 +314,21 @@ BackgroundWriterMain(void) * we've logged a running xacts. * * We do this logging in the bgwriter as its the only process thats - * run regularly and returns to its mainloop all the - * time. E.g. Checkpointer, when active, is barely ever in its - * mainloop and thus makes it hard to log regularly. + * run regularly and returns to its mainloop all the time. E.g. + * Checkpointer, when active, is barely ever in its mainloop and thus + * makes it hard to log regularly. */ if (XLogStandbyInfoActive() && !RecoveryInProgress()) { TimestampTz timeout = 0; TimestampTz now = GetCurrentTimestamp(); + timeout = TimestampTzPlusMilliseconds(last_snapshot_ts, LOG_SNAPSHOT_INTERVAL_MS); /* - * only log if enough time has passed and some xlog record has been - * inserted. + * only log if enough time has passed and some xlog record has + * been inserted. */ if (now >= timeout && last_snapshot_lsn != GetXLogInsertRecPtr()) @@ -366,7 +367,7 @@ BackgroundWriterMain(void) * and the time we call StrategyNotifyBgWriter. While it's not * critical that we not hibernate anyway, we try to reduce the odds of * that by only hibernating when BgBufferSync says nothing's happening - * for two consecutive cycles. Also, we mitigate any possible + * for two consecutive cycles. Also, we mitigate any possible * consequences of a missed wakeup by not hibernating forever. */ if (rc == WL_TIMEOUT && can_hibernate && prev_hibernate) @@ -420,7 +421,7 @@ bg_quickdie(SIGNAL_ARGS) on_exit_reset(); /* - * Note we do exit(2) not exit(0). This is to force the postmaster into a + * Note we do exit(2) not exit(0). This is to force the postmaster into a * system reset cycle if some idiot DBA sends a manual SIGQUIT to a random * backend. This is necessary precisely because we don't clean up our * shared memory state. 
(The "dead man switch" mechanism in pmsignal.c diff --git a/src/backend/postmaster/checkpointer.c b/src/backend/postmaster/checkpointer.c index e544c1f6d2c..2ac3061d974 100644 --- a/src/backend/postmaster/checkpointer.c +++ b/src/backend/postmaster/checkpointer.c @@ -2,7 +2,7 @@ * * checkpointer.c * - * The checkpointer is new as of Postgres 9.2. It handles all checkpoints. + * The checkpointer is new as of Postgres 9.2. It handles all checkpoints. * Checkpoints are automatically dispatched after a certain amount of time has * elapsed since the last one, and it can be signaled to perform requested * checkpoints as well. (The GUC parameter that mandates a checkpoint every @@ -14,7 +14,7 @@ * subprocess finishes, or as soon as recovery begins if we are doing archive * recovery. It remains alive until the postmaster commands it to terminate. * Normal termination is by SIGUSR2, which instructs the checkpointer to - * execute a shutdown checkpoint and then exit(0). (All backends must be + * execute a shutdown checkpoint and then exit(0). (All backends must be * stopped before SIGUSR2 is issued!) Emergency termination is by SIGQUIT; * like any backend, the checkpointer will simply abort and exit on SIGQUIT. * @@ -198,7 +198,7 @@ CheckpointerMain(void) /* * If possible, make this process a group leader, so that the postmaster - * can signal any child processes too. (checkpointer probably never has + * can signal any child processes too. (checkpointer probably never has * any child processes, but for consistency we make all postmaster child * processes do this.) */ @@ -211,7 +211,7 @@ CheckpointerMain(void) * Properly accept or ignore signals the postmaster might send us * * Note: we deliberately ignore SIGTERM, because during a standard Unix - * system shutdown cycle, init will SIGTERM all processes at once. We + * system shutdown cycle, init will SIGTERM all processes at once. We * want to wait for the backends to exit, whereupon the postmaster will * tell us it's okay to shut down (via SIGUSR2). */ @@ -279,7 +279,7 @@ CheckpointerMain(void) /* * These operations are really just a minimal subset of - * AbortTransaction(). We don't have very many resources to worry + * AbortTransaction(). We don't have very many resources to worry * about in checkpointer, but we do have LWLocks, buffers, and temp * files. */ @@ -506,7 +506,7 @@ CheckpointerMain(void) ckpt_performed = CreateRestartPoint(flags); /* - * After any checkpoint, close all smgr files. This is so we + * After any checkpoint, close all smgr files. This is so we * won't hang onto smgr references to deleted files indefinitely. */ smgrcloseall(); @@ -639,7 +639,7 @@ CheckArchiveTimeout(void) } /* - * Returns true if an immediate checkpoint request is pending. (Note that + * Returns true if an immediate checkpoint request is pending. (Note that * this does not check the *current* checkpoint's IMMEDIATE flag, but whether * there is one pending behind it.) */ @@ -826,7 +826,7 @@ chkpt_quickdie(SIGNAL_ARGS) on_exit_reset(); /* - * Note we do exit(2) not exit(0). This is to force the postmaster into a + * Note we do exit(2) not exit(0). This is to force the postmaster into a * system reset cycle if some idiot DBA sends a manual SIGQUIT to a random * backend. This is necessary precisely because we don't clean up our * shared memory state. (The "dead man switch" mechanism in pmsignal.c @@ -977,7 +977,7 @@ RequestCheckpoint(int flags) CreateCheckPoint(flags | CHECKPOINT_IMMEDIATE); /* - * After any checkpoint, close all smgr files. 
This is so we won't + * After any checkpoint, close all smgr files. This is so we won't * hang onto smgr references to deleted files indefinitely. */ smgrcloseall(); @@ -1108,7 +1108,7 @@ RequestCheckpoint(int flags) * to the requests[] queue without checking for duplicates. The checkpointer * will have to eliminate dups internally anyway. However, if we discover * that the queue is full, we make a pass over the entire queue to compact - * it. This is somewhat expensive, but the alternative is for the backend + * it. This is somewhat expensive, but the alternative is for the backend * to perform its own fsync, which is far more expensive in practice. It * is theoretically possible a backend fsync might still be necessary, if * the queue is full and contains no duplicate entries. In that case, we @@ -1134,7 +1134,7 @@ ForwardFsyncRequest(RelFileNode rnode, ForkNumber forknum, BlockNumber segno) /* * If the checkpointer isn't running or the request queue is full, the - * backend will have to perform its own fsync request. But before forcing + * backend will have to perform its own fsync request. But before forcing * that to happen, we can try to compact the request queue. */ if (CheckpointerShmem->checkpointer_pid == 0 || @@ -1178,7 +1178,7 @@ ForwardFsyncRequest(RelFileNode rnode, ForkNumber forknum, BlockNumber segno) * Although a full fsync request queue is not common, it can lead to severe * performance problems when it does happen. So far, this situation has * only been observed to occur when the system is under heavy write load, - * and especially during the "sync" phase of a checkpoint. Without this + * and especially during the "sync" phase of a checkpoint. Without this * logic, each backend begins doing an fsync for every block written, which * gets very expensive and can slow down the whole system. * diff --git a/src/backend/postmaster/fork_process.c b/src/backend/postmaster/fork_process.c index 3e2acdd0f52..f6df2de8706 100644 --- a/src/backend/postmaster/fork_process.c +++ b/src/backend/postmaster/fork_process.c @@ -101,7 +101,7 @@ fork_process(void) #endif /* LINUX_OOM_SCORE_ADJ */ /* - * Older Linux kernels have oom_adj not oom_score_adj. This works + * Older Linux kernels have oom_adj not oom_score_adj. This works * similarly except with a different scale of adjustment values. If * it's necessary to build Postgres to work with either API, you can * define both LINUX_OOM_SCORE_ADJ and LINUX_OOM_ADJ. diff --git a/src/backend/postmaster/pgarch.c b/src/backend/postmaster/pgarch.c index 815316055a7..6a5c5b07136 100644 --- a/src/backend/postmaster/pgarch.c +++ b/src/backend/postmaster/pgarch.c @@ -487,14 +487,20 @@ pgarch_ArchiverCopyLoop(void) /* successful */ pgarch_archiveDone(xlog); - /* Tell the collector about the WAL file that we successfully archived */ + /* + * Tell the collector about the WAL file that we successfully + * archived + */ pgstat_send_archiver(xlog, false); break; /* out of inner retry loop */ } else { - /* Tell the collector about the WAL file that we failed to archive */ + /* + * Tell the collector about the WAL file that we failed to + * archive + */ pgstat_send_archiver(xlog, true); if (++failures >= NUM_ARCHIVE_RETRIES) @@ -590,9 +596,9 @@ pgarch_archiveXlog(char *xlog) { /* * If either the shell itself, or a called command, died on a signal, - * abort the archiver. We do this because system() ignores SIGINT and + * abort the archiver. 
We do this because system() ignores SIGINT and * SIGQUIT while waiting; so a signal is very likely something that - * should have interrupted us too. If we overreact it's no big deal, + * should have interrupted us too. If we overreact it's no big deal, * the postmaster will just start the archiver again. * * Per the Single Unix Spec, shells report exit status > 128 when a diff --git a/src/backend/postmaster/pgstat.c b/src/backend/postmaster/pgstat.c index 479dfa7d3cf..f86481665f3 100644 --- a/src/backend/postmaster/pgstat.c +++ b/src/backend/postmaster/pgstat.c @@ -368,7 +368,7 @@ pgstat_init(void) * On some platforms, pg_getaddrinfo_all() may return multiple addresses * only one of which will actually work (eg, both IPv6 and IPv4 addresses * when kernel will reject IPv6). Worse, the failure may occur at the - * bind() or perhaps even connect() stage. So we must loop through the + * bind() or perhaps even connect() stage. So we must loop through the * results till we find a working combination. We will generate LOG * messages, but no error, for bogus combinations. */ @@ -616,7 +616,7 @@ pgstat_reset_remove_files(const char *directory) /* * pgstat_reset_all() - * - * Remove the stats files. This is currently used only if WAL + * Remove the stats files. This is currently used only if WAL * recovery is needed after a crash. */ void @@ -677,7 +677,7 @@ pgstat_start(void) /* * Do nothing if too soon since last collector start. This is a safety * valve to protect against continuous respawn attempts if the collector - * is dying immediately at launch. Note that since we will be re-called + * is dying immediately at launch. Note that since we will be re-called * from the postmaster main loop, we will get another chance later. */ curtime = time(NULL); @@ -1122,7 +1122,7 @@ pgstat_vacuum_stat(void) * * Collect the OIDs of all objects listed in the specified system catalog * into a temporary hash table. Caller should hash_destroy the result - * when done with it. (However, we make the table in CurrentMemoryContext + * when done with it. (However, we make the table in CurrentMemoryContext * so that it will be freed properly in event of an error.) * ---------- */ @@ -1374,7 +1374,7 @@ pgstat_report_analyze(Relation rel, * have counted such rows as live or dead respectively. Because we will * report our counts of such rows at transaction end, we should subtract * off these counts from what we send to the collector now, else they'll - * be double-counted after commit. (This approach also ensures that the + * be double-counted after commit. (This approach also ensures that the * collector ends up with the right numbers if we abort instead of * committing.) */ @@ -1605,7 +1605,7 @@ pgstat_end_function_usage(PgStat_FunctionCallUsage *fcu, bool finalize) /* * Compute the new f_total_time as the total elapsed time added to the - * pre-call value of f_total_time. This is necessary to avoid + * pre-call value of f_total_time. This is necessary to avoid * double-counting any time taken by recursive calls of myself. (We do * not need any similar kluge for self time, since that already excludes * any recursive calls.) @@ -2091,7 +2091,7 @@ AtPrepare_PgStat(void) * Clean up after successful PREPARE. * * All we need do here is unlink the transaction stats state from the - * nontransactional state. The nontransactional action counts will be + * nontransactional state. 
The nontransactional action counts will be * reported to the stats collector immediately, while the effects on live * and dead tuple counts are preserved in the 2PC state file. * @@ -2317,8 +2317,8 @@ pgstat_fetch_stat_beentry(int beid) /* ---------- * pgstat_fetch_stat_local_beentry() - * - * Like pgstat_fetch_stat_beentry() but with locally computed addtions (like - * xid and xmin values of the backend) + * Like pgstat_fetch_stat_beentry() but with locally computed addtions (like + * xid and xmin values of the backend) * * NB: caller is responsible for a check if the user is permitted to see * this info (especially the querystring). @@ -2670,7 +2670,7 @@ pgstat_report_activity(BackendState state, const char *cmd_str) { /* * track_activities is disabled, but we last reported a - * non-disabled state. As our final update, change the state and + * non-disabled state. As our final update, change the state and * clear fields we will not be updating anymore. */ beentry->st_changecount++; @@ -2895,12 +2895,12 @@ pgstat_read_current_status(void) * pgstat_get_backend_current_activity() - * * Return a string representing the current activity of the backend with - * the specified PID. This looks directly at the BackendStatusArray, + * the specified PID. This looks directly at the BackendStatusArray, * and so will provide current information regardless of the age of our * transaction's snapshot of the status array. * * It is the caller's responsibility to invoke this only for backends whose - * state is expected to remain stable while the result is in use. The + * state is expected to remain stable while the result is in use. The * only current use is in deadlock reporting, where we can expect that * the target backend is blocked on a lock. (There are corner cases * where the target's wait could get aborted while we are looking at it, @@ -2968,7 +2968,7 @@ pgstat_get_backend_current_activity(int pid, bool checkUser) * pgstat_get_crashed_backend_activity() - * * Return a string representing the current activity of the backend with - * the specified PID. Like the function above, but reads shared memory with + * the specified PID. Like the function above, but reads shared memory with * the expectation that it may be corrupt. On success, copy the string * into the "buffer" argument and return that pointer. On failure, * return NULL. @@ -2977,7 +2977,7 @@ pgstat_get_backend_current_activity(int pid, bool checkUser) * query that crashed a backend. In particular, no attempt is made to * follow the correct concurrency protocol when accessing the * BackendStatusArray. But that's OK, in the worst case we'll return a - * corrupted message. We also must take care not to trip on ereport(ERROR). + * corrupted message. We also must take care not to trip on ereport(ERROR). * ---------- */ const char * @@ -3097,7 +3097,7 @@ pgstat_send(void *msg, int len) void pgstat_send_archiver(const char *xlog, bool failed) { - PgStat_MsgArchiver msg; + PgStat_MsgArchiver msg; /* * Prepare and send the message @@ -3145,7 +3145,7 @@ pgstat_send_bgwriter(void) /* ---------- * PgstatCollectorMain() - * - * Start up the statistics collector process. This is the body of the + * Start up the statistics collector process. This is the body of the * postmaster child process. * * The argc/argv parameters are valid only in EXEC_BACKEND case. @@ -3166,7 +3166,7 @@ PgstatCollectorMain(int argc, char *argv[]) /* * If possible, make this process a group leader, so that the postmaster - * can signal any child processes too. 
(pgstat probably never has any + * can signal any child processes too. (pgstat probably never has any * child processes, but for consistency we make all postmaster child * processes do this.) */ @@ -3395,7 +3395,7 @@ PgstatCollectorMain(int argc, char *argv[]) /* * Windows, at least in its Windows Server 2003 R2 incarnation, - * sometimes loses FD_READ events. Waking up and retrying the recv() + * sometimes loses FD_READ events. Waking up and retrying the recv() * fixes that, so don't sleep indefinitely. This is a crock of the * first water, but until somebody wants to debug exactly what's * happening there, this is the best we can do. The two-second @@ -3912,8 +3912,8 @@ pgstat_read_statsfiles(Oid onlydb, bool permanent, bool deep) HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); /* - * Clear out global and archiver statistics so they start from zero - * in case we can't load an existing statsfile. + * Clear out global and archiver statistics so they start from zero in + * case we can't load an existing statsfile. */ memset(&globalStats, 0, sizeof(globalStats)); memset(&archiverStats, 0, sizeof(archiverStats)); @@ -4271,7 +4271,7 @@ pgstat_read_db_statsfile_timestamp(Oid databaseid, bool permanent, const char *statfile = permanent ? PGSTAT_STAT_PERMANENT_FILENAME : pgstat_stat_filename; /* - * Try to open the stats file. As above, anything but ENOENT is worthy of + * Try to open the stats file. As above, anything but ENOENT is worthy of * complaining about. */ if ((fpin = AllocateFile(statfile, PG_BINARY_R)) == NULL) @@ -4419,7 +4419,7 @@ backend_read_statsfile(void) * * We don't recompute min_ts after sleeping, except in the * unlikely case that cur_ts went backwards. So we might end up - * accepting a file a bit older than PGSTAT_STAT_INTERVAL. In + * accepting a file a bit older than PGSTAT_STAT_INTERVAL. In * practice that shouldn't happen, though, as long as the sleep * time is less than PGSTAT_STAT_INTERVAL; and we don't want to * tell the collector that our cutoff time is less than what we'd @@ -4512,7 +4512,7 @@ pgstat_setup_memcxt(void) /* ---------- * pgstat_clear_snapshot() - * - * Discard any data collected in the current transaction. Any subsequent + * Discard any data collected in the current transaction. Any subsequent * request will cause new snapshots to be read. * * This is also invoked during transaction commit or abort to discard @@ -4996,7 +4996,7 @@ pgstat_recv_archiver(PgStat_MsgArchiver *msg, int len) /* Failed archival attempt */ ++archiverStats.failed_count; memcpy(archiverStats.last_failed_wal, msg->m_xlog, - sizeof(archiverStats.last_failed_wal)); + sizeof(archiverStats.last_failed_wal)); archiverStats.last_failed_timestamp = msg->m_timestamp; } else @@ -5004,7 +5004,7 @@ pgstat_recv_archiver(PgStat_MsgArchiver *msg, int len) /* Successful archival operation */ ++archiverStats.archived_count; memcpy(archiverStats.last_archived_wal, msg->m_xlog, - sizeof(archiverStats.last_archived_wal)); + sizeof(archiverStats.last_archived_wal)); archiverStats.last_archived_timestamp = msg->m_timestamp; } } diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c index b573fd82b63..6d098874d9b 100644 --- a/src/backend/postmaster/postmaster.c +++ b/src/backend/postmaster/postmaster.c @@ -2,7 +2,7 @@ * * postmaster.c * This program acts as a clearing house for requests to the - * POSTGRES system. Frontend programs send a startup message + * POSTGRES system. 
Frontend programs send a startup message * to the Postmaster and the postmaster uses the info in the * message to setup a backend process. * @@ -15,7 +15,7 @@ * The postmaster process creates the shared memory and semaphore * pools during startup, but as a rule does not touch them itself. * In particular, it is not a member of the PGPROC array of backends - * and so it cannot participate in lock-manager operations. Keeping + * and so it cannot participate in lock-manager operations. Keeping * the postmaster away from shared memory operations makes it simpler * and more reliable. The postmaster is almost always able to recover * from crashes of individual backends by resetting shared memory; @@ -144,7 +144,7 @@ * children we have and send them appropriate signals when necessary. * * "Special" children such as the startup, bgwriter and autovacuum launcher - * tasks are not in this list. Autovacuum worker and walsender are in it. + * tasks are not in this list. Autovacuum worker and walsender are in it. * Also, "dead_end" children are in it: these are children launched just for * the purpose of sending a friendly rejection message to a would-be client. * We must track them because they are attached to shared memory, but we know @@ -161,13 +161,13 @@ typedef struct bkend int child_slot; /* PMChildSlot for this backend, if any */ /* - * Flavor of backend or auxiliary process. Note that BACKEND_TYPE_WALSND + * Flavor of backend or auxiliary process. Note that BACKEND_TYPE_WALSND * backends initially announce themselves as BACKEND_TYPE_NORMAL, so if * bkend_type is normal, you should check for a recent transition. */ int bkend_type; bool dead_end; /* is it going to send an error and quit? */ - bool bgworker_notify; /* gets bgworker start/stop notifications */ + bool bgworker_notify; /* gets bgworker start/stop notifications */ dlist_node elem; /* list link in BackendList */ } Backend; @@ -212,10 +212,10 @@ static char ExtraOptions[MAXPGPATH]; /* * These globals control the behavior of the postmaster in case some - * backend dumps core. Normally, it kills all peers of the dead backend + * backend dumps core. Normally, it kills all peers of the dead backend * and reinitializes shared memory. By specifying -s or -n, we can have * the postmaster stop (rather than kill) peers and not reinitialize - * shared data structures. (Reinit is currently dead code, though.) + * shared data structures. (Reinit is currently dead code, though.) */ static bool Reinit = true; static int SendStop = false; @@ -264,7 +264,7 @@ static bool RecoveryError = false; /* T if WAL recovery failed */ * state and the startup process is launched. The startup process begins by * reading the control file and other preliminary initialization steps. * In a normal startup, or after crash recovery, the startup process exits - * with exit code 0 and we switch to PM_RUN state. However, archive recovery + * with exit code 0 and we switch to PM_RUN state. However, archive recovery * is handled specially since it takes much longer and we would like to support * hot standby during archive recovery. * @@ -273,7 +273,7 @@ static bool RecoveryError = false; /* T if WAL recovery failed */ * checkpointer are launched, while the startup process continues applying WAL. * If Hot Standby is enabled, then, after reaching a consistent point in WAL * redo, startup process signals us again, and we switch to PM_HOT_STANDBY - * state and begin accepting connections to perform read-only queries. 
When + * state and begin accepting connections to perform read-only queries. When * archive recovery is finished, the startup process exits with exit code 0 * and we switch to PM_RUN state. * @@ -456,7 +456,7 @@ typedef struct VariableCache ShmemVariableCache; Backend *ShmemBackendArray; #ifndef HAVE_SPINLOCKS - PGSemaphore SpinlockSemaArray; + PGSemaphore SpinlockSemaArray; #endif LWLockPadded *MainLWLockArray; slock_t *ProcStructLock; @@ -599,7 +599,7 @@ PostmasterMain(int argc, char *argv[]) opterr = 1; /* - * Parse command-line options. CAUTION: keep this in sync with + * Parse command-line options. CAUTION: keep this in sync with * tcop/postgres.c (the option sets should not conflict) and with the * common help() function in main/main.c. */ @@ -1093,6 +1093,7 @@ PostmasterMain(int argc, char *argv[]) InitPostmasterDeathWatchHandle(); #ifdef WIN32 + /* * Initialize I/O completion port used to deliver list of dead children. */ @@ -1157,8 +1158,8 @@ PostmasterMain(int argc, char *argv[]) if (!(Log_destination & LOG_DESTINATION_STDERR)) ereport(LOG, (errmsg("ending log output to stderr"), - errhint("Future log output will go to log destination \"%s\".", - Log_destination_string))); + errhint("Future log output will go to log destination \"%s\".", + Log_destination_string))); whereToSendOutput = DestNone; @@ -1197,7 +1198,7 @@ PostmasterMain(int argc, char *argv[]) /* - * Remove old temporary files. At this point there can be no other + * Remove old temporary files. At this point there can be no other * Postgres processes running in this directory, so this should be safe. */ RemovePgTempFiles(); @@ -1427,11 +1428,11 @@ DetermineSleepTime(struct timeval * timeout) if (HaveCrashedWorker) { - slist_mutable_iter siter; + slist_mutable_iter siter; /* * When there are crashed bgworkers, we sleep just long enough that - * they are restarted when they request to be. Scan the list to + * they are restarted when they request to be. Scan the list to * determine the minimum of all wakeup times according to most recent * crash time and requested restart interval. */ @@ -1655,9 +1656,9 @@ ServerLoop(void) /* * If we already sent SIGQUIT to children and they are slow to shut - * down, it's time to send them SIGKILL. This doesn't happen normally, - * but under certain conditions backends can get stuck while shutting - * down. This is a last measure to get them unwedged. + * down, it's time to send them SIGKILL. This doesn't happen + * normally, but under certain conditions backends can get stuck while + * shutting down. This is a last measure to get them unwedged. * * Note we also do this during recovery from a process crash. */ @@ -1671,8 +1672,8 @@ ServerLoop(void) AbortStartTime = 0; /* - * Additionally, unless we're recovering from a process crash, it's - * now the time for postmaster to abandon ship. + * Additionally, unless we're recovering from a process crash, + * it's now the time for postmaster to abandon ship. */ if (!FatalError) ExitPostmaster(1); @@ -1731,7 +1732,7 @@ ProcessStartupPacket(Port *port, bool SSLdone) { /* * EOF after SSLdone probably means the client didn't like our - * response to NEGOTIATE_SSL_CODE. That's not an error condition, so + * response to NEGOTIATE_SSL_CODE. That's not an error condition, so * don't clutter the log with a complaint. */ if (!SSLdone) @@ -1856,7 +1857,7 @@ retry1: int32 offset = sizeof(ProtocolVersion); /* - * Scan packet body for name/option pairs. We can assume any string + * Scan packet body for name/option pairs. 
We can assume any string * beginning within the packet body is null-terminated, thanks to * zeroing extra byte above. */ @@ -1898,7 +1899,7 @@ retry1: else if (!parse_bool(valptr, &am_walsender)) ereport(FATAL, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("invalid value for parameter \"replication\""), + errmsg("invalid value for parameter \"replication\""), errhint("Valid values are: false, 0, true, 1, database."))); } else @@ -2291,7 +2292,7 @@ reset_shared(int port) * * Note: in each "cycle of life" we will normally assign the same IPC keys * (if using SysV shmem and/or semas), since the port number is used to - * determine IPC keys. This helps ensure that we will clean up dead IPC + * determine IPC keys. This helps ensure that we will clean up dead IPC * objects if the postmaster crashes and is restarted. */ CreateSharedMemoryAndSemaphores(false, port); @@ -2650,7 +2651,7 @@ reaper(SIGNAL_ARGS) /* * OK, we saw normal exit of the checkpointer after it's been * told to shut down. We expect that it wrote a shutdown - * checkpoint. (If for some reason it didn't, recovery will + * checkpoint. (If for some reason it didn't, recovery will * occur on next postmaster start.) * * At this point we should have no normal backend children @@ -2726,7 +2727,7 @@ reaper(SIGNAL_ARGS) /* * Was it the autovacuum launcher? Normal exit can be ignored; we'll * start a new one at the next iteration of the postmaster's main - * loop, if necessary. Any other exit condition is treated as a + * loop, if necessary. Any other exit condition is treated as a * crash. */ if (pid == AutoVacPID) @@ -2868,7 +2869,7 @@ CleanupBackgroundWorker(int pid, if (!ReleasePostmasterChildSlot(rw->rw_child_slot)) { /* - * Uh-oh, the child failed to clean itself up. Treat as a crash + * Uh-oh, the child failed to clean itself up. Treat as a crash * after all. */ rw->rw_crashed_at = GetCurrentTimestamp(); @@ -2884,6 +2885,7 @@ CleanupBackgroundWorker(int pid, #ifdef EXEC_BACKEND ShmemBackendArrayRemove(rw->rw_backend); #endif + /* * It's possible that this background worker started some OTHER * background worker and asked to be notified when that worker @@ -2897,7 +2899,7 @@ CleanupBackgroundWorker(int pid, } rw->rw_pid = 0; rw->rw_child_slot = 0; - ReportBackgroundWorkerPID(rw); /* report child death */ + ReportBackgroundWorkerPID(rw); /* report child death */ LogChildExit(LOG, namebuf, pid, exitstatus); @@ -2930,6 +2932,7 @@ CleanupBackend(int pid, */ #ifdef WIN32 + /* * On win32, also treat ERROR_WAIT_NO_CHILDREN (128) as nonfatal case, * since that sometimes happens under load when the process fails to start @@ -2961,7 +2964,7 @@ CleanupBackend(int pid, if (!ReleasePostmasterChildSlot(bp->child_slot)) { /* - * Uh-oh, the child failed to clean itself up. Treat as a + * Uh-oh, the child failed to clean itself up. Treat as a * crash after all. */ HandleChildCrash(pid, exitstatus, _("server process")); @@ -2974,12 +2977,12 @@ CleanupBackend(int pid, if (bp->bgworker_notify) { /* - * This backend may have been slated to receive SIGUSR1 - * when some background worker started or stopped. Cancel - * those notifications, as we don't want to signal PIDs that - * are not PostgreSQL backends. This gets skipped in the - * (probably very common) case where the backend has never - * requested any such notifications. + * This backend may have been slated to receive SIGUSR1 when + * some background worker started or stopped. Cancel those + * notifications, as we don't want to signal PIDs that are not + * PostgreSQL backends. 
This gets skipped in the (probably + * very common) case where the backend has never requested any + * such notifications. */ BackgroundWorkerStopNotifications(bp->pid); } @@ -3006,10 +3009,11 @@ HandleChildCrash(int pid, int exitstatus, const char *procname) bool take_action; /* - * We only log messages and send signals if this is the first process crash - * and we're not doing an immediate shutdown; otherwise, we're only here to - * update postmaster's idea of live processes. If we have already signalled - * children, nonzero exit status is to be expected, so don't clutter log. + * We only log messages and send signals if this is the first process + * crash and we're not doing an immediate shutdown; otherwise, we're only + * here to update postmaster's idea of live processes. If we have already + * signalled children, nonzero exit status is to be expected, so don't + * clutter log. */ take_action = !FatalError && Shutdown != ImmediateShutdown; @@ -3052,7 +3056,7 @@ HandleChildCrash(int pid, int exitstatus, const char *procname) else { /* - * This worker is still alive. Unless we did so already, tell it + * This worker is still alive. Unless we did so already, tell it * to commit hara-kiri. * * SIGQUIT is the special signal that says exit without proc_exit @@ -3366,13 +3370,13 @@ PostmasterStateMachine(void) * PM_WAIT_BACKENDS state ends when we have no regular backends * (including autovac workers), no bgworkers (including unconnected * ones), and no walwriter, autovac launcher or bgwriter. If we are - * doing crash recovery or an immediate shutdown then we expect - * the checkpointer to exit as well, otherwise not. The archiver, - * stats, and syslogger processes are disregarded since - * they are not connected to shared memory; we also disregard - * dead_end children here. Walsenders are also disregarded, - * they will be terminated later after writing the checkpoint record, - * like the archiver process. + * doing crash recovery or an immediate shutdown then we expect the + * checkpointer to exit as well, otherwise not. The archiver, stats, + * and syslogger processes are disregarded since they are not + * connected to shared memory; we also disregard dead_end children + * here. Walsenders are also disregarded, they will be terminated + * later after writing the checkpoint record, like the archiver + * process. */ if (CountChildren(BACKEND_TYPE_NORMAL | BACKEND_TYPE_WORKER) == 0 && CountUnconnectedWorkers() == 0 && @@ -3387,7 +3391,7 @@ PostmasterStateMachine(void) if (Shutdown >= ImmediateShutdown || FatalError) { /* - * Start waiting for dead_end children to die. This state + * Start waiting for dead_end children to die. This state * change causes ServerLoop to stop creating new ones. */ pmState = PM_WAIT_DEAD_END; @@ -3487,7 +3491,7 @@ PostmasterStateMachine(void) /* * If we've been told to shut down, we exit as soon as there are no - * remaining children. If there was a crash, cleanup will occur at the + * remaining children. If there was a crash, cleanup will occur at the * next startup. (Before PostgreSQL 8.3, we tried to recover from the * crash before exiting, but that seems unwise if we are quitting because * we got SIGTERM from init --- there may well not be time for recovery @@ -3565,7 +3569,7 @@ PostmasterStateMachine(void) * system(). * * There is a race condition for recently-forked children: they might not - * have executed setsid() yet. So we signal the child directly as well as + * have executed setsid() yet. So we signal the child directly as well as * the group. 
We assume such a child will handle the signal before trying * to spawn any grandchild processes. We also assume that signaling the * child twice will not cause any problems. @@ -3817,7 +3821,7 @@ BackendStartup(Port *port) /* * Try to report backend fork() failure to client before we close the - * connection. Since we do not care to risk blocking the postmaster on + * connection. Since we do not care to risk blocking the postmaster on * this connection, we set the connection to non-blocking and try only once. * * This is grungy special-purpose code; we cannot use backend libpq since @@ -3871,7 +3875,7 @@ BackendInitialize(Port *port) /* * PreAuthDelay is a debugging aid for investigating problems in the * authentication cycle: it can be set in postgresql.conf to allow time to - * attach to the newly-forked backend with a debugger. (See also + * attach to the newly-forked backend with a debugger. (See also * PostAuthDelay, which we allow clients to pass through PGOPTIONS, but it * is not honored until after authentication.) */ @@ -3898,7 +3902,7 @@ BackendInitialize(Port *port) /* * If possible, make this process a group leader, so that the postmaster - * can signal any child processes too. (We do this now on the off chance + * can signal any child processes too. (We do this now on the off chance * that something might spawn a child process during authentication.) */ #ifdef HAVE_SETSID @@ -3908,7 +3912,7 @@ BackendInitialize(Port *port) /* * We arrange for a simple exit(1) if we receive SIGTERM or SIGQUIT or - * timeout while trying to collect the startup packet. Otherwise the + * timeout while trying to collect the startup packet. Otherwise the * postmaster cannot shutdown the database FAST or IMMED cleanly if a * buggy client fails to send the packet promptly. */ @@ -3995,7 +3999,7 @@ BackendInitialize(Port *port) status = ProcessStartupPacket(port, false); /* - * Stop here if it was bad or a cancel packet. ProcessStartupPacket + * Stop here if it was bad or a cancel packet. ProcessStartupPacket * already did any appropriate error reporting. */ if (status != STATUS_OK) @@ -4546,7 +4550,7 @@ SubPostmasterMain(int argc, char *argv[]) read_nondefault_variables(); /* - * Reload any libraries that were preloaded by the postmaster. Since we + * Reload any libraries that were preloaded by the postmaster. Since we * exec'd this process, those libraries didn't come along with us; but we * should load them into all child processes to be consistent with the * non-EXEC_BACKEND behavior. @@ -4599,7 +4603,7 @@ SubPostmasterMain(int argc, char *argv[]) * * This prevents a randomized stack base address that causes child * shared memory to be at a different address than the parent, making - * it impossible to attached to shared memory. Return the value to + * it impossible to attached to shared memory. Return the value to * '1' when finished. */ CreateSharedMemoryAndSemaphores(false, 0); @@ -4719,7 +4723,7 @@ ExitPostmaster(int status) /* should cleanup shared memory and kill all backends */ /* - * Not sure of the semantics here. When the Postmaster dies, should the + * Not sure of the semantics here. When the Postmaster dies, should the * backends all be killed? probably not. * * MUST -- vadim 05-10-1999 @@ -5028,7 +5032,7 @@ CountChildren(int target) /* * StartChildProcess -- start an auxiliary process for the postmaster * - * xlop determines what kind of child will be started. All child types + * xlop determines what kind of child will be started. 
All child types * initially go to AuxiliaryProcessMain, which will handle common setup. * * Return value of StartChildProcess is subprocess' PID, or 0 if failed @@ -5253,7 +5257,7 @@ CreateOptsFile(int argc, char *argv[], char *fullprogname) * These arrays include regular backends, autovac workers, walsenders * and background workers, but not special children nor dead_end children. * This allows the arrays to have a fixed maximum size, to wit the same - * too-many-children limit enforced by canAcceptConnections(). The exact value + * too-many-children limit enforced by canAcceptConnections(). The exact value * isn't too critical as long as it's more than MaxBackends. */ int @@ -5468,7 +5472,7 @@ assign_backendlist_entry(RegisteredBgWorker *rw) static void maybe_start_bgworker(void) { - slist_mutable_iter iter; + slist_mutable_iter iter; TimestampTz now = 0; if (FatalError) @@ -5544,7 +5548,7 @@ maybe_start_bgworker(void) else rw->rw_child_slot = MyPMChildSlot = AssignPostmasterChildSlot(); - do_start_bgworker(rw); /* sets rw->rw_pid */ + do_start_bgworker(rw); /* sets rw->rw_pid */ if (rw->rw_backend) { @@ -5955,7 +5959,7 @@ ShmemBackendArrayRemove(Backend *bn) #ifdef WIN32 /* - * Subset implementation of waitpid() for Windows. We assume pid is -1 + * Subset implementation of waitpid() for Windows. We assume pid is -1 * (that is, check all child processes) and options is WNOHANG (don't wait). */ static pid_t diff --git a/src/backend/postmaster/startup.c b/src/backend/postmaster/startup.c index 5673c8c20fd..a116d029f2d 100644 --- a/src/backend/postmaster/startup.c +++ b/src/backend/postmaster/startup.c @@ -81,7 +81,7 @@ startupproc_quickdie(SIGNAL_ARGS) on_exit_reset(); /* - * Note we do exit(2) not exit(0). This is to force the postmaster into a + * Note we do exit(2) not exit(0). This is to force the postmaster into a * system reset cycle if some idiot DBA sends a manual SIGQUIT to a random * backend. This is necessary precisely because we don't clean up our * shared memory state. (The "dead man switch" mechanism in pmsignal.c diff --git a/src/backend/postmaster/syslogger.c b/src/backend/postmaster/syslogger.c index 4731ab73fed..f89a5339e0f 100644 --- a/src/backend/postmaster/syslogger.c +++ b/src/backend/postmaster/syslogger.c @@ -67,7 +67,7 @@ /* - * GUC parameters. Logging_collector cannot be changed after postmaster + * GUC parameters. Logging_collector cannot be changed after postmaster * start, but the rest can change at SIGHUP. */ bool Logging_collector = false; @@ -193,7 +193,7 @@ SysLoggerMain(int argc, char *argv[]) /* * If we restarted, our stderr is already redirected into our own input * pipe. This is of course pretty useless, not to mention that it - * interferes with detecting pipe EOF. Point stderr to /dev/null. This + * interferes with detecting pipe EOF. Point stderr to /dev/null. This * assumes that all interesting messages generated in the syslogger will * come through elog.c and will be sent to write_syslogger_file. */ @@ -203,7 +203,7 @@ SysLoggerMain(int argc, char *argv[]) /* * The closes might look redundant, but they are not: we want to be - * darn sure the pipe gets closed even if the open failed. We can + * darn sure the pipe gets closed even if the open failed. We can * survive running with stderr pointing nowhere, but we can't afford * to have extra pipe input descriptors hanging around. * @@ -249,7 +249,7 @@ SysLoggerMain(int argc, char *argv[]) /* * If possible, make this process a group leader, so that the postmaster - * can signal any child processes too. 
(syslogger probably never has any + * can signal any child processes too. (syslogger probably never has any * child processes, but for consistency we make all postmaster child * processes do this.) */ @@ -419,7 +419,7 @@ SysLoggerMain(int argc, char *argv[]) /* * Calculate time till next time-based rotation, so that we don't - * sleep longer than that. We assume the value of "now" obtained + * sleep longer than that. We assume the value of "now" obtained * above is still close enough. Note we can't make this calculation * until after calling logfile_rotate(), since it will advance * next_rotation_time. @@ -523,7 +523,7 @@ SysLoggerMain(int argc, char *argv[]) (errmsg("logger shutting down"))); /* - * Normal exit from the syslogger is here. Note that we + * Normal exit from the syslogger is here. Note that we * deliberately do not close syslogFile before exiting; this is to * allow for the possibility of elog messages being generated * inside proc_exit. Regular exit() will take care of flushing @@ -652,8 +652,8 @@ SysLogger_Start(void) */ ereport(LOG, (errmsg("redirecting log output to logging collector process"), - errhint("Future log output will appear in directory \"%s\".", - Log_directory))); + errhint("Future log output will appear in directory \"%s\".", + Log_directory))); #ifndef WIN32 fflush(stdout); @@ -670,6 +670,7 @@ SysLogger_Start(void) close(syslogPipe[1]); syslogPipe[1] = -1; #else + /* * open the pipe in binary mode and make sure stderr is binary * after it's been dup'ed into, to avoid disturbing the pipe @@ -1354,7 +1355,7 @@ set_next_rotation_time(void) /* * The requirements here are to choose the next time > now that is a * "multiple" of the log rotation interval. "Multiple" can be interpreted - * fairly loosely. In this version we align to log_timezone rather than + * fairly loosely. In this version we align to log_timezone rather than * GMT. */ rotinterval = Log_RotationAge * SECS_PER_MINUTE; /* convert to seconds */ diff --git a/src/backend/postmaster/walwriter.c b/src/backend/postmaster/walwriter.c index f8b19c2aa80..0826f8874ca 100644 --- a/src/backend/postmaster/walwriter.c +++ b/src/backend/postmaster/walwriter.c @@ -103,7 +103,7 @@ WalWriterMain(void) /* * If possible, make this process a group leader, so that the postmaster - * can signal any child processes too. (walwriter probably never has any + * can signal any child processes too. (walwriter probably never has any * child processes, but for consistency we make all postmaster child * processes do this.) */ @@ -176,7 +176,7 @@ WalWriterMain(void) /* * These operations are really just a minimal subset of - * AbortTransaction(). We don't have very many resources to worry + * AbortTransaction(). We don't have very many resources to worry * about in walwriter, but we do have LWLocks, and perhaps buffers? */ LWLockReleaseAll(); @@ -250,7 +250,7 @@ WalWriterMain(void) int rc; /* - * Advertise whether we might hibernate in this cycle. We do this + * Advertise whether we might hibernate in this cycle. We do this * before resetting the latch to ensure that any async commits will * see the flag set if they might possibly need to wake us up, and * that we won't miss any signal they send us. (If we discover work @@ -341,7 +341,7 @@ wal_quickdie(SIGNAL_ARGS) on_exit_reset(); /* - * Note we do exit(2) not exit(0). This is to force the postmaster into a + * Note we do exit(2) not exit(0). This is to force the postmaster into a * system reset cycle if some idiot DBA sends a manual SIGQUIT to a random * backend. 
This is necessary precisely because we don't clean up our * shared memory state. (The "dead man switch" mechanism in pmsignal.c diff --git a/src/backend/regex/regc_color.c b/src/backend/regex/regc_color.c index e6aa899518f..c495cee3003 100644 --- a/src/backend/regex/regc_color.c +++ b/src/backend/regex/regc_color.c @@ -2,7 +2,7 @@ * colorings of characters * This file is #included by regcomp.c. * - * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved. + * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved. * * Development of this software was funded, in part, by Cray Research Inc., * UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics diff --git a/src/backend/regex/regc_cvec.c b/src/backend/regex/regc_cvec.c index 580a693161e..921a7d7f92a 100644 --- a/src/backend/regex/regc_cvec.c +++ b/src/backend/regex/regc_cvec.c @@ -2,7 +2,7 @@ * Utility functions for handling cvecs * This file is #included by regcomp.c. * - * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved. + * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved. * * Development of this software was funded, in part, by Cray Research Inc., * UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics diff --git a/src/backend/regex/regc_lex.c b/src/backend/regex/regc_lex.c index c4095e98cbd..6f2c0cb3eb4 100644 --- a/src/backend/regex/regc_lex.c +++ b/src/backend/regex/regc_lex.c @@ -2,7 +2,7 @@ * lexical analyzer * This file is #included by regcomp.c. * - * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved. + * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved. * * Development of this software was funded, in part, by Cray Research Inc., * UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics diff --git a/src/backend/regex/regc_locale.c b/src/backend/regex/regc_locale.c index da597053448..e7bbb50ef46 100644 --- a/src/backend/regex/regc_locale.c +++ b/src/backend/regex/regc_locale.c @@ -30,7 +30,7 @@ * * THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE + * FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE * IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE * NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR * MODIFICATIONS. @@ -38,7 +38,7 @@ * GOVERNMENT USE: If you are acquiring this software on behalf of the * U.S. government, the Government shall have only "Restricted Rights" * in the software and related documentation as defined in the Federal - * Acquisition Regulations (FARs) in Clause 52.227.19 (c) (2). If you + * Acquisition Regulations (FARs) in Clause 52.227.19 (c) (2). If you * are acquiring the software on behalf of the Department of Defense, the * software shall be classified as "Commercial Computer Software" and the * Government shall have only "Restricted Rights" as defined in Clause @@ -667,7 +667,7 @@ allcases(struct vars * v, /* context */ /* * cmp - chr-substring compare * - * Backrefs need this. It should preferably be efficient. + * Backrefs need this. It should preferably be efficient. * Note that it does not need to report anything except equal/unequal. * Note also that the length is exact, and the comparison should not * stop at embedded NULs! 
diff --git a/src/backend/regex/regc_nfa.c b/src/backend/regex/regc_nfa.c index f6dad013b54..3487734a64e 100644 --- a/src/backend/regex/regc_nfa.c +++ b/src/backend/regex/regc_nfa.c @@ -2,7 +2,7 @@ * NFA utilities. * This file is #included by regcomp.c. * - * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved. + * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved. * * Development of this software was funded, in part, by Cray Research Inc., * UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics @@ -1304,7 +1304,7 @@ fixempties(struct nfa * nfa, } /* - * And remove any states that have become useless. (This cleanup is not + * And remove any states that have become useless. (This cleanup is not * very thorough, and would be even less so if we tried to combine it with * the previous step; but cleanup() will take care of anything we miss.) */ @@ -1372,7 +1372,7 @@ replaceempty(struct nfa * nfa, * non-EMPTY out-arcs), we must keep it so, so always push forward in that * case. * - * The fan-out/fan-in comparison should count only non-EMPTY arcs. If + * The fan-out/fan-in comparison should count only non-EMPTY arcs. If * "from" is doomed, we can skip counting "to"'s arcs, since we want to * force taking the copyins path in that case. */ diff --git a/src/backend/regex/regc_pg_locale.c b/src/backend/regex/regc_pg_locale.c index 425c278de43..6b2e38e165d 100644 --- a/src/backend/regex/regc_pg_locale.c +++ b/src/backend/regex/regc_pg_locale.c @@ -24,7 +24,7 @@ * several implementation strategies depending on the situation: * * 1. In C/POSIX collations, we use hard-wired code. We can't depend on - * the <ctype.h> functions since those will obey LC_CTYPE. Note that these + * the <ctype.h> functions since those will obey LC_CTYPE. Note that these * collations don't give a fig about multibyte characters. * * 2. In the "default" collation (which is supposed to obey LC_CTYPE): @@ -36,10 +36,10 @@ * * 2b. In all other encodings, or on machines that lack <wctype.h>, we use * the <ctype.h> functions for pg_wchar values up to 255, and punt for values - * above that. This is only 100% correct in single-byte encodings such as - * LATINn. However, non-Unicode multibyte encodings are mostly Far Eastern + * above that. This is only 100% correct in single-byte encodings such as + * LATINn. However, non-Unicode multibyte encodings are mostly Far Eastern * character sets for which the properties being tested here aren't very - * relevant for higher code values anyway. The difficulty with using the + * relevant for higher code values anyway. The difficulty with using the * <wctype.h> functions with non-Unicode multibyte encodings is that we can * have no certainty that the platform's wchar_t representation matches * what we do in pg_wchar conversions. @@ -730,7 +730,7 @@ store_match(pg_ctype_cache *pcc, pg_wchar chr1, int nchrs) /* * Given a probe function (e.g., pg_wc_isalpha) get a struct cvec for all - * chrs satisfying the probe function. The active collation is the one + * chrs satisfying the probe function. The active collation is the one * previously set by pg_set_regex_collation. Return NULL if out of memory. * * Note that the result must not be freed or modified by caller. 
@@ -777,7 +777,7 @@ pg_ctype_get_cache(pg_wc_probefunc probefunc) * UTF8 go up to 0x7FF, which is a pretty arbitrary cutoff but we cannot * extend it as far as we'd like (say, 0xFFFF, the end of the Basic * Multilingual Plane) without creating significant performance issues due - * to too many characters being fed through the colormap code. This will + * to too many characters being fed through the colormap code. This will * need redesign to fix reasonably, but at least for the moment we have * all common European languages covered. Otherwise (not C, not UTF8) go * up to 255. These limits are interrelated with restrictions discussed diff --git a/src/backend/regex/regcomp.c b/src/backend/regex/regcomp.c index d31d7f7b727..bfe6edd3e1d 100644 --- a/src/backend/regex/regcomp.c +++ b/src/backend/regex/regcomp.c @@ -2,7 +2,7 @@ * re_*comp and friends - compile REs * This file #includes several others (see the bottom). * - * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved. + * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved. * * Development of this software was funded, in part, by Cray Research Inc., * UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics @@ -564,7 +564,7 @@ makesearch(struct vars * v, * constraints, often knowing when you were in the pre state tells you * little; it's the next state(s) that are informative. But some of them * may have other inarcs, i.e. it may be possible to make actual progress - * and then return to one of them. We must de-optimize such cases, + * and then return to one of them. We must de-optimize such cases, * splitting each such state into progress and no-progress states. */ @@ -610,7 +610,7 @@ makesearch(struct vars * v, * parse - parse an RE * * This is actually just the top level, which parses a bunch of branches - * tied together with '|'. They appear in the tree as the left children + * tied together with '|'. They appear in the tree as the left children * of a chain of '|' subres. */ static struct subre * @@ -1352,7 +1352,7 @@ bracket(struct vars * v, /* * cbracket - handle complemented bracket expression * We do it by calling bracket() with dummy endpoints, and then complementing - * the result. The alternative would be to invoke rainbow(), and then delete + * the result. The alternative would be to invoke rainbow(), and then delete * arcs as the b.e. is seen... but that gets messy. */ static void diff --git a/src/backend/regex/rege_dfa.c b/src/backend/regex/rege_dfa.c index 7a7ba5b89cf..d367a77e854 100644 --- a/src/backend/regex/rege_dfa.c +++ b/src/backend/regex/rege_dfa.c @@ -2,7 +2,7 @@ * DFA routines * This file is #included by regexec.c. * - * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved. + * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved. * * Development of this software was funded, in part, by Cray Research Inc., * UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics diff --git a/src/backend/regex/regerror.c b/src/backend/regex/regerror.c index 4b2573e6255..f863ee7344f 100644 --- a/src/backend/regex/regerror.c +++ b/src/backend/regex/regerror.c @@ -1,7 +1,7 @@ /* * regerror - error-code expansion * - * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved. + * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved. 
* * Development of this software was funded, in part, by Cray Research Inc., * UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics diff --git a/src/backend/regex/regexec.c b/src/backend/regex/regexec.c index 2e976627f52..7f41437cb58 100644 --- a/src/backend/regex/regexec.c +++ b/src/backend/regex/regexec.c @@ -1,7 +1,7 @@ /* * re_*exec and friends - match REs * - * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved. + * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved. * * Development of this software was funded, in part, by Cray Research Inc., * UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics @@ -955,7 +955,7 @@ citerdissect(struct vars * v, } /* - * We need workspace to track the endpoints of each sub-match. Normally + * We need workspace to track the endpoints of each sub-match. Normally * we consider only nonzero-length sub-matches, so there can be at most * end-begin of them. However, if min is larger than that, we will also * consider zero-length sub-matches in order to find enough matches. @@ -984,8 +984,8 @@ citerdissect(struct vars * v, /* * Our strategy is to first find a set of sub-match endpoints that are * valid according to the child node's DFA, and then recursively dissect - * each sub-match to confirm validity. If any validity check fails, - * backtrack the last sub-match and try again. And, when we next try for + * each sub-match to confirm validity. If any validity check fails, + * backtrack the last sub-match and try again. And, when we next try for * a validity check, we need not recheck any successfully verified * sub-matches that we didn't move the endpoints of. nverified remembers * how many sub-matches are currently known okay. @@ -1036,7 +1036,7 @@ citerdissect(struct vars * v, /* * We've identified a way to divide the string into k sub-matches that - * works so far as the child DFA can tell. If k is an allowed number + * works so far as the child DFA can tell. If k is an allowed number * of matches, start the slow part: recurse to verify each sub-match. * We always have k <= max_matches, needn't check that. */ @@ -1140,7 +1140,7 @@ creviterdissect(struct vars * v, } /* - * We need workspace to track the endpoints of each sub-match. Normally + * We need workspace to track the endpoints of each sub-match. Normally * we consider only nonzero-length sub-matches, so there can be at most * end-begin of them. However, if min is larger than that, we will also * consider zero-length sub-matches in order to find enough matches. @@ -1169,8 +1169,8 @@ creviterdissect(struct vars * v, /* * Our strategy is to first find a set of sub-match endpoints that are * valid according to the child node's DFA, and then recursively dissect - * each sub-match to confirm validity. If any validity check fails, - * backtrack the last sub-match and try again. And, when we next try for + * each sub-match to confirm validity. If any validity check fails, + * backtrack the last sub-match and try again. And, when we next try for * a validity check, we need not recheck any successfully verified * sub-matches that we didn't move the endpoints of. nverified remembers * how many sub-matches are currently known okay. @@ -1223,7 +1223,7 @@ creviterdissect(struct vars * v, /* * We've identified a way to divide the string into k sub-matches that - * works so far as the child DFA can tell. If k is an allowed number + * works so far as the child DFA can tell. 
If k is an allowed number * of matches, start the slow part: recurse to verify each sub-match. * We always have k <= max_matches, needn't check that. */ diff --git a/src/backend/regex/regfree.c b/src/backend/regex/regfree.c index b291749bd1a..ae17ae70eb6 100644 --- a/src/backend/regex/regfree.c +++ b/src/backend/regex/regfree.c @@ -1,7 +1,7 @@ /* * regfree - free an RE * - * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved. + * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved. * * Development of this software was funded, in part, by Cray Research Inc., * UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics diff --git a/src/backend/regex/regprefix.c b/src/backend/regex/regprefix.c index 3b205e22dc0..9234b4c20ad 100644 --- a/src/backend/regex/regprefix.c +++ b/src/backend/regex/regprefix.c @@ -38,7 +38,7 @@ static int findprefix(struct cnfa * cnfa, struct colormap * cm, * * This function does not analyze all complex cases (such as lookahead * constraints) exactly. Therefore it is possible that some strings matching - * the reported prefix or exact-match string do not satisfy the regex. But + * the reported prefix or exact-match string do not satisfy the regex. But * it should never be the case that a string satisfying the regex does not * match the reported prefix or exact-match string. */ @@ -150,7 +150,7 @@ findprefix(struct cnfa * cnfa, * We could find a state with multiple out-arcs that are all labeled with * the same singleton color; this comes from patterns like "^ab(cde|cxy)". * In that case we add the chr "c" to the output string but then exit the - * loop with nextst == -1. This leaves a little bit on the table: if the + * loop with nextst == -1. This leaves a little bit on the table: if the * pattern is like "^ab(cde|cdy)", we won't notice that "d" could be added * to the prefix. But chasing multiple parallel state chains doesn't seem * worth the trouble. @@ -201,14 +201,14 @@ findprefix(struct cnfa * cnfa, /* * Identify the color's sole member chr and add it to the prefix - * string. In general the colormap data structure doesn't provide a + * string. In general the colormap data structure doesn't provide a * way to find color member chrs, except by trying GETCOLOR() on each * possible chr value, which won't do at all. However, for the cases * we care about it should be sufficient to test the "firstchr" value, * that is the first chr ever added to the color. There are cases * where this might no longer be a member of the color (so we do need * to test), but none of them are likely to arise for a character that - * is a member of a common prefix. If we do hit such a corner case, + * is a member of a common prefix. If we do hit such a corner case, * we just fall out without adding anything to the prefix string. */ c = cm->cd[thiscolor].firstchr; diff --git a/src/backend/replication/basebackup.c b/src/backend/replication/basebackup.c index 42e66f2fed7..a3bf5001ec8 100644 --- a/src/backend/replication/basebackup.c +++ b/src/backend/replication/basebackup.c @@ -137,8 +137,8 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir) SendXlogRecPtrResult(startptr, starttli); /* - * Calculate the relative path of temporary statistics directory - * in order to skip the files which are located in that directory later. + * Calculate the relative path of temporary statistics directory in order + * to skip the files which are located in that directory later. 
*/ if (is_absolute_path(pgstat_stat_directory) && strncmp(pgstat_stat_directory, DataDir, datadirpathlen) == 0) @@ -231,8 +231,8 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir) (int64) opt->maxrate * (int64) 1024 / THROTTLING_FREQUENCY; /* - * The minimum amount of time for throttling_sample - * bytes to be transfered. + * The minimum amount of time for throttling_sample bytes to be + * transfered. */ elapsed_min_unit = USECS_PER_SEC / THROTTLING_FREQUENCY; @@ -613,7 +613,7 @@ parse_basebackup_options(List *options, basebackup_options *opt) ereport(ERROR, (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), errmsg("%d is outside the valid range for parameter \"%s\" (%d .. %d)", - (int) maxrate, "MAX_RATE", MAX_RATE_LOWER, MAX_RATE_UPPER))); + (int) maxrate, "MAX_RATE", MAX_RATE_LOWER, MAX_RATE_UPPER))); opt->maxrate = (uint32) maxrate; o_maxrate = true; @@ -841,7 +841,7 @@ sendFileWithContent(const char *filename, const char *content) /* * Include the tablespace directory pointed to by 'path' in the output tar - * stream. If 'sizeonly' is true, we just calculate a total length and return + * stream. If 'sizeonly' is true, we just calculate a total length and return * it, without actually sending anything. * * Only used to send auxiliary tablespaces, not PGDATA. @@ -975,7 +975,7 @@ sendDir(char *path, int basepathlen, bool sizeonly, List *tablespaces) * always created there. */ if ((statrelpath != NULL && strcmp(pathbuf, statrelpath) == 0) || - strncmp(de->d_name, PG_STAT_TMP_DIR, strlen(PG_STAT_TMP_DIR)) == 0) + strncmp(de->d_name, PG_STAT_TMP_DIR, strlen(PG_STAT_TMP_DIR)) == 0) { if (!sizeonly) _tarWriteHeader(pathbuf + basepathlen + 1, NULL, &statbuf); @@ -1270,14 +1270,14 @@ throttle(size_t increment) * the maximum time to sleep. Thus the cast to long is safe. */ wait_result = WaitLatch(&MyWalSnd->latch, - WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, + WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, (long) (sleep / 1000)); } else { /* - * The actual transfer rate is below the limit. A negative value would - * distort the adjustment of throttled_last. + * The actual transfer rate is below the limit. A negative value + * would distort the adjustment of throttled_last. */ wait_result = 0; sleep = 0; diff --git a/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c index 88d27c7690e..7bc761db8f4 100644 --- a/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c +++ b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c @@ -50,7 +50,7 @@ static void libpqrcv_connect(char *conninfo); static void libpqrcv_identify_system(TimeLineID *primary_tli); static void libpqrcv_readtimelinehistoryfile(TimeLineID tli, char **filename, char **content, int *len); static bool libpqrcv_startstreaming(TimeLineID tli, XLogRecPtr startpoint, - char *slotname); + char *slotname); static void libpqrcv_endstreaming(TimeLineID *next_tli); static int libpqrcv_receive(int timeout, char **buffer); static void libpqrcv_send(const char *buffer, int nbytes); diff --git a/src/backend/replication/logical/decode.c b/src/backend/replication/logical/decode.c index 414cfa95586..7b6114a2097 100644 --- a/src/backend/replication/logical/decode.c +++ b/src/backend/replication/logical/decode.c @@ -9,12 +9,12 @@ * * NOTE: * This basically tries to handle all low level xlog stuff for - * reorderbuffer.c and snapbuild.c. 
There's some minor leakage where a - * specific record's struct is used to pass data along, but those just - * happen to contain the right amount of data in a convenient - * format. There isn't and shouldn't be much intelligence about the - * contents of records in here except turning them into a more usable - * format. + * reorderbuffer.c and snapbuild.c. There's some minor leakage where a + * specific record's struct is used to pass data along, but those just + * happen to contain the right amount of data in a convenient + * format. There isn't and shouldn't be much intelligence about the + * contents of records in here except turning them into a more usable + * format. * * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California @@ -44,10 +44,10 @@ typedef struct XLogRecordBuffer { - XLogRecPtr origptr; - XLogRecPtr endptr; - XLogRecord record; - char *record_data; + XLogRecPtr origptr; + XLogRecPtr endptr; + XLogRecord record; + char *record_data; } XLogRecordBuffer; /* RMGR Handlers */ @@ -63,10 +63,10 @@ static void DecodeUpdate(LogicalDecodingContext *ctx, XLogRecordBuffer *buf); static void DecodeDelete(LogicalDecodingContext *ctx, XLogRecordBuffer *buf); static void DecodeMultiInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf); static void DecodeCommit(LogicalDecodingContext *ctx, XLogRecordBuffer *buf, - TransactionId xid, Oid dboid, - TimestampTz commit_time, - int nsubxacts, TransactionId *sub_xids, - int ninval_msgs, SharedInvalidationMessage *msg); + TransactionId xid, Oid dboid, + TimestampTz commit_time, + int nsubxacts, TransactionId *sub_xids, + int ninval_msgs, SharedInvalidationMessage *msg); static void DecodeAbort(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xid, TransactionId *sub_xids, int nsubxacts); @@ -91,10 +91,10 @@ LogicalDecodingProcessRecord(LogicalDecodingContext *ctx, XLogRecord *record) /* cast so we get a warning when new rmgrs are added */ switch ((RmgrIds) buf.record.xl_rmid) { - /* - * Rmgrs we care about for logical decoding. Add new rmgrs in - * rmgrlist.h's order. - */ + /* + * Rmgrs we care about for logical decoding. Add new rmgrs in + * rmgrlist.h's order. + */ case RM_XLOG_ID: DecodeXLogOp(ctx, &buf); break; @@ -115,11 +115,11 @@ LogicalDecodingProcessRecord(LogicalDecodingContext *ctx, XLogRecord *record) DecodeHeapOp(ctx, &buf); break; - /* - * Rmgrs irrelevant for logical decoding; they describe stuff not - * represented in logical decoding. Add new rmgrs in rmgrlist.h's - * order. - */ + /* + * Rmgrs irrelevant for logical decoding; they describe stuff not + * represented in logical decoding. Add new rmgrs in rmgrlist.h's + * order. + */ case RM_SMGR_ID: case RM_CLOG_ID: case RM_DBASE_ID: @@ -149,13 +149,14 @@ DecodeXLogOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) switch (info) { - /* this is also used in END_OF_RECOVERY checkpoints */ + /* this is also used in END_OF_RECOVERY checkpoints */ case XLOG_CHECKPOINT_SHUTDOWN: case XLOG_END_OF_RECOVERY: SnapBuildSerializationPoint(builder, buf->origptr); break; case XLOG_CHECKPOINT_ONLINE: + /* * a RUNNING_XACTS record will have been logged near to this, we * can restart from there. 
@@ -181,9 +182,9 @@ DecodeXLogOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) static void DecodeXactOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) { - SnapBuild *builder = ctx->snapshot_builder; - ReorderBuffer *reorder = ctx->reorder; - XLogRecord *r = &buf->record; + SnapBuild *builder = ctx->snapshot_builder; + ReorderBuffer *reorder = ctx->reorder; + XLogRecord *r = &buf->record; uint8 info = r->xl_info & ~XLR_INFO_MASK; /* no point in doing anything yet, data could not be decoded anyway */ @@ -280,7 +281,7 @@ DecodeXactOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) int i; TransactionId *sub_xid; - xlrec = (xl_xact_assignment *) buf->record_data; + xlrec = (xl_xact_assignment *) buf->record_data; sub_xid = &xlrec->xsub[0]; @@ -292,6 +293,7 @@ DecodeXactOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) break; } case XLOG_XACT_PREPARE: + /* * Currently decoding ignores PREPARE TRANSACTION and will just * decode the transaction when the COMMIT PREPARED is sent or @@ -321,7 +323,9 @@ DecodeStandbyOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) case XLOG_RUNNING_XACTS: { xl_running_xacts *running = (xl_running_xacts *) buf->record_data; + SnapBuildProcessRunningXacts(builder, buf->origptr, running); + /* * Abort all transactions that we keep track of, that are * older than the record's oldestRunningXid. This is the most @@ -364,22 +368,25 @@ DecodeHeap2Op(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) case XLOG_HEAP2_NEW_CID: { xl_heap_new_cid *xlrec; + xlrec = (xl_heap_new_cid *) buf->record_data; SnapBuildProcessNewCid(builder, xid, buf->origptr, xlrec); break; } case XLOG_HEAP2_REWRITE: + /* * Although these records only exist to serve the needs of logical * decoding, all the work happens as part of crash or archive * recovery, so we don't need to do anything here. */ break; - /* - * Everything else here is just low level physical stuff we're - * not interested in. - */ + + /* + * Everything else here is just low level physical stuff we're not + * interested in. + */ case XLOG_HEAP2_FREEZE_PAGE: case XLOG_HEAP2_CLEAN: case XLOG_HEAP2_CLEANUP_INFO: @@ -429,6 +436,7 @@ DecodeHeapOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) break; case XLOG_HEAP_NEWPAGE: + /* * This is only used in places like indexams and CLUSTER which * don't contain changes relevant for logical replication. @@ -436,6 +444,7 @@ DecodeHeapOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) break; case XLOG_HEAP_INPLACE: + /* * Inplace updates are only ever performed on catalog tuples and * can, per definition, not change tuple visibility. Since we @@ -503,8 +512,8 @@ DecodeCommit(LogicalDecodingContext *ctx, XLogRecordBuffer *buf, * There basically two reasons we might not be interested in this * transaction: * 1) We might not be interested in decoding transactions up to this - * LSN. This can happen because we previously decoded it and now just - * are restarting or if we haven't assembled a consistent snapshot yet. + * LSN. This can happen because we previously decoded it and now just + * are restarting or if we haven't assembled a consistent snapshot yet. * 2) The transaction happened in another database. 
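The DecodeCommit() comment above lists the two reasons a commit record may be of no interest. A hedged standalone sketch of that filter, with assumed names (skip_commit, decode_from_lsn, have_consistent_snapshot); the real code consults the snapshot builder and replication slot state rather than plain parameters:

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t XLogRecPtr;	/* stand-in for the real typedef */
typedef unsigned int Oid;

/*
 * Return true if a commit at "commit_lsn" for database "dboid" should be
 * skipped: either we are not yet interested in records up to this LSN
 * (we are restarting, or no consistent snapshot has been assembled yet),
 * or the transaction ran in a different database than the slot's.
 */
static bool
skip_commit(XLogRecPtr commit_lsn, XLogRecPtr decode_from_lsn,
			bool have_consistent_snapshot, Oid dboid, Oid slot_dboid)
{
	if (!have_consistent_snapshot)
		return true;			/* reason 1: snapshot not ready yet */
	if (commit_lsn < decode_from_lsn)
		return true;			/* reason 1: already decoded past this LSN */
	if (dboid != slot_dboid)
		return true;			/* reason 2: other database */
	return false;
}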
* * We can't just use ReorderBufferAbort() here, because we need to execute diff --git a/src/backend/replication/logical/logical.c b/src/backend/replication/logical/logical.c index 1d08b50da39..438a3fb152d 100644 --- a/src/backend/replication/logical/logical.c +++ b/src/backend/replication/logical/logical.c @@ -8,21 +8,21 @@ * src/backend/replication/logical/logical.c * * NOTES - * This file coordinates interaction between the various modules that - * together provide logical decoding, primarily by providing so - * called LogicalDecodingContexts. The goal is to encapsulate most of the - * internal complexity for consumers of logical decoding, so they can - * create and consume a changestream with a low amount of code. Builtin - * consumers are the walsender and SQL SRF interface, but it's possible to - * add further ones without changing core code, e.g. to consume changes in - * a bgworker. + * This file coordinates interaction between the various modules that + * together provide logical decoding, primarily by providing so + * called LogicalDecodingContexts. The goal is to encapsulate most of the + * internal complexity for consumers of logical decoding, so they can + * create and consume a changestream with a low amount of code. Builtin + * consumers are the walsender and SQL SRF interface, but it's possible to + * add further ones without changing core code, e.g. to consume changes in + * a bgworker. * - * The idea is that a consumer provides three callbacks, one to read WAL, - * one to prepare a data write, and a final one for actually writing since - * their implementation depends on the type of consumer. Check - * logicalfuncs.c for an example implementation of a fairly simple consumer - * and a implementation of a WAL reading callback that's suitable for - * simple consumers. + * The idea is that a consumer provides three callbacks, one to read WAL, + * one to prepare a data write, and a final one for actually writing since + * their implementation depends on the type of consumer. Check + * logicalfuncs.c for an example implementation of a fairly simple consumer + * and a implementation of a WAL reading callback that's suitable for + * simple consumers. *------------------------------------------------------------------------- */ @@ -56,13 +56,13 @@ typedef struct LogicalErrorCallbackState /* wrappers around output plugin callbacks */ static void output_plugin_error_callback(void *arg); static void startup_cb_wrapper(LogicalDecodingContext *ctx, OutputPluginOptions *opt, - bool is_init); + bool is_init); static void shutdown_cb_wrapper(LogicalDecodingContext *ctx); static void begin_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn); static void commit_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn, - XLogRecPtr commit_lsn); + XLogRecPtr commit_lsn); static void change_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn, - Relation relation, ReorderBufferChange *change); + Relation relation, ReorderBufferChange *change); static void LoadOutputPlugin(OutputPluginCallbacks *callbacks, char *plugin); @@ -90,18 +90,18 @@ CheckLogicalDecodingRequirements(void) * * There's basically three things missing to allow this: * 1) We need to be able to correctly and quickly identify the timeline a - * LSN belongs to + * LSN belongs to * 2) We need to force hot_standby_feedback to be enabled at all times so - * the primary cannot remove rows we need. + * the primary cannot remove rows we need. * 3) support dropping replication slots referring to a database, in - * dbase_redo. 
There can't be any active ones due to HS recovery - * conflicts, so that should be relatively easy. + * dbase_redo. There can't be any active ones due to HS recovery + * conflicts, so that should be relatively easy. * ---- */ if (RecoveryInProgress()) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("logical decoding cannot be used while in recovery"))); + errmsg("logical decoding cannot be used while in recovery"))); } /* @@ -117,7 +117,8 @@ StartupDecodingContext(List *output_plugin_options, LogicalOutputPluginWriterWrite do_write) { ReplicationSlot *slot; - MemoryContext context, old_context; + MemoryContext context, + old_context; LogicalDecodingContext *ctx; /* shorter lines... */ @@ -133,7 +134,10 @@ StartupDecodingContext(List *output_plugin_options, ctx->context = context; - /* (re-)load output plugins, so we detect a bad (removed) output plugin now. */ + /* + * (re-)load output plugins, so we detect a bad (removed) output plugin + * now. + */ LoadOutputPlugin(&ctx->callbacks, NameStr(slot->data.plugin)); /* @@ -195,10 +199,10 @@ CreateInitDecodingContext(char *plugin, LogicalOutputPluginWriterPrepareWrite prepare_write, LogicalOutputPluginWriterWrite do_write) { - TransactionId xmin_horizon = InvalidTransactionId; + TransactionId xmin_horizon = InvalidTransactionId; ReplicationSlot *slot; LogicalDecodingContext *ctx; - MemoryContext old_context; + MemoryContext old_context; /* shorter lines... */ slot = MyReplicationSlot; @@ -219,8 +223,8 @@ CreateInitDecodingContext(char *plugin, if (slot->data.database != MyDatabaseId) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("replication slot \"%s\" was not created in this database", - NameStr(slot->data.name)))); + errmsg("replication slot \"%s\" was not created in this database", + NameStr(slot->data.name)))); if (IsTransactionState() && GetTopTransactionIdIfAny() != InvalidTransactionId) @@ -252,9 +256,9 @@ CreateInitDecodingContext(char *plugin, */ if (!RecoveryInProgress()) { - XLogRecPtr flushptr; + XLogRecPtr flushptr; - /* start at current insert position*/ + /* start at current insert position */ slot->data.restart_lsn = GetXLogInsertRecPtr(); /* make sure we have enough information to start */ @@ -307,8 +311,8 @@ CreateInitDecodingContext(char *plugin, LWLockRelease(ProcArrayLock); /* - * tell the snapshot builder to only assemble snapshot once reaching - * the a running_xact's record with the respective xmin. + * tell the snapshot builder to only assemble snapshot once reaching the a + * running_xact's record with the respective xmin. */ xmin_horizon = slot->data.catalog_xmin; @@ -316,7 +320,7 @@ CreateInitDecodingContext(char *plugin, ReplicationSlotSave(); ctx = StartupDecodingContext(NIL, InvalidXLogRecPtr, xmin_horizon, - read_page, prepare_write, do_write); + read_page, prepare_write, do_write); /* call output plugin initialization callback */ old_context = MemoryContextSwitchTo(ctx->context); @@ -352,7 +356,7 @@ CreateDecodingContext(XLogRecPtr start_lsn, { LogicalDecodingContext *ctx; ReplicationSlot *slot; - MemoryContext old_context; + MemoryContext old_context; /* shorter lines... 
*/ slot = MyReplicationSlot; @@ -370,8 +374,8 @@ CreateDecodingContext(XLogRecPtr start_lsn, if (slot->data.database != MyDatabaseId) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - (errmsg("replication slot \"%s\" was not created in this database", - NameStr(slot->data.name))))); + (errmsg("replication slot \"%s\" was not created in this database", + NameStr(slot->data.name))))); if (start_lsn == InvalidXLogRecPtr) { @@ -385,14 +389,14 @@ CreateDecodingContext(XLogRecPtr start_lsn, * pretty common for a client to acknowledge a LSN it doesn't have to * do anything for, and thus didn't store persistently, because the * xlog records didn't result in anything relevant for logical - * decoding. Clients have to be able to do that to support - * synchronous replication. + * decoding. Clients have to be able to do that to support synchronous + * replication. */ start_lsn = slot->data.confirmed_flush; elog(DEBUG1, "cannot stream from %X/%X, minimum is %X/%X, forwarding", - (uint32)(start_lsn >> 32), (uint32)start_lsn, - (uint32)(slot->data.confirmed_flush >> 32), - (uint32)slot->data.confirmed_flush); + (uint32) (start_lsn >> 32), (uint32) start_lsn, + (uint32) (slot->data.confirmed_flush >> 32), + (uint32) slot->data.confirmed_flush); } ctx = StartupDecodingContext(output_plugin_options, @@ -409,10 +413,10 @@ CreateDecodingContext(XLogRecPtr start_lsn, (errmsg("starting logical decoding for slot %s", NameStr(slot->data.name)), errdetail("streaming transactions committing after %X/%X, reading WAL from %X/%X", - (uint32)(slot->data.confirmed_flush >> 32), - (uint32)slot->data.confirmed_flush, - (uint32)(slot->data.restart_lsn >> 32), - (uint32)slot->data.restart_lsn))); + (uint32) (slot->data.confirmed_flush >> 32), + (uint32) slot->data.confirmed_flush, + (uint32) (slot->data.restart_lsn >> 32), + (uint32) slot->data.restart_lsn))); return ctx; } @@ -438,8 +442,8 @@ DecodingContextFindStartpoint(LogicalDecodingContext *ctx) startptr = ctx->slot->data.restart_lsn; elog(DEBUG1, "searching for logical decoding starting point, starting at %X/%X", - (uint32)(ctx->slot->data.restart_lsn >> 32), - (uint32)ctx->slot->data.restart_lsn); + (uint32) (ctx->slot->data.restart_lsn >> 32), + (uint32) ctx->slot->data.restart_lsn); /* Wait for a consistent starting point */ for (;;) @@ -543,14 +547,15 @@ static void output_plugin_error_callback(void *arg) { LogicalErrorCallbackState *state = (LogicalErrorCallbackState *) arg; + /* not all callbacks have an associated LSN */ if (state->report_location != InvalidXLogRecPtr) errcontext("slot \"%s\", output plugin \"%s\", in the %s callback, associated LSN %X/%X", NameStr(state->ctx->slot->data.name), NameStr(state->ctx->slot->data.plugin), state->callback_name, - (uint32)(state->report_location >> 32), - (uint32)state->report_location); + (uint32) (state->report_location >> 32), + (uint32) state->report_location); else errcontext("slot \"%s\", output plugin \"%s\", in the %s callback", NameStr(state->ctx->slot->data.name), @@ -643,7 +648,7 @@ begin_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn) static void commit_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn, - XLogRecPtr commit_lsn) + XLogRecPtr commit_lsn) { LogicalDecodingContext *ctx = cache->private_data; LogicalErrorCallbackState state; @@ -652,7 +657,7 @@ commit_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn, /* Push callback + info on the error context stack */ state.ctx = ctx; state.callback_name = "commit"; - state.report_location = txn->final_lsn; /* beginning 
of commit record */ + state.report_location = txn->final_lsn; /* beginning of commit record */ errcallback.callback = output_plugin_error_callback; errcallback.arg = (void *) &state; errcallback.previous = error_context_stack; @@ -672,7 +677,7 @@ commit_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn, static void change_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn, - Relation relation, ReorderBufferChange *change) + Relation relation, ReorderBufferChange *change) { LogicalDecodingContext *ctx = cache->private_data; LogicalErrorCallbackState state; @@ -690,6 +695,7 @@ change_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn, /* set output state */ ctx->accept_writes = true; ctx->write_xid = txn->xid; + /* * report this change's lsn so replies from clients can give an up2date * answer. This won't ever be enough (and shouldn't be!) to confirm @@ -715,7 +721,7 @@ change_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn, void LogicalIncreaseXminForSlot(XLogRecPtr current_lsn, TransactionId xmin) { - bool updated_xmin = false; + bool updated_xmin = false; ReplicationSlot *slot; slot = MyReplicationSlot; @@ -725,16 +731,17 @@ LogicalIncreaseXminForSlot(XLogRecPtr current_lsn, TransactionId xmin) SpinLockAcquire(&slot->mutex); /* - * don't overwrite if we already have a newer xmin. This can - * happen if we restart decoding in a slot. + * don't overwrite if we already have a newer xmin. This can happen if we + * restart decoding in a slot. */ if (TransactionIdPrecedesOrEquals(xmin, slot->data.catalog_xmin)) { } + /* - * If the client has already confirmed up to this lsn, we directly - * can mark this as accepted. This can happen if we restart - * decoding in a slot. + * If the client has already confirmed up to this lsn, we directly can + * mark this as accepted. This can happen if we restart decoding in a + * slot. */ else if (current_lsn <= slot->data.confirmed_flush) { @@ -744,6 +751,7 @@ LogicalIncreaseXminForSlot(XLogRecPtr current_lsn, TransactionId xmin) /* our candidate can directly be used */ updated_xmin = true; } + /* * Only increase if the previous values have been applied, otherwise we * might never end up updating if the receiver acks too slowly. @@ -770,7 +778,7 @@ LogicalIncreaseXminForSlot(XLogRecPtr current_lsn, TransactionId xmin) void LogicalIncreaseRestartDecodingForSlot(XLogRecPtr current_lsn, XLogRecPtr restart_lsn) { - bool updated_lsn = false; + bool updated_lsn = false; ReplicationSlot *slot; slot = MyReplicationSlot; @@ -781,13 +789,14 @@ LogicalIncreaseRestartDecodingForSlot(XLogRecPtr current_lsn, XLogRecPtr restart SpinLockAcquire(&slot->mutex); - /* don't overwrite if have a newer restart lsn*/ + /* don't overwrite if have a newer restart lsn */ if (restart_lsn <= slot->data.restart_lsn) { } + /* - * We might have already flushed far enough to directly accept this lsn, in - * this case there is no need to check for existing candidate LSNs + * We might have already flushed far enough to directly accept this lsn, + * in this case there is no need to check for existing candidate LSNs */ else if (current_lsn <= slot->data.confirmed_flush) { @@ -797,6 +806,7 @@ LogicalIncreaseRestartDecodingForSlot(XLogRecPtr current_lsn, XLogRecPtr restart /* our candidate can directly be used */ updated_lsn = true; } + /* * Only increase if the previous values have been applied, otherwise we * might never end up updating if the receiver acks too slowly. 
A missed @@ -896,6 +906,7 @@ LogicalConfirmReceivedLocation(XLogRecPtr lsn) ReplicationSlotSave(); elog(DEBUG1, "updated xmin: %u restart: %u", updated_xmin, updated_restart); } + /* * Now the new xmin is safely on disk, we can let the global value * advance. We do not take ProcArrayLock or similar since we only diff --git a/src/backend/replication/logical/logicalfuncs.c b/src/backend/replication/logical/logicalfuncs.c index 5fa1848001d..2da6bb10b22 100644 --- a/src/backend/replication/logical/logicalfuncs.c +++ b/src/backend/replication/logical/logicalfuncs.c @@ -42,11 +42,12 @@ #include "storage/fd.h" /* private date for writing out data */ -typedef struct DecodingOutputState { +typedef struct DecodingOutputState +{ Tuplestorestate *tupstore; - TupleDesc tupdesc; - bool binary_output; - int64 returned_rows; + TupleDesc tupdesc; + bool binary_output; + int64 returned_rows; } DecodingOutputState; /* @@ -91,7 +92,7 @@ LogicalOutputWrite(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xi /* ick, but cstring_to_text_with_len works for bytea perfectly fine */ values[2] = PointerGetDatum( - cstring_to_text_with_len(ctx->out->data, ctx->out->len)); + cstring_to_text_with_len(ctx->out->data, ctx->out->len)); tuplestore_putvalues(p->tupstore, p->tupdesc, values, nulls); p->returned_rows++; @@ -412,7 +413,7 @@ pg_logical_slot_get_changes_guts(FunctionCallInfo fcinfo, bool confirm, bool bin InvalidateSystemCaches(); while ((startptr != InvalidXLogRecPtr && startptr < end_of_wal) || - (ctx->reader->EndRecPtr && ctx->reader->EndRecPtr < end_of_wal)) + (ctx->reader->EndRecPtr && ctx->reader->EndRecPtr < end_of_wal)) { XLogRecord *record; char *errm = NULL; @@ -474,7 +475,8 @@ pg_logical_slot_get_changes_guts(FunctionCallInfo fcinfo, bool confirm, bool bin Datum pg_logical_slot_get_changes(PG_FUNCTION_ARGS) { - Datum ret = pg_logical_slot_get_changes_guts(fcinfo, true, false); + Datum ret = pg_logical_slot_get_changes_guts(fcinfo, true, false); + return ret; } @@ -484,7 +486,8 @@ pg_logical_slot_get_changes(PG_FUNCTION_ARGS) Datum pg_logical_slot_peek_changes(PG_FUNCTION_ARGS) { - Datum ret = pg_logical_slot_get_changes_guts(fcinfo, false, false); + Datum ret = pg_logical_slot_get_changes_guts(fcinfo, false, false); + return ret; } @@ -494,7 +497,8 @@ pg_logical_slot_peek_changes(PG_FUNCTION_ARGS) Datum pg_logical_slot_get_binary_changes(PG_FUNCTION_ARGS) { - Datum ret = pg_logical_slot_get_changes_guts(fcinfo, true, true); + Datum ret = pg_logical_slot_get_changes_guts(fcinfo, true, true); + return ret; } @@ -504,6 +508,7 @@ pg_logical_slot_get_binary_changes(PG_FUNCTION_ARGS) Datum pg_logical_slot_peek_binary_changes(PG_FUNCTION_ARGS) { - Datum ret = pg_logical_slot_get_changes_guts(fcinfo, false, true); + Datum ret = pg_logical_slot_get_changes_guts(fcinfo, false, true); + return ret; } diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c index a2b2adb1732..7f2bbca302e 100644 --- a/src/backend/replication/logical/reorderbuffer.c +++ b/src/backend/replication/logical/reorderbuffer.c @@ -60,7 +60,7 @@ #include "replication/logical.h" #include "replication/reorderbuffer.h" #include "replication/slot.h" -#include "replication/snapbuild.h" /* just for SnapBuildSnapDecRefcount */ +#include "replication/snapbuild.h" /* just for SnapBuildSnapDecRefcount */ #include "storage/bufmgr.h" #include "storage/fd.h" #include "storage/sinval.h" @@ -582,7 +582,7 @@ ReorderBufferTXNByXid(ReorderBuffer *rb, TransactionId xid, bool create, */ void 
ReorderBufferQueueChange(ReorderBuffer *rb, TransactionId xid, XLogRecPtr lsn, - ReorderBufferChange *change) + ReorderBufferChange *change) { ReorderBufferTXN *txn; @@ -1047,8 +1047,8 @@ ReorderBufferCleanupTXN(ReorderBuffer *rb, ReorderBufferTXN *txn) } /* - * Cleanup the tuplecids we stored for decoding catalog snapshot - * access. They are always stored in the toplevel transaction. + * Cleanup the tuplecids we stored for decoding catalog snapshot access. + * They are always stored in the toplevel transaction. */ dlist_foreach_modify(iter, &txn->tuplecids) { @@ -1204,9 +1204,9 @@ ReorderBufferCopySnap(ReorderBuffer *rb, Snapshot orig_snap, snap->subxip[i++] = txn->xid; /* - * nsubxcnt isn't decreased when subtransactions abort, so count - * manually. Since it's an upper boundary it is safe to use it for the - * allocation above. + * nsubxcnt isn't decreased when subtransactions abort, so count manually. + * Since it's an upper boundary it is safe to use it for the allocation + * above. */ snap->subxcnt = 1; @@ -1262,10 +1262,10 @@ ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid, ReorderBufferIterTXNState *iterstate = NULL; ReorderBufferChange *change; - volatile CommandId command_id = FirstCommandId; - volatile Snapshot snapshot_now = NULL; - volatile bool txn_started = false; - volatile bool subtxn_started = false; + volatile CommandId command_id = FirstCommandId; + volatile Snapshot snapshot_now = NULL; + volatile bool txn_started = false; + volatile bool subtxn_started = false; txn = ReorderBufferTXNByXid(rb, xid, false, NULL, InvalidXLogRecPtr, false); @@ -1309,8 +1309,8 @@ ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid, /* * Decoding needs access to syscaches et al., which in turn use - * heavyweight locks and such. Thus we need to have enough state around - * to keep track of those. The easiest way is to simply use a + * heavyweight locks and such. Thus we need to have enough state + * around to keep track of those. The easiest way is to simply use a * transaction internally. That also allows us to easily enforce that * nothing writes to the database by checking for xid assignments. * @@ -1344,7 +1344,7 @@ ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid, Assert(snapshot_now); reloid = RelidByRelfilenode(change->data.tp.relnode.spcNode, - change->data.tp.relnode.relNode); + change->data.tp.relnode.relNode); /* * Catalog tuple without data, emitted while catalog was @@ -1415,6 +1415,7 @@ ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid, ReorderBufferCopySnap(rb, change->data.snapshot, txn, command_id); } + /* * Restored from disk, need to be careful not to double * free. 
We could introduce refcounting for that, but for @@ -1447,7 +1448,7 @@ ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid, { /* we don't use the global one anymore */ snapshot_now = ReorderBufferCopySnap(rb, snapshot_now, - txn, command_id); + txn, command_id); } snapshot_now->curcid = command_id; @@ -1586,7 +1587,7 @@ ReorderBufferAbortOld(ReorderBuffer *rb, TransactionId oldestRunningXid) */ dlist_foreach_modify(it, &rb->toplevel_by_lsn) { - ReorderBufferTXN * txn; + ReorderBufferTXN *txn; txn = dlist_container(ReorderBufferTXN, node, it.cur); @@ -1998,7 +1999,8 @@ ReorderBufferSerializeChange(ReorderBuffer *rb, ReorderBufferTXN *txn, case REORDER_BUFFER_CHANGE_DELETE: { char *data; - ReorderBufferTupleBuf *oldtup, *newtup; + ReorderBufferTupleBuf *oldtup, + *newtup; Size oldlen = 0; Size newlen = 0; @@ -2007,12 +2009,12 @@ ReorderBufferSerializeChange(ReorderBuffer *rb, ReorderBufferTXN *txn, if (oldtup) oldlen = offsetof(ReorderBufferTupleBuf, data) - + oldtup->tuple.t_len + +oldtup->tuple.t_len - offsetof(HeapTupleHeaderData, t_bits); if (newtup) newlen = offsetof(ReorderBufferTupleBuf, data) - + newtup->tuple.t_len + +newtup->tuple.t_len - offsetof(HeapTupleHeaderData, t_bits); sz += oldlen; @@ -2188,7 +2190,7 @@ ReorderBufferRestoreChanges(ReorderBuffer *rb, ReorderBufferTXN *txn, else if (readBytes < 0) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not read from reorderbuffer spill file: %m"))); + errmsg("could not read from reorderbuffer spill file: %m"))); else if (readBytes != sizeof(ReorderBufferDiskChange)) ereport(ERROR, (errcode_for_file_access(), @@ -2199,7 +2201,7 @@ ReorderBufferRestoreChanges(ReorderBuffer *rb, ReorderBufferTXN *txn, ondisk = (ReorderBufferDiskChange *) rb->outbuf; ReorderBufferSerializeReserve(rb, - sizeof(ReorderBufferDiskChange) + ondisk->size); + sizeof(ReorderBufferDiskChange) + ondisk->size); ondisk = (ReorderBufferDiskChange *) rb->outbuf; readBytes = read(*fd, rb->outbuf + sizeof(ReorderBufferDiskChange), @@ -2208,13 +2210,13 @@ ReorderBufferRestoreChanges(ReorderBuffer *rb, ReorderBufferTXN *txn, if (readBytes < 0) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not read from reorderbuffer spill file: %m"))); + errmsg("could not read from reorderbuffer spill file: %m"))); else if (readBytes != ondisk->size - sizeof(ReorderBufferDiskChange)) ereport(ERROR, (errcode_for_file_access(), errmsg("could not read from reorderbuffer spill file: read %d instead of %u bytes", readBytes, - (uint32) (ondisk->size - sizeof(ReorderBufferDiskChange))))); + (uint32) (ondisk->size - sizeof(ReorderBufferDiskChange))))); /* * ok, read a full change from disk, now restore it into proper @@ -2364,7 +2366,7 @@ StartupReorderBuffer(void) logical_dir = AllocateDir("pg_replslot"); while ((logical_de = ReadDir(logical_dir, "pg_replslot")) != NULL) { - struct stat statbuf; + struct stat statbuf; char path[MAXPGPATH]; if (strcmp(logical_de->d_name, ".") == 0 || @@ -2620,7 +2622,7 @@ ReorderBufferToastReplace(ReorderBuffer *rb, ReorderBufferTXN *txn, cchange = dlist_container(ReorderBufferChange, node, it.cur); ctup = cchange->data.tp.newtuple; chunk = DatumGetPointer( - fastgetattr(&ctup->tuple, 3, toast_desc, &isnull)); + fastgetattr(&ctup->tuple, 3, toast_desc, &isnull)); Assert(!isnull); Assert(!VARATT_IS_EXTERNAL(chunk)); @@ -2800,7 +2802,7 @@ ApplyLogicalMappingFile(HTAB *tuplecid_data, Oid relid, const char *fname) ReorderBufferTupleCidKey key; ReorderBufferTupleCidEnt *ent; ReorderBufferTupleCidEnt *new_ent; - bool found; + bool found; 
/* be careful about padding */ memset(&key, 0, sizeof(ReorderBufferTupleCidKey)); @@ -2813,7 +2815,7 @@ ApplyLogicalMappingFile(HTAB *tuplecid_data, Oid relid, const char *fname) (errcode_for_file_access(), errmsg("could not read file \"%s\": %m", path))); - else if (readBytes == 0) /* EOF */ + else if (readBytes == 0) /* EOF */ break; else if (readBytes != sizeof(LogicalRewriteMappingData)) ereport(ERROR, @@ -2884,8 +2886,8 @@ TransactionIdInArray(TransactionId xid, TransactionId *xip, Size num) static int file_sort_by_lsn(const void *a_p, const void *b_p) { - RewriteMappingFile *a = *(RewriteMappingFile **)a_p; - RewriteMappingFile *b = *(RewriteMappingFile **)b_p; + RewriteMappingFile *a = *(RewriteMappingFile **) a_p; + RewriteMappingFile *b = *(RewriteMappingFile **) b_p; if (a->lsn < b->lsn) return -1; @@ -2912,19 +2914,20 @@ UpdateLogicalMappings(HTAB *tuplecid_data, Oid relid, Snapshot snapshot) mapping_dir = AllocateDir("pg_llog/mappings"); while ((mapping_de = ReadDir(mapping_dir, "pg_llog/mappings")) != NULL) { - Oid f_dboid; - Oid f_relid; - TransactionId f_mapped_xid; - TransactionId f_create_xid; - XLogRecPtr f_lsn; - uint32 f_hi, f_lo; + Oid f_dboid; + Oid f_relid; + TransactionId f_mapped_xid; + TransactionId f_create_xid; + XLogRecPtr f_lsn; + uint32 f_hi, + f_lo; RewriteMappingFile *f; if (strcmp(mapping_de->d_name, ".") == 0 || strcmp(mapping_de->d_name, "..") == 0) continue; - /* Ignore files that aren't ours*/ + /* Ignore files that aren't ours */ if (strncmp(mapping_de->d_name, "map-", 4) != 0) continue; @@ -2971,11 +2974,12 @@ UpdateLogicalMappings(HTAB *tuplecid_data, Oid relid, Snapshot snapshot) qsort(files_a, list_length(files), sizeof(RewriteMappingFile *), file_sort_by_lsn); - for(off = 0; off < list_length(files); off++) + for (off = 0; off < list_length(files); off++) { RewriteMappingFile *f = files_a[off]; + elog(DEBUG1, "applying mapping: \"%s\" in %u", f->fname, - snapshot->subxip[0]); + snapshot->subxip[0]); ApplyLogicalMappingFile(tuplecid_data, relid, f->fname); pfree(f); } @@ -2995,7 +2999,7 @@ ResolveCminCmaxDuringDecoding(HTAB *tuplecid_data, ReorderBufferTupleCidEnt *ent; ForkNumber forkno; BlockNumber blockno; - bool updated_mapping = false; + bool updated_mapping = false; /* be careful about padding */ memset(&key, 0, sizeof(key)); diff --git a/src/backend/replication/logical/snapbuild.c b/src/backend/replication/logical/snapbuild.c index 36034dbec9d..cb45f906fc1 100644 --- a/src/backend/replication/logical/snapbuild.c +++ b/src/backend/replication/logical/snapbuild.c @@ -57,27 +57,27 @@ * * The snapbuild machinery is starting up in several stages, as illustrated * by the following graph: - * +-------------------------+ - * +----|SNAPBUILD_START |-------------+ - * | +-------------------------+ | - * | | | - * | | | - * | running_xacts with running xacts | - * | | | - * | | | - * | v | - * | +-------------------------+ v - * | |SNAPBUILD_FULL_SNAPSHOT |------------>| - * | +-------------------------+ | - * running_xacts | saved snapshot - * with zero xacts | at running_xacts's lsn - * | | | - * | all running toplevel TXNs finished | - * | | | - * | v | - * | +-------------------------+ | - * +--->|SNAPBUILD_CONSISTENT |<------------+ - * +-------------------------+ + * +-------------------------+ + * +----|SNAPBUILD_START |-------------+ + * | +-------------------------+ | + * | | | + * | | | + * | running_xacts with running xacts | + * | | | + * | | | + * | v | + * | +-------------------------+ v + * | |SNAPBUILD_FULL_SNAPSHOT |------------>| + * 
| +-------------------------+ | + * running_xacts | saved snapshot + * with zero xacts | at running_xacts's lsn + * | | | + * | all running toplevel TXNs finished | + * | | | + * | v | + * | +-------------------------+ | + * +--->|SNAPBUILD_CONSISTENT |<------------+ + * +-------------------------+ * * Initially the machinery is in the START stage. When a xl_running_xacts * record is read that is sufficiently new (above the safe xmin horizon), @@ -184,7 +184,7 @@ struct SnapBuild * Information about initially running transactions * * When we start building a snapshot there already may be transactions in - * progress. Those are stored in running.xip. We don't have enough + * progress. Those are stored in running.xip. We don't have enough * information about those to decode their contents, so until they are * finished (xcnt=0) we cannot switch to a CONSISTENT state. */ @@ -244,7 +244,7 @@ struct SnapBuild * removes knowledge about the previously used resowner, so we save it here. */ ResourceOwner SavedResourceOwnerDuringExport = NULL; -bool ExportInProgress = false; +bool ExportInProgress = false; /* transaction state manipulation functions */ static void SnapBuildEndTxn(SnapBuild *builder, XLogRecPtr lsn, TransactionId xid); @@ -496,7 +496,7 @@ SnapBuildBuildSnapshot(SnapBuild *builder, TransactionId xid) snapshot->copied = false; snapshot->curcid = FirstCommandId; snapshot->active_count = 0; - snapshot->regd_count = 1; /* mark as registered so nobody frees it */ + snapshot->regd_count = 1; /* mark as registered so nobody frees it */ return snapshot; } @@ -635,7 +635,7 @@ SnapBuildClearExportedSnapshot() bool SnapBuildProcessChange(SnapBuild *builder, TransactionId xid, XLogRecPtr lsn) { - bool is_old_tx; + bool is_old_tx; /* * We can't handle data in transactions if we haven't built a snapshot @@ -692,10 +692,10 @@ SnapBuildProcessNewCid(SnapBuild *builder, TransactionId xid, CommandId cid; /* - * we only log new_cid's if a catalog tuple was modified, so mark - * the transaction as containing catalog modifications + * we only log new_cid's if a catalog tuple was modified, so mark the + * transaction as containing catalog modifications */ - ReorderBufferXidSetCatalogChanges(builder->reorder, xid,lsn); + ReorderBufferXidSetCatalogChanges(builder->reorder, xid, lsn); ReorderBufferAddNewTupleCids(builder->reorder, xlrec->top_xid, lsn, xlrec->target.node, xlrec->target.tid, @@ -712,7 +712,7 @@ SnapBuildProcessNewCid(SnapBuild *builder, TransactionId xid, cid = xlrec->cmin; else { - cid = InvalidCommandId; /* silence compiler */ + cid = InvalidCommandId; /* silence compiler */ elog(ERROR, "xl_heap_new_cid record without a valid CommandId"); } @@ -818,7 +818,7 @@ SnapBuildAddCommittedTxn(SnapBuild *builder, TransactionId xid) (uint32) builder->committed.xcnt_space); builder->committed.xip = repalloc(builder->committed.xip, - builder->committed.xcnt_space * sizeof(TransactionId)); + builder->committed.xcnt_space * sizeof(TransactionId)); } /* @@ -900,10 +900,10 @@ SnapBuildEndTxn(SnapBuild *builder, XLogRecPtr lsn, TransactionId xid) * so our incrementaly built snapshot now is consistent. 
*/ ereport(LOG, - (errmsg("logical decoding found consistent point at %X/%X", - (uint32)(lsn >> 32), (uint32)lsn), - errdetail("xid %u finished, no running transactions anymore", - xid))); + (errmsg("logical decoding found consistent point at %X/%X", + (uint32) (lsn >> 32), (uint32) lsn), + errdetail("xid %u finished, no running transactions anymore", + xid))); builder->state = SNAPBUILD_CONSISTENT; } } @@ -1170,15 +1170,16 @@ SnapBuildProcessRunningXacts(SnapBuild *builder, XLogRecPtr lsn, xl_running_xact */ if (txn != NULL && txn->restart_decoding_lsn != InvalidXLogRecPtr) LogicalIncreaseRestartDecodingForSlot(lsn, txn->restart_decoding_lsn); + /* * No in-progress transaction, can reuse the last serialized snapshot if * we have one. */ else if (txn == NULL && - builder->reorder->current_restart_decoding_lsn != InvalidXLogRecPtr && + builder->reorder->current_restart_decoding_lsn != InvalidXLogRecPtr && builder->last_serialized_snapshot != InvalidXLogRecPtr) LogicalIncreaseRestartDecodingForSlot(lsn, - builder->last_serialized_snapshot); + builder->last_serialized_snapshot); } @@ -1199,23 +1200,23 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn * the currently running transactions. There are several ways to do that: * * a) There were no running transactions when the xl_running_xacts record - * was inserted, jump to CONSISTENT immediately. We might find such a - * state we were waiting for b) and c). + * was inserted, jump to CONSISTENT immediately. We might find such a + * state we were waiting for b) and c). * * b) Wait for all toplevel transactions that were running to end. We - * simply track the number of in-progress toplevel transactions and - * lower it whenever one commits or aborts. When that number - * (builder->running.xcnt) reaches zero, we can go from FULL_SNAPSHOT - * to CONSISTENT. + * simply track the number of in-progress toplevel transactions and + * lower it whenever one commits or aborts. When that number + * (builder->running.xcnt) reaches zero, we can go from FULL_SNAPSHOT + * to CONSISTENT. * NB: We need to search running.xip when seeing a transaction's end to - * make sure it's a toplevel transaction and it's been one of the - * intially running ones. + * make sure it's a toplevel transaction and it's been one of the + * intially running ones. * Interestingly, in contrast to HS, this allows us not to care about * subtransactions - and by extension suboverflowed xl_running_xacts - * at all. * * c) This (in a previous run) or another decoding slot serialized a - * snapshot to disk that we can use. + * snapshot to disk that we can use. 
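Paths (a), (b) and (c) above, together with the START/FULL_SNAPSHOT/CONSISTENT graph earlier in this file's header comment, describe a small state machine. A compressed standalone C sketch of that progression, with hypothetical names (sb_builder, sb_process_running_xacts, sb_end_txn); the real logic is spread across SnapBuildFindSnapshot() and SnapBuildEndTxn():

#include <stdbool.h>

typedef enum
{
	SB_START,					/* nothing known yet */
	SB_FULL_SNAPSHOT,			/* waiting for initially running xacts */
	SB_CONSISTENT				/* can decode and export snapshots */
} sb_state;

typedef struct
{
	sb_state	state;
	int			running_xcnt;	/* # of initially running toplevel xacts */
} sb_builder;

/* Called when an xl_running_xacts record is seen (paths a, b, c). */
static void
sb_process_running_xacts(sb_builder *b, int xacts_running, bool have_serialized)
{
	if (b->state == SB_CONSISTENT)
		return;
	if (have_serialized)		/* path (c): reuse a serialized snapshot */
		b->state = SB_CONSISTENT;
	else if (xacts_running == 0)	/* path (a): nothing was in progress */
		b->state = SB_CONSISTENT;
	else if (b->state == SB_START)	/* path (b): start tracking them */
	{
		b->running_xcnt = xacts_running;
		b->state = SB_FULL_SNAPSHOT;
	}
}

/* Path (b): called when one of the initially running xacts commits/aborts. */
static void
sb_end_txn(sb_builder *b)
{
	if (b->state == SB_FULL_SNAPSHOT && --b->running_xcnt == 0)
		b->state = SB_CONSISTENT;
}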
* --- */ @@ -1231,7 +1232,7 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn (errmsg("skipping snapshot at %X/%X while building logical decoding snapshot, xmin horizon too low", (uint32) (lsn >> 32), (uint32) lsn), errdetail("initial xmin horizon of %u vs the snapshot's %u", - builder->initial_xmin_horizon, running->oldestRunningXid))); + builder->initial_xmin_horizon, running->oldestRunningXid))); return true; } @@ -1263,7 +1264,7 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn ereport(LOG, (errmsg("logical decoding found consistent point at %X/%X", - (uint32)(lsn >> 32), (uint32)lsn), + (uint32) (lsn >> 32), (uint32) lsn), errdetail("running xacts with xcnt == 0"))); return false; @@ -1274,15 +1275,16 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn /* there won't be any state to cleanup */ return false; } + /* * b) first encounter of a useable xl_running_xacts record. If we had - * found one earlier we would either track running transactions - * (i.e. builder->running.xcnt != 0) or be consistent (this function - * wouldn't get called). + * found one earlier we would either track running transactions (i.e. + * builder->running.xcnt != 0) or be consistent (this function wouldn't + * get called). */ else if (!builder->running.xcnt) { - int off; + int off; /* * We only care about toplevel xids as those are the ones we @@ -1302,7 +1304,7 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn builder->running.xcnt_space = running->xcnt; builder->running.xip = MemoryContextAlloc(builder->context, - builder->running.xcnt * sizeof(TransactionId)); + builder->running.xcnt * sizeof(TransactionId)); memcpy(builder->running.xip, running->xids, builder->running.xcnt * sizeof(TransactionId)); @@ -1320,9 +1322,9 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn builder->state = SNAPBUILD_FULL_SNAPSHOT; ereport(LOG, - (errmsg("logical decoding found initial starting point at %X/%X", - (uint32)(lsn >> 32), (uint32)lsn), - errdetail("%u xacts need to finish", (uint32) builder->running.xcnt))); + (errmsg("logical decoding found initial starting point at %X/%X", + (uint32) (lsn >> 32), (uint32) lsn), + errdetail("%u xacts need to finish", (uint32) builder->running.xcnt))); /* * Iterate through all xids, wait for them to finish. @@ -1331,7 +1333,7 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn * isolationtester to notice that we're currently waiting for * something. */ - for(off = 0; off < builder->running.xcnt; off++) + for (off = 0; off < builder->running.xcnt; off++) { TransactionId xid = builder->running.xip[off]; @@ -1471,9 +1473,9 @@ SnapBuildSerialize(SnapBuild *builder, XLogRecPtr lsn) * but remember location, so we don't need to read old data again. * * To be sure it has been synced to disk after the rename() from the - * tempfile filename to the real filename, we just repeat the - * fsync. That ought to be cheap because in most scenarios it should - * already be safely on disk. + * tempfile filename to the real filename, we just repeat the fsync. + * That ought to be cheap because in most scenarios it should already + * be safely on disk. 
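The durability dance described above (write to a temp file, fsync it, rename it into place, then fsync again so the rename itself survives a crash) is the standard pattern; PostgreSQL performs it with fsync_fname() on the file and its directory. A minimal standalone POSIX sketch under an assumed helper name, durable_write_file():

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/*
 * Durably write "len" bytes to "path" living in directory "dirpath":
 * write a temp file, fsync it, rename it over the final name, then fsync
 * the final file and the containing directory.  Returns 0 on success,
 * -1 on error (error handling kept deliberately terse for the sketch).
 */
static int
durable_write_file(const char *path, const char *tmppath, const char *dirpath,
				   const void *data, size_t len)
{
	int			fd;

	fd = open(tmppath, O_WRONLY | O_CREAT | O_TRUNC, 0600);
	if (fd < 0)
		return -1;
	if (write(fd, data, len) != (ssize_t) len || fsync(fd) != 0)
	{
		close(fd);
		return -1;
	}
	close(fd);

	if (rename(tmppath, path) != 0)
		return -1;

	/* repeat the fsync on the final name; usually already clean, so cheap */
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;
	if (fsync(fd) != 0)
	{
		close(fd);
		return -1;
	}
	close(fd);

	/* and make the directory entry created by the rename durable too */
	fd = open(dirpath, O_RDONLY);
	if (fd < 0)
		return -1;
	if (fsync(fd) != 0)
	{
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}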
*/ fsync_fname(path, false); fsync_fname("pg_llog/snapshots", true); @@ -1504,7 +1506,7 @@ SnapBuildSerialize(SnapBuild *builder, XLogRecPtr lsn) if (unlink(tmppath) != 0 && errno != ENOENT) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not unlink file \"%s\": %m", path))); + errmsg("could not unlink file \"%s\": %m", path))); needed_length = sizeof(SnapBuildOnDisk) + sizeof(TransactionId) * builder->running.xcnt_space + @@ -1518,7 +1520,7 @@ SnapBuildSerialize(SnapBuild *builder, XLogRecPtr lsn) INIT_CRC32(ondisk->checksum); COMP_CRC32(ondisk->checksum, ((char *) ondisk) + SnapBuildOnDiskNotChecksummedSize, - SnapBuildOnDiskConstantSize - SnapBuildOnDiskNotChecksummedSize); + SnapBuildOnDiskConstantSize - SnapBuildOnDiskNotChecksummedSize); ondisk_c += sizeof(SnapBuildOnDisk); memcpy(&ondisk->builder, builder, sizeof(SnapBuild)); @@ -1597,8 +1599,8 @@ SnapBuildSerialize(SnapBuild *builder, XLogRecPtr lsn) fsync_fname("pg_llog/snapshots", true); /* - * Now there's no way we can loose the dumped state anymore, remember - * this as a serialization point. + * Now there's no way we can loose the dumped state anymore, remember this + * as a serialization point. */ builder->last_serialized_snapshot = lsn; @@ -1673,7 +1675,7 @@ SnapBuildRestore(SnapBuild *builder, XLogRecPtr lsn) INIT_CRC32(checksum); COMP_CRC32(checksum, ((char *) &ondisk) + SnapBuildOnDiskNotChecksummedSize, - SnapBuildOnDiskConstantSize - SnapBuildOnDiskNotChecksummedSize); + SnapBuildOnDiskConstantSize - SnapBuildOnDiskNotChecksummedSize); /* read SnapBuild */ readBytes = read(fd, &ondisk.builder, sizeof(SnapBuild)); @@ -1781,7 +1783,7 @@ SnapBuildRestore(SnapBuild *builder, XLogRecPtr lsn) ereport(LOG, (errmsg("logical decoding found consistent point at %X/%X", - (uint32)(lsn >> 32), (uint32)lsn), + (uint32) (lsn >> 32), (uint32) lsn), errdetail("found initial snapshot in snapbuild file"))); return true; @@ -1829,7 +1831,7 @@ CheckPointSnapBuild(void) uint32 hi; uint32 lo; XLogRecPtr lsn; - struct stat statbuf; + struct stat statbuf; if (strcmp(snap_de->d_name, ".") == 0 || strcmp(snap_de->d_name, "..") == 0) @@ -1846,8 +1848,8 @@ CheckPointSnapBuild(void) /* * temporary filenames from SnapBuildSerialize() include the LSN and * everything but are postfixed by .$pid.tmp. We can just remove them - * the same as other files because there can be none that are currently - * being written that are older than cutoff. + * the same as other files because there can be none that are + * currently being written that are older than cutoff. * * We just log a message if a file doesn't fit the pattern, it's * probably some editors lock/state file or similar... 
diff --git a/src/backend/replication/slot.c b/src/backend/replication/slot.c index 76e55736605..ee0c7c07a97 100644 --- a/src/backend/replication/slot.c +++ b/src/backend/replication/slot.c @@ -72,7 +72,7 @@ typedef struct ReplicationSlotOnDisk sizeof(ReplicationSlotOnDisk) - ReplicationSlotOnDiskConstantSize #define SLOT_MAGIC 0x1051CA1 /* format identifier */ -#define SLOT_VERSION 1 /* version for new files */ +#define SLOT_VERSION 1 /* version for new files */ /* Control array for replication slot management */ ReplicationSlotCtlData *ReplicationSlotCtl = NULL; @@ -81,7 +81,8 @@ ReplicationSlotCtlData *ReplicationSlotCtl = NULL; ReplicationSlot *MyReplicationSlot = NULL; /* GUCs */ -int max_replication_slots = 0; /* the maximum number of replication slots */ +int max_replication_slots = 0; /* the maximum number of replication + * slots */ static void ReplicationSlotDropAcquired(void); @@ -180,8 +181,8 @@ ReplicationSlotValidateName(const char *name, int elevel) { ereport(elevel, (errcode(ERRCODE_INVALID_NAME), - errmsg("replication slot name \"%s\" contains invalid character", - name), + errmsg("replication slot name \"%s\" contains invalid character", + name), errhint("Replication slot names may only contain letters, numbers and the underscore character."))); return false; } @@ -194,7 +195,7 @@ ReplicationSlotValidateName(const char *name, int elevel) * * name: Name of the slot * db_specific: logical decoding is db specific; if the slot is going to - * be used for that pass true, otherwise false. + * be used for that pass true, otherwise false. */ void ReplicationSlotCreate(const char *name, bool db_specific, @@ -208,18 +209,18 @@ ReplicationSlotCreate(const char *name, bool db_specific, ReplicationSlotValidateName(name, ERROR); /* - * If some other backend ran this code currently with us, we'd likely - * both allocate the same slot, and that would be bad. We'd also be - * at risk of missing a name collision. Also, we don't want to try to - * create a new slot while somebody's busy cleaning up an old one, because - * we might both be monkeying with the same directory. + * If some other backend ran this code currently with us, we'd likely both + * allocate the same slot, and that would be bad. We'd also be at risk of + * missing a name collision. Also, we don't want to try to create a new + * slot while somebody's busy cleaning up an old one, because we might + * both be monkeying with the same directory. */ LWLockAcquire(ReplicationSlotAllocationLock, LW_EXCLUSIVE); /* - * Check for name collision, and identify an allocatable slot. We need - * to hold ReplicationSlotControlLock in shared mode for this, so that - * nobody else can change the in_use flags while we're looking at them. + * Check for name collision, and identify an allocatable slot. We need to + * hold ReplicationSlotControlLock in shared mode for this, so that nobody + * else can change the in_use flags while we're looking at them. */ LWLockAcquire(ReplicationSlotControlLock, LW_SHARED); for (i = 0; i < max_replication_slots; i++) @@ -243,10 +244,10 @@ ReplicationSlotCreate(const char *name, bool db_specific, errhint("Free one or increase max_replication_slots."))); /* - * Since this slot is not in use, nobody should be looking at any - * part of it other than the in_use field unless they're trying to allocate - * it. And since we hold ReplicationSlotAllocationLock, nobody except us - * can be doing that. So it's safe to initialize the slot. 
+ * Since this slot is not in use, nobody should be looking at any part of + * it other than the in_use field unless they're trying to allocate it. + * And since we hold ReplicationSlotAllocationLock, nobody except us can + * be doing that. So it's safe to initialize the slot. */ Assert(!slot->in_use); Assert(!slot->active); @@ -366,6 +367,7 @@ ReplicationSlotRelease(void) { /* Mark slot inactive. We're not freeing it, just disconnecting. */ volatile ReplicationSlot *vslot = slot; + SpinLockAcquire(&slot->mutex); vslot->active = false; SpinLockRelease(&slot->mutex); @@ -444,7 +446,7 @@ ReplicationSlotDropAcquired(void) else { volatile ReplicationSlot *vslot = slot; - bool fail_softly = slot->data.persistency == RS_EPHEMERAL; + bool fail_softly = slot->data.persistency == RS_EPHEMERAL; SpinLockAcquire(&slot->mutex); vslot->active = false; @@ -571,8 +573,8 @@ ReplicationSlotsComputeRequiredXmin(bool already_locked) for (i = 0; i < max_replication_slots; i++) { ReplicationSlot *s = &ReplicationSlotCtl->replication_slots[i]; - TransactionId effective_xmin; - TransactionId effective_catalog_xmin; + TransactionId effective_xmin; + TransactionId effective_catalog_xmin; if (!s->in_use) continue; @@ -612,7 +614,7 @@ void ReplicationSlotsComputeRequiredLSN(void) { int i; - XLogRecPtr min_required = InvalidXLogRecPtr; + XLogRecPtr min_required = InvalidXLogRecPtr; Assert(ReplicationSlotCtl != NULL); @@ -620,7 +622,7 @@ ReplicationSlotsComputeRequiredLSN(void) for (i = 0; i < max_replication_slots; i++) { ReplicationSlot *s = &ReplicationSlotCtl->replication_slots[i]; - XLogRecPtr restart_lsn; + XLogRecPtr restart_lsn; if (!s->in_use) continue; @@ -669,7 +671,7 @@ ReplicationSlotsComputeLogicalRestartLSN(void) for (i = 0; i < max_replication_slots; i++) { volatile ReplicationSlot *s; - XLogRecPtr restart_lsn; + XLogRecPtr restart_lsn; s = &ReplicationSlotCtl->replication_slots[i]; @@ -772,8 +774,8 @@ CheckSlotRequirements(void) static bool string_endswith(const char *str, const char *end) { - size_t slen = strlen(str); - size_t elen = strlen(end); + size_t slen = strlen(str); + size_t elen = strlen(end); /* can't be a postfix if longer */ if (elen > slen) @@ -802,8 +804,8 @@ CheckPointReplicationSlots(void) * Prevent any slot from being created/dropped while we're active. As we * explicitly do *not* want to block iterating over replication_slots or * acquiring a slot we cannot take the control lock - but that's OK, - * because holding ReplicationSlotAllocationLock is strictly stronger, - * and enough to guarantee that nobody can change the in_use bits on us. + * because holding ReplicationSlotAllocationLock is strictly stronger, and + * enough to guarantee that nobody can change the in_use bits on us. 
*/ LWLockAcquire(ReplicationSlotAllocationLock, LW_SHARED); @@ -839,7 +841,7 @@ StartupReplicationSlots(XLogRecPtr checkPointRedo) replication_dir = AllocateDir("pg_replslot"); while ((replication_de = ReadDir(replication_dir, "pg_replslot")) != NULL) { - struct stat statbuf; + struct stat statbuf; char path[MAXPGPATH]; if (strcmp(replication_de->d_name, ".") == 0 || @@ -892,7 +894,7 @@ CreateSlotOnDisk(ReplicationSlot *slot) { char tmppath[MAXPGPATH]; char path[MAXPGPATH]; - struct stat st; + struct stat st; /* * No need to take out the io_in_progress_lock, nobody else can see this @@ -904,11 +906,10 @@ CreateSlotOnDisk(ReplicationSlot *slot) sprintf(tmppath, "pg_replslot/%s.tmp", NameStr(slot->data.name)); /* - * It's just barely possible that some previous effort to create or - * drop a slot with this name left a temp directory lying around. - * If that seems to be the case, try to remove it. If the rmtree() - * fails, we'll error out at the mkdir() below, so we don't bother - * checking success. + * It's just barely possible that some previous effort to create or drop a + * slot with this name left a temp directory lying around. If that seems + * to be the case, try to remove it. If the rmtree() fails, we'll error + * out at the mkdir() below, so we don't bother checking success. */ if (stat(tmppath, &st) == 0 && S_ISDIR(st.st_mode)) rmtree(tmppath, true); @@ -922,7 +923,7 @@ CreateSlotOnDisk(ReplicationSlot *slot) fsync_fname(tmppath, true); /* Write the actual state file. */ - slot->dirty = true; /* signal that we really need to write */ + slot->dirty = true; /* signal that we really need to write */ SaveSlotToPath(slot, tmppath, ERROR); /* Rename the directory into place. */ @@ -1003,12 +1004,13 @@ SaveSlotToPath(ReplicationSlot *slot, const char *dir, int elevel) SpinLockRelease(&slot->mutex); COMP_CRC32(cp.checksum, - (char *)(&cp) + ReplicationSlotOnDiskConstantSize, + (char *) (&cp) + ReplicationSlotOnDiskConstantSize, ReplicationSlotOnDiskDynamicSize); if ((write(fd, &cp, sizeof(cp))) != sizeof(cp)) { - int save_errno = errno; + int save_errno = errno; + CloseTransientFile(fd); errno = save_errno; ereport(elevel, @@ -1021,7 +1023,8 @@ SaveSlotToPath(ReplicationSlot *slot, const char *dir, int elevel) /* fsync the temporary file */ if (pg_fsync(fd) != 0) { - int save_errno = errno; + int save_errno = errno; + CloseTransientFile(fd); errno = save_errno; ereport(elevel, @@ -1150,19 +1153,19 @@ RestoreSlotFromDisk(const char *name) if (cp.version != SLOT_VERSION) ereport(PANIC, (errcode_for_file_access(), - errmsg("replication slot file \"%s\" has unsupported version %u", - path, cp.version))); + errmsg("replication slot file \"%s\" has unsupported version %u", + path, cp.version))); /* boundary check on length */ if (cp.length != ReplicationSlotOnDiskDynamicSize) ereport(PANIC, (errcode_for_file_access(), - errmsg("replication slot file \"%s\" has corrupted length %u", - path, cp.length))); + errmsg("replication slot file \"%s\" has corrupted length %u", + path, cp.length))); /* Now that we know the size, read the entire file */ readBytes = read(fd, - (char *)&cp + ReplicationSlotOnDiskConstantSize, + (char *) &cp + ReplicationSlotOnDiskConstantSize, cp.length); if (readBytes != cp.length) { @@ -1181,7 +1184,7 @@ RestoreSlotFromDisk(const char *name) /* now verify the CRC32 */ INIT_CRC32(checksum); COMP_CRC32(checksum, - (char *)&cp + ReplicationSlotOnDiskConstantSize, + (char *) &cp + ReplicationSlotOnDiskConstantSize, ReplicationSlotOnDiskDynamicSize); if (!EQ_CRC32(checksum, 
cp.checksum)) diff --git a/src/backend/replication/slotfuncs.c b/src/backend/replication/slotfuncs.c index c9416b03eee..dc94f504ee2 100644 --- a/src/backend/replication/slotfuncs.c +++ b/src/backend/replication/slotfuncs.c @@ -53,7 +53,7 @@ pg_create_physical_replication_slot(PG_FUNCTION_ARGS) if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) elog(ERROR, "return type must be a row type"); - /* acquire replication slot, this will check for conflicting names*/ + /* acquire replication slot, this will check for conflicting names */ ReplicationSlotCreate(NameStr(*name), false, RS_PERSISTENT); values[0] = NameGetDatum(&MyReplicationSlot->data.name); @@ -97,8 +97,7 @@ pg_create_logical_replication_slot(PG_FUNCTION_ARGS) Assert(!MyReplicationSlot); /* - * Acquire a logical decoding slot, this will check for conflicting - * names. + * Acquire a logical decoding slot, this will check for conflicting names. */ ReplicationSlotCreate(NameStr(*name), true, RS_EPHEMERAL); @@ -106,8 +105,8 @@ pg_create_logical_replication_slot(PG_FUNCTION_ARGS) * Create logical decoding context, to build the initial snapshot. */ ctx = CreateInitDecodingContext( - NameStr(*plugin), NIL, - logical_read_local_xlog_page, NULL, NULL); + NameStr(*plugin), NIL, + logical_read_local_xlog_page, NULL, NULL); /* build initial snapshot, might take a while */ DecodingContextFindStartpoint(ctx); diff --git a/src/backend/replication/syncrep.c b/src/backend/replication/syncrep.c index f65021caa68..aa54bfba6cf 100644 --- a/src/backend/replication/syncrep.c +++ b/src/backend/replication/syncrep.c @@ -117,8 +117,8 @@ SyncRepWaitForLSN(XLogRecPtr XactCommitLSN) * set. See SyncRepUpdateSyncStandbysDefined. * * Also check that the standby hasn't already replied. Unlikely race - * condition but we'll be fetching that cache line anyway so it's likely to - * be a low cost check. + * condition but we'll be fetching that cache line anyway so it's likely + * to be a low cost check. */ if (!WalSndCtl->sync_standbys_defined || XactCommitLSN <= WalSndCtl->lsn[mode]) @@ -517,7 +517,7 @@ SyncRepGetStandbyPriority(void) } /* - * Walk the specified queue from head. Set the state of any backends that + * Walk the specified queue from head. Set the state of any backends that * need to be woken, remove them from the queue, and then wake them. * Pass all = true to wake whole queue; otherwise, just wake up to * the walsender's LSN. diff --git a/src/backend/replication/walreceiver.c b/src/backend/replication/walreceiver.c index b0de0ea253e..c2d4ed3a968 100644 --- a/src/backend/replication/walreceiver.c +++ b/src/backend/replication/walreceiver.c @@ -258,7 +258,7 @@ WalReceiverMain(void) /* * If possible, make this process a group leader, so that the postmaster - * can signal any child processes too. (walreceiver probably never has + * can signal any child processes too. (walreceiver probably never has * any child processes, but for consistency we make all postmaster child * processes do this.) */ @@ -786,7 +786,7 @@ WalRcvQuickDieHandler(SIGNAL_ARGS) on_exit_reset(); /* - * Note we do exit(2) not exit(0). This is to force the postmaster into a + * Note we do exit(2) not exit(0). This is to force the postmaster into a * system reset cycle if some idiot DBA sends a manual SIGQUIT to a random * backend. This is necessary precisely because we don't clean up our * shared memory state. 
(The "dead man switch" mechanism in pmsignal.c @@ -934,9 +934,9 @@ XLogWalRcvWrite(char *buf, Size nbytes, XLogRecPtr recptr) if (lseek(recvFile, (off_t) startoff, SEEK_SET) < 0) ereport(PANIC, (errcode_for_file_access(), - errmsg("could not seek in log segment %s to offset %u: %m", - XLogFileNameP(recvFileTLI, recvSegNo), - startoff))); + errmsg("could not seek in log segment %s to offset %u: %m", + XLogFileNameP(recvFileTLI, recvSegNo), + startoff))); recvOff = startoff; } diff --git a/src/backend/replication/walreceiverfuncs.c b/src/backend/replication/walreceiverfuncs.c index acadec57f5a..579216af34d 100644 --- a/src/backend/replication/walreceiverfuncs.c +++ b/src/backend/replication/walreceiverfuncs.c @@ -291,7 +291,7 @@ RequestXLogStreaming(TimeLineID tli, XLogRecPtr recptr, const char *conninfo, * Returns the last+1 byte position that walreceiver has written. * * Optionally, returns the previous chunk start, that is the first byte - * written in the most recent walreceiver flush cycle. Callers not + * written in the most recent walreceiver flush cycle. Callers not * interested in that value may pass NULL for latestChunkStart. Same for * receiveTLI. */ diff --git a/src/backend/replication/walsender.c b/src/backend/replication/walsender.c index 6e22c03bcfa..5c11d681c33 100644 --- a/src/backend/replication/walsender.c +++ b/src/backend/replication/walsender.c @@ -82,7 +82,7 @@ #include "utils/timestamp.h" /* - * Maximum data payload in a WAL data message. Must be >= XLOG_BLCKSZ. + * Maximum data payload in a WAL data message. Must be >= XLOG_BLCKSZ. * * We don't have a good idea of what a good value would be; there's some * overhead per message in both walsender and walreceiver, but on the other @@ -165,7 +165,7 @@ static bool streamingDoneSending; static bool streamingDoneReceiving; /* Are we there yet? */ -static bool WalSndCaughtUp = false; +static bool WalSndCaughtUp = false; /* Flags set by signal handlers for later service in main loop */ static volatile sig_atomic_t got_SIGHUP = false; @@ -180,7 +180,7 @@ static volatile sig_atomic_t walsender_ready_to_stop = false; static volatile sig_atomic_t replication_active = false; static LogicalDecodingContext *logical_decoding_ctx = NULL; -static XLogRecPtr logical_startptr = InvalidXLogRecPtr; +static XLogRecPtr logical_startptr = InvalidXLogRecPtr; /* Signal handlers */ static void WalSndSigHupHandler(SIGNAL_ARGS); @@ -188,7 +188,7 @@ static void WalSndXLogSendHandler(SIGNAL_ARGS); static void WalSndLastCycleHandler(SIGNAL_ARGS); /* Prototypes for private functions */ -typedef void (*WalSndSendDataCallback)(void); +typedef void (*WalSndSendDataCallback) (void); static void WalSndLoop(WalSndSendDataCallback send_data); static void InitWalSenderSlot(void); static void WalSndKill(int code, Datum arg); @@ -301,8 +301,8 @@ IdentifySystem(void) /* * Reply with a result set with one row, four columns. First col is system - * ID, second is timeline ID, third is current xlog location and the fourth - * contains the database name if we are connected to one. + * ID, second is timeline ID, third is current xlog location and the + * fourth contains the database name if we are connected to one. 
*/ snprintf(sysid, sizeof(sysid), UINT64_FORMAT, @@ -358,22 +358,22 @@ IdentifySystem(void) pq_sendint(&buf, 0, 2); /* format code */ /* third field */ - pq_sendstring(&buf, "xlogpos"); /* col name */ - pq_sendint(&buf, 0, 4); /* table oid */ - pq_sendint(&buf, 0, 2); /* attnum */ - pq_sendint(&buf, TEXTOID, 4); /* type oid */ - pq_sendint(&buf, -1, 2); /* typlen */ - pq_sendint(&buf, 0, 4); /* typmod */ - pq_sendint(&buf, 0, 2); /* format code */ + pq_sendstring(&buf, "xlogpos"); /* col name */ + pq_sendint(&buf, 0, 4); /* table oid */ + pq_sendint(&buf, 0, 2); /* attnum */ + pq_sendint(&buf, TEXTOID, 4); /* type oid */ + pq_sendint(&buf, -1, 2); /* typlen */ + pq_sendint(&buf, 0, 4); /* typmod */ + pq_sendint(&buf, 0, 2); /* format code */ /* fourth field */ - pq_sendstring(&buf, "dbname"); /* col name */ - pq_sendint(&buf, 0, 4); /* table oid */ - pq_sendint(&buf, 0, 2); /* attnum */ - pq_sendint(&buf, TEXTOID, 4); /* type oid */ - pq_sendint(&buf, -1, 2); /* typlen */ - pq_sendint(&buf, 0, 4); /* typmod */ - pq_sendint(&buf, 0, 2); /* format code */ + pq_sendstring(&buf, "dbname"); /* col name */ + pq_sendint(&buf, 0, 4); /* table oid */ + pq_sendint(&buf, 0, 2); /* attnum */ + pq_sendint(&buf, TEXTOID, 4); /* type oid */ + pq_sendint(&buf, -1, 2); /* typlen */ + pq_sendint(&buf, 0, 4); /* typmod */ + pq_sendint(&buf, 0, 2); /* format code */ pq_endmessage(&buf); /* Send a DataRow message */ @@ -388,12 +388,12 @@ IdentifySystem(void) /* send NULL if not connected to a database */ if (dbname) { - pq_sendint(&buf, strlen(dbname), 4); /* col4 len */ + pq_sendint(&buf, strlen(dbname), 4); /* col4 len */ pq_sendbytes(&buf, (char *) dbname, strlen(dbname)); } else { - pq_sendint(&buf, -1, 4); /* col4 len, NULL */ + pq_sendint(&buf, -1, 4); /* col4 len, NULL */ } pq_endmessage(&buf); @@ -731,11 +731,11 @@ StartReplication(StartReplicationCmd *cmd) * set everytime WAL is flushed. 
*/ static int -logical_read_xlog_page(XLogReaderState* state, XLogRecPtr targetPagePtr, int reqLen, - XLogRecPtr targetRecPtr, char* cur_page, TimeLineID *pageTLI) +logical_read_xlog_page(XLogReaderState *state, XLogRecPtr targetPagePtr, int reqLen, + XLogRecPtr targetRecPtr, char *cur_page, TimeLineID *pageTLI) { - XLogRecPtr flushptr; - int count; + XLogRecPtr flushptr; + int count; /* make sure we have enough WAL available */ flushptr = WalSndWaitForWal(targetPagePtr + reqLen); @@ -764,7 +764,7 @@ CreateReplicationSlot(CreateReplicationSlotCmd *cmd) { const char *slot_name; const char *snapshot_name = NULL; - char xpos[MAXFNAMELEN]; + char xpos[MAXFNAMELEN]; StringInfoData buf; Assert(!MyReplicationSlot); @@ -792,9 +792,9 @@ CreateReplicationSlot(CreateReplicationSlotCmd *cmd) LogicalDecodingContext *ctx; ctx = CreateInitDecodingContext( - cmd->plugin, NIL, - logical_read_xlog_page, - WalSndPrepareWrite, WalSndWriteData); + cmd->plugin, NIL, + logical_read_xlog_page, + WalSndPrepareWrite, WalSndWriteData); /* build initial snapshot, might take a while */ DecodingContextFindStartpoint(ctx); @@ -838,7 +838,7 @@ CreateReplicationSlot(CreateReplicationSlotCmd *cmd) pq_sendint(&buf, 0, 2); /* format code */ /* third field: exported snapshot's name */ - pq_sendstring(&buf, "snapshot_name"); /* col name */ + pq_sendstring(&buf, "snapshot_name"); /* col name */ pq_sendint(&buf, 0, 4); /* table oid */ pq_sendint(&buf, 0, 2); /* attnum */ pq_sendint(&buf, TEXTOID, 4); /* type oid */ @@ -847,7 +847,7 @@ CreateReplicationSlot(CreateReplicationSlotCmd *cmd) pq_sendint(&buf, 0, 2); /* format code */ /* fourth field: output plugin */ - pq_sendstring(&buf, "output_plugin"); /* col name */ + pq_sendstring(&buf, "output_plugin"); /* col name */ pq_sendint(&buf, 0, 4); /* table oid */ pq_sendint(&buf, 0, 2); /* attnum */ pq_sendint(&buf, TEXTOID, 4); /* type oid */ @@ -862,26 +862,26 @@ CreateReplicationSlot(CreateReplicationSlotCmd *cmd) pq_sendint(&buf, 4, 2); /* # of columns */ /* slot_name */ - pq_sendint(&buf, strlen(slot_name), 4); /* col1 len */ + pq_sendint(&buf, strlen(slot_name), 4); /* col1 len */ pq_sendbytes(&buf, slot_name, strlen(slot_name)); /* consistent wal location */ - pq_sendint(&buf, strlen(xpos), 4); /* col2 len */ + pq_sendint(&buf, strlen(xpos), 4); /* col2 len */ pq_sendbytes(&buf, xpos, strlen(xpos)); /* snapshot name */ if (snapshot_name != NULL) { - pq_sendint(&buf, strlen(snapshot_name), 4); /* col3 len */ + pq_sendint(&buf, strlen(snapshot_name), 4); /* col3 len */ pq_sendbytes(&buf, snapshot_name, strlen(snapshot_name)); } else - pq_sendint(&buf, -1, 4); /* col3 len, NULL */ + pq_sendint(&buf, -1, 4); /* col3 len, NULL */ /* plugin */ if (cmd->plugin != NULL) { - pq_sendint(&buf, strlen(cmd->plugin), 4); /* col4 len */ + pq_sendint(&buf, strlen(cmd->plugin), 4); /* col4 len */ pq_sendbytes(&buf, cmd->plugin, strlen(cmd->plugin)); } else @@ -951,9 +951,9 @@ StartLogicalReplication(StartReplicationCmd *cmd) * to be shipped from that position. */ logical_decoding_ctx = CreateDecodingContext( - cmd->startpoint, cmd->options, - logical_read_xlog_page, - WalSndPrepareWrite, WalSndWriteData); + cmd->startpoint, cmd->options, + logical_read_xlog_page, + WalSndPrepareWrite, WalSndWriteData); /* Start reading WAL from the oldest required WAL. 
*/ logical_startptr = MyReplicationSlot->data.restart_lsn; @@ -1013,11 +1013,12 @@ WalSndPrepareWrite(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xi pq_sendbyte(ctx->out, 'w'); pq_sendint64(ctx->out, lsn); /* dataStart */ pq_sendint64(ctx->out, lsn); /* walEnd */ + /* * Fill out the sendtime later, just as it's done in XLogSendPhysical, but * reserve space here. */ - pq_sendint64(ctx->out, 0); /* sendtime */ + pq_sendint64(ctx->out, 0); /* sendtime */ } /* @@ -1035,9 +1036,9 @@ WalSndWriteData(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xid, pq_putmessage_noblock('d', ctx->out->data, ctx->out->len); /* - * Fill the send timestamp last, so that it is taken as late as - * possible. This is somewhat ugly, but the protocol's set as it's already - * used for several releases by streaming physical replication. + * Fill the send timestamp last, so that it is taken as late as possible. + * This is somewhat ugly, but the protocol's set as it's already used for + * several releases by streaming physical replication. */ resetStringInfo(&tmpbuf); pq_sendint64(&tmpbuf, GetCurrentIntegerTimestamp()); @@ -1056,7 +1057,7 @@ WalSndWriteData(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xid, { int wakeEvents; long sleeptime; - TimestampTz now; + TimestampTz now; /* * Emergency bailout if postmaster has died. This is to avoid the @@ -1140,7 +1141,7 @@ WalSndWaitForWal(XLogRecPtr loc) for (;;) { long sleeptime; - TimestampTz now; + TimestampTz now; /* * Emergency bailout if postmaster has died. This is to avoid the @@ -1297,6 +1298,7 @@ exec_replication_command(const char *cmd_string) case T_StartReplicationCmd: { StartReplicationCmd *cmd = (StartReplicationCmd *) cmd_node; + if (cmd->kind == REPLICATION_KIND_PHYSICAL) StartReplication(cmd); else @@ -1472,7 +1474,8 @@ ProcessStandbyMessage(void) static void PhysicalConfirmReceivedLocation(XLogRecPtr lsn) { - bool changed = false; + bool changed = false; + /* use volatile pointer to prevent code rearrangement */ volatile ReplicationSlot *slot = MyReplicationSlot; @@ -1492,9 +1495,9 @@ PhysicalConfirmReceivedLocation(XLogRecPtr lsn) } /* - * One could argue that the slot should be saved to disk now, but that'd be - * energy wasted - the worst lost information can do here is give us wrong - * information in a statistics view - we'll just potentially be more + * One could argue that the slot should be saved to disk now, but that'd + * be energy wasted - the worst lost information can do here is give us + * wrong information in a statistics view - we'll just potentially be more * conservative in removing files. */ } @@ -1561,15 +1564,16 @@ ProcessStandbyReplyMessage(void) static void PhysicalReplicationSlotNewXmin(TransactionId feedbackXmin) { - bool changed = false; + bool changed = false; volatile ReplicationSlot *slot = MyReplicationSlot; SpinLockAcquire(&slot->mutex); MyPgXact->xmin = InvalidTransactionId; + /* - * For physical replication we don't need the interlock provided - * by xmin and effective_xmin since the consequences of a missed increase - * are limited to query cancellations, so set both at once. + * For physical replication we don't need the interlock provided by xmin + * and effective_xmin since the consequences of a missed increase are + * limited to query cancellations, so set both at once. */ if (!TransactionIdIsNormal(slot->data.xmin) || !TransactionIdIsNormal(feedbackXmin) || @@ -1655,7 +1659,7 @@ ProcessStandbyHSFeedbackMessage(void) * perhaps far enough to make feedbackXmin wrap around. 
In that case the * xmin we set here would be "in the future" and have no effect. No point * in worrying about this since it's too late to save the desired data - * anyway. Assuming that the standby sends us an increasing sequence of + * anyway. Assuming that the standby sends us an increasing sequence of * xmins, this could only happen during the first reply cycle, else our * own xmin would prevent nextXid from advancing so far. * @@ -1667,11 +1671,11 @@ ProcessStandbyHSFeedbackMessage(void) * * If we're using a replication slot we reserve the xmin via that, * otherwise via the walsender's PGXACT entry. - + * * XXX: It might make sense to introduce ephemeral slots and always use * the slot mechanism. */ - if (MyReplicationSlot != NULL) /* XXX: persistency configurable? */ + if (MyReplicationSlot != NULL) /* XXX: persistency configurable? */ PhysicalReplicationSlotNewXmin(feedbackXmin); else MyPgXact->xmin = feedbackXmin; @@ -1692,8 +1696,8 @@ WalSndComputeSleeptime(TimestampTz now) if (wal_sender_timeout > 0) { TimestampTz wakeup_time; - long sec_to_timeout; - int microsec_to_timeout; + long sec_to_timeout; + int microsec_to_timeout; /* * At the latest stop sleeping once wal_sender_timeout has been @@ -1703,13 +1707,13 @@ WalSndComputeSleeptime(TimestampTz now) wal_sender_timeout); /* - * If no ping has been sent yet, wakeup when it's time to do - * so. WalSndKeepaliveIfNecessary() wants to send a keepalive once - * half of the timeout passed without a response. + * If no ping has been sent yet, wakeup when it's time to do so. + * WalSndKeepaliveIfNecessary() wants to send a keepalive once half of + * the timeout passed without a response. */ if (!waiting_for_ping_response) wakeup_time = TimestampTzPlusMilliseconds(last_reply_timestamp, - wal_sender_timeout / 2); + wal_sender_timeout / 2); /* Compute relative time until wakeup. */ TimestampDifference(now, wakeup_time, @@ -1738,11 +1742,11 @@ WalSndCheckTimeOut(TimestampTz now) { /* * Since typically expiration of replication timeout means - * communication problem, we don't send the error message to - * the standby. + * communication problem, we don't send the error message to the + * standby. */ ereport(COMMERROR, - (errmsg("terminating walsender process due to replication timeout"))); + (errmsg("terminating walsender process due to replication timeout"))); WalSndShutdown(); } @@ -1770,7 +1774,7 @@ WalSndLoop(WalSndSendDataCallback send_data) */ for (;;) { - TimestampTz now; + TimestampTz now; /* * Emergency bailout if postmaster has died. This is to avoid the @@ -1839,10 +1843,10 @@ WalSndLoop(WalSndSendDataCallback send_data) /* * When SIGUSR2 arrives, we send any outstanding logs up to the - * shutdown checkpoint record (i.e., the latest record), wait - * for them to be replicated to the standby, and exit. - * This may be a normal termination at shutdown, or a promotion, - * the walsender is not sure which. + * shutdown checkpoint record (i.e., the latest record), wait for + * them to be replicated to the standby, and exit. This may be a + * normal termination at shutdown, or a promotion, the walsender + * is not sure which. */ if (walsender_ready_to_stop) WalSndDone(send_data); @@ -2246,7 +2250,7 @@ XLogSendPhysical(void) * * Attempt to send all data that's already been written out and * fsync'd to disk. We cannot go further than what's been written out - * given the current implementation of XLogRead(). And in any case + * given the current implementation of XLogRead(). 
And in any case * it's unsafe to send WAL that is not securely down to disk on the * master: if the master subsequently crashes and restarts, slaves * must not have applied any WAL that gets lost on the master. @@ -2416,8 +2420,8 @@ XLogSendLogical(void) else { /* - * If the record we just wanted read is at or beyond the flushed point, - * then we're caught up. + * If the record we just wanted read is at or beyond the flushed + * point, then we're caught up. */ if (logical_decoding_ctx->reader->EndRecPtr >= GetFlushRecPtr()) WalSndCaughtUp = true; @@ -2452,10 +2456,10 @@ WalSndDone(WalSndSendDataCallback send_data) send_data(); /* - * Check a write location to see whether all the WAL have - * successfully been replicated if this walsender is connecting - * to a standby such as pg_receivexlog which always returns - * an invalid flush location. Otherwise, check a flush location. + * Check a write location to see whether all the WAL have successfully + * been replicated if this walsender is connecting to a standby such as + * pg_receivexlog which always returns an invalid flush location. + * Otherwise, check a flush location. */ replicatedPtr = XLogRecPtrIsInvalid(MyWalSnd->flush) ? MyWalSnd->write : MyWalSnd->flush; @@ -2562,8 +2566,8 @@ WalSndLastCycleHandler(SIGNAL_ARGS) /* * If replication has not yet started, die like with SIGTERM. If * replication is active, only set a flag and wake up the main loop. It - * will send any outstanding WAL, wait for it to be replicated to - * the standby, and then exit gracefully. + * will send any outstanding WAL, wait for it to be replicated to the + * standby, and then exit gracefully. */ if (!replication_active) kill(MyProcPid, SIGTERM); diff --git a/src/backend/rewrite/rewriteDefine.c b/src/backend/rewrite/rewriteDefine.c index 1fb6b692257..50ecf7e884c 100644 --- a/src/backend/rewrite/rewriteDefine.c +++ b/src/backend/rewrite/rewriteDefine.c @@ -202,7 +202,7 @@ DefineRule(RuleStmt *stmt, const char *queryString) transformRuleStmt(stmt, queryString, &actions, &whereClause); /* - * Find and lock the relation. Lock level should match + * Find and lock the relation. Lock level should match * DefineQueryRewrite. */ relId = RangeVarGetRelid(stmt->relation, AccessExclusiveLock, false); @@ -357,7 +357,7 @@ DefineQueryRewrite(char *rulename, RelationGetDescr(event_relation), true, event_relation->rd_rel->relkind != - RELKIND_MATVIEW); + RELKIND_MATVIEW); /* * ... there must not be another ON SELECT rule already ... @@ -409,7 +409,7 @@ DefineQueryRewrite(char *rulename, * * If so, check that the relation is empty because the storage for the * relation is going to be deleted. Also insist that the rel not have - * any triggers, indexes, or child tables. (Note: these tests are too + * any triggers, indexes, or child tables. (Note: these tests are too * strict, because they will reject relations that once had such but * don't anymore. But we don't really care, because this whole * business of converting relations to views is just a kluge to allow @@ -712,7 +712,7 @@ checkRuleResultList(List *targetList, TupleDesc resultDesc, bool isSelect, * Note: for a view (ON SELECT rule), the checkAsUser field of the OLD * RTE entry will be overridden when the view rule is expanded, and the * checkAsUser field of the NEW entry is irrelevant because that entry's - * requiredPerms bits will always be zero. However, for other types of rules + * requiredPerms bits will always be zero. However, for other types of rules * it's important to set these fields to match the rule owner. 
So we just set * them always. */ diff --git a/src/backend/rewrite/rewriteHandler.c b/src/backend/rewrite/rewriteHandler.c index caed8caee6b..e6c553068c7 100644 --- a/src/backend/rewrite/rewriteHandler.c +++ b/src/backend/rewrite/rewriteHandler.c @@ -209,7 +209,7 @@ AcquireRewriteLocks(Query *parsetree, /* * The elements of an alias list have to refer to * earlier RTEs of the same rtable, because that's the - * order the planner builds things in. So we already + * order the planner builds things in. So we already * processed the referenced RTE, and so it's safe to * use get_rte_attribute_is_dropped on it. (This might * not hold after rewriting or planning, but it's OK @@ -371,7 +371,7 @@ rewriteRuleAction(Query *parsetree, /* * Generate expanded rtable consisting of main parsetree's rtable plus * rule action's rtable; this becomes the complete rtable for the rule - * action. Some of the entries may be unused after we finish rewriting, + * action. Some of the entries may be unused after we finish rewriting, * but we leave them all in place for two reasons: * * We'd have a much harder job to adjust the query's varnos if we @@ -437,7 +437,7 @@ rewriteRuleAction(Query *parsetree, * that if the rule action refers to OLD, its jointree will add a * reference to rt_index. If the rule action doesn't refer to OLD, but * either the rule_qual or the user query quals do, then we need to keep - * the original rtindex in the jointree to provide data for the quals. We + * the original rtindex in the jointree to provide data for the quals. We * don't want the original rtindex to be joined twice, however, so avoid * keeping it if the rule action mentions it. * @@ -459,7 +459,7 @@ rewriteRuleAction(Query *parsetree, { /* * If sub_action is a setop, manipulating its jointree will do no - * good at all, because the jointree is dummy. (Perhaps someday + * good at all, because the jointree is dummy. (Perhaps someday * we could push the joining and quals down to the member * statements of the setop?) */ @@ -668,7 +668,7 @@ adjustJoinTreeList(Query *parsetree, bool removert, int rt_index) * then junk fields (these in no particular order). * * We must do items 1,2,3 before firing rewrite rules, else rewritten - * references to NEW.foo will produce wrong or incomplete results. Item 4 + * references to NEW.foo will produce wrong or incomplete results. Item 4 * is not needed for rewriting, but will be needed by the planner, and we * can do it essentially for free while handling the other items. * @@ -876,7 +876,7 @@ process_matched_tle(TargetEntry *src_tle, } /*---------- - * Multiple assignments to same attribute. Allow only if all are + * Multiple assignments to same attribute. Allow only if all are * FieldStore or ArrayRef assignment operations. This is a bit * tricky because what we may actually be looking at is a nest of * such nodes; consider @@ -894,7 +894,7 @@ process_matched_tle(TargetEntry *src_tle, * assignments appear to occur left-to-right. * * For FieldStore, instead of nesting we can generate a single - * FieldStore with multiple target fields. We must nest when + * FieldStore with multiple target fields. We must nest when * ArrayRefs are involved though. *---------- */ @@ -1186,7 +1186,7 @@ rewriteValuesRTE(RangeTblEntry *rte, Relation target_relation, List *attrnos) * rewriteTargetListUD - rewrite UPDATE/DELETE targetlist as needed * * This function adds a "junk" TLE that is needed to allow the executor to - * find the original row for the update or delete. 
When the target relation + * find the original row for the update or delete. When the target relation * is a regular table, the junk TLE emits the ctid attribute of the original * row. When the target relation is a view, there is no ctid, so we instead * emit a whole-row Var that will contain the "old" values of the view row. @@ -1375,9 +1375,9 @@ ApplyRetrieveRule(Query *parsetree, * fine as the result relation. * * For UPDATE/DELETE, we need to expand the view so as to have source - * data for the operation. But we also need an unmodified RTE to + * data for the operation. But we also need an unmodified RTE to * serve as the target. So, copy the RTE and add the copy to the - * rangetable. Note that the copy does not get added to the jointree. + * rangetable. Note that the copy does not get added to the jointree. * Also note that there's a hack in fireRIRrules to avoid calling this * function again when it arrives at the copied RTE. */ @@ -1549,7 +1549,7 @@ markQueryForLocking(Query *qry, Node *jtnode, * in the given tree. * * NOTE: although this has the form of a walker, we cheat and modify the - * SubLink nodes in-place. It is caller's responsibility to ensure that + * SubLink nodes in-place. It is caller's responsibility to ensure that * no unwanted side-effects occur! * * This is unlike most of the other routines that recurse into subselects, @@ -1745,7 +1745,7 @@ fireRIRrules(Query *parsetree, List *activeRIRs, bool forUpdatePushedDown) * not just "NOT x" which the planner is much smarter about, else we will * do the wrong thing when the qual evaluates to NULL.) * - * The rule_qual may contain references to OLD or NEW. OLD references are + * The rule_qual may contain references to OLD or NEW. OLD references are * replaced by references to the specified rt_index (the relation that the * rule applies to). NEW references are only possible for INSERT and UPDATE * queries on the relation itself, and so they should be replaced by copies @@ -1818,7 +1818,7 @@ CopyAndAddInvertedQual(Query *parsetree, * rows that the qualified action doesn't act on. (If there are multiple * qualified INSTEAD rules, we AND all the negated quals onto a single * modified original query.) We won't execute the original, unmodified - * query if we find either qualified or unqualified INSTEAD rules. If + * query if we find either qualified or unqualified INSTEAD rules. If * we find both, the modified original query is discarded too. */ static List * @@ -2174,8 +2174,8 @@ view_cols_are_auto_updatable(Query *viewquery, ListCell *cell; /* - * The caller should have verified that this view is auto-updatable and - * so there should be a single base relation. + * The caller should have verified that this view is auto-updatable and so + * there should be a single base relation. */ Assert(list_length(viewquery->jointree->fromlist) == 1); rtr = (RangeTblRef *) linitial(viewquery->jointree->fromlist); @@ -2212,7 +2212,7 @@ view_cols_are_auto_updatable(Query *viewquery, } } - return NULL; /* all the required view columns are updatable */ + return NULL; /* all the required view columns are updatable */ } @@ -2227,7 +2227,7 @@ view_cols_are_auto_updatable(Query *viewquery, * updatability. * * This is used for the information_schema views, which have separate concepts - * of "updatable" and "trigger updatable". A relation is "updatable" if it + * of "updatable" and "trigger updatable". 
A relation is "updatable" if it * can be updated without the need for triggers (either because it has a * suitable RULE, or because it is simple enough to be automatically updated). * A relation is "trigger updatable" if it has a suitable INSTEAD OF trigger. @@ -2239,7 +2239,7 @@ view_cols_are_auto_updatable(Query *viewquery, * to have trigger updatability included in the result. * * The return value is a bitmask of rule event numbers indicating which of - * the INSERT, UPDATE and DELETE operations are supported. (We do it this way + * the INSERT, UPDATE and DELETE operations are supported. (We do it this way * so that we can test for UPDATE plus DELETE support in a single call.) */ int @@ -2354,9 +2354,9 @@ relation_is_updatable(Oid reloid, /* * Determine which of the view's columns are updatable. If there - * are none within the set of columns we are looking at, then - * the view doesn't support INSERT/UPDATE, but it may still - * support DELETE. + * are none within the set of columns we are looking at, then the + * view doesn't support INSERT/UPDATE, but it may still support + * DELETE. */ view_cols_are_auto_updatable(viewquery, NULL, &updatable_cols, NULL); @@ -2365,9 +2365,9 @@ relation_is_updatable(Oid reloid, updatable_cols = bms_int_members(updatable_cols, include_cols); if (bms_is_empty(updatable_cols)) - auto_events = (1 << CMD_DELETE); /* May support DELETE */ + auto_events = (1 << CMD_DELETE); /* May support DELETE */ else - auto_events = ALL_EVENTS; /* May support all events */ + auto_events = ALL_EVENTS; /* May support all events */ /* * The base relation must also support these update commands. @@ -2476,7 +2476,7 @@ adjust_view_column_set(Bitmapset *cols, List *targetlist) * the view's base relation becomes the target relation. * * Note that the base relation here may itself be a view, which may or may not - * have INSTEAD OF triggers or rules to handle the update. That is handled by + * have INSTEAD OF triggers or rules to handle the update. That is handled by * the recursion in RewriteQuery. */ static Query * @@ -2573,18 +2573,18 @@ rewriteTargetView(Query *parsetree, Relation view) case CMD_INSERT: ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot insert into column \"%s\" of view \"%s\"", - non_updatable_col, - RelationGetRelationName(view)), - errdetail_internal("%s", _(auto_update_detail)))); + errmsg("cannot insert into column \"%s\" of view \"%s\"", + non_updatable_col, + RelationGetRelationName(view)), + errdetail_internal("%s", _(auto_update_detail)))); break; case CMD_UPDATE: ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot update column \"%s\" of view \"%s\"", - non_updatable_col, - RelationGetRelationName(view)), - errdetail_internal("%s", _(auto_update_detail)))); + errmsg("cannot update column \"%s\" of view \"%s\"", + non_updatable_col, + RelationGetRelationName(view)), + errdetail_internal("%s", _(auto_update_detail)))); break; default: elog(ERROR, "unrecognized CmdType: %d", @@ -2688,7 +2688,7 @@ rewriteTargetView(Query *parsetree, Relation view) * that does not correspond to what happens in ordinary SELECT usage of a * view: all referenced columns must have read permission, even if * optimization finds that some of them can be discarded during query - * transformation. The flattening we're doing here is an optional + * transformation. The flattening we're doing here is an optional * optimization, too. 
(If you are unpersuaded and want to change this, * note that applying adjust_view_column_set to view_rte->selectedCols is * clearly *not* the right answer, since that neglects base-rel columns @@ -2703,8 +2703,8 @@ rewriteTargetView(Query *parsetree, Relation view) /* * Move any security barrier quals from the view RTE onto the new target - * RTE. Any such quals should now apply to the new target RTE and will not - * reference the original view RTE in the rewritten query. + * RTE. Any such quals should now apply to the new target RTE and will + * not reference the original view RTE in the rewritten query. */ new_rte->securityQuals = view_rte->securityQuals; view_rte->securityQuals = NIL; @@ -2790,8 +2790,8 @@ rewriteTargetView(Query *parsetree, Relation view) * we did with the view targetlist). * * Note that there is special-case handling for the quals of a security - * barrier view, since they need to be kept separate from any user-supplied - * quals, so these quals are kept on the new target RTE. + * barrier view, since they need to be kept separate from any + * user-supplied quals, so these quals are kept on the new target RTE. * * For INSERT, the view's quals can be ignored in the main query. */ @@ -2836,13 +2836,14 @@ rewriteTargetView(Query *parsetree, Relation view) * If the parent view has a cascaded check option, treat this view as * if it also had a cascaded check option. * - * New WithCheckOptions are added to the start of the list, so if there - * is a cascaded check option, it will be the first item in the list. + * New WithCheckOptions are added to the start of the list, so if + * there is a cascaded check option, it will be the first item in the + * list. */ if (parsetree->withCheckOptions != NIL) { WithCheckOption *parent_wco = - (WithCheckOption *) linitial(parsetree->withCheckOptions); + (WithCheckOption *) linitial(parsetree->withCheckOptions); if (parent_wco->cascaded) { @@ -3089,7 +3090,7 @@ RewriteQuery(Query *parsetree, List *rewrite_events) /* * At this point product_queries contains any DO ALSO rule - * actions. Add the rewritten query before or after those. This + * actions. Add the rewritten query before or after those. This * must match the handling the original query would have gotten * below, if we allowed it to be included again. */ @@ -3309,7 +3310,7 @@ QueryRewrite(Query *parsetree) * * If the original query is still in the list, it sets the command tag. * Otherwise, the last INSTEAD query of the same kind as the original is - * allowed to set the tag. (Note these rules can leave us with no query + * allowed to set the tag. (Note these rules can leave us with no query * setting the tag. The tcop code has to cope with this by setting up a * default tag based on the original un-rewritten query.) * diff --git a/src/backend/rewrite/rewriteManip.c b/src/backend/rewrite/rewriteManip.c index 1829c76bad1..bcf3bd9243a 100644 --- a/src/backend/rewrite/rewriteManip.c +++ b/src/backend/rewrite/rewriteManip.c @@ -58,7 +58,7 @@ static Relids adjust_relid_set(Relids relids, int oldrelid, int newrelid); * specified query level. * * The objective of this routine is to detect whether there are aggregates - * belonging to the given query level. Aggregates belonging to subqueries + * belonging to the given query level. Aggregates belonging to subqueries * or outer queries do NOT cause a true result. We must recurse into * subqueries to detect outer-reference aggregates that logically belong to * the specified query level. 
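The contain_aggs_of_level() comments just above describe a pattern that is easier to see in isolation: a tree walker carries a query-level counter, only aggregates whose level matches the level of interest count as hits, and descending into a subquery shifts the counter. A minimal sketch of that bookkeeping, using made-up toy node types rather than PostgreSQL's Node/Query machinery, could look like:

/*
 * Toy walker illustrating the sublevels_up bookkeeping described above.
 * Node types and the fixed child array are invented for the example.
 */
#include <stdbool.h>
#include <stddef.h>

typedef enum { NODE_AGGREF, NODE_SUBQUERY, NODE_OTHER } ToyNodeTag;

typedef struct ToyNode
{
	ToyNodeTag	tag;
	int			agglevelsup;	/* meaningful for NODE_AGGREF only */
	struct ToyNode *children[4];	/* unused slots are NULL */
} ToyNode;

static bool
contains_aggs_of_level(const ToyNode *node, int sublevels_up)
{
	int			i;

	if (node == NULL)
		return false;

	if (node->tag == NODE_AGGREF)
		return node->agglevelsup == sublevels_up;

	/* entering a subquery shifts which level counts as "this" level */
	if (node->tag == NODE_SUBQUERY)
		sublevels_up++;

	for (i = 0; i < 4; i++)
		if (contains_aggs_of_level(node->children[i], sublevels_up))
			return true;

	return false;
}

The real walker routes the recursion through PostgreSQL's generic tree-walker machinery instead of a hand-rolled child loop, but the level accounting is the same.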
@@ -113,7 +113,7 @@ contain_aggs_of_level_walker(Node *node, * Find the parse location of any aggregate of the specified query level. * * Returns -1 if no such agg is in the querytree, or if they all have - * unknown parse location. (The former case is probably caller error, + * unknown parse location. (The former case is probably caller error, * but we don't bother to distinguish it from the latter case.) * * Note: it might seem appropriate to merge this functionality into @@ -208,7 +208,7 @@ contain_windowfuncs_walker(Node *node, void *context) * Find the parse location of any windowfunc of the current query level. * * Returns -1 if no such windowfunc is in the querytree, or if they all have - * unknown parse location. (The former case is probably caller error, + * unknown parse location. (The former case is probably caller error, * but we don't bother to distinguish it from the latter case.) * * Note: it might seem appropriate to merge this functionality into @@ -287,11 +287,11 @@ checkExprHasSubLink_walker(Node *node, void *context) * * Find all Var nodes in the given tree with varlevelsup == sublevels_up, * and increment their varno fields (rangetable indexes) by 'offset'. - * The varnoold fields are adjusted similarly. Also, adjust other nodes + * The varnoold fields are adjusted similarly. Also, adjust other nodes * that contain rangetable indexes, such as RangeTblRef and JoinExpr. * * NOTE: although this has the form of a walker, we cheat and modify the - * nodes in-place. The given expression tree should have been copied + * nodes in-place. The given expression tree should have been copied * earlier to ensure that no unwanted side-effects occur! */ @@ -449,11 +449,11 @@ offset_relid_set(Relids relids, int offset) * * Find all Var nodes in the given tree belonging to a specific relation * (identified by sublevels_up and rt_index), and change their varno fields - * to 'new_index'. The varnoold fields are changed too. Also, adjust other + * to 'new_index'. The varnoold fields are changed too. Also, adjust other * nodes that contain rangetable indexes, such as RangeTblRef and JoinExpr. * * NOTE: although this has the form of a walker, we cheat and modify the - * nodes in-place. The given expression tree should have been copied + * nodes in-place. The given expression tree should have been copied * earlier to ensure that no unwanted side-effects occur! */ @@ -646,7 +646,7 @@ adjust_relid_set(Relids relids, int oldrelid, int newrelid) * Likewise for other nodes containing levelsup fields, such as Aggref. * * NOTE: although this has the form of a walker, we cheat and modify the - * Var nodes in-place. The given expression tree should have been copied + * Var nodes in-place. The given expression tree should have been copied * earlier to ensure that no unwanted side-effects occur! */ @@ -1157,7 +1157,7 @@ replace_rte_variables_mutator(Node *node, * If the expression tree contains a whole-row Var for the target RTE, * the Var is not changed but *found_whole_row is returned as TRUE. * For most callers this is an error condition, but we leave it to the caller - * to report the error so that useful context can be provided. (In some + * to report the error so that useful context can be provided. (In some * usages it would be appropriate to modify the Var's vartype and insert a * ConvertRowtypeExpr node to map back to the original vartype. We might * someday extend this function's API to support that. 
For now, the only diff --git a/src/backend/rewrite/rewriteSupport.c b/src/backend/rewrite/rewriteSupport.c index c107587da40..eb54d5ce6db 100644 --- a/src/backend/rewrite/rewriteSupport.c +++ b/src/backend/rewrite/rewriteSupport.c @@ -122,7 +122,7 @@ get_rewrite_oid(Oid relid, const char *rulename, bool missing_ok) * Find rule oid, given only a rule name but no rel OID. * * If there's more than one, it's an error. If there aren't any, that's an - * error, too. In general, this should be avoided - it is provided to support + * error, too. In general, this should be avoided - it is provided to support * syntax that is compatible with pre-7.3 versions of PG, where rule names * were unique across the entire database. */ diff --git a/src/backend/storage/buffer/buf_init.c b/src/backend/storage/buffer/buf_init.c index e1872426661..e03394c08bc 100644 --- a/src/backend/storage/buffer/buf_init.c +++ b/src/backend/storage/buffer/buf_init.c @@ -44,7 +44,7 @@ int32 *PrivateRefCount; * * IO_IN_PROGRESS -- this is a flag in the buffer descriptor. * It must be set when an IO is initiated and cleared at - * the end of the IO. It is there to make sure that one + * the end of the IO. It is there to make sure that one * process doesn't start to use a buffer while another is * faulting it in. see WaitIO and related routines. * @@ -54,7 +54,7 @@ int32 *PrivateRefCount; * * PrivateRefCount -- Each buffer also has a private refcount that keeps * track of the number of times the buffer is pinned in the current - * process. This is used for two purposes: first, if we pin a + * process. This is used for two purposes: first, if we pin a * a buffer more than once, we only need to change the shared refcount * once, thus only lock the shared state once; second, when a transaction * aborts, it should only unpin the buffers exactly the number of times it diff --git a/src/backend/storage/buffer/buf_table.c b/src/backend/storage/buffer/buf_table.c index bdbfea4c72a..7a38f2f1509 100644 --- a/src/backend/storage/buffer/buf_table.c +++ b/src/backend/storage/buffer/buf_table.c @@ -3,7 +3,7 @@ * buf_table.c * routines for mapping BufferTags to buffer indexes. * - * Note: the routines in this file do no locking of their own. The caller + * Note: the routines in this file do no locking of their own. The caller * must hold a suitable lock on the appropriate BufMappingLock, as specified * in the comments. We can't do the locking inside these functions because * in most cases the caller needs to adjust the buffer header contents @@ -112,7 +112,7 @@ BufTableLookup(BufferTag *tagPtr, uint32 hashcode) * Insert a hashtable entry for given tag and buffer ID, * unless an entry already exists for that tag * - * Returns -1 on successful insertion. If a conflicting entry exists + * Returns -1 on successful insertion. If a conflicting entry exists * already, returns the buffer ID in that entry. * * Caller must hold exclusive lock on BufMappingLock for tag's partition diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c index 246f31bfe14..c0702789446 100644 --- a/src/backend/storage/buffer/bufmgr.c +++ b/src/backend/storage/buffer/bufmgr.c @@ -116,7 +116,7 @@ static int rnode_comparator(const void *p1, const void *p2); * PrefetchBuffer -- initiate asynchronous read of a block of a relation * * This is named by analogy to ReadBuffer but doesn't actually allocate a - * buffer. Instead it tries to ensure that a future ReadBuffer for the given + * buffer. 
Instead it tries to ensure that a future ReadBuffer for the given * block will not be delayed by the I/O. Prefetching is optional. * No-op if prefetching isn't compiled in. */ @@ -206,7 +206,7 @@ ReadBuffer(Relation reln, BlockNumber blockNum) * Assume when this function is called, that reln has been opened already. * * In RBM_NORMAL mode, the page is read from disk, and the page header is - * validated. An error is thrown if the page header is not valid. (But + * validated. An error is thrown if the page header is not valid. (But * note that an all-zero page is considered "valid"; see PageIsVerified().) * * RBM_ZERO_ON_ERROR is like the normal mode, but if the page header is not @@ -214,7 +214,7 @@ ReadBuffer(Relation reln, BlockNumber blockNum) * for non-critical data, where the caller is prepared to repair errors. * * In RBM_ZERO mode, if the page isn't in buffer cache already, it's filled - * with zeros instead of reading it from disk. Useful when the caller is + * with zeros instead of reading it from disk. Useful when the caller is * going to fill the page from scratch, since this saves I/O and avoids * unnecessary failure if the page-on-disk has corrupt page headers. * Caution: do not use this mode to read a page that is beyond the relation's @@ -371,7 +371,7 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum, * This can happen because mdread doesn't complain about reads beyond * EOF (when zero_damaged_pages is ON) and so a previous attempt to * read a block beyond EOF could have left a "valid" zero-filled - * buffer. Unfortunately, we have also seen this case occurring + * buffer. Unfortunately, we have also seen this case occurring * because of buggy Linux kernels that sometimes return an * lseek(SEEK_END) result that doesn't account for a recent write. In * that situation, the pre-existing buffer would contain valid data @@ -597,7 +597,7 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum, /* * Didn't find it in the buffer pool. We'll have to initialize a new - * buffer. Remember to unlock the mapping lock while doing the work. + * buffer. Remember to unlock the mapping lock while doing the work. */ LWLockRelease(newPartitionLock); @@ -607,7 +607,7 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum, bool lock_held; /* - * Select a victim buffer. The buffer is returned with its header + * Select a victim buffer. The buffer is returned with its header * spinlock still held! Also (in most cases) the BufFreelistLock is * still held, since it would be bad to hold the spinlock while * possibly waking up other processes. @@ -656,7 +656,7 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum, * If using a nondefault strategy, and writing the buffer * would require a WAL flush, let the strategy decide whether * to go ahead and write/reuse the buffer or to choose another - * victim. We need lock to inspect the page LSN, so this + * victim. We need lock to inspect the page LSN, so this * can't be done inside StrategyGetBuffer. */ if (strategy != NULL) @@ -786,7 +786,7 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum, { /* * We can only get here if (a) someone else is still reading - * in the page, or (b) a previous read attempt failed. We + * in the page, or (b) a previous read attempt failed. We * have to wait for any active read attempt to finish, and * then set up our own read attempt if the page is still not * BM_VALID. StartBufferIO does it all. 
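The buf_init.c and bufmgr.c comments above explain the two-level pin accounting: each backend keeps a private refcount per buffer, so the shared, lock-protected refcount only has to change on the first local pin and the final local unpin. A simplified, self-contained sketch of just that idea (fixed-size arrays, no spinlocks, invented names; not the real bufmgr code) is:

/*
 * Two-level pin counting: the per-backend array tracks how many times
 * *this* backend pinned a buffer, so the shared refcount is touched
 * only on the first pin and the last unpin.
 */
#include <assert.h>

#define N_BUFFERS 16

typedef struct
{
	int			shared_refcount;	/* normally protected by a spinlock */
} SharedBufferDescSketch;

static SharedBufferDescSketch shared_bufs[N_BUFFERS];
static int	private_refcount[N_BUFFERS];	/* this backend's pins only */

static void
pin_buffer(int buf_id)
{
	if (private_refcount[buf_id] == 0)
		shared_bufs[buf_id].shared_refcount++;	/* first local pin */
	private_refcount[buf_id]++;
}

static void
unpin_buffer(int buf_id)
{
	assert(private_refcount[buf_id] > 0);
	private_refcount[buf_id]--;
	if (private_refcount[buf_id] == 0)
		shared_bufs[buf_id].shared_refcount--;	/* last local unpin */
}

Everything else in the hunks above, such as victim selection and the BM_VALID/IO_IN_PROGRESS handshake via StartBufferIO, sits on top of this basic counting.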
@@ -879,7 +879,7 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum, * This is used only in contexts such as dropping a relation. We assume * that no other backend could possibly be interested in using the page, * so the only reason the buffer might be pinned is if someone else is - * trying to write it out. We have to let them finish before we can + * trying to write it out. We have to let them finish before we can * reclaim the buffer. * * The buffer could get reclaimed by someone else while we are waiting @@ -978,7 +978,7 @@ retry: * * Marks buffer contents as dirty (actual write happens later). * - * Buffer must be pinned and exclusive-locked. (If caller does not hold + * Buffer must be pinned and exclusive-locked. (If caller does not hold * exclusive lock, then somebody could be in process of writing the buffer, * leading to risk of bad data written to disk.) */ @@ -1027,7 +1027,7 @@ MarkBufferDirty(Buffer buffer) * * Formerly, this saved one cycle of acquiring/releasing the BufMgrLock * compared to calling the two routines separately. Now it's mainly just - * a convenience function. However, if the passed buffer is valid and + * a convenience function. However, if the passed buffer is valid and * already contains the desired block, we just return it as-is; and that * does save considerable work compared to a full release and reacquire. * @@ -1079,7 +1079,7 @@ ReleaseAndReadBuffer(Buffer buffer, * when we first pin it; for other strategies we just make sure the usage_count * isn't zero. (The idea of the latter is that we don't want synchronized * heap scans to inflate the count, but we need it to not be zero to discourage - * other backends from stealing buffers from our ring. As long as we cycle + * other backends from stealing buffers from our ring. As long as we cycle * through the ring faster than the global clock-sweep cycles, buffers in * our ring won't be chosen as victims for replacement by other backends.) * @@ -1087,7 +1087,7 @@ ReleaseAndReadBuffer(Buffer buffer, * * Note that ResourceOwnerEnlargeBuffers must have been done already. * - * Returns TRUE if buffer is BM_VALID, else FALSE. This provision allows + * Returns TRUE if buffer is BM_VALID, else FALSE. This provision allows * some callers to avoid an extra spinlock cycle. */ static bool @@ -1241,7 +1241,7 @@ BufferSync(int flags) * have the flag set. * * Note that if we fail to write some buffer, we may leave buffers with - * BM_CHECKPOINT_NEEDED still set. This is OK since any such buffer would + * BM_CHECKPOINT_NEEDED still set. This is OK since any such buffer would * certainly need to be written for the next checkpoint attempt, too. */ num_to_write = 0; @@ -1344,7 +1344,7 @@ BufferSync(int flags) * This is called periodically by the background writer process. * * Returns true if it's appropriate for the bgwriter process to go into - * low-power hibernation mode. (This happens if the strategy clock sweep + * low-power hibernation mode. (This happens if the strategy clock sweep * has been "lapped" and no buffer allocations have occurred recently, * or if the bgwriter has been effectively disabled by setting * bgwriter_lru_maxpages to 0.) @@ -2110,7 +2110,7 @@ BufferGetLSNAtomic(Buffer buffer) * specified relation fork that have block numbers >= firstDelBlock. * (In particular, with firstDelBlock = 0, all pages are removed.) * Dirty pages are simply dropped, without bothering to write them - * out first. Therefore, this is NOT rollback-able, and so should be + * out first. 
Therefore, this is NOT rollback-able, and so should be * used only with extreme caution! * * Currently, this is called only from smgr.c when the underlying file @@ -2119,7 +2119,7 @@ BufferGetLSNAtomic(Buffer buffer) * be deleted momentarily anyway, and there is no point in writing it. * It is the responsibility of higher-level code to ensure that the * deletion or truncation does not lose any data that could be needed - * later. It is also the responsibility of higher-level code to ensure + * later. It is also the responsibility of higher-level code to ensure * that no other process could be trying to load more pages of the * relation into buffers. * @@ -2281,9 +2281,9 @@ DropRelFileNodesAllBuffers(RelFileNodeBackend *rnodes, int nnodes) * * This function removes all the buffers in the buffer cache for a * particular database. Dirty pages are simply dropped, without - * bothering to write them out first. This is used when we destroy a + * bothering to write them out first. This is used when we destroy a * database, to avoid trying to flush data to disk when the directory - * tree no longer exists. Implementation is pretty similar to + * tree no longer exists. Implementation is pretty similar to * DropRelFileNodeBuffers() which is for destroying just one relation. * -------------------------------------------------------------------- */ diff --git a/src/backend/storage/buffer/freelist.c b/src/backend/storage/buffer/freelist.c index 42afac6925e..4befab0e1ad 100644 --- a/src/backend/storage/buffer/freelist.c +++ b/src/backend/storage/buffer/freelist.c @@ -36,7 +36,7 @@ typedef struct */ /* - * Statistics. These counters should be wide enough that they can't + * Statistics. These counters should be wide enough that they can't * overflow during a single bgwriter cycle. */ uint32 completePasses; /* Complete cycles of the clock sweep */ @@ -135,7 +135,7 @@ StrategyGetBuffer(BufferAccessStrategy strategy, bool *lock_held) /* * We count buffer allocation requests so that the bgwriter can estimate - * the rate of buffer consumption. Note that buffers recycled by a + * the rate of buffer consumption. Note that buffers recycled by a * strategy object are intentionally not counted here. */ StrategyControl->numBufferAllocs++; @@ -266,7 +266,7 @@ StrategyFreeBuffer(volatile BufferDesc *buf) * * In addition, we return the completed-pass count (which is effectively * the higher-order bits of nextVictimBuffer) and the count of recent buffer - * allocs if non-NULL pointers are passed. The alloc count is reset after + * allocs if non-NULL pointers are passed. The alloc count is reset after * being read. */ int @@ -291,7 +291,7 @@ StrategySyncStart(uint32 *complete_passes, uint32 *num_buf_alloc) * StrategyNotifyBgWriter -- set or clear allocation notification latch * * If bgwriterLatch isn't NULL, the next invocation of StrategyGetBuffer will - * set that latch. Pass NULL to clear the pending notification before it + * set that latch. Pass NULL to clear the pending notification before it * happens. This feature is used by the bgwriter process to wake itself up * from hibernation, and is not meant for anybody else to use. */ @@ -484,7 +484,7 @@ GetBufferFromRing(BufferAccessStrategy strategy) /* * If the slot hasn't been filled yet, tell the caller to allocate a new - * buffer with the normal allocation strategy. He will then fill this + * buffer with the normal allocation strategy. He will then fill this * slot by calling AddBufferToRing with the new buffer. 
*/ bufnum = strategy->buffers[strategy->current]; @@ -537,7 +537,7 @@ AddBufferToRing(BufferAccessStrategy strategy, volatile BufferDesc *buf) * * When a nondefault strategy is used, the buffer manager calls this function * when it turns out that the buffer selected by StrategyGetBuffer needs to - * be written out and doing so would require flushing WAL too. This gives us + * be written out and doing so would require flushing WAL too. This gives us * a chance to choose a different victim. * * Returns true if buffer manager should ask for a new victim, and false diff --git a/src/backend/storage/buffer/localbuf.c b/src/backend/storage/buffer/localbuf.c index 62adc1ce6ba..3135c5cf156 100644 --- a/src/backend/storage/buffer/localbuf.c +++ b/src/backend/storage/buffer/localbuf.c @@ -94,7 +94,7 @@ LocalPrefetchBuffer(SMgrRelation smgr, ForkNumber forkNum, * Find or create a local buffer for the given page of the given relation. * * API is similar to bufmgr.c's BufferAlloc, except that we do not need - * to do any locking since this is all local. Also, IO_IN_PROGRESS + * to do any locking since this is all local. Also, IO_IN_PROGRESS * does not get set. Lastly, we support only default access strategy * (hence, usage_count is always advanced). */ @@ -292,7 +292,7 @@ MarkLocalBufferDirty(Buffer buffer) * specified relation that have block numbers >= firstDelBlock. * (In particular, with firstDelBlock = 0, all pages are removed.) * Dirty pages are simply dropped, without bothering to write them - * out first. Therefore, this is NOT rollback-able, and so should be + * out first. Therefore, this is NOT rollback-able, and so should be * used only with extreme caution! * * See DropRelFileNodeBuffers in bufmgr.c for more notes. @@ -459,7 +459,7 @@ GetLocalBufferStorage(void) /* * We allocate local buffers in a context of their own, so that the * space eaten for them is easily recognizable in MemoryContextStats - * output. Create the context on first use. + * output. Create the context on first use. */ if (LocalBufferContext == NULL) LocalBufferContext = diff --git a/src/backend/storage/file/buffile.c b/src/backend/storage/file/buffile.c index e62d5d916e7..0f007c82122 100644 --- a/src/backend/storage/file/buffile.c +++ b/src/backend/storage/file/buffile.c @@ -29,7 +29,7 @@ * that was current at that time. * * BufFile also supports temporary files that exceed the OS file size limit - * (by opening multiple fd.c temporary files). This is an essential feature + * (by opening multiple fd.c temporary files). This is an essential feature * for sorts and hashjoins on large amounts of data. *------------------------------------------------------------------------- */ @@ -72,7 +72,7 @@ struct BufFile bool dirty; /* does buffer need to be written? */ /* - * resowner is the ResourceOwner to use for underlying temp files. (We + * resowner is the ResourceOwner to use for underlying temp files. (We * don't need to remember the memory context we're using explicitly, * because after creation we only repalloc our arrays larger.) */ @@ -519,7 +519,7 @@ BufFileSeek(BufFile *file, int fileno, off_t offset, int whence) { /* * Seek is to a point within existing buffer; we can just adjust - * pos-within-buffer, without flushing buffer. Note this is OK + * pos-within-buffer, without flushing buffer. Note this is OK * whether reading or writing, but buffer remains dirty if we were * writing. 
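The in-buffer seek shortcut that the BufFileSeek comment describes can be illustrated with a self-contained sketch. Everything below (MiniBufFile, minibuf_seek) is invented for the example and greatly simplified; the point is only that a seek landing inside the loaded buffer just moves the position, leaving the buffer and its dirty flag alone.

/* Illustrative sketch only -- not buffile.c's real structures. */
#include <stdbool.h>
#include <stdio.h>

#define BUFSIZE 8192

typedef struct MiniBufFile
{
	long		curOffset;		/* file offset of start of buffer */
	int			pos;			/* next read/write position within buffer */
	int			nbytes;			/* number of valid bytes in buffer */
	bool		dirty;			/* does the buffer hold unwritten data? */
	char		buffer[BUFSIZE];
} MiniBufFile;

/*
 * Seek to an absolute offset.  If the target lies inside the currently
 * loaded buffer we only move 'pos'; the buffer is neither flushed nor
 * reloaded, and it stays dirty if it was dirty.  Otherwise we must flush
 * and reposition (left as a stub here).
 */
static int
minibuf_seek(MiniBufFile *f, long newOffset)
{
	if (newOffset >= f->curOffset &&
		newOffset <= f->curOffset + f->nbytes)
	{
		f->pos = (int) (newOffset - f->curOffset);
		return 0;				/* cheap path: in-buffer seek */
	}

	/* Out-of-buffer seek: flush if dirty, then reload around newOffset. */
	if (f->dirty)
	{
		/* ... write the buffer back to the underlying file ... */
		f->dirty = false;
	}
	f->curOffset = newOffset;
	f->pos = 0;
	f->nbytes = 0;				/* force a read on next access */
	return 0;
}

int
main(void)
{
	MiniBufFile f = {.curOffset = 8192, .pos = 100, .nbytes = 4000, .dirty = true};

	minibuf_seek(&f, 8192 + 2500);	/* stays within the buffer */
	printf("pos=%d dirty=%d\n", f.pos, f.dirty);
	minibuf_seek(&f, 40000);		/* forces flush + repositioning */
	printf("curOffset=%ld nbytes=%d\n", f.curOffset, f.nbytes);
	return 0;
}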
*/ diff --git a/src/backend/storage/file/fd.c b/src/backend/storage/file/fd.c index 0560bf9d72b..1f69c9e03c9 100644 --- a/src/backend/storage/file/fd.c +++ b/src/backend/storage/file/fd.c @@ -83,7 +83,7 @@ * and other code that tries to open files without consulting fd.c. This * is the number left free. (While we can be pretty sure we won't get * EMFILE, there's never any guarantee that we won't get ENFILE due to - * other processes chewing up FDs. So it's a bad idea to try to open files + * other processes chewing up FDs. So it's a bad idea to try to open files * without consulting fd.c. Nonetheless we cannot control all code.) * * Because this is just a fixed setting, we are effectively assuming that @@ -168,8 +168,8 @@ typedef struct vfd } Vfd; /* - * Virtual File Descriptor array pointer and size. This grows as - * needed. 'File' values are indexes into this array. + * Virtual File Descriptor array pointer and size. This grows as + * needed. 'File' values are indexes into this array. * Note that VfdCache[0] is not a usable VFD, just a list header. */ static Vfd *VfdCache; @@ -189,7 +189,7 @@ static bool have_xact_temporary_files = false; /* * Tracks the total size of all temporary files. Note: when temp_file_limit * is being enforced, this cannot overflow since the limit cannot be more - * than INT_MAX kilobytes. When not enforcing, it could theoretically + * than INT_MAX kilobytes. When not enforcing, it could theoretically * overflow, but we don't care. */ static uint64 temporary_files_size = 0; @@ -252,7 +252,7 @@ static int nextTempTableSpace = 0; * * The Least Recently Used ring is a doubly linked list that begins and * ends on element zero. Element zero is special -- it doesn't represent - * a file and its "fd" field always == VFD_CLOSED. Element zero is just an + * a file and its "fd" field always == VFD_CLOSED. Element zero is just an * anchor that shows us the beginning/end of the ring. * Only VFD elements that are currently really open (have an FD assigned) are * in the Lru ring. Elements that are "virtually" open can be recognized @@ -473,7 +473,7 @@ InitFileAccess(void) * We stop counting if usable_fds reaches max_to_probe. Note: a small * value of max_to_probe might result in an underestimate of already_open; * we must fill in any "gaps" in the set of used FDs before the calculation - * of already_open will give the right answer. In practice, max_to_probe + * of already_open will give the right answer. In practice, max_to_probe * of a couple of dozen should be enough to ensure good results. * * We assume stdin (FD 0) is available for dup'ing @@ -550,7 +550,7 @@ count_usable_fds(int max_to_probe, int *usable_fds, int *already_open) pfree(fd); /* - * Return results. usable_fds is just the number of successful dups. We + * Return results. usable_fds is just the number of successful dups. We * assume that the system limit is highestfd+1 (remember 0 is a legal FD * number) and so already_open is highestfd+1 - usable_fds. */ @@ -1045,7 +1045,7 @@ OpenTemporaryFile(bool interXact) /* * If not, or if tablespace is bad, create in database's default - * tablespace. MyDatabaseTableSpace should normally be set before we get + * tablespace. MyDatabaseTableSpace should normally be set before we get * here, but just in case it isn't, fall back to pg_default tablespace. 
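The temp-tablespace selection described in SetTempTablespaces and GetNextTempTableSpace amounts to a round-robin walk that starts at a random list position, with InvalidOid standing for the database's default tablespace. A minimal sketch, with made-up OID values and the locking and validation stripped out, looks like this:

/* Illustrative sketch only -- the Oid values here are invented. */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

typedef unsigned int Oid;
#define InvalidOid ((Oid) 0)

static Oid *tempTableSpaces = NULL;
static int	numTempTableSpaces = -1;	/* -1 = not set yet */
static int	nextTempTableSpace = 0;

/* Remember the list and pick a random starting point, as the comment says. */
static void
set_temp_tablespaces(Oid *spaces, int n)
{
	tempTableSpaces = spaces;
	numTempTableSpaces = n;
	nextTempTableSpace = (n > 1) ? rand() % n : 0;
}

/*
 * Return the next tablespace to use, advancing circularly.
 * InvalidOid means "use the current database's default tablespace".
 */
static Oid
get_next_temp_tablespace(void)
{
	if (numTempTableSpaces > 0)
	{
		if (++nextTempTableSpace >= numTempTableSpaces)
			nextTempTableSpace = 0;
		return tempTableSpaces[nextTempTableSpace];
	}
	return InvalidOid;
}

int
main(void)
{
	Oid			spaces[] = {16385, 16386, 16387};	/* hypothetical OIDs */

	srand((unsigned) time(NULL));
	set_temp_tablespaces(spaces, 3);
	for (int i = 0; i < 5; i++)
		printf("temp file %d -> tablespace %u\n", i, get_next_temp_tablespace());
	return 0;
}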
*/ if (file <= 0) @@ -1339,7 +1339,7 @@ FileWrite(File file, char *buffer, int amount) /* * If enforcing temp_file_limit and it's a temp file, check to see if the - * write would overrun temp_file_limit, and throw error if so. Note: it's + * write would overrun temp_file_limit, and throw error if so. Note: it's * really a modularity violation to throw error here; we should set errno * and return -1. However, there's no way to report a suitable error * message if we do that. All current callers would just throw error @@ -1618,7 +1618,7 @@ reserveAllocatedDesc(void) /* * Routines that want to use stdio (ie, FILE*) should use AllocateFile * rather than plain fopen(). This lets fd.c deal with freeing FDs if - * necessary to open the file. When done, call FreeFile rather than fclose. + * necessary to open the file. When done, call FreeFile rather than fclose. * * Note that files that will be open for any significant length of time * should NOT be handled this way, since they cannot share kernel file @@ -1923,7 +1923,7 @@ TryAgain: * Read a directory opened with AllocateDir, ereport'ing any error. * * This is easier to use than raw readdir() since it takes care of some - * otherwise rather tedious and error-prone manipulation of errno. Also, + * otherwise rather tedious and error-prone manipulation of errno. Also, * if you are happy with a generic error message for AllocateDir failure, * you can just do * @@ -2058,7 +2058,7 @@ SetTempTablespaces(Oid *tableSpaces, int numSpaces) numTempTableSpaces = numSpaces; /* - * Select a random starting point in the list. This is to minimize + * Select a random starting point in the list. This is to minimize * conflicts between backends that are most likely sharing the same list * of temp tablespaces. Note that if we create multiple temp files in the * same transaction, we'll advance circularly through the list --- this @@ -2087,7 +2087,7 @@ TempTablespacesAreSet(void) /* * GetNextTempTableSpace * - * Select the next temp tablespace to use. A result of InvalidOid means + * Select the next temp tablespace to use. A result of InvalidOid means * to use the current database's default tablespace. */ Oid diff --git a/src/backend/storage/freespace/freespace.c b/src/backend/storage/freespace/freespace.c index cdf444111f8..8eee0ce80e6 100644 --- a/src/backend/storage/freespace/freespace.c +++ b/src/backend/storage/freespace/freespace.c @@ -48,7 +48,7 @@ * Range Category * 0 - 31 0 * 32 - 63 1 - * ... ... ... + * ... ... ... * 8096 - 8127 253 * 8128 - 8163 254 * 8164 - 8192 255 @@ -123,7 +123,7 @@ static uint8 fsm_vacuum_page(Relation rel, FSMAddress addr, bool *eof); * will turn out to have too little space available by the time the caller * gets a lock on it. In that case, the caller should report the actual * amount of free space available on that page and then try again (see - * RecordAndGetPageWithFreeSpace). If InvalidBlockNumber is returned, + * RecordAndGetPageWithFreeSpace). If InvalidBlockNumber is returned, * extend the relation. */ BlockNumber diff --git a/src/backend/storage/freespace/fsmpage.c b/src/backend/storage/freespace/fsmpage.c index 64dc2fd7265..6b003cbf874 100644 --- a/src/backend/storage/freespace/fsmpage.c +++ b/src/backend/storage/freespace/fsmpage.c @@ -185,13 +185,13 @@ restart: /*---------- * Start the search from the target slot. At every step, move one - * node to the right, then climb up to the parent. Stop when we reach + * node to the right, then climb up to the parent. 
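As an aside on the Range/Category table shown in the freespace.c excerpt above, the mapping can be reproduced with a few lines of arithmetic. The constants below (32-byte step, 8164-byte top bucket) assume the default 8 kB block size and are hard-coded purely for this illustration:

/* Illustrative sketch of the range-to-category mapping tabulated above. */
#include <assert.h>
#include <stdio.h>

#define FSM_CAT_STEP	32
#define MAX_FSM_REQUEST 8164	/* assumed for 8 kB pages */

static unsigned char
space_avail_to_category(unsigned int avail)
{
	unsigned int cat;

	assert(avail <= 8192);
	if (avail >= MAX_FSM_REQUEST)
		return 255;
	cat = avail / FSM_CAT_STEP;
	if (cat > 254)
		cat = 254;				/* 8128..8163 all map to 254 */
	return (unsigned char) cat;
}

int
main(void)
{
	unsigned int probes[] = {0, 31, 32, 63, 8096, 8127, 8128, 8163, 8164, 8192};

	for (int i = 0; i < 10; i++)
		printf("%4u bytes free -> category %u\n",
			   probes[i], space_avail_to_category(probes[i]));
	return 0;
}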
Stop when we reach * a node with enough free space (as we must, since the root has enough * space). * * The idea is to gradually expand our "search triangle", that is, all * nodes covered by the current node, and to be sure we search to the - * right from the start point. At the first step, only the target slot + * right from the start point. At the first step, only the target slot * is examined. When we move up from a left child to its parent, we are * adding the right-hand subtree of that parent to the search triangle. * When we move right then up from a right child, we are dropping the diff --git a/src/backend/storage/ipc/dsm.c b/src/backend/storage/ipc/dsm.c index 6c410f77d9e..733fa5f7bd3 100644 --- a/src/backend/storage/ipc/dsm.c +++ b/src/backend/storage/ipc/dsm.c @@ -59,29 +59,29 @@ /* Backend-local tracking for on-detach callbacks. */ typedef struct dsm_segment_detach_callback { - on_dsm_detach_callback function; - Datum arg; - slist_node node; + on_dsm_detach_callback function; + Datum arg; + slist_node node; } dsm_segment_detach_callback; /* Backend-local state for a dynamic shared memory segment. */ struct dsm_segment { - dlist_node node; /* List link in dsm_segment_list. */ - ResourceOwner resowner; /* Resource owner. */ - dsm_handle handle; /* Segment name. */ - uint32 control_slot; /* Slot in control segment. */ - void *impl_private; /* Implementation-specific private data. */ - void *mapped_address; /* Mapping address, or NULL if unmapped. */ - Size mapped_size; /* Size of our mapping. */ - slist_head on_detach; /* On-detach callbacks. */ + dlist_node node; /* List link in dsm_segment_list. */ + ResourceOwner resowner; /* Resource owner. */ + dsm_handle handle; /* Segment name. */ + uint32 control_slot; /* Slot in control segment. */ + void *impl_private; /* Implementation-specific private data. */ + void *mapped_address; /* Mapping address, or NULL if unmapped. */ + Size mapped_size; /* Size of our mapping. */ + slist_head on_detach; /* On-detach callbacks. */ }; /* Shared-memory state for a dynamic shared memory segment. */ typedef struct dsm_control_item { dsm_handle handle; - uint32 refcnt; /* 2+ = active, 1 = moribund, 0 = gone */ + uint32 refcnt; /* 2+ = active, 1 = moribund, 0 = gone */ } dsm_control_item; /* Layout of the dynamic shared memory control segment. */ @@ -90,7 +90,7 @@ typedef struct dsm_control_header uint32 magic; uint32 nitems; uint32 maxitems; - dsm_control_item item[FLEXIBLE_ARRAY_MEMBER]; + dsm_control_item item[FLEXIBLE_ARRAY_MEMBER]; } dsm_control_header; static void dsm_cleanup_for_mmap(void); @@ -132,7 +132,7 @@ static dlist_head dsm_segment_list = DLIST_STATIC_INIT(dsm_segment_list); static dsm_handle dsm_control_handle; static dsm_control_header *dsm_control; static Size dsm_control_mapped_size = 0; -static void *dsm_control_impl_private = NULL; +static void *dsm_control_impl_private = NULL; /* * Start up the dynamic shared memory system. @@ -166,14 +166,14 @@ dsm_postmaster_startup(PGShmemHeader *shim) maxitems = PG_DYNSHMEM_FIXED_SLOTS + PG_DYNSHMEM_SLOTS_PER_BACKEND * MaxBackends; elog(DEBUG2, "dynamic shared memory system will support %u segments", - maxitems); + maxitems); segsize = dsm_control_bytes_needed(maxitems); /* - * Loop until we find an unused identifier for the new control segment. - * We sometimes use 0 as a sentinel value indicating that no control - * segment is known to exist, so avoid using that value for a real - * control segment. + * Loop until we find an unused identifier for the new control segment. 
We + * sometimes use 0 as a sentinel value indicating that no control segment + * is known to exist, so avoid using that value for a real control + * segment. */ for (;;) { @@ -224,17 +224,17 @@ dsm_cleanup_using_control_segment(dsm_handle old_control_handle) /* * Try to attach the segment. If this fails, it probably just means that - * the operating system has been rebooted and the segment no longer exists, - * or an unrelated proces has used the same shm ID. So just fall out - * quietly. + * the operating system has been rebooted and the segment no longer + * exists, or an unrelated proces has used the same shm ID. So just fall + * out quietly. */ if (!dsm_impl_op(DSM_OP_ATTACH, old_control_handle, 0, &impl_private, &mapped_address, &mapped_size, DEBUG1)) return; /* - * We've managed to reattach it, but the contents might not be sane. - * If they aren't, we disregard the segment after all. + * We've managed to reattach it, but the contents might not be sane. If + * they aren't, we disregard the segment after all. */ old_control = (dsm_control_header *) mapped_address; if (!dsm_control_segment_sane(old_control, mapped_size)) @@ -245,14 +245,14 @@ dsm_cleanup_using_control_segment(dsm_handle old_control_handle) } /* - * OK, the control segment looks basically valid, so we can get use - * it to get a list of segments that need to be removed. + * OK, the control segment looks basically valid, so we can get use it to + * get a list of segments that need to be removed. */ nitems = old_control->nitems; for (i = 0; i < nitems; ++i) { - dsm_handle handle; - uint32 refcnt; + dsm_handle handle; + uint32 refcnt; /* If the reference count is 0, the slot is actually unused. */ refcnt = old_control->item[i].refcnt; @@ -262,7 +262,7 @@ dsm_cleanup_using_control_segment(dsm_handle old_control_handle) /* Log debugging information. */ handle = old_control->item[i].handle; elog(DEBUG2, "cleaning up orphaned dynamic shared memory with ID %u (reference count %u)", - handle, refcnt); + handle, refcnt); /* Destroy the referenced segment. */ dsm_impl_op(DSM_OP_DESTROY, handle, 0, &junk_impl_private, @@ -290,7 +290,7 @@ dsm_cleanup_using_control_segment(dsm_handle old_control_handle) static void dsm_cleanup_for_mmap(void) { - DIR *dir; + DIR *dir; struct dirent *dent; /* Open the directory; can't use AllocateDir in postmaster. */ @@ -298,15 +298,16 @@ dsm_cleanup_for_mmap(void) ereport(ERROR, (errcode_for_file_access(), errmsg("could not open directory \"%s\": %m", - PG_DYNSHMEM_DIR))); + PG_DYNSHMEM_DIR))); /* Scan for something with a name of the correct format. */ while ((dent = ReadDir(dir, PG_DYNSHMEM_DIR)) != NULL) { if (strncmp(dent->d_name, PG_DYNSHMEM_MMAP_FILE_PREFIX, - strlen(PG_DYNSHMEM_MMAP_FILE_PREFIX)) == 0) + strlen(PG_DYNSHMEM_MMAP_FILE_PREFIX)) == 0) { - char buf[MAXPGPATH]; + char buf[MAXPGPATH]; + snprintf(buf, MAXPGPATH, PG_DYNSHMEM_DIR "/%s", dent->d_name); elog(DEBUG2, "removing file \"%s\"", buf); @@ -314,7 +315,7 @@ dsm_cleanup_for_mmap(void) /* We found a matching file; so remove it. */ if (unlink(buf) != 0) { - int save_errno; + int save_errno; save_errno = errno; closedir(dir); @@ -352,8 +353,8 @@ dsm_postmaster_shutdown(int code, Datum arg) * If some other backend exited uncleanly, it might have corrupted the * control segment while it was dying. In that case, we warn and ignore * the contents of the control segment. This may end up leaving behind - * stray shared memory segments, but there's not much we can do about - * that if the metadata is gone. 
+ * stray shared memory segments, but there's not much we can do about that + * if the metadata is gone. */ nitems = dsm_control->nitems; if (!dsm_control_segment_sane(dsm_control, dsm_control_mapped_size)) @@ -375,7 +376,7 @@ dsm_postmaster_shutdown(int code, Datum arg) /* Log debugging information. */ handle = dsm_control->item[i].handle; elog(DEBUG2, "cleaning up orphaned dynamic shared memory with ID %u", - handle); + handle); /* Destroy the segment. */ dsm_impl_op(DSM_OP_DESTROY, handle, 0, &junk_impl_private, @@ -427,7 +428,7 @@ dsm_backend_startup(void) &dsm_control_mapped_size, WARNING); ereport(FATAL, (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("dynamic shared memory control segment is not valid"))); + errmsg("dynamic shared memory control segment is not valid"))); } } #endif @@ -455,9 +456,9 @@ dsm_set_control_handle(dsm_handle h) dsm_segment * dsm_create(Size size) { - dsm_segment *seg = dsm_create_descriptor(); - uint32 i; - uint32 nitems; + dsm_segment *seg = dsm_create_descriptor(); + uint32 i; + uint32 nitems; /* Unsafe in postmaster (and pointless in a stand-alone backend). */ Assert(IsUnderPostmaster); @@ -524,10 +525,10 @@ dsm_create(Size size) dsm_segment * dsm_attach(dsm_handle h) { - dsm_segment *seg; - dlist_iter iter; - uint32 i; - uint32 nitems; + dsm_segment *seg; + dlist_iter iter; + uint32 i; + uint32 nitems; /* Unsafe in postmaster (and pointless in a stand-alone backend). */ Assert(IsUnderPostmaster); @@ -537,13 +538,13 @@ dsm_attach(dsm_handle h) /* * Since this is just a debugging cross-check, we could leave it out - * altogether, or include it only in assert-enabled builds. But since - * the list of attached segments should normally be very short, let's - * include it always for right now. + * altogether, or include it only in assert-enabled builds. But since the + * list of attached segments should normally be very short, let's include + * it always for right now. * - * If you're hitting this error, you probably want to attempt to - * find an existing mapping via dsm_find_mapping() before calling - * dsm_attach() to create a new one. + * If you're hitting this error, you probably want to attempt to find an + * existing mapping via dsm_find_mapping() before calling dsm_attach() to + * create a new one. */ dlist_foreach(iter, &dsm_segment_list) { @@ -584,10 +585,10 @@ dsm_attach(dsm_handle h) LWLockRelease(DynamicSharedMemoryControlLock); /* - * If we didn't find the handle we're looking for in the control - * segment, it probably means that everyone else who had it mapped, - * including the original creator, died before we got to this point. - * It's up to the caller to decide what to do about that. + * If we didn't find the handle we're looking for in the control segment, + * it probably means that everyone else who had it mapped, including the + * original creator, died before we got to this point. It's up to the + * caller to decide what to do about that. 
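The refcount convention noted in dsm_control_item (2+ = active, 1 = moribund, 0 = gone) drives the attach behaviour discussed above. A reduced sketch, with the locking and error reporting stripped out and all names invented for the example, shows how a lookup by handle reacts to each state:

/* Illustrative sketch only -- a simplified stand-in for the control segment. */
#include <stdint.h>
#include <stdio.h>

typedef struct ControlItem
{
	uint32_t	handle;
	uint32_t	refcnt;			/* 2+ = active, 1 = moribund, 0 = unused */
} ControlItem;

#define NITEMS 4

/*
 * Look up 'handle' in the control array and, if it is still active, take a
 * reference.  Returns the slot index, or -1 if nobody (including the
 * creator) still has the segment mapped; it is then up to the caller to
 * decide what to do about that, as the comment above explains.
 */
static int
attach_segment(ControlItem *items, int nitems, uint32_t handle)
{
	for (int i = 0; i < nitems; i++)
	{
		/* refcnt <= 1 means unused or already being torn down: skip it. */
		if (items[i].refcnt <= 1)
			continue;
		if (items[i].handle == handle)
		{
			items[i].refcnt++;	/* the real code does this under a lock */
			return i;
		}
	}
	return -1;
}

int
main(void)
{
	ControlItem items[NITEMS] = {
		{0xdead0001, 2},		/* active: creator still attached */
		{0xdead0002, 1},		/* moribund: being destroyed */
		{0, 0}, {0, 0}
	};

	printf("attach 0xdead0001 -> slot %d\n", attach_segment(items, NITEMS, 0xdead0001));
	printf("attach 0xdead0002 -> slot %d\n", attach_segment(items, NITEMS, 0xdead0002));
	printf("attach 0xdeadbeef -> slot %d\n", attach_segment(items, NITEMS, 0xdeadbeef));
	return 0;
}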
*/ if (seg->control_slot == INVALID_CONTROL_SLOT) { @@ -612,7 +613,7 @@ dsm_backend_shutdown(void) { while (!dlist_is_empty(&dsm_segment_list)) { - dsm_segment *seg; + dsm_segment *seg; seg = dlist_head_element(dsm_segment, node, &dsm_segment_list); dsm_detach(seg); @@ -628,11 +629,11 @@ dsm_backend_shutdown(void) void dsm_detach_all(void) { - void *control_address = dsm_control; + void *control_address = dsm_control; while (!dlist_is_empty(&dsm_segment_list)) { - dsm_segment *seg; + dsm_segment *seg; seg = dlist_head_element(dsm_segment, node, &dsm_segment_list); dsm_detach(seg); @@ -697,7 +698,7 @@ dsm_detach(dsm_segment *seg) { slist_node *node; dsm_segment_detach_callback *cb; - on_dsm_detach_callback function; + on_dsm_detach_callback function; Datum arg; node = slist_pop_head_node(&seg->on_detach); @@ -710,13 +711,12 @@ dsm_detach(dsm_segment *seg) } /* - * Try to remove the mapping, if one exists. Normally, there will be, - * but maybe not, if we failed partway through a create or attach - * operation. We remove the mapping before decrementing the reference - * count so that the process that sees a zero reference count can be - * certain that no remaining mappings exist. Even if this fails, we - * pretend that it works, because retrying is likely to fail in the - * same way. + * Try to remove the mapping, if one exists. Normally, there will be, but + * maybe not, if we failed partway through a create or attach operation. + * We remove the mapping before decrementing the reference count so that + * the process that sees a zero reference count can be certain that no + * remaining mappings exist. Even if this fails, we pretend that it + * works, because retrying is likely to fail in the same way. */ if (seg->mapped_address != NULL) { @@ -730,8 +730,8 @@ dsm_detach(dsm_segment *seg) /* Reduce reference count, if we previously increased it. */ if (seg->control_slot != INVALID_CONTROL_SLOT) { - uint32 refcnt; - uint32 control_slot = seg->control_slot; + uint32 refcnt; + uint32 control_slot = seg->control_slot; LWLockAcquire(DynamicSharedMemoryControlLock, LW_EXCLUSIVE); Assert(dsm_control->item[control_slot].handle == seg->handle); @@ -744,15 +744,15 @@ dsm_detach(dsm_segment *seg) if (refcnt == 1) { /* - * If we fail to destroy the segment here, or are killed before - * we finish doing so, the reference count will remain at 1, which + * If we fail to destroy the segment here, or are killed before we + * finish doing so, the reference count will remain at 1, which * will mean that nobody else can attach to the segment. At * postmaster shutdown time, or when a new postmaster is started * after a hard kill, another attempt will be made to remove the * segment. * - * The main case we're worried about here is being killed by - * a signal before we can finish removing the segment. In that + * The main case we're worried about here is being killed by a + * signal before we can finish removing the segment. In that * case, it's important to be sure that the segment still gets * removed. 
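The on-detach callbacks mentioned above form a simple LIFO list: the most recently registered callback runs first, and each entry is unlinked before it is invoked so a failing callback cannot run twice. Below is a minimal stand-alone sketch; CallbackNode and friends are invented names, not dsm.c's slist machinery.

/* Illustrative sketch only -- a minimal LIFO callback list. */
#include <stdio.h>
#include <stdlib.h>

typedef void (*detach_callback) (void *arg);

typedef struct CallbackNode
{
	detach_callback func;
	void	   *arg;
	struct CallbackNode *next;
} CallbackNode;

/* Push a callback; the newest entry sits at the head of the list. */
static void
on_detach(CallbackNode **head, detach_callback func, void *arg)
{
	CallbackNode *node = malloc(sizeof(CallbackNode));

	node->func = func;
	node->arg = arg;
	node->next = *head;
	*head = node;
}

/* Pop and run callbacks one at a time, so a failing callback can't rerun. */
static void
run_detach_callbacks(CallbackNode **head)
{
	while (*head != NULL)
	{
		CallbackNode *node = *head;

		*head = node->next;		/* unlink before calling */
		node->func(node->arg);
		free(node);
	}
}

static void
say(void *arg)
{
	printf("detach callback: %s\n", (const char *) arg);
}

int
main(void)
{
	CallbackNode *callbacks = NULL;

	on_detach(&callbacks, say, "registered first, runs last");
	on_detach(&callbacks, say, "registered last, runs first");
	run_detach_callbacks(&callbacks);
	return 0;
}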
If we actually fail to remove the segment for some * other reason, the postmaster may not have any better luck than @@ -827,8 +827,8 @@ dsm_keep_segment(dsm_segment *seg) dsm_segment * dsm_find_mapping(dsm_handle h) { - dlist_iter iter; - dsm_segment *seg; + dlist_iter iter; + dsm_segment *seg; dlist_foreach(iter, &dsm_segment_list) { @@ -899,7 +899,7 @@ void cancel_on_dsm_detach(dsm_segment *seg, on_dsm_detach_callback function, Datum arg) { - slist_mutable_iter iter; + slist_mutable_iter iter; slist_foreach_modify(iter, &seg->on_detach) { @@ -921,7 +921,7 @@ cancel_on_dsm_detach(dsm_segment *seg, on_dsm_detach_callback function, void reset_on_dsm_detach(void) { - dlist_iter iter; + dlist_iter iter; dlist_foreach(iter, &dsm_segment_list) { @@ -952,7 +952,7 @@ reset_on_dsm_detach(void) static dsm_segment * dsm_create_descriptor(void) { - dsm_segment *seg; + dsm_segment *seg; ResourceOwnerEnlargeDSMs(CurrentResourceOwner); @@ -1005,5 +1005,5 @@ static uint64 dsm_control_bytes_needed(uint32 nitems) { return offsetof(dsm_control_header, item) - + sizeof(dsm_control_item) * (uint64) nitems; + +sizeof(dsm_control_item) * (uint64) nitems; } diff --git a/src/backend/storage/ipc/dsm_impl.c b/src/backend/storage/ipc/dsm_impl.c index fa253f0af53..74dace999ef 100644 --- a/src/backend/storage/ipc/dsm_impl.c +++ b/src/backend/storage/ipc/dsm_impl.c @@ -76,40 +76,40 @@ static bool dsm_impl_posix(dsm_op op, dsm_handle handle, Size request_size, #endif #ifdef USE_DSM_SYSV static bool dsm_impl_sysv(dsm_op op, dsm_handle handle, Size request_size, - void **impl_private, void **mapped_address, - Size *mapped_size, int elevel); + void **impl_private, void **mapped_address, + Size *mapped_size, int elevel); #endif #ifdef USE_DSM_WINDOWS static bool dsm_impl_windows(dsm_op op, dsm_handle handle, Size request_size, - void **impl_private, void **mapped_address, - Size *mapped_size, int elevel); + void **impl_private, void **mapped_address, + Size *mapped_size, int elevel); #endif #ifdef USE_DSM_MMAP static bool dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size, void **impl_private, void **mapped_address, Size *mapped_size, int elevel); #endif -static int errcode_for_dynamic_shared_memory(void); +static int errcode_for_dynamic_shared_memory(void); const struct config_enum_entry dynamic_shared_memory_options[] = { #ifdef USE_DSM_POSIX - { "posix", DSM_IMPL_POSIX, false}, + {"posix", DSM_IMPL_POSIX, false}, #endif #ifdef USE_DSM_SYSV - { "sysv", DSM_IMPL_SYSV, false}, + {"sysv", DSM_IMPL_SYSV, false}, #endif #ifdef USE_DSM_WINDOWS - { "windows", DSM_IMPL_WINDOWS, false}, + {"windows", DSM_IMPL_WINDOWS, false}, #endif #ifdef USE_DSM_MMAP - { "mmap", DSM_IMPL_MMAP, false}, + {"mmap", DSM_IMPL_MMAP, false}, #endif - { "none", DSM_IMPL_NONE, false}, + {"none", DSM_IMPL_NONE, false}, {NULL, 0, false} }; /* Implementation selector. */ -int dynamic_shared_memory_type; +int dynamic_shared_memory_type; /* Size of buffer to be used for zero-filling. */ #define ZBUFFER_SIZE 8192 @@ -137,20 +137,20 @@ int dynamic_shared_memory_type; * segment. * * Arguments: - * op: The operation to be performed. - * handle: The handle of an existing object, or for DSM_OP_CREATE, the - * a new handle the caller wants created. - * request_size: For DSM_OP_CREATE, the requested size. For DSM_OP_RESIZE, - * the new size. Otherwise, 0. - * impl_private: Private, implementation-specific data. 
Will be a pointer - * to NULL for the first operation on a shared memory segment within this - * backend; thereafter, it will point to the value to which it was set - * on the previous call. - * mapped_address: Pointer to start of current mapping; pointer to NULL - * if none. Updated with new mapping address. - * mapped_size: Pointer to size of current mapping; pointer to 0 if none. - * Updated with new mapped size. - * elevel: Level at which to log errors. + * op: The operation to be performed. + * handle: The handle of an existing object, or for DSM_OP_CREATE, the + * a new handle the caller wants created. + * request_size: For DSM_OP_CREATE, the requested size. For DSM_OP_RESIZE, + * the new size. Otherwise, 0. + * impl_private: Private, implementation-specific data. Will be a pointer + * to NULL for the first operation on a shared memory segment within this + * backend; thereafter, it will point to the value to which it was set + * on the previous call. + * mapped_address: Pointer to start of current mapping; pointer to NULL + * if none. Updated with new mapping address. + * mapped_size: Pointer to size of current mapping; pointer to 0 if none. + * Updated with new mapped size. + * elevel: Level at which to log errors. * * Return value: true on success, false on failure. When false is returned, * a message should first be logged at the specified elevel, except in the @@ -165,7 +165,7 @@ dsm_impl_op(dsm_op op, dsm_handle handle, Size request_size, { Assert(op == DSM_OP_CREATE || op == DSM_OP_RESIZE || request_size == 0); Assert((op != DSM_OP_CREATE && op != DSM_OP_ATTACH) || - (*mapped_address == NULL && *mapped_size == 0)); + (*mapped_address == NULL && *mapped_size == 0)); switch (dynamic_shared_memory_type) { @@ -243,10 +243,10 @@ dsm_impl_posix(dsm_op op, dsm_handle handle, Size request_size, void **impl_private, void **mapped_address, Size *mapped_size, int elevel) { - char name[64]; - int flags; - int fd; - char *address; + char name[64]; + int flags; + int fd; + char *address; snprintf(name, 64, "/PostgreSQL.%u", handle); @@ -258,8 +258,8 @@ dsm_impl_posix(dsm_op op, dsm_handle handle, Size request_size, { ereport(elevel, (errcode_for_dynamic_shared_memory(), - errmsg("could not unmap shared memory segment \"%s\": %m", - name))); + errmsg("could not unmap shared memory segment \"%s\": %m", + name))); return false; } *mapped_address = NULL; @@ -268,8 +268,8 @@ dsm_impl_posix(dsm_op op, dsm_handle handle, Size request_size, { ereport(elevel, (errcode_for_dynamic_shared_memory(), - errmsg("could not remove shared memory segment \"%s\": %m", - name))); + errmsg("could not remove shared memory segment \"%s\": %m", + name))); return false; } return true; @@ -290,7 +290,7 @@ dsm_impl_posix(dsm_op op, dsm_handle handle, Size request_size, ereport(elevel, (errcode_for_dynamic_shared_memory(), errmsg("could not open shared memory segment \"%s\": %m", - name))); + name))); return false; } @@ -304,7 +304,7 @@ dsm_impl_posix(dsm_op op, dsm_handle handle, Size request_size, if (fstat(fd, &st) != 0) { - int save_errno; + int save_errno; /* Back out what's already been done. 
*/ save_errno = errno; @@ -314,14 +314,14 @@ dsm_impl_posix(dsm_op op, dsm_handle handle, Size request_size, ereport(elevel, (errcode_for_dynamic_shared_memory(), errmsg("could not stat shared memory segment \"%s\": %m", - name))); + name))); return false; } request_size = st.st_size; } else if (*mapped_size != request_size && ftruncate(fd, request_size)) { - int save_errno; + int save_errno; /* Back out what's already been done. */ save_errno = errno; @@ -332,8 +332,8 @@ dsm_impl_posix(dsm_op op, dsm_handle handle, Size request_size, ereport(elevel, (errcode_for_dynamic_shared_memory(), - errmsg("could not resize shared memory segment %s to %zu bytes: %m", - name, request_size))); + errmsg("could not resize shared memory segment %s to %zu bytes: %m", + name, request_size))); return false; } @@ -347,7 +347,7 @@ dsm_impl_posix(dsm_op op, dsm_handle handle, Size request_size, return true; if (munmap(*mapped_address, *mapped_size) != 0) { - int save_errno; + int save_errno; /* Back out what's already been done. */ save_errno = errno; @@ -358,8 +358,8 @@ dsm_impl_posix(dsm_op op, dsm_handle handle, Size request_size, ereport(elevel, (errcode_for_dynamic_shared_memory(), - errmsg("could not unmap shared memory segment \"%s\": %m", - name))); + errmsg("could not unmap shared memory segment \"%s\": %m", + name))); return false; } *mapped_address = NULL; @@ -367,11 +367,11 @@ dsm_impl_posix(dsm_op op, dsm_handle handle, Size request_size, } /* Map it. */ - address = mmap(NULL, request_size, PROT_READ|PROT_WRITE, - MAP_SHARED|MAP_HASSEMAPHORE, fd, 0); + address = mmap(NULL, request_size, PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_HASSEMAPHORE, fd, 0); if (address == MAP_FAILED) { - int save_errno; + int save_errno; /* Back out what's already been done. */ save_errno = errno; @@ -409,11 +409,11 @@ dsm_impl_sysv(dsm_op op, dsm_handle handle, Size request_size, void **impl_private, void **mapped_address, Size *mapped_size, int elevel) { - key_t key; - int ident; - char *address; - char name[64]; - int *ident_cache; + key_t key; + int ident; + char *address; + char name[64]; + int *ident_cache; /* Resize is not supported for System V shared memory. */ if (op == DSM_OP_RESIZE) @@ -427,38 +427,38 @@ dsm_impl_sysv(dsm_op op, dsm_handle handle, Size request_size, return true; /* - * POSIX shared memory and mmap-based shared memory identify segments - * with names. To avoid needless error message variation, we use the - * handle as the name. + * POSIX shared memory and mmap-based shared memory identify segments with + * names. To avoid needless error message variation, we use the handle as + * the name. */ snprintf(name, 64, "%u", handle); /* - * The System V shared memory namespace is very restricted; names are - * of type key_t, which is expected to be some sort of integer data type, - * but not necessarily the same one as dsm_handle. Since we use - * dsm_handle to identify shared memory segments across processes, this - * might seem like a problem, but it's really not. If dsm_handle is - * bigger than key_t, the cast below might truncate away some bits from - * the handle the user-provided, but it'll truncate exactly the same bits - * away in exactly the same fashion every time we use that handle, which - * is all that really matters. Conversely, if dsm_handle is smaller than - * key_t, we won't use the full range of available key space, but that's - * no big deal either. 
+ * The System V shared memory namespace is very restricted; names are of + * type key_t, which is expected to be some sort of integer data type, but + * not necessarily the same one as dsm_handle. Since we use dsm_handle to + * identify shared memory segments across processes, this might seem like + * a problem, but it's really not. If dsm_handle is bigger than key_t, + * the cast below might truncate away some bits from the handle the + * user-provided, but it'll truncate exactly the same bits away in exactly + * the same fashion every time we use that handle, which is all that + * really matters. Conversely, if dsm_handle is smaller than key_t, we + * won't use the full range of available key space, but that's no big deal + * either. * - * We do make sure that the key isn't negative, because that might not - * be portable. + * We do make sure that the key isn't negative, because that might not be + * portable. */ key = (key_t) handle; - if (key < 1) /* avoid compiler warning if type is unsigned */ + if (key < 1) /* avoid compiler warning if type is unsigned */ key = -key; /* * There's one special key, IPC_PRIVATE, which can't be used. If we end - * up with that value by chance during a create operation, just pretend - * it already exists, so that caller will retry. If we run into it - * anywhere else, the caller has passed a handle that doesn't correspond - * to anything we ever created, which should not happen. + * up with that value by chance during a create operation, just pretend it + * already exists, so that caller will retry. If we run into it anywhere + * else, the caller has passed a handle that doesn't correspond to + * anything we ever created, which should not happen. */ if (key == IPC_PRIVATE) { @@ -469,9 +469,9 @@ dsm_impl_sysv(dsm_op op, dsm_handle handle, Size request_size, } /* - * Before we can do anything with a shared memory segment, we have to - * map the shared memory key to a shared memory identifier using shmget(). - * To avoid repeated lookups, we store the key using impl_private. + * Before we can do anything with a shared memory segment, we have to map + * the shared memory key to a shared memory identifier using shmget(). To + * avoid repeated lookups, we store the key using impl_private. 
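The key-handling rules spelled out in that comment (cast the handle to key_t, force it non-negative, refuse IPC_PRIVATE, let IPC_EXCL report collisions) can be exercised directly with the standard System V calls. This is only a demonstration of the pattern, not the dsm_impl_sysv code itself; error handling is reduced to perror() and the handle value is made up.

/* Illustrative sketch only -- System V key handling as described above. */
#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int
main(void)
{
	unsigned int handle = 123456789;	/* pretend this is a dsm_handle */
	key_t		key;
	int			ident;
	void	   *address;

	/* Map the handle onto the key space; avoid negative keys ... */
	key = (key_t) handle;
	if (key < 1)
		key = -key;
	/* ... and never use the reserved IPC_PRIVATE value. */
	if (key == IPC_PRIVATE)
	{
		fprintf(stderr, "handle collides with IPC_PRIVATE, caller should retry\n");
		return 1;
	}

	/* Create a 64 kB segment; IPC_EXCL makes collisions report EEXIST. */
	ident = shmget(key, 65536, IPC_CREAT | IPC_EXCL | 0600);
	if (ident < 0)
	{
		perror("shmget");
		return 1;
	}

	address = shmat(ident, NULL, 0);
	if (address == (void *) -1)
	{
		perror("shmat");
		shmctl(ident, IPC_RMID, NULL);
		return 1;
	}

	memset(address, 0, 65536);	/* the segment is now usable */

	shmdt(address);
	shmctl(ident, IPC_RMID, NULL);	/* destroy it again */
	return 0;
}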
*/ if (*impl_private != NULL) { @@ -480,8 +480,8 @@ dsm_impl_sysv(dsm_op op, dsm_handle handle, Size request_size, } else { - int flags = IPCProtection; - size_t segsize; + int flags = IPCProtection; + size_t segsize; /* * Allocate the memory BEFORE acquiring the resource, so that we don't @@ -506,7 +506,8 @@ dsm_impl_sysv(dsm_op op, dsm_handle handle, Size request_size, { if (errno != EEXIST) { - int save_errno = errno; + int save_errno = errno; + pfree(ident_cache); errno = save_errno; ereport(elevel, @@ -529,8 +530,8 @@ dsm_impl_sysv(dsm_op op, dsm_handle handle, Size request_size, { ereport(elevel, (errcode_for_dynamic_shared_memory(), - errmsg("could not unmap shared memory segment \"%s\": %m", - name))); + errmsg("could not unmap shared memory segment \"%s\": %m", + name))); return false; } *mapped_address = NULL; @@ -539,8 +540,8 @@ dsm_impl_sysv(dsm_op op, dsm_handle handle, Size request_size, { ereport(elevel, (errcode_for_dynamic_shared_memory(), - errmsg("could not remove shared memory segment \"%s\": %m", - name))); + errmsg("could not remove shared memory segment \"%s\": %m", + name))); return false; } return true; @@ -553,7 +554,7 @@ dsm_impl_sysv(dsm_op op, dsm_handle handle, Size request_size, if (shmctl(ident, IPC_STAT, &shm) != 0) { - int save_errno; + int save_errno; /* Back out what's already been done. */ save_errno = errno; @@ -564,7 +565,7 @@ dsm_impl_sysv(dsm_op op, dsm_handle handle, Size request_size, ereport(elevel, (errcode_for_dynamic_shared_memory(), errmsg("could not stat shared memory segment \"%s\": %m", - name))); + name))); return false; } request_size = shm.shm_segsz; @@ -574,7 +575,7 @@ dsm_impl_sysv(dsm_op op, dsm_handle handle, Size request_size, address = shmat(ident, NULL, PG_SHMAT_FLAGS); if (address == (void *) -1) { - int save_errno; + int save_errno; /* Back out what's already been done. */ save_errno = errno; @@ -614,9 +615,9 @@ dsm_impl_windows(dsm_op op, dsm_handle handle, Size request_size, void **impl_private, void **mapped_address, Size *mapped_size, int elevel) { - char *address; + char *address; HANDLE hmap; - char name[64]; + char name[64]; MEMORY_BASIC_INFORMATION info; /* Resize is not supported for Windows shared memory. */ @@ -631,12 +632,12 @@ dsm_impl_windows(dsm_op op, dsm_handle handle, Size request_size, return true; /* - * Storing the shared memory segment in the Global\ namespace, can - * allow any process running in any session to access that file - * mapping object provided that the caller has the required access rights. - * But to avoid issues faced in main shared memory, we are using the naming - * convention similar to main shared memory. We can change here once - * issue mentioned in GetSharedMemName is resolved. + * Storing the shared memory segment in the Global\ namespace, can allow + * any process running in any session to access that file mapping object + * provided that the caller has the required access rights. But to avoid + * issues faced in main shared memory, we are using the naming convention + * similar to main shared memory. We can change here once issue mentioned + * in GetSharedMemName is resolved. 
*/ snprintf(name, 64, "%s.%u", SEGMENT_NAME_PREFIX, handle); @@ -652,8 +653,8 @@ dsm_impl_windows(dsm_op op, dsm_handle handle, Size request_size, _dosmaperr(GetLastError()); ereport(elevel, (errcode_for_dynamic_shared_memory(), - errmsg("could not unmap shared memory segment \"%s\": %m", - name))); + errmsg("could not unmap shared memory segment \"%s\": %m", + name))); return false; } if (*impl_private != NULL @@ -662,8 +663,8 @@ dsm_impl_windows(dsm_op op, dsm_handle handle, Size request_size, _dosmaperr(GetLastError()); ereport(elevel, (errcode_for_dynamic_shared_memory(), - errmsg("could not remove shared memory segment \"%s\": %m", - name))); + errmsg("could not remove shared memory segment \"%s\": %m", + name))); return false; } @@ -688,9 +689,9 @@ dsm_impl_windows(dsm_op op, dsm_handle handle, Size request_size, size_low = (DWORD) request_size; hmap = CreateFileMapping(INVALID_HANDLE_VALUE, /* Use the pagefile */ - NULL, /* Default security attrs */ - PAGE_READWRITE, /* Memory is read/write */ - size_high, /* Upper 32 bits of size */ + NULL, /* Default security attrs */ + PAGE_READWRITE, /* Memory is read/write */ + size_high, /* Upper 32 bits of size */ size_low, /* Lower 32 bits of size */ name); if (!hmap) @@ -698,8 +699,8 @@ dsm_impl_windows(dsm_op op, dsm_handle handle, Size request_size, _dosmaperr(GetLastError()); ereport(elevel, (errcode_for_dynamic_shared_memory(), - errmsg("could not create shared memory segment \"%s\": %m", - name))); + errmsg("could not create shared memory segment \"%s\": %m", + name))); return false; } _dosmaperr(GetLastError()); @@ -718,8 +719,8 @@ dsm_impl_windows(dsm_op op, dsm_handle handle, Size request_size, else { hmap = OpenFileMapping(FILE_MAP_WRITE | FILE_MAP_READ, - FALSE, /* do not inherit the name */ - name); /* name of mapping object */ + FALSE, /* do not inherit the name */ + name); /* name of mapping object */ if (!hmap) { _dosmaperr(GetLastError()); @@ -736,7 +737,7 @@ dsm_impl_windows(dsm_op op, dsm_handle handle, Size request_size, 0, 0, 0); if (!address) { - int save_errno; + int save_errno; _dosmaperr(GetLastError()); /* Back out what's already been done. */ @@ -752,14 +753,14 @@ dsm_impl_windows(dsm_op op, dsm_handle handle, Size request_size, } /* - * VirtualQuery gives size in page_size units, which is 4K for Windows. - * We need size only when we are attaching, but it's better to get the - * size when creating new segment to keep size consistent both for + * VirtualQuery gives size in page_size units, which is 4K for Windows. We + * need size only when we are attaching, but it's better to get the size + * when creating new segment to keep size consistent both for * DSM_OP_CREATE and DSM_OP_ATTACH. */ if (VirtualQuery(address, &info, sizeof(info)) == 0) { - int save_errno; + int save_errno; _dosmaperr(GetLastError()); /* Back out what's already been done. 
*/ @@ -770,8 +771,8 @@ dsm_impl_windows(dsm_op op, dsm_handle handle, Size request_size, ereport(elevel, (errcode_for_dynamic_shared_memory(), - errmsg("could not stat shared memory segment \"%s\": %m", - name))); + errmsg("could not stat shared memory segment \"%s\": %m", + name))); return false; } @@ -799,13 +800,13 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size, void **impl_private, void **mapped_address, Size *mapped_size, int elevel) { - char name[64]; - int flags; - int fd; - char *address; + char name[64]; + int flags; + int fd; + char *address; snprintf(name, 64, PG_DYNSHMEM_DIR "/" PG_DYNSHMEM_MMAP_FILE_PREFIX "%u", - handle); + handle); /* Handle teardown cases. */ if (op == DSM_OP_DETACH || op == DSM_OP_DESTROY) @@ -815,8 +816,8 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size, { ereport(elevel, (errcode_for_dynamic_shared_memory(), - errmsg("could not unmap shared memory segment \"%s\": %m", - name))); + errmsg("could not unmap shared memory segment \"%s\": %m", + name))); return false; } *mapped_address = NULL; @@ -825,8 +826,8 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size, { ereport(elevel, (errcode_for_dynamic_shared_memory(), - errmsg("could not remove shared memory segment \"%s\": %m", - name))); + errmsg("could not remove shared memory segment \"%s\": %m", + name))); return false; } return true; @@ -840,7 +841,7 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size, ereport(elevel, (errcode_for_dynamic_shared_memory(), errmsg("could not open shared memory segment \"%s\": %m", - name))); + name))); return false; } @@ -854,7 +855,7 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size, if (fstat(fd, &st) != 0) { - int save_errno; + int save_errno; /* Back out what's already been done. */ save_errno = errno; @@ -864,14 +865,14 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size, ereport(elevel, (errcode_for_dynamic_shared_memory(), errmsg("could not stat shared memory segment \"%s\": %m", - name))); + name))); return false; } request_size = st.st_size; } else if (*mapped_size > request_size && ftruncate(fd, request_size)) { - int save_errno; + int save_errno; /* Back out what's already been done. */ save_errno = errno; @@ -882,8 +883,8 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size, ereport(elevel, (errcode_for_dynamic_shared_memory(), - errmsg("could not resize shared memory segment %s to %zu bytes: %m", - name, request_size))); + errmsg("could not resize shared memory segment %s to %zu bytes: %m", + name, request_size))); return false; } else if (*mapped_size < request_size) @@ -891,23 +892,23 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size, /* * Allocate a buffer full of zeros. * - * Note: palloc zbuffer, instead of just using a local char array, - * to ensure it is reasonably well-aligned; this may save a few - * cycles transferring data to the kernel. + * Note: palloc zbuffer, instead of just using a local char array, to + * ensure it is reasonably well-aligned; this may save a few cycles + * transferring data to the kernel. */ - char *zbuffer = (char *) palloc0(ZBUFFER_SIZE); - uint32 remaining = request_size; - bool success = true; + char *zbuffer = (char *) palloc0(ZBUFFER_SIZE); + uint32 remaining = request_size; + bool success = true; /* - * Zero-fill the file. We have to do this the hard way to ensure - * that all the file space has really been allocated, so that we - * don't later seg fault when accessing the memory mapping. 
This - * is pretty pessimal. + * Zero-fill the file. We have to do this the hard way to ensure that + * all the file space has really been allocated, so that we don't + * later seg fault when accessing the memory mapping. This is pretty + * pessimal. */ while (success && remaining > 0) { - Size goal = remaining; + Size goal = remaining; if (goal > ZBUFFER_SIZE) goal = ZBUFFER_SIZE; @@ -919,7 +920,7 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size, if (!success) { - int save_errno; + int save_errno; /* Back out what's already been done. */ save_errno = errno; @@ -931,7 +932,7 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size, ereport(elevel, (errcode_for_dynamic_shared_memory(), errmsg("could not resize shared memory segment %s to %zu bytes: %m", - name, request_size))); + name, request_size))); return false; } } @@ -946,7 +947,7 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size, return true; if (munmap(*mapped_address, *mapped_size) != 0) { - int save_errno; + int save_errno; /* Back out what's already been done. */ save_errno = errno; @@ -957,8 +958,8 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size, ereport(elevel, (errcode_for_dynamic_shared_memory(), - errmsg("could not unmap shared memory segment \"%s\": %m", - name))); + errmsg("could not unmap shared memory segment \"%s\": %m", + name))); return false; } *mapped_address = NULL; @@ -966,11 +967,11 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size, } /* Map it. */ - address = mmap(NULL, request_size, PROT_READ|PROT_WRITE, - MAP_SHARED|MAP_HASSEMAPHORE, fd, 0); + address = mmap(NULL, request_size, PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_HASSEMAPHORE, fd, 0); if (address == MAP_FAILED) { - int save_errno; + int save_errno; /* Back out what's already been done. */ save_errno = errno; @@ -1009,24 +1010,24 @@ dsm_impl_keep_segment(dsm_handle handle, void *impl_private) { #ifdef USE_DSM_WINDOWS case DSM_IMPL_WINDOWS: - { - HANDLE hmap; - - if (!DuplicateHandle(GetCurrentProcess(), impl_private, - PostmasterHandle, &hmap, 0, FALSE, - DUPLICATE_SAME_ACCESS)) { - char name[64]; - - snprintf(name, 64, "%s.%u", SEGMENT_NAME_PREFIX, handle); - _dosmaperr(GetLastError()); - ereport(ERROR, - (errcode_for_dynamic_shared_memory(), - errmsg("could not duplicate handle for \"%s\": %m", - name))); + HANDLE hmap; + + if (!DuplicateHandle(GetCurrentProcess(), impl_private, + PostmasterHandle, &hmap, 0, FALSE, + DUPLICATE_SAME_ACCESS)) + { + char name[64]; + + snprintf(name, 64, "%s.%u", SEGMENT_NAME_PREFIX, handle); + _dosmaperr(GetLastError()); + ereport(ERROR, + (errcode_for_dynamic_shared_memory(), + errmsg("could not duplicate handle for \"%s\": %m", + name))); + } + break; } - break; - } #endif default: break; diff --git a/src/backend/storage/ipc/ipc.c b/src/backend/storage/ipc/ipc.c index 5dea0ed8ddb..bd7cbeae980 100644 --- a/src/backend/storage/ipc/ipc.c +++ b/src/backend/storage/ipc/ipc.c @@ -4,7 +4,7 @@ * POSTGRES inter-process communication definitions. * * This file is misnamed, as it no longer has much of anything directly - * to do with IPC. The functionality here is concerned with managing + * to do with IPC. The functionality here is concerned with managing * exit-time cleanup for either a postmaster or a backend. * * @@ -90,7 +90,7 @@ static int on_proc_exit_index, * -cim 2/6/90 * * Unfortunately, we can't really guarantee that add-on code - * obeys the rule of not calling exit() directly. So, while + * obeys the rule of not calling exit() directly. 
So, while * this is the preferred way out of the system, we also register * an atexit callback that will make sure cleanup happens. * ---------------------------------------------------------------- @@ -109,7 +109,7 @@ proc_exit(int code) * fixed file name, each backend will overwrite earlier profiles. To * fix that, we create a separate subdirectory for each backend * (./gprof/pid) and 'cd' to that subdirectory before we exit() - that - * forces mcleanup() to write each profile into its own directory. We + * forces mcleanup() to write each profile into its own directory. We * end up with something like: $PGDATA/gprof/8829/gmon.out * $PGDATA/gprof/8845/gmon.out ... * @@ -219,16 +219,16 @@ shmem_exit(int code) /* * Call before_shmem_exit callbacks. * - * These should be things that need most of the system to still be - * up and working, such as cleanup of temp relations, which requires - * catalog access; or things that need to be completed because later - * cleanup steps depend on them, such as releasing lwlocks. + * These should be things that need most of the system to still be up and + * working, such as cleanup of temp relations, which requires catalog + * access; or things that need to be completed because later cleanup steps + * depend on them, such as releasing lwlocks. */ elog(DEBUG3, "shmem_exit(%d): %d before_shmem_exit callbacks to make", code, before_shmem_exit_index); while (--before_shmem_exit_index >= 0) (*before_shmem_exit_list[before_shmem_exit_index].function) (code, - before_shmem_exit_list[before_shmem_exit_index].arg); + before_shmem_exit_list[before_shmem_exit_index].arg); before_shmem_exit_index = 0; /* @@ -241,9 +241,9 @@ shmem_exit(int code) * callback before invoking it, so that we don't get stuck in an infinite * loop if one of those callbacks itself throws an ERROR or FATAL. * - * Note that explicitly calling this function here is quite different - * from registering it as an on_shmem_exit callback for precisely this - * reason: if one dynamic shared memory callback errors out, the remaining + * Note that explicitly calling this function here is quite different from + * registering it as an on_shmem_exit callback for precisely this reason: + * if one dynamic shared memory callback errors out, the remaining * callbacks will still be invoked. Thus, hard-coding this call puts it * equal footing with callbacks for the main shared memory segment. */ @@ -261,7 +261,7 @@ shmem_exit(int code) code, on_shmem_exit_index); while (--on_shmem_exit_index >= 0) (*on_shmem_exit_list[on_shmem_exit_index].function) (code, - on_shmem_exit_list[on_shmem_exit_index].arg); + on_shmem_exit_list[on_shmem_exit_index].arg); on_shmem_exit_index = 0; } @@ -287,7 +287,7 @@ atexit_callback(void) * on_proc_exit * * this function adds a callback function to the list of - * functions invoked by proc_exit(). -cim 2/6/90 + * functions invoked by proc_exit(). 
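The exit-callback machinery that proc_exit provides, together with the atexit safety net for code that calls exit() directly, boils down to a small LIFO registry. The sketch below is a miniature, invented version of that idea; register_on_exit, run_exit_callbacks, and the fixed-size array are illustrative names, not the ipc.c API.

/* Illustrative sketch only -- a tiny exit-callback registry. */
#include <stdio.h>
#include <stdlib.h>

#define MAX_ON_EXITS 20

typedef void (*exit_callback) (int code, void *arg);

static struct
{
	exit_callback func;
	void	   *arg;
}			on_exit_list[MAX_ON_EXITS];
static int	on_exit_index = 0;

static void
register_on_exit(exit_callback func, void *arg)
{
	if (on_exit_index >= MAX_ON_EXITS)
	{
		fprintf(stderr, "too many exit callbacks\n");
		abort();
	}
	on_exit_list[on_exit_index].func = func;
	on_exit_list[on_exit_index].arg = arg;
	on_exit_index++;
}

/* Run callbacks in reverse registration order, exactly once each. */
static void
run_exit_callbacks(int code)
{
	while (--on_exit_index >= 0)
		on_exit_list[on_exit_index].func(code, on_exit_list[on_exit_index].arg);
	on_exit_index = 0;
}

/* Safety net: if someone calls exit() directly, still run the callbacks. */
static void
atexit_hook(void)
{
	run_exit_callbacks(-1);
}

static void
report(int code, void *arg)
{
	printf("exiting with code %d: %s\n", code, (const char *) arg);
}

int
main(void)
{
	atexit(atexit_hook);
	register_on_exit(report, "close files");
	register_on_exit(report, "flush stats");
	exit(0);					/* atexit_hook runs both callbacks, LIFO */
}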
-cim 2/6/90 * ---------------------------------------------------------------- */ void @@ -380,7 +380,7 @@ cancel_before_shmem_exit(pg_on_exit_callback function, Datum arg) { if (before_shmem_exit_index > 0 && before_shmem_exit_list[before_shmem_exit_index - 1].function - == function && + == function && before_shmem_exit_list[before_shmem_exit_index - 1].arg == arg) --before_shmem_exit_index; } diff --git a/src/backend/storage/ipc/ipci.c b/src/backend/storage/ipc/ipci.c index 4290d2dc81d..1d04c5508a9 100644 --- a/src/backend/storage/ipc/ipci.c +++ b/src/backend/storage/ipc/ipci.c @@ -55,7 +55,7 @@ static bool addin_request_allowed = true; * a loadable module. * * This is only useful if called from the _PG_init hook of a library that - * is loaded into the postmaster via shared_preload_libraries. Once + * is loaded into the postmaster via shared_preload_libraries. Once * shared memory has been allocated, calls will be ignored. (We could * raise an error, but it seems better to make it a no-op, so that * libraries containing such calls can be reloaded if needed.) @@ -85,7 +85,7 @@ RequestAddinShmemSpace(Size size) * This is a bit code-wasteful and could be cleaned up.) * * If "makePrivate" is true then we only need private memory, not shared - * memory. This is true for a standalone backend, false for a postmaster. + * memory. This is true for a standalone backend, false for a postmaster. */ void CreateSharedMemoryAndSemaphores(bool makePrivate, int port) diff --git a/src/backend/storage/ipc/pmsignal.c b/src/backend/storage/ipc/pmsignal.c index 7347234d6ab..83b8d170481 100644 --- a/src/backend/storage/ipc/pmsignal.c +++ b/src/backend/storage/ipc/pmsignal.c @@ -26,9 +26,9 @@ /* * The postmaster is signaled by its children by sending SIGUSR1. The - * specific reason is communicated via flags in shared memory. We keep + * specific reason is communicated via flags in shared memory. We keep * a boolean flag for each possible "reason", so that different reasons - * can be signaled by different backends at the same time. (However, + * can be signaled by different backends at the same time. (However, * if the same reason is signaled more than once simultaneously, the * postmaster will observe it only once.) * @@ -42,7 +42,7 @@ * have three possible states: UNUSED, ASSIGNED, ACTIVE. An UNUSED slot is * available for assignment. An ASSIGNED slot is associated with a postmaster * child process, but either the process has not touched shared memory yet, - * or it has successfully cleaned up after itself. A ACTIVE slot means the + * or it has successfully cleaned up after itself. A ACTIVE slot means the * process is actively using shared memory. The slots are assigned to * child processes at random, and postmaster.c is responsible for tracking * which one goes with which PID. diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c index ac32d5cb625..cdd92d99a22 100644 --- a/src/backend/storage/ipc/procarray.c +++ b/src/backend/storage/ipc/procarray.c @@ -19,11 +19,11 @@ * * During hot standby, we also keep a list of XIDs representing transactions * that are known to be running in the master (or more precisely, were running - * as of the current point in the WAL stream). This list is kept in the + * as of the current point in the WAL stream). This list is kept in the * KnownAssignedXids array, and is updated by watching the sequence of * arriving XIDs. 
This is necessary because if we leave those XIDs out of * snapshots taken for standby queries, then they will appear to be already - * complete, leading to MVCC failures. Note that in hot standby, the PGPROC + * complete, leading to MVCC failures. Note that in hot standby, the PGPROC * array represents standby processes, which by definition are not running * transactions that have XIDs. * @@ -276,7 +276,7 @@ ProcArrayAdd(PGPROC *proc) if (arrayP->numProcs >= arrayP->maxProcs) { /* - * Ooops, no room. (This really shouldn't happen, since there is a + * Ooops, no room. (This really shouldn't happen, since there is a * fixed supply of PGPROC structs too, and so we should have failed * earlier.) */ @@ -686,7 +686,7 @@ ProcArrayApplyRecoveryInfo(RunningTransactions running) ExtendSUBTRANS(latestObservedXid); TransactionIdAdvance(latestObservedXid); } - TransactionIdRetreat(latestObservedXid); /* = running->nextXid - 1 */ + TransactionIdRetreat(latestObservedXid); /* = running->nextXid - 1 */ /* ---------- * Now we've got the running xids we need to set the global values that @@ -733,7 +733,7 @@ ProcArrayApplyRecoveryInfo(RunningTransactions running) * ShmemVariableCache->nextXid must be beyond any observed xid. * * We don't expect anyone else to modify nextXid, hence we don't need to - * hold a lock while examining it. We still acquire the lock to modify + * hold a lock while examining it. We still acquire the lock to modify * it, though. */ nextXid = latestObservedXid; @@ -1485,7 +1485,7 @@ GetSnapshotData(Snapshot snapshot) * do that much work while holding the ProcArrayLock. * * The other backend can add more subxids concurrently, but cannot - * remove any. Hence it's important to fetch nxids just once. + * remove any. Hence it's important to fetch nxids just once. * Should be safe to use memcpy, though. (We needn't worry about * missing any xids added concurrently, because they must postdate * xmax.) @@ -2153,7 +2153,7 @@ BackendPidGetProc(int pid) * Only main transaction Ids are considered. This function is mainly * useful for determining what backend owns a lock. * - * Beware that not every xact has an XID assigned. However, as long as you + * Beware that not every xact has an XID assigned. However, as long as you * only call this using an XID found on disk, you're safe. */ int @@ -2217,7 +2217,7 @@ IsBackendPid(int pid) * some snapshot we have. Since we examine the procarray with only shared * lock, there are race conditions: a backend could set its xmin just after * we look. Indeed, on multiprocessors with weak memory ordering, the - * other backend could have set its xmin *before* we look. We know however + * other backend could have set its xmin *before* we look. We know however * that such a backend must have held shared ProcArrayLock overlapping our * own hold of ProcArrayLock, else we would see its xmin update. Therefore, * any snapshot the other backend is taking concurrently with our scan cannot @@ -2723,7 +2723,7 @@ ProcArrayGetReplicationSlotXmin(TransactionId *xmin, * XidCacheRemoveRunningXids * * Remove a bunch of TransactionIds from the list of known-running - * subtransactions for my backend. Both the specified xid and those in + * subtransactions for my backend. Both the specified xid and those in * the xids[] array (of length nxids) are removed from the subxids cache. * latestXid must be the latest XID among the group. 
*/ @@ -2829,7 +2829,7 @@ DisplayXidCache(void) * treated as running by standby transactions, even though they are not in * the standby server's PGXACT array. * - * We record all XIDs that we know have been assigned. That includes all the + * We record all XIDs that we know have been assigned. That includes all the * XIDs seen in WAL records, plus all unobserved XIDs that we can deduce have * been assigned. We can deduce the existence of unobserved XIDs because we * know XIDs are assigned in sequence, with no gaps. The KnownAssignedXids @@ -2838,7 +2838,7 @@ DisplayXidCache(void) * * During hot standby we do not fret too much about the distinction between * top-level XIDs and subtransaction XIDs. We store both together in the - * KnownAssignedXids list. In backends, this is copied into snapshots in + * KnownAssignedXids list. In backends, this is copied into snapshots in * GetSnapshotData(), taking advantage of the fact that XidInMVCCSnapshot() * doesn't care about the distinction either. Subtransaction XIDs are * effectively treated as top-level XIDs and in the typical case pg_subtrans @@ -3053,14 +3053,14 @@ ExpireOldKnownAssignedTransactionIds(TransactionId xid) * must hold shared ProcArrayLock to examine the array. To remove XIDs from * the array, the startup process must hold ProcArrayLock exclusively, for * the usual transactional reasons (compare commit/abort of a transaction - * during normal running). Compressing unused entries out of the array + * during normal running). Compressing unused entries out of the array * likewise requires exclusive lock. To add XIDs to the array, we just insert * them into slots to the right of the head pointer and then advance the head * pointer. This wouldn't require any lock at all, except that on machines * with weak memory ordering we need to be careful that other processors * see the array element changes before they see the head pointer change. * We handle this by using a spinlock to protect reads and writes of the - * head/tail pointers. (We could dispense with the spinlock if we were to + * head/tail pointers. (We could dispense with the spinlock if we were to * create suitable memory access barrier primitives and use those instead.) * The spinlock must be taken to read or write the head/tail pointers unless * the caller holds ProcArrayLock exclusively. @@ -3157,7 +3157,7 @@ KnownAssignedXidsCompress(bool force) * If exclusive_lock is true then caller already holds ProcArrayLock in * exclusive mode, so we need no extra locking here. Else caller holds no * lock, so we need to be sure we maintain sufficient interlocks against - * concurrent readers. (Only the startup process ever calls this, so no need + * concurrent readers. (Only the startup process ever calls this, so no need * to worry about concurrent writers.) */ static void @@ -3203,7 +3203,7 @@ KnownAssignedXidsAdd(TransactionId from_xid, TransactionId to_xid, Assert(tail >= 0 && tail < pArray->maxKnownAssignedXids); /* - * Verify that insertions occur in TransactionId sequence. Note that even + * Verify that insertions occur in TransactionId sequence. Note that even * if the last existing element is marked invalid, it must still have a * correctly sequenced XID value. */ @@ -3306,7 +3306,7 @@ KnownAssignedXidsSearch(TransactionId xid, bool remove) } /* - * Standard binary search. Note we can ignore the KnownAssignedXidsValid + * Standard binary search. Note we can ignore the KnownAssignedXidsValid * array here, since even invalid entries will contain sorted XIDs. 
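The KnownAssignedXidsSearch hunk above points out that the binary search can ignore the KnownAssignedXidsValid flags because even invalidated slots still hold correctly ordered XIDs. A simplified standalone sketch of that search (flat rather than circular window, plain integer comparison instead of TransactionIdPrecedes, and the struct is an illustrative stand-in):

#include <stdbool.h>
#include <stdint.h>

typedef uint32_t TransactionId;

#define MAX_KNOWN_XIDS 1024

typedef struct
{
    TransactionId   xids[MAX_KNOWN_XIDS];   /* sorted between tail and head */
    bool            valid[MAX_KNOWN_XIDS];  /* false = lazily removed */
    int             tail;                   /* oldest slot in use */
    int             head;                   /* one past the newest slot */
} KnownXids;

static bool
known_xid_present(const KnownXids *kx, TransactionId xid)
{
    int first = kx->tail;
    int last = kx->head - 1;

    while (first <= last)
    {
        int mid = (first + last) / 2;

        if (xid < kx->xids[mid])
            last = mid - 1;
        else if (xid > kx->xids[mid])
            first = mid + 1;
        else
            return kx->valid[mid];  /* found the slot; only now check validity */
    }
    return false;
}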
*/ first = tail; diff --git a/src/backend/storage/ipc/procsignal.c b/src/backend/storage/ipc/procsignal.c index 6526b2688a8..cd9a287efec 100644 --- a/src/backend/storage/ipc/procsignal.c +++ b/src/backend/storage/ipc/procsignal.c @@ -64,7 +64,7 @@ typedef struct * Spurious wakeups must be expected. Make sure that the flag is cleared * in the error path. */ -bool set_latch_on_sigusr1; +bool set_latch_on_sigusr1; static ProcSignalSlot *ProcSignalSlots = NULL; static volatile ProcSignalSlot *MyProcSignalSlot = NULL; diff --git a/src/backend/storage/ipc/shm_mq.c b/src/backend/storage/ipc/shm_mq.c index 4f7dd9c4ef1..6f9c3a3b6c2 100644 --- a/src/backend/storage/ipc/shm_mq.c +++ b/src/backend/storage/ipc/shm_mq.c @@ -142,7 +142,7 @@ static shm_mq_result shm_mq_send_bytes(shm_mq_handle *mq, Size nbytes, void *data, bool nowait, Size *bytes_written); static shm_mq_result shm_mq_receive_bytes(shm_mq *mq, Size bytes_needed, bool nowait, Size *nbytesp, void **datap); -static bool shm_mq_wait_internal(volatile shm_mq *mq, PGPROC * volatile *ptr, +static bool shm_mq_wait_internal(volatile shm_mq *mq, PGPROC *volatile * ptr, BackgroundWorkerHandle *handle); static uint64 shm_mq_get_bytes_read(volatile shm_mq *mq, bool *detached); static void shm_mq_inc_bytes_read(volatile shm_mq *mq, Size n); @@ -152,8 +152,8 @@ static shm_mq_result shm_mq_notify_receiver(volatile shm_mq *mq); static void shm_mq_detach_callback(dsm_segment *seg, Datum arg); /* Minimum queue size is enough for header and at least one chunk of data. */ -const Size shm_mq_minimum_size = - MAXALIGN(offsetof(shm_mq, mq_ring)) + MAXIMUM_ALIGNOF; +const Size shm_mq_minimum_size = +MAXALIGN(offsetof(shm_mq, mq_ring)) + MAXIMUM_ALIGNOF; #define MQH_INITIAL_BUFSIZE 8192 @@ -193,7 +193,7 @@ void shm_mq_set_receiver(shm_mq *mq, PGPROC *proc) { volatile shm_mq *vmq = mq; - PGPROC *sender; + PGPROC *sender; SpinLockAcquire(&mq->mq_mutex); Assert(vmq->mq_receiver == NULL); @@ -212,7 +212,7 @@ void shm_mq_set_sender(shm_mq *mq, PGPROC *proc) { volatile shm_mq *vmq = mq; - PGPROC *receiver; + PGPROC *receiver; SpinLockAcquire(&mq->mq_mutex); Assert(vmq->mq_sender == NULL); @@ -231,7 +231,7 @@ PGPROC * shm_mq_get_receiver(shm_mq *mq) { volatile shm_mq *vmq = mq; - PGPROC *receiver; + PGPROC *receiver; SpinLockAcquire(&mq->mq_mutex); receiver = vmq->mq_receiver; @@ -247,7 +247,7 @@ PGPROC * shm_mq_get_sender(shm_mq *mq) { volatile shm_mq *vmq = mq; - PGPROC *sender; + PGPROC *sender; SpinLockAcquire(&mq->mq_mutex); sender = vmq->mq_sender; @@ -280,7 +280,7 @@ shm_mq_get_sender(shm_mq *mq) shm_mq_handle * shm_mq_attach(shm_mq *mq, dsm_segment *seg, BackgroundWorkerHandle *handle) { - shm_mq_handle *mqh = palloc(sizeof(shm_mq_handle)); + shm_mq_handle *mqh = palloc(sizeof(shm_mq_handle)); Assert(mq->mq_receiver == MyProc || mq->mq_sender == MyProc); mqh->mqh_queue = mq; @@ -317,9 +317,9 @@ shm_mq_attach(shm_mq *mq, dsm_segment *seg, BackgroundWorkerHandle *handle) shm_mq_result shm_mq_send(shm_mq_handle *mqh, Size nbytes, void *data, bool nowait) { - shm_mq_result res; - shm_mq *mq = mqh->mqh_queue; - Size bytes_written; + shm_mq_result res; + shm_mq *mq = mqh->mqh_queue; + Size bytes_written; Assert(mq->mq_sender == MyProc); @@ -328,7 +328,7 @@ shm_mq_send(shm_mq_handle *mqh, Size nbytes, void *data, bool nowait) { Assert(mqh->mqh_partial_bytes < sizeof(Size)); res = shm_mq_send_bytes(mqh, sizeof(Size) - mqh->mqh_partial_bytes, - ((char *) &nbytes) + mqh->mqh_partial_bytes, + ((char *) &nbytes) +mqh->mqh_partial_bytes, nowait, &bytes_written); 
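The shm_mq_send hunk above shows each message entering the queue as a length word followed by the payload, with the length word itself possibly sent in pieces when the ring is nearly full. A standalone sketch of just the framing layout that sender and receiver agree on (frame_message and this alignment macro are local stand-ins, and partial sends are not modelled):

#include <stddef.h>
#include <string.h>

#define MY_ALIGNOF 8
#define MY_MAXALIGN(len) \
    (((size_t) (len) + (MY_ALIGNOF - 1)) & ~((size_t) (MY_ALIGNOF - 1)))

/*
 * Append one framed message to buf: a size_t length word, then the
 * payload, each padded so every read and write in the ring stays
 * aligned.  Returns the number of bytes appended.
 */
static size_t
frame_message(char *buf, const void *data, size_t nbytes)
{
    size_t used = 0;

    memcpy(buf, &nbytes, sizeof(size_t));   /* length word */
    used += MY_MAXALIGN(sizeof(size_t));

    memcpy(buf + used, data, nbytes);       /* payload */
    used += MY_MAXALIGN(nbytes);            /* padding bytes are don't-care */

    return used;
}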
mqh->mqh_partial_bytes += bytes_written; if (res != SHM_MQ_SUCCESS) @@ -390,11 +390,11 @@ shm_mq_send(shm_mq_handle *mqh, Size nbytes, void *data, bool nowait) shm_mq_result shm_mq_receive(shm_mq_handle *mqh, Size *nbytesp, void **datap, bool nowait) { - shm_mq *mq = mqh->mqh_queue; - shm_mq_result res; - Size rb = 0; - Size nbytes; - void *rawdata; + shm_mq *mq = mqh->mqh_queue; + shm_mq_result res; + Size rb = 0; + Size nbytes; + void *rawdata; Assert(mq->mq_receiver == MyProc); @@ -439,18 +439,19 @@ shm_mq_receive(shm_mq_handle *mqh, Size *nbytesp, void **datap, bool nowait) */ if (mqh->mqh_partial_bytes == 0 && rb >= sizeof(Size)) { - Size needed; + Size needed; - nbytes = * (Size *) rawdata; + nbytes = *(Size *) rawdata; /* If we've already got the whole message, we're done. */ needed = MAXALIGN(sizeof(Size)) + MAXALIGN(nbytes); if (rb >= needed) { /* - * Technically, we could consume the message length information - * at this point, but the extra write to shared memory wouldn't - * be free and in most cases we would reap no benefit. + * Technically, we could consume the message length + * information at this point, but the extra write to shared + * memory wouldn't be free and in most cases we would reap no + * benefit. */ mqh->mqh_consume_pending = needed; *nbytesp = nbytes; @@ -469,7 +470,7 @@ shm_mq_receive(shm_mq_handle *mqh, Size *nbytesp, void **datap, bool nowait) } else { - Size lengthbytes; + Size lengthbytes; /* Can't be split unless bigger than required alignment. */ Assert(sizeof(Size) > MAXIMUM_ALIGNOF); @@ -498,7 +499,7 @@ shm_mq_receive(shm_mq_handle *mqh, Size *nbytesp, void **datap, bool nowait) if (mqh->mqh_partial_bytes >= sizeof(Size)) { Assert(mqh->mqh_partial_bytes == sizeof(Size)); - mqh->mqh_expected_bytes = * (Size *) mqh->mqh_buffer; + mqh->mqh_expected_bytes = *(Size *) mqh->mqh_buffer; mqh->mqh_length_word_complete = true; mqh->mqh_partial_bytes = 0; } @@ -527,12 +528,12 @@ shm_mq_receive(shm_mq_handle *mqh, Size *nbytesp, void **datap, bool nowait) /* * The message has wrapped the buffer. We'll need to copy it in order - * to return it to the client in one chunk. First, make sure we have a - * large enough buffer available. + * to return it to the client in one chunk. First, make sure we have + * a large enough buffer available. */ if (mqh->mqh_buflen < nbytes) { - Size newbuflen = Max(mqh->mqh_buflen, MQH_INITIAL_BUFSIZE); + Size newbuflen = Max(mqh->mqh_buflen, MQH_INITIAL_BUFSIZE); while (newbuflen < nbytes) newbuflen *= 2; @@ -551,7 +552,7 @@ shm_mq_receive(shm_mq_handle *mqh, Size *nbytesp, void **datap, bool nowait) /* Loop until we've copied the entire message. */ for (;;) { - Size still_needed; + Size still_needed; /* Copy as much as we can. */ Assert(mqh->mqh_partial_bytes + rb <= nbytes); @@ -559,10 +560,10 @@ shm_mq_receive(shm_mq_handle *mqh, Size *nbytesp, void **datap, bool nowait) mqh->mqh_partial_bytes += rb; /* - * Update count of bytes read, with alignment padding. Note - * that this will never actually insert any padding except at the - * end of a message, because the buffer size is a multiple of - * MAXIMUM_ALIGNOF, and each read and write is as well. + * Update count of bytes read, with alignment padding. Note that this + * will never actually insert any padding except at the end of a + * message, because the buffer size is a multiple of MAXIMUM_ALIGNOF, + * and each read and write is as well. 
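The shm_mq_receive hunk above handles a message that wraps around the ring by assembling it into a private buffer, grown by doubling from an initial size until the whole message fits. A standalone sketch of that sizing rule (Reassembly, ensure_buffer and INITIAL_BUFSIZE are illustrative; the real code uses palloc/pfree in the queue's memory context):

#include <stdlib.h>

#define INITIAL_BUFSIZE 8192

typedef struct
{
    char   *buf;
    size_t  buflen;
} Reassembly;

static void
ensure_buffer(Reassembly *r, size_t nbytes)
{
    size_t newlen;

    if (r->buflen >= nbytes)
        return;

    newlen = r->buflen > INITIAL_BUFSIZE ? r->buflen : INITIAL_BUFSIZE;
    while (newlen < nbytes)
        newlen *= 2;                /* double until the message fits */

    free(r->buf);                   /* nothing has been copied yet at this point */
    r->buf = malloc(newlen);        /* error handling omitted in this sketch */
    r->buflen = newlen;
}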
*/ Assert(mqh->mqh_partial_bytes == nbytes || rb == MAXALIGN(rb)); shm_mq_inc_bytes_read(mq, MAXALIGN(rb)); @@ -601,7 +602,7 @@ shm_mq_result shm_mq_wait_for_attach(shm_mq_handle *mqh) { shm_mq *mq = mqh->mqh_queue; - PGPROC **victim; + PGPROC **victim; if (shm_mq_get_receiver(mq) == MyProc) victim = &mq->mq_sender; @@ -663,8 +664,8 @@ shm_mq_send_bytes(shm_mq_handle *mqh, Size nbytes, void *data, bool nowait, while (sent < nbytes) { - bool detached; - uint64 rb; + bool detached; + uint64 rb; /* Compute number of ring buffer bytes used and available. */ rb = shm_mq_get_bytes_read(mq, &detached); @@ -679,7 +680,7 @@ shm_mq_send_bytes(shm_mq_handle *mqh, Size nbytes, void *data, bool nowait, if (available == 0) { - shm_mq_result res; + shm_mq_result res; /* * The queue is full, so if the receiver isn't yet known to be @@ -717,11 +718,11 @@ shm_mq_send_bytes(shm_mq_handle *mqh, Size nbytes, void *data, bool nowait, } /* - * Wait for our latch to be set. It might already be set for - * some unrelated reason, but that'll just result in one extra - * trip through the loop. It's worth it to avoid resetting the - * latch at top of loop, because setting an already-set latch is - * much cheaper than setting one that has been reset. + * Wait for our latch to be set. It might already be set for some + * unrelated reason, but that'll just result in one extra trip + * through the loop. It's worth it to avoid resetting the latch + * at top of loop, because setting an already-set latch is much + * cheaper than setting one that has been reset. */ WaitLatch(&MyProc->procLatch, WL_LATCH_SET, 0); @@ -733,8 +734,8 @@ shm_mq_send_bytes(shm_mq_handle *mqh, Size nbytes, void *data, bool nowait, } else { - Size offset = mq->mq_bytes_written % (uint64) ringsize; - Size sendnow = Min(available, ringsize - offset); + Size offset = mq->mq_bytes_written % (uint64) ringsize; + Size sendnow = Min(available, ringsize - offset); /* Write as much data as we can via a single memcpy(). */ memcpy(&mq->mq_ring[mq->mq_ring_offset + offset], @@ -751,9 +752,9 @@ shm_mq_send_bytes(shm_mq_handle *mqh, Size nbytes, void *data, bool nowait, shm_mq_inc_bytes_written(mq, MAXALIGN(sendnow)); /* - * For efficiency, we don't set the reader's latch here. We'll - * do that only when the buffer fills up or after writing an - * entire message. + * For efficiency, we don't set the reader's latch here. We'll do + * that only when the buffer fills up or after writing an entire + * message. */ } } @@ -801,10 +802,10 @@ shm_mq_receive_bytes(shm_mq *mq, Size bytes_needed, bool nowait, /* * Fall out before waiting if the queue has been detached. * - * Note that we don't check for this until *after* considering - * whether the data already available is enough, since the - * receiver can finish receiving a message stored in the buffer - * even after the sender has detached. + * Note that we don't check for this until *after* considering whether + * the data already available is enough, since the receiver can finish + * receiving a message stored in the buffer even after the sender has + * detached. */ if (detached) return SHM_MQ_DETACHED; @@ -814,11 +815,11 @@ shm_mq_receive_bytes(shm_mq *mq, Size bytes_needed, bool nowait, return SHM_MQ_WOULD_BLOCK; /* - * Wait for our latch to be set. It might already be set for - * some unrelated reason, but that'll just result in one extra - * trip through the loop. 
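The shm_mq_send_bytes hunk above writes each chunk with a single memcpy, capping it both by the free space in the ring and by the distance to the physical end of the ring; the byte counters only ever increase. A standalone single-producer sketch of that chunk computation (Ring and ring_write are illustrative; locking, latches and alignment padding are left out):

#include <stdint.h>
#include <string.h>

typedef struct
{
    char       *ring;
    size_t      ringsize;
    uint64_t    bytes_written;  /* total ever written, never wraps back */
    uint64_t    bytes_read;     /* total ever consumed */
} Ring;

static size_t
ring_write(Ring *rb, const char *data, size_t nbytes)
{
    uint64_t used = rb->bytes_written - rb->bytes_read;
    size_t   available = rb->ringsize - (size_t) used;
    size_t   offset = (size_t) (rb->bytes_written % rb->ringsize);
    size_t   sendnow;

    sendnow = nbytes < available ? nbytes : available;
    if (sendnow > rb->ringsize - offset)
        sendnow = rb->ringsize - offset;    /* one contiguous copy only */

    memcpy(rb->ring + offset, data, sendnow);
    rb->bytes_written += sendnow;
    return sendnow;         /* 0 means full: the real sender waits on its latch */
}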
It's worth it to avoid resetting the - * latch at top of loop, because setting an already-set latch is - * much cheaper than setting one that has been reset. + * Wait for our latch to be set. It might already be set for some + * unrelated reason, but that'll just result in one extra trip through + * the loop. It's worth it to avoid resetting the latch at top of + * loop, because setting an already-set latch is much cheaper than + * setting one that has been reset. */ WaitLatch(&MyProc->procLatch, WL_LATCH_SET, 0); @@ -842,11 +843,11 @@ shm_mq_receive_bytes(shm_mq *mq, Size bytes_needed, bool nowait, * non-NULL when our counterpart attaches to the queue. */ static bool -shm_mq_wait_internal(volatile shm_mq *mq, PGPROC * volatile *ptr, +shm_mq_wait_internal(volatile shm_mq *mq, PGPROC *volatile * ptr, BackgroundWorkerHandle *handle) { - bool save_set_latch_on_sigusr1; - bool result = false; + bool save_set_latch_on_sigusr1; + bool result = false; save_set_latch_on_sigusr1 = set_latch_on_sigusr1; if (handle != NULL) @@ -856,9 +857,9 @@ shm_mq_wait_internal(volatile shm_mq *mq, PGPROC * volatile *ptr, { for (;;) { - BgwHandleStatus status; - pid_t pid; - bool detached; + BgwHandleStatus status; + pid_t pid; + bool detached; /* Acquire the lock just long enough to check the pointer. */ SpinLockAcquire(&mq->mq_mutex); @@ -913,7 +914,7 @@ shm_mq_wait_internal(volatile shm_mq *mq, PGPROC * volatile *ptr, static uint64 shm_mq_get_bytes_read(volatile shm_mq *mq, bool *detached) { - uint64 v; + uint64 v; SpinLockAcquire(&mq->mq_mutex); v = mq->mq_bytes_read; @@ -948,7 +949,7 @@ shm_mq_inc_bytes_read(volatile shm_mq *mq, Size n) static uint64 shm_mq_get_bytes_written(volatile shm_mq *mq, bool *detached) { - uint64 v; + uint64 v; SpinLockAcquire(&mq->mq_mutex); v = mq->mq_bytes_written; @@ -975,8 +976,8 @@ shm_mq_inc_bytes_written(volatile shm_mq *mq, Size n) static shm_mq_result shm_mq_notify_receiver(volatile shm_mq *mq) { - PGPROC *receiver; - bool detached; + PGPROC *receiver; + bool detached; SpinLockAcquire(&mq->mq_mutex); detached = mq->mq_detached; diff --git a/src/backend/storage/ipc/shm_toc.c b/src/backend/storage/ipc/shm_toc.c index e4e007b97a9..820b12e12c6 100644 --- a/src/backend/storage/ipc/shm_toc.c +++ b/src/backend/storage/ipc/shm_toc.c @@ -19,17 +19,17 @@ typedef struct shm_toc_entry { - uint64 key; /* Arbitrary identifier */ - uint64 offset; /* Bytes offset */ + uint64 key; /* Arbitrary identifier */ + uint64 offset; /* Bytes offset */ } shm_toc_entry; struct shm_toc { - uint64 toc_magic; /* Magic number for this TOC */ - slock_t toc_mutex; /* Spinlock for mutual exclusion */ - Size toc_total_bytes; /* Bytes managed by this TOC */ + uint64 toc_magic; /* Magic number for this TOC */ + slock_t toc_mutex; /* Spinlock for mutual exclusion */ + Size toc_total_bytes; /* Bytes managed by this TOC */ Size toc_allocated_bytes; /* Bytes allocated of those managed */ - Size toc_nentry; /* Number of entries in TOC */ + Size toc_nentry; /* Number of entries in TOC */ shm_toc_entry toc_entry[FLEXIBLE_ARRAY_MEMBER]; }; @@ -39,7 +39,7 @@ struct shm_toc shm_toc * shm_toc_create(uint64 magic, void *address, Size nbytes) { - shm_toc *toc = (shm_toc *) address; + shm_toc *toc = (shm_toc *) address; Assert(nbytes > offsetof(shm_toc, toc_entry)); toc->toc_magic = magic; @@ -58,7 +58,7 @@ shm_toc_create(uint64 magic, void *address, Size nbytes) extern shm_toc * shm_toc_attach(uint64 magic, void *address) { - shm_toc *toc = (shm_toc *) address; + shm_toc *toc = (shm_toc *) address; if (toc->toc_magic != 
magic) return NULL; @@ -96,7 +96,7 @@ shm_toc_allocate(shm_toc *toc, Size nbytes) total_bytes = vtoc->toc_total_bytes; allocated_bytes = vtoc->toc_allocated_bytes; nentry = vtoc->toc_nentry; - toc_bytes = offsetof(shm_toc, toc_entry) + nentry * sizeof(shm_toc_entry) + toc_bytes = offsetof(shm_toc, toc_entry) +nentry * sizeof(shm_toc_entry) + allocated_bytes; /* Check for memory exhaustion and overflow. */ @@ -132,7 +132,7 @@ shm_toc_freespace(shm_toc *toc) nentry = vtoc->toc_nentry; SpinLockRelease(&toc->toc_mutex); - toc_bytes = offsetof(shm_toc, toc_entry) + nentry * sizeof(shm_toc_entry); + toc_bytes = offsetof(shm_toc, toc_entry) +nentry * sizeof(shm_toc_entry); Assert(allocated_bytes + BUFFERALIGN(toc_bytes) <= total_bytes); return total_bytes - (allocated_bytes + BUFFERALIGN(toc_bytes)); } @@ -176,7 +176,7 @@ shm_toc_insert(shm_toc *toc, uint64 key, void *address) total_bytes = vtoc->toc_total_bytes; allocated_bytes = vtoc->toc_allocated_bytes; nentry = vtoc->toc_nentry; - toc_bytes = offsetof(shm_toc, toc_entry) + nentry * sizeof(shm_toc_entry) + toc_bytes = offsetof(shm_toc, toc_entry) +nentry * sizeof(shm_toc_entry) + allocated_bytes; /* Check for memory exhaustion and overflow. */ @@ -241,6 +241,6 @@ Size shm_toc_estimate(shm_toc_estimator *e) { return add_size(offsetof(shm_toc, toc_entry), - add_size(mul_size(e->number_of_keys, sizeof(shm_toc_entry)), - e->space_for_chunks)); + add_size(mul_size(e->number_of_keys, sizeof(shm_toc_entry)), + e->space_for_chunks)); } diff --git a/src/backend/storage/ipc/shmem.c b/src/backend/storage/ipc/shmem.c index 1d27a89bdd1..2ea2216a65c 100644 --- a/src/backend/storage/ipc/shmem.c +++ b/src/backend/storage/ipc/shmem.c @@ -26,7 +26,7 @@ * for a module and should never be allocated after the shared memory * initialization phase. Hash tables have a fixed maximum size, but * their actual size can vary dynamically. When entries are added - * to the table, more space is allocated. Queues link data structures + * to the table, more space is allocated. Queues link data structures * that have been allocated either within fixed-size structures or as hash * buckets. Each shared data structure has a string name to identify * it (assigned in the module that declares it). @@ -40,7 +40,7 @@ * The shmem index has two purposes: first, it gives us * a simple model of how the world looks when a backend process * initializes. If something is present in the shmem index, - * it is initialized. If it is not, it is uninitialized. Second, + * it is initialized. If it is not, it is uninitialized. Second, * the shmem index allows us to allocate shared memory on demand * instead of trying to preallocate structures and hard-wire the * sizes and locations in header files. If you are using a lot @@ -55,8 +55,8 @@ * pointers using the method described in (b) above. * * (d) memory allocation model: shared memory can never be - * freed, once allocated. Each hash table has its own free list, - * so hash buckets can be reused when an item is deleted. However, + * freed, once allocated. Each hash table has its own free list, + * so hash buckets can be reused when an item is deleted. However, * if one hash table grows very large and then shrinks, its space * cannot be redistributed to other tables. We could build a simple * hash bucket garbage collector if need be. 
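The shm_toc_allocate and shm_toc_freespace hunks above both compute the space consumed by the TOC header plus its entry array and subtract that, together with the data already carved off the back of the segment, from the total. A standalone sketch of that accounting (Toc, TocEntry and the alignment constant are stand-ins for the real shm_toc definitions):

#include <stddef.h>
#include <stdint.h>

#define ALIGN_UP(len)  (((size_t) (len) + 127) & ~((size_t) 127))

typedef struct
{
    uint64_t    key;
    uint64_t    offset;
} TocEntry;

typedef struct
{
    size_t      total_bytes;        /* size of the whole segment */
    size_t      allocated_bytes;    /* data carved from the back so far */
    size_t      nentry;             /* entries used at the front */
    TocEntry    entry[];            /* grows toward the allocations */
} Toc;

/* Bytes still unclaimed between the entry array and the allocations. */
static size_t
toc_freespace(const Toc *toc)
{
    size_t toc_bytes = offsetof(Toc, entry) + toc->nentry * sizeof(TocEntry);

    return toc->total_bytes - (toc->allocated_bytes + ALIGN_UP(toc_bytes));
}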
Right now, it seems @@ -232,7 +232,7 @@ InitShmemIndex(void) * * Since ShmemInitHash calls ShmemInitStruct, which expects the ShmemIndex * hashtable to exist already, we have a bit of a circularity problem in - * initializing the ShmemIndex itself. The special "ShmemIndex" hash + * initializing the ShmemIndex itself. The special "ShmemIndex" hash * table name will tell ShmemInitStruct to fake it. */ info.keysize = SHMEM_INDEX_KEYSIZE; @@ -309,7 +309,7 @@ ShmemInitHash(const char *name, /* table string name for shmem index */ * ShmemInitStruct -- Create/attach to a structure in shared memory. * * This is called during initialization to find or allocate - * a data structure in shared memory. If no other process + * a data structure in shared memory. If no other process * has created the structure, this routine allocates space * for it. If it exists already, a pointer to the existing * structure is returned. @@ -318,7 +318,7 @@ ShmemInitHash(const char *name, /* table string name for shmem index */ * already in the shmem index (hence, already initialized). * * Note: before Postgres 9.0, this function returned NULL for some failure - * cases. Now, it always throws error instead, so callers need not check + * cases. Now, it always throws error instead, so callers need not check * for NULL. */ void * @@ -350,7 +350,7 @@ ShmemInitStruct(const char *name, Size size, bool *foundPtr) * be trying to init the shmem index itself. * * Notice that the ShmemIndexLock is released before the shmem - * index has been initialized. This should be OK because no other + * index has been initialized. This should be OK because no other * process can be accessing shared memory yet. */ Assert(shmemseghdr->index == NULL); diff --git a/src/backend/storage/ipc/shmqueue.c b/src/backend/storage/ipc/shmqueue.c index 872c63f0ec6..708ccea208c 100644 --- a/src/backend/storage/ipc/shmqueue.c +++ b/src/backend/storage/ipc/shmqueue.c @@ -14,7 +14,7 @@ * * Package for managing doubly-linked lists in shared memory. * The only tricky thing is that SHM_QUEUE will usually be a field - * in a larger record. SHMQueueNext has to return a pointer + * in a larger record. SHMQueueNext has to return a pointer * to the record itself instead of a pointer to the SHMQueue field * of the record. It takes an extra parameter and does some extra * pointer arithmetic to do this correctly. diff --git a/src/backend/storage/ipc/sinval.c b/src/backend/storage/ipc/sinval.c index 264f700207c..d7d040628c7 100644 --- a/src/backend/storage/ipc/sinval.c +++ b/src/backend/storage/ipc/sinval.c @@ -29,7 +29,7 @@ uint64 SharedInvalidMessageCounter; * Because backends sitting idle will not be reading sinval events, we * need a way to give an idle backend a swift kick in the rear and make * it catch up before the sinval queue overflows and forces it to go - * through a cache reset exercise. This is done by sending + * through a cache reset exercise. This is done by sending * PROCSIG_CATCHUP_INTERRUPT to any backend that gets too far behind. * * State for catchup events consists of two flags: one saying whether @@ -68,7 +68,7 @@ SendSharedInvalidMessages(const SharedInvalidationMessage *msgs, int n) * NOTE: it is entirely possible for this routine to be invoked recursively * as a consequence of processing inside the invalFunction or resetFunction. * Furthermore, such a recursive call must guarantee that all outstanding - * inval messages have been processed before it exits. This is the reason + * inval messages have been processed before it exits. 
This is the reason * for the strange-looking choice to use a statically allocated buffer array * and counters; it's so that a recursive call can process messages already * sucked out of sinvaladt.c. @@ -137,7 +137,7 @@ ReceiveSharedInvalidMessages( * We are now caught up. If we received a catchup signal, reset that * flag, and call SICleanupQueue(). This is not so much because we need * to flush dead messages right now, as that we want to pass on the - * catchup signal to the next slowest backend. "Daisy chaining" the + * catchup signal to the next slowest backend. "Daisy chaining" the * catchup signal this way avoids creating spikes in system load for what * should be just a background maintenance activity. */ @@ -157,7 +157,7 @@ ReceiveSharedInvalidMessages( * * If we are idle (catchupInterruptEnabled is set), we can safely * invoke ProcessCatchupEvent directly. Otherwise, just set a flag - * to do it later. (Note that it's quite possible for normal processing + * to do it later. (Note that it's quite possible for normal processing * of the current transaction to cause ReceiveSharedInvalidMessages() * to be run later on; in that case the flag will get cleared again, * since there's no longer any reason to do anything.) @@ -233,7 +233,7 @@ HandleCatchupInterrupt(void) * EnableCatchupInterrupt * * This is called by the PostgresMain main loop just before waiting - * for a frontend command. We process any pending catchup events, + * for a frontend command. We process any pending catchup events, * and enable the signal handler to process future events directly. * * NOTE: the signal handler starts out disabled, and stays so until @@ -278,7 +278,7 @@ EnableCatchupInterrupt(void) * DisableCatchupInterrupt * * This is called by the PostgresMain main loop just after receiving - * a frontend command. Signal handler execution of catchup events + * a frontend command. Signal handler execution of catchup events * is disabled until the next EnableCatchupInterrupt call. * * The PROCSIG_NOTIFY_INTERRUPT signal handler also needs to call this, diff --git a/src/backend/storage/ipc/sinvaladt.c b/src/backend/storage/ipc/sinvaladt.c index e6805d96b1c..0328660b83e 100644 --- a/src/backend/storage/ipc/sinvaladt.c +++ b/src/backend/storage/ipc/sinvaladt.c @@ -46,7 +46,7 @@ * In reality, the messages are stored in a circular buffer of MAXNUMMESSAGES * entries. We translate MsgNum values into circular-buffer indexes by * computing MsgNum % MAXNUMMESSAGES (this should be fast as long as - * MAXNUMMESSAGES is a constant and a power of 2). As long as maxMsgNum + * MAXNUMMESSAGES is a constant and a power of 2). As long as maxMsgNum * doesn't exceed minMsgNum by more than MAXNUMMESSAGES, we have enough space * in the buffer. If the buffer does overflow, we recover by setting the * "reset" flag for each backend that has fallen too far behind. A backend @@ -59,7 +59,7 @@ * normal behavior is that at most one such interrupt is in flight at a time; * when a backend completes processing a catchup interrupt, it executes * SICleanupQueue, which will signal the next-furthest-behind backend if - * needed. This avoids undue contention from multiple backends all trying + * needed. This avoids undue contention from multiple backends all trying * to catch up at once. However, the furthest-back backend might be stuck * in a state where it can't catch up. Eventually it will get reset, so it * won't cause any more problems for anyone but itself. 
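The sinvaladt.c hunk above describes the invalidation queue as a circular buffer of MAXNUMMESSAGES entries indexed by MsgNum % MAXNUMMESSAGES, with any backend that falls more than a full buffer behind being forced into a cache reset. A standalone sketch of that indexing and lag test (the struct and names are illustrative, and the payload type is simplified):

#include <stdbool.h>

#define MAXNUMMESSAGES 4096         /* must stay a power of two */

typedef struct
{
    int     messages[MAXNUMMESSAGES];   /* real entries are sinval messages */
    int     minMsgNum;                  /* oldest message still needed */
    int     maxMsgNum;                  /* next message number to assign */
} InvalBuffer;

/* Message numbers grow without bound; the slot is just MsgNum mod size. */
static int *
slot_for(InvalBuffer *b, int msgnum)
{
    return &b->messages[msgnum % MAXNUMMESSAGES];
}

/* A reader more than one full buffer behind has lost messages and must
 * discard its caches and start over. */
static bool
reader_needs_reset(const InvalBuffer *b, int nextMsgNum)
{
    return (b->maxMsgNum - nextMsgNum) > MAXNUMMESSAGES;
}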
But we don't want @@ -90,7 +90,7 @@ * the writer wants to change maxMsgNum while readers need to read it. * We deal with that by having a spinlock that readers must take for just * long enough to read maxMsgNum, while writers take it for just long enough - * to write maxMsgNum. (The exact rule is that you need the spinlock to + * to write maxMsgNum. (The exact rule is that you need the spinlock to * read maxMsgNum if you are not holding SInvalWriteLock, and you need the * spinlock to write maxMsgNum unless you are holding both locks.) * @@ -442,7 +442,7 @@ SIInsertDataEntries(const SharedInvalidationMessage *data, int n) SISeg *segP = shmInvalBuffer; /* - * N can be arbitrarily large. We divide the work into groups of no more + * N can be arbitrarily large. We divide the work into groups of no more * than WRITE_QUANTUM messages, to be sure that we don't hold the lock for * an unreasonably long time. (This is not so much because we care about * letting in other writers, as that some just-caught-up backend might be @@ -465,7 +465,7 @@ SIInsertDataEntries(const SharedInvalidationMessage *data, int n) * If the buffer is full, we *must* acquire some space. Clean the * queue and reset anyone who is preventing space from being freed. * Otherwise, clean the queue only when it's exceeded the next - * fullness threshold. We have to loop and recheck the buffer state + * fullness threshold. We have to loop and recheck the buffer state * after any call of SICleanupQueue. */ for (;;) @@ -533,11 +533,11 @@ SIInsertDataEntries(const SharedInvalidationMessage *data, int n) * executing on behalf of other backends, since each instance will modify only * fields of its own backend's ProcState, and no instance will look at fields * of other backends' ProcStates. We express this by grabbing SInvalReadLock - * in shared mode. Note that this is not exactly the normal (read-only) + * in shared mode. Note that this is not exactly the normal (read-only) * interpretation of a shared lock! Look closely at the interactions before * allowing SInvalReadLock to be grabbed in shared mode for any other reason! * - * NB: this can also run in parallel with SIInsertDataEntries. It is not + * NB: this can also run in parallel with SIInsertDataEntries. It is not * guaranteed that we will return any messages added after the routine is * entered. * @@ -557,10 +557,10 @@ SIGetDataEntries(SharedInvalidationMessage *data, int datasize) /* * Before starting to take locks, do a quick, unlocked test to see whether - * there can possibly be anything to read. On a multiprocessor system, + * there can possibly be anything to read. On a multiprocessor system, * it's possible that this load could migrate backwards and occur before * we actually enter this function, so we might miss a sinval message that - * was just added by some other processor. But they can't migrate + * was just added by some other processor. But they can't migrate * backwards over a preceding lock acquisition, so it should be OK. If we * haven't acquired a lock preventing against further relevant * invalidations, any such occurrence is not much different than if the @@ -651,7 +651,7 @@ SIGetDataEntries(SharedInvalidationMessage *data, int datasize) * * Caution: because we transiently release write lock when we have to signal * some other backend, it is NOT guaranteed that there are still minFree - * free message slots at exit. Caller must recheck and perhaps retry. + * free message slots at exit. Caller must recheck and perhaps retry. 
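The SIInsertDataEntries hunk above stresses that n can be arbitrarily large, so the work is split into groups of at most WRITE_QUANTUM messages to bound how long the write lock is held in any one pass. A standalone sketch of that batching loop (the callbacks and the quantum value are placeholders, not the sinvaladt.c interfaces):

#define WRITE_QUANTUM 64

static void
insert_in_batches(const int *items, int n,
                  void (*lock)(void), void (*unlock)(void),
                  void (*insert_one)(int))
{
    while (n > 0)
    {
        int nthistime = n > WRITE_QUANTUM ? WRITE_QUANTUM : n;
        int i;

        lock();                         /* hold the lock per batch only */
        for (i = 0; i < nthistime; i++)
            insert_one(items[i]);
        unlock();

        items += nthistime;
        n -= nthistime;
    }
}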
*/ void SICleanupQueue(bool callerHasWriteLock, int minFree) @@ -672,7 +672,7 @@ SICleanupQueue(bool callerHasWriteLock, int minFree) /* * Recompute minMsgNum = minimum of all backends' nextMsgNum, identify the * furthest-back backend that needs signaling (if any), and reset any - * backends that are too far back. Note that because we ignore sendOnly + * backends that are too far back. Note that because we ignore sendOnly * backends here it is possible for them to keep sending messages without * a problem even when they are the only active backend. */ diff --git a/src/backend/storage/ipc/standby.c b/src/backend/storage/ipc/standby.c index aa8bea5538b..d0abe4117f8 100644 --- a/src/backend/storage/ipc/standby.c +++ b/src/backend/storage/ipc/standby.c @@ -130,7 +130,7 @@ GetStandbyLimitTime(void) /* * The cutoff time is the last WAL data receipt time plus the appropriate - * delay variable. Delay of -1 means wait forever. + * delay variable. Delay of -1 means wait forever. */ GetXLogReceiptTime(&rtime, &fromStream); if (fromStream) @@ -475,7 +475,7 @@ SendRecoveryConflictWithBufferPin(ProcSignalReason reason) * determine whether an actual deadlock condition is present: the lock we * need to wait for might be unrelated to any held by the Startup process. * Sooner or later, this mechanism should get ripped out in favor of somehow - * accounting for buffer locks in DeadLockCheck(). However, errors here + * accounting for buffer locks in DeadLockCheck(). However, errors here * seem to be very low-probability in practice, so for now it's not worth * the trouble. */ @@ -867,7 +867,7 @@ standby_redo(XLogRecPtr lsn, XLogRecord *record) XLogRecPtr LogStandbySnapshot(void) { - XLogRecPtr recptr; + XLogRecPtr recptr; RunningTransactions running; xl_standby_lock *locks; int nlocks; @@ -889,8 +889,8 @@ LogStandbySnapshot(void) running = GetRunningTransactionData(); /* - * GetRunningTransactionData() acquired ProcArrayLock, we must release - * it. For Hot Standby this can be done before inserting the WAL record + * GetRunningTransactionData() acquired ProcArrayLock, we must release it. + * For Hot Standby this can be done before inserting the WAL record * because ProcArrayApplyRecoveryInfo() rechecks the commit status using * the clog. For logical decoding, though, the lock can't be released * early becuase the clog might be "in the future" from the POV of the @@ -977,9 +977,9 @@ LogCurrentRunningXacts(RunningTransactions CurrRunningXacts) /* * Ensure running_xacts information is synced to disk not too far in the * future. We don't want to stall anything though (i.e. use XLogFlush()), - * so we let the wal writer do it during normal - * operation. XLogSetAsyncXactLSN() conveniently will mark the LSN as - * to-be-synced and nudge the WALWriter into action if sleeping. Check + * so we let the wal writer do it during normal operation. + * XLogSetAsyncXactLSN() conveniently will mark the LSN as to-be-synced + * and nudge the WALWriter into action if sleeping. Check * XLogBackgroundFlush() for details why a record might not be flushed * without it. 
*/ diff --git a/src/backend/storage/large_object/inv_api.c b/src/backend/storage/large_object/inv_api.c index 69c7bdb2a06..57ec1c2a6f9 100644 --- a/src/backend/storage/large_object/inv_api.c +++ b/src/backend/storage/large_object/inv_api.c @@ -266,10 +266,10 @@ inv_open(Oid lobjId, int flags, MemoryContext mcxt) errmsg("large object %u does not exist", lobjId))); /* - * We must register the snapshot in TopTransaction's resowner, because - * it must stay alive until the LO is closed rather than until the - * current portal shuts down. Do this after checking that the LO exists, - * to avoid leaking the snapshot if an error is thrown. + * We must register the snapshot in TopTransaction's resowner, because it + * must stay alive until the LO is closed rather than until the current + * portal shuts down. Do this after checking that the LO exists, to avoid + * leaking the snapshot if an error is thrown. */ if (snapshot) snapshot = RegisterSnapshotOnOwner(snapshot, @@ -809,7 +809,7 @@ inv_truncate(LargeObjectDesc *obj_desc, int64 len) /* * If we found the page of the truncation point we need to truncate the - * data in it. Otherwise if we're in a hole, we need to create a page to + * data in it. Otherwise if we're in a hole, we need to create a page to * mark the end of data. */ if (olddata != NULL && olddata->pageno == pageno) diff --git a/src/backend/storage/lmgr/deadlock.c b/src/backend/storage/lmgr/deadlock.c index 736fcd06196..298c5776407 100644 --- a/src/backend/storage/lmgr/deadlock.c +++ b/src/backend/storage/lmgr/deadlock.c @@ -51,7 +51,7 @@ typedef struct } WAIT_ORDER; /* - * Information saved about each edge in a detected deadlock cycle. This + * Information saved about each edge in a detected deadlock cycle. This * is used to print a diagnostic message upon failure. * * Note: because we want to examine this info after releasing the lock @@ -119,7 +119,7 @@ static PGPROC *blocking_autovacuum_proc = NULL; * InitDeadLockChecking -- initialize deadlock checker during backend startup * * This does per-backend initialization of the deadlock checker; primarily, - * allocation of working memory for DeadLockCheck. We do this per-backend + * allocation of working memory for DeadLockCheck. We do this per-backend * since there's no percentage in making the kernel do copy-on-write * inheritance of workspace from the postmaster. We want to allocate the * space at startup because (a) the deadlock checker might be invoked when @@ -291,10 +291,10 @@ GetBlockingAutoVacuumPgproc(void) * DeadLockCheckRecurse -- recursively search for valid orderings * * curConstraints[] holds the current set of constraints being considered - * by an outer level of recursion. Add to this each possible solution + * by an outer level of recursion. Add to this each possible solution * constraint for any cycle detected at this level. * - * Returns TRUE if no solution exists. Returns FALSE if a deadlock-free + * Returns TRUE if no solution exists. Returns FALSE if a deadlock-free * state is attainable, in which case waitOrders[] shows the required * rearrangements of lock wait queues (if any). */ @@ -429,7 +429,7 @@ TestConfiguration(PGPROC *startProc) * * Since we need to be able to check hypothetical configurations that would * exist after wait queue rearrangement, the routine pays attention to the - * table of hypothetical queue orders in waitOrders[]. These orders will + * table of hypothetical queue orders in waitOrders[]. These orders will * be believed in preference to the actual ordering seen in the locktable. 
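The deadlock.c hunks above explain that cycle checks must be able to evaluate hypothetical wait-queue rearrangements, so the waitOrders[] table is consulted in preference to the real ordering in the lock table. A standalone sketch of that lookup-with-override idea (the types and find_hypothetical_order are illustrative, not the WAIT_ORDER machinery itself):

#include <stddef.h>

typedef struct Lock Lock;       /* opaque stand-ins */
typedef struct Proc Proc;

typedef struct
{
    const Lock  *lock;
    Proc       **procs;         /* proposed queue order for that lock */
    int          nprocs;
} WaitOrder;

/*
 * Return the hypothetical ordering proposed for "lock", if any; a NULL
 * result tells the caller to believe the real wait queue instead.
 */
static const WaitOrder *
find_hypothetical_order(const WaitOrder *waitOrders, int nWaitOrders,
                        const Lock *lock)
{
    int i;

    for (i = 0; i < nWaitOrders; i++)
        if (waitOrders[i].lock == lock)
            return &waitOrders[i];
    return NULL;
}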
*/ static bool @@ -506,7 +506,7 @@ FindLockCycleRecurse(PGPROC *checkProc, conflictMask = lockMethodTable->conflictTab[checkProc->waitLockMode]; /* - * Scan for procs that already hold conflicting locks. These are "hard" + * Scan for procs that already hold conflicting locks. These are "hard" * edges in the waits-for graph. */ procLocks = &(lock->procLocks); @@ -705,7 +705,7 @@ ExpandConstraints(EDGE *constraints, nWaitOrders = 0; /* - * Scan constraint list backwards. This is because the last-added + * Scan constraint list backwards. This is because the last-added * constraint is the only one that could fail, and so we want to test it * for inconsistency first. */ @@ -759,7 +759,7 @@ ExpandConstraints(EDGE *constraints, * The initial queue ordering is taken directly from the lock's wait queue. * The output is an array of PGPROC pointers, of length equal to the lock's * wait queue length (the caller is responsible for providing this space). - * The partial order is specified by an array of EDGE structs. Each EDGE + * The partial order is specified by an array of EDGE structs. Each EDGE * is one that we need to reverse, therefore the "waiter" must appear before * the "blocker" in the output array. The EDGE array may well contain * edges associated with other locks; these should be ignored. @@ -829,7 +829,7 @@ TopoSort(LOCK *lock, afterConstraints[k] = i + 1; } /*-------------------- - * Now scan the topoProcs array backwards. At each step, output the + * Now scan the topoProcs array backwards. At each step, output the * last proc that has no remaining before-constraints, and decrease * the beforeConstraints count of each of the procs it was constrained * against. diff --git a/src/backend/storage/lmgr/lmgr.c b/src/backend/storage/lmgr/lmgr.c index d692aad6cbf..6cc4d269eaf 100644 --- a/src/backend/storage/lmgr/lmgr.c +++ b/src/backend/storage/lmgr/lmgr.c @@ -35,7 +35,7 @@ typedef struct XactLockTableWaitInfo { XLTW_Oper oper; Relation rel; - ItemPointer ctid; + ItemPointer ctid; } XactLockTableWaitInfo; static void XactLockTableWaitErrorCb(void *arg); @@ -80,7 +80,7 @@ SetLocktagRelationOid(LOCKTAG *tag, Oid relid) /* * LockRelationOid * - * Lock a relation given only its OID. This should generally be used + * Lock a relation given only its OID. This should generally be used * before attempting to open the relation's relcache entry. */ void @@ -268,7 +268,7 @@ LockHasWaitersRelation(Relation relation, LOCKMODE lockmode) /* * LockRelationIdForSession * - * This routine grabs a session-level lock on the target relation. The + * This routine grabs a session-level lock on the target relation. The * session lock persists across transaction boundaries. It will be removed * when UnlockRelationIdForSession() is called, or if an ereport(ERROR) occurs, * or if the backend exits. @@ -471,7 +471,7 @@ XactLockTableInsert(TransactionId xid) * * Delete the lock showing that the given transaction ID is running. * (This is never used for main transaction IDs; those locks are only - * released implicitly at transaction end. But we do use it for subtrans IDs.) + * released implicitly at transaction end. But we do use it for subtrans IDs.) */ void XactLockTableDelete(TransactionId xid) @@ -494,7 +494,7 @@ XactLockTableDelete(TransactionId xid) * subtransaction, we will exit as soon as it aborts or its top parent commits. * It takes some extra work to ensure this, because to save on shared memory * the XID lock of a subtransaction is released when it ends, whether - * successfully or unsuccessfully. 
So we have to check if it's "still running" + * successfully or unsuccessfully. So we have to check if it's "still running" * and if so wait for its parent. */ void @@ -663,7 +663,7 @@ WaitForLockersMultiple(List *locktags, LOCKMODE lockmode) /* * Note: GetLockConflicts() never reports our own xid, hence we need not - * check for that. Also, prepared xacts are not reported, which is fine + * check for that. Also, prepared xacts are not reported, which is fine * since they certainly aren't going to do anything anymore. */ @@ -690,7 +690,7 @@ WaitForLockersMultiple(List *locktags, LOCKMODE lockmode) void WaitForLockers(LOCKTAG heaplocktag, LOCKMODE lockmode) { - List *l; + List *l; l = list_make1(&heaplocktag); WaitForLockersMultiple(l, lockmode); diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c index 682506374fe..cd468bcc99c 100644 --- a/src/backend/storage/lmgr/lock.c +++ b/src/backend/storage/lmgr/lock.c @@ -187,7 +187,7 @@ static int FastPathLocalUseCount = 0; /* * The fast-path lock mechanism is concerned only with relation locks on - * unshared relations by backends bound to a database. The fast-path + * unshared relations by backends bound to a database. The fast-path * mechanism exists mostly to accelerate acquisition and release of locks * that rarely conflict. Because ShareUpdateExclusiveLock is * self-conflicting, it can't use the fast-path mechanism; but it also does @@ -914,7 +914,7 @@ LockAcquireExtended(const LOCKTAG *locktag, /* * If lock requested conflicts with locks requested by waiters, must join - * wait queue. Otherwise, check for conflict with already-held locks. + * wait queue. Otherwise, check for conflict with already-held locks. * (That's last because most complex check.) */ if (lockMethodTable->conflictTab[lockmode] & lock->waitMask) @@ -995,7 +995,7 @@ LockAcquireExtended(const LOCKTAG *locktag, /* * NOTE: do not do any material change of state between here and - * return. All required changes in locktable state must have been + * return. All required changes in locktable state must have been * done when the lock was granted to us --- see notes in WaitOnLock. */ @@ -1032,7 +1032,7 @@ LockAcquireExtended(const LOCKTAG *locktag, { /* * Decode the locktag back to the original values, to avoid sending - * lots of empty bytes with every message. See lock.h to check how a + * lots of empty bytes with every message. See lock.h to check how a * locktag is defined for LOCKTAG_RELATION */ LogAccessExclusiveLock(locktag->locktag_field1, @@ -1289,7 +1289,7 @@ LockCheckConflicts(LockMethod lockMethodTable, } /* - * Rats. Something conflicts. But it could still be my own lock. We have + * Rats. Something conflicts. But it could still be my own lock. We have * to construct a conflict mask that does not reflect our own locks, but * only lock types held by other processes. */ @@ -1381,7 +1381,7 @@ UnGrantLock(LOCK *lock, LOCKMODE lockmode, /* * We need only run ProcLockWakeup if the released lock conflicts with at - * least one of the lock types requested by waiter(s). Otherwise whatever + * least one of the lock types requested by waiter(s). Otherwise whatever * conflict made them wait must still exist. NOTE: before MVCC, we could * skip wakeup if lock->granted[lockmode] was still positive. But that's * not true anymore, because the remaining granted locks might belong to @@ -1401,7 +1401,7 @@ UnGrantLock(LOCK *lock, LOCKMODE lockmode, } /* - * CleanUpLock -- clean up after releasing a lock. 
We garbage-collect the + * CleanUpLock -- clean up after releasing a lock. We garbage-collect the * proclock and lock objects if possible, and call ProcLockWakeup if there * are remaining requests and the caller says it's OK. (Normally, this * should be called after UnGrantLock, and wakeupNeeded is the result from @@ -1823,7 +1823,7 @@ LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock) } /* - * Decrease the total local count. If we're still holding the lock, we're + * Decrease the total local count. If we're still holding the lock, we're * done. */ locallock->nLocks--; @@ -1955,7 +1955,7 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks) #endif /* - * Get rid of our fast-path VXID lock, if appropriate. Note that this is + * Get rid of our fast-path VXID lock, if appropriate. Note that this is * the only way that the lock we hold on our own VXID can ever get * released: it is always and only released when a toplevel transaction * ends. @@ -2042,7 +2042,7 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks) * fast-path data structures, we must acquire it before attempting * to release the lock via the fast-path. We will continue to * hold the LWLock until we're done scanning the locallock table, - * unless we hit a transferred fast-path lock. (XXX is this + * unless we hit a transferred fast-path lock. (XXX is this * really such a good idea? There could be a lot of entries ...) */ if (!have_fast_path_lwlock) @@ -2061,7 +2061,7 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks) /* * Our lock, originally taken via the fast path, has been - * transferred to the main lock table. That's going to require + * transferred to the main lock table. That's going to require * some extra work, so release our fast-path lock before starting. */ LWLockRelease(MyProc->backendLock); @@ -2070,7 +2070,7 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks) /* * Now dump the lock. We haven't got a pointer to the LOCK or * PROCLOCK in this case, so we have to handle this a bit - * differently than a normal lock release. Unfortunately, this + * differently than a normal lock release. Unfortunately, this * requires an extra LWLock acquire-and-release cycle on the * partitionLock, but hopefully it shouldn't happen often. */ @@ -2505,9 +2505,9 @@ FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag * acquiring proc->backendLock. In particular, it's certainly safe to * assume that if the target backend holds any fast-path locks, it * must have performed a memory-fencing operation (in particular, an - * LWLock acquisition) since setting proc->databaseId. However, it's + * LWLock acquisition) since setting proc->databaseId. However, it's * less clear that our backend is certain to have performed a memory - * fencing operation since the other backend set proc->databaseId. So + * fencing operation since the other backend set proc->databaseId. So * for now, we test it after acquiring the LWLock just to be safe. */ if (proc->databaseId != locktag->locktag_field1) @@ -3021,7 +3021,7 @@ AtPrepare_Locks(void) continue; /* - * If we have both session- and transaction-level locks, fail. This + * If we have both session- and transaction-level locks, fail. This * should never happen with regular locks, since we only take those at * session level in some special operations like VACUUM. It's * possible to hit this with advisory locks, though. @@ -3030,7 +3030,7 @@ AtPrepare_Locks(void) * the transactional hold to the prepared xact. 
However, that would * require two PROCLOCK objects, and we cannot be sure that another * PROCLOCK will be available when it comes time for PostPrepare_Locks - * to do the deed. So for now, we error out while we can still do so + * to do the deed. So for now, we error out while we can still do so * safely. */ if (haveSessionLock) @@ -3219,7 +3219,7 @@ PostPrepare_Locks(TransactionId xid) /* * We cannot simply modify proclock->tag.myProc to reassign * ownership of the lock, because that's part of the hash key and - * the proclock would then be in the wrong hash chain. Instead + * the proclock would then be in the wrong hash chain. Instead * use hash_update_hash_key. (We used to create a new hash entry, * but that risks out-of-memory failure if other processes are * busy making proclocks too.) We must unlink the proclock from @@ -3319,7 +3319,7 @@ GetLockStatusData(void) /* * First, we iterate through the per-backend fast-path arrays, locking - * them one at a time. This might produce an inconsistent picture of the + * them one at a time. This might produce an inconsistent picture of the * system state, but taking all of those LWLocks at the same time seems * impractical (in particular, note MAX_SIMUL_LWLOCKS). It shouldn't * matter too much, because none of these locks can be involved in lock @@ -3398,7 +3398,7 @@ GetLockStatusData(void) * will be self-consistent. * * Since this is a read-only operation, we take shared instead of - * exclusive lock. There's not a whole lot of point to this, because all + * exclusive lock. There's not a whole lot of point to this, because all * the normal operations require exclusive lock, but it doesn't hurt * anything either. It will at least allow two backends to do * GetLockStatusData in parallel. @@ -3917,7 +3917,7 @@ lock_twophase_postabort(TransactionId xid, uint16 info, * as MyProc->lxid, you might wonder if we really need both. The * difference is that MyProc->lxid is set and cleared unlocked, and * examined by procarray.c, while fpLocalTransactionId is protected by - * backendLock and is used only by the locking subsystem. Doing it this + * backendLock and is used only by the locking subsystem. Doing it this * way makes it easier to verify that there are no funny race conditions. * * We don't bother recording this lock in the local lock table, since it's diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c index df8f9bfd893..d23ac62bf84 100644 --- a/src/backend/storage/lmgr/lwlock.c +++ b/src/backend/storage/lmgr/lwlock.c @@ -6,7 +6,7 @@ * Lightweight locks are intended primarily to provide mutual exclusion of * access to shared-memory data structures. Therefore, they offer both * exclusive and shared lock modes (to support read/write and read-only - * access to a shared object). There are few other frammishes. User-level + * access to a shared object). There are few other frammishes. User-level * locking should be done with the full lock manager --- which depends on * LWLocks to protect its shared state. * @@ -54,7 +54,7 @@ extern slock_t *ShmemLock; * to the current backend. 
*/ static LWLockTranche **LWLockTrancheArray = NULL; -static int LWLockTranchesAllocated = 0; +static int LWLockTranchesAllocated = 0; #define T_NAME(lock) \ (LWLockTrancheArray[(lock)->tranche]->name) @@ -91,18 +91,18 @@ static bool LWLockAcquireCommon(LWLock *l, LWLockMode mode, uint64 *valptr, #ifdef LWLOCK_STATS typedef struct lwlock_stats_key { - int tranche; - int instance; -} lwlock_stats_key; + int tranche; + int instance; +} lwlock_stats_key; typedef struct lwlock_stats { - lwlock_stats_key key; - int sh_acquire_count; - int ex_acquire_count; - int block_count; - int spin_delay_count; -} lwlock_stats; + lwlock_stats_key key; + int sh_acquire_count; + int ex_acquire_count; + int block_count; + int spin_delay_count; +} lwlock_stats; static int counts_for_pid = 0; static HTAB *lwlock_stats_htab; @@ -173,7 +173,7 @@ print_lwlock_stats(int code, Datum arg) while ((lwstats = (lwlock_stats *) hash_seq_search(&scan)) != NULL) { fprintf(stderr, - "PID %d lwlock %s %d: shacq %u exacq %u blk %u spindelay %u\n", + "PID %d lwlock %s %d: shacq %u exacq %u blk %u spindelay %u\n", MyProcPid, LWLockTrancheArray[lwstats->key.tranche]->name, lwstats->key.instance, lwstats->sh_acquire_count, lwstats->ex_acquire_count, lwstats->block_count, @@ -186,9 +186,9 @@ print_lwlock_stats(int code, Datum arg) static lwlock_stats * get_lwlock_stats_entry(LWLock *lock) { - lwlock_stats_key key; + lwlock_stats_key key; lwlock_stats *lwstats; - bool found; + bool found; /* Set up local count state first time through in a given process */ if (counts_for_pid != MyProcPid) @@ -270,7 +270,7 @@ NumLWLocks(void) * a loadable module. * * This is only useful if called from the _PG_init hook of a library that - * is loaded into the postmaster via shared_preload_libraries. Once + * is loaded into the postmaster via shared_preload_libraries. Once * shared memory has been allocated, calls will be ignored. (We could * raise an error, but it seems better to make it a no-op, so that * libraries containing such calls can be reloaded if needed.) @@ -339,12 +339,12 @@ CreateLWLocks(void) * before the first LWLock. LWLockCounter[0] is the allocation * counter for lwlocks, LWLockCounter[1] is the maximum number that * can be allocated from the main array, and LWLockCounter[2] is the - * allocation counter for tranches. + * allocation counter for tranches. */ LWLockCounter = (int *) ((char *) MainLWLockArray - 3 * sizeof(int)); LWLockCounter[0] = NUM_FIXED_LWLOCKS; LWLockCounter[1] = numLocks; - LWLockCounter[2] = 1; /* 0 is the main array */ + LWLockCounter[2] = 1; /* 0 is the main array */ } if (LWLockTrancheArray == NULL) @@ -352,7 +352,7 @@ CreateLWLocks(void) LWLockTranchesAllocated = 16; LWLockTrancheArray = (LWLockTranche **) MemoryContextAlloc(TopMemoryContext, - LWLockTranchesAllocated * sizeof(LWLockTranche *)); + LWLockTranchesAllocated * sizeof(LWLockTranche *)); } MainLWLockTranche.name = "main"; @@ -422,7 +422,7 @@ LWLockRegisterTranche(int tranche_id, LWLockTranche *tranche) if (tranche_id >= LWLockTranchesAllocated) { - int i = LWLockTranchesAllocated; + int i = LWLockTranchesAllocated; while (i <= tranche_id) i *= 2; @@ -534,7 +534,7 @@ LWLockAcquireCommon(LWLock *l, LWLockMode mode, uint64 *valptr, uint64 val) * in the presence of contention. The efficiency of being able to do that * outweighs the inefficiency of sometimes wasting a process dispatch * cycle because the lock is not free when a released waiter finally gets - * to run. See pgsql-hackers archives for 29-Dec-01. + * to run. 
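The LWLockRegisterTranche hunk above grows the tranche array by doubling until it covers the requested tranche_id, then zeroes the newly added slots. A standalone sketch of that growth step (plain realloc/memset here instead of repalloc in TopMemoryContext, and error handling is omitted):

#include <stdlib.h>
#include <string.h>

typedef struct
{
    const char *name;           /* stand-in for the real tranche struct */
} Tranche;

static Tranche **tranche_array = NULL;
static int       tranches_allocated = 0;

static void
register_tranche(int tranche_id, Tranche *tranche)
{
    if (tranche_id >= tranches_allocated)
    {
        int newalloc = tranches_allocated > 0 ? tranches_allocated : 16;

        while (newalloc <= tranche_id)
            newalloc *= 2;      /* double until the id fits */

        tranche_array = realloc(tranche_array, newalloc * sizeof(Tranche *));
        memset(tranche_array + tranches_allocated, 0,
               (newalloc - tranches_allocated) * sizeof(Tranche *));
        tranches_allocated = newalloc;
    }
    tranche_array[tranche_id] = tranche;
}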
See pgsql-hackers archives for 29-Dec-01. */ for (;;) { @@ -731,7 +731,7 @@ LWLockConditionalAcquire(LWLock *l, LWLockMode mode) /* * LWLockAcquireOrWait - Acquire lock, or wait until it's free * - * The semantics of this function are a bit funky. If the lock is currently + * The semantics of this function are a bit funky. If the lock is currently * free, it is acquired in the given mode, and the function returns true. If * the lock isn't immediately free, the function waits until it is released * and returns false, but does not acquire the lock. @@ -920,8 +920,8 @@ LWLockWaitForVar(LWLock *l, uint64 *valptr, uint64 oldval, uint64 *newval) return true; /* - * Lock out cancel/die interrupts while we sleep on the lock. There is - * no cleanup mechanism to remove us from the wait queue if we got + * Lock out cancel/die interrupts while we sleep on the lock. There is no + * cleanup mechanism to remove us from the wait queue if we got * interrupted. */ HOLD_INTERRUPTS(); diff --git a/src/backend/storage/lmgr/predicate.c b/src/backend/storage/lmgr/predicate.c index 593d80f9d22..7c8d53e6a5a 100644 --- a/src/backend/storage/lmgr/predicate.c +++ b/src/backend/storage/lmgr/predicate.c @@ -32,11 +32,11 @@ * examining the MVCC data.) * * (1) Besides tuples actually read, they must cover ranges of tuples - * which would have been read based on the predicate. This will + * which would have been read based on the predicate. This will * require modelling the predicates through locks against database * objects such as pages, index ranges, or entire tables. * - * (2) They must be kept in RAM for quick access. Because of this, it + * (2) They must be kept in RAM for quick access. Because of this, it * isn't possible to always maintain tuple-level granularity -- when * the space allocated to store these approaches exhaustion, a * request for a lock may need to scan for situations where a single @@ -49,7 +49,7 @@ * * (4) While they are associated with a transaction, they must survive * a successful COMMIT of that transaction, and remain until all - * overlapping transactions complete. This even means that they + * overlapping transactions complete. This even means that they * must survive termination of the transaction's process. If a * top level transaction is rolled back, however, it is immediately * flagged so that it can be ignored, and its SIREAD locks can be @@ -90,7 +90,7 @@ * may yet matter because they overlap still-active transactions. * * SerializablePredicateLockListLock - * - Protects the linked list of locks held by a transaction. Note + * - Protects the linked list of locks held by a transaction. Note * that the locks themselves are also covered by the partition * locks of their respective lock targets; this lock only affects * the linked list connecting the locks related to a transaction. @@ -101,11 +101,11 @@ * - It is relatively infrequent that another process needs to * modify the list for a transaction, but it does happen for such * things as index page splits for pages with predicate locks and - * freeing of predicate locked pages by a vacuum process. When + * freeing of predicate locked pages by a vacuum process. When * removing a lock in such cases, the lock itself contains the * pointers needed to remove it from the list. When adding a * lock in such cases, the lock can be added using the anchor in - * the transaction structure. Neither requires walking the list. + * the transaction structure. Neither requires walking the list. 
* - Cleaning up the list for a terminated transaction is sometimes * not done on a retail basis, in which case no lock is required. * - Due to the above, a process accessing its active transaction's @@ -355,7 +355,7 @@ int max_predicate_locks_per_xact; /* set by guc.c */ /* * This provides a list of objects in order to track transactions - * participating in predicate locking. Entries in the list are fixed size, + * participating in predicate locking. Entries in the list are fixed size, * and reside in shared memory. The memory address of an entry must remain * fixed during its lifetime. The list will be protected from concurrent * update externally; no provision is made in this code to manage that. The @@ -547,7 +547,7 @@ SerializationNeededForWrite(Relation relation) /* * These functions are a simple implementation of a list for this specific - * type of struct. If there is ever a generalized shared memory list, we + * type of struct. If there is ever a generalized shared memory list, we * should probably switch to that. */ static SERIALIZABLEXACT * @@ -767,7 +767,7 @@ OldSerXidPagePrecedesLogically(int p, int q) int diff; /* - * We have to compare modulo (OLDSERXID_MAX_PAGE+1)/2. Both inputs should + * We have to compare modulo (OLDSERXID_MAX_PAGE+1)/2. Both inputs should * be in the range 0..OLDSERXID_MAX_PAGE. */ Assert(p >= 0 && p <= OLDSERXID_MAX_PAGE); @@ -929,7 +929,7 @@ OldSerXidAdd(TransactionId xid, SerCommitSeqNo minConflictCommitSeqNo) } /* - * Get the minimum commitSeqNo for any conflict out for the given xid. For + * Get the minimum commitSeqNo for any conflict out for the given xid. For * a transaction which exists but has no conflict out, InvalidSerCommitSeqNo * will be returned. */ @@ -982,7 +982,7 @@ OldSerXidSetActiveSerXmin(TransactionId xid) /* * When no sxacts are active, nothing overlaps, set the xid values to * invalid to show that there are no valid entries. Don't clear headPage, - * though. A new xmin might still land on that page, and we don't want to + * though. A new xmin might still land on that page, and we don't want to * repeatedly zero out the same page. */ if (!TransactionIdIsValid(xid)) @@ -1467,7 +1467,7 @@ SummarizeOldestCommittedSxact(void) /* * Grab the first sxact off the finished list -- this will be the earliest - * commit. Remove it from the list. + * commit. Remove it from the list. */ sxact = (SERIALIZABLEXACT *) SHMQueueNext(FinishedSerializableTransactions, @@ -1620,7 +1620,7 @@ SetSerializableTransactionSnapshot(Snapshot snapshot, /* * We do not allow SERIALIZABLE READ ONLY DEFERRABLE transactions to * import snapshots, since there's no way to wait for a safe snapshot when - * we're using the snap we're told to. (XXX instead of throwing an error, + * we're using the snap we're told to. (XXX instead of throwing an error, * we could just ignore the XactDeferrable flag?) */ if (XactReadOnly && XactDeferrable) @@ -1669,7 +1669,7 @@ GetSerializableTransactionSnapshotInt(Snapshot snapshot, * release SerializableXactHashLock to call SummarizeOldestCommittedSxact, * this means we have to create the sxact first, which is a bit annoying * (in particular, an elog(ERROR) in procarray.c would cause us to leak - * the sxact). Consider refactoring to avoid this. + * the sxact). Consider refactoring to avoid this. */ #ifdef TEST_OLDSERXID SummarizeOldestCommittedSxact(); @@ -2051,7 +2051,7 @@ RemoveTargetIfNoLongerUsed(PREDICATELOCKTARGET *target, uint32 targettaghash) /* * Delete child target locks owned by this process. 
* This implementation is assuming that the usage of each target tag field - * is uniform. No need to make this hard if we don't have to. + * is uniform. No need to make this hard if we don't have to. * * We aren't acquiring lightweight locks for the predicate lock or lock * target structures associated with this transaction unless we're going @@ -2092,7 +2092,7 @@ DeleteChildTargetLocks(const PREDICATELOCKTARGETTAG *newtargettag) if (TargetTagIsCoveredBy(oldtargettag, *newtargettag)) { uint32 oldtargettaghash; - LWLock *partitionLock; + LWLock *partitionLock; PREDICATELOCK *rmpredlock PG_USED_FOR_ASSERTS_ONLY; oldtargettaghash = PredicateLockTargetTagHashCode(&oldtargettag); @@ -2497,7 +2497,7 @@ PredicateLockTuple(Relation relation, HeapTuple tuple, Snapshot snapshot) } /* - * Do quick-but-not-definitive test for a relation lock first. This will + * Do quick-but-not-definitive test for a relation lock first. This will * never cause a return when the relation is *not* locked, but will * occasionally let the check continue when there really *is* a relation * level lock. @@ -2809,7 +2809,7 @@ exit: * transaction which is not serializable. * * NOTE: This is currently only called with transfer set to true, but that may - * change. If we decide to clean up the locks from a table on commit of a + * change. If we decide to clean up the locks from a table on commit of a * transaction which executed DROP TABLE, the false condition will be useful. */ static void @@ -2890,7 +2890,7 @@ DropAllPredicateLocksFromTable(Relation relation, bool transfer) continue; /* already the right lock */ /* - * If we made it here, we have work to do. We make sure the heap + * If we made it here, we have work to do. We make sure the heap * relation lock exists, then we walk the list of predicate locks for * the old target we found, moving all locks to the heap relation lock * -- unless they already hold that. @@ -3338,7 +3338,7 @@ ReleasePredicateLocks(bool isCommit) } /* - * Release all outConflicts to committed transactions. If we're rolling + * Release all outConflicts to committed transactions. If we're rolling * back clear them all. Set SXACT_FLAG_CONFLICT_OUT if any point to * previously committed transactions. */ @@ -3657,7 +3657,7 @@ ClearOldPredicateLocks(void) * matter -- but keep the transaction entry itself and any outConflicts. * * When the summarize flag is set, we've run short of room for sxact data - * and must summarize to the SLRU. Predicate locks are transferred to a + * and must summarize to the SLRU. Predicate locks are transferred to a * dummy "old" transaction, with duplicate locks on a single target * collapsing to a single lock with the "latest" commitSeqNo from among * the conflicting locks.. @@ -3850,7 +3850,7 @@ XidIsConcurrent(TransactionId xid) /* * CheckForSerializableConflictOut * We are reading a tuple which has been modified. If it is visible to - * us but has been deleted, that indicates a rw-conflict out. If it's + * us but has been deleted, that indicates a rw-conflict out. If it's * not visible and was created by a concurrent (overlapping) * serializable transaction, that is also a rw-conflict out, * @@ -3937,7 +3937,7 @@ CheckForSerializableConflictOut(bool visible, Relation relation, Assert(TransactionIdFollowsOrEquals(xid, TransactionXmin)); /* - * Find top level xid. Bail out if xid is too early to be a conflict, or + * Find top level xid. Bail out if xid is too early to be a conflict, or * if it's our own xid. 
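The OldSerXidPagePrecedesLogically() comment above depends on circular arithmetic: SLRU page numbers wrap around, so "precedes" is decided by folding the difference back into a signed half-range of the page space. A minimal standalone sketch of that comparison, using a made-up MAX_PAGE constant rather than the real OLDSERXID_MAX_PAGE:

#include <stdbool.h>

#define MAX_PAGE 8191                   /* illustrative constant, not the real one */

/*
 * Return true if page p logically precedes page q on a circular space of
 * MAX_PAGE + 1 pages.  The difference is interpreted modulo
 * (MAX_PAGE + 1) / 2, so anything up to half the circle behind q counts
 * as preceding it.
 */
static bool
page_precedes(int p, int q)
{
    int     diff = p - q;

    if (diff >= (MAX_PAGE + 1) / 2)
        diff -= MAX_PAGE + 1;
    else if (diff < -((MAX_PAGE + 1) / 2))
        diff += MAX_PAGE + 1;

    return diff < 0;
}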
*/ if (TransactionIdEquals(xid, GetTopTransactionIdIfAny())) @@ -4002,7 +4002,7 @@ CheckForSerializableConflictOut(bool visible, Relation relation, /* * We have a conflict out to a transaction which has a conflict out to a - * summarized transaction. That summarized transaction must have + * summarized transaction. That summarized transaction must have * committed first, and we can't tell when it committed in relation to our * snapshot acquisition, so something needs to be canceled. */ @@ -4036,7 +4036,7 @@ CheckForSerializableConflictOut(bool visible, Relation relation, && (!SxactHasConflictOut(sxact) || MySerializableXact->SeqNo.lastCommitBeforeSnapshot < sxact->SeqNo.earliestOutConflictCommit)) { - /* Read-only transaction will appear to run first. No conflict. */ + /* Read-only transaction will appear to run first. No conflict. */ LWLockRelease(SerializableXactHashLock); return; } @@ -4282,8 +4282,8 @@ CheckForSerializableConflictIn(Relation relation, HeapTuple tuple, SET_PREDICATELOCKTARGETTAG_TUPLE(targettag, relation->rd_node.dbNode, relation->rd_id, - ItemPointerGetBlockNumber(&(tuple->t_self)), - ItemPointerGetOffsetNumber(&(tuple->t_self))); + ItemPointerGetBlockNumber(&(tuple->t_self)), + ItemPointerGetOffsetNumber(&(tuple->t_self))); CheckTargetForConflictsIn(&targettag); } @@ -4627,7 +4627,7 @@ OnConflict_CheckForSerializationFailure(const SERIALIZABLEXACT *reader, * * If a dangerous structure is found, the pivot (the near conflict) is * marked for death, because rolling back another transaction might mean - * that we flail without ever making progress. This transaction is + * that we flail without ever making progress. This transaction is * committing writes, so letting it commit ensures progress. If we * canceled the far conflict, it might immediately fail again on retry. */ diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c index 5cd8fcec450..266b0daa94f 100644 --- a/src/backend/storage/lmgr/proc.c +++ b/src/backend/storage/lmgr/proc.c @@ -229,10 +229,10 @@ InitProcGlobal(void) /* * Newly created PGPROCs for normal backends, autovacuum and bgworkers - * must be queued up on the appropriate free list. Because there can + * must be queued up on the appropriate free list. Because there can * only ever be a small, fixed number of auxiliary processes, no free * list is used in that case; InitAuxiliaryProcess() instead uses a - * linear search. PGPROCs for prepared transactions are added to a + * linear search. PGPROCs for prepared transactions are added to a * free list by TwoPhaseShmemInit(). */ if (i < MaxConnections) @@ -291,7 +291,7 @@ InitProcess(void) elog(ERROR, "you already exist"); /* - * Initialize process-local latch support. This could fail if the kernel + * Initialize process-local latch support. This could fail if the kernel * is low on resources, and if so we want to exit cleanly before acquiring * any shared-memory resources. */ @@ -400,7 +400,7 @@ InitProcess(void) /* * We might be reusing a semaphore that belonged to a failed process. So - * be careful and reinitialize its value here. (This is not strictly + * be careful and reinitialize its value here. (This is not strictly * necessary anymore, but seems like a good idea for cleanliness.) */ PGSemaphoreReset(&MyProc->sem); @@ -450,7 +450,7 @@ InitProcessPhase2(void) * * Auxiliary processes are presently not expected to wait for real (lockmgr) * locks, so we need not set up the deadlock checker. They are never added - * to the ProcArray or the sinval messaging mechanism, either. 
They also + * to the ProcArray or the sinval messaging mechanism, either. They also * don't get a VXID assigned, since this is only useful when we actually * hold lockmgr locks. * @@ -476,7 +476,7 @@ InitAuxiliaryProcess(void) elog(ERROR, "you already exist"); /* - * Initialize process-local latch support. This could fail if the kernel + * Initialize process-local latch support. This could fail if the kernel * is low on resources, and if so we want to exit cleanly before acquiring * any shared-memory resources. */ @@ -557,7 +557,7 @@ InitAuxiliaryProcess(void) /* * We might be reusing a semaphore that belonged to a failed process. So - * be careful and reinitialize its value here. (This is not strictly + * be careful and reinitialize its value here. (This is not strictly * necessary anymore, but seems like a good idea for cleanliness.) */ PGSemaphoreReset(&MyProc->sem); @@ -715,7 +715,7 @@ LockErrorCleanup(void) /* * We used to do PGSemaphoreReset() here to ensure that our proc's wait - * semaphore is reset to zero. This prevented a leftover wakeup signal + * semaphore is reset to zero. This prevented a leftover wakeup signal * from remaining in the semaphore if someone else had granted us the lock * we wanted before we were able to remove ourselves from the wait-list. * However, now that ProcSleep loops until waitStatus changes, a leftover @@ -851,7 +851,7 @@ ProcKill(int code, Datum arg) /* * AuxiliaryProcKill() -- Cut-down version of ProcKill for auxiliary - * processes (bgwriter, etc). The PGPROC and sema are not released, only + * processes (bgwriter, etc). The PGPROC and sema are not released, only * marked as not-in-use. */ static void @@ -977,7 +977,7 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable) * * Special case: if I find I should go in front of some waiter, check to * see if I conflict with already-held locks or the requests before that - * waiter. If not, then just grant myself the requested lock immediately. + * waiter. If not, then just grant myself the requested lock immediately. * This is the same as the test for immediate grant in LockAcquire, except * we are only considering the part of the wait queue before my insertion * point. @@ -996,7 +996,7 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable) if (lockMethodTable->conflictTab[lockmode] & proc->heldLocks) { /* - * Yes, so we have a deadlock. Easiest way to clean up + * Yes, so we have a deadlock. Easiest way to clean up * correctly is to call RemoveFromWaitQueue(), but we * can't do that until we are *on* the wait queue. So, set * a flag to check below, and break out of loop. Also, @@ -1117,8 +1117,8 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable) /* * If someone wakes us between LWLockRelease and PGSemaphoreLock, - * PGSemaphoreLock will not block. The wakeup is "saved" by the semaphore - * implementation. While this is normally good, there are cases where a + * PGSemaphoreLock will not block. The wakeup is "saved" by the semaphore + * implementation. While this is normally good, there are cases where a * saved wakeup might be leftover from a previous operation (for example, * we aborted ProcWaitForSignal just before someone did ProcSendSignal). * So, loop to wait again if the waitStatus shows we haven't been granted @@ -1138,7 +1138,7 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable) /* * waitStatus could change from STATUS_WAITING to something else - * asynchronously. Read it just once per loop to prevent surprising + * asynchronously. 
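The ProcSleep() comments above describe the standard defense against leftover semaphore wakeups: sleep on the semaphore, then loop until the shared wait status actually changes, reading that status only once per iteration. A minimal standalone sketch of the pattern with POSIX semaphores; the names STATUS_WAITING, shared_state and wakeup_sem are invented for illustration:

#include <semaphore.h>

#define STATUS_WAITING  0               /* illustrative status codes */
#define STATUS_OK       1

struct shared_state
{
    volatile int waitStatus;            /* set by the process granting the lock */
    sem_t        wakeup_sem;            /* posted to wake the sleeping process */
};

static int
wait_for_grant(struct shared_state *st)
{
    int     status;

    do
    {
        /*
         * A leftover wakeup from an earlier operation can make sem_wait()
         * return immediately, so we must loop until waitStatus actually
         * changes rather than trusting a single wakeup.
         */
        sem_wait(&st->wakeup_sem);

        /* Read the shared status just once per iteration. */
        status = st->waitStatus;
    } while (status == STATUS_WAITING);

    return status;
}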
Read it just once per loop to prevent surprising * behavior (such as missing log messages). */ myWaitStatus = MyProc->waitStatus; @@ -1623,10 +1623,10 @@ check_done: * This can share the semaphore normally used for waiting for locks, * since a backend could never be waiting for a lock and a signal at * the same time. As with locks, it's OK if the signal arrives just - * before we actually reach the waiting state. Also as with locks, + * before we actually reach the waiting state. Also as with locks, * it's necessary that the caller be robust against bogus wakeups: * always check that the desired state has occurred, and wait again - * if not. This copes with possible "leftover" wakeups. + * if not. This copes with possible "leftover" wakeups. */ void ProcWaitForSignal(void) diff --git a/src/backend/storage/lmgr/s_lock.c b/src/backend/storage/lmgr/s_lock.c index b3987494c13..efe1b43fa72 100644 --- a/src/backend/storage/lmgr/s_lock.c +++ b/src/backend/storage/lmgr/s_lock.c @@ -79,7 +79,7 @@ s_lock(volatile slock_t *lock, const char *file, int line) * * We time out and declare error after NUM_DELAYS delays (thus, exactly * that many tries). With the given settings, this will usually take 2 or - * so minutes. It seems better to fix the total number of tries (and thus + * so minutes. It seems better to fix the total number of tries (and thus * the probability of unintended failure) than to fix the total time * spent. */ @@ -137,7 +137,7 @@ s_lock(volatile slock_t *lock, const char *file, int line) * Note: spins_per_delay is local within our current process. We want to * average these observations across multiple backends, since it's * relatively rare for this function to even get entered, and so a single - * backend might not live long enough to converge on a good value. That + * backend might not live long enough to converge on a good value. That * is handled by the two routines below. */ if (cur_delay == 0) @@ -177,7 +177,7 @@ update_spins_per_delay(int shared_spins_per_delay) /* * We use an exponential moving average with a relatively slow adaption * rate, so that noise in any one backend's result won't affect the shared - * value too much. As long as both inputs are within the allowed range, + * value too much. As long as both inputs are within the allowed range, * the result must be too, so we need not worry about clamping the result. * * We deliberately truncate rather than rounding; this is so that single diff --git a/src/backend/storage/lmgr/spin.c b/src/backend/storage/lmgr/spin.c index 9499db115a6..9b71744cc6c 100644 --- a/src/backend/storage/lmgr/spin.c +++ b/src/backend/storage/lmgr/spin.c @@ -5,7 +5,7 @@ * * * For machines that have test-and-set (TAS) instructions, s_lock.h/.c - * define the spinlock implementation. This file contains only a stub + * define the spinlock implementation. This file contains only a stub * implementation for spinlocks using PGSemaphores. 
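The update_spins_per_delay() comment above describes folding each backend's spin-count observation into the shared estimate with a slow-moving, truncating average so that one noisy backend cannot drag the shared value far. A minimal sketch of that kind of update; the 15/16 weighting here is illustrative, not necessarily the constant the file uses:

/*
 * Blend one backend's locally observed spins-per-delay into the shared
 * estimate.  Most of the weight stays on the existing shared value, and
 * integer division truncates rather than rounds.  Inputs are assumed to
 * already lie within the allowed range, so the result needs no clamping.
 */
static int
blend_spins_per_delay(int shared_estimate, int local_observation)
{
    return (local_observation + 15 * shared_estimate) / 16;
}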
Unless semaphores * are implemented in a way that doesn't involve a kernel call, this * is too slow to be very useful :-( @@ -74,7 +74,7 @@ SpinlockSemas(void) extern void SpinlockSemaInit(PGSemaphore spinsemas) { - int i; + int i; for (i = 0; i < NUM_SPINLOCK_SEMAPHORES; ++i) PGSemaphoreCreate(&spinsemas[i]); @@ -88,7 +88,7 @@ SpinlockSemaInit(PGSemaphore spinsemas) void s_init_lock_sema(volatile slock_t *lock) { - static int counter = 0; + static int counter = 0; *lock = (++counter) % NUM_SPINLOCK_SEMAPHORES; } diff --git a/src/backend/storage/page/bufpage.c b/src/backend/storage/page/bufpage.c index 7729fcacf0e..6351a9bea47 100644 --- a/src/backend/storage/page/bufpage.c +++ b/src/backend/storage/page/bufpage.c @@ -63,7 +63,7 @@ PageInit(Page page, Size pageSize, Size specialSize) * PageIsVerified * Check that the page header and checksum (if any) appear valid. * - * This is called when a page has just been read in from disk. The idea is + * This is called when a page has just been read in from disk. The idea is * to cheaply detect trashed pages before we go nuts following bogus item * pointers, testing invalid transaction identifiers, etc. * @@ -155,7 +155,7 @@ PageIsVerified(Page page, BlockNumber blkno) /* * PageAddItem * - * Add an item to a page. Return value is offset at which it was + * Add an item to a page. Return value is offset at which it was * inserted, or InvalidOffsetNumber if there's not room to insert. * * If overwrite is true, we just store the item at the specified @@ -769,7 +769,7 @@ PageIndexTupleDelete(Page page, OffsetNumber offnum) * PageIndexMultiDelete * * This routine handles the case of deleting multiple tuples from an - * index page at once. It is considerably faster than a loop around + * index page at once. It is considerably faster than a loop around * PageIndexTupleDelete ... however, the caller *must* supply the array * of item numbers to be deleted in item number order! */ @@ -780,7 +780,7 @@ PageIndexMultiDelete(Page page, OffsetNumber *itemnos, int nitems) Offset pd_lower = phdr->pd_lower; Offset pd_upper = phdr->pd_upper; Offset pd_special = phdr->pd_special; - itemIdSortData itemidbase[MaxIndexTuplesPerPage]; + itemIdSortData itemidbase[MaxIndexTuplesPerPage]; itemIdSort itemidptr; ItemId lp; int nline, @@ -903,7 +903,7 @@ PageIndexMultiDelete(Page page, OffsetNumber *itemnos, int nitems) * If checksums are disabled, or if the page is not initialized, just return * the input. Otherwise, we must make a copy of the page before calculating * the checksum, to prevent concurrent modifications (e.g. setting hint bits) - * from making the final checksum invalid. It doesn't matter if we include or + * from making the final checksum invalid. It doesn't matter if we include or * exclude hints during the copy, as long as we write a valid page and * associated checksum. * diff --git a/src/backend/storage/smgr/md.c b/src/backend/storage/smgr/md.c index 921861b0bd5..3c1c81a7295 100644 --- a/src/backend/storage/smgr/md.c +++ b/src/backend/storage/smgr/md.c @@ -86,7 +86,7 @@ * not needed because of an mdtruncate() operation. The reason for leaving * them present at size zero, rather than unlinking them, is that other * backends and/or the checkpointer might be holding open file references to - * such segments. If the relation expands again after mdtruncate(), such + * such segments. 
If the relation expands again after mdtruncate(), such * that a deactivated segment becomes active again, it is important that * such file references still be valid --- else data might get written * out to an unlinked old copy of a segment file that will eventually @@ -123,7 +123,7 @@ static MemoryContext MdCxt; /* context for all md.c allocations */ * we keep track of pending fsync operations: we need to remember all relation * segments that have been written since the last checkpoint, so that we can * fsync them down to disk before completing the next checkpoint. This hash - * table remembers the pending operations. We use a hash table mostly as + * table remembers the pending operations. We use a hash table mostly as * a convenient way of merging duplicate requests. * * We use a similar mechanism to remember no-longer-needed files that can @@ -291,7 +291,7 @@ mdcreate(SMgrRelation reln, ForkNumber forkNum, bool isRedo) * During bootstrap, there are cases where a system relation will be * accessed (by internal backend processes) before the bootstrap * script nominally creates it. Therefore, allow the file to exist - * already, even if isRedo is not set. (See also mdopen) + * already, even if isRedo is not set. (See also mdopen) */ if (isRedo || IsBootstrapProcessingMode()) fd = PathNameOpenFile(path, O_RDWR | PG_BINARY, 0600); @@ -336,7 +336,7 @@ mdcreate(SMgrRelation reln, ForkNumber forkNum, bool isRedo) * if the contents of the file were repopulated by subsequent WAL entries. * But if we didn't WAL-log insertions, but instead relied on fsyncing the * file after populating it (as for instance CLUSTER and CREATE INDEX do), - * the contents of the file would be lost forever. By leaving the empty file + * the contents of the file would be lost forever. By leaving the empty file * until after the next checkpoint, we prevent reassignment of the relfilenode * number until it's safe, because relfilenode assignment skips over any * existing file. @@ -349,7 +349,7 @@ mdcreate(SMgrRelation reln, ForkNumber forkNum, bool isRedo) * * All the above applies only to the relation's main fork; other forks can * just be removed immediately, since they are not needed to prevent the - * relfilenode number from being recycled. Also, we do not carefully + * relfilenode number from being recycled. Also, we do not carefully * track whether other forks have been created or not, but just attempt to * unlink them unconditionally; so we should never complain about ENOENT. * @@ -366,7 +366,7 @@ mdunlink(RelFileNodeBackend rnode, ForkNumber forkNum, bool isRedo) { /* * We have to clean out any pending fsync requests for the doomed - * relation, else the next mdsync() will fail. There can't be any such + * relation, else the next mdsync() will fail. There can't be any such * requests for a temp relation, though. We can send just one request * even when deleting multiple forks, since the fsync queuing code accepts * the "InvalidForkNumber = all forks" convention. @@ -503,7 +503,7 @@ mdextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, /* * Note: because caller usually obtained blocknum by calling mdnblocks, * which did a seek(SEEK_END), this seek is often redundant and will be - * optimized away by fd.c. It's not redundant, however, if there is a + * optimized away by fd.c. It's not redundant, however, if there is a * partial page at the end of the file. In that case we want to try to * overwrite the partial page with a full page. 
It's also not redundant * if bufmgr.c had to dump another buffer of the same file to make room @@ -803,9 +803,9 @@ mdnblocks(SMgrRelation reln, ForkNumber forknum) * exactly RELSEG_SIZE long, and it's useless to recheck that each time. * * NOTE: this assumption could only be wrong if another backend has - * truncated the relation. We rely on higher code levels to handle that + * truncated the relation. We rely on higher code levels to handle that * scenario by closing and re-opening the md fd, which is handled via - * relcache flush. (Since the checkpointer doesn't participate in + * relcache flush. (Since the checkpointer doesn't participate in * relcache flush, it could have segment chain entries for inactive * segments; that's OK because the checkpointer never needs to compute * relation size.) @@ -999,7 +999,7 @@ mdsync(void) /* * If we are in the checkpointer, the sync had better include all fsync - * requests that were queued by backends up to this point. The tightest + * requests that were queued by backends up to this point. The tightest * race condition that could occur is that a buffer that must be written * and fsync'd for the checkpoint could have been dumped by a backend just * before it was visited by BufferSync(). We know the backend will have @@ -1115,7 +1115,7 @@ mdsync(void) * that have been deleted (unlinked) by the time we get to * them. Rather than just hoping an ENOENT (or EACCES on * Windows) error can be ignored, what we do on error is - * absorb pending requests and then retry. Since mdunlink() + * absorb pending requests and then retry. Since mdunlink() * queues a "cancel" message before actually unlinking, the * fsync request is guaranteed to be marked canceled after the * absorb if it really was this case. DROP DATABASE likewise @@ -1219,7 +1219,7 @@ mdsync(void) /* * We've finished everything that was requested before we started to - * scan the entry. If no new requests have been inserted meanwhile, + * scan the entry. If no new requests have been inserted meanwhile, * remove the entry. Otherwise, update its cycle counter, as all the * requests now in it must have arrived during this cycle. */ @@ -1324,7 +1324,7 @@ mdpostckpt(void) /* * As in mdsync, we don't want to stop absorbing fsync requests for a - * long time when there are many deletions to be done. We can safely + * long time when there are many deletions to be done. We can safely * call AbsorbFsyncRequests() at this point in the loop (note it might * try to delete list entries). */ @@ -1449,7 +1449,7 @@ RememberFsyncRequest(RelFileNode rnode, ForkNumber forknum, BlockNumber segno) /* * We can't just delete the entry since mdsync could have an * active hashtable scan. Instead we delete the bitmapsets; this - * is safe because of the way mdsync is coded. We also set the + * is safe because of the way mdsync is coded. We also set the * "canceled" flags so that mdsync can tell that a cancel arrived * for the fork(s). */ @@ -1551,7 +1551,7 @@ RememberFsyncRequest(RelFileNode rnode, ForkNumber forknum, BlockNumber segno) /* * NB: it's intentional that we don't change cycle_ctr if the entry - * already exists. The cycle_ctr must represent the oldest fsync + * already exists. The cycle_ctr must represent the oldest fsync * request that could be in the entry. */ @@ -1720,7 +1720,7 @@ _mdfd_getseg(SMgrRelation reln, ForkNumber forknum, BlockNumber blkno, { /* * Normally we will create new segments only if authorized by the - * caller (i.e., we are doing mdextend()). 
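The md.c comments above treat a relation as a chain of segment files of RELSEG_SIZE blocks each, so a relation-wide block number has to be translated into a segment number plus a byte offset inside that segment. A minimal sketch of that arithmetic, with illustrative constants standing in for RELSEG_SIZE and BLCKSZ:

#include <stdint.h>

#define SEG_BLOCKS 131072u              /* blocks per segment file, illustrative */
#define BLOCK_SIZE 8192u                /* bytes per block, illustrative */

struct seg_addr
{
    uint32_t segno;                     /* which segment file (".1", ".2", ...) */
    uint64_t offset;                    /* byte offset of the block within it */
};

/* Translate a relation-wide block number into a segment file and offset. */
static struct seg_addr
block_to_segment(uint32_t blkno)
{
    struct seg_addr addr;

    addr.segno = blkno / SEG_BLOCKS;
    addr.offset = (uint64_t) (blkno % SEG_BLOCKS) * BLOCK_SIZE;
    return addr;
}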
But when doing WAL + * caller (i.e., we are doing mdextend()). But when doing WAL * recovery, create segments anyway; this allows cases such as * replaying WAL data that has a write into a high-numbered * segment of a relation that was later deleted. We want to go diff --git a/src/backend/storage/smgr/smgr.c b/src/backend/storage/smgr/smgr.c index fcbdc4117a5..d16f5592983 100644 --- a/src/backend/storage/smgr/smgr.c +++ b/src/backend/storage/smgr/smgr.c @@ -494,7 +494,7 @@ smgrdounlinkall(SMgrRelation *rels, int nrels, bool isRedo) } /* - * Get rid of any remaining buffers for the relations. bufmgr will just + * Get rid of any remaining buffers for the relations. bufmgr will just * drop them without bothering to write the contents. */ DropRelFileNodesAllBuffers(rnodes, nrels); @@ -679,7 +679,7 @@ smgrtruncate(SMgrRelation reln, ForkNumber forknum, BlockNumber nblocks) * Send a shared-inval message to force other backends to close any smgr * references they may have for this rel. This is useful because they * might have open file pointers to segments that got removed, and/or - * smgr_targblock variables pointing past the new rel end. (The inval + * smgr_targblock variables pointing past the new rel end. (The inval * message will come back to our backend, too, causing a * probably-unnecessary local smgr flush. But we don't expect that this * is a performance-critical path.) As in the unlink code, we want to be diff --git a/src/backend/tcop/fastpath.c b/src/backend/tcop/fastpath.c index b2113496944..9f50c5add58 100644 --- a/src/backend/tcop/fastpath.c +++ b/src/backend/tcop/fastpath.c @@ -44,8 +44,8 @@ * each fastpath call as a separate transaction command, and so the * cached data could never actually have been reused. If it had worked * as intended, it would have had problems anyway with dangling references - * in the FmgrInfo struct. So, forget about caching and just repeat the - * syscache fetches on each usage. They're not *that* expensive. + * in the FmgrInfo struct. So, forget about caching and just repeat the + * syscache fetches on each usage. They're not *that* expensive. */ struct fp_info { @@ -205,7 +205,7 @@ fetch_fp_info(Oid func_id, struct fp_info * fip) /* * Since the validity of this structure is determined by whether the - * funcid is OK, we clear the funcid here. It must not be set to the + * funcid is OK, we clear the funcid here. It must not be set to the * correct value until we are about to return with a good struct fp_info, * since we can be interrupted (i.e., with an ereport(ERROR, ...)) at any * time. [No longer really an issue since we don't save the struct @@ -257,7 +257,7 @@ fetch_fp_info(Oid func_id, struct fp_info * fip) * RETURNS: * 0 if successful completion, EOF if frontend connection lost. * - * Note: All ordinary errors result in ereport(ERROR,...). However, + * Note: All ordinary errors result in ereport(ERROR,...). However, * if we lose the frontend connection there is no one to ereport to, * and no use in proceeding... * @@ -526,7 +526,7 @@ parse_fcall_arguments(StringInfo msgBuf, struct fp_info * fip, /* * Since stringinfo.c keeps a trailing null in place even for - * binary data, the contents of abuf are a valid C string. We + * binary data, the contents of abuf are a valid C string. We * have to do encoding conversion before calling the typinput * routine, though. 
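The fastpath comment above relies on the convention that a StringInfo-style buffer keeps a terminating NUL just past its logical length even when the payload is binary, so the contents can be handed to C-string APIs. A minimal standalone sketch of a buffer that maintains that invariant; the strbuf name and growth policy are invented and this is not stringinfo.c's API:

#include <stdlib.h>
#include <string.h>

typedef struct
{
    char   *data;
    size_t  len;                        /* bytes of payload, not counting the NUL */
    size_t  cap;                        /* allocated size of data */
} strbuf;

static void
strbuf_init(strbuf *b)
{
    b->cap = 64;
    b->len = 0;
    b->data = malloc(b->cap);           /* allocation failure handling omitted */
    b->data[0] = '\0';
}

/* Append raw (possibly binary) bytes, always keeping data[len] == '\0'. */
static void
strbuf_append(strbuf *b, const char *bytes, size_t n)
{
    while (b->len + n + 1 > b->cap)
    {
        b->cap *= 2;
        b->data = realloc(b->data, b->cap);
    }
    memcpy(b->data + b->len, bytes, n);
    b->len += n;
    b->data[b->len] = '\0';             /* the invariant the comment relies on */
}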
*/ diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c index be961017d66..89d2d4a7b8a 100644 --- a/src/backend/tcop/postgres.c +++ b/src/backend/tcop/postgres.c @@ -443,7 +443,7 @@ SocketBackend(StringInfo inBuf) default: /* - * Otherwise we got garbage from the frontend. We treat this as + * Otherwise we got garbage from the frontend. We treat this as * fatal because we have probably lost message boundary sync, and * there's no good way to recover. */ @@ -851,7 +851,7 @@ exec_simple_query(const char *query_string) ResetUsage(); /* - * Start up a transaction command. All queries generated by the + * Start up a transaction command. All queries generated by the * query_string will be in this same command block, *unless* we find a * BEGIN/COMMIT/ABORT statement; we have to force a new xact command after * one of those, else bad things will happen in xact.c. (Note that this @@ -860,7 +860,7 @@ exec_simple_query(const char *query_string) start_xact_command(); /* - * Zap any pre-existing unnamed statement. (While not strictly necessary, + * Zap any pre-existing unnamed statement. (While not strictly necessary, * it seems best to define simple-Query mode as if it used the unnamed * statement and portal; this ensures we recover any storage used by prior * unnamed operations.) @@ -919,7 +919,7 @@ exec_simple_query(const char *query_string) /* * Get the command name for use in status display (it also becomes the - * default completion tag, down inside PortalRun). Set ps_status and + * default completion tag, down inside PortalRun). Set ps_status and * do any special start-of-SQL-command processing needed by the * destination. */ @@ -1007,7 +1007,7 @@ exec_simple_query(const char *query_string) /* * Select the appropriate output format: text unless we are doing a - * FETCH from a binary cursor. (Pretty grotty to have to do this here + * FETCH from a binary cursor. (Pretty grotty to have to do this here * --- but it avoids grottiness in other places. Ah, the joys of * backward compatibility...) */ @@ -1308,7 +1308,7 @@ exec_parse_message(const char *query_string, /* string to execute */ } else { - /* Empty input string. This is legal. */ + /* Empty input string. This is legal. */ raw_parse_tree = NULL; commandTag = NULL; psrc = CreateCachedPlan(raw_parse_tree, query_string, commandTag); @@ -1358,7 +1358,7 @@ exec_parse_message(const char *query_string, /* string to execute */ /* * We do NOT close the open transaction command here; that only happens - * when the client sends Sync. Instead, do CommandCounterIncrement just + * when the client sends Sync. Instead, do CommandCounterIncrement just * in case something happened during parse/plan. */ CommandCounterIncrement(); @@ -1500,7 +1500,7 @@ exec_bind_message(StringInfo input_message) * If we are in aborted transaction state, the only portals we can * actually run are those containing COMMIT or ROLLBACK commands. We * disallow binding anything else to avoid problems with infrastructure - * that expects to run inside a valid transaction. We also disallow + * that expects to run inside a valid transaction. We also disallow * binding any parameters, since we can't risk calling user-defined I/O * functions. */ @@ -1589,7 +1589,7 @@ exec_bind_message(StringInfo input_message) /* * Rather than copying data around, we just set up a phony * StringInfo pointing to the correct portion of the message - * buffer. We assume we can scribble on the message buffer so + * buffer. 
We assume we can scribble on the message buffer so * as to maintain the convention that StringInfos have a * trailing null. This is grotty but is a big win when * dealing with very large parameter strings. @@ -1939,7 +1939,7 @@ exec_execute_message(const char *portal_name, long max_rows) if (is_xact_command) { /* - * If this was a transaction control statement, commit it. We + * If this was a transaction control statement, commit it. We * will start a new xact command for the next command (if any). */ finish_xact_command(); @@ -2345,7 +2345,7 @@ exec_describe_portal_message(const char *portal_name) /* * If we are in aborted transaction state, we can't run * SendRowDescriptionMessage(), because that needs catalog accesses. - * Hence, refuse to Describe portals that return data. (We shouldn't just + * Hence, refuse to Describe portals that return data. (We shouldn't just * refuse all Describes, since that might break the ability of some * clients to issue COMMIT or ROLLBACK commands, if they use code that * blindly Describes whatever it does.) @@ -2562,7 +2562,7 @@ quickdie(SIGNAL_ARGS) on_exit_reset(); /* - * Note we do exit(2) not exit(0). This is to force the postmaster into a + * Note we do exit(2) not exit(0). This is to force the postmaster into a * system reset cycle if some idiot DBA sends a manual SIGQUIT to a random * backend. This is necessary precisely because we don't clean up our * shared memory state. (The "dead man switch" mechanism in pmsignal.c @@ -3291,7 +3291,7 @@ process_postgres_switches(int argc, char *argv[], GucContext ctx, #endif /* - * Parse command-line options. CAUTION: keep this in sync with + * Parse command-line options. CAUTION: keep this in sync with * postmaster/postmaster.c (the option sets should not conflict) and with * the common help() function in main/main.c. */ @@ -3594,7 +3594,7 @@ PostgresMain(int argc, char *argv[], * we have set up the handler. * * Also note: it's best not to use any signals that are SIG_IGNored in the - * postmaster. If such a signal arrives before we are able to change the + * postmaster. If such a signal arrives before we are able to change the * handler to non-SIG_IGN, it'll get dropped. Instead, make a dummy * handler in the postmaster to reserve the signal. (Of course, this isn't * an issue for signals that are locally generated, such as SIGALRM and @@ -3800,7 +3800,7 @@ PostgresMain(int argc, char *argv[], /* * NOTE: if you are tempted to add more code in this if-block, * consider the high probability that it should be in - * AbortTransaction() instead. The only stuff done directly here + * AbortTransaction() instead. The only stuff done directly here * should be stuff that is guaranteed to apply *only* for outer-level * error recovery, such as adjusting the FE/BE protocol status. */ @@ -3923,7 +3923,7 @@ PostgresMain(int argc, char *argv[], * collector, and to update the PS stats display. We avoid doing * those every time through the message loop because it'd slow down * processing of batched messages, and because we don't want to report - * uncommitted updates (that confuses autovacuum). The notification + * uncommitted updates (that confuses autovacuum). The notification * processor wants a call too, if we are not in a transaction block. 
*/ if (send_ready_for_query) diff --git a/src/backend/tcop/pquery.c b/src/backend/tcop/pquery.c index f85bd031c19..fa561a4861a 100644 --- a/src/backend/tcop/pquery.c +++ b/src/backend/tcop/pquery.c @@ -558,7 +558,7 @@ PortalStart(Portal portal, ParamListInfo params, /* * We don't start the executor until we are told to run the - * portal. We do need to set up the result tupdesc. + * portal. We do need to set up the result tupdesc. */ { PlannedStmt *pstmt; @@ -908,7 +908,7 @@ PortalRunSelect(Portal portal, Assert(queryDesc || portal->holdStore); /* - * Force the queryDesc destination to the right thing. This supports + * Force the queryDesc destination to the right thing. This supports * MOVE, for example, which will pass in dest = DestNone. This is okay to * change as long as we do it on every fetch. (The Executor must not * assume that dest never changes.) @@ -1156,12 +1156,12 @@ PortalRunUtility(Portal portal, Node *utilityStmt, bool isTopLevel, elog(DEBUG3, "ProcessUtility"); /* - * Set snapshot if utility stmt needs one. Most reliable way to do this + * Set snapshot if utility stmt needs one. Most reliable way to do this * seems to be to enumerate those that do not need one; this is a short * list. Transaction control, LOCK, and SET must *not* set a snapshot * since they need to be executable at the start of a transaction-snapshot * mode transaction without freezing a snapshot. By extension we allow - * SHOW not to set a snapshot. The other stmts listed are just efficiency + * SHOW not to set a snapshot. The other stmts listed are just efficiency * hacks. Beware of listing anything that can modify the database --- if, * say, it has to update an index with expressions that invoke * user-defined functions, then it had better have a snapshot. @@ -1196,7 +1196,7 @@ PortalRunUtility(Portal portal, Node *utilityStmt, bool isTopLevel, /* * Some utility commands may pop the ActiveSnapshot stack from under us, - * so we only pop the stack if we actually see a snapshot set. Note that + * so we only pop the stack if we actually see a snapshot set. Note that * the set of utility commands that do this must be the same set * disallowed to run inside a transaction; otherwise, we could be popping * a snapshot that belongs to some other operation. @@ -1518,7 +1518,7 @@ DoPortalRunFetch(Portal portal, * Definition: Rewind to start, advance count-1 rows, return * next row (if any). In practice, if the goal is less than * halfway back to the start, it's better to scan from where - * we are. In any case, we arrange to fetch the target row + * we are. In any case, we arrange to fetch the target row * going forwards. */ if (portal->posOverflow || portal->portalPos == LONG_MAX || @@ -1625,7 +1625,7 @@ DoPortalRunFetch(Portal portal, * If we are sitting on a row, back up one so we can re-fetch it. * If we are not sitting on a row, we still have to start up and * shut down the executor so that the destination is initialized - * and shut down correctly; so keep going. To PortalRunSelect, + * and shut down correctly; so keep going. To PortalRunSelect, * count == 0 means we will retrieve no row. 
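The DoPortalRunFetch() comment above picks between rewinding to the start and moving relative to the current position when servicing an absolute FETCH, depending on which path skips fewer rows. A rough sketch of that decision for a hypothetical cursor with a known 1-based current position; the real code additionally handles position overflow, backward scans and end-of-set bookkeeping:

/*
 * Decide how to reach an absolute, 1-based target row.  Returns the signed
 * number of rows to move after an optional rewind; *do_rewind says whether
 * to rewind first.  This mirrors only the "less than halfway back"
 * heuristic described above.
 */
static long
plan_absolute_fetch(long current_pos, long target, int *do_rewind)
{
    if (target - 1 <= current_pos / 2)
    {
        /* target is far behind us: cheaper to rewind and scan forward */
        *do_rewind = 1;
        return target - 1;              /* rows to advance before fetching */
    }
    else
    {
        /* stay put and move relative to the current position */
        *do_rewind = 0;
        return target - current_pos;    /* negative means scan backwards */
    }
}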
*/ if (on_row) diff --git a/src/backend/tcop/utility.c b/src/backend/tcop/utility.c index 1846570a3e3..3423898c112 100644 --- a/src/backend/tcop/utility.c +++ b/src/backend/tcop/utility.c @@ -232,7 +232,7 @@ PreventCommandIfReadOnly(const char *cmdname) * PreventCommandDuringRecovery: throw error if RecoveryInProgress * * The majority of operations that are unsafe in a Hot Standby slave - * will be rejected by XactReadOnly tests. However there are a few + * will be rejected by XactReadOnly tests. However there are a few * commands that are allowed in "read-only" xacts but cannot be allowed * in Hot Standby mode. Those commands should call this function. */ @@ -965,7 +965,7 @@ ProcessUtilitySlow(Node *parsetree, LOCKMODE lockmode; /* - * Figure out lock mode, and acquire lock. This also does + * Figure out lock mode, and acquire lock. This also does * basic permissions checks, so that we won't wait for a * lock on (for example) a relation on which we have no * permissions. diff --git a/src/backend/tsearch/ts_locale.c b/src/backend/tsearch/ts_locale.c index f9490c835dd..1b866f31ed2 100644 --- a/src/backend/tsearch/ts_locale.c +++ b/src/backend/tsearch/ts_locale.c @@ -90,7 +90,7 @@ t_isprint(const char *ptr) /* - * Set up to read a file using tsearch_readline(). This facility is + * Set up to read a file using tsearch_readline(). This facility is * better than just reading the file directly because it provides error * context pointing to the specific line where a problem is detected. * @@ -168,7 +168,7 @@ tsearch_readline_callback(void *arg) /* * We can't include the text of the config line for errors that occur - * during t_readline() itself. This is only partly a consequence of our + * during t_readline() itself. This is only partly a consequence of our * arms-length use of that routine: the major cause of such errors is * encoding violations, and we daren't try to print error messages * containing badly-encoded data. diff --git a/src/backend/tsearch/ts_selfuncs.c b/src/backend/tsearch/ts_selfuncs.c index 273f13068b3..25337e87ab6 100644 --- a/src/backend/tsearch/ts_selfuncs.c +++ b/src/backend/tsearch/ts_selfuncs.c @@ -319,7 +319,7 @@ tsquery_opr_selec(QueryItem *item, char *operand, * exclusive. We treat occurrences as independent events. * * This is only a good plan if we have a pretty fair number of - * MCELEMs available; we set the threshold at 100. If no stats or + * MCELEMs available; we set the threshold at 100. If no stats or * insufficient stats, arbitrarily use DEFAULT_TS_MATCH_SEL*4. */ if (lookup == NULL || length < 100) diff --git a/src/backend/tsearch/ts_typanalyze.c b/src/backend/tsearch/ts_typanalyze.c index c4691fa0b0f..fe208704a13 100644 --- a/src/backend/tsearch/ts_typanalyze.c +++ b/src/backend/tsearch/ts_typanalyze.c @@ -114,13 +114,13 @@ ts_typanalyze(PG_FUNCTION_ARGS) * language's frequency table, where K is the target number of entries in * the MCELEM array plus an arbitrary constant, meant to reflect the fact * that the most common words in any language would usually be stopwords - * so we will not actually see them in the input. We assume that the + * so we will not actually see them in the input. We assume that the * distribution of word frequencies (including the stopwords) follows Zipf's * law with an exponent of 1. * * Assuming Zipfian distribution, the frequency of the K'th word is equal * to 1/(K * H(W)) where H(n) is 1/2 + 1/3 + ... + 1/n and W is the number of - * words in the language. Putting W as one million, we get roughly 0.07/K. 
+ * words in the language. Putting W as one million, we get roughly 0.07/K. * Assuming top 10 words are stopwords gives s = 0.07/(K + 10). We set * epsilon = s/10, which gives bucket width w = (K + 10)/0.007 and * maximum expected hashtable size of about 1000 * (K + 10). @@ -161,7 +161,7 @@ compute_tsvector_stats(VacAttrStats *stats, TrackItem *item; /* - * We want statistics_target * 10 lexemes in the MCELEM array. This + * We want statistics_target * 10 lexemes in the MCELEM array. This * multiplier is pretty arbitrary, but is meant to reflect the fact that * the number of individual lexeme values tracked in pg_statistic ought to * be more than the number of values for a simple scalar column. @@ -232,7 +232,7 @@ compute_tsvector_stats(VacAttrStats *stats, /* * We loop through the lexemes in the tsvector and add them to our - * tracking hashtable. Note: the hashtable entries will point into + * tracking hashtable. Note: the hashtable entries will point into * the (detoasted) tsvector value, therefore we cannot free that * storage until we're done. */ @@ -299,7 +299,7 @@ compute_tsvector_stats(VacAttrStats *stats, /* * Construct an array of the interesting hashtable items, that is, - * those meeting the cutoff frequency (s - epsilon)*N. Also identify + * those meeting the cutoff frequency (s - epsilon)*N. Also identify * the minimum and maximum frequencies among these items. * * Since epsilon = s/10 and bucket_width = 1/epsilon, the cutoff @@ -332,7 +332,7 @@ compute_tsvector_stats(VacAttrStats *stats, /* * If we obtained more lexemes than we really want, get rid of those - * with least frequencies. The easiest way is to qsort the array into + * with least frequencies. The easiest way is to qsort the array into * descending frequency order and truncate the array. */ if (num_mcelem < track_len) @@ -363,7 +363,7 @@ compute_tsvector_stats(VacAttrStats *stats, * they get sorted on frequencies. The rationale is that we * usually search through most common elements looking for a * specific value, so we can grab its frequency. When values are - * presorted we can employ binary search for that. See + * presorted we can employ binary search for that. See * ts_selfuncs.c for a real usage scenario. */ qsort(sort_table, num_mcelem, sizeof(TrackItem *), diff --git a/src/backend/tsearch/ts_utils.c b/src/backend/tsearch/ts_utils.c index 8a0e0767cba..2e8f4f168f9 100644 --- a/src/backend/tsearch/ts_utils.c +++ b/src/backend/tsearch/ts_utils.c @@ -23,8 +23,8 @@ /* * Given the base name and extension of a tsearch config file, return - * its full path name. The base name is assumed to be user-supplied, - * and is checked to prevent pathname attacks. The extension is assumed + * its full path name. The base name is assumed to be user-supplied, + * and is checked to prevent pathname attacks. The extension is assumed * to be safe. * * The result is a palloc'd string. @@ -37,7 +37,7 @@ get_tsearch_config_filename(const char *basename, char *result; /* - * We limit the basename to contain a-z, 0-9, and underscores. This may + * We limit the basename to contain a-z, 0-9, and underscores. This may * be overly restrictive, but we don't want to allow access to anything * outside the tsearch_data directory, so for instance '/' *must* be * rejected, and on some platforms '\' and ':' are risky as well. Allowing @@ -61,7 +61,7 @@ get_tsearch_config_filename(const char *basename, /* * Reads a stop-word file. Each word is run through 'wordop' - * function, if given. 
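The ts_typanalyze() comment above derives its lossy-counting parameters from Zipf's law, and the quoted numbers can be sanity-checked directly: H(10^6) is roughly ln(10^6) + 0.577, about 14.4, so 1/(K * H(W)) comes out near 0.07/K. A small standalone check of that arithmetic (K = 100 is just an example statistics target):

#include <math.h>
#include <stdio.h>

int
main(void)
{
    double  W = 1e6;                     /* assumed number of words in the language */
    double  K = 100;                     /* example MCELEM target */
    double  H_W = log(W) + 0.5772156649; /* H(W) ~= ln(W) + Euler's constant */
    double  s = 1.0 / ((K + 10) * H_W);  /* frequency of the (K+10)'th word */
    double  epsilon = s / 10.0;

    printf("1/H(W) ~= %.4f  (the comment's 0.07)\n", 1.0 / H_W);
    printf("s ~= %.5f       (the comment's 0.07/(K+10))\n", s);
    printf("bucket width 1/epsilon ~= %.0f  ((K+10)/0.007 ~= %.0f)\n",
           1.0 / epsilon, (K + 10) / 0.007);
    return 0;
}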
wordop may either modify the input in-place, + * function, if given. wordop may either modify the input in-place, * or palloc a new version. */ void diff --git a/src/backend/tsearch/wparser_def.c b/src/backend/tsearch/wparser_def.c index 67282121411..d53f2e75c2e 100644 --- a/src/backend/tsearch/wparser_def.c +++ b/src/backend/tsearch/wparser_def.c @@ -330,7 +330,7 @@ TParserInit(char *str, int len) /* * Use of %.*s here is a bit risky since it can misbehave if the data is - * not in what libc thinks is the prevailing encoding. However, since + * not in what libc thinks is the prevailing encoding. However, since * this is just a debugging aid, we choose to live with that. */ fprintf(stderr, "parsing \"%.*s\"\n", len, str); diff --git a/src/backend/utils/adt/acl.c b/src/backend/utils/adt/acl.c index dfac1243a40..38cd5b89c99 100644 --- a/src/backend/utils/adt/acl.c +++ b/src/backend/utils/adt/acl.c @@ -123,7 +123,7 @@ static Oid get_role_oid_or_public(const char *rolname); /* * getid * Consumes the first alphanumeric string (identifier) found in string - * 's', ignoring any leading white space. If it finds a double quote + * 's', ignoring any leading white space. If it finds a double quote * it returns the word inside the quotes. * * RETURNS: @@ -229,7 +229,7 @@ putid(char *p, const char *s) * * RETURNS: * the string position in 's' immediately following the ACL - * specification. Also: + * specification. Also: * - loads the structure pointed to by 'aip' with the appropriate * UID/GID, id type identifier and mode type values. */ @@ -837,7 +837,7 @@ acldefault(GrantObjectType objtype, Oid ownerId) /* - * SQL-accessible version of acldefault(). Hackish mapping from "char" type to + * SQL-accessible version of acldefault(). Hackish mapping from "char" type to * ACL_OBJECT_* values, but it's only used in the information schema, not * documented for general use. */ @@ -1006,7 +1006,7 @@ aclupdate(const Acl *old_acl, const AclItem *mod_aip, } /* - * Remove abandoned privileges (cascading revoke). Currently we can only + * Remove abandoned privileges (cascading revoke). Currently we can only * handle this when the grantee is not PUBLIC. */ if ((old_goptions & ~new_goptions) != 0) @@ -1072,7 +1072,7 @@ aclnewowner(const Acl *old_acl, Oid oldOwnerId, Oid newOwnerId) /* * If the old ACL contained any references to the new owner, then we may - * now have generated an ACL containing duplicate entries. Find them and + * now have generated an ACL containing duplicate entries. Find them and * merge them so that there are not duplicates. (This is relatively * expensive since we use a stupid O(N^2) algorithm, but it's unlikely to * be the normal case.) @@ -1083,7 +1083,7 @@ aclnewowner(const Acl *old_acl, Oid oldOwnerId, Oid newOwnerId) * remove privilege-free entries, should there be any in the input.) dst * is the next output slot, targ is the currently considered input slot * (always >= dst), and src scans entries to the right of targ looking for - * duplicates. Once an entry has been emitted to dst it is known + * duplicates. Once an entry has been emitted to dst it is known * duplicate-free and need not be considered anymore. */ if (newpresent) @@ -2468,7 +2468,7 @@ column_privilege_check(Oid tableoid, AttrNumber attnum, * existence of the pg_class row before risking calling pg_class_aclcheck. * Note: it might seem there's a race condition against concurrent DROP, * but really it's safe because there will be no syscache flush between - * here and there. 
So if we see the row in the syscache, so will + * here and there. So if we see the row in the syscache, so will * pg_class_aclcheck. */ if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(tableoid))) @@ -4904,7 +4904,7 @@ is_member_of_role_nosuper(Oid member, Oid role) /* - * Is member an admin of role? That is, is member the role itself (subject to + * Is member an admin of role? That is, is member the role itself (subject to * restrictions below), a member (directly or indirectly) WITH ADMIN OPTION, * or a superuser? */ @@ -4919,6 +4919,7 @@ is_admin_of_role(Oid member, Oid role) return true; if (member == role) + /* * A role can admin itself when it matches the session user and we're * outside any security-restricted operation, SECURITY DEFINER or @@ -5015,14 +5016,14 @@ count_one_bits(AclMode mask) * The grantor must always be either the object owner or some role that has * been explicitly granted grant options. This ensures that all granted * privileges appear to flow from the object owner, and there are never - * multiple "original sources" of a privilege. Therefore, if the would-be + * multiple "original sources" of a privilege. Therefore, if the would-be * grantor is a member of a role that has the needed grant options, we have * to do the grant as that role instead. * * It is possible that the would-be grantor is a member of several roles * that have different subsets of the desired grant options, but no one * role has 'em all. In this case we pick a role with the largest number - * of desired options. Ties are broken in favor of closer ancestors. + * of desired options. Ties are broken in favor of closer ancestors. * * roleId: the role attempting to do the GRANT/REVOKE * privileges: the privileges to be granted/revoked diff --git a/src/backend/utils/adt/array_selfuncs.c b/src/backend/utils/adt/array_selfuncs.c index 20eb358a620..170a28a067c 100644 --- a/src/backend/utils/adt/array_selfuncs.c +++ b/src/backend/utils/adt/array_selfuncs.c @@ -524,7 +524,7 @@ mcelem_array_selec(ArrayType *array, TypeCacheEntry *typentry, /* * Estimate selectivity of "column @> const" and "column && const" based on - * most common element statistics. This estimation assumes element + * most common element statistics. This estimation assumes element * occurrences are independent. * * mcelem (of length nmcelem) and numbers (of length nnumbers) are from @@ -689,7 +689,7 @@ mcelem_array_contain_overlap_selec(Datum *mcelem, int nmcelem, * In the "column @> const" and "column && const" cases, we usually have a * "const" with low number of elements (otherwise we have selectivity close * to 0 or 1 respectively). That's why the effect of dependence related - * to distinct element count distribution is negligible there. In the + * to distinct element count distribution is negligible there. In the * "column <@ const" case, number of elements is usually high (otherwise we * have selectivity close to 0). That's why we should do a correction with * the array distinct element count distribution here. @@ -848,7 +848,7 @@ mcelem_array_contained_selec(Datum *mcelem, int nmcelem, /* * The presence of many distinct rare elements materially decreases * selectivity. Use the Poisson distribution to estimate the probability - * of a column value having zero occurrences of such elements. See above + * of a column value having zero occurrences of such elements. See above * for the definition of "rest". 
*/ mult *= exp(-rest); @@ -856,7 +856,7 @@ mcelem_array_contained_selec(Datum *mcelem, int nmcelem, /*---------- * Using the distinct element count histogram requires * O(unique_nitems * (nmcelem + unique_nitems)) - * operations. Beyond a certain computational cost threshold, it's + * operations. Beyond a certain computational cost threshold, it's * reasonable to sacrifice accuracy for decreased planning time. We limit * the number of operations to EFFORT * nmcelem; since nmcelem is limited * by the column's statistics target, the work done is user-controllable. @@ -868,7 +868,7 @@ mcelem_array_contained_selec(Datum *mcelem, int nmcelem, * elements to start with, we'd have to remove any discarded elements' * frequencies from "mult", but since this is only an approximation * anyway, we don't bother with that. Therefore it's sufficient to qsort - * elem_selec[] and take the largest elements. (They will no longer match + * elem_selec[] and take the largest elements. (They will no longer match * up with the elements of array_data[], but we don't care.) *---------- */ @@ -878,7 +878,7 @@ mcelem_array_contained_selec(Datum *mcelem, int nmcelem, unique_nitems > EFFORT * nmcelem / (nmcelem + unique_nitems)) { /* - * Use the quadratic formula to solve for largest allowable N. We + * Use the quadratic formula to solve for largest allowable N. We * have A = 1, B = nmcelem, C = - EFFORT * nmcelem. */ double b = (double) nmcelem; @@ -953,7 +953,7 @@ calc_hist(const float4 *hist, int nhist, int n) /* * frac is a probability contribution for each interval between histogram - * values. We have nhist - 1 intervals, so contribution of each one will + * values. We have nhist - 1 intervals, so contribution of each one will * be 1 / (nhist - 1). */ frac = 1.0f / ((float) (nhist - 1)); @@ -1020,8 +1020,8 @@ calc_hist(const float4 *hist, int nhist, int n) * "rest" is the sum of the probabilities of all low-probability events not * included in p. * - * Imagine matrix M of size (n + 1) x (m + 1). Element M[i,j] denotes the - * probability that exactly j of first i events occur. Obviously M[0,0] = 1. + * Imagine matrix M of size (n + 1) x (m + 1). Element M[i,j] denotes the + * probability that exactly j of first i events occur. Obviously M[0,0] = 1. * For any constant j, each increment of i increases the probability iff the * event occurs. So, by the law of total probability: * M[i,j] = M[i - 1, j] * (1 - p[i]) + M[i - 1, j - 1] * p[i] @@ -1143,7 +1143,7 @@ floor_log2(uint32 n) /* * find_next_mcelem binary-searches a most common elements array, starting - * from *index, for the first member >= value. It saves the position of the + * from *index, for the first member >= value. It saves the position of the * match into *index and returns true if it's an exact match. (Note: we * assume the mcelem elements are distinct so there can't be more than one * exact match.) diff --git a/src/backend/utils/adt/array_typanalyze.c b/src/backend/utils/adt/array_typanalyze.c index 70aba1b5d8d..4d7e9c311fb 100644 --- a/src/backend/utils/adt/array_typanalyze.c +++ b/src/backend/utils/adt/array_typanalyze.c @@ -160,13 +160,13 @@ array_typanalyze(PG_FUNCTION_ARGS) * compute_array_stats() -- compute statistics for a array column * * This function computes statistics useful for determining selectivity of - * the array operators <@, &&, and @>. It is invoked by ANALYZE via the + * the array operators <@, &&, and @>. It is invoked by ANALYZE via the * compute_stats hook after sample rows have been collected. 
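The probability-matrix comment above gives the law-of-total-probability recurrence M[i,j] = M[i-1,j]*(1-p[i]) + M[i-1,j-1]*p[i] for the number of independent events that occur. A minimal standalone sketch of that dynamic program, keeping only the previous row; p[] is 0-based here, unlike the 1-based indexing in the comment, and allocation failures are not handled:

#include <stdlib.h>
#include <string.h>

/*
 * Fill an array dist[0..n] with the probability that exactly j of the n
 * independent events with probabilities p[0..n-1] occur.  The caller is
 * responsible for freeing the returned malloc'd array.
 */
static double *
calc_event_count_distribution(const double *p, int n)
{
    double *cur = calloc(n + 1, sizeof(double));
    double *prev = calloc(n + 1, sizeof(double));
    int     i, j;

    prev[0] = 1.0;                      /* M[0,0] = 1: zero events, zero occurrences */
    for (i = 1; i <= n; i++)
    {
        cur[0] = prev[0] * (1.0 - p[i - 1]);
        for (j = 1; j <= i; j++)
            cur[j] = prev[j] * (1.0 - p[i - 1]) + prev[j - 1] * p[i - 1];
        memcpy(prev, cur, (n + 1) * sizeof(double));
    }
    free(cur);
    return prev;
}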
* * We also invoke the standard compute_stats function, which will compute * "scalar" statistics relevant to the btree-style array comparison operators. * However, exact duplicates of an entire array may be rare despite many - * arrays sharing individual elements. This especially afflicts long arrays, + * arrays sharing individual elements. This especially afflicts long arrays, * which are also liable to lack all scalar statistics due to the low * WIDTH_THRESHOLD used in analyze.c. So, in addition to the standard stats, * we find the most common array elements and compute a histogram of distinct @@ -201,7 +201,7 @@ array_typanalyze(PG_FUNCTION_ARGS) * In the absence of a principled basis for other particular values, we * follow ts_typanalyze() and use parameters s = 0.07/K, epsilon = s/10. * But we leave out the correction for stopwords, which do not apply to - * arrays. These parameters give bucket width w = K/0.007 and maximum + * arrays. These parameters give bucket width w = K/0.007 and maximum * expected hashtable size of about 1000 * K. * * Elements may repeat within an array. Since duplicates do not change the @@ -463,7 +463,7 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc, /* * Construct an array of the interesting hashtable items, that is, - * those meeting the cutoff frequency (s - epsilon)*N. Also identify + * those meeting the cutoff frequency (s - epsilon)*N. Also identify * the minimum and maximum frequencies among these items. * * Since epsilon = s/10 and bucket_width = 1/epsilon, the cutoff @@ -498,7 +498,7 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc, /* * If we obtained more elements than we really want, get rid of those - * with least frequencies. The easiest way is to qsort the array into + * with least frequencies. The easiest way is to qsort the array into * descending frequency order and truncate the array. */ if (num_mcelem < track_len) @@ -532,7 +532,7 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc, /* * We sorted statistics on the element value, but we want to be * able to find the minimal and maximal frequencies without going - * through all the values. We also want the frequency of null + * through all the values. We also want the frequency of null * elements. Store these three values at the end of mcelem_freqs. */ mcelem_values = (Datum *) palloc(num_mcelem * sizeof(Datum)); @@ -623,7 +623,7 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc, * (compare the histogram-making loop in compute_scalar_stats()). * But instead of that we have the sorted_count_items[] array, * which holds unique DEC values with their frequencies (that is, - * a run-length-compressed version of the full array). So we + * a run-length-compressed version of the full array). So we * control advancing through sorted_count_items[] with the * variable "frac", which is defined as (x - y) * (num_hist - 1), * where x is the index in the notional DECs array corresponding diff --git a/src/backend/utils/adt/array_userfuncs.c b/src/backend/utils/adt/array_userfuncs.c index c62e3fb1765..831466dec91 100644 --- a/src/backend/utils/adt/array_userfuncs.c +++ b/src/backend/utils/adt/array_userfuncs.c @@ -502,7 +502,7 @@ array_agg_transfn(PG_FUNCTION_ARGS) /* * The transition type for array_agg() is declared to be "internal", which - * is a pass-by-value type the same size as a pointer. So we can safely + * is a pass-by-value type the same size as a pointer. 
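The cutoff above is the Lossy Counting bound with the stated parameters: s = 0.07/K and epsilon = s/10, so only elements whose tracked count reaches (s - epsilon) * N survive. A sketch of just that arithmetic; K (average distinct elements per array) and N (total element occurrences sampled) are assumed inputs:

/*
 * Sketch: minimum tracked count an element must reach to be kept as a
 * "most common element".  bucket_width would be 1/epsilon, roughly
 * 142.9 * K, matching the K/0.007 figure above.
 */
static long
mce_cutoff_count(double K, long N)
{
    double      s = 0.07 / K;
    double      epsilon = s / 10.0;

    return (long) ((s - epsilon) * (double) N);
}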
So we can safely * pass the ArrayBuildState pointer through nodeAgg.c's machinations. */ PG_RETURN_POINTER(state); @@ -517,7 +517,7 @@ array_agg_finalfn(PG_FUNCTION_ARGS) int lbs[1]; /* - * Test for null before Asserting we are in right context. This is to + * Test for null before Asserting we are in right context. This is to * avoid possible Assert failure in 8.4beta installations, where it is * possible for users to create NULL constants of type internal. */ diff --git a/src/backend/utils/adt/arrayfuncs.c b/src/backend/utils/adt/arrayfuncs.c index 91df1842427..f8e94ec3652 100644 --- a/src/backend/utils/adt/arrayfuncs.c +++ b/src/backend/utils/adt/arrayfuncs.c @@ -694,7 +694,7 @@ ReadArrayStr(char *arrayStr, /* * We have to remove " and \ characters to create a clean item value to - * pass to the datatype input routine. We overwrite each item value + * pass to the datatype input routine. We overwrite each item value * in-place within arrayStr to do this. srcptr is the current scan point, * and dstptr is where we are copying to. * @@ -894,7 +894,7 @@ ReadArrayStr(char *arrayStr, * referenced by Datums after copying them. * * If the input data is of varlena type, the caller must have ensured that - * the values are not toasted. (Doing it here doesn't work since the + * the values are not toasted. (Doing it here doesn't work since the * caller has already allocated space for the array...) */ static void @@ -1747,6 +1747,7 @@ Datum array_cardinality(PG_FUNCTION_ARGS) { ArrayType *v = PG_GETARG_ARRAYTYPE_P(0); + PG_RETURN_INT32(ArrayGetNItems(ARR_NDIM(v), ARR_DIMS(v))); } @@ -2002,7 +2003,7 @@ array_get_slice(ArrayType *array, memcpy(ARR_DIMS(newarray), span, ndim * sizeof(int)); /* - * Lower bounds of the new array are set to 1. Formerly (before 7.3) we + * Lower bounds of the new array are set to 1. Formerly (before 7.3) we * copied the given lowerIndx values ... but that seems confusing. */ newlb = ARR_LBOUND(newarray); @@ -2634,7 +2635,7 @@ array_set_slice(ArrayType *array, /* * array_map() * - * Map an array through an arbitrary function. Return a new array with + * Map an array through an arbitrary function. Return a new array with * same dimensions and each source element transformed by fn(). Each * source element is passed as the first argument to fn(); additional * arguments to be passed to fn() can be specified by the caller. @@ -2649,9 +2650,9 @@ array_set_slice(ArrayType *array, * first argument position initially holds the input array value. * * inpType: OID of element type of input array. This must be the same as, * or binary-compatible with, the first argument type of fn(). - * * retType: OID of element type of output array. This must be the same as, + * * retType: OID of element type of output array. This must be the same as, * or binary-compatible with, the result type of fn(). - * * amstate: workspace for array_map. Must be zeroed by caller before + * * amstate: workspace for array_map. Must be zeroed by caller before * first call, and not touched after that. * * It is legitimate to pass a freshly-zeroed ArrayMapState on each call, @@ -3505,7 +3506,7 @@ array_cmp(FunctionCallInfo fcinfo) /* * If arrays contain same data (up to end of shorter one), apply - * additional rules to sort by dimensionality. The relative significance + * additional rules to sort by dimensionality. The relative significance * of the different bits of information is historical; mainly we just care * that we don't say "equal" for arrays of different dimensionality. 
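The in-place unescaping described above scans with one cursor and writes through another into the same buffer. A minimal standalone illustration of that srcptr/dstptr technique, not the actual ReadArrayStr() logic (the quoting rules are simplified):

/*
 * Sketch: strip double quotes and backslash escapes in place.  dstptr never
 * runs ahead of srcptr, so no extra buffer is needed.
 */
static void
unquote_in_place(char *s)
{
    char       *srcptr = s;
    char       *dstptr = s;

    while (*srcptr)
    {
        if (*srcptr == '"')
            srcptr++;           /* drop the quoting character */
        else if (*srcptr == '\\' && srcptr[1] != '\0')
        {
            srcptr++;           /* keep the escaped character verbatim */
            *dstptr++ = *srcptr++;
        }
        else
            *dstptr++ = *srcptr++;
    }
    *dstptr = '\0';
}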
*/ @@ -3767,7 +3768,7 @@ array_contain_compare(ArrayType *array1, ArrayType *array2, Oid collation, /* * We assume that the comparison operator is strict, so a NULL can't - * match anything. XXX this diverges from the "NULL=NULL" behavior of + * match anything. XXX this diverges from the "NULL=NULL" behavior of * array_eq, should we act like that? */ if (isnull1) @@ -4258,7 +4259,7 @@ array_copy(char *destptr, int nitems, * * Note: this could certainly be optimized using standard bitblt methods. * However, it's not clear that the typical Postgres array has enough elements - * to make it worth worrying too much. For the moment, KISS. + * to make it worth worrying too much. For the moment, KISS. */ void array_bitmap_copy(bits8 *destbitmap, int destoffset, @@ -4455,7 +4456,7 @@ array_extract_slice(ArrayType *newarray, * Insert a slice into an array. * * ndim/dim[]/lb[] are dimensions of the original array. A new array with - * those same dimensions is to be constructed. destArray must already + * those same dimensions is to be constructed. destArray must already * have been allocated and its header initialized. * * st[]/endp[] identify the slice to be replaced. Elements within the slice @@ -5123,7 +5124,7 @@ array_unnest(PG_FUNCTION_ARGS) * Get the array value and detoast if needed. We can't do this * earlier because if we have to detoast, we want the detoasted copy * to be in multi_call_memory_ctx, so it will go away when we're done - * and not before. (If no detoast happens, we assume the originally + * and not before. (If no detoast happens, we assume the originally * passed array will stick around till then.) */ arr = PG_GETARG_ARRAYTYPE_P(0); @@ -5199,7 +5200,7 @@ array_unnest(PG_FUNCTION_ARGS) * * Find all array entries matching (not distinct from) search/search_isnull, * and delete them if remove is true, else replace them with - * replace/replace_isnull. Comparisons are done using the specified + * replace/replace_isnull. Comparisons are done using the specified * collation. fcinfo is passed only for caching purposes. */ static ArrayType * @@ -5271,7 +5272,7 @@ array_replace_internal(ArrayType *array, typalign = typentry->typalign; /* - * Detoast values if they are toasted. The replacement value must be + * Detoast values if they are toasted. The replacement value must be * detoasted for insertion into the result array, while detoasting the * search value only once saves cycles. */ diff --git a/src/backend/utils/adt/arrayutils.c b/src/backend/utils/adt/arrayutils.c index 5b1afa0d8f2..477ccadfb85 100644 --- a/src/backend/utils/adt/arrayutils.c +++ b/src/backend/utils/adt/arrayutils.c @@ -193,7 +193,7 @@ mda_next_tuple(int n, int *curr, const int *span) /* * ArrayGetIntegerTypmods: verify that argument is a 1-D cstring array, - * and get the contents converted to integers. Returns a palloc'd array + * and get the contents converted to integers. Returns a palloc'd array * and places the length at *n. */ int32 * diff --git a/src/backend/utils/adt/cash.c b/src/backend/utils/adt/cash.c index 015875875be..6aba20de851 100644 --- a/src/backend/utils/adt/cash.c +++ b/src/backend/utils/adt/cash.c @@ -382,79 +382,79 @@ cash_out(PG_FUNCTION_ARGS) case 0: if (cs_precedes) result = psprintf("(%s%s%s)", - csymbol, - (sep_by_space == 1) ? " " : "", - bufptr); + csymbol, + (sep_by_space == 1) ? " " : "", + bufptr); else result = psprintf("(%s%s%s)", - bufptr, - (sep_by_space == 1) ? " " : "", - csymbol); + bufptr, + (sep_by_space == 1) ? 
" " : "", + csymbol); break; case 1: default: if (cs_precedes) result = psprintf("%s%s%s%s%s", - signsymbol, - (sep_by_space == 2) ? " " : "", - csymbol, - (sep_by_space == 1) ? " " : "", - bufptr); + signsymbol, + (sep_by_space == 2) ? " " : "", + csymbol, + (sep_by_space == 1) ? " " : "", + bufptr); else result = psprintf("%s%s%s%s%s", - signsymbol, - (sep_by_space == 2) ? " " : "", - bufptr, - (sep_by_space == 1) ? " " : "", - csymbol); + signsymbol, + (sep_by_space == 2) ? " " : "", + bufptr, + (sep_by_space == 1) ? " " : "", + csymbol); break; case 2: if (cs_precedes) result = psprintf("%s%s%s%s%s", - csymbol, - (sep_by_space == 1) ? " " : "", - bufptr, - (sep_by_space == 2) ? " " : "", - signsymbol); + csymbol, + (sep_by_space == 1) ? " " : "", + bufptr, + (sep_by_space == 2) ? " " : "", + signsymbol); else result = psprintf("%s%s%s%s%s", - bufptr, - (sep_by_space == 1) ? " " : "", - csymbol, - (sep_by_space == 2) ? " " : "", - signsymbol); + bufptr, + (sep_by_space == 1) ? " " : "", + csymbol, + (sep_by_space == 2) ? " " : "", + signsymbol); break; case 3: if (cs_precedes) result = psprintf("%s%s%s%s%s", - signsymbol, - (sep_by_space == 2) ? " " : "", - csymbol, - (sep_by_space == 1) ? " " : "", - bufptr); + signsymbol, + (sep_by_space == 2) ? " " : "", + csymbol, + (sep_by_space == 1) ? " " : "", + bufptr); else result = psprintf("%s%s%s%s%s", - bufptr, - (sep_by_space == 1) ? " " : "", - signsymbol, - (sep_by_space == 2) ? " " : "", - csymbol); + bufptr, + (sep_by_space == 1) ? " " : "", + signsymbol, + (sep_by_space == 2) ? " " : "", + csymbol); break; case 4: if (cs_precedes) result = psprintf("%s%s%s%s%s", - csymbol, - (sep_by_space == 2) ? " " : "", - signsymbol, - (sep_by_space == 1) ? " " : "", - bufptr); + csymbol, + (sep_by_space == 2) ? " " : "", + signsymbol, + (sep_by_space == 1) ? " " : "", + bufptr); else result = psprintf("%s%s%s%s%s", - bufptr, - (sep_by_space == 1) ? " " : "", - csymbol, - (sep_by_space == 2) ? " " : "", - signsymbol); + bufptr, + (sep_by_space == 1) ? " " : "", + csymbol, + (sep_by_space == 2) ? " " : "", + signsymbol); break; } diff --git a/src/backend/utils/adt/char.c b/src/backend/utils/adt/char.c index 99191e1d90c..e0d974eea5a 100644 --- a/src/backend/utils/adt/char.c +++ b/src/backend/utils/adt/char.c @@ -59,7 +59,7 @@ charout(PG_FUNCTION_ARGS) * charrecv - converts external binary format to char * * The external representation is one byte, with no character set - * conversion. This is somewhat dubious, perhaps, but in many + * conversion. This is somewhat dubious, perhaps, but in many * cases people use char for a 1-byte binary type. */ Datum diff --git a/src/backend/utils/adt/date.c b/src/backend/utils/adt/date.c index 06cc0cda0f0..073104d4bac 100644 --- a/src/backend/utils/adt/date.c +++ b/src/backend/utils/adt/date.c @@ -1358,7 +1358,7 @@ AdjustTimeForTypmod(TimeADT *time, int32 typmod) * Note: this round-to-nearest code is not completely consistent about * rounding values that are exactly halfway between integral values. * On most platforms, rint() will implement round-to-nearest-even, but - * the integer code always rounds up (away from zero). Is it worth + * the integer code always rounds up (away from zero). Is it worth * trying to be consistent? */ #ifdef HAVE_INT64_TIMESTAMP @@ -1706,7 +1706,7 @@ time_interval(PG_FUNCTION_ARGS) * Convert interval to time data type. * * This is defined as producing the fractional-day portion of the interval. - * Therefore, we can just ignore the months field. 
It is not real clear + * Therefore, we can just ignore the months field. It is not real clear * what to do with negative intervals, but we choose to subtract the floor, * so that, say, '-2 hours' becomes '22:00:00'. */ @@ -2695,7 +2695,7 @@ timetz_zone(PG_FUNCTION_ARGS) pg_tz *tzp; /* - * Look up the requested timezone. First we look in the date token table + * Look up the requested timezone. First we look in the date token table * (to handle cases like "EST"), and if that fails, we look in the * timezone database (to handle cases like "America/New_York"). (This * matches the order in which timestamp input checks the cases; it's diff --git a/src/backend/utils/adt/datetime.c b/src/backend/utils/adt/datetime.c index d200437e628..7632d1177e6 100644 --- a/src/backend/utils/adt/datetime.c +++ b/src/backend/utils/adt/datetime.c @@ -351,7 +351,7 @@ j2date(int jd, int *year, int *month, int *day) * j2day - convert Julian date to day-of-week (0..6 == Sun..Sat) * * Note: various places use the locution j2day(date - 1) to produce a - * result according to the convention 0..6 = Mon..Sun. This is a bit of + * result according to the convention 0..6 = Mon..Sun. This is a bit of * a crock, but will work as long as the computation here is just a modulo. */ int @@ -819,10 +819,11 @@ DecodeDateTime(char **field, int *ftype, int nf, switch (ftype[i]) { case DTK_DATE: + /* - * Integral julian day with attached time zone? - * All other forms with JD will be separated into - * distinct fields, so we handle just this case here. + * Integral julian day with attached time zone? All other + * forms with JD will be separated into distinct fields, so we + * handle just this case here. */ if (ptype == DTK_JULIAN) { @@ -849,6 +850,7 @@ DecodeDateTime(char **field, int *ftype, int nf, ptype = 0; break; } + /* * Already have a date? Then this might be a time zone name * with embedded punctuation (e.g. "America/New_York") or a @@ -1158,17 +1160,18 @@ DecodeDateTime(char **field, int *ftype, int nf, if (dterr < 0) return dterr; } + /* * Is this a YMD or HMS specification, or a year number? * YMD and HMS are required to be six digits or more, so * if it is 5 digits, it is a year. If it is six or more * more digits, we assume it is YMD or HMS unless no date - * and no time values have been specified. This forces - * 6+ digit years to be at the end of the string, or to use + * and no time values have been specified. This forces 6+ + * digit years to be at the end of the string, or to use * the ISO date specification. */ else if (flen >= 6 && (!(fmask & DTK_DATE_M) || - !(fmask & DTK_TIME_M))) + !(fmask & DTK_TIME_M))) { dterr = DecodeNumberField(flen, field[i], fmask, &tmask, tm, @@ -2490,7 +2493,7 @@ DecodeNumber(int flen, char *str, bool haveTextMonth, int fmask, /* * Nothing so far; make a decision about what we think the input - * is. There used to be lots of heuristics here, but the + * is. There used to be lots of heuristics here, but the * consensus now is to be paranoid. It *must* be either * YYYY-MM-DD (with a more-than-two-digit year field), or the * field order defined by DateOrder. @@ -2523,9 +2526,9 @@ DecodeNumber(int flen, char *str, bool haveTextMonth, int fmask, { /* * We are at the first numeric field of a date that included a - * textual month name. We want to support the variants + * textual month name. We want to support the variants * MON-DD-YYYY, DD-MON-YYYY, and YYYY-MON-DD as unambiguous - * inputs. We will also accept MON-DD-YY or DD-MON-YY in + * inputs. 
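The "subtract the floor" rule above for converting an interval to a time amounts to a floored modulo by the seconds in a day. A standalone sketch, with the months field already ignored and the value in whole seconds:

#define SECS_PER_DAY 86400

/*
 * Sketch: C's % truncates toward zero, so a negative remainder is shifted
 * up by one day: -7200 seconds ("-2 hours") becomes 79200, i.e. 22:00:00.
 */
static long
interval_time_part(long interval_secs)
{
    long        r = interval_secs % SECS_PER_DAY;

    if (r < 0)
        r += SECS_PER_DAY;
    return r;
}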
We will also accept MON-DD-YY or DD-MON-YY in * either DMY or MDY modes, as well as YY-MON-DD in YMD mode. */ if (flen >= 3 || DateOrder == DATEORDER_YMD) @@ -2654,6 +2657,7 @@ DecodeNumberField(int len, char *str, int fmask, if (len >= 6) { *tmask = DTK_DATE_M; + /* * Start from end and consider first 2 as Day, next 2 as Month, * and the rest as Year. @@ -2890,7 +2894,7 @@ DecodeInterval(char **field, int *ftype, int nf, int range, Assert(*field[i] == '-' || *field[i] == '+'); /* - * Check for signed hh:mm or hh:mm:ss. If so, process exactly + * Check for signed hh:mm or hh:mm:ss. If so, process exactly * like DTK_TIME case above, plus handling the sign. */ if (strchr(field[i] + 1, ':') != NULL && @@ -2978,8 +2982,8 @@ DecodeInterval(char **field, int *ftype, int nf, int range, type = DTK_MONTH; if (*field[i] == '-') val2 = -val2; - if (((double)val * MONTHS_PER_YEAR + val2) > INT_MAX || - ((double)val * MONTHS_PER_YEAR + val2) < INT_MIN) + if (((double) val * MONTHS_PER_YEAR + val2) > INT_MAX || + ((double) val * MONTHS_PER_YEAR + val2) < INT_MIN) return DTERR_FIELD_OVERFLOW; val = val * MONTHS_PER_YEAR + val2; fval = 0; @@ -3327,7 +3331,7 @@ DecodeISO8601Interval(char *str, return dterr; /* - * Note: we could step off the end of the string here. Code below + * Note: we could step off the end of the string here. Code below * *must* exit the loop if unit == '\0'. */ unit = *str++; @@ -4130,7 +4134,7 @@ EncodeInterval(struct pg_tm * tm, fsec_t fsec, int style, char *str) /* * We've been burnt by stupid errors in the ordering of the datetkn tables - * once too often. Arrange to check them during postmaster start. + * once too often. Arrange to check them during postmaster start. */ static bool CheckDateTokenTable(const char *tablename, const datetkn *base, int nel) diff --git a/src/backend/utils/adt/datum.c b/src/backend/utils/adt/datum.c index 4b5d65c5ff5..a79d5d587cc 100644 --- a/src/backend/utils/adt/datum.c +++ b/src/backend/utils/adt/datum.c @@ -181,7 +181,7 @@ datumIsEqual(Datum value1, Datum value2, bool typByVal, int typLen) /* * just compare the two datums. NOTE: just comparing "len" bytes will * not do the work, because we do not know how these bytes are aligned - * inside the "Datum". We assume instead that any given datatype is + * inside the "Datum". We assume instead that any given datatype is * consistent about how it fills extraneous bits in the Datum. */ res = (value1 == value2); diff --git a/src/backend/utils/adt/dbsize.c b/src/backend/utils/adt/dbsize.c index 68ab0e19061..8c663379ae7 100644 --- a/src/backend/utils/adt/dbsize.c +++ b/src/backend/utils/adt/dbsize.c @@ -358,6 +358,7 @@ calculate_toast_table_size(Oid toastrelid) foreach(lc, indexlist) { Relation toastIdxRel; + toastIdxRel = relation_open(lfirst_oid(lc), AccessShareLock); for (forkNum = 0; forkNum <= MAX_FORKNUM; forkNum++) @@ -689,7 +690,7 @@ pg_size_pretty_numeric(PG_FUNCTION_ARGS) * This is expected to be used in queries like * SELECT pg_relation_filenode(oid) FROM pg_class; * That leads to a couple of choices. We work from the pg_class row alone - * rather than actually opening each relation, for efficiency. We don't + * rather than actually opening each relation, for efficiency. We don't * fail if we can't find the relation --- some rows might be visible in * the query's MVCC snapshot even though the relations have been dropped. 
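The year/month combination in the DecodeInterval hunk above is range-checked in double before the int arithmetic is committed, since the intermediate product can exceed the int range. A standalone sketch of that guard; MONTHS_PER_YEAR is taken as 12 and the return value stands in for DTERR_FIELD_OVERFLOW:

#include <limits.h>

/* Sketch: combine years and months into total months, or report overflow. */
static int
combine_year_month(int years, int months, int *total_months)
{
    double      d = (double) years * 12 + months;

    if (d > INT_MAX || d < INT_MIN)
        return -1;              /* field overflow */
    *total_months = years * 12 + months;
    return 0;
}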
* (Note: we could avoid using the catcache, but there's little point diff --git a/src/backend/utils/adt/domains.c b/src/backend/utils/adt/domains.c index 515481805a7..bbca5d68baf 100644 --- a/src/backend/utils/adt/domains.c +++ b/src/backend/utils/adt/domains.c @@ -12,11 +12,11 @@ * The overhead required for constraint checking can be high, since examining * the catalogs to discover the constraints for a given domain is not cheap. * We have three mechanisms for minimizing this cost: - * 1. In a nest of domains, we flatten the checking of all the levels + * 1. In a nest of domains, we flatten the checking of all the levels * into just one operation. - * 2. We cache the list of constraint items in the FmgrInfo struct + * 2. We cache the list of constraint items in the FmgrInfo struct * passed by the caller. - * 3. If there are CHECK constraints, we cache a standalone ExprContext + * 3. If there are CHECK constraints, we cache a standalone ExprContext * to evaluate them in. * * @@ -311,7 +311,7 @@ domain_recv(PG_FUNCTION_ARGS) /* * domain_check - check that a datum satisfies the constraints of a - * domain. extra and mcxt can be passed if they are available from, + * domain. extra and mcxt can be passed if they are available from, * say, a FmgrInfo structure, or they can be NULL, in which case the * setup is repeated for each call. */ diff --git a/src/backend/utils/adt/float.c b/src/backend/utils/adt/float.c index 774267ed5d2..41b3eaa2135 100644 --- a/src/backend/utils/adt/float.c +++ b/src/backend/utils/adt/float.c @@ -276,7 +276,7 @@ float4in(PG_FUNCTION_ARGS) /* * Some platforms return ERANGE for denormalized numbers (those * that are not zero, but are too close to zero to have full - * precision). We'd prefer not to throw error for that, so try to + * precision). We'd prefer not to throw error for that, so try to * detect whether it's a "real" out-of-range condition by checking * to see if the result is zero or huge. */ @@ -475,7 +475,7 @@ float8in(PG_FUNCTION_ARGS) /* * Some platforms return ERANGE for denormalized numbers (those * that are not zero, but are too close to zero to have full - * precision). We'd prefer not to throw error for that, so try to + * precision). We'd prefer not to throw error for that, so try to * detect whether it's a "real" out-of-range condition by checking * to see if the result is zero or huge. */ @@ -2054,7 +2054,7 @@ float8_stddev_samp(PG_FUNCTION_ARGS) * in that order. Note that Y is the first argument to the aggregates! * * It might seem attractive to optimize this by having multiple accumulator - * functions that only calculate the sums actually needed. But on most + * functions that only calculate the sums actually needed. But on most * modern machines, a couple of extra floating-point multiplies will be * insignificant compared to the other per-tuple overhead, so I've chosen * to minimize code space instead. diff --git a/src/backend/utils/adt/format_type.c b/src/backend/utils/adt/format_type.c index 5b75d34dcbc..e1763a37642 100644 --- a/src/backend/utils/adt/format_type.c +++ b/src/backend/utils/adt/format_type.c @@ -44,14 +44,14 @@ static char *printTypmod(const char *typname, int32 typmod, Oid typmodout); * double quoted if it contains funny characters or matches a keyword. * * If typemod is NULL then we are formatting a type name in a context where - * no typemod is available, eg a function argument or result type. This + * no typemod is available, eg a function argument or result type. 
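The float input comments above tolerate ERANGE when the result is merely denormalized and only reject a "real" out-of-range condition, detected by the result being zero or huge. A standalone strtod() sketch of that test:

#include <errno.h>
#include <math.h>
#include <stdlib.h>

/* Sketch: parse a double, accepting denormalized results. */
static int
parse_float8(const char *str, double *val)
{
    char       *end;

    errno = 0;
    *val = strtod(str, &end);
    if (end == str)
        return -1;              /* no number at all */
    if (errno == ERANGE &&
        (*val == 0.0 || *val >= HUGE_VAL || *val <= -HUGE_VAL))
        return -1;              /* true overflow or underflow */
    return 0;
}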
This * yields a slightly different result from specifying typemod = -1 in some * cases. Given typemod = -1 we feel compelled to produce an output that * the parser will interpret as having typemod -1, so that pg_dump will - * produce CREATE TABLE commands that recreate the original state. But + * produce CREATE TABLE commands that recreate the original state. But * given NULL typemod, we assume that the parser's interpretation of * typemod doesn't matter, and so we are willing to output a slightly - * "prettier" representation of the same type. For example, type = bpchar + * "prettier" representation of the same type. For example, type = bpchar * and typemod = NULL gets you "character", whereas typemod = -1 gets you * "bpchar" --- the former will be interpreted as character(1) by the * parser, which does not yield typemod -1. diff --git a/src/backend/utils/adt/formatting.c b/src/backend/utils/adt/formatting.c index 2099ad0c302..15bcefd0021 100644 --- a/src/backend/utils/adt/formatting.c +++ b/src/backend/utils/adt/formatting.c @@ -1823,7 +1823,7 @@ str_initcap(const char *buff, size_t nbytes, Oid collid) /* * Note: we assume that toupper_l()/tolower_l() will not be so broken - * as to need guard tests. When using the default collation, we apply + * as to need guard tests. When using the default collation, we apply * the traditional Postgres behavior that forces ASCII-style treatment * of I/i, but in non-default collations you get exactly what the * collation says. @@ -3629,7 +3629,7 @@ do_to_timestamp(text *date_txt, text *fmt, { /* * The month and day field have not been set, so we use the - * day-of-year field to populate them. Depending on the date mode, + * day-of-year field to populate them. Depending on the date mode, * this field may be interpreted as a Gregorian day-of-year, or an ISO * week date day-of-year. */ diff --git a/src/backend/utils/adt/geo_ops.c b/src/backend/utils/adt/geo_ops.c index 72cb4e991fc..54391fd7aba 100644 --- a/src/backend/utils/adt/geo_ops.c +++ b/src/backend/utils/adt/geo_ops.c @@ -32,7 +32,10 @@ * Internal routines */ -enum path_delim { PATH_NONE, PATH_OPEN, PATH_CLOSED }; +enum path_delim +{ + PATH_NONE, PATH_OPEN, PATH_CLOSED +}; static int point_inside(Point *p, int npts, Point *plist); static int lseg_crossing(double x, double y, double px, double py); @@ -1024,7 +1027,7 @@ line_out(PG_FUNCTION_ARGS) Datum line_recv(PG_FUNCTION_ARGS) { - StringInfo buf = (StringInfo) PG_GETARG_POINTER(0); + StringInfo buf = (StringInfo) PG_GETARG_POINTER(0); LINE *line; line = (LINE *) palloc(sizeof(LINE)); @@ -1386,7 +1389,7 @@ path_in(PG_FUNCTION_ARGS) } base_size = sizeof(path->p[0]) * npts; - size = offsetof(PATH, p[0]) + base_size; + size = offsetof(PATH, p[0]) +base_size; /* Check for integer overflow */ if (base_size / npts != sizeof(path->p[0]) || size <= base_size) @@ -3448,7 +3451,7 @@ poly_in(PG_FUNCTION_ARGS) errmsg("invalid input syntax for type polygon: \"%s\"", str))); base_size = sizeof(poly->p[0]) * npts; - size = offsetof(POLYGON, p[0]) + base_size; + size = offsetof(POLYGON, p[0]) +base_size; /* Check for integer overflow */ if (base_size / npts != sizeof(poly->p[0]) || size <= base_size) diff --git a/src/backend/utils/adt/geo_selfuncs.c b/src/backend/utils/adt/geo_selfuncs.c index 99ca8edbd04..4a2156d4669 100644 --- a/src/backend/utils/adt/geo_selfuncs.c +++ b/src/backend/utils/adt/geo_selfuncs.c @@ -22,7 +22,7 @@ /* - * Selectivity functions for geometric operators. These are bogus -- unless + * Selectivity functions for geometric operators. 
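path_in() and poly_in() above compute the allocation size as offsetof(...) plus a per-point array and then verify that neither the multiplication nor the addition wrapped. A standalone sketch of that check; the struct is illustrative, not the backend's PATH:

#include <stddef.h>

typedef struct PointBlob
{
    int         npts;
    double      p[1];           /* variable-length tail */
} PointBlob;

/* Sketch: returns 0 if the size computation overflowed, else the size. */
static size_t
point_blob_size(int npts)
{
    size_t      base_size = sizeof(double) * (size_t) npts;
    size_t      size = offsetof(PointBlob, p) + base_size;

    if (npts <= 0 ||
        base_size / (size_t) npts != sizeof(double) ||
        size <= base_size)
        return 0;
    return size;
}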
These are bogus -- unless * we know the actual key distribution in the index, we can't make a good * prediction of the selectivity of these operators. * @@ -34,7 +34,7 @@ * In general, GiST needs to search multiple subtrees in order to guarantee * that all occurrences of the same key have been found. Because of this, * the estimated cost for scanning the index ought to be higher than the - * output selectivity would indicate. gistcostestimate(), over in selfuncs.c, + * output selectivity would indicate. gistcostestimate(), over in selfuncs.c, * ought to be adjusted accordingly --- but until we can generate somewhat * realistic numbers here, it hardly matters... */ diff --git a/src/backend/utils/adt/inet_cidr_ntop.c b/src/backend/utils/adt/inet_cidr_ntop.c index 5f2a3d361d9..d33534ec173 100644 --- a/src/backend/utils/adt/inet_cidr_ntop.c +++ b/src/backend/utils/adt/inet_cidr_ntop.c @@ -196,7 +196,7 @@ inet_cidr_ntop_ipv6(const u_char *src, int bits, char *dst, size_t size) } else { - /* Copy src to private buffer. Zero host part. */ + /* Copy src to private buffer. Zero host part. */ p = (bits + 7) / 8; memcpy(inbuf, src, p); memset(inbuf + p, 0, 16 - p); diff --git a/src/backend/utils/adt/int.c b/src/backend/utils/adt/int.c index 669355e4540..b8f56e5c2e1 100644 --- a/src/backend/utils/adt/int.c +++ b/src/backend/utils/adt/int.c @@ -642,7 +642,7 @@ int4pl(PG_FUNCTION_ARGS) result = arg1 + arg2; /* - * Overflow check. If the inputs are of different signs then their sum + * Overflow check. If the inputs are of different signs then their sum * cannot overflow. If the inputs are of the same sign, their sum had * better be that sign too. */ @@ -663,8 +663,8 @@ int4mi(PG_FUNCTION_ARGS) result = arg1 - arg2; /* - * Overflow check. If the inputs are of the same sign then their - * difference cannot overflow. If they are of different signs then the + * Overflow check. If the inputs are of the same sign then their + * difference cannot overflow. If they are of different signs then the * result should be of the same sign as the first input. */ if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1)) @@ -684,7 +684,7 @@ int4mul(PG_FUNCTION_ARGS) result = arg1 * arg2; /* - * Overflow check. We basically check to see if result / arg2 gives arg1 + * Overflow check. We basically check to see if result / arg2 gives arg1 * again. There are two cases where this fails: arg2 = 0 (which cannot * overflow) and arg1 = INT_MIN, arg2 = -1 (where the division itself will * overflow and thus incorrectly match). @@ -794,7 +794,7 @@ int2pl(PG_FUNCTION_ARGS) result = arg1 + arg2; /* - * Overflow check. If the inputs are of different signs then their sum + * Overflow check. If the inputs are of different signs then their sum * cannot overflow. If the inputs are of the same sign, their sum had * better be that sign too. */ @@ -815,8 +815,8 @@ int2mi(PG_FUNCTION_ARGS) result = arg1 - arg2; /* - * Overflow check. If the inputs are of the same sign then their - * difference cannot overflow. If they are of different signs then the + * Overflow check. If the inputs are of the same sign then their + * difference cannot overflow. If they are of different signs then the * result should be of the same sign as the first input. */ if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1)) @@ -897,7 +897,7 @@ int24pl(PG_FUNCTION_ARGS) result = arg1 + arg2; /* - * Overflow check. If the inputs are of different signs then their sum + * Overflow check. If the inputs are of different signs then their sum * cannot overflow. 
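The addition and subtraction checks repeated above rely on sign tests: operands of different signs cannot overflow a sum, and a same-sign sum must keep that sign. A standalone sketch; the wrapped sum is computed through unsigned arithmetic so the sketch is well defined on its own (the server is normally built with -fwrapv, so the plain signed sum is acceptable there):

#include <stdbool.h>
#include <stdint.h>

#define SAMESIGN(a,b)   (((a) < 0) == ((b) < 0))

/* Sketch of the int4pl-style test; subtraction is checked symmetrically. */
static bool
int32_add_overflows(int32_t a, int32_t b, int32_t *sum)
{
    int32_t     r = (int32_t) ((uint32_t) a + (uint32_t) b);

    *sum = r;
    return SAMESIGN(a, b) && !SAMESIGN(r, a);
}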
If the inputs are of the same sign, their sum had * better be that sign too. */ @@ -918,8 +918,8 @@ int24mi(PG_FUNCTION_ARGS) result = arg1 - arg2; /* - * Overflow check. If the inputs are of the same sign then their - * difference cannot overflow. If they are of different signs then the + * Overflow check. If the inputs are of the same sign then their + * difference cannot overflow. If they are of different signs then the * result should be of the same sign as the first input. */ if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1)) @@ -939,7 +939,7 @@ int24mul(PG_FUNCTION_ARGS) result = arg1 * arg2; /* - * Overflow check. We basically check to see if result / arg2 gives arg1 + * Overflow check. We basically check to see if result / arg2 gives arg1 * again. There is one case where this fails: arg2 = 0 (which cannot * overflow). * @@ -985,7 +985,7 @@ int42pl(PG_FUNCTION_ARGS) result = arg1 + arg2; /* - * Overflow check. If the inputs are of different signs then their sum + * Overflow check. If the inputs are of different signs then their sum * cannot overflow. If the inputs are of the same sign, their sum had * better be that sign too. */ @@ -1006,8 +1006,8 @@ int42mi(PG_FUNCTION_ARGS) result = arg1 - arg2; /* - * Overflow check. If the inputs are of the same sign then their - * difference cannot overflow. If they are of different signs then the + * Overflow check. If the inputs are of the same sign then their + * difference cannot overflow. If they are of different signs then the * result should be of the same sign as the first input. */ if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1)) @@ -1027,7 +1027,7 @@ int42mul(PG_FUNCTION_ARGS) result = arg1 * arg2; /* - * Overflow check. We basically check to see if result / arg1 gives arg2 + * Overflow check. We basically check to see if result / arg1 gives arg2 * again. There is one case where this fails: arg1 = 0 (which cannot * overflow). * diff --git a/src/backend/utils/adt/int8.c b/src/backend/utils/adt/int8.c index e78eb2a2022..96146e0fda0 100644 --- a/src/backend/utils/adt/int8.c +++ b/src/backend/utils/adt/int8.c @@ -73,7 +73,7 @@ scanint8(const char *str, bool errorOK, int64 *result) ptr++; /* - * Do an explicit check for INT64_MIN. Ugly though this is, it's + * Do an explicit check for INT64_MIN. Ugly though this is, it's * cleaner than trying to get the loop below to handle it portably. */ if (strncmp(ptr, "9223372036854775808", 19) == 0) @@ -519,7 +519,7 @@ int8pl(PG_FUNCTION_ARGS) result = arg1 + arg2; /* - * Overflow check. If the inputs are of different signs then their sum + * Overflow check. If the inputs are of different signs then their sum * cannot overflow. If the inputs are of the same sign, their sum had * better be that sign too. */ @@ -540,8 +540,8 @@ int8mi(PG_FUNCTION_ARGS) result = arg1 - arg2; /* - * Overflow check. If the inputs are of the same sign then their - * difference cannot overflow. If they are of different signs then the + * Overflow check. If the inputs are of the same sign then their + * difference cannot overflow. If they are of different signs then the * result should be of the same sign as the first input. */ if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1)) @@ -561,7 +561,7 @@ int8mul(PG_FUNCTION_ARGS) result = arg1 * arg2; /* - * Overflow check. We basically check to see if result / arg2 gives arg1 + * Overflow check. We basically check to see if result / arg2 gives arg1 * again. 
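The multiplication checks above divide the product back and compare, with the two documented exceptions: a zero divisor (cannot overflow) and INT_MIN times -1 (the division itself would overflow). A standalone 32-bit sketch, again using unsigned arithmetic for the wrapped product:

#include <stdbool.h>
#include <stdint.h>

/* Sketch of the int4mul-style divide-back test. */
static bool
int32_mul_overflows(int32_t a, int32_t b, int32_t *product)
{
    int32_t     r = (int32_t) ((uint32_t) a * (uint32_t) b);

    *product = r;
    if (b == 0)
        return false;           /* zero times anything cannot overflow */
    if (a == INT32_MIN && b == -1)
        return true;            /* r / b would itself overflow */
    return r / b != a;
}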
There are two cases where this fails: arg2 = 0 (which cannot * overflow) and arg1 = INT64_MIN, arg2 = -1 (where the division itself * will overflow and thus incorrectly match). @@ -764,7 +764,7 @@ int8dec(PG_FUNCTION_ARGS) /* * These functions are exactly like int8inc/int8dec but are used for - * aggregates that count only non-null values. Since the functions are + * aggregates that count only non-null values. Since the functions are * declared strict, the null checks happen before we ever get here, and all we * need do is increment the state value. We could actually make these pg_proc * entries point right at int8inc/int8dec, but then the opr_sanity regression @@ -824,7 +824,7 @@ int84pl(PG_FUNCTION_ARGS) result = arg1 + arg2; /* - * Overflow check. If the inputs are of different signs then their sum + * Overflow check. If the inputs are of different signs then their sum * cannot overflow. If the inputs are of the same sign, their sum had * better be that sign too. */ @@ -845,8 +845,8 @@ int84mi(PG_FUNCTION_ARGS) result = arg1 - arg2; /* - * Overflow check. If the inputs are of the same sign then their - * difference cannot overflow. If they are of different signs then the + * Overflow check. If the inputs are of the same sign then their + * difference cannot overflow. If they are of different signs then the * result should be of the same sign as the first input. */ if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1)) @@ -866,7 +866,7 @@ int84mul(PG_FUNCTION_ARGS) result = arg1 * arg2; /* - * Overflow check. We basically check to see if result / arg1 gives arg2 + * Overflow check. We basically check to see if result / arg1 gives arg2 * again. There is one case where this fails: arg1 = 0 (which cannot * overflow). * @@ -933,7 +933,7 @@ int48pl(PG_FUNCTION_ARGS) result = arg1 + arg2; /* - * Overflow check. If the inputs are of different signs then their sum + * Overflow check. If the inputs are of different signs then their sum * cannot overflow. If the inputs are of the same sign, their sum had * better be that sign too. */ @@ -954,8 +954,8 @@ int48mi(PG_FUNCTION_ARGS) result = arg1 - arg2; /* - * Overflow check. If the inputs are of the same sign then their - * difference cannot overflow. If they are of different signs then the + * Overflow check. If the inputs are of the same sign then their + * difference cannot overflow. If they are of different signs then the * result should be of the same sign as the first input. */ if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1)) @@ -975,7 +975,7 @@ int48mul(PG_FUNCTION_ARGS) result = arg1 * arg2; /* - * Overflow check. We basically check to see if result / arg2 gives arg1 + * Overflow check. We basically check to see if result / arg2 gives arg1 * again. There is one case where this fails: arg2 = 0 (which cannot * overflow). * @@ -1021,7 +1021,7 @@ int82pl(PG_FUNCTION_ARGS) result = arg1 + arg2; /* - * Overflow check. If the inputs are of different signs then their sum + * Overflow check. If the inputs are of different signs then their sum * cannot overflow. If the inputs are of the same sign, their sum had * better be that sign too. */ @@ -1042,8 +1042,8 @@ int82mi(PG_FUNCTION_ARGS) result = arg1 - arg2; /* - * Overflow check. If the inputs are of the same sign then their - * difference cannot overflow. If they are of different signs then the + * Overflow check. If the inputs are of the same sign then their + * difference cannot overflow. If they are of different signs then the * result should be of the same sign as the first input. 
*/ if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1)) @@ -1063,7 +1063,7 @@ int82mul(PG_FUNCTION_ARGS) result = arg1 * arg2; /* - * Overflow check. We basically check to see if result / arg1 gives arg2 + * Overflow check. We basically check to see if result / arg1 gives arg2 * again. There is one case where this fails: arg1 = 0 (which cannot * overflow). * @@ -1130,7 +1130,7 @@ int28pl(PG_FUNCTION_ARGS) result = arg1 + arg2; /* - * Overflow check. If the inputs are of different signs then their sum + * Overflow check. If the inputs are of different signs then their sum * cannot overflow. If the inputs are of the same sign, their sum had * better be that sign too. */ @@ -1151,8 +1151,8 @@ int28mi(PG_FUNCTION_ARGS) result = arg1 - arg2; /* - * Overflow check. If the inputs are of the same sign then their - * difference cannot overflow. If they are of different signs then the + * Overflow check. If the inputs are of the same sign then their + * difference cannot overflow. If they are of different signs then the * result should be of the same sign as the first input. */ if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1)) @@ -1172,7 +1172,7 @@ int28mul(PG_FUNCTION_ARGS) result = arg1 * arg2; /* - * Overflow check. We basically check to see if result / arg2 gives arg1 + * Overflow check. We basically check to see if result / arg2 gives arg1 * again. There is one case where this fails: arg2 = 0 (which cannot * overflow). * diff --git a/src/backend/utils/adt/json.c b/src/backend/utils/adt/json.c index c34a1bb50be..16f4eccc06e 100644 --- a/src/backend/utils/adt/json.c +++ b/src/backend/utils/adt/json.c @@ -598,10 +598,10 @@ json_lex(JsonLexContext *lex) /* * We're not dealing with a string, number, legal - * punctuation mark, or end of string. The only legal + * punctuation mark, or end of string. The only legal * tokens we might find here are true, false, and null, * but for error reporting purposes we scan until we see a - * non-alphanumeric character. That way, we can report + * non-alphanumeric character. That way, we can report * the whole word as an unexpected token, rather than just * some unintuitive prefix thereof. */ @@ -897,12 +897,12 @@ json_lex_string(JsonLexContext *lex) * begin with a '0'. * * (3) An optional decimal part, consisting of a period ('.') followed by - * one or more digits. (Note: While this part can be omitted + * one or more digits. (Note: While this part can be omitted * completely, it's not OK to have only the decimal point without * any digits afterwards.) * * (4) An optional exponent part, consisting of 'e' or 'E', optionally - * followed by '+' or '-', followed by one or more digits. (Note: + * followed by '+' or '-', followed by one or more digits. (Note: * As with the decimal part, if 'e' or 'E' is present, it must be * followed by at least one digit.) * @@ -980,7 +980,7 @@ json_lex_number(JsonLexContext *lex, char *s, bool *num_err) } /* - * Check for trailing garbage. As in json_lex(), any alphanumeric stuff + * Check for trailing garbage. As in json_lex(), any alphanumeric stuff * here should be considered part of the token for error-reporting * purposes. */ @@ -1805,7 +1805,7 @@ json_agg_transfn(PG_FUNCTION_ARGS) /* * The transition type for array_agg() is declared to be "internal", which - * is a pass-by-value type the same size as a pointer. So we can safely + * is a pass-by-value type the same size as a pointer. So we can safely * pass the ArrayBuildState pointer through nodeAgg.c's machinations. 
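json_lex_number's comment above spells out the accepted syntax: an optional minus, an integer part whose leading zero must stand alone, an optional fraction and an optional exponent that must each contain at least one digit, and nothing alphanumeric left over. A standalone validator sketching that grammar, not the backend lexer (which also tracks error positions):

#include <ctype.h>
#include <stdbool.h>

/* Sketch: does the whole string form a valid JSON number? */
static bool
is_json_number(const char *s)
{
    if (*s == '-')
        s++;
    if (*s == '0')
        s++;                    /* a leading 0 must stand alone */
    else if (isdigit((unsigned char) *s))
    {
        while (isdigit((unsigned char) *s))
            s++;
    }
    else
        return false;           /* empty integer part */
    if (*s == '.')
    {
        s++;
        if (!isdigit((unsigned char) *s))
            return false;       /* decimal point needs digits after it */
        while (isdigit((unsigned char) *s))
            s++;
    }
    if (*s == 'e' || *s == 'E')
    {
        s++;
        if (*s == '+' || *s == '-')
            s++;
        if (!isdigit((unsigned char) *s))
            return false;       /* exponent needs digits too */
        while (isdigit((unsigned char) *s))
            s++;
    }
    return *s == '\0';          /* trailing garbage is rejected */
}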
*/ PG_RETURN_POINTER(state); diff --git a/src/backend/utils/adt/jsonb.c b/src/backend/utils/adt/jsonb.c index 781ab66ef2c..cf5d6f23264 100644 --- a/src/backend/utils/adt/jsonb.c +++ b/src/backend/utils/adt/jsonb.c @@ -22,7 +22,7 @@ typedef struct JsonbInState { JsonbParseState *parseState; JsonbValue *res; -} JsonbInState; +} JsonbInState; static inline Datum jsonb_from_cstring(char *json, int len); static size_t checkStringLen(size_t len); @@ -31,9 +31,9 @@ static void jsonb_in_object_end(void *pstate); static void jsonb_in_array_start(void *pstate); static void jsonb_in_array_end(void *pstate); static void jsonb_in_object_field_start(void *pstate, char *fname, bool isnull); -static void jsonb_put_escaped_value(StringInfo out, JsonbValue * scalarVal); +static void jsonb_put_escaped_value(StringInfo out, JsonbValue *scalarVal); static void jsonb_in_scalar(void *pstate, char *token, JsonTokenType tokentype); -char *JsonbToCString(StringInfo out, char *in, int estimated_len); +char *JsonbToCString(StringInfo out, char *in, int estimated_len); /* * jsonb type input function @@ -245,7 +245,7 @@ jsonb_in_object_field_start(void *pstate, char *fname, bool isnull) JsonbInState *_state = (JsonbInState *) pstate; JsonbValue v; - Assert (fname != NULL); + Assert(fname != NULL); v.type = jbvString; v.val.string.len = checkStringLen(strlen(fname)); v.val.string.val = pnstrdup(fname, v.val.string.len); @@ -255,7 +255,7 @@ jsonb_in_object_field_start(void *pstate, char *fname, bool isnull) } static void -jsonb_put_escaped_value(StringInfo out, JsonbValue * scalarVal) +jsonb_put_escaped_value(StringInfo out, JsonbValue *scalarVal) { switch (scalarVal->type) { @@ -267,8 +267,8 @@ jsonb_put_escaped_value(StringInfo out, JsonbValue * scalarVal) break; case jbvNumeric: appendStringInfoString(out, - DatumGetCString(DirectFunctionCall1(numeric_out, - PointerGetDatum(scalarVal->val.numeric)))); + DatumGetCString(DirectFunctionCall1(numeric_out, + PointerGetDatum(scalarVal->val.numeric)))); break; case jbvBool: if (scalarVal->val.boolean) @@ -296,21 +296,23 @@ jsonb_in_scalar(void *pstate, char *token, JsonTokenType tokentype) { case JSON_TOKEN_STRING: - Assert (token != NULL); + Assert(token != NULL); v.type = jbvString; v.val.string.len = checkStringLen(strlen(token)); v.val.string.val = pnstrdup(token, v.val.string.len); v.estSize += v.val.string.len; break; case JSON_TOKEN_NUMBER: + /* - * No need to check size of numeric values, because maximum numeric - * size is well below the JsonbValue restriction + * No need to check size of numeric values, because maximum + * numeric size is well below the JsonbValue restriction */ - Assert (token != NULL); + Assert(token != NULL); v.type = jbvNumeric; v.val.numeric = DatumGetNumeric(DirectFunctionCall3(numeric_in, CStringGetDatum(token), 0, -1)); - v.estSize += VARSIZE_ANY(v.val.numeric) + sizeof(JEntry) /* alignment */ ; + + v.estSize += VARSIZE_ANY(v.val.numeric) +sizeof(JEntry) /* alignment */ ; break; case JSON_TOKEN_TRUE: v.type = jbvBool; diff --git a/src/backend/utils/adt/jsonb_gin.c b/src/backend/utils/adt/jsonb_gin.c index 62546ebaf28..9f8c178ab10 100644 --- a/src/backend/utils/adt/jsonb_gin.c +++ b/src/backend/utils/adt/jsonb_gin.c @@ -22,12 +22,12 @@ typedef struct PathHashStack { - uint32 hash; + uint32 hash; struct PathHashStack *parent; -} PathHashStack; +} PathHashStack; static text *make_text_key(const char *str, int len, char flag); -static text *make_scalar_key(const JsonbValue * scalarVal, char flag); +static text *make_scalar_key(const JsonbValue 
*scalarVal, char flag); /* * @@ -97,14 +97,14 @@ gin_extract_jsonb(PG_FUNCTION_ARGS) * JsonbExistsStrategyNumber. Our definition of existence does not * allow for checking the existence of a non-jbvString element (just * like the definition of the underlying operator), because the - * operator takes a text rhs argument (which is taken as a proxy for an - * equivalent Jsonb string). + * operator takes a text rhs argument (which is taken as a proxy for + * an equivalent Jsonb string). * * The way existence is represented does not preclude an alternative * existence operator, that takes as its rhs value an arbitrarily - * internally-typed Jsonb. The only reason that isn't the case here is - * that the existence operator is only really intended to determine if - * an object has a certain key (object pair keys are of course + * internally-typed Jsonb. The only reason that isn't the case here + * is that the existence operator is only really intended to determine + * if an object has a certain key (object pair keys are of course * invariably strings), which is extended to jsonb arrays. You could * think of the default Jsonb definition of existence as being * equivalent to a definition where all types of scalar array elements @@ -116,11 +116,11 @@ gin_extract_jsonb(PG_FUNCTION_ARGS) * JsonbExistsStrategyNumber, since we know that keys are strings for * both objects and arrays, and don't have to further account for type * mismatch. Not having to set the reset flag makes it less than - * tempting to tighten up the definition of existence to preclude array - * elements entirely, which would arguably be a simpler alternative. - * In any case the infrastructure used to implement the existence - * operator could trivially support this hypothetical, slightly - * distinct definition of existence. + * tempting to tighten up the definition of existence to preclude + * array elements entirely, which would arguably be a simpler + * alternative. In any case the infrastructure used to implement the + * existence operator could trivially support this hypothetical, + * slightly distinct definition of existence. */ switch (r) { @@ -290,8 +290,10 @@ gin_triconsistent_jsonb(PG_FUNCTION_ARGS) { GinTernaryValue *check = (GinTernaryValue *) PG_GETARG_POINTER(0); StrategyNumber strategy = PG_GETARG_UINT16(1); + /* Jsonb *query = PG_GETARG_JSONB(2); */ int32 nkeys = PG_GETARG_INT32(3); + /* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */ GinTernaryValue res = GIN_TRUE; @@ -299,7 +301,7 @@ gin_triconsistent_jsonb(PG_FUNCTION_ARGS) if (strategy == JsonbContainsStrategyNumber) { - bool has_maybe = false; + bool has_maybe = false; /* * All extracted keys must be present. Combination of GIN_MAYBE and @@ -323,8 +325,9 @@ gin_triconsistent_jsonb(PG_FUNCTION_ARGS) /* * Index doesn't have information about correspondence of Jsonb keys * and values (as distinct from GIN keys, which a key/value pair is - * stored as), so invariably we recheck. This is also reflected in how - * GIN_MAYBE is given in response to there being no GIN_MAYBE input. + * stored as), so invariably we recheck. This is also reflected in + * how GIN_MAYBE is given in response to there being no GIN_MAYBE + * input. 
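The ternary containment logic above boils down to: any definitely-missing key wins, a maybe degrades the answer to maybe, and even an all-true match is reported as maybe because the heap tuple is rechecked anyway. A standalone sketch with a local ternary type; the names are illustrative, not the GinTernaryValue API:

#include <stdbool.h>

typedef enum TriValue
{
    TRI_FALSE,
    TRI_TRUE,
    TRI_MAYBE
} TriValue;

/* Sketch: combine per-key results for a jsonb containment query. */
static TriValue
contains_triconsistent(const TriValue *check, int nkeys)
{
    TriValue    res = TRI_TRUE;
    bool        has_maybe = false;
    int         i;

    for (i = 0; i < nkeys; i++)
    {
        if (check[i] == TRI_FALSE)
            return TRI_FALSE;   /* a required key is definitely absent */
        if (check[i] == TRI_MAYBE)
        {
            res = TRI_MAYBE;
            has_maybe = true;
        }
    }
    /* the index alone cannot prove containment, so force a recheck */
    if (!has_maybe && res == TRI_TRUE)
        res = TRI_MAYBE;
    return res;
}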
*/ if (!has_maybe && res == GIN_TRUE) res = GIN_MAYBE; @@ -379,8 +382,10 @@ gin_consistent_jsonb_hash(PG_FUNCTION_ARGS) { bool *check = (bool *) PG_GETARG_POINTER(0); StrategyNumber strategy = PG_GETARG_UINT16(1); + /* Jsonb *query = PG_GETARG_JSONB(2); */ int32 nkeys = PG_GETARG_INT32(3); + /* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */ bool *recheck = (bool *) PG_GETARG_POINTER(5); bool res = true; @@ -390,13 +395,13 @@ gin_consistent_jsonb_hash(PG_FUNCTION_ARGS) elog(ERROR, "unrecognized strategy number: %d", strategy); /* - * jsonb_hash_ops index doesn't have information about correspondence - * of Jsonb keys and values (as distinct from GIN keys, which a - * key/value pair is stored as), so invariably we recheck. Besides, - * there are some special rules around the containment of raw scalar - * arrays and regular arrays that are not represented here. However, - * if all of the keys are not present, that's sufficient reason to - * return false and finish immediately. + * jsonb_hash_ops index doesn't have information about correspondence of + * Jsonb keys and values (as distinct from GIN keys, which a key/value + * pair is stored as), so invariably we recheck. Besides, there are some + * special rules around the containment of raw scalar arrays and regular + * arrays that are not represented here. However, if all of the keys are + * not present, that's sufficient reason to return false and finish + * immediately. */ *recheck = true; for (i = 0; i < nkeys; i++) @@ -416,12 +421,14 @@ gin_triconsistent_jsonb_hash(PG_FUNCTION_ARGS) { GinTernaryValue *check = (GinTernaryValue *) PG_GETARG_POINTER(0); StrategyNumber strategy = PG_GETARG_UINT16(1); + /* Jsonb *query = PG_GETARG_JSONB(2); */ int32 nkeys = PG_GETARG_INT32(3); + /* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */ GinTernaryValue res = GIN_TRUE; - int32 i; - bool has_maybe = false; + int32 i; + bool has_maybe = false; if (strategy != JsonbContainsStrategyNumber) elog(ERROR, "unrecognized strategy number: %d", strategy); @@ -447,10 +454,10 @@ gin_triconsistent_jsonb_hash(PG_FUNCTION_ARGS) /* * jsonb_hash_ops index doesn't have information about correspondence of - * Jsonb keys and values (as distinct from GIN keys, which for this opclass - * are a hash of a pair, or a hash of just an element), so invariably we - * recheck. This is also reflected in how GIN_MAYBE is given in response - * to there being no GIN_MAYBE input. + * Jsonb keys and values (as distinct from GIN keys, which for this + * opclass are a hash of a pair, or a hash of just an element), so + * invariably we recheck. This is also reflected in how GIN_MAYBE is + * given in response to there being no GIN_MAYBE input. */ if (!has_maybe && res == GIN_TRUE) res = GIN_MAYBE; @@ -488,7 +495,7 @@ gin_extract_jsonb_hash(PG_FUNCTION_ARGS) while ((r = JsonbIteratorNext(&it, &v, false)) != WJB_DONE) { - PathHashStack *tmp; + PathHashStack *tmp; if (i >= total) { @@ -513,10 +520,10 @@ gin_extract_jsonb_hash(PG_FUNCTION_ARGS) /* * We pass forward hashes from previous container nesting * levels so that nested arrays with an outermost nested - * object will have element hashes mixed with the outermost - * key. It's also somewhat useful to have nested objects - * innermost values have hashes that are a function of not - * just their own key, but outer keys too. + * object will have element hashes mixed with the + * outermost key. 
It's also somewhat useful to have + * nested objects innermost values have hashes that are a + * function of not just their own key, but outer keys too. */ stack->hash = tmp->hash; } @@ -526,7 +533,7 @@ gin_extract_jsonb_hash(PG_FUNCTION_ARGS) * At least nested level, initialize with stable container * type proxy value */ - stack->hash = (r == WJB_BEGIN_ARRAY)? JB_FARRAY:JB_FOBJECT; + stack->hash = (r == WJB_BEGIN_ARRAY) ? JB_FARRAY : JB_FOBJECT; } stack->parent = tmp; break; @@ -607,7 +614,7 @@ make_text_key(const char *str, int len, char flag) * Create a textual representation of a jsonbValue for GIN storage. */ static text * -make_scalar_key(const JsonbValue * scalarVal, char flag) +make_scalar_key(const JsonbValue *scalarVal, char flag) { text *item; char *cstr; @@ -621,6 +628,7 @@ make_scalar_key(const JsonbValue * scalarVal, char flag) item = make_text_key(scalarVal->val.boolean ? "t" : "f", 1, flag); break; case jbvNumeric: + /* * A normalized textual representation, free of trailing zeroes is * is required. diff --git a/src/backend/utils/adt/jsonb_op.c b/src/backend/utils/adt/jsonb_op.c index cfddccbbbbf..38bd5676739 100644 --- a/src/backend/utils/adt/jsonb_op.c +++ b/src/backend/utils/adt/jsonb_op.c @@ -69,7 +69,7 @@ jsonb_exists_any(PG_FUNCTION_ARGS) if (findJsonbValueFromSuperHeader(VARDATA(jb), JB_FOBJECT | JB_FARRAY, plowbound, - arrKey->val.array.elems + i) != NULL) + arrKey->val.array.elems + i) != NULL) PG_RETURN_BOOL(true); } @@ -103,7 +103,7 @@ jsonb_exists_all(PG_FUNCTION_ARGS) if (findJsonbValueFromSuperHeader(VARDATA(jb), JB_FOBJECT | JB_FARRAY, plowbound, - arrKey->val.array.elems + i) == NULL) + arrKey->val.array.elems + i) == NULL) PG_RETURN_BOOL(false); } @@ -116,7 +116,8 @@ jsonb_contains(PG_FUNCTION_ARGS) Jsonb *val = PG_GETARG_JSONB(0); Jsonb *tmpl = PG_GETARG_JSONB(1); - JsonbIterator *it1, *it2; + JsonbIterator *it1, + *it2; if (JB_ROOT_COUNT(val) < JB_ROOT_COUNT(tmpl) || JB_ROOT_IS_OBJECT(val) != JB_ROOT_IS_OBJECT(tmpl)) @@ -135,7 +136,8 @@ jsonb_contained(PG_FUNCTION_ARGS) Jsonb *tmpl = PG_GETARG_JSONB(0); Jsonb *val = PG_GETARG_JSONB(1); - JsonbIterator *it1, *it2; + JsonbIterator *it1, + *it2; if (JB_ROOT_COUNT(val) < JB_ROOT_COUNT(tmpl) || JB_ROOT_IS_OBJECT(val) != JB_ROOT_IS_OBJECT(tmpl)) @@ -209,7 +211,6 @@ jsonb_le(PG_FUNCTION_ARGS) Datum jsonb_ge(PG_FUNCTION_ARGS) { - Jsonb *jba = PG_GETARG_JSONB(0); Jsonb *jbb = PG_GETARG_JSONB(1); bool res; @@ -270,7 +271,7 @@ jsonb_hash(PG_FUNCTION_ARGS) { switch (r) { - /* Rotation is left to JsonbHashScalarValue() */ + /* Rotation is left to JsonbHashScalarValue() */ case WJB_BEGIN_ARRAY: hash ^= JB_FARRAY; break; diff --git a/src/backend/utils/adt/jsonb_util.c b/src/backend/utils/adt/jsonb_util.c index 1ac145b1cd9..1caaa4a9cc3 100644 --- a/src/backend/utils/adt/jsonb_util.c +++ b/src/backend/utils/adt/jsonb_util.c @@ -45,10 +45,10 @@ */ typedef struct convertLevel { - uint32 i; /* Iterates once per element, or once per pair */ - uint32 *header; /* Pointer to current container header */ - JEntry *meta; /* This level's metadata */ - char *begin; /* Pointer into convertState.buffer */ + uint32 i; /* Iterates once per element, or once per pair */ + uint32 *header; /* Pointer to current container header */ + JEntry *meta; /* This level's metadata */ + char *begin; /* Pointer into convertState.buffer */ } convertLevel; /* @@ -57,41 +57,41 @@ typedef struct convertLevel typedef struct convertState { /* Preallocated buffer in which to form varlena/Jsonb value */ - Jsonb *buffer; + Jsonb *buffer; /* Pointer into buffer 
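The path hash stack above seeds each nesting level with its parent's hash, so the GIN key finally derived for a value depends on every key on the path down to it. A minimal sketch of that mixing; the combiner and names are assumptions, not the backend's hashing:

typedef struct PathStackSketch
{
    unsigned int hash;
    struct PathStackSketch *parent;
} PathStackSketch;

/* Any reasonable mixer will do for the sketch. */
static unsigned int
mix_hash(unsigned int seed, unsigned int h)
{
    return seed ^ (h + 0x9e3779b9u + (seed << 6) + (seed >> 2));
}

/* Entering a nested container: start from the parent's accumulated hash. */
static void
push_level(PathStackSketch *level, PathStackSketch *parent)
{
    level->parent = parent;
    level->hash = parent ? parent->hash : 0;
}

/* A value's stored hash mixes its own hash with all outer keys' hashes. */
static unsigned int
path_value_hash(const PathStackSketch *top, unsigned int value_hash)
{
    return mix_hash(top ? top->hash : 0, value_hash);
}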
*/ - char *ptr; + char *ptr; /* State for */ - convertLevel *allState, /* Overall state array */ - *contPtr; /* Cur container pointer (in allState) */ + convertLevel *allState, /* Overall state array */ + *contPtr; /* Cur container pointer (in allState) */ /* Current size of buffer containing allState array */ - Size levelSz; - -} convertState; - -static int compareJsonbScalarValue(JsonbValue * a, JsonbValue * b); -static int lexicalCompareJsonbStringValue(const void *a, const void *b); -static Size convertJsonb(JsonbValue * val, Jsonb* buffer); -static inline short addPaddingInt(convertState * cstate); -static void walkJsonbValueConversion(JsonbValue * val, convertState * cstate, - uint32 nestlevel); -static void putJsonbValueConversion(convertState * cstate, JsonbValue * val, - uint32 flags, uint32 level); -static void putScalarConversion(convertState * cstate, JsonbValue * scalarVal, - uint32 level, uint32 i); -static void iteratorFromContainerBuf(JsonbIterator * it, char *buffer); -static bool formIterIsContainer(JsonbIterator ** it, JsonbValue * val, - JEntry * ent, bool skipNested); -static JsonbIterator *freeAndGetParent(JsonbIterator * it); -static JsonbParseState *pushState(JsonbParseState ** pstate); -static void appendKey(JsonbParseState * pstate, JsonbValue * scalarVal); -static void appendValue(JsonbParseState * pstate, JsonbValue * scalarVal); -static void appendElement(JsonbParseState * pstate, JsonbValue * scalarVal); -static int lengthCompareJsonbStringValue(const void *a, const void *b, void *arg); -static int lengthCompareJsonbPair(const void *a, const void *b, void *arg); -static void uniqueifyJsonbObject(JsonbValue * object); -static void uniqueifyJsonbArray(JsonbValue * array); + Size levelSz; + +} convertState; + +static int compareJsonbScalarValue(JsonbValue *a, JsonbValue *b); +static int lexicalCompareJsonbStringValue(const void *a, const void *b); +static Size convertJsonb(JsonbValue *val, Jsonb *buffer); +static inline short addPaddingInt(convertState *cstate); +static void walkJsonbValueConversion(JsonbValue *val, convertState *cstate, + uint32 nestlevel); +static void putJsonbValueConversion(convertState *cstate, JsonbValue *val, + uint32 flags, uint32 level); +static void putScalarConversion(convertState *cstate, JsonbValue *scalarVal, + uint32 level, uint32 i); +static void iteratorFromContainerBuf(JsonbIterator *it, char *buffer); +static bool formIterIsContainer(JsonbIterator **it, JsonbValue *val, + JEntry *ent, bool skipNested); +static JsonbIterator *freeAndGetParent(JsonbIterator *it); +static JsonbParseState *pushState(JsonbParseState **pstate); +static void appendKey(JsonbParseState *pstate, JsonbValue *scalarVal); +static void appendValue(JsonbParseState *pstate, JsonbValue *scalarVal); +static void appendElement(JsonbParseState *pstate, JsonbValue *scalarVal); +static int lengthCompareJsonbStringValue(const void *a, const void *b, void *arg); +static int lengthCompareJsonbPair(const void *a, const void *b, void *arg); +static void uniqueifyJsonbObject(JsonbValue *object); +static void uniqueifyJsonbArray(JsonbValue *array); /* * Turn an in-memory JsonbValue into a Jsonb for on-disk storage. @@ -107,7 +107,7 @@ static void uniqueifyJsonbArray(JsonbValue * array); * inconvenient to deal with a great amount of other state. 
*/ Jsonb * -JsonbValueToJsonb(JsonbValue * val) +JsonbValueToJsonb(JsonbValue *val) { Jsonb *out; Size sz; @@ -164,7 +164,7 @@ int compareJsonbSuperHeaderValue(JsonbSuperHeader a, JsonbSuperHeader b) { JsonbIterator *ita, - *itb; + *itb; int res = 0; ita = JsonbIteratorInit(a); @@ -182,9 +182,9 @@ compareJsonbSuperHeaderValue(JsonbSuperHeader a, JsonbSuperHeader b) /* * To a limited extent we'll redundantly iterate over an array/object - * while re-performing the same test without any reasonable expectation - * of the same container types having differing lengths (as when we - * process a WJB_BEGIN_OBJECT, and later the corresponding + * while re-performing the same test without any reasonable + * expectation of the same container types having differing lengths + * (as when we process a WJB_BEGIN_OBJECT, and later the corresponding * WJB_END_OBJECT), but no matter. */ if (ra == rb) @@ -208,9 +208,10 @@ compareJsonbSuperHeaderValue(JsonbSuperHeader a, JsonbSuperHeader b) res = compareJsonbScalarValue(&va, &vb); break; case jbvArray: + /* - * This could be a "raw scalar" pseudo array. That's a - * special case here though, since we still want the + * This could be a "raw scalar" pseudo array. That's + * a special case here though, since we still want the * general type-based comparisons to apply, and as far * as we're concerned a pseudo array is just a scalar. */ @@ -258,12 +259,14 @@ compareJsonbSuperHeaderValue(JsonbSuperHeader a, JsonbSuperHeader b) while (ita != NULL) { JsonbIterator *i = ita->parent; + pfree(ita); ita = i; } while (itb != NULL) { JsonbIterator *i = itb->parent; + pfree(itb); itb = i; } @@ -313,12 +316,12 @@ compareJsonbSuperHeaderValue(JsonbSuperHeader a, JsonbSuperHeader b) */ JsonbValue * findJsonbValueFromSuperHeader(JsonbSuperHeader sheader, uint32 flags, - uint32 *lowbound, JsonbValue * key) + uint32 *lowbound, JsonbValue *key) { - uint32 superheader = *(uint32 *) sheader; - JEntry *array = (JEntry *) (sheader + sizeof(uint32)); - int count = (superheader & JB_CMASK); - JsonbValue *result = palloc(sizeof(JsonbValue)); + uint32 superheader = *(uint32 *) sheader; + JEntry *array = (JEntry *) (sheader + sizeof(uint32)); + int count = (superheader & JB_CMASK); + JsonbValue *result = palloc(sizeof(JsonbValue)); Assert((flags & ~(JB_FARRAY | JB_FOBJECT)) == 0); @@ -347,6 +350,7 @@ findJsonbValueFromSuperHeader(JsonbSuperHeader sheader, uint32 flags, { result->type = jbvNumeric; result->val.numeric = (Numeric) (data + INTALIGN(JBE_OFF(*e))); + result->estSize = 2 * sizeof(JEntry) + VARSIZE_ANY(result->val.numeric); } @@ -381,8 +385,8 @@ findJsonbValueFromSuperHeader(JsonbSuperHeader sheader, uint32 flags, JsonbValue candidate; /* - * Note how we compensate for the fact that we're iterating through - * pairs (not entries) throughout. + * Note how we compensate for the fact that we're iterating + * through pairs (not entries) throughout. 
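[Editor's note] The findJsonbValueFromSuperHeader() hunks above mention compensating for the fact that an object's entries come in key/value pairs while the binary search walks keys only. The following is a minimal standalone sketch of that idea, not the jsonb_util.c code: the Pair type, pair_key_cmp() and find_value() are invented for illustration, and the "length first, then memcmp" key ordering mirrors what the surrounding comments describe.

#include <stdio.h>
#include <string.h>

/* One key/value pair of a sorted "object"; keys sorted by (length, memcmp). */
typedef struct { const char *key; const char *val; } Pair;

/* Compare keys by length first, then byte-wise, as the lead-in describes. */
static int
pair_key_cmp(const char *a, const char *b)
{
    size_t la = strlen(a), lb = strlen(b);

    if (la != lb)
        return (la > lb) ? 1 : -1;
    return memcmp(a, b, la);
}

/* Binary search over pairs; the key of pair i conceptually sits at entry 2*i. */
static const char *
find_value(const Pair *pairs, int npairs, const char *key)
{
    int lo = 0, hi = npairs;

    while (lo < hi)
    {
        int mid = lo + (hi - lo) / 2;
        int cmp = pair_key_cmp(key, pairs[mid].key);

        if (cmp == 0)
            return pairs[mid].val;
        else if (cmp > 0)
            lo = mid + 1;
        else
            hi = mid;
    }
    return NULL;
}

int
main(void)
{
    /* Already sorted by (length, memcmp): "a" < "b" < "aa". */
    Pair obj[] = {{"a", "1"}, {"b", "2"}, {"aa", "3"}};

    printf("aa -> %s\n", find_value(obj, 3, "aa"));
    printf("zz -> %s\n", find_value(obj, 3, "zz") ? "found" : "(null)");
    return 0;
}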
*/ stopMiddle = stopLow + (count - stopLow) / 2; @@ -419,6 +423,7 @@ findJsonbValueFromSuperHeader(JsonbSuperHeader sheader, uint32 flags, { result->type = jbvNumeric; result->val.numeric = (Numeric) (data + INTALIGN(JBE_OFF(*v))); + result->estSize = 2 * sizeof(JEntry) + VARSIZE_ANY(result->val.numeric); } @@ -431,8 +436,8 @@ findJsonbValueFromSuperHeader(JsonbSuperHeader sheader, uint32 flags, else { /* - * See header comments to understand why this never happens - * with arrays + * See header comments to understand why this never + * happens with arrays */ result->type = jbvBinary; result->val.binary.data = data + INTALIGN(JBE_OFF(*v)); @@ -508,6 +513,7 @@ getIthJsonbValueFromSuperHeader(JsonbSuperHeader sheader, uint32 i) { result->type = jbvNumeric; result->val.numeric = (Numeric) (data + INTALIGN(JBE_OFF(*e))); + result->estSize = 2 * sizeof(JEntry) + VARSIZE_ANY(result->val.numeric); } else if (JBE_ISBOOL(*e)) @@ -541,7 +547,7 @@ getIthJsonbValueFromSuperHeader(JsonbSuperHeader sheader, uint32 i) * "raw scalar" pseudo array to append that. */ JsonbValue * -pushJsonbValue(JsonbParseState ** pstate, int seq, JsonbValue * scalarVal) +pushJsonbValue(JsonbParseState **pstate, int seq, JsonbValue *scalarVal) { JsonbValue *result = NULL; @@ -555,7 +561,7 @@ pushJsonbValue(JsonbParseState ** pstate, int seq, JsonbValue * scalarVal) (*pstate)->contVal.estSize = 3 * sizeof(JEntry); (*pstate)->contVal.val.array.nElems = 0; (*pstate)->contVal.val.array.rawScalar = (scalarVal && - scalarVal->val.array.rawScalar); + scalarVal->val.array.rawScalar); if (scalarVal && scalarVal->val.array.nElems > 0) { /* Assume that this array is still really a scalar */ @@ -567,7 +573,7 @@ pushJsonbValue(JsonbParseState ** pstate, int seq, JsonbValue * scalarVal) (*pstate)->size = 4; } (*pstate)->contVal.val.array.elems = palloc(sizeof(JsonbValue) * - (*pstate)->size); + (*pstate)->size); break; case WJB_BEGIN_OBJECT: Assert(!scalarVal); @@ -578,7 +584,7 @@ pushJsonbValue(JsonbParseState ** pstate, int seq, JsonbValue * scalarVal) (*pstate)->contVal.val.object.nPairs = 0; (*pstate)->size = 4; (*pstate)->contVal.val.object.pairs = palloc(sizeof(JsonbPair) * - (*pstate)->size); + (*pstate)->size); break; case WJB_KEY: Assert(scalarVal->type == jbvString); @@ -674,9 +680,9 @@ JsonbIteratorInit(JsonbSuperHeader sheader) * garbage. */ int -JsonbIteratorNext(JsonbIterator ** it, JsonbValue * val, bool skipNested) +JsonbIteratorNext(JsonbIterator **it, JsonbValue *val, bool skipNested) { - JsonbIterState state; + JsonbIterState state; /* Guard against stack overflow due to overly complex Jsonb */ check_stack_depth(); @@ -694,9 +700,10 @@ JsonbIteratorNext(JsonbIterator ** it, JsonbValue * val, bool skipNested) /* Set v to array on first array call */ val->type = jbvArray; val->val.array.nElems = (*it)->nElems; + /* - * v->val.array.elems is not actually set, because we aren't doing a - * full conversion + * v->val.array.elems is not actually set, because we aren't doing + * a full conversion */ val->val.array.rawScalar = (*it)->isScalar; (*it)->i = 0; @@ -709,8 +716,8 @@ JsonbIteratorNext(JsonbIterator ** it, JsonbValue * val, bool skipNested) if ((*it)->i >= (*it)->nElems) { /* - * All elements within array already processed. Report this to - * caller, and give it back original parent iterator (which + * All elements within array already processed. Report this + * to caller, and give it back original parent iterator (which * independently tracks iteration progress at its level of * nesting). 
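[Editor's note] The JsonbIteratorNext() hunks above talk about handing control back to the parent iterator once a nesting level is exhausted (freeAndGetParent). Below is a toy, self-contained version of that parent-chained iteration over a nested structure; the Node/Iter types and function names are made up for illustration and are not the jsonb iterator API.

#include <stdio.h>
#include <stdlib.h>

/* A toy nested container: leaves carry values, non-leaves hold children. */
typedef struct Node
{
    int          value;        /* used when nchildren == 0 */
    int          nchildren;
    struct Node *children;
} Node;

/* One iterator per nesting level, chained through "parent". */
typedef struct Iter
{
    const Node  *node;
    int          i;            /* next child to visit at this level */
    struct Iter *parent;
} Iter;

static Iter *
iter_push(const Node *node, Iter *parent)
{
    Iter *it = malloc(sizeof(Iter));

    it->node = node;
    it->i = 0;
    it->parent = parent;
    return it;
}

/* Free the current level and hand control back to the enclosing one. */
static Iter *
free_and_get_parent(Iter *it)
{
    Iter *parent = it->parent;

    free(it);
    return parent;
}

int
main(void)
{
    /* Represents the nested array [1, [2, 3], 4]. */
    Node inner_children[] = {{2, 0, NULL}, {3, 0, NULL}};
    Node outer_children[] = {{1, 0, NULL}, {0, 2, inner_children}, {4, 0, NULL}};
    Node root = {0, 3, outer_children};

    Iter *it = iter_push(&root, NULL);

    while (it != NULL)
    {
        if (it->i >= it->node->nchildren)
        {
            /* This level is exhausted; resume iteration in the parent. */
            it = free_and_get_parent(it);
            continue;
        }

        const Node *child = &it->node->children[it->i++];

        if (child->nchildren > 0)
            it = iter_push(child, it);  /* descend into nested container */
        else
            printf("elem %d\n", child->value);
    }
    return 0;
}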
*/ @@ -741,6 +748,7 @@ JsonbIteratorNext(JsonbIterator ** it, JsonbValue * val, bool skipNested) /* Set v to object on first object call */ val->type = jbvObject; val->val.object.nPairs = (*it)->nElems; + /* * v->val.object.pairs is not actually set, because we aren't * doing a full conversion @@ -756,9 +764,9 @@ JsonbIteratorNext(JsonbIterator ** it, JsonbValue * val, bool skipNested) { /* * All pairs within object already processed. Report this to - * caller, and give it back original containing iterator (which - * independently tracks iteration progress at its level of - * nesting). + * caller, and give it back original containing iterator + * (which independently tracks iteration progress at its level + * of nesting). */ *it = freeAndGetParent(*it); return WJB_END_OBJECT; @@ -787,8 +795,8 @@ JsonbIteratorNext(JsonbIterator ** it, JsonbValue * val, bool skipNested) /* * Value may be a container, in which case we recurse with new, - * child iterator. If it is, don't bother !skipNested callers with - * dealing with the jbvBinary representation. + * child iterator. If it is, don't bother !skipNested callers + * with dealing with the jbvBinary representation. */ if (formIterIsContainer(it, val, &(*it)->meta[((*it)->i++) * 2 + 1], skipNested)) @@ -815,17 +823,18 @@ JsonbIteratorNext(JsonbIterator ** it, JsonbValue * val, bool skipNested) * We determine if mContained is contained within val. */ bool -JsonbDeepContains(JsonbIterator ** val, JsonbIterator ** mContained) +JsonbDeepContains(JsonbIterator **val, JsonbIterator **mContained) { uint32 rval, rcont; JsonbValue vval, vcontained; + /* * Guard against stack overflow due to overly complex Jsonb. * - * Functions called here independently take this precaution, but that might - * not be sufficient since this is also a recursive function. + * Functions called here independently take this precaution, but that + * might not be sufficient since this is also a recursive function. */ check_stack_depth(); @@ -898,7 +907,8 @@ JsonbDeepContains(JsonbIterator ** val, JsonbIterator ** mContained) else { /* Nested container value (object or array) */ - JsonbIterator *nestval, *nestContained; + JsonbIterator *nestval, + *nestContained; Assert(lhsVal->type == jbvBinary); Assert(vcontained.type == jbvBinary); @@ -922,8 +932,9 @@ JsonbDeepContains(JsonbIterator ** val, JsonbIterator ** mContained) * In other words, the mapping of container nodes in the rhs * "vcontained" Jsonb to internal nodes on the lhs is * injective, and parent-child edges on the rhs must be mapped - * to parent-child edges on the lhs to satisfy the condition of - * containment (plus of course the mapped nodes must be equal). + * to parent-child edges on the lhs to satisfy the condition + * of containment (plus of course the mapped nodes must be + * equal). */ if (!JsonbDeepContains(&nestval, &nestContained)) return false; @@ -942,10 +953,10 @@ JsonbDeepContains(JsonbIterator ** val, JsonbIterator ** mContained) * arrays. * * A raw scalar may contain another raw scalar, and an array may - * contain a raw scalar, but a raw scalar may not contain an array. We - * don't do something like this for the object case, since objects can - * only contain pairs, never raw scalars (a pair is represented by an - * rhs object argument with a single contained pair). + * contain a raw scalar, but a raw scalar may not contain an array. 
+ * We don't do something like this for the object case, since objects + * can only contain pairs, never raw scalars (a pair is represented by + * an rhs object argument with a single contained pair). */ if (vval.val.array.rawScalar && !vcontained.val.array.rawScalar) return false; @@ -956,8 +967,9 @@ JsonbDeepContains(JsonbIterator ** val, JsonbIterator ** mContained) rcont = JsonbIteratorNext(mContained, &vcontained, true); /* - * When we get through caller's rhs "is it contained within?" array - * without failing to find one of its values, it's contained. + * When we get through caller's rhs "is it contained within?" + * array without failing to find one of its values, it's + * contained. */ if (rcont == WJB_END_ARRAY) return true; @@ -989,7 +1001,7 @@ JsonbDeepContains(JsonbIterator ** val, JsonbIterator ** mContained) for (i = 0; i < nLhsElems; i++) { - /* Store all lhs elements in temp array*/ + /* Store all lhs elements in temp array */ rcont = JsonbIteratorNext(val, &vval, true); Assert(rcont == WJB_ELEM); @@ -1009,8 +1021,9 @@ JsonbDeepContains(JsonbIterator ** val, JsonbIterator ** mContained) for (i = 0; i < nLhsElems; i++) { /* Nested container value (object or array) */ - JsonbIterator *nestval, *nestContained; - bool contains; + JsonbIterator *nestval, + *nestContained; + bool contains; nestval = JsonbIteratorInit(lhsConts[i].val.binary.data); nestContained = JsonbIteratorInit(vcontained.val.binary.data); @@ -1069,9 +1082,9 @@ arrayToJsonbSortedArray(ArrayType *array) /* * A text array uses at least eight bytes per element, so any overflow in * "key_count * sizeof(JsonbPair)" is small enough for palloc() to catch. - * However, credible improvements to the array format could invalidate that - * assumption. Therefore, use an explicit check rather than relying on - * palloc() to complain. + * However, credible improvements to the array format could invalidate + * that assumption. Therefore, use an explicit check rather than relying + * on palloc() to complain. */ if (elem_count > JSONB_MAX_PAIRS) ereport(ERROR, @@ -1108,9 +1121,9 @@ arrayToJsonbSortedArray(ArrayType *array) * flags. */ void -JsonbHashScalarValue(const JsonbValue * scalarVal, uint32 * hash) +JsonbHashScalarValue(const JsonbValue *scalarVal, uint32 *hash) { - int tmp; + int tmp; /* * Combine hash values of successive keys, values and elements by rotating @@ -1131,11 +1144,11 @@ JsonbHashScalarValue(const JsonbValue * scalarVal, uint32 * hash) case jbvNumeric: /* Must be unaffected by trailing zeroes */ tmp = DatumGetInt32(DirectFunctionCall1(hash_numeric, - NumericGetDatum(scalarVal->val.numeric))); + NumericGetDatum(scalarVal->val.numeric))); *hash ^= tmp; return; case jbvBool: - *hash ^= scalarVal->val.boolean? 0x02:0x04; + *hash ^= scalarVal->val.boolean ? 0x02 : 0x04; return; default: elog(ERROR, "invalid jsonb scalar type"); @@ -1150,7 +1163,7 @@ JsonbHashScalarValue(const JsonbValue * scalarVal, uint32 * hash) * within a single jsonb. 
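[Editor's note] The JsonbHashScalarValue() hunk above describes combining the hashes of successive keys, values and elements by rotating the running value left one bit and XORing in the next hash. A small standalone sketch of that combining rule follows; scalar_hash() is a throwaway stand-in and none of these names come from the source.

#include <stdio.h>
#include <stdint.h>

/* Rotate a 32-bit value left by one bit. */
static uint32_t
rotl1(uint32_t x)
{
    return (x << 1) | (x >> 31);
}

/* Toy scalar hash; stands in for hashing a key, value or element. */
static uint32_t
scalar_hash(const char *s)
{
    uint32_t h = 0;

    while (*s)
        h = h * 31 + (unsigned char) *s++;
    return h;
}

/* Combine successive hashes: rotate the running value, then XOR. */
static void
hash_combine(uint32_t *running, uint32_t item)
{
    *running = rotl1(*running) ^ item;
}

int
main(void)
{
    uint32_t h1 = 0, h2 = 0;

    /* The same items in a different order produce different combined hashes. */
    hash_combine(&h1, scalar_hash("a"));
    hash_combine(&h1, scalar_hash("b"));

    hash_combine(&h2, scalar_hash("b"));
    hash_combine(&h2, scalar_hash("a"));

    printf("h1=%08x h2=%08x\n", h1, h2);
    return 0;
}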
*/ static int -compareJsonbScalarValue(JsonbValue * aScalar, JsonbValue * bScalar) +compareJsonbScalarValue(JsonbValue *aScalar, JsonbValue *bScalar) { if (aScalar->type == bScalar->type) { @@ -1162,8 +1175,8 @@ compareJsonbScalarValue(JsonbValue * aScalar, JsonbValue * bScalar) return lengthCompareJsonbStringValue(aScalar, bScalar, NULL); case jbvNumeric: return DatumGetInt32(DirectFunctionCall2(numeric_cmp, - PointerGetDatum(aScalar->val.numeric), - PointerGetDatum(bScalar->val.numeric))); + PointerGetDatum(aScalar->val.numeric), + PointerGetDatum(bScalar->val.numeric))); case jbvBool: if (aScalar->val.boolean != bScalar->val.boolean) return (aScalar->val.boolean > bScalar->val.boolean) ? 1 : -1; @@ -1201,10 +1214,10 @@ lexicalCompareJsonbStringValue(const void *a, const void *b) * sufficiently large to fit the value */ static Size -convertJsonb(JsonbValue * val, Jsonb *buffer) +convertJsonb(JsonbValue *val, Jsonb *buffer) { - convertState state; - Size len; + convertState state; + Size len; /* Should not already have binary representation */ Assert(val->type != jbvBinary); @@ -1232,7 +1245,7 @@ convertJsonb(JsonbValue * val, Jsonb *buffer) * token (in a manner similar to generic iteration). */ static void -walkJsonbValueConversion(JsonbValue * val, convertState * cstate, +walkJsonbValueConversion(JsonbValue *val, convertState *cstate, uint32 nestlevel) { int i; @@ -1290,9 +1303,11 @@ walkJsonbValueConversion(JsonbValue * val, convertState * cstate, * access to conversion buffer. */ static inline -short addPaddingInt(convertState * cstate) +short +addPaddingInt(convertState *cstate) { - short padlen, p; + short padlen, + p; padlen = INTALIGN(cstate->ptr - VARDATA(cstate->buffer)) - (cstate->ptr - VARDATA(cstate->buffer)); @@ -1320,14 +1335,14 @@ short addPaddingInt(convertState * cstate) * and the end (i.e. there is one call per sequential processing WJB_* token). */ static void -putJsonbValueConversion(convertState * cstate, JsonbValue * val, uint32 flags, +putJsonbValueConversion(convertState *cstate, JsonbValue *val, uint32 flags, uint32 level) { if (level == cstate->levelSz) { cstate->levelSz *= 2; cstate->allState = repalloc(cstate->allState, - sizeof(convertLevel) * cstate->levelSz); + sizeof(convertLevel) * cstate->levelSz); } cstate->contPtr = cstate->allState + level; @@ -1385,9 +1400,9 @@ putJsonbValueConversion(convertState * cstate, JsonbValue * val, uint32 flags, } else if (flags & (WJB_END_ARRAY | WJB_END_OBJECT)) { - convertLevel *prevPtr; /* Prev container pointer */ - uint32 len, - i; + convertLevel *prevPtr; /* Prev container pointer */ + uint32 len, + i; Assert(((flags & WJB_END_ARRAY) && val->type == jbvArray) || ((flags & WJB_END_OBJECT) && val->type == jbvObject)); @@ -1443,10 +1458,10 @@ putJsonbValueConversion(convertState * cstate, JsonbValue * val, uint32 flags, * metadata peculiar to each scalar type. */ static void -putScalarConversion(convertState * cstate, JsonbValue * scalarVal, uint32 level, +putScalarConversion(convertState *cstate, JsonbValue *scalarVal, uint32 level, uint32 i) { - int numlen; + int numlen; short padlen; cstate->contPtr = cstate->allState + level; @@ -1509,7 +1524,7 @@ putScalarConversion(convertState * cstate, JsonbValue * scalarVal, uint32 level, * container type. 
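[Editor's note] The addPaddingInt()/putScalarConversion() hunks above pad the conversion buffer so that numerics land on an int-aligned offset. Here is a tiny, self-contained illustration of that alignment arithmetic; int_align() and padding_for() are invented names, assuming 4-byte int alignment as in the INTALIGN-style rounding the comments refer to.

#include <stdio.h>

/* Round "len" up to the next multiple of 4 (int alignment). */
static size_t
int_align(size_t len)
{
    return (len + 3) & ~((size_t) 3);
}

/* Number of zero pad bytes needed so the next datum starts int-aligned. */
static size_t
padding_for(size_t offset)
{
    return int_align(offset) - offset;
}

int
main(void)
{
    size_t offsets[] = {0, 1, 2, 3, 4, 5, 9};

    for (int i = 0; i < 7; i++)
        printf("offset %zu -> pad %zu bytes\n",
               offsets[i], padding_for(offsets[i]));
    return 0;
}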
*/ static void -iteratorFromContainerBuf(JsonbIterator * it, JsonbSuperHeader sheader) +iteratorFromContainerBuf(JsonbIterator *it, JsonbSuperHeader sheader) { uint32 superheader = *(uint32 *) sheader; @@ -1531,6 +1546,7 @@ iteratorFromContainerBuf(JsonbIterator * it, JsonbSuperHeader sheader) Assert(!it->isScalar || it->nElems == 1); break; case JB_FOBJECT: + /* * Offset reflects that nElems indicates JsonbPairs in an object. * Each key and each value contain Jentry metadata just the same. @@ -1562,7 +1578,7 @@ iteratorFromContainerBuf(JsonbIterator * it, JsonbSuperHeader sheader) * anywhere). */ static bool -formIterIsContainer(JsonbIterator ** it, JsonbValue * val, JEntry * ent, +formIterIsContainer(JsonbIterator **it, JsonbValue *val, JEntry *ent, bool skipNested) { if (JBE_ISNULL(*ent)) @@ -1585,6 +1601,7 @@ formIterIsContainer(JsonbIterator ** it, JsonbValue * val, JEntry * ent, { val->type = jbvNumeric; val->val.numeric = (Numeric) ((*it)->dataProper + INTALIGN(JBE_OFF(*ent))); + val->estSize = 2 * sizeof(JEntry) + VARSIZE_ANY(val->val.numeric); return false; @@ -1609,8 +1626,8 @@ formIterIsContainer(JsonbIterator ** it, JsonbValue * val, JEntry * ent, else { /* - * Must be container type, so setup caller's iterator to point to that, - * and return indication of that. + * Must be container type, so setup caller's iterator to point to + * that, and return indication of that. * * Get child iterator. */ @@ -1627,11 +1644,11 @@ formIterIsContainer(JsonbIterator ** it, JsonbValue * val, JEntry * ent, } /* - * JsonbIteratorNext() worker: Return parent, while freeing memory for current + * JsonbIteratorNext() worker: Return parent, while freeing memory for current * iterator */ static JsonbIterator * -freeAndGetParent(JsonbIterator * it) +freeAndGetParent(JsonbIterator *it) { JsonbIterator *v = it->parent; @@ -1643,7 +1660,7 @@ freeAndGetParent(JsonbIterator * it) * pushJsonbValue() worker: Iteration-like forming of Jsonb */ static JsonbParseState * -pushState(JsonbParseState ** pstate) +pushState(JsonbParseState **pstate) { JsonbParseState *ns = palloc(sizeof(JsonbParseState)); @@ -1655,7 +1672,7 @@ pushState(JsonbParseState ** pstate) * pushJsonbValue() worker: Append a pair key to state when generating a Jsonb */ static void -appendKey(JsonbParseState * pstate, JsonbValue * string) +appendKey(JsonbParseState *pstate, JsonbValue *string) { JsonbValue *object = &pstate->contVal; @@ -1672,7 +1689,7 @@ appendKey(JsonbParseState * pstate, JsonbValue * string) { pstate->size *= 2; object->val.object.pairs = repalloc(object->val.object.pairs, - sizeof(JsonbPair) * pstate->size); + sizeof(JsonbPair) * pstate->size); } object->val.object.pairs[object->val.object.nPairs].key = *string; @@ -1686,7 +1703,7 @@ appendKey(JsonbParseState * pstate, JsonbValue * string) * Jsonb */ static void -appendValue(JsonbParseState * pstate, JsonbValue * scalarVal) +appendValue(JsonbParseState *pstate, JsonbValue *scalarVal) { JsonbValue *object = &pstate->contVal; @@ -1700,7 +1717,7 @@ appendValue(JsonbParseState * pstate, JsonbValue * scalarVal) * pushJsonbValue() worker: Append an element to state when generating a Jsonb */ static void -appendElement(JsonbParseState * pstate, JsonbValue * scalarVal) +appendElement(JsonbParseState *pstate, JsonbValue *scalarVal) { JsonbValue *array = &pstate->contVal; @@ -1716,7 +1733,7 @@ appendElement(JsonbParseState * pstate, JsonbValue * scalarVal) { pstate->size *= 2; array->val.array.elems = repalloc(array->val.array.elems, - sizeof(JsonbValue) * pstate->size); + 
sizeof(JsonbValue) * pstate->size); } array->val.array.elems[array->val.array.nElems++] = *scalarVal; @@ -1797,7 +1814,7 @@ lengthCompareJsonbPair(const void *a, const void *b, void *binequal) * Sort and unique-ify pairs in JsonbValue object */ static void -uniqueifyJsonbObject(JsonbValue * object) +uniqueifyJsonbObject(JsonbValue *object) { bool hasNonUniq = false; @@ -1838,15 +1855,15 @@ uniqueifyJsonbObject(JsonbValue * object) * Sorting uses internal ordering. */ static void -uniqueifyJsonbArray(JsonbValue * array) +uniqueifyJsonbArray(JsonbValue *array) { - bool hasNonUniq = false; + bool hasNonUniq = false; Assert(array->type == jbvArray); /* - * Actually sort values, determining if any were equal on the basis of full - * binary equality (rather than just having the same string length). + * Actually sort values, determining if any were equal on the basis of + * full binary equality (rather than just having the same string length). */ if (array->val.array.nElems > 1) qsort_arg(array->val.array.elems, array->val.array.nElems, diff --git a/src/backend/utils/adt/jsonfuncs.c b/src/backend/utils/adt/jsonfuncs.c index 2423b737c9e..6b1ce9b3a9f 100644 --- a/src/backend/utils/adt/jsonfuncs.c +++ b/src/backend/utils/adt/jsonfuncs.c @@ -104,11 +104,12 @@ static void populate_recordset_array_element_start(void *state, bool isnull); /* worker function for populate_recordset and to_recordset */ static inline Datum populate_recordset_worker(FunctionCallInfo fcinfo, bool have_record_arg); + /* Worker that takes care of common setup for us */ static JsonbValue *findJsonbValueFromSuperHeaderLen(JsonbSuperHeader sheader, - uint32 flags, - char *key, - uint32 keylen); + uint32 flags, + char *key, + uint32 keylen); /* search type classification for json_get* functions */ typedef enum @@ -235,8 +236,8 @@ typedef struct PopulateRecordsetState } PopulateRecordsetState; /* Turn a jsonb object into a record */ -static void make_row_from_rec_and_jsonb(Jsonb * element, - PopulateRecordsetState *state); +static void make_row_from_rec_and_jsonb(Jsonb *element, + PopulateRecordsetState *state); /* * SQL function json_object_keys @@ -791,7 +792,7 @@ get_path_all(FunctionCallInfo fcinfo, bool as_text) result = get_worker(json, NULL, -1, tpath, ipath, npath, as_text); if (result != NULL) - PG_RETURN_TEXT_P(result); + PG_RETURN_TEXT_P(result); else /* null is NULL, regardless */ PG_RETURN_NULL(); @@ -1178,7 +1179,7 @@ get_jsonb_path_all(FunctionCallInfo fcinfo, bool as_text) jbvp = findJsonbValueFromSuperHeaderLen(superHeader, JB_FOBJECT, VARDATA_ANY(pathtext[i]), - VARSIZE_ANY_EXHDR(pathtext[i])); + VARSIZE_ANY_EXHDR(pathtext[i])); } else if (have_array) { @@ -1209,8 +1210,8 @@ get_jsonb_path_all(FunctionCallInfo fcinfo, bool as_text) if (jbvp->type == jbvBinary) { - JsonbIterator *it = JsonbIteratorInit(jbvp->val.binary.data); - int r; + JsonbIterator *it = JsonbIteratorInit(jbvp->val.binary.data); + int r; r = JsonbIteratorNext(&it, &tv, true); superHeader = (JsonbSuperHeader) jbvp->val.binary.data; @@ -1932,7 +1933,7 @@ elements_array_element_end(void *state, bool isnull) text *val; HeapTuple tuple; Datum values[1]; - bool nulls[1] = {false}; + bool nulls[1] = {false}; /* skip over nested objects */ if (_state->lex->lex_level != 1) @@ -2035,7 +2036,7 @@ json_to_record(PG_FUNCTION_ARGS) static inline Datum populate_record_worker(FunctionCallInfo fcinfo, bool have_record_arg) { - int json_arg_num = have_record_arg ? 1 : 0; + int json_arg_num = have_record_arg ? 
1 : 0; Oid jtype = get_fn_expr_argtype(fcinfo->flinfo, json_arg_num); text *json; Jsonb *jb = NULL; @@ -2060,7 +2061,7 @@ populate_record_worker(FunctionCallInfo fcinfo, bool have_record_arg) if (have_record_arg) { - Oid argtype = get_fn_expr_argtype(fcinfo->flinfo, 0); + Oid argtype = get_fn_expr_argtype(fcinfo->flinfo, 0); if (!type_is_rowtype(argtype)) ereport(ERROR, @@ -2275,7 +2276,7 @@ populate_record_worker(FunctionCallInfo fcinfo, bool have_record_arg) s = pnstrdup((v->val.boolean) ? "t" : "f", 1); else if (v->type == jbvNumeric) s = DatumGetCString(DirectFunctionCall1(numeric_out, - PointerGetDatum(v->val.numeric))); + PointerGetDatum(v->val.numeric))); else if (!use_json_as_text) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), @@ -2476,7 +2477,7 @@ json_to_recordset(PG_FUNCTION_ARGS) } static void -make_row_from_rec_and_jsonb(Jsonb * element, PopulateRecordsetState *state) +make_row_from_rec_and_jsonb(Jsonb *element, PopulateRecordsetState *state) { Datum *values; bool *nulls; @@ -2575,7 +2576,7 @@ make_row_from_rec_and_jsonb(Jsonb * element, PopulateRecordsetState *state) s = pnstrdup((v->val.boolean) ? "t" : "f", 1); else if (v->type == jbvNumeric) s = DatumGetCString(DirectFunctionCall1(numeric_out, - PointerGetDatum(v->val.numeric))); + PointerGetDatum(v->val.numeric))); else if (!state->use_json_as_text) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), @@ -2603,7 +2604,7 @@ make_row_from_rec_and_jsonb(Jsonb * element, PopulateRecordsetState *state) static inline Datum populate_recordset_worker(FunctionCallInfo fcinfo, bool have_record_arg) { - int json_arg_num = have_record_arg ? 1 : 0; + int json_arg_num = have_record_arg ? 1 : 0; Oid jtype = get_fn_expr_argtype(fcinfo->flinfo, json_arg_num); bool use_json_as_text; ReturnSetInfo *rsi; @@ -2620,7 +2621,7 @@ populate_recordset_worker(FunctionCallInfo fcinfo, bool have_record_arg) if (have_record_arg) { - Oid argtype = get_fn_expr_argtype(fcinfo->flinfo, 0); + Oid argtype = get_fn_expr_argtype(fcinfo->flinfo, 0); if (!type_is_rowtype(argtype)) ereport(ERROR, diff --git a/src/backend/utils/adt/like.c b/src/backend/utils/adt/like.c index 3d5f3d538b6..bcd9e2182d0 100644 --- a/src/backend/utils/adt/like.c +++ b/src/backend/utils/adt/like.c @@ -76,12 +76,12 @@ wchareq(char *p1, char *p2) /* * Formerly we had a routine iwchareq() here that tried to do case-insensitive - * comparison of multibyte characters. It did not work at all, however, + * comparison of multibyte characters. It did not work at all, however, * because it relied on tolower() which has a single-byte API ... and * towlower() wouldn't be much better since we have no suitably cheap way * of getting a single character transformed to the system's wchar_t format. * So now, we just downcase the strings using lower() and apply regular LIKE - * comparison. This should be revisited when we install better locale support. + * comparison. This should be revisited when we install better locale support. */ /* diff --git a/src/backend/utils/adt/misc.c b/src/backend/utils/adt/misc.c index 241f738d608..4eeb6314fae 100644 --- a/src/backend/utils/adt/misc.c +++ b/src/backend/utils/adt/misc.c @@ -96,7 +96,7 @@ pg_signal_backend(int pid, int sig) /* * BackendPidGetProc returns NULL if the pid isn't valid; but by the time * we reach kill(), a process for which we get a valid proc here might - * have terminated on its own. There's no way to acquire a lock on an + * have terminated on its own. There's no way to acquire a lock on an * arbitrary process to prevent that. 
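[Editor's note] The pg_signal_backend() comment above notes that the target process may have exited on its own between the validity check and the kill() call, and that there is no way to lock an arbitrary process against that. A hedged standalone sketch of tolerating that race with plain POSIX kill() follows; signal_process() is an invented helper, and signal 0 is used only as a harmless liveness probe.

#include <stdio.h>
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <unistd.h>

/*
 * Send "sig" to "pid", tolerating the race in which the process exits
 * between our earlier validity check and the kill() call.
 */
static int
signal_process(pid_t pid, int sig)
{
    if (kill(pid, sig) != 0)
    {
        if (errno == ESRCH)
        {
            /* Process ended on its own first; not an error for our purposes. */
            fprintf(stderr, "pid %d is no longer running\n", (int) pid);
            return 0;
        }
        fprintf(stderr, "could not signal pid %d: %s\n",
                (int) pid, strerror(errno));
        return -1;
    }
    return 0;
}

int
main(void)
{
    /* Probe our own pid with signal 0 ("are you there?"). */
    return signal_process(getpid(), 0);
}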
But since so far all the callers of * this mechanism involve some request for ending the process anyway, that * it might end on its own first is not a problem. @@ -120,7 +120,7 @@ pg_signal_backend(int pid, int sig) * recycled for a new process, before reaching here? Then we'd be trying * to kill the wrong thing. Seems near impossible when sequential pid * assignment and wraparound is used. Perhaps it could happen on a system - * where pid re-use is randomized. That race condition possibility seems + * where pid re-use is randomized. That race condition possibility seems * too unlikely to worry about. */ @@ -140,7 +140,7 @@ pg_signal_backend(int pid, int sig) } /* - * Signal to cancel a backend process. This is allowed if you are superuser or + * Signal to cancel a backend process. This is allowed if you are superuser or * have the same role as the process being canceled. */ Datum @@ -254,7 +254,7 @@ pg_tablespace_databases(PG_FUNCTION_ARGS) fctx->location = psprintf("base"); else fctx->location = psprintf("pg_tblspc/%u/%s", tablespaceOid, - TABLESPACE_VERSION_DIRECTORY); + TABLESPACE_VERSION_DIRECTORY); fctx->dirdesc = AllocateDir(fctx->location); @@ -326,7 +326,7 @@ pg_tablespace_location(PG_FUNCTION_ARGS) /* * It's useful to apply this function to pg_class.reltablespace, wherein - * zero means "the database's default tablespace". So, rather than + * zero means "the database's default tablespace". So, rather than * throwing an error for zero, we choose to assume that's what is meant. */ if (tablespaceOid == InvalidOid) @@ -384,7 +384,7 @@ pg_sleep(PG_FUNCTION_ARGS) * loop. * * By computing the intended stop time initially, we avoid accumulation of - * extra delay across multiple sleeps. This also ensures we won't delay + * extra delay across multiple sleeps. This also ensures we won't delay * less than the specified time when WaitLatch is terminated early by a * non-query-cancelling signal such as SIGHUP. */ @@ -547,7 +547,7 @@ pg_relation_is_updatable(PG_FUNCTION_ARGS) * pg_column_is_updatable - determine whether a column is updatable * * This function encapsulates the decision about just what - * information_schema.columns.is_updatable actually means. It's not clear + * information_schema.columns.is_updatable actually means. It's not clear * whether deletability of the column's relation should be required, so * we want that decision in C code where we could change it without initdb. 
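[Editor's note] The pg_sleep() comment above explains that the intended stop time is computed once up front, so that per-iteration overhead in the sleep loop cannot accumulate and the total delay never falls short. The following self-contained sketch shows that pattern with plain nanosleep() in place of a latch wait; chunked_sleep() is an invented name and the 100 ms slice is an arbitrary choice.

#include <stdio.h>
#include <time.h>

/*
 * Sleep for roughly "secs" seconds in short slices, the way an interruptible
 * sleep loop must, without letting per-iteration overhead accumulate: the
 * stop time is computed once, and each iteration only checks what remains.
 */
static void
chunked_sleep(double secs)
{
    struct timespec chunk = {0, 100 * 1000 * 1000};     /* 100 ms slices */
    time_t endtime = time(NULL) + (time_t) secs;

    for (;;)
    {
        double remaining = difftime(endtime, time(NULL));

        if (remaining <= 0)
            break;
        /* A real server would wait on a latch here and recheck interrupts. */
        nanosleep(&chunk, NULL);
    }
}

int
main(void)
{
    chunked_sleep(1.0);
    printf("done\n");
    return 0;
}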
*/ diff --git a/src/backend/utils/adt/nabstime.c b/src/backend/utils/adt/nabstime.c index 74d24aa0651..a6d30851df9 100644 --- a/src/backend/utils/adt/nabstime.c +++ b/src/backend/utils/adt/nabstime.c @@ -118,26 +118,24 @@ abstime2tm(AbsoluteTime _time, int *tzp, struct pg_tm * tm, char **tzn) if (tzp != NULL) { - *tzp = -tm->tm_gmtoff; /* tm_gmtoff is Sun/DEC-ism */ + *tzp = -tm->tm_gmtoff; /* tm_gmtoff is Sun/DEC-ism */ + /* + * XXX FreeBSD man pages indicate that this should work - tgl 97/04/23 + */ + if (tzn != NULL) + { /* - * XXX FreeBSD man pages indicate that this should work - tgl - * 97/04/23 + * Copy no more than MAXTZLEN bytes of timezone to tzn, in case it + * contains an error message, which doesn't fit in the buffer */ - if (tzn != NULL) - { - /* - * Copy no more than MAXTZLEN bytes of timezone to tzn, in - * case it contains an error message, which doesn't fit in the - * buffer - */ - StrNCpy(*tzn, tm->tm_zone, MAXTZLEN + 1); - if (strlen(tm->tm_zone) > MAXTZLEN) - ereport(WARNING, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("invalid time zone name: \"%s\"", - tm->tm_zone))); - } + StrNCpy(*tzn, tm->tm_zone, MAXTZLEN + 1); + if (strlen(tm->tm_zone) > MAXTZLEN) + ereport(WARNING, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("invalid time zone name: \"%s\"", + tm->tm_zone))); + } } else tm->tm_isdst = -1; @@ -175,7 +173,7 @@ tm2abstime(struct pg_tm * tm, int tz) sec = tm->tm_sec + tz + (tm->tm_min + (day * HOURS_PER_DAY + tm->tm_hour) * MINS_PER_HOUR) * SECS_PER_MINUTE; /* - * check for overflow. We need a little slop here because the H/M/S plus + * check for overflow. We need a little slop here because the H/M/S plus * TZ offset could add up to more than 1 day. */ if ((day >= MAX_DAYNUM - 10 && sec < 0) || @@ -1140,7 +1138,7 @@ tintervalsame(PG_FUNCTION_ARGS) * 1. The interval length computations overflow at 2^31 seconds, causing * intervals longer than that to sort oddly compared to those shorter. * 2. infinity and minus infinity (NOEND_ABSTIME and NOSTART_ABSTIME) are - * just ordinary integers. Since this code doesn't handle them specially, + * just ordinary integers. Since this code doesn't handle them specially, * it's possible for [a b] to be considered longer than [c infinity] for * finite abstimes a, b, c. In combination with the previous point, the * interval [-infinity infinity] is treated as being shorter than many finite diff --git a/src/backend/utils/adt/network.c b/src/backend/utils/adt/network.c index 8bdf5778d89..69c7ac182f0 100644 --- a/src/backend/utils/adt/network.c +++ b/src/backend/utils/adt/network.c @@ -39,7 +39,7 @@ network_in(char *src, bool is_cidr) dst = (inet *) palloc0(sizeof(inet)); /* - * First, check to see if this is an IPv6 or IPv4 address. IPv6 addresses + * First, check to see if this is an IPv6 or IPv4 address. IPv6 addresses * will have a : somewhere in them (several, in fact) so if there is one * present, assume it's V6, otherwise assume it's V4. */ @@ -144,7 +144,7 @@ cidr_out(PG_FUNCTION_ARGS) * family, bits, is_cidr, address length, address in network byte order. * * Presence of is_cidr is largely for historical reasons, though it might - * allow some code-sharing on the client side. We send it correctly on + * allow some code-sharing on the client side. We send it correctly on * output, but ignore the value on input. 
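[Editor's note] The network_in() comment above classifies an input as IPv6 if it contains a ':' anywhere, and otherwise assumes IPv4. A minimal standalone sketch of that heuristic using the standard inet_pton() API follows; parse_address() is an invented wrapper, not the backend's parser.

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

/*
 * Classify an address string the same simple way: any ':' means IPv6,
 * otherwise assume IPv4, then let inet_pton() validate it.
 */
static int
parse_address(const char *src)
{
    unsigned char buf[16];
    int af = strchr(src, ':') ? AF_INET6 : AF_INET;

    if (inet_pton(af, src, buf) != 1)
    {
        fprintf(stderr, "invalid %s address: \"%s\"\n",
                af == AF_INET6 ? "IPv6" : "IPv4", src);
        return -1;
    }
    printf("\"%s\" parsed as %s\n", src, af == AF_INET6 ? "IPv6" : "IPv4");
    return af;
}

int
main(void)
{
    parse_address("192.168.0.1");
    parse_address("::1");
    parse_address("not-an-address");
    return 0;
}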
*/ static inet * @@ -1401,7 +1401,7 @@ inetmi(PG_FUNCTION_ARGS) /* * We form the difference using the traditional complement, increment, * and add rule, with the increment part being handled by starting the - * carry off at 1. If you don't think integer arithmetic is done in + * carry off at 1. If you don't think integer arithmetic is done in * two's complement, too bad. */ int nb = ip_addrsize(ip); @@ -1423,7 +1423,7 @@ inetmi(PG_FUNCTION_ARGS) else { /* - * Input wider than int64: check for overflow. All bytes to + * Input wider than int64: check for overflow. All bytes to * the left of what will fit should be 0 or 0xFF, depending on * sign of the now-complete result. */ @@ -1454,9 +1454,9 @@ inetmi(PG_FUNCTION_ARGS) * XXX This should go away someday! * * This is a kluge needed because we don't yet support zones in stored inet - * values. Since the result of getnameinfo() might include a zone spec, + * values. Since the result of getnameinfo() might include a zone spec, * call this to remove it anywhere we want to feed getnameinfo's output to - * network_in. Beats failing entirely. + * network_in. Beats failing entirely. * * An alternative approach would be to let network_in ignore %-parts for * itself, but that would mean we'd silently drop zone specs in user input, diff --git a/src/backend/utils/adt/network_gist.c b/src/backend/utils/adt/network_gist.c index 0a826ae90a2..69b9d104749 100644 --- a/src/backend/utils/adt/network_gist.c +++ b/src/backend/utils/adt/network_gist.c @@ -7,7 +7,7 @@ * "union" of a set of INET/CIDR values. It works like this: * 1. If the values are not all of the same IP address family, the "union" * is a dummy value with family number zero, minbits zero, commonbits zero, - * address all zeroes. Otherwise: + * address all zeroes. Otherwise: * 2. The union has the common IP address family number. * 3. The union's minbits value is the smallest netmask length ("ip_bits") * of all the input values. @@ -202,8 +202,8 @@ inet_gist_consistent(PG_FUNCTION_ARGS) * * Compare available common prefix bits to the query, but not beyond * either the query's netmask or the minimum netmask among the represented - * values. If these bits don't match the query, we have our answer (and - * may or may not need to descend, depending on the operator). If they do + * values. If these bits don't match the query, we have our answer (and + * may or may not need to descend, depending on the operator). If they do * match, and we are not at a leaf, we descend in all cases. * * Note this is the final check for operators that only consider the @@ -682,7 +682,7 @@ inet_gist_picksplit(PG_FUNCTION_ARGS) { /* * If there's more than 2 families, all but maxfamily go into the - * left union. This could only happen if the inputs include some + * left union. This could only happen if the inputs include some * IPv4, some IPv6, and some already-multiple-family unions. */ tmp = DatumGetInetKeyP(ent[i].key); @@ -741,7 +741,7 @@ inet_gist_picksplit(PG_FUNCTION_ARGS) } /* - * Compute the union value for each side from scratch. In most cases we + * Compute the union value for each side from scratch. In most cases we * could approximate the union values with what we already know, but this * ensures that each side has minbits and commonbits set as high as * possible. 
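[Editor's note] The inetmi() comment above describes forming the difference of two addresses with the traditional complement-increment-and-add rule, folding the increment into a carry that starts at 1. Below is a standalone byte-wise sketch of that arithmetic on big-endian byte strings; byte_diff() and the example addresses are invented for illustration.

#include <stdio.h>
#include <stdint.h>

/*
 * Compute a - b for two equal-length big-endian byte strings as a signed
 * 64-bit value: add a to the complement of b, starting the carry at 1
 * (the "+1" of two's complement).
 */
static int64_t
byte_diff(const unsigned char *a, const unsigned char *b, int nb)
{
    uint64_t res = 0;
    int      carry = 1;

    /* Walk from least significant byte (the end) to most significant. */
    for (int i = nb - 1; i >= 0; i--)
    {
        int byte = a[i] + (~b[i] & 0xFF) + carry;

        carry = byte >> 8;
        res |= (uint64_t) (byte & 0xFF) << (8 * (nb - 1 - i));
    }

    /* Sign-extend from the top bit of the nb-byte result. */
    if (nb < 8 && (res & ((uint64_t) 1 << (8 * nb - 1))))
        res |= ~(((uint64_t) 1 << (8 * nb)) - 1);

    return (int64_t) res;
}

int
main(void)
{
    unsigned char a[4] = {192, 168, 1, 10};     /* 192.168.1.10 */
    unsigned char b[4] = {192, 168, 0, 250};    /* 192.168.0.250 */

    printf("difference = %lld\n", (long long) byte_diff(a, b, 4));  /* 16 */
    return 0;
}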
diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c index bf4f29d14d7..19d0bdcbb98 100644 --- a/src/backend/utils/adt/numeric.c +++ b/src/backend/utils/adt/numeric.c @@ -50,7 +50,7 @@ * Numeric values are represented in a base-NBASE floating point format. * Each "digit" ranges from 0 to NBASE-1. The type NumericDigit is signed * and wide enough to store a digit. We assume that NBASE*NBASE can fit in - * an int. Although the purely calculational routines could handle any even + * an int. Although the purely calculational routines could handle any even * NBASE that's less than sqrt(INT_MAX), in practice we are only interested * in NBASE a power of ten, so that I/O conversions and decimal rounding * are easy. Also, it's actually more efficient if NBASE is rather less than @@ -95,11 +95,11 @@ typedef int16 NumericDigit; * If the high bits of the first word of a NumericChoice (n_header, or * n_short.n_header, or n_long.n_sign_dscale) are NUMERIC_SHORT, then the * numeric follows the NumericShort format; if they are NUMERIC_POS or - * NUMERIC_NEG, it follows the NumericLong format. If they are NUMERIC_NAN, + * NUMERIC_NEG, it follows the NumericLong format. If they are NUMERIC_NAN, * it is a NaN. We currently always store a NaN using just two bytes (i.e. * only n_header), but previous releases used only the NumericLong format, * so we might find 4-byte NaNs on disk if a database has been migrated using - * pg_upgrade. In either case, when the high bits indicate a NaN, the + * pg_upgrade. In either case, when the high bits indicate a NaN, the * remaining bits are never examined. Currently, we always initialize these * to zero, but it might be possible to use them for some other purpose in * the future. @@ -207,19 +207,19 @@ struct NumericData : ((n)->choice.n_long.n_weight)) /* ---------- - * NumericVar is the format we use for arithmetic. The digit-array part + * NumericVar is the format we use for arithmetic. The digit-array part * is the same as the NumericData storage format, but the header is more * complex. * * The value represented by a NumericVar is determined by the sign, weight, * ndigits, and digits[] array. * Note: the first digit of a NumericVar's value is assumed to be multiplied - * by NBASE ** weight. Another way to say it is that there are weight+1 + * by NBASE ** weight. Another way to say it is that there are weight+1 * digits before the decimal point. It is possible to have weight < 0. * * buf points at the physical start of the palloc'd digit buffer for the - * NumericVar. digits points at the first digit in actual use (the one - * with the specified weight). We normally leave an unused digit or two + * NumericVar. digits points at the first digit in actual use (the one + * with the specified weight). We normally leave an unused digit or two * (preset to zeroes) between buf and digits, so that there is room to store * a carry out of the top digit without reallocating space. We just need to * decrement digits (and increment weight) to make room for the carry digit. @@ -596,7 +596,7 @@ numeric_maximum_size(int32 typmod) * In most cases, the size of a numeric will be smaller than the value * computed below, because the varlena header will typically get toasted * down to a single byte before being stored on disk, and it may also be - * possible to use a short numeric header. But our job here is to compute + * possible to use a short numeric header. But our job here is to compute * the worst case. 
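[Editor's note] The NumericVar comments above explain that the first digit of the digit array is multiplied by NBASE**weight, i.e. there are weight+1 base-NBASE digits before the decimal point. The sketch below decodes such a (weight, digits[]) pair into a double purely to make that convention concrete; decode() is an invented helper and double precision is obviously not how the backend does its arithmetic.

#include <stdio.h>

#define NBASE 10000

/*
 * Decode the value represented by a weight + digit array: the first digit
 * is worth NBASE^weight, the next NBASE^(weight-1), and so on.
 */
static double
decode(const int *digits, int ndigits, int weight)
{
    double result = 0.0;
    double scale = 1.0;
    int    i;

    /* Scale for the first (most significant) digit: NBASE^weight. */
    for (i = 0; i < weight; i++)
        scale *= NBASE;
    for (i = 0; i > weight; i--)
        scale /= NBASE;

    for (i = 0; i < ndigits; i++)
    {
        result += digits[i] * scale;
        scale /= NBASE;
    }
    return result;
}

int
main(void)
{
    /* 1234567.89 == 123 * 10000^1 + 4567 * 10000^0 + 8900 * 10000^-1 */
    int digits[] = {123, 4567, 8900};

    printf("%.2f\n", decode(digits, 3, 1));
    return 0;
}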
*/ return NUMERIC_HDRSZ + (numeric_digits * sizeof(NumericDigit)); @@ -636,7 +636,8 @@ numeric_normalize(Numeric num) { NumericVar x; char *str; - int orig, last; + int orig, + last; /* * Handle NaN @@ -754,7 +755,7 @@ numeric_send(PG_FUNCTION_ARGS) * * Flatten calls to numeric's length coercion function that solely represent * increases in allowable precision. Scale changes mutate every datum, so - * they are unoptimizable. Some values, e.g. 1E-1001, can only fit into an + * they are unoptimizable. Some values, e.g. 1E-1001, can only fit into an * unconstrained numeric, so a change from an unconstrained numeric to any * constrained numeric is also unoptimizable. */ @@ -784,7 +785,7 @@ numeric_transform(PG_FUNCTION_ARGS) * If new_typmod < VARHDRSZ, the destination is unconstrained; that's * always OK. If old_typmod >= VARHDRSZ, the source is constrained, * and we're OK if the scale is unchanged and the precision is not - * decreasing. See further notes in function header comment. + * decreasing. See further notes in function header comment. */ if (new_typmod < (int32) VARHDRSZ || (old_typmod >= (int32) VARHDRSZ && @@ -996,7 +997,7 @@ numeric_uminus(PG_FUNCTION_ARGS) /* * The packed format is known to be totally zero digit trimmed always. So - * we can identify a ZERO by the fact that there are no digits at all. Do + * we can identify a ZERO by the fact that there are no digits at all. Do * nothing to a zero. */ if (NUMERIC_NDIGITS(num) != 0) @@ -1972,7 +1973,7 @@ numeric_sqrt(PG_FUNCTION_ARGS) PG_RETURN_NUMERIC(make_result(&const_nan)); /* - * Unpack the argument and determine the result scale. We choose a scale + * Unpack the argument and determine the result scale. We choose a scale * to give at least NUMERIC_MIN_SIG_DIGITS significant digits; but in any * case not less than the input's dscale. */ @@ -2023,7 +2024,7 @@ numeric_exp(PG_FUNCTION_ARGS) PG_RETURN_NUMERIC(make_result(&const_nan)); /* - * Unpack the argument and determine the result scale. We choose a scale + * Unpack the argument and determine the result scale. We choose a scale * to give at least NUMERIC_MIN_SIG_DIGITS significant digits; but in any * case not less than the input's dscale. */ @@ -2517,7 +2518,7 @@ typedef struct NumericAggState NumericVar sumX; /* sum of processed numbers */ NumericVar sumX2; /* sum of squares of processed numbers */ int maxScale; /* maximum scale seen so far */ - int64 maxScaleCount; /* number of values seen with maximum scale */ + int64 maxScaleCount; /* number of values seen with maximum scale */ int64 NaNcount; /* count of NaN values (not included in N!) */ } NumericAggState; @@ -2652,8 +2653,8 @@ do_numeric_discard(NumericAggState *state, Numeric newval) if (state->maxScaleCount > 1 || state->maxScale == 0) { /* - * Some remaining inputs have same dscale, or dscale hasn't - * gotten above zero anyway + * Some remaining inputs have same dscale, or dscale hasn't gotten + * above zero anyway */ state->maxScaleCount--; } @@ -2767,9 +2768,9 @@ numeric_accum_inv(PG_FUNCTION_ARGS) /* * Integer data types all use Numeric accumulators to share code and - * avoid risk of overflow. For int2 and int4 inputs, Numeric accumulation + * avoid risk of overflow. For int2 and int4 inputs, Numeric accumulation * is overkill for the N and sum(X) values, but definitely not overkill - * for the sum(X*X) value. Hence, we use int2_accum and int4_accum only + * for the sum(X*X) value. 
Hence, we use int2_accum and int4_accum only * for stddev/variance --- there are faster special-purpose accumulator * routines for SUM and AVG of these datatypes. */ @@ -2965,7 +2966,7 @@ numeric_avg(PG_FUNCTION_ARGS) if (state == NULL || (state->N + state->NaNcount) == 0) PG_RETURN_NULL(); - if (state->NaNcount > 0) /* there was at least one NaN input */ + if (state->NaNcount > 0) /* there was at least one NaN input */ PG_RETURN_NUMERIC(make_result(&const_nan)); N_datum = DirectFunctionCall1(int8_numeric, Int64GetDatum(state->N)); @@ -2985,7 +2986,7 @@ numeric_sum(PG_FUNCTION_ARGS) if (state == NULL || (state->N + state->NaNcount) == 0) PG_RETURN_NULL(); - if (state->NaNcount > 0) /* there was at least one NaN input */ + if (state->NaNcount > 0) /* there was at least one NaN input */ PG_RETURN_NUMERIC(make_result(&const_nan)); PG_RETURN_NUMERIC(make_result(&(state->sumX))); @@ -3167,7 +3168,7 @@ numeric_stddev_pop(PG_FUNCTION_ARGS) * the initial condition of the transition data value needs to be NULL. This * means we can't rely on ExecAgg to automatically insert the first non-null * data value into the transition data: it doesn't know how to do the type - * conversion. The upshot is that these routines have to be marked non-strict + * conversion. The upshot is that these routines have to be marked non-strict * and handle substitution of the first non-null input themselves. * * Note: these functions are used only in plain aggregation mode. @@ -3653,7 +3654,7 @@ set_var_from_str(const char *str, const char *cp, NumericVar *dest) /* * We first parse the string to extract decimal digits and determine the - * correct decimal weight. Then convert to NBASE representation. + * correct decimal weight. Then convert to NBASE representation. */ switch (*cp) { @@ -4261,7 +4262,7 @@ apply_typmod(NumericVar *var, int32 typmod) /* * Convert numeric to int8, rounding if needed. * - * If overflow, return FALSE (no error is raised). Return TRUE if okay. + * If overflow, return FALSE (no error is raised). Return TRUE if okay. */ static bool numericvar_to_int8(NumericVar *var, int64 *result) @@ -4732,7 +4733,7 @@ sub_var(NumericVar *var1, NumericVar *var2, NumericVar *result) * mul_var() - * * Multiplication on variable level. Product of var1 * var2 is stored - * in result. Result is rounded to no more than rscale fractional digits. + * in result. Result is rounded to no more than rscale fractional digits. */ static void mul_var(NumericVar *var1, NumericVar *var2, NumericVar *result, @@ -4776,7 +4777,7 @@ mul_var(NumericVar *var1, NumericVar *var2, NumericVar *result, /* * Determine number of result digits to compute. If the exact result * would have more than rscale fractional digits, truncate the computation - * with MUL_GUARD_DIGITS guard digits. We do that by pretending that one + * with MUL_GUARD_DIGITS guard digits. We do that by pretending that one * or both inputs have fewer digits than they really do. */ res_ndigits = var1ndigits + var2ndigits + 1; @@ -5019,7 +5020,7 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result, * * We need the first divisor digit to be >= NBASE/2. If it isn't, * make it so by scaling up both the divisor and dividend by the - * factor "d". (The reason for allocating dividend[0] above is to + * factor "d". (The reason for allocating dividend[0] above is to * leave room for possible carry here.) 
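[Editor's note] The div_var() comment above mentions scaling both divisor and dividend by a factor "d" so that the leading divisor digit becomes at least NBASE/2 (Knuth's normalization step). Here is a small standalone sketch of just that scaling step on a base-NBASE digit array; scale_digits() and the sample divisor are invented, and the full division algorithm is not reproduced.

#include <stdio.h>

#define NBASE      10000
#define HALF_NBASE 5000

/*
 * Multiply a base-NBASE digit array (most significant digit first) by a
 * small factor in place, propagating carries from right to left.
 */
static void
scale_digits(int *digits, int ndigits, int factor)
{
    int carry = 0;

    for (int i = ndigits - 1; i >= 0; i--)
    {
        int prod = digits[i] * factor + carry;

        digits[i] = prod % NBASE;
        carry = prod / NBASE;
    }
    /* With factor = NBASE / (leading digit + 1), no carry escapes the array. */
}

int
main(void)
{
    /* Divisor 0012 3456: its leading digit 12 is far below NBASE/2. */
    int divisor[] = {12, 3456};
    int d = NBASE / (divisor[0] + 1);   /* normalization factor, Knuth step D1 */

    scale_digits(divisor, 2, d);
    printf("d = %d, scaled divisor = %04d %04d (leading digit >= %d)\n",
           d, divisor[0], divisor[1], HALF_NBASE);
    return 0;
}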
*/ if (divisor[1] < HALF_NBASE) @@ -5063,7 +5064,7 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result, /* * If next2digits are 0, then quotient digit must be 0 and there's - * no need to adjust the working dividend. It's worth testing + * no need to adjust the working dividend. It's worth testing * here to fall out ASAP when processing trailing zeroes in a * dividend. */ @@ -5081,7 +5082,7 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result, /* * Adjust quotient digit if it's too large. Knuth proves that * after this step, the quotient digit will be either correct or - * just one too large. (Note: it's OK to use dividend[j+2] here + * just one too large. (Note: it's OK to use dividend[j+2] here * because we know the divisor length is at least 2.) */ while (divisor2 * qhat > @@ -5256,7 +5257,7 @@ div_var_fast(NumericVar *var1, NumericVar *var2, NumericVar *result, * dividend's digits (plus appended zeroes to reach the desired precision * including guard digits). Each step of the main loop computes an * (approximate) quotient digit and stores it into div[], removing one - * position of dividend space. A final pass of carry propagation takes + * position of dividend space. A final pass of carry propagation takes * care of any mistaken quotient digits. */ div = (int *) palloc0((div_ndigits + 1) * sizeof(int)); @@ -6106,7 +6107,7 @@ power_var_int(NumericVar *base, int exp, NumericVar *result, int rscale) /* * The general case repeatedly multiplies base according to the bit - * pattern of exp. We do the multiplications with some extra precision. + * pattern of exp. We do the multiplications with some extra precision. */ neg = (exp < 0); exp = Abs(exp); diff --git a/src/backend/utils/adt/oid.c b/src/backend/utils/adt/oid.c index 8945ef43f01..2badb558f03 100644 --- a/src/backend/utils/adt/oid.c +++ b/src/backend/utils/adt/oid.c @@ -318,7 +318,7 @@ oidparse(Node *node) /* * Values too large for int4 will be represented as Float - * constants by the lexer. Accept these if they are valid OID + * constants by the lexer. Accept these if they are valid OID * strings. */ return oidin_subr(strVal(node), NULL); diff --git a/src/backend/utils/adt/orderedsetaggs.c b/src/backend/utils/adt/orderedsetaggs.c index 99577a549e6..efb0411c228 100644 --- a/src/backend/utils/adt/orderedsetaggs.c +++ b/src/backend/utils/adt/orderedsetaggs.c @@ -462,7 +462,7 @@ percentile_disc_final(PG_FUNCTION_ARGS) /* * Note: we *cannot* clean up the tuplesort object here, because the value - * to be returned is allocated inside its sortcontext. We could use + * to be returned is allocated inside its sortcontext. We could use * datumCopy to copy it out of there, but it doesn't seem worth the * trouble, since the cleanup callback will clear the tuplesort later. */ @@ -580,7 +580,7 @@ percentile_cont_final_common(FunctionCallInfo fcinfo, /* * Note: we *cannot* clean up the tuplesort object here, because the value - * to be returned may be allocated inside its sortcontext. We could use + * to be returned may be allocated inside its sortcontext. We could use * datumCopy to copy it out of there, but it doesn't seem worth the * trouble, since the cleanup callback will clear the tuplesort later. */ @@ -1086,7 +1086,7 @@ mode_final(PG_FUNCTION_ARGS) /* * Note: we *cannot* clean up the tuplesort object here, because the value - * to be returned is allocated inside its sortcontext. We could use + * to be returned is allocated inside its sortcontext. 
We could use * datumCopy to copy it out of there, but it doesn't seem worth the * trouble, since the cleanup callback will clear the tuplesort later. */ @@ -1331,7 +1331,7 @@ hypothetical_dense_rank_final(PG_FUNCTION_ARGS) /* * We alternate fetching into tupslot and extraslot so that we have the - * previous row available for comparisons. This is accomplished by + * previous row available for comparisons. This is accomplished by * swapping the slot pointer variables after each row. */ extraslot = MakeSingleTupleTableSlot(osastate->qstate->tupdesc); diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c index 0c8474df54a..94bb5a47bb7 100644 --- a/src/backend/utils/adt/pg_locale.c +++ b/src/backend/utils/adt/pg_locale.c @@ -20,12 +20,12 @@ * * The other categories, LC_MONETARY, LC_NUMERIC, and LC_TIME are also * settable at run-time. However, we don't actually set those locale - * categories permanently. This would have bizarre effects like no + * categories permanently. This would have bizarre effects like no * longer accepting standard floating-point literals in some locales. * Instead, we only set the locales briefly when needed, cache the * required information obtained from localeconv(), and set them back. * The cached information is only used by the formatting functions - * (to_char, etc.) and the money type. For the user, this should all be + * (to_char, etc.) and the money type. For the user, this should all be * transparent. * * !!! NOW HEAR THIS !!! @@ -39,7 +39,7 @@ * fail = true; * setlocale(category, save); * DOES NOT WORK RELIABLY: on some platforms the second setlocale() call - * will change the memory save is pointing at. To do this sort of thing + * will change the memory save is pointing at. To do this sort of thing * safely, you *must* pstrdup what setlocale returns the first time. * * FYI, The Open Group locale standard is defined here: @@ -243,7 +243,7 @@ pg_perm_setlocale(int category, const char *locale) * Is the locale name valid for the locale category? * * If successful, and canonname isn't NULL, a palloc'd copy of the locale's - * canonical name is stored there. This is especially useful for figuring out + * canonical name is stored there. This is especially useful for figuring out * what locale name "" means (ie, the server environment value). (Actually, * it seems that on most implementations that's the only thing it's good for; * we could wish that setlocale gave back a canonically spelled version of @@ -286,7 +286,7 @@ check_locale(int category, const char *locale, char **canonname) * * For most locale categories, the assign hook doesn't actually set the locale * permanently, just reset flags so that the next use will cache the - * appropriate values. (See explanation at the top of this file.) + * appropriate values. (See explanation at the top of this file.) * * Note: we accept value = "" as selecting the postmaster's environment * value, whatever it was (so long as the environment setting is legal). @@ -463,6 +463,7 @@ PGLC_localeconv(void) save_lc_numeric = pstrdup(save_lc_numeric); #ifdef WIN32 + /* * Ideally, monetary and numeric local symbols could be returned in any * server encoding. Unfortunately, the WIN32 API does not allow @@ -644,6 +645,7 @@ cache_locale_time(void) save_lc_time = pstrdup(save_lc_time); #ifdef WIN32 + /* * On WIN32, there is no way to get locale-specific time values in a * specified locale, like we do for monetary/numeric. 
We can only get @@ -729,13 +731,13 @@ cache_locale_time(void) * Convert a Windows setlocale() argument to a Unix-style one. * * Regardless of platform, we install message catalogs under a Unix-style - * LL[_CC][.ENCODING][@VARIANT] naming convention. Only LC_MESSAGES settings + * LL[_CC][.ENCODING][@VARIANT] naming convention. Only LC_MESSAGES settings * following that style will elicit localized interface strings. * * Before Visual Studio 2012 (msvcr110.dll), Windows setlocale() accepted "C" * (but not "c") and strings of the form <Language>[_<Country>][.<CodePage>], * case-insensitive. setlocale() returns the fully-qualified form; for - * example, setlocale("thaI") returns "Thai_Thailand.874". Internally, + * example, setlocale("thaI") returns "Thai_Thailand.874". Internally, * setlocale() and _create_locale() select a "locale identifier"[1] and store * it in an undocumented _locale_t field. From that LCID, we can retrieve the * ISO 639 language and the ISO 3166 country. Character encoding does not @@ -746,12 +748,12 @@ cache_locale_time(void) * Studio 2012, setlocale() accepts locale names in addition to the strings it * accepted historically. It does not standardize them; setlocale("Th-tH") * returns "Th-tH". setlocale(category, "") still returns a traditional - * string. Furthermore, msvcr110.dll changed the undocumented _locale_t + * string. Furthermore, msvcr110.dll changed the undocumented _locale_t * content to carry locale names instead of locale identifiers. * * MinGW headers declare _create_locale(), but msvcrt.dll lacks that symbol. * IsoLocaleName() always fails in a MinGW-built postgres.exe, so only - * Unix-style values of the lc_messages GUC can elicit localized messages. In + * Unix-style values of the lc_messages GUC can elicit localized messages. In * particular, every lc_messages setting that initdb can select automatically * will yield only C-locale messages. XXX This could be fixed by running the * fully-qualified locale name through a lookup table. @@ -795,7 +797,7 @@ IsoLocaleName(const char *winlocname) * need not standardize letter case here. So long as we do not ship * message catalogs for which it would matter, we also need not * translate the script/variant portion, e.g. uz-Cyrl-UZ to - * uz_UZ@cyrillic. Simply replace the hyphen with an underscore. + * uz_UZ@cyrillic. Simply replace the hyphen with an underscore. * * Note that the locale name can be less-specific than the value we * would derive under earlier Visual Studio releases. For example, @@ -850,7 +852,7 @@ IsoLocaleName(const char *winlocname) * could fail if the locale is C, so str_tolower() shouldn't call it * in that case. * - * Note that we currently lack any way to flush the cache. Since we don't + * Note that we currently lack any way to flush the cache. Since we don't * support ALTER COLLATION, this is OK. The worst case is that someone * drops a collation, and a useless cache entry hangs around in existing * backends. @@ -1044,7 +1046,7 @@ report_newlocale_failure(const char *localename) /* - * Create a locale_t from a collation OID. Results are cached for the + * Create a locale_t from a collation OID. Results are cached for the * lifetime of the backend. Thus, do not free the result with freelocale(). * * As a special optimization, the default/database collation returns 0. 
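[Editor's note] The comments above describe building a locale object once per collation and caching it for the lifetime of the backend rather than switching the global locale on every comparison. The sketch below shows that caching pattern with the POSIX-2008/glibc newlocale()/strcoll_l() API under an assumed glibc build; get_cached_locale() is an invented helper, and the "C" locale is chosen only so the example always succeeds.

#define _GNU_SOURCE
#include <locale.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Build a locale_t once and reuse it; never rebuilt, never freed here. */
static locale_t cached_loc = (locale_t) 0;

static locale_t
get_cached_locale(const char *name)
{
    if (cached_loc == (locale_t) 0)
    {
        cached_loc = newlocale(LC_COLLATE_MASK | LC_CTYPE_MASK, name,
                               (locale_t) 0);
        if (cached_loc == (locale_t) 0)
        {
            fprintf(stderr, "could not create locale \"%s\"\n", name);
            exit(1);
        }
    }
    return cached_loc;
}

int
main(void)
{
    locale_t loc = get_cached_locale("C");

    /* Locale-aware comparison without touching the global locale state. */
    printf("strcoll_l(\"abc\", \"abd\") = %d\n",
           strcoll_l("abc", "abd", loc) < 0 ? -1 : 1);
    return 0;
}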
@@ -1170,6 +1172,7 @@ wchar2char(char *to, const wchar_t *from, size_t tolen, pg_locale_t locale) return 0; #ifdef WIN32 + /* * On Windows, the "Unicode" locales assume UTF16 not UTF8 encoding, and * for some reason mbstowcs and wcstombs won't do this for us, so we use @@ -1226,7 +1229,7 @@ wchar2char(char *to, const wchar_t *from, size_t tolen, pg_locale_t locale) * This has almost the API of mbstowcs_l(), except that *from need not be * null-terminated; instead, the number of input bytes is specified as * fromlen. Also, we ereport() rather than returning -1 for invalid - * input encoding. tolen is the maximum number of wchar_t's to store at *to. + * input encoding. tolen is the maximum number of wchar_t's to store at *to. * The output will be zero-terminated iff there is room. */ size_t diff --git a/src/backend/utils/adt/pg_lsn.c b/src/backend/utils/adt/pg_lsn.c index e2b528a2435..d1448aee7bd 100644 --- a/src/backend/utils/adt/pg_lsn.c +++ b/src/backend/utils/adt/pg_lsn.c @@ -29,8 +29,10 @@ Datum pg_lsn_in(PG_FUNCTION_ARGS) { char *str = PG_GETARG_CSTRING(0); - int len1, len2; - uint32 id, off; + int len1, + len2; + uint32 id, + off; XLogRecPtr result; /* Sanity check input format. */ @@ -38,12 +40,12 @@ pg_lsn_in(PG_FUNCTION_ARGS) if (len1 < 1 || len1 > MAXPG_LSNCOMPONENT || str[len1] != '/') ereport(ERROR, (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), - errmsg("invalid input syntax for type pg_lsn: \"%s\"", str))); + errmsg("invalid input syntax for type pg_lsn: \"%s\"", str))); len2 = strspn(str + len1 + 1, "0123456789abcdefABCDEF"); if (len2 < 1 || len2 > MAXPG_LSNCOMPONENT || str[len1 + 1 + len2] != '\0') ereport(ERROR, (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), - errmsg("invalid input syntax for type pg_lsn: \"%s\"", str))); + errmsg("invalid input syntax for type pg_lsn: \"%s\"", str))); /* Decode result. 
*/ id = (uint32) strtoul(str, NULL, 16); @@ -59,7 +61,8 @@ pg_lsn_out(PG_FUNCTION_ARGS) XLogRecPtr lsn = PG_GETARG_LSN(0); char buf[MAXPG_LSNLEN + 1]; char *result; - uint32 id, off; + uint32 id, + off; /* Decode ID and offset */ id = (uint32) (lsn >> 32); @@ -83,7 +86,7 @@ pg_lsn_recv(PG_FUNCTION_ARGS) Datum pg_lsn_send(PG_FUNCTION_ARGS) { - XLogRecPtr lsn = PG_GETARG_LSN(0); + XLogRecPtr lsn = PG_GETARG_LSN(0); StringInfoData buf; pq_begintypsend(&buf); @@ -99,8 +102,8 @@ pg_lsn_send(PG_FUNCTION_ARGS) Datum pg_lsn_eq(PG_FUNCTION_ARGS) { - XLogRecPtr lsn1 = PG_GETARG_LSN(0); - XLogRecPtr lsn2 = PG_GETARG_LSN(1); + XLogRecPtr lsn1 = PG_GETARG_LSN(0); + XLogRecPtr lsn2 = PG_GETARG_LSN(1); PG_RETURN_BOOL(lsn1 == lsn2); } @@ -108,8 +111,8 @@ pg_lsn_eq(PG_FUNCTION_ARGS) Datum pg_lsn_ne(PG_FUNCTION_ARGS) { - XLogRecPtr lsn1 = PG_GETARG_LSN(0); - XLogRecPtr lsn2 = PG_GETARG_LSN(1); + XLogRecPtr lsn1 = PG_GETARG_LSN(0); + XLogRecPtr lsn2 = PG_GETARG_LSN(1); PG_RETURN_BOOL(lsn1 != lsn2); } @@ -117,8 +120,8 @@ pg_lsn_ne(PG_FUNCTION_ARGS) Datum pg_lsn_lt(PG_FUNCTION_ARGS) { - XLogRecPtr lsn1 = PG_GETARG_LSN(0); - XLogRecPtr lsn2 = PG_GETARG_LSN(1); + XLogRecPtr lsn1 = PG_GETARG_LSN(0); + XLogRecPtr lsn2 = PG_GETARG_LSN(1); PG_RETURN_BOOL(lsn1 < lsn2); } @@ -126,8 +129,8 @@ pg_lsn_lt(PG_FUNCTION_ARGS) Datum pg_lsn_gt(PG_FUNCTION_ARGS) { - XLogRecPtr lsn1 = PG_GETARG_LSN(0); - XLogRecPtr lsn2 = PG_GETARG_LSN(1); + XLogRecPtr lsn1 = PG_GETARG_LSN(0); + XLogRecPtr lsn2 = PG_GETARG_LSN(1); PG_RETURN_BOOL(lsn1 > lsn2); } @@ -135,8 +138,8 @@ pg_lsn_gt(PG_FUNCTION_ARGS) Datum pg_lsn_le(PG_FUNCTION_ARGS) { - XLogRecPtr lsn1 = PG_GETARG_LSN(0); - XLogRecPtr lsn2 = PG_GETARG_LSN(1); + XLogRecPtr lsn1 = PG_GETARG_LSN(0); + XLogRecPtr lsn2 = PG_GETARG_LSN(1); PG_RETURN_BOOL(lsn1 <= lsn2); } @@ -144,8 +147,8 @@ pg_lsn_le(PG_FUNCTION_ARGS) Datum pg_lsn_ge(PG_FUNCTION_ARGS) { - XLogRecPtr lsn1 = PG_GETARG_LSN(0); - XLogRecPtr lsn2 = PG_GETARG_LSN(1); + XLogRecPtr lsn1 = PG_GETARG_LSN(0); + XLogRecPtr lsn2 = PG_GETARG_LSN(1); PG_RETURN_BOOL(lsn1 >= lsn2); } @@ -158,8 +161,8 @@ pg_lsn_ge(PG_FUNCTION_ARGS) Datum pg_lsn_mi(PG_FUNCTION_ARGS) { - XLogRecPtr lsn1 = PG_GETARG_LSN(0); - XLogRecPtr lsn2 = PG_GETARG_LSN(1); + XLogRecPtr lsn1 = PG_GETARG_LSN(0); + XLogRecPtr lsn2 = PG_GETARG_LSN(1); char buf[256]; Datum result; diff --git a/src/backend/utils/adt/pg_lzcompress.c b/src/backend/utils/adt/pg_lzcompress.c index 30f1c0ab1fe..fe088901f03 100644 --- a/src/backend/utils/adt/pg_lzcompress.c +++ b/src/backend/utils/adt/pg_lzcompress.c @@ -576,9 +576,9 @@ pglz_compress(const char *source, int32 slen, PGLZ_Header *dest, /* * Experiments suggest that these hash sizes work pretty well. A large - * hash table minimizes collision, but has a higher startup cost. For - * a small input, the startup cost dominates. The table size must be - * a power of two. + * hash table minimizes collision, but has a higher startup cost. For a + * small input, the startup cost dominates. The table size must be a power + * of two. */ if (slen < 128) hashsz = 512; @@ -615,7 +615,7 @@ pglz_compress(const char *source, int32 slen, PGLZ_Header *dest, /* * If we've emitted more than first_success_by bytes without finding - * anything compressible at all, fail. This lets us fall out + * anything compressible at all, fail. This lets us fall out * reasonably quickly when looking at incompressible input (such as * pre-compressed data). 
*/ @@ -639,7 +639,7 @@ pglz_compress(const char *source, int32 slen, PGLZ_Header *dest, hist_next, hist_recycle, dp, dend, mask); dp++; /* Do not do this ++ in the line above! */ - /* The macro would do it four times - Jan. */ + /* The macro would do it four times - Jan. */ } found_match = true; } @@ -653,7 +653,7 @@ pglz_compress(const char *source, int32 slen, PGLZ_Header *dest, hist_next, hist_recycle, dp, dend, mask); dp++; /* Do not do this ++ in the line above! */ - /* The macro would do it four times - Jan. */ + /* The macro would do it four times - Jan. */ } } diff --git a/src/backend/utils/adt/pgstatfuncs.c b/src/backend/utils/adt/pgstatfuncs.c index bf3084fce67..44ccd37e998 100644 --- a/src/backend/utils/adt/pgstatfuncs.c +++ b/src/backend/utils/adt/pgstatfuncs.c @@ -1797,5 +1797,5 @@ pg_stat_get_archiver(PG_FUNCTION_ARGS) /* Returns the record as Datum */ PG_RETURN_DATUM(HeapTupleGetDatum( - heap_form_tuple(tupdesc, values, nulls))); + heap_form_tuple(tupdesc, values, nulls))); } diff --git a/src/backend/utils/adt/pseudotypes.c b/src/backend/utils/adt/pseudotypes.c index a553c1abf1c..475ce13abf4 100644 --- a/src/backend/utils/adt/pseudotypes.c +++ b/src/backend/utils/adt/pseudotypes.c @@ -6,7 +6,7 @@ * A pseudo-type isn't really a type and never has any operations, but * we do need to supply input and output functions to satisfy the links * in the pseudo-type's entry in pg_type. In most cases the functions - * just throw an error if invoked. (XXX the error messages here cover + * just throw an error if invoked. (XXX the error messages here cover * the most common case, but might be confusing in some contexts. Can * we do better?) * @@ -139,7 +139,7 @@ anyarray_out(PG_FUNCTION_ARGS) * anyarray_recv - binary input routine for pseudo-type ANYARRAY. * * XXX this could actually be made to work, since the incoming array - * data will contain the element type OID. Need to think through + * data will contain the element type OID. Need to think through * type-safety issues before allowing it, however. */ Datum @@ -216,7 +216,7 @@ anyrange_out(PG_FUNCTION_ARGS) * void_in - input routine for pseudo-type VOID. * * We allow this so that PL functions can return VOID without any special - * hack in the PL handler. Whatever value the PL thinks it's returning + * hack in the PL handler. Whatever value the PL thinks it's returning * will just be ignored. */ Datum diff --git a/src/backend/utils/adt/rangetypes.c b/src/backend/utils/adt/rangetypes.c index 38b51035aec..bc8a480ed3e 100644 --- a/src/backend/utils/adt/rangetypes.c +++ b/src/backend/utils/adt/rangetypes.c @@ -1441,7 +1441,7 @@ tstzrange_subdiff(PG_FUNCTION_ARGS) * * This is for use by range-related functions that follow the convention * of using the fn_extra field as a pointer to the type cache entry for - * the range type. Functions that need to cache more information than + * the range type. Functions that need to cache more information than * that must fend for themselves. */ TypeCacheEntry * @@ -1465,7 +1465,7 @@ range_get_typcache(FunctionCallInfo fcinfo, Oid rngtypid) * range_serialize: construct a range value from bounds and empty-flag * * This does not force canonicalization of the range value. In most cases, - * external callers should only be canonicalization functions. Note that + * external callers should only be canonicalization functions. Note that * we perform some datatype-independent canonicalization checks anyway. 
*/ RangeType * @@ -1802,7 +1802,7 @@ range_cmp_bounds(TypeCacheEntry *typcache, RangeBound *b1, RangeBound *b2) * Compare two range boundary point values, returning <0, 0, or >0 according * to whether b1 is less than, equal to, or greater than b2. * - * This is similar to but simpler than range_cmp_bounds(). We just compare + * This is similar to but simpler than range_cmp_bounds(). We just compare * the values held in b1 and b2, ignoring inclusive/exclusive flags. The * lower/upper flags only matter for infinities, where they tell us if the * infinity is plus or minus. @@ -2283,7 +2283,7 @@ range_contains_elem_internal(TypeCacheEntry *typcache, RangeType *r, Datum val) /* * datum_compute_size() and datum_write() are used to insert the bound - * values into a range object. They are modeled after heaptuple.c's + * values into a range object. They are modeled after heaptuple.c's * heap_compute_data_size() and heap_fill_tuple(), but we need not handle * null values here. TYPE_IS_PACKABLE must test the same conditions as * heaptuple.c's ATT_IS_PACKABLE macro. diff --git a/src/backend/utils/adt/rangetypes_gist.c b/src/backend/utils/adt/rangetypes_gist.c index 13c87ea4a34..2bd28f50389 100644 --- a/src/backend/utils/adt/rangetypes_gist.c +++ b/src/backend/utils/adt/rangetypes_gist.c @@ -300,7 +300,7 @@ range_gist_penalty(PG_FUNCTION_ARGS) else if (orig_lower.infinite && orig_upper.infinite) { /* - * Original range requires broadening. (-inf; +inf) is most far + * Original range requires broadening. (-inf; +inf) is most far * from normal range in this case. */ *penalty = 2 * CONTAIN_EMPTY_PENALTY; @@ -497,7 +497,7 @@ range_gist_penalty(PG_FUNCTION_ARGS) /* * The GiST PickSplit method for ranges * - * Primarily, we try to segregate ranges of different classes. If splitting + * Primarily, we try to segregate ranges of different classes. If splitting * ranges of the same class, use the appropriate split method for that class. */ Datum @@ -668,7 +668,7 @@ range_gist_same(PG_FUNCTION_ARGS) /* * range_eq will ignore the RANGE_CONTAIN_EMPTY flag, so we have to check - * that for ourselves. More generally, if the entries have been properly + * that for ourselves. More generally, if the entries have been properly * normalized, then unequal flags bytes must mean unequal ranges ... so * let's just test all the flag bits at once. */ @@ -816,7 +816,7 @@ range_gist_consistent_int(TypeCacheEntry *typcache, StrategyNumber strategy, /* * Empty ranges are contained by anything, so if key is or - * contains any empty ranges, we must descend into it. Otherwise, + * contains any empty ranges, we must descend into it. Otherwise, * descend only if key overlaps the query. */ if (RangeIsOrContainsEmpty(key)) diff --git a/src/backend/utils/adt/regexp.c b/src/backend/utils/adt/regexp.c index 7c5b0d53bcf..caf45ef85f9 100644 --- a/src/backend/utils/adt/regexp.c +++ b/src/backend/utils/adt/regexp.c @@ -142,7 +142,7 @@ RE_compile_and_cache(text *text_re, int cflags, Oid collation) char errMsg[100]; /* - * Look for a match among previously compiled REs. Since the data + * Look for a match among previously compiled REs. Since the data * structure is self-organizing with most-used entries at the front, our * search strategy can just be to scan from the front. */ @@ -192,7 +192,7 @@ RE_compile_and_cache(text *text_re, int cflags, Oid collation) /* * Here and in other places in this file, do CHECK_FOR_INTERRUPTS - * before reporting a regex error. This is so that if the regex + * before reporting a regex error. 
This is so that if the regex * library aborts and returns REG_CANCEL, we don't print an error * message that implies the regex was invalid. */ @@ -298,7 +298,7 @@ RE_wchar_execute(regex_t *re, pg_wchar *data, int data_len, * dat_len --- the length of the data string * nmatch, pmatch --- optional return area for match details * - * Data is given in the database encoding. We internally + * Data is given in the database encoding. We internally * convert to array of pg_wchar which is what Spencer's regex package wants. */ static bool diff --git a/src/backend/utils/adt/regproc.c b/src/backend/utils/adt/regproc.c index 6210f45a195..c0314ee5322 100644 --- a/src/backend/utils/adt/regproc.c +++ b/src/backend/utils/adt/regproc.c @@ -85,7 +85,7 @@ regprocin(PG_FUNCTION_ARGS) /* * In bootstrap mode we assume the given name is not schema-qualified, and - * just search pg_proc for a unique match. This is needed for + * just search pg_proc for a unique match. This is needed for * initializing other system catalogs (pg_namespace may not exist yet, and * certainly there are no schemas other than pg_catalog). */ @@ -165,8 +165,8 @@ to_regproc(PG_FUNCTION_ARGS) FuncCandidateList clist; /* - * Parse the name into components and see if it matches any pg_proc entries - * in the current search path. + * Parse the name into components and see if it matches any pg_proc + * entries in the current search path. */ names = stringToQualifiedNameList(pro_name); clist = FuncnameGetCandidates(names, -1, NIL, false, false, true); @@ -295,7 +295,7 @@ regprocedurein(PG_FUNCTION_ARGS) /* * Else it's a name and arguments. Parse the name and arguments, look up * potential matches in the current namespace search list, and scan to see - * which one exactly matches the given argument types. (There will not be + * which one exactly matches the given argument types. (There will not be * more than one match.) * * XXX at present, this code will not work in bootstrap mode, hence this @@ -339,7 +339,7 @@ to_regprocedure(PG_FUNCTION_ARGS) /* * Parse the name and arguments, look up potential matches in the current * namespace search list, and scan to see which one exactly matches the - * given argument types. (There will not be more than one match.) + * given argument types. (There will not be more than one match.) */ parseNameAndArgTypes(pro_name, false, &names, &nargs, argtypes); @@ -376,7 +376,7 @@ format_procedure_qualified(Oid procedure_oid) * Routine to produce regprocedure names; see format_procedure above. * * force_qualify says whether to schema-qualify; if true, the name is always - * qualified regardless of search_path visibility. Otherwise the name is only + * qualified regardless of search_path visibility. Otherwise the name is only * qualified if the function is not in path. */ static char * @@ -510,7 +510,7 @@ regoperin(PG_FUNCTION_ARGS) /* * In bootstrap mode we assume the given name is not schema-qualified, and - * just search pg_operator for a unique match. This is needed for + * just search pg_operator for a unique match. This is needed for * initializing other system catalogs (pg_namespace may not exist yet, and * certainly there are no schemas other than pg_catalog). */ @@ -724,7 +724,7 @@ regoperatorin(PG_FUNCTION_ARGS) /* * Else it's a name and arguments. Parse the name and arguments, look up * potential matches in the current namespace search list, and scan to see - * which one exactly matches the given argument types. (There will not be + * which one exactly matches the given argument types. 
(There will not be * more than one match.) * * XXX at present, this code will not work in bootstrap mode, hence this @@ -770,7 +770,7 @@ to_regoperator(PG_FUNCTION_ARGS) /* * Parse the name and arguments, look up potential matches in the current * namespace search list, and scan to see which one exactly matches the - * given argument types. (There will not be more than one match.) + * given argument types. (There will not be more than one match.) */ parseNameAndArgTypes(opr_name_or_oid, true, &names, &nargs, argtypes); if (nargs == 1) @@ -1006,8 +1006,8 @@ to_regclass(PG_FUNCTION_ARGS) List *names; /* - * Parse the name into components and see if it matches any pg_class entries - * in the current search path. + * Parse the name into components and see if it matches any pg_class + * entries in the current search path. */ names = stringToQualifiedNameList(class_name); @@ -1045,7 +1045,7 @@ regclassout(PG_FUNCTION_ARGS) /* * In bootstrap mode, skip the fancy namespace stuff and just return - * the class name. (This path is only needed for debugging output + * the class name. (This path is only needed for debugging output * anyway.) */ if (IsBootstrapProcessingMode()) @@ -1560,7 +1560,7 @@ stringToQualifiedNameList(const char *string) /* * Given a C string, parse it into a qualified function or operator name - * followed by a parenthesized list of type names. Reduce the + * followed by a parenthesized list of type names. Reduce the * type names to an array of OIDs (returned into *nargs and *argtypes; * the argtypes array should be of size FUNC_MAX_ARGS). The function or * operator name is returned to *names as a List of Strings. diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c index 1e1e616fa48..d30847b34e6 100644 --- a/src/backend/utils/adt/ri_triggers.c +++ b/src/backend/utils/adt/ri_triggers.c @@ -698,7 +698,7 @@ ri_restrict_del(TriggerData *trigdata, bool is_no_action) /* * If another PK row now exists providing the old key values, we - * should not do anything. However, this check should only be + * should not do anything. However, this check should only be * made in the NO ACTION case; in RESTRICT cases we don't wish to * allow another row to be substituted. */ @@ -922,7 +922,7 @@ ri_restrict_upd(TriggerData *trigdata, bool is_no_action) /* * If another PK row now exists providing the old key values, we - * should not do anything. However, this check should only be + * should not do anything. However, this check should only be * made in the NO ACTION case; in RESTRICT cases we don't wish to * allow another row to be substituted. */ @@ -1850,7 +1850,7 @@ RI_FKey_setdefault_del(PG_FUNCTION_ARGS) * believe no check is necessary. So we need to do another lookup * now and in case a reference still exists, abort the operation. * That is already implemented in the NO ACTION trigger, so just - * run it. (This recheck is only needed in the SET DEFAULT case, + * run it. (This recheck is only needed in the SET DEFAULT case, * since CASCADE would remove such rows, while SET NULL is certain * to result in rows that satisfy the FK constraint.) */ @@ -2041,7 +2041,7 @@ RI_FKey_setdefault_upd(PG_FUNCTION_ARGS) * believe no check is necessary. So we need to do another lookup * now and in case a reference still exists, abort the operation. * That is already implemented in the NO ACTION trigger, so just - * run it. (This recheck is only needed in the SET DEFAULT case, + * run it. 
(This recheck is only needed in the SET DEFAULT case, * since CASCADE must change the FK key values, while SET NULL is * certain to result in rows that satisfy the FK constraint.) */ @@ -2397,7 +2397,7 @@ RI_Initial_Check(Trigger *trigger, Relation fk_rel, Relation pk_rel) * Temporarily increase work_mem so that the check query can be executed * more efficiently. It seems okay to do this because the query is simple * enough to not use a multiple of work_mem, and one typically would not - * have many large foreign-key validations happening concurrently. So + * have many large foreign-key validations happening concurrently. So * this seems to meet the criteria for being considered a "maintenance" * operation, and accordingly we use maintenance_work_mem. * @@ -2451,7 +2451,7 @@ RI_Initial_Check(Trigger *trigger, Relation fk_rel, Relation pk_rel) /* * The columns to look at in the result tuple are 1..N, not whatever - * they are in the fk_rel. Hack up riinfo so that the subroutines + * they are in the fk_rel. Hack up riinfo so that the subroutines * called here will behave properly. * * In addition to this, we have to pass the correct tupdesc to @@ -3180,7 +3180,7 @@ ri_ReportViolation(const RI_ConstraintInfo *riinfo, errhint("This is most likely due to a rule having rewritten the query."))); /* - * Determine which relation to complain about. If tupdesc wasn't passed + * Determine which relation to complain about. If tupdesc wasn't passed * by caller, assume the violator tuple came from there. */ onfk = (queryno == RI_PLAN_CHECK_LOOKUPPK); diff --git a/src/backend/utils/adt/rowtypes.c b/src/backend/utils/adt/rowtypes.c index 521c3daea7e..9543d01d492 100644 --- a/src/backend/utils/adt/rowtypes.c +++ b/src/backend/utils/adt/rowtypes.c @@ -279,7 +279,7 @@ record_in(PG_FUNCTION_ARGS) /* * We cannot return tuple->t_data because heap_form_tuple allocates it as * part of a larger chunk, and our caller may expect to be able to pfree - * our result. So must copy the info into a new palloc chunk. + * our result. So must copy the info into a new palloc chunk. */ result = (HeapTupleHeader) palloc(tuple->t_len); memcpy(result, tuple->t_data, tuple->t_len); @@ -623,7 +623,7 @@ record_recv(PG_FUNCTION_ARGS) /* * We cannot return tuple->t_data because heap_form_tuple allocates it as * part of a larger chunk, and our caller may expect to be able to pfree - * our result. So must copy the info into a new palloc chunk. + * our result. So must copy the info into a new palloc chunk. */ result = (HeapTupleHeader) palloc(tuple->t_len); memcpy(result, tuple->t_data, tuple->t_len); @@ -861,7 +861,7 @@ record_cmp(FunctionCallInfo fcinfo) /* * Scan corresponding columns, allowing for dropped columns in different - * places in the two rows. i1 and i2 are physical column indexes, j is + * places in the two rows. i1 and i2 are physical column indexes, j is * the logical column index. */ i1 = i2 = j = 0; @@ -1097,7 +1097,7 @@ record_eq(PG_FUNCTION_ARGS) /* * Scan corresponding columns, allowing for dropped columns in different - * places in the two rows. i1 and i2 are physical column indexes, j is + * places in the two rows. i1 and i2 are physical column indexes, j is * the logical column index. */ i1 = i2 = j = 0; @@ -1356,7 +1356,7 @@ record_image_cmp(FunctionCallInfo fcinfo) /* * Scan corresponding columns, allowing for dropped columns in different - * places in the two rows. i1 and i2 are physical column indexes, j is + * places in the two rows. i1 and i2 are physical column indexes, j is * the logical column index. 
*/ i1 = i2 = j = 0; @@ -1390,11 +1390,12 @@ record_image_cmp(FunctionCallInfo fcinfo) format_type_be(tupdesc2->attrs[i2]->atttypid), j + 1))); - /* - * The same type should have the same length (or both should be variable). - */ - Assert(tupdesc1->attrs[i1]->attlen == - tupdesc2->attrs[i2]->attlen); + /* + * The same type should have the same length (or both should be + * variable). + */ + Assert(tupdesc1->attrs[i1]->attlen == + tupdesc2->attrs[i2]->attlen); /* * We consider two NULLs equal; NULL > not-NULL. @@ -1421,8 +1422,8 @@ record_image_cmp(FunctionCallInfo fcinfo) { Size len1, len2; - struct varlena *arg1val; - struct varlena *arg2val; + struct varlena *arg1val; + struct varlena *arg2val; len1 = toast_raw_datum_size(values1[i1]); len2 = toast_raw_datum_size(values2[i2]); @@ -1632,7 +1633,7 @@ record_image_eq(PG_FUNCTION_ARGS) /* * Scan corresponding columns, allowing for dropped columns in different - * places in the two rows. i1 and i2 are physical column indexes, j is + * places in the two rows. i1 and i2 are physical column indexes, j is * the logical column index. */ i1 = i2 = j = 0; @@ -1690,8 +1691,8 @@ record_image_eq(PG_FUNCTION_ARGS) result = false; else { - struct varlena *arg1val; - struct varlena *arg2val; + struct varlena *arg1val; + struct varlena *arg2val; arg1val = PG_DETOAST_DATUM_PACKED(values1[i1]); arg2val = PG_DETOAST_DATUM_PACKED(values2[i2]); diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c index 36d9953108b..a30d8febf85 100644 --- a/src/backend/utils/adt/ruleutils.c +++ b/src/backend/utils/adt/ruleutils.c @@ -155,11 +155,11 @@ typedef struct * * Selecting aliases is unreasonably complicated because of the need to dump * rules/views whose underlying tables may have had columns added, deleted, or - * renamed since the query was parsed. We must nonetheless print the rule/view + * renamed since the query was parsed. We must nonetheless print the rule/view * in a form that can be reloaded and will produce the same results as before. * * For each RTE used in the query, we must assign column aliases that are - * unique within that RTE. SQL does not require this of the original query, + * unique within that RTE. SQL does not require this of the original query, * but due to factors such as *-expansion we need to be able to uniquely * reference every column in a decompiled query. As long as we qualify all * column references, per-RTE uniqueness is sufficient for that. @@ -214,8 +214,8 @@ typedef struct /* * new_colnames is an array containing column aliases to use for columns * that would exist if the query was re-parsed against the current - * definitions of its base tables. This is what to print as the column - * alias list for the RTE. This array does not include dropped columns, + * definitions of its base tables. This is what to print as the column + * alias list for the RTE. This array does not include dropped columns, * but it will include columns added since original parsing. Indexes in * it therefore have little to do with current varattno values. As above, * entries are unique unless this is for an unnamed JOIN RTE. (In such an @@ -1077,7 +1077,7 @@ pg_get_indexdef_worker(Oid indexrelid, int colno, context = deparse_context_for(get_relation_name(indrelid), indrelid); /* - * Start the index definition. Note that the index's name should never be + * Start the index definition. Note that the index's name should never be * schema-qualified, but the indexed rel's name may be. 
*/ initStringInfo(&buf); @@ -1304,9 +1304,9 @@ pg_get_constraintdef_worker(Oid constraintId, bool fullCommand, HeapTuple tup; Form_pg_constraint conForm; StringInfoData buf; - SysScanDesc scandesc; + SysScanDesc scandesc; ScanKeyData scankey[1]; - Snapshot snapshot = RegisterSnapshot(GetTransactionSnapshot()); + Snapshot snapshot = RegisterSnapshot(GetTransactionSnapshot()); Relation relation = heap_open(ConstraintRelationId, AccessShareLock); ScanKeyInit(&scankey[0], @@ -1315,15 +1315,15 @@ pg_get_constraintdef_worker(Oid constraintId, bool fullCommand, ObjectIdGetDatum(constraintId)); scandesc = systable_beginscan(relation, - ConstraintOidIndexId, - true, - snapshot, - 1, - scankey); + ConstraintOidIndexId, + true, + snapshot, + 1, + scankey); /* - * We later use the tuple with SysCacheGetAttr() as if we - * had obtained it via SearchSysCache, which works fine. + * We later use the tuple with SysCacheGetAttr() as if we had obtained it + * via SearchSysCache, which works fine. */ tup = systable_getnext(scandesc); @@ -1806,7 +1806,7 @@ pg_get_serial_sequence(PG_FUNCTION_ARGS) SysScanDesc scan; HeapTuple tup; - /* Look up table name. Can't lock it - we might not have privileges. */ + /* Look up table name. Can't lock it - we might not have privileges. */ tablerv = makeRangeVarFromNameList(textToQualifiedNameList(tablename)); tableOid = RangeVarGetRelid(tablerv, NoLock, false); @@ -2406,8 +2406,10 @@ pg_get_function_arg_default(PG_FUNCTION_ARGS) proc = (Form_pg_proc) GETSTRUCT(proctup); - /* Calculate index into proargdefaults: proargdefaults corresponds to the - * last N input arguments, where N = pronargdefaults. */ + /* + * Calculate index into proargdefaults: proargdefaults corresponds to the + * last N input arguments, where N = pronargdefaults. + */ nth_default = nth_inputarg - 1 - (proc->pronargs - proc->pronargdefaults); if (nth_default < 0 || nth_default >= list_length(argdefaults)) @@ -2444,7 +2446,7 @@ deparse_expression(Node *expr, List *dpcontext, * tree (ie, not the raw output of gram.y). * * dpcontext is a list of deparse_namespace nodes representing the context - * for interpreting Vars in the node tree. It can be NIL if no Vars are + * for interpreting Vars in the node tree. It can be NIL if no Vars are * expected. * * forceprefix is TRUE to force all Vars to be prefixed with their table names. @@ -2484,7 +2486,7 @@ deparse_expression_pretty(Node *expr, List *dpcontext, * * Given the reference name (alias) and OID of a relation, build deparsing * context for an expression referencing only that relation (as varno 1, - * varlevelsup 0). This is sufficient for many uses of deparse_expression. + * varlevelsup 0). This is sufficient for many uses of deparse_expression. * ---------- */ List * @@ -2555,7 +2557,7 @@ deparse_context_for_planstate(Node *planstate, List *ancestors, dpns->ctes = NIL; /* - * Set up column name aliases. We will get rather bogus results for join + * Set up column name aliases. We will get rather bogus results for join * RTEs, but that doesn't matter because plan trees don't contain any join * alias Vars. */ @@ -3113,7 +3115,7 @@ set_relation_column_names(deparse_namespace *dpns, RangeTblEntry *rte, /* * Scan the columns, select a unique alias for each one, and store it in * colinfo->colnames and colinfo->new_colnames. The former array has NULL - * entries for dropped columns, the latter omits them. Also mark + * entries for dropped columns, the latter omits them. 
Also mark * new_colnames entries as to whether they are new since parse time; this * is the case for entries beyond the length of rte->eref->colnames. */ @@ -3168,7 +3170,7 @@ set_relation_column_names(deparse_namespace *dpns, RangeTblEntry *rte, /* * For a relation RTE, we need only print the alias column names if any - * are different from the underlying "real" names. For a function RTE, + * are different from the underlying "real" names. For a function RTE, * always emit a complete column alias list; this is to protect against * possible instability of the default column names (eg, from altering * parameter names). For other RTE types, print if we changed anything OR @@ -3631,7 +3633,7 @@ identify_join_columns(JoinExpr *j, RangeTblEntry *jrte, /* * If there's a USING clause, deconstruct the join quals to identify the - * merged columns. This is a tad painful but if we cannot rely on the + * merged columns. This is a tad painful but if we cannot rely on the * column names, there is no other representation of which columns were * joined by USING. (Unless the join type is FULL, we can't tell from the * joinaliasvars list which columns are merged.) Note: we assume that the @@ -3765,7 +3767,7 @@ set_deparse_planstate(deparse_namespace *dpns, PlanState *ps) * We special-case Append and MergeAppend to pretend that the first child * plan is the OUTER referent; we have to interpret OUTER Vars in their * tlists according to one of the children, and the first one is the most - * natural choice. Likewise special-case ModifyTable to pretend that the + * natural choice. Likewise special-case ModifyTable to pretend that the * first child plan is the OUTER referent; this is to support RETURNING * lists containing references to non-target relations. */ @@ -4167,8 +4169,8 @@ get_query_def(Query *query, StringInfo buf, List *parentnamespace, /* * Before we begin to examine the query, acquire locks on referenced - * relations, and fix up deleted columns in JOIN RTEs. This ensures - * consistent results. Note we assume it's OK to scribble on the passed + * relations, and fix up deleted columns in JOIN RTEs. This ensures + * consistent results. Note we assume it's OK to scribble on the passed * querytree! * * We are only deparsing the query (we are not about to execute it), so we @@ -4641,7 +4643,7 @@ get_target_list(List *targetList, deparse_context *context, } /* - * Figure out what the result column should be called. In the context + * Figure out what the result column should be called. In the context * of a view, use the view's tuple descriptor (so as to pick up the * effects of any column RENAME that's been done on the view). * Otherwise, just use what we can find in the TLE. @@ -4863,7 +4865,7 @@ get_rule_sortgroupclause(SortGroupClause *srt, List *tlist, bool force_colno, * expression is a constant, force it to be dumped with an explicit cast * as decoration --- this is because a simple integer constant is * ambiguous (and will be misinterpreted by findTargetlistEntry()) if we - * dump it without any decoration. Otherwise, just dump the expression + * dump it without any decoration. Otherwise, just dump the expression * normally. */ if (force_colno) @@ -5558,8 +5560,8 @@ get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context) /* * If it's an unnamed join, look at the expansion of the alias variable. * If it's a simple reference to one of the input vars, then recursively - * print the name of that var instead. 
When it's not a simple reference, - * we have to just print the unqualified join column name. (This can only + * print the name of that var instead. When it's not a simple reference, + * we have to just print the unqualified join column name. (This can only * happen with "dangerous" merged columns in a JOIN USING; we took pains * previously to make the unqualified column name unique in such cases.) * @@ -5587,7 +5589,7 @@ get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context) /* * Unnamed join has no refname. (Note: since it's unnamed, there is * no way the user could have referenced it to create a whole-row Var - * for it. So we don't have to cover that case below.) + * for it. So we don't have to cover that case below.) */ Assert(refname == NULL); } @@ -5628,7 +5630,7 @@ get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context) /* - * Get the name of a field of an expression of composite type. The + * Get the name of a field of an expression of composite type. The * expression is usually a Var, but we handle other cases too. * * levelsup is an extra offset to interpret the Var's varlevelsup correctly. @@ -5638,7 +5640,7 @@ get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context) * could also be RECORD. Since no actual table or view column is allowed to * have type RECORD, a Var of type RECORD must refer to a JOIN or FUNCTION RTE * or to a subquery output. We drill down to find the ultimate defining - * expression and attempt to infer the field name from it. We ereport if we + * expression and attempt to infer the field name from it. We ereport if we * can't determine the name. * * Similarly, a PARAM of type RECORD has to refer to some expression of @@ -6003,7 +6005,7 @@ get_name_for_var_field(Var *var, int fieldno, /* * We now have an expression we can't expand any more, so see if - * get_expr_result_type() can do anything with it. If not, pass to + * get_expr_result_type() can do anything with it. If not, pass to * lookup_rowtype_tupdesc() which will probably fail, but will give an * appropriate error message while failing. */ @@ -6021,7 +6023,7 @@ get_name_for_var_field(Var *var, int fieldno, * reference a parameter supplied by an upper NestLoop or SubPlan plan node. * * If successful, return the expression and set *dpns_p and *ancestor_cell_p - * appropriately for calling push_ancestor_plan(). If no referent can be + * appropriately for calling push_ancestor_plan(). If no referent can be * found, return NULL. */ static Node * @@ -6153,7 +6155,7 @@ get_parameter(Param *param, deparse_context *context) /* * If it's a PARAM_EXEC parameter, try to locate the expression from which - * the parameter was computed. Note that failing to find a referent isn't + * the parameter was computed. Note that failing to find a referent isn't * an error, since the Param might well be a subplan output rather than an * input. */ @@ -6631,10 +6633,10 @@ get_rule_expr(Node *node, deparse_context *context, /* * If there's a refassgnexpr, we want to print the node in the - * format "array[subscripts] := refassgnexpr". This is not + * format "array[subscripts] := refassgnexpr". This is not * legal SQL, so decompilation of INSERT or UPDATE statements * should always use processIndirection as part of the - * statement-level syntax. We should only see this when + * statement-level syntax. We should only see this when * EXPLAIN tries to print the targetlist of a plan resulting * from such a statement. 
*/ @@ -6793,7 +6795,7 @@ get_rule_expr(Node *node, deparse_context *context, /* * We cannot see an already-planned subplan in rule deparsing, - * only while EXPLAINing a query plan. We don't try to + * only while EXPLAINing a query plan. We don't try to * reconstruct the original SQL, just reference the subplan * that appears elsewhere in EXPLAIN's result. */ @@ -6866,14 +6868,14 @@ get_rule_expr(Node *node, deparse_context *context, * There is no good way to represent a FieldStore as real SQL, * so decompilation of INSERT or UPDATE statements should * always use processIndirection as part of the - * statement-level syntax. We should only get here when + * statement-level syntax. We should only get here when * EXPLAIN tries to print the targetlist of a plan resulting * from such a statement. The plan case is even harder than * ordinary rules would be, because the planner tries to * collapse multiple assignments to the same field or subfield * into one FieldStore; so we can see a list of target fields * not just one, and the arguments could be FieldStores - * themselves. We don't bother to try to print the target + * themselves. We don't bother to try to print the target * field names; we just print the source arguments, with a * ROW() around them if there's more than one. This isn't * terribly complete, but it's probably good enough for @@ -7668,7 +7670,7 @@ get_agg_expr(Aggref *aggref, deparse_context *context) { /* * Ordered-set aggregates do not use "*" syntax. Also, we needn't - * worry about inserting VARIADIC. So we can just dump the direct + * worry about inserting VARIADIC. So we can just dump the direct * args as-is. */ Assert(!aggref->aggvariadic); @@ -7810,7 +7812,7 @@ get_coercion_expr(Node *arg, deparse_context *context, * Since parse_coerce.c doesn't immediately collapse application of * length-coercion functions to constants, what we'll typically see in * such cases is a Const with typmod -1 and a length-coercion function - * right above it. Avoid generating redundant output. However, beware of + * right above it. Avoid generating redundant output. However, beware of * suppressing casts when the user actually wrote something like * 'foo'::text::char(3). */ @@ -7892,7 +7894,7 @@ get_const_expr(Const *constval, deparse_context *context, int showtype) /* * These types are printed without quotes unless they contain * values that aren't accepted by the scanner unquoted (e.g., - * 'NaN'). Note that strtod() and friends might accept NaN, + * 'NaN'). Note that strtod() and friends might accept NaN, * so we can't use that to test. * * In reality we only need to defend against infinity and NaN, @@ -8416,7 +8418,7 @@ get_from_clause_item(Node *jtnode, Query *query, deparse_context *context) else if (rte->rtekind == RTE_FUNCTION) { /* - * For a function RTE, always print alias. This covers possible + * For a function RTE, always print alias. This covers possible * renaming of the function and/or instability of the * FigureColname rules for things that aren't simple functions. * Note we'd need to force it anyway for the columndef list case. @@ -8672,7 +8674,7 @@ get_opclass_name(Oid opclass, Oid actual_datatype, if (!OidIsValid(actual_datatype) || GetDefaultOpClass(actual_datatype, opcrec->opcmethod) != opclass) { - /* Okay, we need the opclass name. Do we need to qualify it? */ + /* Okay, we need the opclass name. Do we need to qualify it? 
*/ opcname = NameStr(opcrec->opcname); if (OpclassIsVisible(opclass)) appendStringInfo(buf, " %s", quote_identifier(opcname)); @@ -8967,13 +8969,13 @@ generate_relation_name(Oid relid, List *namespaces) * generate_function_name * Compute the name to display for a function specified by OID, * given that it is being called with the specified actual arg names and - * types. (Those matter because of ambiguous-function resolution rules.) + * types. (Those matter because of ambiguous-function resolution rules.) * * If we're dealing with a potentially variadic function (in practice, this * means a FuncExpr or Aggref, not some other way of calling a function), then * has_variadic must specify whether variadic arguments have been merged, * and *use_variadic_p will be set to indicate whether to print VARIADIC in - * the output. For non-FuncExpr cases, has_variadic should be FALSE and + * the output. For non-FuncExpr cases, has_variadic should be FALSE and * use_variadic_p can be NULL. * * The result includes all necessary quoting and schema-prefixing. diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c index 1ffc0160b77..e932ccf0da5 100644 --- a/src/backend/utils/adt/selfuncs.c +++ b/src/backend/utils/adt/selfuncs.c @@ -72,7 +72,7 @@ * float8 oprjoin (internal, oid, internal, int2, internal); * * (Before Postgres 8.4, join estimators had only the first four of these - * parameters. That signature is still allowed, but deprecated.) The + * parameters. That signature is still allowed, but deprecated.) The * relationship between jointype and sjinfo is explained in the comments for * clause_selectivity() --- the short version is that jointype is usually * best ignored in favor of examining sjinfo. @@ -209,7 +209,7 @@ static List *add_predicate_to_quals(IndexOptInfo *index, List *indexQuals); * * Note: this routine is also used to estimate selectivity for some * operators that are not "=" but have comparable selectivity behavior, - * such as "~=" (geometric approximate-match). Even for "=", we must + * such as "~=" (geometric approximate-match). Even for "=", we must * keep in mind that the left and right datatypes may differ. */ Datum @@ -273,7 +273,7 @@ var_eq_const(VariableStatData *vardata, Oid operator, /* * If we matched the var to a unique index or DISTINCT clause, assume - * there is exactly one match regardless of anything else. (This is + * there is exactly one match regardless of anything else. (This is * slightly bogus, since the index or clause's equality operator might be * different from ours, but it's much more likely to be right than * ignoring the information.) @@ -296,7 +296,7 @@ var_eq_const(VariableStatData *vardata, Oid operator, /* * Is the constant "=" to any of the column's most common values? * (Although the given operator may not really be "=", we will assume - * that seeing whether it returns TRUE is an appropriate test. If you + * that seeing whether it returns TRUE is an appropriate test. If you * don't like this, maybe you shouldn't be using eqsel for your * operator...) */ @@ -408,7 +408,7 @@ var_eq_non_const(VariableStatData *vardata, Oid operator, /* * If we matched the var to a unique index or DISTINCT clause, assume - * there is exactly one match regardless of anything else. (This is + * there is exactly one match regardless of anything else. (This is * slightly bogus, since the index or clause's equality operator might be * different from ours, but it's much more likely to be right than * ignoring the information.) 
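
The var_eq_const() comments above describe checking whether the comparison constant is one of the column's most-common values, treating a TRUE result from the (possibly not-really-equality) operator as a match. A simplified sketch of that scan, assuming a hypothetical comparator callback and caller-supplied MCV arrays in place of the real statistics-slot machinery:

/*
 * Editorial sketch only -- not part of the diff.  Scan the most-common-value
 * list with the (assumed-equality) operator and, on a match, report that
 * value's stored frequency as the selectivity.
 */
typedef int (*eq_fn) (const void *a, const void *b);

static double
mcv_equality_selectivity(const void **mcv_values, const double *mcv_freqs,
						 int nmcv, const void *constval, eq_fn op_is_true)
{
	int			i;

	for (i = 0; i < nmcv; i++)
	{
		/* "seeing whether it returns TRUE is an appropriate test" */
		if (op_is_true(mcv_values[i], constval))
			return mcv_freqs[i];	/* constant is one of the MCVs */
	}
	return -1.0;				/* not an MCV; caller estimates from the rest */
}

On a match the stored frequency is used directly; otherwise the caller estimates from the non-MCV remainder of the distribution.
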
@@ -432,7 +432,7 @@ var_eq_non_const(VariableStatData *vardata, Oid operator, * result averaged over all possible values whether common or * uncommon. (Essentially, we are assuming that the not-yet-known * comparison value is equally likely to be any of the possible - * values, regardless of their frequency in the table. Is that a good + * values, regardless of their frequency in the table. Is that a good * idea?) */ selec = 1.0 - stats->stanullfrac; @@ -655,7 +655,7 @@ mcv_selectivity(VariableStatData *vardata, FmgrInfo *opproc, * essentially using the histogram just as a representative sample. However, * small histograms are unlikely to be all that representative, so the caller * should be prepared to fall back on some other estimation approach when the - * histogram is missing or very small. It may also be prudent to combine this + * histogram is missing or very small. It may also be prudent to combine this * approach with another one when the histogram is small. * * If the actual histogram size is not at least min_hist_size, we won't bother @@ -673,7 +673,7 @@ mcv_selectivity(VariableStatData *vardata, FmgrInfo *opproc, * * Note that the result disregards both the most-common-values (if any) and * null entries. The caller is expected to combine this result with - * statistics for those portions of the column population. It may also be + * statistics for those portions of the column population. It may also be * prudent to clamp the result range, ie, disbelieve exact 0 or 1 outputs. */ double @@ -786,7 +786,7 @@ ineq_histogram_selectivity(PlannerInfo *root, * * If the binary search accesses the first or last histogram * entry, we try to replace that endpoint with the true column min - * or max as found by get_actual_variable_range(). This + * or max as found by get_actual_variable_range(). This * ameliorates misestimates when the min or max is moving as a * result of changes since the last ANALYZE. Note that this could * result in effectively including MCVs into the histogram that @@ -890,7 +890,7 @@ ineq_histogram_selectivity(PlannerInfo *root, /* * Watch out for the possibility that we got a NaN or - * Infinity from the division. This can happen + * Infinity from the division. This can happen * despite the previous checks, if for example "low" * is -Infinity. */ @@ -905,7 +905,7 @@ ineq_histogram_selectivity(PlannerInfo *root, * Ideally we'd produce an error here, on the grounds that * the given operator shouldn't have scalarXXsel * registered as its selectivity func unless we can deal - * with its operand types. But currently, all manner of + * with its operand types. But currently, all manner of * stuff is invoking scalarXXsel, so give a default * estimate until that can be fixed. */ @@ -931,7 +931,7 @@ ineq_histogram_selectivity(PlannerInfo *root, /* * The histogram boundaries are only approximate to begin with, - * and may well be out of date anyway. Therefore, don't believe + * and may well be out of date anyway. Therefore, don't believe * extremely small or large selectivity estimates --- unless we * got actual current endpoint values from the table. */ @@ -1128,7 +1128,7 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate) /* * If this is for a NOT LIKE or similar operator, get the corresponding - * positive-match operator and work with that. Set result to the correct + * positive-match operator and work with that. Set result to the correct * default estimate, too. 
*/ if (negate) @@ -1214,7 +1214,7 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate) /* * Pull out any fixed prefix implied by the pattern, and estimate the - * fractional selectivity of the remainder of the pattern. Unlike many of + * fractional selectivity of the remainder of the pattern. Unlike many of * the other functions in this file, we use the pattern operator's actual * collation for this step. This is not because we expect the collation * to make a big difference in the selectivity estimate (it seldom would), @@ -1332,7 +1332,7 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate) /* * If we have most-common-values info, add up the fractions of the MCV * entries that satisfy MCV OP PATTERN. These fractions contribute - * directly to the result selectivity. Also add up the total fraction + * directly to the result selectivity. Also add up the total fraction * represented by MCV entries. */ mcv_selec = mcv_selectivity(&vardata, &opproc, constval, true, @@ -1838,7 +1838,7 @@ scalararraysel(PlannerInfo *root, /* * For generic operators, we assume the probability of success is - * independent for each array element. But for "= ANY" or "<> ALL", + * independent for each array element. But for "= ANY" or "<> ALL", * if the array elements are distinct (which'd typically be the case) * then the probabilities are disjoint, and we should just sum them. * @@ -2253,9 +2253,9 @@ eqjoinsel_inner(Oid operator, if (have_mcvs1 && have_mcvs2) { /* - * We have most-common-value lists for both relations. Run through + * We have most-common-value lists for both relations. Run through * the lists to see which MCVs actually join to each other with the - * given operator. This allows us to determine the exact join + * given operator. This allows us to determine the exact join * selectivity for the portion of the relations represented by the MCV * lists. We still have to estimate for the remaining population, but * in a skewed distribution this gives us a big leg up in accuracy. @@ -2287,7 +2287,7 @@ eqjoinsel_inner(Oid operator, /* * Note we assume that each MCV will match at most one member of the - * other MCV list. If the operator isn't really equality, there could + * other MCV list. If the operator isn't really equality, there could * be multiple matches --- but we don't look for them, both for speed * and because the math wouldn't add up... */ @@ -2452,7 +2452,7 @@ eqjoinsel_semi(Oid operator, /* * We clamp nd2 to be not more than what we estimate the inner relation's - * size to be. This is intuitively somewhat reasonable since obviously + * size to be. This is intuitively somewhat reasonable since obviously * there can't be more than that many distinct values coming from the * inner rel. The reason for the asymmetry (ie, that we don't clamp nd1 * likewise) is that this is the only pathway by which restriction clauses @@ -2497,9 +2497,9 @@ eqjoinsel_semi(Oid operator, if (have_mcvs1 && have_mcvs2 && OidIsValid(operator)) { /* - * We have most-common-value lists for both relations. Run through + * We have most-common-value lists for both relations. Run through * the lists to see which MCVs actually join to each other with the - * given operator. This allows us to determine the exact join + * given operator. This allows us to determine the exact join * selectivity for the portion of the relations represented by the MCV * lists. We still have to estimate for the remaining population, but * in a skewed distribution this gives us a big leg up in accuracy. 
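
The eqjoinsel_inner()/eqjoinsel_semi() comments above explain that when most-common-value lists are available for both relations, the lists are run through to see which MCVs actually join under the given operator, with each MCV assumed to match at most one entry on the other side. A sketch of that pairing, assuming hypothetical flat arrays and a comparator callback in place of the catalog lookups:

/*
 * Editorial sketch only -- not part of the diff.  Pair up the two MCV lists
 * with the join operator and accumulate the joint frequency of the matched
 * portion of the two relations.
 */
typedef int (*join_op_fn) (const void *a, const void *b);

static double
mcv_join_selectivity(const void **vals1, const double *freqs1, int n1,
					 const void **vals2, const double *freqs2, int n2,
					 join_op_fn op_is_true)
{
	double		matchfreq = 0.0;
	int			i,
				j;

	for (i = 0; i < n1; i++)
	{
		for (j = 0; j < n2; j++)
		{
			if (op_is_true(vals1[i], vals2[j]))
			{
				/* each MCV is assumed to match at most one on the other side */
				matchfreq += freqs1[i] * freqs2[j];
				break;
			}
		}
	}
	return matchfreq;			/* selectivity of the MCV-covered portion */
}

The accumulated frequency covers only the MCV-represented portion of the two relations; as the comments note, the remaining population still has to be estimated separately.
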
@@ -2530,7 +2530,7 @@ eqjoinsel_semi(Oid operator, /* * Note we assume that each MCV will match at most one member of the - * other MCV list. If the operator isn't really equality, there could + * other MCV list. If the operator isn't really equality, there could * be multiple matches --- but we don't look for them, both for speed * and because the math wouldn't add up... */ @@ -2567,7 +2567,7 @@ eqjoinsel_semi(Oid operator, /* * Now we need to estimate the fraction of relation 1 that has at - * least one join partner. We know for certain that the matched MCVs + * least one join partner. We know for certain that the matched MCVs * do, so that gives us a lower bound, but we're really in the dark * about everything else. Our crude approach is: if nd1 <= nd2 then * assume all non-null rel1 rows have join partners, else assume for @@ -3165,11 +3165,11 @@ add_unique_group_var(PlannerInfo *root, List *varinfos, * case (all possible cross-product terms actually appear as groups) since * very often the grouped-by Vars are highly correlated. Our current approach * is as follows: - * 1. Expressions yielding boolean are assumed to contribute two groups, + * 1. Expressions yielding boolean are assumed to contribute two groups, * independently of their content, and are ignored in the subsequent - * steps. This is mainly because tests like "col IS NULL" break the + * steps. This is mainly because tests like "col IS NULL" break the * heuristic used in step 2 especially badly. - * 2. Reduce the given expressions to a list of unique Vars used. For + * 2. Reduce the given expressions to a list of unique Vars used. For * example, GROUP BY a, a + b is treated the same as GROUP BY a, b. * It is clearly correct not to count the same Var more than once. * It is also reasonable to treat f(x) the same as x: f() cannot @@ -3179,14 +3179,14 @@ add_unique_group_var(PlannerInfo *root, List *varinfos, * As a special case, if a GROUP BY expression can be matched to an * expressional index for which we have statistics, then we treat the * whole expression as though it were just a Var. - * 3. If the list contains Vars of different relations that are known equal + * 3. If the list contains Vars of different relations that are known equal * due to equivalence classes, then drop all but one of the Vars from each * known-equal set, keeping the one with smallest estimated # of values * (since the extra values of the others can't appear in joined rows). * Note the reason we only consider Vars of different relations is that * if we considered ones of the same rel, we'd be double-counting the * restriction selectivity of the equality in the next step. - * 4. For Vars within a single source rel, we multiply together the numbers + * 4. For Vars within a single source rel, we multiply together the numbers * of values, clamp to the number of rows in the rel (divided by 10 if * more than one Var), and then multiply by the selectivity of the * restriction clauses for that rel. When there's more than one Var, @@ -3197,7 +3197,7 @@ add_unique_group_var(PlannerInfo *root, List *varinfos, * by the restriction selectivity is effectively assuming that the * restriction clauses are independent of the grouping, which is a crummy * assumption, but it's hard to do better. - * 5. If there are Vars from multiple rels, we repeat step 4 for each such + * 5. If there are Vars from multiple rels, we repeat step 4 for each such * rel, and multiply the results together. 
* Note that rels not containing grouped Vars are ignored completely, as are * join clauses. Such rels cannot increase the number of groups, and we @@ -3228,7 +3228,7 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows) return 1.0; /* - * Count groups derived from boolean grouping expressions. For other + * Count groups derived from boolean grouping expressions. For other * expressions, find the unique Vars used, treating an expression as a Var * if we can find stats for it. For each one, record the statistical * estimate of number of distinct values (total in its table, without @@ -3317,7 +3317,7 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows) * Group Vars by relation and estimate total numdistinct. * * For each iteration of the outer loop, we process the frontmost Var in - * varinfos, plus all other Vars in the same relation. We remove these + * varinfos, plus all other Vars in the same relation. We remove these * Vars from the newvarinfos list for the next iteration. This is the * easiest way to group Vars of same rel together. */ @@ -3418,11 +3418,11 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows) * distribution, so this will have to do for now. * * We are passed the number of buckets the executor will use for the given - * input relation. If the data were perfectly distributed, with the same + * input relation. If the data were perfectly distributed, with the same * number of tuples going into each available bucket, then the bucketsize * fraction would be 1/nbuckets. But this happy state of affairs will occur * only if (a) there are at least nbuckets distinct data values, and (b) - * we have a not-too-skewed data distribution. Otherwise the buckets will + * we have a not-too-skewed data distribution. Otherwise the buckets will * be nonuniformly occupied. If the other relation in the join has a key * distribution similar to this one's, then the most-loaded buckets are * exactly those that will be probed most often. Therefore, the "average" @@ -3595,7 +3595,7 @@ convert_to_scalar(Datum value, Oid valuetypid, double *scaledvalue, * operators to estimate selectivity for the other's. This is outright * wrong in some cases --- in particular signed versus unsigned * interpretation could trip us up. But it's useful enough in the - * majority of cases that we do it anyway. Should think about more + * majority of cases that we do it anyway. Should think about more * rigorous ways to do it. */ switch (valuetypid) @@ -3950,6 +3950,7 @@ convert_string_datum(Datum value, Oid typid) xfrmlen = strxfrm(NULL, val, 0); #endif #ifdef WIN32 + /* * On Windows, strxfrm returns INT_MAX when an error occurs. Instead * of trying to allocate this much memory (and fail), just return the @@ -4178,7 +4179,7 @@ get_restriction_variable(PlannerInfo *root, List *args, int varRelid, right = (Node *) lsecond(args); /* - * Examine both sides. Note that when varRelid is nonzero, Vars of other + * Examine both sides. Note that when varRelid is nonzero, Vars of other * relations will be treated as pseudoconstants. */ examine_variable(root, left, varRelid, vardata); @@ -4323,7 +4324,7 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid, /* * Okay, it's a more complicated expression. Determine variable - * membership. Note that when varRelid isn't zero, only vars of that + * membership. Note that when varRelid isn't zero, only vars of that * relation are considered "real" vars. 
*/ varnos = pull_varnos(basenode); @@ -4372,13 +4373,13 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid, if (onerel) { /* - * We have an expression in vars of a single relation. Try to match + * We have an expression in vars of a single relation. Try to match * it to expressional index columns, in hopes of finding some * statistics. * * XXX it's conceivable that there are multiple matches with different * index opfamilies; if so, we need to pick one that matches the - * operator we are estimating for. FIXME later. + * operator we are estimating for. FIXME later. */ ListCell *ilist; @@ -4580,7 +4581,7 @@ examine_simple_variable(PlannerInfo *root, Var *var, * * This is probably a harsher restriction than necessary; it's * certainly OK for the selectivity estimator (which is a C function, - * and therefore omnipotent anyway) to look at the statistics. But + * and therefore omnipotent anyway) to look at the statistics. But * many selectivity estimators will happily *invoke the operator * function* to try to work out a good estimate - and that's not OK. * So for now, don't dig down for stats. @@ -4633,7 +4634,7 @@ get_variable_numdistinct(VariableStatData *vardata, bool *isdefault) *isdefault = false; /* - * Determine the stadistinct value to use. There are cases where we can + * Determine the stadistinct value to use. There are cases where we can * get an estimate even without a pg_statistic entry, or can get a better * value than is in pg_statistic. */ @@ -4757,7 +4758,7 @@ get_variable_range(PlannerInfo *root, VariableStatData *vardata, Oid sortop, /* * XXX It's very tempting to try to use the actual column min and max, if - * we can get them relatively-cheaply with an index probe. However, since + * we can get them relatively-cheaply with an index probe. However, since * this function is called many times during join planning, that could * have unpleasant effects on planning speed. Need more investigation * before enabling this. @@ -5008,7 +5009,7 @@ get_actual_variable_range(PlannerInfo *root, VariableStatData *vardata, * and it can be very expensive if a lot of uncommitted rows * exist at the end of the index (because we'll laboriously * fetch each one and reject it). What seems like a good - * compromise is to use SnapshotDirty. That will accept + * compromise is to use SnapshotDirty. That will accept * uncommitted rows, and thus avoid fetching multiple heap * tuples in this scenario. On the other hand, it will reject * known-dead rows, and thus not give a bogus answer when the @@ -5147,7 +5148,7 @@ find_join_input_rel(PlannerInfo *root, Relids relids) * Check whether char is a letter (and, hence, subject to case-folding) * * In multibyte character sets, we can't use isalpha, and it does not seem - * worth trying to convert to wchar_t to use iswalpha. Instead, just assume + * worth trying to convert to wchar_t to use iswalpha. Instead, just assume * any multibyte char is potentially case-varying. */ static int @@ -5399,7 +5400,7 @@ pattern_fixed_prefix(Const *patt, Pattern_Type ptype, Oid collation, * together with info about MCVs and NULLs. * * We use the >= and < operators from the specified btree opfamily to do the - * estimation. The given variable and Const must be of the associated + * estimation. The given variable and Const must be of the associated * datatype. 
* * XXX Note: we make use of the upper bound to estimate operator selectivity @@ -5458,7 +5459,7 @@ prefix_selectivity(PlannerInfo *root, VariableStatData *vardata, /* * Merge the two selectivities in the same way as for a range query - * (see clauselist_selectivity()). Note that we don't need to worry + * (see clauselist_selectivity()). Note that we don't need to worry * about double-exclusion of nulls, since ineq_histogram_selectivity * doesn't count those anyway. */ @@ -5695,7 +5696,7 @@ byte_increment(unsigned char *ptr, int len) * that is not a bulletproof guarantee that an extension of the string might * not sort after it; an example is that "foo " is less than "foo!", but it * is not clear that a "dictionary" sort ordering will consider "foo!" less - * than "foo bar". CAUTION: Therefore, this function should be used only for + * than "foo bar". CAUTION: Therefore, this function should be used only for * estimation purposes when working in a non-C collation. * * To try to catch most cases where an extended string might otherwise sort @@ -5952,7 +5953,7 @@ string_to_bytea_const(const char *str, size_t str_len) * genericcostestimate is a general-purpose estimator that can be used for * most index types. In some cases we use genericcostestimate as the base * code and then incorporate additional index-type-specific knowledge in - * the type-specific calling function. To avoid code duplication, we make + * the type-specific calling function. To avoid code duplication, we make * genericcostestimate return a number of intermediate values as well as * its preliminary estimates of the output cost values. The GenericCosts * struct includes all these values. @@ -6072,7 +6073,7 @@ genericcostestimate(PlannerInfo *root, * * In practice access to upper index levels is often nearly free because * those tend to stay in cache under load; moreover, the cost involved is - * highly dependent on index type. We therefore ignore such costs here + * highly dependent on index type. We therefore ignore such costs here * and leave it to the caller to add a suitable charge if needed. */ if (index->pages > 1 && index->tuples > 1) @@ -6091,9 +6092,9 @@ genericcostestimate(PlannerInfo *root, * The above calculations are all per-index-scan. However, if we are in a * nestloop inner scan, we can expect the scan to be repeated (with * different search keys) for each row of the outer relation. Likewise, - * ScalarArrayOpExpr quals result in multiple index scans. This creates + * ScalarArrayOpExpr quals result in multiple index scans. This creates * the potential for cache effects to reduce the number of disk page - * fetches needed. We want to estimate the average per-scan I/O cost in + * fetches needed. We want to estimate the average per-scan I/O cost in * the presence of caching. * * We use the Mackert-Lohman formula (see costsize.c for details) to @@ -6140,7 +6141,7 @@ genericcostestimate(PlannerInfo *root, * evaluated once at the start of the scan to reduce them to runtime keys * to pass to the index AM (see nodeIndexscan.c). We model the per-tuple * CPU costs as cpu_index_tuple_cost plus one cpu_operator_cost per - * indexqual operator. Because we have numIndexTuples as a per-scan + * indexqual operator. Because we have numIndexTuples as a per-scan * number, we have to multiply by num_sa_scans to get the correct result * for ScalarArrayOpExpr cases. Similarly add in costs for any index * ORDER BY expressions. 
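The per-scan bookkeeping that the genericcostestimate comments describe can be shown schematically; the parameter names below are illustrative stand-ins, not the real GenericCosts fields, and the real code folds in several more terms (descent cost, page fetches, caching effects).

/*
 * Illustrative only: numIndexTuples is a per-scan figure, so the CPU charge
 * for visiting index entries has to be scaled by the number of scans that a
 * ScalarArrayOpExpr ("= ANY (array)") qual will drive.
 */
double
toy_index_cpu_cost(double num_index_tuples,	/* tuples visited per scan */
				   double num_sa_scans,		/* scans per indexscan node */
				   double cpu_index_tuple_cost,
				   double qual_op_cost)		/* per-tuple operator charge */
{
	double		per_tuple = cpu_index_tuple_cost + qual_op_cost;

	return num_sa_scans * num_index_tuples * per_tuple;
}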
@@ -6187,16 +6188,16 @@ genericcostestimate(PlannerInfo *root, * ANDing the index predicate with the explicitly given indexquals produces * a more accurate idea of the index's selectivity. However, we need to be * careful not to insert redundant clauses, because clauselist_selectivity() - * is easily fooled into computing a too-low selectivity estimate. Our + * is easily fooled into computing a too-low selectivity estimate. Our * approach is to add only the predicate clause(s) that cannot be proven to - * be implied by the given indexquals. This successfully handles cases such + * be implied by the given indexquals. This successfully handles cases such * as a qual "x = 42" used with a partial index "WHERE x >= 40 AND x < 50". * There are many other cases where we won't detect redundancy, leading to a * too-low selectivity estimate, which will bias the system in favor of using - * partial indexes where possible. That is not necessarily bad though. + * partial indexes where possible. That is not necessarily bad though. * * Note that indexQuals contains RestrictInfo nodes while the indpred - * does not, so the output list will be mixed. This is OK for both + * does not, so the output list will be mixed. This is OK for both * predicate_implied_by() and clauselist_selectivity(), but might be * problematic if the result were passed to other things. */ @@ -6255,7 +6256,7 @@ btcostestimate(PG_FUNCTION_ARGS) * the index scan). Additional quals can suppress visits to the heap, so * it's OK to count them in indexSelectivity, but they should not count * for estimating numIndexTuples. So we must examine the given indexquals - * to find out which ones count as boundary quals. We rely on the + * to find out which ones count as boundary quals. We rely on the * knowledge that they are given in index column order. * * For a RowCompareExpr, we consider only the first column, just as @@ -6594,7 +6595,7 @@ hashcostestimate(PG_FUNCTION_ARGS) * because the hash AM makes sure that's always one page. * * Likewise, we could consider charging some CPU for each index tuple in - * the bucket, if we knew how many there were. But the per-tuple cost is + * the bucket, if we knew how many there were. But the per-tuple cost is * just a hash value comparison, not a general datatype-dependent * comparison, so any such charge ought to be quite a bit less than * cpu_operator_cost; which makes it probably not worth worrying about. @@ -6652,7 +6653,7 @@ gistcostestimate(PG_FUNCTION_ARGS) /* * Add a CPU-cost component to represent the costs of initial descent. We * just use log(N) here not log2(N) since the branching factor isn't - * necessarily two anyway. As for btree, charge once per SA scan. + * necessarily two anyway. As for btree, charge once per SA scan. */ if (index->tuples > 1) /* avoid computing log(0) */ { @@ -6714,7 +6715,7 @@ spgcostestimate(PG_FUNCTION_ARGS) /* * Add a CPU-cost component to represent the costs of initial descent. We * just use log(N) here not log2(N) since the branching factor isn't - * necessarily two anyway. As for btree, charge once per SA scan. + * necessarily two anyway. As for btree, charge once per SA scan. */ if (index->tuples > 1) /* avoid computing log(0) */ { @@ -6791,7 +6792,7 @@ gincost_pattern(IndexOptInfo *index, int indexcol, /* * Get the operator's strategy number and declared input data types within - * the index opfamily. (We don't need the latter, but we use + * the index opfamily. 
(We don't need the latter, but we use * get_op_opfamily_properties because it will throw error if it fails to * find a matching pg_amop entry.) */ @@ -6937,7 +6938,7 @@ gincost_opexpr(PlannerInfo *root, IndexOptInfo *index, OpExpr *clause, * each of which involves one value from the RHS array, plus all the * non-array quals (if any). To model this, we average the counts across * the RHS elements, and add the averages to the counts in *counts (which - * correspond to per-indexscan costs). We also multiply counts->arrayScans + * correspond to per-indexscan costs). We also multiply counts->arrayScans * by N, causing gincostestimate to scale up its estimates accordingly. */ static bool @@ -7107,7 +7108,7 @@ gincostestimate(PG_FUNCTION_ARGS) /* * nPendingPages can be trusted, but the other fields are as of the last - * VACUUM. Scale them by the ratio numPages / nTotalPages to account for + * VACUUM. Scale them by the ratio numPages / nTotalPages to account for * growth since then. If the fields are zero (implying no VACUUM at all, * and an index created pre-9.1), assume all pages are entry pages. */ @@ -7252,7 +7253,7 @@ gincostestimate(PG_FUNCTION_ARGS) /* * Add an estimate of entry pages read by partial match algorithm. It's a - * scan over leaf pages in entry tree. We haven't any useful stats here, + * scan over leaf pages in entry tree. We haven't any useful stats here, * so estimate it as proportion. */ entryPagesFetched += ceil(numEntryPages * counts.partialEntries / numEntries); @@ -7294,17 +7295,17 @@ gincostestimate(PG_FUNCTION_ARGS) * * We assume every entry to have the same number of items, and that there * is no overlap between them. (XXX: tsvector and array opclasses collect - * statistics on the frequency of individual keys; it would be nice to - * use those here.) + * statistics on the frequency of individual keys; it would be nice to use + * those here.) */ dataPagesFetched = ceil(numDataPages * counts.exactEntries / numEntries); /* - * If there is a lot of overlap among the entries, in particular if one - * of the entries is very frequent, the above calculation can grossly - * under-estimate. As a simple cross-check, calculate a lower bound - * based on the overall selectivity of the quals. At a minimum, we must - * read one item pointer for each matching entry. + * If there is a lot of overlap among the entries, in particular if one of + * the entries is very frequent, the above calculation can grossly + * under-estimate. As a simple cross-check, calculate a lower bound based + * on the overall selectivity of the quals. At a minimum, we must read + * one item pointer for each matching entry. * * The width of each item pointer varies, based on the level of * compression. We don't have statistics on that, but an average of diff --git a/src/backend/utils/adt/timestamp.c b/src/backend/utils/adt/timestamp.c index efc1e9b9925..11007c6d894 100644 --- a/src/backend/utils/adt/timestamp.c +++ b/src/backend/utils/adt/timestamp.c @@ -41,7 +41,7 @@ #error -ffast-math is known to break this code #endif -#define SAMESIGN(a,b) (((a) < 0) == ((b) < 0)) +#define SAMESIGN(a,b) (((a) < 0) == ((b) < 0)) #ifndef INT64_MAX #define INT64_MAX INT64CONST(0x7FFFFFFFFFFFFFFF) @@ -391,7 +391,7 @@ AdjustTimestampForTypmod(Timestamp *time, int32 typmod) * Note: this round-to-nearest code is not completely consistent about * rounding values that are exactly halfway between integral values. 
* On most platforms, rint() will implement round-to-nearest-even, but - * the integer code always rounds up (away from zero). Is it worth + * the integer code always rounds up (away from zero). Is it worth * trying to be consistent? */ #ifdef HAVE_INT64_TIMESTAMP @@ -488,7 +488,7 @@ timestamptz_in(PG_FUNCTION_ARGS) * if it's acceptable. Otherwise, an error is thrown. */ static int -parse_sane_timezone(struct pg_tm *tm, text *zone) +parse_sane_timezone(struct pg_tm * tm, text *zone) { char tzname[TZ_STRLEN_MAX + 1]; int rt; @@ -497,7 +497,7 @@ parse_sane_timezone(struct pg_tm *tm, text *zone) text_to_cstring_buffer(zone, tzname, sizeof(tzname)); /* - * Look up the requested timezone. First we try to interpret it as a + * Look up the requested timezone. First we try to interpret it as a * numeric timezone specification; if DecodeTimezone decides it doesn't * like the format, we look in the date token table (to handle cases like * "EST"), and if that also fails, we look in the timezone database (to @@ -507,7 +507,7 @@ parse_sane_timezone(struct pg_tm *tm, text *zone) * offset abbreviations.) * * Note pg_tzset happily parses numeric input that DecodeTimezone would - * reject. To avoid having it accept input that would otherwise be seen + * reject. To avoid having it accept input that would otherwise be seen * as invalid, it's enough to disallow having a digit in the first * position of our input string. */ @@ -528,7 +528,7 @@ parse_sane_timezone(struct pg_tm *tm, text *zone) if (rt == DTERR_TZDISP_OVERFLOW) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("numeric time zone \"%s\" out of range", tzname))); + errmsg("numeric time zone \"%s\" out of range", tzname))); else if (rt != DTERR_BAD_FORMAT) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), @@ -997,7 +997,7 @@ interval_send(PG_FUNCTION_ARGS) /* * The interval typmod stores a "range" in its high 16 bits and a "precision" - * in its low 16 bits. Both contribute to defining the resolution of the + * in its low 16 bits. Both contribute to defining the resolution of the * type. Range addresses resolution granules larger than one second, and * precision specifies resolution below one second. This representation can * express all SQL standard resolutions, but we implement them all in terms of @@ -1205,7 +1205,7 @@ interval_transform(PG_FUNCTION_ARGS) /* * Temporally-smaller fields occupy higher positions in the range - * bitmap. Since only the temporally-smallest bit matters for length + * bitmap. Since only the temporally-smallest bit matters for length * coercion purposes, we compare the last-set bits in the ranges. * Precision, which is to say, sub-second precision, only affects * ranges that include SECOND. @@ -1294,7 +1294,7 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod) * that fields to the right of the last one specified are zeroed out, * but those to the left of it remain valid. Thus for example there * is no operational difference between INTERVAL YEAR TO MONTH and - * INTERVAL MONTH. In some cases we could meaningfully enforce that + * INTERVAL MONTH. In some cases we could meaningfully enforce that * higher-order fields are zero; for example INTERVAL DAY could reject * nonzero "month" field. However that seems a bit pointless when we * can't do it consistently. 
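The interval typmod layout mentioned above, with the range in the high 16 bits and the sub-second precision in the low 16 bits, can be mimicked with a pair of toy macros; the backend's real macros live in its datatype headers and add validity masks omitted here.

#include <stdint.h>
#include <stdio.h>

/* Illustrative packing of an interval typmod: range above, precision below. */
#define TOY_INTERVAL_TYPMOD(prec, range) \
	((int32_t) ((((uint32_t) (range)) << 16) | ((uint32_t) (prec) & 0xFFFF)))
#define TOY_INTERVAL_RANGE(t)		((int32_t) (((uint32_t) (t)) >> 16))
#define TOY_INTERVAL_PRECISION(t)	((int32_t) ((uint32_t) (t) & 0xFFFF))

int
main(void)
{
	/* made-up range bits, precision 3 */
	int32_t		typmod = TOY_INTERVAL_TYPMOD(3, 0x1234);

	printf("range=%x precision=%d\n",
		   (unsigned int) TOY_INTERVAL_RANGE(typmod),
		   (int) TOY_INTERVAL_PRECISION(typmod));
	return 0;
}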
(We cannot enforce a range limit on the @@ -1304,9 +1304,9 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod) * * Note: before PG 8.4 we interpreted a limited set of fields as * actually causing a "modulo" operation on a given value, potentially - * losing high-order as well as low-order information. But there is + * losing high-order as well as low-order information. But there is * no support for such behavior in the standard, and it seems fairly - * undesirable on data consistency grounds anyway. Now we only + * undesirable on data consistency grounds anyway. Now we only * perform truncation or rounding of low-order fields. */ if (range == INTERVAL_FULL_RANGE) @@ -1426,7 +1426,7 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod) /* * Note: this round-to-nearest code is not completely consistent * about rounding values that are exactly halfway between integral - * values. On most platforms, rint() will implement + * values. On most platforms, rint() will implement * round-to-nearest-even, but the integer code always rounds up * (away from zero). Is it worth trying to be consistent? */ @@ -1470,7 +1470,7 @@ make_interval(PG_FUNCTION_ARGS) Interval *result; /* - * Reject out-of-range inputs. We really ought to check the integer + * Reject out-of-range inputs. We really ought to check the integer * inputs as well, but it's not entirely clear what limits to apply. */ if (isinf(secs) || isnan(secs)) @@ -1718,7 +1718,7 @@ timestamptz_to_time_t(TimestampTz t) * Produce a C-string representation of a TimestampTz. * * This is mostly for use in emitting messages. The primary difference - * from timestamptz_out is that we force the output format to ISO. Note + * from timestamptz_out is that we force the output format to ISO. Note * also that the result is in a static buffer, not pstrdup'd. */ const char * @@ -1862,7 +1862,7 @@ recalc_t: * * First, convert to an integral timestamp, avoiding possibly * platform-specific roundoff-in-wrong-direction errors, and adjust to - * Unix epoch. Then see if we can convert to pg_time_t without loss. This + * Unix epoch. Then see if we can convert to pg_time_t without loss. This * coding avoids hardwiring any assumptions about the width of pg_time_t, * so it should behave sanely on machines without int64. */ @@ -2010,7 +2010,7 @@ recalc: int tm2interval(struct pg_tm * tm, fsec_t fsec, Interval *span) { - double total_months = (double)tm->tm_year * MONTHS_PER_YEAR + tm->tm_mon; + double total_months = (double) tm->tm_year * MONTHS_PER_YEAR + tm->tm_mon; if (total_months > INT_MAX || total_months < INT_MIN) return -1; @@ -4888,7 +4888,7 @@ timestamp_zone(PG_FUNCTION_ARGS) PG_RETURN_TIMESTAMPTZ(timestamp); /* - * Look up the requested timezone. First we look in the date token table + * Look up the requested timezone. First we look in the date token table * (to handle cases like "EST"), and if that fails, we look in the * timezone database (to handle cases like "America/New_York"). (This * matches the order in which timestamp input checks the cases; it's @@ -5061,7 +5061,7 @@ timestamptz_zone(PG_FUNCTION_ARGS) PG_RETURN_TIMESTAMP(timestamp); /* - * Look up the requested timezone. First we look in the date token table + * Look up the requested timezone. First we look in the date token table * (to handle cases like "EST"), and if that fails, we look in the * timezone database (to handle cases like "America/New_York"). 
(This * matches the order in which timestamp input checks the cases; it's diff --git a/src/backend/utils/adt/tsginidx.c b/src/backend/utils/adt/tsginidx.c index df47105d0b2..bdef47f093c 100644 --- a/src/backend/utils/adt/tsginidx.c +++ b/src/backend/utils/adt/tsginidx.c @@ -204,9 +204,12 @@ checkcondition_gin(void *checkval, QueryOperand *val) */ static GinTernaryValue TS_execute_ternary(QueryItem *curitem, void *checkval, - GinTernaryValue (*chkcond) (void *checkval, QueryOperand *val)) + GinTernaryValue (*chkcond) (void *checkval, QueryOperand *val)) { - GinTernaryValue val1, val2, result; + GinTernaryValue val1, + val2, + result; + /* since this function recurses, it could be driven to stack overflow */ check_stack_depth(); @@ -223,7 +226,7 @@ TS_execute_ternary(QueryItem *curitem, void *checkval, case OP_AND: val1 = TS_execute_ternary(curitem + curitem->qoperator.left, - checkval, chkcond); + checkval, chkcond); if (val1 == GIN_FALSE) return GIN_FALSE; val2 = TS_execute_ternary(curitem + 1, checkval, chkcond); @@ -236,7 +239,7 @@ TS_execute_ternary(QueryItem *curitem, void *checkval, case OP_OR: val1 = TS_execute_ternary(curitem + curitem->qoperator.left, - checkval, chkcond); + checkval, chkcond); if (val1 == GIN_TRUE) return GIN_TRUE; val2 = TS_execute_ternary(curitem + 1, checkval, chkcond); @@ -339,7 +342,7 @@ gin_tsquery_triconsistent(PG_FUNCTION_ARGS) * Formerly, gin_extract_tsvector had only two arguments. Now it has three, * but we still need a pg_proc entry with two args to support reloading * pre-9.1 contrib/tsearch2 opclass declarations. This compatibility - * function should go away eventually. (Note: you might say "hey, but the + * function should go away eventually. (Note: you might say "hey, but the * code above is only *using* two args, so let's just declare it that way". * If you try that you'll find the opr_sanity regression test complains.) */ diff --git a/src/backend/utils/adt/varchar.c b/src/backend/utils/adt/varchar.c index 502ca44e04a..72b9f99dbc9 100644 --- a/src/backend/utils/adt/varchar.c +++ b/src/backend/utils/adt/varchar.c @@ -257,7 +257,7 @@ bpcharsend(PG_FUNCTION_ARGS) * * Truncation rules: for an explicit cast, silently truncate to the given * length; for an implicit cast, raise error unless extra characters are - * all spaces. (This is sort-of per SQL: the spec would actually have us + * all spaces. (This is sort-of per SQL: the spec would actually have us * raise a "completion condition" for the explicit cast case, but Postgres * hasn't got such a concept.) */ @@ -584,7 +584,7 @@ varchar_transform(PG_FUNCTION_ARGS) * * Truncation rules: for an explicit cast, silently truncate to the given * length; for an implicit cast, raise error unless extra characters are - * all spaces. (This is sort-of per SQL: the spec would actually have us + * all spaces. (This is sort-of per SQL: the spec would actually have us * raise a "completion condition" for the explicit cast case, but Postgres * hasn't got such a concept.) */ diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c index aab4897f618..f8d9fec34e4 100644 --- a/src/backend/utils/adt/varlena.c +++ b/src/backend/utils/adt/varlena.c @@ -591,7 +591,7 @@ textlen(PG_FUNCTION_ARGS) * Does the real work for textlen() * * This is broken out so it can be called directly by other string processing - * functions. Note that the argument is passed as a Datum, to indicate that + * functions. Note that the argument is passed as a Datum, to indicate that * it may still be in compressed form. 
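The short-circuit rules visible in the TS_execute_ternary hunks above are ordinary three-valued logic; a compact standalone model of the AND/OR combination, with the GIN constants renamed so the sketch is self-contained, looks like this. FALSE short-circuits AND and TRUE short-circuits OR, which is why the code above can return early after evaluating only the left operand.

typedef enum
{
	TRI_FALSE,
	TRI_MAYBE,
	TRI_TRUE
} TriValue;

TriValue
tri_and(TriValue a, TriValue b)
{
	if (a == TRI_FALSE || b == TRI_FALSE)
		return TRI_FALSE;		/* FALSE dominates AND, as in the OP_AND case */
	if (a == TRI_TRUE && b == TRI_TRUE)
		return TRI_TRUE;
	return TRI_MAYBE;
}

TriValue
tri_or(TriValue a, TriValue b)
{
	if (a == TRI_TRUE || b == TRI_TRUE)
		return TRI_TRUE;		/* TRUE dominates OR, as in the OP_OR case */
	if (a == TRI_FALSE && b == TRI_FALSE)
		return TRI_FALSE;
	return TRI_MAYBE;
}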
We can avoid decompressing it at all * in some cases. */ @@ -763,7 +763,7 @@ text_substr_no_len(PG_FUNCTION_ARGS) * Does the real work for text_substr() and text_substr_no_len() * * This is broken out so it can be called directly by other string processing - * functions. Note that the argument is passed as a Datum, to indicate that + * functions. Note that the argument is passed as a Datum, to indicate that * it may still be in compressed/toasted form. We can avoid detoasting all * of it in some cases. * @@ -1113,7 +1113,7 @@ text_position_setup(text *t1, text *t2, TextPositionState *state) * searched (t1) and the "needle" is the pattern being sought (t2). * * If the needle is empty or bigger than the haystack then there is no - * point in wasting cycles initializing the table. We also choose not to + * point in wasting cycles initializing the table. We also choose not to * use B-M-H for needles of length 1, since the skip table can't possibly * save anything in that case. */ @@ -1129,7 +1129,7 @@ text_position_setup(text *t1, text *t2, TextPositionState *state) * declaration of TextPositionState allows up to 256 elements, but for * short search problems we don't really want to have to initialize so * many elements --- it would take too long in comparison to the - * actual search time. So we choose a useful skip table size based on + * actual search time. So we choose a useful skip table size based on * the haystack length minus the needle length. The closer the needle * length is to the haystack length the less useful skipping becomes. * @@ -1161,7 +1161,7 @@ text_position_setup(text *t1, text *t2, TextPositionState *state) state->skiptable[i] = len2; /* - * Now examine the needle. For each character except the last one, + * Now examine the needle. For each character except the last one, * set the corresponding table element to the appropriate skip * distance. Note that when two characters share the same skip table * entry, the one later in the needle must determine the skip @@ -1249,11 +1249,11 @@ text_position_next(int start_pos, TextPositionState *state) /* * No match, so use the haystack char at hptr to decide how - * far to advance. If the needle had any occurrence of that + * far to advance. If the needle had any occurrence of that * character (or more precisely, one sharing the same * skiptable entry) before its last character, then we advance * far enough to align the last such needle character with - * that haystack position. Otherwise we can advance by the + * that haystack position. Otherwise we can advance by the * whole needle length. */ hptr += state->skiptable[(unsigned char) *hptr & skiptablemask]; @@ -1305,11 +1305,11 @@ text_position_next(int start_pos, TextPositionState *state) /* * No match, so use the haystack char at hptr to decide how - * far to advance. If the needle had any occurrence of that + * far to advance. If the needle had any occurrence of that * character (or more precisely, one sharing the same * skiptable entry) before its last character, then we advance * far enough to align the last such needle character with - * that haystack position. Otherwise we can advance by the + * that haystack position. Otherwise we can advance by the * whole needle length. */ hptr += state->skiptable[*hptr & skiptablemask]; @@ -1344,7 +1344,7 @@ varstr_cmp(char *arg1, int len1, char *arg2, int len2, Oid collid) /* * Unfortunately, there is no strncoll(), so in the non-C locale case we - * have to do some memory copying. 
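The skip-table scheme described in the text_position_setup()/text_position_next() comments above is Boyer-Moore-Horspool. A minimal byte-oriented version of the same idea, without the backend's variable-size table and mask refinement, is sketched below.

#include <stddef.h>
#include <string.h>

/*
 * Minimal Boyer-Moore-Horspool: the default skip is the needle length, every
 * needle byte except the last records its distance from the end, and on a
 * mismatch we advance by the skip value of the haystack byte that lines up
 * with the needle's last byte.
 */
const char *
bmh_search(const char *haystack, size_t hlen,
		   const char *needle, size_t nlen)
{
	size_t		skip[256];
	size_t		i;

	if (nlen == 0 || nlen > hlen)
		return NULL;			/* degenerate cases punted in this sketch */

	for (i = 0; i < 256; i++)
		skip[i] = nlen;
	for (i = 0; i + 1 < nlen; i++)	/* all bytes except the last one */
		skip[(unsigned char) needle[i]] = nlen - 1 - i;

	i = 0;
	while (i + nlen <= hlen)
	{
		if (memcmp(haystack + i, needle, nlen) == 0)
			return haystack + i;
		/* advance based on the byte under the needle's last position */
		i += skip[(unsigned char) haystack[i + nlen - 1]];
	}
	return NULL;
}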
This turns out to be significantly + * have to do some memory copying. This turns out to be significantly * slower, so we optimize the case where LC_COLLATE is C. We also try to * optimize relatively-short strings by avoiding palloc/pfree overhead. */ @@ -2334,7 +2334,7 @@ textToQualifiedNameList(text *textval) * SplitIdentifierString --- parse a string containing identifiers * * This is the guts of textToQualifiedNameList, and is exported for use in - * other situations such as parsing GUC variables. In the GUC case, it's + * other situations such as parsing GUC variables. In the GUC case, it's * important to avoid memory leaks, so the API is designed to minimize the * amount of stuff that needs to be allocated and freed. * @@ -2342,7 +2342,7 @@ textToQualifiedNameList(text *textval) * rawstring: the input string; must be overwritable! On return, it's * been modified to contain the separated identifiers. * separator: the separator punctuation expected between identifiers - * (typically '.' or ','). Whitespace may also appear around + * (typically '.' or ','). Whitespace may also appear around * identifiers. * Outputs: * namelist: filled with a palloc'd list of pointers to identifiers within @@ -2411,7 +2411,7 @@ SplitIdentifierString(char *rawstring, char separator, * * XXX because we want to overwrite the input in-place, we cannot * support a downcasing transformation that increases the string - * length. This is not a problem given the current implementation + * length. This is not a problem given the current implementation * of downcase_truncate_identifier, but we'll probably have to do * something about this someday. */ @@ -2468,7 +2468,7 @@ SplitIdentifierString(char *rawstring, char separator, * Inputs: * rawstring: the input string; must be modifiable! * separator: the separator punctuation expected between directories - * (typically ',' or ';'). Whitespace may also appear around + * (typically ',' or ';'). Whitespace may also appear around * directories. * Outputs: * namelist: filled with a palloc'd list of directory names. @@ -2875,7 +2875,7 @@ check_replace_text_has_escape_char(const text *replace_text) * appendStringInfoRegexpSubstr * * Append replace_text to str, substituting regexp back references for - * \n escapes. start_ptr is the start of the match in the source string, + * \n escapes. start_ptr is the start of the match in the source string, * at logical character position data_pos. */ static void @@ -2958,7 +2958,7 @@ appendStringInfoRegexpSubstr(StringInfo str, text *replace_text, if (so != -1 && eo != -1) { /* - * Copy the text that is back reference of regexp. Note so and eo + * Copy the text that is back reference of regexp. Note so and eo * are counted in characters not bytes. */ char *chunk_start; @@ -3836,7 +3836,7 @@ concat_internal(const char *sepstr, int argidx, /* * Non-null argument had better be an array. We assume that any call * context that could let get_fn_expr_variadic return true will have - * checked that a VARIADIC-labeled parameter actually is an array. So + * checked that a VARIADIC-labeled parameter actually is an array. So * it should be okay to just Assert that it's an array rather than * doing a full-fledged error check. */ @@ -4237,7 +4237,7 @@ text_format(PG_FUNCTION_ARGS) /* * Get the appropriate typOutput function, reusing previous one if - * same type as previous argument. That's particularly useful in the + * same type as previous argument. 
That's particularly useful in the * variadic-array case, but often saves work even for ordinary calls. */ if (typid != prev_type) @@ -4329,12 +4329,12 @@ text_format_parse_digits(const char **ptr, const char *end_ptr, int *value) * * Inputs are start_ptr (the position after '%') and end_ptr (string end + 1). * Output parameters: - * argpos: argument position for value to be printed. -1 means unspecified. - * widthpos: argument position for width. Zero means the argument position + * argpos: argument position for value to be printed. -1 means unspecified. + * widthpos: argument position for width. Zero means the argument position * was unspecified (ie, take the next arg) and -1 means no width * argument (width was omitted or specified as a constant). * flags: bitmask of flags. - * width: directly-specified width value. Zero means the width was omitted + * width: directly-specified width value. Zero means the width was omitted * (note it's not necessary to distinguish this case from an explicit * zero width value). * diff --git a/src/backend/utils/adt/xml.c b/src/backend/utils/adt/xml.c index 765469c623e..422be69bd6d 100644 --- a/src/backend/utils/adt/xml.c +++ b/src/backend/utils/adt/xml.c @@ -19,7 +19,7 @@ * fail. For one thing, this avoids having to manage variant catalog * installations. But it also has nice effects such as that you can * dump a database containing XML type data even if the server is not - * linked with libxml. Thus, make sure xml_out() works even if nothing + * linked with libxml. Thus, make sure xml_out() works even if nothing * else does. */ @@ -286,7 +286,7 @@ xml_out(PG_FUNCTION_ARGS) xmltype *x = PG_GETARG_XML_P(0); /* - * xml_out removes the encoding property in all cases. This is because we + * xml_out removes the encoding property in all cases. This is because we * cannot control from here whether the datum will be converted to a * different client encoding, so we'd do more harm than good by including * it. @@ -454,7 +454,7 @@ xmlcomment(PG_FUNCTION_ARGS) /* * TODO: xmlconcat needs to merge the notations and unparsed entities - * of the argument values. Not very important in practice, though. + * of the argument values. Not very important in practice, though. */ xmltype * xmlconcat(List *args) @@ -589,7 +589,7 @@ xmlelement(XmlExprState *xmlExpr, ExprContext *econtext) /* * We first evaluate all the arguments, then start up libxml and create - * the result. This avoids issues if one of the arguments involves a call + * the result. This avoids issues if one of the arguments involves a call * to some other function or subsystem that wants to use libxml on its own * terms. */ @@ -926,7 +926,7 @@ pg_xml_init_library(void) * pg_xml_init --- set up for use of libxml and register an error handler * * This should be called by each function that is about to use libxml - * facilities and requires error handling. It initializes libxml with + * facilities and requires error handling. It initializes libxml with * pg_xml_init_library() and establishes our libxml error handler. * * strictness determines which errors are reported and which are ignored. @@ -972,7 +972,7 @@ pg_xml_init(PgXmlStrictness strictness) /* * Verify that xmlSetStructuredErrorFunc set the context variable we - * expected it to. If not, the error context pointer we just saved is not + * expected it to. If not, the error context pointer we just saved is not * the correct thing to restore, and since that leaves us without a way to * restore the context in pg_xml_done, we must fail. 
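The argument-position conventions documented for the format() parser above (a run of digits followed by '$' names an argument explicitly, bare digits are a width) can be illustrated with a toy scanner; the function below is a simplification, not the backend's text_format_parse_format.

#include <ctype.h>

/*
 * Toy scan of an optional "n$" prefix: returns the explicit argument
 * position and advances *cp past it, or returns -1 and leaves *cp alone so
 * the caller can reinterpret the digits as a width.
 */
int
toy_parse_argpos(const char **cp)
{
	const char *p = *cp;
	int			value = 0;
	int			ndigits = 0;

	while (isdigit((unsigned char) *p))
	{
		value = value * 10 + (*p - '0');
		p++;
		ndigits++;
	}
	if (ndigits > 0 && *p == '$')
	{
		*cp = p + 1;			/* consume the digits and the '$' */
		return value;
	}
	return -1;					/* no explicit argument position */
}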
* @@ -1129,7 +1129,7 @@ parse_xml_decl(const xmlChar *str, size_t *lenp, int utf8len; /* - * Only initialize libxml. We don't need error handling here, but we do + * Only initialize libxml. We don't need error handling here, but we do * need to make sure libxml is initialized before calling any of its * functions. Note that this is safe (and a no-op) if caller has already * done pg_xml_init(). @@ -1272,7 +1272,7 @@ finished: /* * Write an XML declaration. On output, we adjust the XML declaration - * as follows. (These rules are the moral equivalent of the clause + * as follows. (These rules are the moral equivalent of the clause * "Serialization of an XML value" in the SQL standard.) * * We try to avoid generating an XML declaration if possible. This is @@ -1496,7 +1496,7 @@ xml_pstrdup(const char *string) /* * xmlPgEntityLoader --- entity loader callback function * - * Silently prevent any external entity URL from being loaded. We don't want + * Silently prevent any external entity URL from being loaded. We don't want * to throw an error, so instead make the entity appear to expand to an empty * string. * @@ -1665,8 +1665,8 @@ xml_errorHandler(void *data, xmlErrorPtr error) chopStringInfoNewlines(errorBuf); /* - * Legacy error handling mode. err_occurred is never set, we just add the - * message to err_buf. This mode exists because the xml2 contrib module + * Legacy error handling mode. err_occurred is never set, we just add the + * message to err_buf. This mode exists because the xml2 contrib module * uses our error-handling infrastructure, but we don't want to change its * behaviour since it's deprecated anyway. This is also why we don't * distinguish between notices, warnings and errors here --- the old-style @@ -1887,7 +1887,7 @@ map_sql_identifier_to_xml_name(char *ident, bool fully_escaped, static char * unicode_to_sqlchar(pg_wchar c) { - char utf8string[8]; /* need room for trailing zero */ + char utf8string[8]; /* need room for trailing zero */ char *result; memset(utf8string, 0, sizeof(utf8string)); @@ -1939,8 +1939,8 @@ map_xml_name_to_sql_identifier(char *name) * * When xml_escape_strings is true, then certain characters in string * values are replaced by entity references (< etc.), as specified - * in SQL/XML:2008 section 9.8 GR 9) a) iii). This is normally what is - * wanted. The false case is mainly useful when the resulting value + * in SQL/XML:2008 section 9.8 GR 9) a) iii). This is normally what is + * wanted. The false case is mainly useful when the resulting value * is used with xmlTextWriterWriteAttribute() to write out an * attribute, because that function does the escaping itself. */ @@ -2221,13 +2221,13 @@ _SPI_strdup(const char *s) * * There are two kinds of mappings: Mapping SQL data (table contents) * to XML documents, and mapping SQL structure (the "schema") to XML - * Schema. And there are functions that do both at the same time. + * Schema. And there are functions that do both at the same time. * * Then you can map a database, a schema, or a table, each in both * ways. This breaks down recursively: Mapping a database invokes * mapping schemas, which invokes mapping tables, which invokes * mapping rows, which invokes mapping columns, although you can't - * call the last two from the outside. Because of this, there are a + * call the last two from the outside. Because of this, there are a * number of xyz_internal() functions which are to be called both from * the function manager wrapper and from some upper layer in a * recursive call. 
@@ -2236,7 +2236,7 @@ _SPI_strdup(const char *s) * nulls, tableforest, and targetns mean. * * Some style guidelines for XML output: Use double quotes for quoting - * XML attributes. Indent XML elements by two spaces, but remember + * XML attributes. Indent XML elements by two spaces, but remember * that a lot of code is called recursively at different levels, so * it's better not to indent rather than create output that indents * and outdents weirdly. Add newlines to make the output look nice. @@ -2400,12 +2400,12 @@ cursor_to_xml(PG_FUNCTION_ARGS) * Write the start tag of the root element of a data mapping. * * top_level means that this is the very top level of the eventual - * output. For example, when the user calls table_to_xml, then a call + * output. For example, when the user calls table_to_xml, then a call * with a table name to this function is the top level. When the user * calls database_to_xml, then a call with a schema name to this * function is not the top level. If top_level is false, then the XML * namespace declarations are omitted, because they supposedly already - * appeared earlier in the output. Repeating them is not wrong, but + * appeared earlier in the output. Repeating them is not wrong, but * it looks ugly. */ static void @@ -2937,7 +2937,7 @@ map_multipart_sql_identifier_to_xml_name(char *a, char *b, char *c, char *d) if (a) appendStringInfoString(&result, - map_sql_identifier_to_xml_name(a, true, true)); + map_sql_identifier_to_xml_name(a, true, true)); if (b) appendStringInfo(&result, ".%s", map_sql_identifier_to_xml_name(b, true, true)); @@ -3348,7 +3348,7 @@ map_sql_typecoll_to_xmlschema_types(List *tupdesc_list) * SQL/XML:2008 sections 9.5 and 9.6. * * (The distinction between 9.5 and 9.6 is basically that 9.6 adds - * a name attribute, which this function does. The name-less version + * a name attribute, which this function does. The name-less version * 9.5 doesn't appear to be required anywhere.) 
*/ static const char * @@ -3362,11 +3362,11 @@ map_sql_type_to_xmlschema_type(Oid typeoid, int typmod) if (typeoid == XMLOID) { appendStringInfoString(&result, - "<xsd:complexType mixed=\"true\">\n" - " <xsd:sequence>\n" - " <xsd:any name=\"element\" minOccurs=\"0\" maxOccurs=\"unbounded\" processContents=\"skip\"/>\n" - " </xsd:sequence>\n" - "</xsd:complexType>\n"); + "<xsd:complexType mixed=\"true\">\n" + " <xsd:sequence>\n" + " <xsd:any name=\"element\" minOccurs=\"0\" maxOccurs=\"unbounded\" processContents=\"skip\"/>\n" + " </xsd:sequence>\n" + "</xsd:complexType>\n"); } else { @@ -3440,12 +3440,12 @@ map_sql_type_to_xmlschema_type(Oid typeoid, int typmod) case FLOAT8OID: appendStringInfoString(&result, - " <xsd:restriction base=\"xsd:double\"></xsd:restriction>\n"); + " <xsd:restriction base=\"xsd:double\"></xsd:restriction>\n"); break; case BOOLOID: appendStringInfoString(&result, - " <xsd:restriction base=\"xsd:boolean\"></xsd:restriction>\n"); + " <xsd:restriction base=\"xsd:boolean\"></xsd:restriction>\n"); break; case TIMEOID: @@ -3496,9 +3496,9 @@ map_sql_type_to_xmlschema_type(Oid typeoid, int typmod) case DATEOID: appendStringInfoString(&result, - " <xsd:restriction base=\"xsd:date\">\n" - " <xsd:pattern value=\"\\p{Nd}{4}-\\p{Nd}{2}-\\p{Nd}{2}\"/>\n" - " </xsd:restriction>\n"); + " <xsd:restriction base=\"xsd:date\">\n" + " <xsd:pattern value=\"\\p{Nd}{4}-\\p{Nd}{2}-\\p{Nd}{2}\"/>\n" + " </xsd:restriction>\n"); break; default: @@ -3524,7 +3524,7 @@ map_sql_type_to_xmlschema_type(Oid typeoid, int typmod) /* * Map an SQL row to an XML element, taking the row from the active - * SPI cursor. See also SQL/XML:2008 section 9.10. + * SPI cursor. See also SQL/XML:2008 section 9.10. */ static void SPI_sql_row_to_xmlelement(int rownum, StringInfo result, char *tablename, diff --git a/src/backend/utils/cache/attoptcache.c b/src/backend/utils/cache/attoptcache.c index 516f40ba84e..5fcf0dd7c75 100644 --- a/src/backend/utils/cache/attoptcache.c +++ b/src/backend/utils/cache/attoptcache.c @@ -46,7 +46,7 @@ typedef struct * Flush all cache entries when pg_attribute is updated. * * When pg_attribute is updated, we must flush the cache entry at least - * for that attribute. Currently, we just flush them all. Since attribute + * for that attribute. Currently, we just flush them all. Since attribute * options are not currently used in performance-critical paths (such as * query execution), this seems OK. */ diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c index d17b6b0ba58..954b435bffa 100644 --- a/src/backend/utils/cache/catcache.c +++ b/src/backend/utils/cache/catcache.c @@ -836,9 +836,10 @@ RehashCatCache(CatCache *cp) for (i = 0; i < cp->cc_nbuckets; i++) { dlist_mutable_iter iter; + dlist_foreach_modify(iter, &cp->cc_bucket[i]) { - CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur); + CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur); int hashIndex = HASH_INDEX(ct->hash_value, newnbuckets); dlist_delete(iter.cur); @@ -856,7 +857,7 @@ RehashCatCache(CatCache *cp) * CatalogCacheInitializeCache * * This function does final initialization of a catcache: obtain the tuple - * descriptor and set up the hash and equality function links. We assume + * descriptor and set up the hash and equality function links. We assume * that the relcache entry can be opened at this point! */ #ifdef CACHEDEBUG @@ -1081,7 +1082,7 @@ IndexScanOK(CatCache *cache, ScanKey cur_skey) * if necessary (on the first access to a particular cache). 
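The RehashCatCache hunk above shows the usual doubling rehash for a chained hash table; the same pattern in a self-contained form, with a plain singly linked list standing in for dlist and a simple modulo in place of HASH_INDEX (allocation-failure handling omitted), could look like this.

#include <stdlib.h>

typedef struct ToyEntry
{
	unsigned int hash_value;
	struct ToyEntry *next;
} ToyEntry;

typedef struct ToyCache
{
	ToyEntry  **buckets;
	int			nbuckets;
	int			ntup;
} ToyCache;

/* Double the bucket array and move every entry onto its new chain. */
void
toy_rehash(ToyCache *cp)
{
	int			newnbuckets = cp->nbuckets * 2;
	ToyEntry  **newbuckets = calloc(newnbuckets, sizeof(ToyEntry *));
	int			i;

	for (i = 0; i < cp->nbuckets; i++)
	{
		ToyEntry   *e = cp->buckets[i];

		while (e != NULL)
		{
			ToyEntry   *next = e->next;
			int			idx = e->hash_value % newnbuckets;

			e->next = newbuckets[idx];
			newbuckets[idx] = e;
			e = next;
		}
	}
	free(cp->buckets);
	cp->buckets = newbuckets;
	cp->nbuckets = newnbuckets;
}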
* * The result is NULL if not found, or a pointer to a HeapTuple in - * the cache. The caller must not modify the tuple, and must call + * the cache. The caller must not modify the tuple, and must call * ReleaseCatCache() when done with it. * * The search key values should be expressed as Datums of the key columns' @@ -1214,8 +1215,8 @@ SearchCatCache(CatCache *cache, * the relation --- for example, due to shared-cache-inval messages being * processed during heap_open(). This is OK. It's even possible for one * of those lookups to find and enter the very same tuple we are trying to - * fetch here. If that happens, we will enter a second copy of the tuple - * into the cache. The first copy will never be referenced again, and + * fetch here. If that happens, we will enter a second copy of the tuple + * into the cache. The first copy will never be referenced again, and * will eventually age out of the cache, so there's no functional problem. * This case is rare enough that it's not worth expending extra cycles to * detect. @@ -1254,7 +1255,7 @@ SearchCatCache(CatCache *cache, * * In bootstrap mode, we don't build negative entries, because the cache * invalidation mechanism isn't alive and can't clear them if the tuple - * gets created later. (Bootstrap doesn't do UPDATEs, so it doesn't need + * gets created later. (Bootstrap doesn't do UPDATEs, so it doesn't need * cache inval for that.) */ if (ct == NULL) @@ -1584,7 +1585,7 @@ SearchCatCacheList(CatCache *cache, /* * We are now past the last thing that could trigger an elog before we * have finished building the CatCList and remembering it in the - * resource owner. So it's OK to fall out of the PG_TRY, and indeed + * resource owner. So it's OK to fall out of the PG_TRY, and indeed * we'd better do so before we start marking the members as belonging * to the list. */ @@ -1673,7 +1674,7 @@ ReleaseCatCacheList(CatCList *list) /* * CatalogCacheCreateEntry * Create a new CatCTup entry, copying the given HeapTuple and other - * supplied data into it. The new entry initially has refcount 0. + * supplied data into it. The new entry initially has refcount 0. */ static CatCTup * CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, @@ -1724,8 +1725,8 @@ CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, CacheHdr->ch_ntup++; /* - * If the hash table has become too full, enlarge the buckets array. - * Quite arbitrarily, we enlarge when fill factor > 2. + * If the hash table has become too full, enlarge the buckets array. Quite + * arbitrarily, we enlarge when fill factor > 2. */ if (cache->cc_ntup > cache->cc_nbuckets * 2) RehashCatCache(cache); diff --git a/src/backend/utils/cache/inval.c b/src/backend/utils/cache/inval.c index 115bcac5d23..59714697c69 100644 --- a/src/backend/utils/cache/inval.c +++ b/src/backend/utils/cache/inval.c @@ -29,23 +29,23 @@ * * If we successfully complete the transaction, we have to broadcast all * these invalidation events to other backends (via the SI message queue) - * so that they can flush obsolete entries from their caches. Note we have + * so that they can flush obsolete entries from their caches. Note we have * to record the transaction commit before sending SI messages, otherwise * the other backends won't see our updated tuples as good. * * When a subtransaction aborts, we can process and discard any events - * it has queued. When a subtransaction commits, we just add its events + * it has queued. When a subtransaction commits, we just add its events * to the pending lists of the parent transaction. 
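The subtransaction rule stated just above (on abort, process and discard the subtransaction's queued events; on commit, hand them to the parent) is essentially a stack of per-nesting-level event lists. Below is a minimal sketch, independent of the real InvalidationChunk machinery; since ordering within a list is not significant, simply splicing onto the parent's list is enough.

#include <stdlib.h>

typedef struct ToyInvalEvent
{
	int			payload;			/* stands in for a SharedInvalidationMessage */
	struct ToyInvalEvent *next;
} ToyInvalEvent;

typedef struct ToyInvalLevel
{
	ToyInvalEvent *events;			/* events queued at this nesting level */
	struct ToyInvalLevel *parent;
} ToyInvalLevel;

/* Subtransaction commit: hand the level's events to its parent. */
ToyInvalLevel *
toy_subxact_commit(ToyInvalLevel *level)
{
	ToyInvalLevel *parent = level->parent;
	ToyInvalEvent *e = level->events;

	while (e != NULL)
	{
		ToyInvalEvent *next = e->next;

		e->next = parent->events;	/* splice onto the parent's pending list */
		parent->events = e;
		e = next;
	}
	free(level);
	return parent;
}

/* Subtransaction abort: the events are processed locally, then dropped. */
ToyInvalLevel *
toy_subxact_abort(ToyInvalLevel *level, void (*process) (int payload))
{
	ToyInvalLevel *parent = level->parent;
	ToyInvalEvent *e = level->events;

	while (e != NULL)
	{
		ToyInvalEvent *next = e->next;

		process(e->payload);		/* flush our own caches */
		free(e);
		e = next;
	}
	free(level);
	return parent;
}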
* * In short, we need to remember until xact end every insert or delete - * of a tuple that might be in the system caches. Updates are treated as + * of a tuple that might be in the system caches. Updates are treated as * two events, delete + insert, for simplicity. (If the update doesn't * change the tuple hash value, catcache.c optimizes this into one event.) * * We do not need to register EVERY tuple operation in this way, just those - * on tuples in relations that have associated catcaches. We do, however, + * on tuples in relations that have associated catcaches. We do, however, * have to register every operation on every tuple that *could* be in a - * catcache, whether or not it currently is in our cache. Also, if the + * catcache, whether or not it currently is in our cache. Also, if the * tuple is in a relation that has multiple catcaches, we need to register * an invalidation message for each such catcache. catcache.c's * PrepareToInvalidateCacheTuple() routine provides the knowledge of which @@ -113,7 +113,7 @@ /* * To minimize palloc traffic, we keep pending requests in successively- * larger chunks (a slightly more sophisticated version of an expansible - * array). All request types can be stored as SharedInvalidationMessage + * array). All request types can be stored as SharedInvalidationMessage * records. The ordering of requests within a list is never significant. */ typedef struct InvalidationChunk @@ -650,7 +650,7 @@ AcceptInvalidationMessages(void) * * If you're a glutton for punishment, try CLOBBER_CACHE_RECURSIVELY. This * slows things by at least a factor of 10000, so I wouldn't suggest - * trying to run the entire regression tests that way. It's useful to try + * trying to run the entire regression tests that way. It's useful to try * a few simple tests, to make sure that cache reload isn't subject to * internal cache-flush hazards, but after you've done a few thousand * recursive reloads it's unlikely you'll learn more. @@ -863,12 +863,12 @@ ProcessCommittedInvalidationMessages(SharedInvalidationMessage *msgs, * If isCommit, we must send out the messages in our PriorCmdInvalidMsgs list * to the shared invalidation message queue. Note that these will be read * not only by other backends, but also by our own backend at the next - * transaction start (via AcceptInvalidationMessages). This means that + * transaction start (via AcceptInvalidationMessages). This means that * we can skip immediate local processing of anything that's still in * CurrentCmdInvalidMsgs, and just send that list out too. * * If not isCommit, we are aborting, and must locally process the messages - * in PriorCmdInvalidMsgs. No messages need be sent to other backends, + * in PriorCmdInvalidMsgs. No messages need be sent to other backends, * since they'll not have seen our changed tuples anyway. We can forget * about CurrentCmdInvalidMsgs too, since those changes haven't touched * the caches yet. @@ -927,11 +927,11 @@ AtEOXact_Inval(bool isCommit) * parent's PriorCmdInvalidMsgs list. * * If not isCommit, we are aborting, and must locally process the messages - * in PriorCmdInvalidMsgs. No messages need be sent to other backends. + * in PriorCmdInvalidMsgs. No messages need be sent to other backends. * We can forget about CurrentCmdInvalidMsgs too, since those changes haven't * touched the caches yet. * - * In any case, pop the transaction stack. We need not physically free memory + * In any case, pop the transaction stack. 
We need not physically free memory * here, since CurTransactionContext is about to be emptied anyway * (if aborting). Beware of the possibility of aborting the same nesting * level twice, though. @@ -987,7 +987,7 @@ AtEOSubXact_Inval(bool isCommit) * in a transaction. * * Here, we send no messages to the shared queue, since we don't know yet if - * we will commit. We do need to locally process the CurrentCmdInvalidMsgs + * we will commit. We do need to locally process the CurrentCmdInvalidMsgs * list, so as to flush our caches of any entries we have outdated in the * current command. We then move the current-cmd list over to become part * of the prior-cmds list. @@ -1094,7 +1094,7 @@ CacheInvalidateHeapTuple(Relation relation, * This essentially means that only backends in this same database * will react to the relcache flush request. This is in fact * appropriate, since only those backends could see our pg_attribute - * change anyway. It looks a bit ugly though. (In practice, shared + * change anyway. It looks a bit ugly though. (In practice, shared * relations can't have schema changes after bootstrap, so we should * never come here for a shared rel anyway.) */ @@ -1106,7 +1106,7 @@ CacheInvalidateHeapTuple(Relation relation, /* * When a pg_index row is updated, we should send out a relcache inval - * for the index relation. As above, we don't know the shared status + * for the index relation. As above, we don't know the shared status * of the index, but in practice it doesn't matter since indexes of * shared catalogs can't have such updates. */ @@ -1214,7 +1214,7 @@ CacheInvalidateRelcacheByRelid(Oid relid) * * Sending this type of invalidation msg forces other backends to close open * smgr entries for the rel. This should be done to flush dangling open-file - * references when the physical rel is being dropped or truncated. Because + * references when the physical rel is being dropped or truncated. Because * these are nontransactional (i.e., not-rollback-able) operations, we just * send the inval message immediately without any queuing. * diff --git a/src/backend/utils/cache/lsyscache.c b/src/backend/utils/cache/lsyscache.c index a4ce7163ea6..4b5ef99531b 100644 --- a/src/backend/utils/cache/lsyscache.c +++ b/src/backend/utils/cache/lsyscache.c @@ -186,13 +186,13 @@ get_opfamily_member(Oid opfamily, Oid lefttype, Oid righttype, * (This indicates that the operator is not a valid ordering operator.) * * Note: the operator could be registered in multiple families, for example - * if someone were to build a "reverse sort" opfamily. This would result in + * if someone were to build a "reverse sort" opfamily. This would result in * uncertainty as to whether "ORDER BY USING op" would default to NULLS FIRST * or NULLS LAST, as well as inefficient planning due to failure to match up * pathkeys that should be the same. So we want a determinate result here. * Because of the way the syscache search works, we'll use the interpretation * associated with the opfamily with smallest OID, which is probably - * determinate enough. Since there is no longer any particularly good reason + * determinate enough. Since there is no longer any particularly good reason * to build reverse-sort opfamilies, it doesn't seem worth expending any * additional effort on ensuring consistency. 
*/ @@ -403,7 +403,7 @@ get_ordering_op_for_equality_op(Oid opno, bool use_lhs_type) * * The planner currently uses simple equal() tests to compare the lists * returned by this function, which makes the list order relevant, though - * strictly speaking it should not be. Because of the way syscache list + * strictly speaking it should not be. Because of the way syscache list * searches are handled, in normal operation the result will be sorted by OID * so everything works fine. If running with system index usage disabled, * the result ordering is unspecified and hence the planner might fail to @@ -1212,7 +1212,7 @@ op_mergejoinable(Oid opno, Oid inputtype) * * In some cases (currently only array_eq), hashjoinability depends on the * specific input data type the operator is invoked for, so that must be - * passed as well. We currently assume that only one input's type is needed + * passed as well. We currently assume that only one input's type is needed * to check this --- by convention, pass the left input's data type. */ bool @@ -1880,7 +1880,7 @@ get_typbyval(Oid typid) * A two-fer: given the type OID, return both typlen and typbyval. * * Since both pieces of info are needed to know how to copy a Datum, - * many places need both. Might as well get them with one cache lookup + * many places need both. Might as well get them with one cache lookup * instead of two. Also, this routine raises an error instead of * returning a bogus value when given a bad type OID. */ diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c index d492cbb55e7..d03d3b3cdff 100644 --- a/src/backend/utils/cache/plancache.c +++ b/src/backend/utils/cache/plancache.c @@ -11,7 +11,7 @@ * The logic for choosing generic or custom plans is in choose_custom_plan, * which see for comments. * - * Cache invalidation is driven off sinval events. Any CachedPlanSource + * Cache invalidation is driven off sinval events. Any CachedPlanSource * that matches the event is marked invalid, as is its generic CachedPlan * if it has one. When (and if) the next demand for a cached plan occurs, * parse analysis and rewrite is repeated to build a new valid query tree, @@ -27,7 +27,7 @@ * caller to notice changes and cope with them. * * Currently, we track exactly the dependencies of plans on relations and - * user-defined functions. On relcache invalidation events or pg_proc + * user-defined functions. On relcache invalidation events or pg_proc * syscache invalidation events, we invalidate just those plans that depend * on the particular object being modified. (Note: this scheme assumes * that any table modification that requires replanning will generate a @@ -123,7 +123,7 @@ InitPlanCache(void) * CreateCachedPlan: initially create a plan cache entry. * * Creation of a cached plan is divided into two steps, CreateCachedPlan and - * CompleteCachedPlan. CreateCachedPlan should be called after running the + * CompleteCachedPlan. CreateCachedPlan should be called after running the * query through raw_parser, but before doing parse analysis and rewrite; * CompleteCachedPlan is called after that. The reason for this arrangement * is that it can save one round of copying of the raw parse tree, since @@ -217,7 +217,7 @@ CreateCachedPlan(Node *raw_parse_tree, * in that context. * * A one-shot plan cannot be saved or copied, since we make no effort to - * preserve the raw parse tree unmodified. There is also no support for + * preserve the raw parse tree unmodified. 
There is also no support for * invalidation, so plan use must be completed in the current transaction, * and DDL that might invalidate the querytree_list must be avoided as well. * @@ -274,13 +274,13 @@ CreateOneShotCachedPlan(Node *raw_parse_tree, * CompleteCachedPlan: second step of creating a plan cache entry. * * Pass in the analyzed-and-rewritten form of the query, as well as the - * required subsidiary data about parameters and such. All passed values will + * required subsidiary data about parameters and such. All passed values will * be copied into the CachedPlanSource's memory, except as specified below. * After this is called, GetCachedPlan can be called to obtain a plan, and * optionally the CachedPlanSource can be saved using SaveCachedPlan. |