author     Tom Lane  2017-08-14 21:29:33 +0000
committer  Tom Lane  2017-08-14 21:29:33 +0000
commit     21d304dfedb4f26d0d6587d9ac39b1b5c499bb55 (patch)
tree       bd5328464e037f15bf069fb91d54db06509b459c /src/backend
parent     5b6289c1e07dc45f09c3169a189e60d2fcaec2b3 (diff)
Final pgindent + perltidy run for v10.
Diffstat (limited to 'src/backend')
-rw-r--r--  src/backend/access/hash/hashpage.c              8
-rw-r--r--  src/backend/access/transam/slru.c               2
-rw-r--r--  src/backend/catalog/namespace.c                 14
-rw-r--r--  src/backend/catalog/partition.c                 6
-rw-r--r--  src/backend/commands/copy.c                     3
-rw-r--r--  src/backend/commands/subscriptioncmds.c         3
-rw-r--r--  src/backend/commands/tablecmds.c                4
-rw-r--r--  src/backend/commands/trigger.c                  14
-rw-r--r--  src/backend/commands/vacuumlazy.c               10
-rw-r--r--  src/backend/executor/execProcnode.c             6
-rw-r--r--  src/backend/executor/nodeModifyTable.c          8
-rw-r--r--  src/backend/libpq/be-secure-openssl.c           5
-rw-r--r--  src/backend/optimizer/geqo/geqo_cx.c            2
-rw-r--r--  src/backend/optimizer/geqo/geqo_ox1.c           2
-rw-r--r--  src/backend/optimizer/geqo/geqo_ox2.c           2
-rw-r--r--  src/backend/optimizer/geqo/geqo_px.c            2
-rw-r--r--  src/backend/optimizer/geqo/geqo_recombination.c 2
-rw-r--r--  src/backend/parser/parse_utilcmd.c              4
-rw-r--r--  src/backend/replication/logical/launcher.c      11
-rw-r--r--  src/backend/replication/logical/origin.c        6
-rw-r--r--  src/backend/replication/logical/snapbuild.c     6
-rw-r--r--  src/backend/replication/slot.c                  6
-rw-r--r--  src/backend/replication/syncrep.c               8
-rw-r--r--  src/backend/storage/ipc/procarray.c             4
-rw-r--r--  src/backend/utils/adt/ruleutils.c               4
25 files changed, 73 insertions(+), 69 deletions(-)
diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c
index 08eaf1d7bf4..7b2906b0ca9 100644
--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c
@@ -1320,10 +1320,10 @@ _hash_splitbucket(Relation rel,
/*
* If possible, clean up the old bucket. We might not be able to do this
* if someone else has a pin on it, but if not then we can go ahead. This
- * isn't absolutely necessary, but it reduces bloat; if we don't do it now,
- * VACUUM will do it eventually, but maybe not until new overflow pages
- * have been allocated. Note that there's no need to clean up the new
- * bucket.
+ * isn't absolutely necessary, but it reduces bloat; if we don't do it
+ * now, VACUUM will do it eventually, but maybe not until new overflow
+ * pages have been allocated. Note that there's no need to clean up the
+ * new bucket.
*/
if (IsBufferCleanupOK(bucket_obuf))
{
diff --git a/src/backend/access/transam/slru.c b/src/backend/access/transam/slru.c
index d037c369a72..77edc51e1c9 100644
--- a/src/backend/access/transam/slru.c
+++ b/src/backend/access/transam/slru.c
@@ -233,7 +233,7 @@ SimpleLruInit(SlruCtl ctl, const char *name, int nslots, int nlsns,
}
/* Should fit to estimated shmem size */
- Assert(ptr - (char *) shared <= SimpleLruShmemSize(nslots, nlsns));
+ Assert(ptr - (char *) shared <= SimpleLruShmemSize(nslots, nlsns));
}
else
Assert(found);
diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c
index 6859a973632..5d71302ded2 100644
--- a/src/backend/catalog/namespace.c
+++ b/src/backend/catalog/namespace.c
@@ -3802,14 +3802,14 @@ InitTempTableNamespace(void)
get_database_name(MyDatabaseId))));
/*
- * Do not allow a Hot Standby session to make temp tables. Aside
- * from problems with modifying the system catalogs, there is a naming
+ * Do not allow a Hot Standby session to make temp tables. Aside from
+ * problems with modifying the system catalogs, there is a naming
* conflict: pg_temp_N belongs to the session with BackendId N on the
- * master, not to a hot standby session with the same BackendId. We should not
- * be able to get here anyway due to XactReadOnly checks, but let's just
- * make real sure. Note that this also backstops various operations that
- * allow XactReadOnly transactions to modify temp tables; they'd need
- * RecoveryInProgress checks if not for this.
+ * master, not to a hot standby session with the same BackendId. We
+ * should not be able to get here anyway due to XactReadOnly checks, but
+ * let's just make real sure. Note that this also backstops various
+ * operations that allow XactReadOnly transactions to modify temp tables;
+ * they'd need RecoveryInProgress checks if not for this.
*/
if (RecoveryInProgress())
ereport(ERROR,
diff --git a/src/backend/catalog/partition.c b/src/backend/catalog/partition.c
index 0e4b343ab28..71bc4b3d105 100644
--- a/src/backend/catalog/partition.c
+++ b/src/backend/catalog/partition.c
@@ -728,9 +728,9 @@ check_new_partition_bound(char *relname, Relation parent,
errmsg("empty range bound specified for partition \"%s\"",
relname),
errdetail("Specified lower bound %s is greater than or equal to upper bound %s.",
- get_range_partbound_string(spec->lowerdatums),
- get_range_partbound_string(spec->upperdatums)),
- parser_errposition(pstate, spec->location)));
+ get_range_partbound_string(spec->lowerdatums),
+ get_range_partbound_string(spec->upperdatums)),
+ parser_errposition(pstate, spec->location)));
}
if (partdesc->nparts > 0)
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index 53e296559a9..a258965c200 100644
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -1454,7 +1454,7 @@ BeginCopy(ParseState *pstate,
*/
if (cstate->transition_capture != NULL)
{
- int i;
+ int i;
cstate->transition_tupconv_maps = (TupleConversionMap **)
palloc0(sizeof(TupleConversionMap *) *
@@ -2651,6 +2651,7 @@ CopyFrom(CopyState cstate)
cstate->transition_capture->tcs_map = NULL;
}
}
+
/*
* We might need to convert from the parent rowtype to the
* partition rowtype.
diff --git a/src/backend/commands/subscriptioncmds.c b/src/backend/commands/subscriptioncmds.c
index ae40f7164d8..005e74201d4 100644
--- a/src/backend/commands/subscriptioncmds.c
+++ b/src/backend/commands/subscriptioncmds.c
@@ -919,9 +919,10 @@ DropSubscription(DropSubscriptionStmt *stmt, bool isTopLevel)
LWLockAcquire(LogicalRepWorkerLock, LW_SHARED);
subworkers = logicalrep_workers_find(subid, false);
LWLockRelease(LogicalRepWorkerLock);
- foreach (lc, subworkers)
+ foreach(lc, subworkers)
{
LogicalRepWorker *w = (LogicalRepWorker *) lfirst(lc);
+
if (slotname)
logicalrep_worker_stop(w->subid, w->relid);
else
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 2afde0abd8b..513a9ec4857 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -13509,8 +13509,8 @@ ATExecAttachPartition(List **wqueue, Relation rel, PartitionCmd *cmd)
* having to construct this list again, so we request the strongest lock
* on all partitions. We need the strongest lock, because we may decide
* to scan them if we find out that the table being attached (or its leaf
- * partitions) may contain rows that violate the partition constraint.
- * If the table has a constraint that would prevent such rows, which by
+ * partitions) may contain rows that violate the partition constraint. If
+ * the table has a constraint that would prevent such rows, which by
* definition is present in all the partitions, we need not scan the
* table, nor its partitions. But we cannot risk a deadlock by taking a
* weaker lock now and the stronger one only when needed.
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index b502941b08b..da0850bfd6d 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -2071,11 +2071,11 @@ FindTriggerIncompatibleWithInheritance(TriggerDesc *trigdesc)
{
if (trigdesc != NULL)
{
- int i;
+ int i;
for (i = 0; i < trigdesc->numtriggers; ++i)
{
- Trigger *trigger = &trigdesc->triggers[i];
+ Trigger *trigger = &trigdesc->triggers[i];
if (trigger->tgoldtable != NULL || trigger->tgnewtable != NULL)
return trigger->tgname;
@@ -5253,12 +5253,12 @@ AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
*/
if (row_trigger && transition_capture != NULL)
{
- HeapTuple original_insert_tuple = transition_capture->tcs_original_insert_tuple;
+ HeapTuple original_insert_tuple = transition_capture->tcs_original_insert_tuple;
TupleConversionMap *map = transition_capture->tcs_map;
- bool delete_old_table = transition_capture->tcs_delete_old_table;
- bool update_old_table = transition_capture->tcs_update_old_table;
- bool update_new_table = transition_capture->tcs_update_new_table;
- bool insert_new_table = transition_capture->tcs_insert_new_table;;
+ bool delete_old_table = transition_capture->tcs_delete_old_table;
+ bool update_old_table = transition_capture->tcs_update_old_table;
+ bool update_new_table = transition_capture->tcs_update_new_table;
+ bool insert_new_table = transition_capture->tcs_insert_new_table;;
if ((event == TRIGGER_EVENT_DELETE && delete_old_table) ||
(event == TRIGGER_EVENT_UPDATE && update_old_table))
diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
index fabb2f8d527..e9b4045fe56 100644
--- a/src/backend/commands/vacuumlazy.c
+++ b/src/backend/commands/vacuumlazy.c
@@ -529,11 +529,11 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
* safely set for relfrozenxid or relminmxid.
*
* Before entering the main loop, establish the invariant that
- * next_unskippable_block is the next block number >= blkno that we
- * can't skip based on the visibility map, either all-visible for a
- * regular scan or all-frozen for an aggressive scan. We set it to
- * nblocks if there's no such block. We also set up the skipping_blocks
- * flag correctly at this stage.
+ * next_unskippable_block is the next block number >= blkno that we can't
+ * skip based on the visibility map, either all-visible for a regular scan
+ * or all-frozen for an aggressive scan. We set it to nblocks if there's
+ * no such block. We also set up the skipping_blocks flag correctly at
+ * this stage.
*
* Note: The value returned by visibilitymap_get_status could be slightly
* out-of-date, since we make this test before reading the corresponding
diff --git a/src/backend/executor/execProcnode.c b/src/backend/executor/execProcnode.c
index 396920c0a23..36d2914249c 100644
--- a/src/backend/executor/execProcnode.c
+++ b/src/backend/executor/execProcnode.c
@@ -411,9 +411,9 @@ ExecProcNodeFirst(PlanState *node)
/*
* Perform stack depth check during the first execution of the node. We
* only do so the first time round because it turns out to not be cheap on
- * some common architectures (eg. x86). This relies on the assumption that
- * ExecProcNode calls for a given plan node will always be made at roughly
- * the same stack depth.
+ * some common architectures (eg. x86). This relies on the assumption
+ * that ExecProcNode calls for a given plan node will always be made at
+ * roughly the same stack depth.
*/
check_stack_depth();
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index 30add8e3c7a..36b2b43bc62 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -1469,7 +1469,7 @@ static void
ExecSetupTransitionCaptureState(ModifyTableState *mtstate, EState *estate)
{
ResultRelInfo *targetRelInfo = getASTriggerResultRelInfo(mtstate);
- int i;
+ int i;
/* Check for transition tables on the directly targeted relation. */
mtstate->mt_transition_capture =
@@ -1483,7 +1483,7 @@ ExecSetupTransitionCaptureState(ModifyTableState *mtstate, EState *estate)
if (mtstate->mt_transition_capture != NULL)
{
ResultRelInfo *resultRelInfos;
- int numResultRelInfos;
+ int numResultRelInfos;
/* Find the set of partitions so that we can find their TupleDescs. */
if (mtstate->mt_partition_dispatch_info != NULL)
@@ -2254,8 +2254,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
else if (relkind == RELKIND_FOREIGN_TABLE)
{
/*
- * When there is a row-level trigger, there should be a
- * wholerow attribute.
+ * When there is a row-level trigger, there should be
+ * a wholerow attribute.
*/
j->jf_junkAttNo = ExecFindJunkAttribute(j, "wholerow");
}
diff --git a/src/backend/libpq/be-secure-openssl.c b/src/backend/libpq/be-secure-openssl.c
index 00f17f78431..fe15227a773 100644
--- a/src/backend/libpq/be-secure-openssl.c
+++ b/src/backend/libpq/be-secure-openssl.c
@@ -730,9 +730,10 @@ be_tls_write(Port *port, void *ptr, size_t len, int *waitfor)
n = -1;
break;
case SSL_ERROR_ZERO_RETURN:
+
/*
- * the SSL connnection was closed, leave it to the caller
- * to ereport it
+ * the SSL connnection was closed, leave it to the caller to
+ * ereport it
*/
errno = ECONNRESET;
n = -1;
diff --git a/src/backend/optimizer/geqo/geqo_cx.c b/src/backend/optimizer/geqo/geqo_cx.c
index d05327d8abd..a54690884a4 100644
--- a/src/backend/optimizer/geqo/geqo_cx.c
+++ b/src/backend/optimizer/geqo/geqo_cx.c
@@ -46,7 +46,7 @@
*/
int
cx(PlannerInfo *root, Gene *tour1, Gene *tour2, Gene *offspring,
- int num_gene, City *city_table)
+ int num_gene, City * city_table)
{
int i,
start_pos,
diff --git a/src/backend/optimizer/geqo/geqo_ox1.c b/src/backend/optimizer/geqo/geqo_ox1.c
index 53dacb811f6..10d2d0a33ae 100644
--- a/src/backend/optimizer/geqo/geqo_ox1.c
+++ b/src/backend/optimizer/geqo/geqo_ox1.c
@@ -45,7 +45,7 @@
*/
void
ox1(PlannerInfo *root, Gene *tour1, Gene *tour2, Gene *offspring, int num_gene,
- City *city_table)
+ City * city_table)
{
int left,
right,
diff --git a/src/backend/optimizer/geqo/geqo_ox2.c b/src/backend/optimizer/geqo/geqo_ox2.c
index 8d5baa9826f..72b9b0fb871 100644
--- a/src/backend/optimizer/geqo/geqo_ox2.c
+++ b/src/backend/optimizer/geqo/geqo_ox2.c
@@ -44,7 +44,7 @@
* position crossover
*/
void
-ox2(PlannerInfo *root, Gene *tour1, Gene *tour2, Gene *offspring, int num_gene, City *city_table)
+ox2(PlannerInfo *root, Gene *tour1, Gene *tour2, Gene *offspring, int num_gene, City * city_table)
{
int k,
j,
diff --git a/src/backend/optimizer/geqo/geqo_px.c b/src/backend/optimizer/geqo/geqo_px.c
index 2e7748c5aab..ad5ad3f1e57 100644
--- a/src/backend/optimizer/geqo/geqo_px.c
+++ b/src/backend/optimizer/geqo/geqo_px.c
@@ -45,7 +45,7 @@
*/
void
px(PlannerInfo *root, Gene *tour1, Gene *tour2, Gene *offspring, int num_gene,
- City *city_table)
+ City * city_table)
{
int num_positions;
int i,
diff --git a/src/backend/optimizer/geqo/geqo_recombination.c b/src/backend/optimizer/geqo/geqo_recombination.c
index eb6ab428088..a5d3e47ad11 100644
--- a/src/backend/optimizer/geqo/geqo_recombination.c
+++ b/src/backend/optimizer/geqo/geqo_recombination.c
@@ -84,7 +84,7 @@ alloc_city_table(PlannerInfo *root, int num_gene)
* deallocate memory of city table
*/
void
-free_city_table(PlannerInfo *root, City *city_table)
+free_city_table(PlannerInfo *root, City * city_table)
{
pfree(city_table);
}
diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c
index 87cb4188a39..495ba3dffcb 100644
--- a/src/backend/parser/parse_utilcmd.c
+++ b/src/backend/parser/parse_utilcmd.c
@@ -2131,8 +2131,8 @@ transformCheckConstraints(CreateStmtContext *cxt, bool skipValidation)
/*
* If creating a new table (but not a foreign table), we can safely skip
- * validation of check constraints, and nonetheless mark them valid.
- * (This will override any user-supplied NOT VALID flag.)
+ * validation of check constraints, and nonetheless mark them valid. (This
+ * will override any user-supplied NOT VALID flag.)
*/
if (skipValidation)
{
diff --git a/src/backend/replication/logical/launcher.c b/src/backend/replication/logical/launcher.c
index 0f9e5755b9e..4c6d4b27723 100644
--- a/src/backend/replication/logical/launcher.c
+++ b/src/backend/replication/logical/launcher.c
@@ -75,8 +75,8 @@ LogicalRepCtxStruct *LogicalRepCtx;
typedef struct LogicalRepWorkerId
{
- Oid subid;
- Oid relid;
+ Oid subid;
+ Oid relid;
} LogicalRepWorkerId;
static List *on_commit_stop_workers = NIL;
@@ -552,7 +552,7 @@ void
logicalrep_worker_stop_at_commit(Oid subid, Oid relid)
{
LogicalRepWorkerId *wid;
- MemoryContext oldctx;
+ MemoryContext oldctx;
/* Make sure we store the info in context that survives until commit. */
oldctx = MemoryContextSwitchTo(TopTransactionContext);
@@ -824,11 +824,12 @@ AtEOXact_ApplyLauncher(bool isCommit)
{
if (isCommit)
{
- ListCell *lc;
+ ListCell *lc;
- foreach (lc, on_commit_stop_workers)
+ foreach(lc, on_commit_stop_workers)
{
LogicalRepWorkerId *wid = lfirst(lc);
+
logicalrep_worker_stop(wid->subid, wid->relid);
}
diff --git a/src/backend/replication/logical/origin.c b/src/backend/replication/logical/origin.c
index 9e1b19bb354..14cb3d0bf23 100644
--- a/src/backend/replication/logical/origin.c
+++ b/src/backend/replication/logical/origin.c
@@ -353,7 +353,7 @@ restart:
{
if (state->acquired_by != 0)
{
- ConditionVariable *cv;
+ ConditionVariable *cv;
if (nowait)
ereport(ERROR,
@@ -977,7 +977,7 @@ replorigin_get_progress(RepOriginId node, bool flush)
static void
ReplicationOriginExitCleanup(int code, Datum arg)
{
- ConditionVariable *cv = NULL;
+ ConditionVariable *cv = NULL;
LWLockAcquire(ReplicationOriginLock, LW_EXCLUSIVE);
@@ -1097,7 +1097,7 @@ replorigin_session_setup(RepOriginId node)
void
replorigin_session_reset(void)
{
- ConditionVariable *cv;
+ ConditionVariable *cv;
Assert(max_replication_slots != 0);
diff --git a/src/backend/replication/logical/snapbuild.c b/src/backend/replication/logical/snapbuild.c
index 0ca4fa5d256..fba57a0470c 100644
--- a/src/backend/replication/logical/snapbuild.c
+++ b/src/backend/replication/logical/snapbuild.c
@@ -1117,9 +1117,9 @@ SnapBuildProcessRunningXacts(SnapBuild *builder, XLogRecPtr lsn, xl_running_xact
* only ever look at those.
*
* NB: We only increase xmax when a catalog modifying transaction commits
- * (see SnapBuildCommitTxn). Because of this, xmax can be lower than xmin,
- * which looks odd but is correct and actually more efficient, since we hit
- * fast paths in tqual.c.
+ * (see SnapBuildCommitTxn). Because of this, xmax can be lower than
+ * xmin, which looks odd but is correct and actually more efficient, since
+ * we hit fast paths in tqual.c.
*/
builder->xmin = running->oldestRunningXid;
diff --git a/src/backend/replication/slot.c b/src/backend/replication/slot.c
index 63e1aaa9102..a8a16f55e98 100644
--- a/src/backend/replication/slot.c
+++ b/src/backend/replication/slot.c
@@ -351,8 +351,8 @@ retry:
if (s->in_use && strcmp(name, NameStr(s->data.name)) == 0)
{
/*
- * This is the slot we want. We don't know yet if it's active,
- * so get ready to sleep on it in case it is. (We may end up not
+ * This is the slot we want. We don't know yet if it's active, so
+ * get ready to sleep on it in case it is. (We may end up not
* sleeping, but we don't want to do this while holding the
* spinlock.)
*/
@@ -397,7 +397,7 @@ retry:
goto retry;
}
else
- ConditionVariableCancelSleep(); /* no sleep needed after all */
+ ConditionVariableCancelSleep(); /* no sleep needed after all */
/* Let everybody know we've modified this slot */
ConditionVariableBroadcast(&slot->active_cv);
diff --git a/src/backend/replication/syncrep.c b/src/backend/replication/syncrep.c
index 77e80f16123..8677235411c 100644
--- a/src/backend/replication/syncrep.c
+++ b/src/backend/replication/syncrep.c
@@ -293,8 +293,8 @@ SyncRepWaitForLSN(XLogRecPtr lsn, bool commit)
* WalSender has checked our LSN and has removed us from queue. Clean up
* state and leave. It's OK to reset these shared memory fields without
* holding SyncRepLock, because any walsenders will ignore us anyway when
- * we're not on the queue. We need a read barrier to make sure we see
- * the changes to the queue link (this might be unnecessary without
+ * we're not on the queue. We need a read barrier to make sure we see the
+ * changes to the queue link (this might be unnecessary without
* assertions, but better safe than sorry).
*/
pg_read_barrier();
@@ -715,7 +715,7 @@ SyncRepGetSyncStandbysQuorum(bool *am_sync)
for (i = 0; i < max_wal_senders; i++)
{
XLogRecPtr flush;
- WalSndState state;
+ WalSndState state;
int pid;
walsnd = &WalSndCtl->walsnds[i];
@@ -794,7 +794,7 @@ SyncRepGetSyncStandbysPriority(bool *am_sync)
for (i = 0; i < max_wal_senders; i++)
{
XLogRecPtr flush;
- WalSndState state;
+ WalSndState state;
int pid;
walsnd = &WalSndCtl->walsnds[i];
diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c
index eab218e3166..ff96e2a86fe 100644
--- a/src/backend/storage/ipc/procarray.c
+++ b/src/backend/storage/ipc/procarray.c
@@ -1408,8 +1408,8 @@ GetOldestXmin(Relation rel, int flags)
* being careful not to generate a "permanent" XID.
*
* vacuum_defer_cleanup_age provides some additional "slop" for the
- * benefit of hot standby queries on standby servers. This is quick and
- * dirty, and perhaps not all that useful unless the master has a
+ * benefit of hot standby queries on standby servers. This is quick
+ * and dirty, and perhaps not all that useful unless the master has a
* predictable transaction rate, but it offers some protection when
* there's no walsender connection. Note that we are assuming
* vacuum_defer_cleanup_age isn't large enough to cause wraparound ---
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index 0faa0204cef..e9bd64b7a88 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -8723,8 +8723,8 @@ get_rule_expr(Node *node, deparse_context *context,
list_length(spec->upperdatums));
appendStringInfo(buf, "FOR VALUES FROM %s TO %s",
- get_range_partbound_string(spec->lowerdatums),
- get_range_partbound_string(spec->upperdatums));
+ get_range_partbound_string(spec->lowerdatums),
+ get_range_partbound_string(spec->upperdatums));
break;
default: