author     Tom Lane    2015-05-24 19:04:10 +0000
committer  Tom Lane    2015-05-24 19:04:10 +0000
commit     2aa0476dc38f7e510b8cde627e83b4c76fa05d61 (patch)
tree       19dad7292f6cad84a485e9670c786b2f1dcd3c18 /src/backend/executor
parent     17b48a1a9f87f7479d38dcc78a27c23f1f8124f8 (diff)
Manual cleanup of pgindent results.
Fix some places where pgindent did silly stuff, often because project style wasn't followed to begin with. (I've not touched the atomics headers, though.)
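Both kinds of damage fixed below are visible in the hunks themselves: a boolean operator jammed against the next token (the ||IsA in execUtils.c) and a comment re-flowed so that interior * characters land mid-sentence (nodeHash.c). As a minimal compilable sketch of the first artifact, with stand-in definitions rather than PostgreSQL's real macro and node types:

    #include <stdbool.h>

    struct Node { int tag; };
    #define IsA(nodeptr, t) ((nodeptr)->tag != 0)   /* stand-in, not PostgreSQL's macro */

    static bool
    check(struct Node *node)
    {
        /* pgindent's output: operator jammed against the next call */
        if (IsA(node, Aggref) ||IsA(node, GroupingFunc))
            return false;
        return true;
    }

The cleanup in the first hunk goes a step further and splits such a test into separate if statements, so each check sits on its own line and pgindent has nothing to re-jam.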
Diffstat (limited to 'src/backend/executor')
-rw-r--r--  src/backend/executor/execUtils.c   4
-rw-r--r--  src/backend/executor/nodeAgg.c     8
-rw-r--r--  src/backend/executor/nodeHash.c    8
3 files changed, 12 insertions(+), 8 deletions(-)
diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c
index 7e15b797a7e..3c611b938bc 100644
--- a/src/backend/executor/execUtils.c
+++ b/src/backend/executor/execUtils.c
@@ -645,10 +645,12 @@ get_last_attnums(Node *node, ProjectionInfo *projInfo)
* overall targetlist's econtext. GroupingFunc arguments are never
* evaluated at all.
*/
- if (IsA(node, Aggref) ||IsA(node, GroupingFunc))
+ if (IsA(node, Aggref))
return false;
if (IsA(node, WindowFunc))
return false;
+ if (IsA(node, GroupingFunc))
+ return false;
return expression_tree_walker(node, get_last_attnums,
(void *) projInfo);
}
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index 31d74e94778..2bf48c54e3c 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -1519,8 +1519,9 @@ agg_retrieve_direct(AggState *aggstate)
/*
* get state info from node
*
- * econtext is the per-output-tuple expression context tmpcontext is the
- * per-input-tuple expression context
+ * econtext is the per-output-tuple expression context
+ *
+ * tmpcontext is the per-input-tuple expression context
*/
econtext = aggstate->ss.ps.ps_ExprContext;
tmpcontext = aggstate->tmpcontext;
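The reworded comment separates two contexts with different reset cadences: tmpcontext lives for one input row, econtext for one output (group) tuple. A rough, self-contained sketch of that lifetime pattern; the loop shape, reset_context, and all names here are illustrative assumptions, not the executor's actual code:

    typedef struct ExprContext ExprContext;

    extern void reset_context(ExprContext *cxt);    /* hypothetical per-context reset */

    static void
    process_group(ExprContext *econtext, ExprContext *tmpcontext, int nrows)
    {
        for (int i = 0; i < nrows; i++)
        {
            /* ... evaluate transition expressions for one input row ... */
            reset_context(tmpcontext);  /* per-input-tuple lifetime */
        }
        /* ... project the aggregated output row using econtext ... */
        reset_context(econtext);        /* per-output-tuple lifetime */
    }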
@@ -1609,7 +1610,7 @@ agg_retrieve_direct(AggState *aggstate)
else
nextSetSize = 0;
- /*-
+ /*----------
* If a subgroup for the current grouping set is present, project it.
*
* We have a new group if:
@@ -1624,6 +1625,7 @@ agg_retrieve_direct(AggState *aggstate)
* AND
* - the previous and pending rows differ on the grouping columns
* of the next grouping set
+ *----------
*/
if (aggstate->input_done ||
(node->aggstrategy == AGG_SORTED &&
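The dashed comment enumerates the new-group condition that the if statement beginning at the end of this hunk implements. Written out as a standalone boolean it reads roughly as below; every parameter name is a hypothetical stand-in for the corresponding check in nodeAgg.c:

    #include <stdbool.h>

    /* Paraphrase of the condition documented above; names are invented. */
    static bool
    have_new_group(bool input_done, bool agg_sorted,
                   bool last_row_of_prior_set,
                   bool next_set_cols_differ)
    {
        return input_done ||
            (agg_sorted &&
             last_row_of_prior_set &&
             next_set_cols_differ);
    }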
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index 2a049240549..906cb46b658 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -527,8 +527,8 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
* Buckets are simple pointers to hashjoin tuples, while tupsize
* includes the pointer, hash code, and MinimalTupleData. So buckets
* should never really exceed 25% of work_mem (even for
- * NTUP_PER_BUCKET=1); except maybe * for work_mem values that are not
- * 2^N bytes, where we might get more * because of doubling. So let's
+ * NTUP_PER_BUCKET=1); except maybe for work_mem values that are not
+ * 2^N bytes, where we might get more because of doubling. So let's
* look for 50% here.
*/
Assert(bucket_bytes <= hash_table_bytes / 2);
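A quick back-of-the-envelope check of the 25%-versus-50% reasoning in that comment, using illustrative sizes (8-byte pointers, a 4-byte hash code, a nominal 20 bytes of tuple body; the real struct sizes differ, but the ratio argument is the same):

    #include <stdio.h>

    int
    main(void)
    {
        double  ntuples = 1e6;
        double  bucket_bytes = ntuples * 8;            /* one pointer per tuple at NTUP_PER_BUCKET=1 */
        double  tuple_bytes = ntuples * (8 + 4 + 20);  /* pointer + hash code + tuple body */

        /* Prints 20%: under the 25% bound cited above.  Rounding nbuckets up
         * to the next power of 2 can at most double that share, hence the
         * Assert against 50% rather than 25%. */
        printf("bucket share: %.0f%%\n",
               100.0 * bucket_bytes / (bucket_bytes + tuple_bytes));
        return 0;
    }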
@@ -691,9 +691,9 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
if (batchno == curbatch)
{
/* keep tuple in memory - copy it into the new chunk */
- HashJoinTuple copyTuple =
- (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
+ HashJoinTuple copyTuple;
+ copyTuple = (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
memcpy(copyTuple, hashTuple, hashTupleSize);
/* and add it back to the appropriate bucket */