From 3708c2a624485c40375117b848c26ff0ff6b539f Mon Sep 17 00:00:00 2001 From: Zhang Mingli Date: Sun, 9 Feb 2025 23:08:44 +0800 Subject: [PATCH] fix: update outdated comments in nodeAgg.c Replaced references to the removed function lookup_hash_entry() in comments with the appropriate current functions. Authored-by: Zhang Mingli avamingli@gmail.com --- src/backend/executor/nodeAgg.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c index ae9cc256b8cd..ceb8c8a8039a 100644 --- a/src/backend/executor/nodeAgg.c +++ b/src/backend/executor/nodeAgg.c @@ -1454,7 +1454,7 @@ find_cols_walker(Node *node, FindColsContext *context) * To implement hashed aggregation, we need a hashtable that stores a * representative tuple and an array of AggStatePerGroup structs for each * distinct set of GROUP BY column values. We compute the hash key from the - * GROUP BY columns. The per-group data is allocated in lookup_hash_entry(), + * GROUP BY columns. The per-group data is allocated in initialize_hash_entry(), * for each entry. * * We have a separate hashtable and associated perhash data structure for each @@ -1551,7 +1551,7 @@ build_hash_table(AggState *aggstate, int setno, long nbuckets) * at all. Only columns of the first two types need to be stored in the * hashtable, and getting rid of the others can make the table entries * significantly smaller. The hashtable only contains the relevant columns, - * and is packed/unpacked in lookup_hash_entry() / agg_retrieve_hash_table() + * and is packed/unpacked in lookup_hash_entries() / agg_retrieve_hash_table() * into the format of the normal input descriptor. * * Additional columns, in addition to the columns grouped by, come from two @@ -2102,8 +2102,6 @@ initialize_hash_entry(AggState *aggstate, TupleHashTable hashtable, /* * Look up hash entries for the current tuple in all hashed grouping sets. 
* - * Be aware that lookup_hash_entry can reset the tmpcontext. - * * Some entries may be left NULL if we are in "spill mode". The same tuple * will belong to different groups for each grouping set, so may match a group * already in memory for one set and match a group not in memory for another