path: root/src/test/regress
author     Tom Lane    2022-10-03 14:56:16 +0000
committer  Tom Lane    2022-10-03 14:56:16 +0000
commit     f4c7c410ee4a7baa06f51ebb8d5333c169691dd3 (patch)
tree       8b0811e2be7edf69c6e2216af085112335374b1b /src/test/regress
parent     f60eb3f2827db292edf71bb7296fbdf5958ace3d (diff)
Revert "Optimize order of GROUP BY keys".
This reverts commit db0d67db2401eb6238ccc04c6407a4fd4f985832 and several follow-on fixes. The idea of making a cost-based choice of the order of the sorting columns is not fundamentally unsound, but it requires cost information and data statistics that we don't really have. For example, relying on procost to distinguish the relative costs of different sort comparators is pretty pointless so long as most such comparator functions are labeled with cost 1.0. Moreover, estimating the number of comparisons done by Quicksort requires more than just an estimate of the number of distinct values in the input: you also need some idea of the sizes of the larger groups, if you want an estimate that's good to better than a factor of three or so. That's data that's often unknown or not very reliable. Worse, to arrive at estimates of the number of calls made to the lower-order-column comparison functions, the code needs to make estimates of the numbers of distinct values of multiple columns, which are necessarily even less trustworthy than per-column stats. Even if all the inputs are perfectly reliable, the cost algorithm as-implemented cannot offer useful information about how to order sorting columns beyond the point at which the average group size is estimated to drop to 1.

Close inspection of the code added by db0d67db2 shows that there are also multiple small bugs. These could have been fixed, but there's not much point if we don't trust the estimates to be accurate in-principle.

Finally, the changes in cost_sort's behavior made for very large changes (often a factor of 2 or so) in the cost estimates for all sorting operations, not only those for multi-column GROUP BY. That naturally changes plan choices in many situations, and there's precious little evidence to show that the changes are for the better. Given the above doubts about whether the new estimates are really trustworthy, it's hard to summon much confidence that these changes are better on the average.

Since we're hard up against the release deadline for v15, let's revert these changes for now. We can always try again later.

Note: in v15, I left T_PathKeyInfo in place in nodes.h even though it's unreferenced. Removing it would be an ABI break, and it seems a bit late in the release cycle for that.

Discussion: https://postgr.es/m/TYAPR01MB586665EB5FB2C3807E893941F5579@TYAPR01MB5866.jpnprd01.prod.outlook.com
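[Editor's illustration of the group-size point above; a minimal, hypothetical Python sketch, not part of this commit or of the PostgreSQL sources. It sorts (k1, k2) pairs and counts how often the lower-order-key comparator actually runs. Two inputs with roughly the same number of distinct values in the first key but different group-size skew yield different tie-break counts, which is why an ndistinct estimate alone is not enough to cost the lower-order comparisons.]

    # Hypothetical sketch, not PostgreSQL code: same first-key ndistinct,
    # different group-size skew, different numbers of second-key comparisons.
    import random
    from functools import cmp_to_key

    def count_tiebreaks(rows):
        """Sort (k1, k2) pairs; count comparisons that fall through to k2."""
        calls = 0
        def cmp(a, b):
            nonlocal calls
            if a[0] != b[0]:
                return -1 if a[0] < b[0] else 1
            calls += 1                     # first keys tied: second key decides
            return (a[1] > b[1]) - (a[1] < b[1])
        sorted(rows, key=cmp_to_key(cmp))
        return calls

    random.seed(0)
    n, ndistinct = 100_000, 100
    uniform = [(random.randrange(ndistinct), random.random()) for _ in range(n)]
    # Roughly the same ndistinct, but 99% of the rows crowd into one group.
    skewed = [(0 if random.random() < 0.99 else random.randrange(1, ndistinct),
               random.random()) for _ in range(n)]
    print("uniform first key:", count_tiebreaks(uniform))
    print("skewed first key: ", count_tiebreaks(skewed))
    # The counts differ even though the per-column ndistinct is the same,
    # because the larger groups dominate the number of tie-break comparisons.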
Diffstat (limited to 'src/test/regress')
-rw-r--r--  src/test/regress/expected/aggregates.out          | 244
-rw-r--r--  src/test/regress/expected/incremental_sort.out    |   2
-rw-r--r--  src/test/regress/expected/join.out                |  51
-rw-r--r--  src/test/regress/expected/merge.out               |  15
-rw-r--r--  src/test/regress/expected/partition_aggregate.out |  44
-rw-r--r--  src/test/regress/expected/partition_join.out      |  75
-rw-r--r--  src/test/regress/expected/sysviews.out            |   3
-rw-r--r--  src/test/regress/expected/union.out               |  60
-rw-r--r--  src/test/regress/sql/aggregates.sql               |  99
-rw-r--r--  src/test/regress/sql/incremental_sort.sql         |   2
10 files changed, 142 insertions, 453 deletions
diff --git a/src/test/regress/expected/aggregates.out b/src/test/regress/expected/aggregates.out
index b2198724e3c..fc2bd40be2d 100644
--- a/src/test/regress/expected/aggregates.out
+++ b/src/test/regress/expected/aggregates.out
@@ -1210,8 +1210,7 @@ explain (costs off)
select distinct min(f1), max(f1) from minmaxtest;
QUERY PLAN
---------------------------------------------------------------------------------------------
- HashAggregate
- Group Key: $0, $1
+ Unique
InitPlan 1 (returns $0)
-> Limit
-> Merge Append
@@ -1234,8 +1233,10 @@ explain (costs off)
-> Index Only Scan using minmaxtest2i on minmaxtest2 minmaxtest_8
Index Cond: (f1 IS NOT NULL)
-> Index Only Scan Backward using minmaxtest3i on minmaxtest3 minmaxtest_9
- -> Result
-(25 rows)
+ -> Sort
+ Sort Key: ($0), ($1)
+ -> Result
+(26 rows)
select distinct min(f1), max(f1) from minmaxtest;
min | max
@@ -2525,241 +2526,6 @@ SELECT balk(hundred) FROM tenk1;
(1 row)
ROLLBACK;
--- GROUP BY optimization by reorder columns
-SELECT
- i AS id,
- i/2 AS p,
- format('%60s', i%2) AS v,
- i/4 AS c,
- i/8 AS d,
- (random() * (10000/8))::int as e --the same as d but no correlation with p
- INTO btg
-FROM
- generate_series(1, 10000) i;
-VACUUM btg;
-ANALYZE btg;
--- GROUP BY optimization by reorder columns by frequency
-SET enable_hashagg=off;
-SET max_parallel_workers= 0;
-SET max_parallel_workers_per_gather = 0;
-EXPLAIN (COSTS off)
-SELECT count(*) FROM btg GROUP BY p, v;
- QUERY PLAN
------------------------------
- GroupAggregate
- Group Key: p, v
- -> Sort
- Sort Key: p, v
- -> Seq Scan on btg
-(5 rows)
-
-EXPLAIN (COSTS off)
-SELECT count(*) FROM btg GROUP BY v, p;
- QUERY PLAN
------------------------------
- GroupAggregate
- Group Key: p, v
- -> Sort
- Sort Key: p, v
- -> Seq Scan on btg
-(5 rows)
-
-EXPLAIN (COSTS off)
-SELECT count(*) FROM btg GROUP BY v, p, c;
- QUERY PLAN
------------------------------
- GroupAggregate
- Group Key: p, c, v
- -> Sort
- Sort Key: p, c, v
- -> Seq Scan on btg
-(5 rows)
-
-EXPLAIN (COSTS off)
-SELECT count(*) FROM btg GROUP BY v, p, c ORDER BY v, p, c;
- QUERY PLAN
------------------------------
- GroupAggregate
- Group Key: v, p, c
- -> Sort
- Sort Key: v, p, c
- -> Seq Scan on btg
-(5 rows)
-
-EXPLAIN (COSTS off)
-SELECT count(*) FROM btg GROUP BY v, p, d, c;
- QUERY PLAN
-------------------------------
- GroupAggregate
- Group Key: p, d, c, v
- -> Sort
- Sort Key: p, d, c, v
- -> Seq Scan on btg
-(5 rows)
-
-EXPLAIN (COSTS off)
-SELECT count(*) FROM btg GROUP BY v, p, d, c ORDER BY v, p, d ,c;
- QUERY PLAN
-------------------------------
- GroupAggregate
- Group Key: v, p, d, c
- -> Sort
- Sort Key: v, p, d, c
- -> Seq Scan on btg
-(5 rows)
-
-EXPLAIN (COSTS off)
-SELECT count(*) FROM btg GROUP BY v, p, d, c ORDER BY p, v, d ,c;
- QUERY PLAN
-------------------------------
- GroupAggregate
- Group Key: p, v, d, c
- -> Sort
- Sort Key: p, v, d, c
- -> Seq Scan on btg
-(5 rows)
-
-EXPLAIN (COSTS off)
-SELECT count(*) FROM btg GROUP BY p, d, e;
- QUERY PLAN
------------------------------
- GroupAggregate
- Group Key: p, d, e
- -> Sort
- Sort Key: p, d, e
- -> Seq Scan on btg
-(5 rows)
-
-EXPLAIN (COSTS off)
-SELECT count(*) FROM btg GROUP BY p, e, d;
- QUERY PLAN
------------------------------
- GroupAggregate
- Group Key: p, e, d
- -> Sort
- Sort Key: p, e, d
- -> Seq Scan on btg
-(5 rows)
-
-CREATE STATISTICS btg_dep ON d, e, p FROM btg;
-ANALYZE btg;
-EXPLAIN (COSTS off)
-SELECT count(*) FROM btg GROUP BY p, d, e;
- QUERY PLAN
------------------------------
- GroupAggregate
- Group Key: p, d, e
- -> Sort
- Sort Key: p, d, e
- -> Seq Scan on btg
-(5 rows)
-
-EXPLAIN (COSTS off)
-SELECT count(*) FROM btg GROUP BY p, e, d;
- QUERY PLAN
------------------------------
- GroupAggregate
- Group Key: p, e, d
- -> Sort
- Sort Key: p, e, d
- -> Seq Scan on btg
-(5 rows)
-
--- GROUP BY optimization by reorder columns by index scan
-CREATE INDEX ON btg(p, v);
-SET enable_seqscan=off;
-SET enable_bitmapscan=off;
-VACUUM btg;
-EXPLAIN (COSTS off)
-SELECT count(*) FROM btg GROUP BY p, v;
- QUERY PLAN
-------------------------------------------------
- GroupAggregate
- Group Key: p, v
- -> Index Only Scan using btg_p_v_idx on btg
-(3 rows)
-
-EXPLAIN (COSTS off)
-SELECT count(*) FROM btg GROUP BY p, v ORDER BY p, v;
- QUERY PLAN
-------------------------------------------------
- GroupAggregate
- Group Key: p, v
- -> Index Only Scan using btg_p_v_idx on btg
-(3 rows)
-
-EXPLAIN (COSTS off)
-SELECT count(*) FROM btg GROUP BY v, p;
- QUERY PLAN
-------------------------------------------------
- GroupAggregate
- Group Key: p, v
- -> Index Only Scan using btg_p_v_idx on btg
-(3 rows)
-
-EXPLAIN (COSTS off)
-SELECT count(*) FROM btg GROUP BY v, p ORDER BY p, v;
- QUERY PLAN
-------------------------------------------------
- GroupAggregate
- Group Key: p, v
- -> Index Only Scan using btg_p_v_idx on btg
-(3 rows)
-
-EXPLAIN (COSTS off)
-SELECT count(*) FROM btg GROUP BY v, p, c;
- QUERY PLAN
--------------------------------------------------
- GroupAggregate
- Group Key: p, c, v
- -> Incremental Sort
- Sort Key: p, c, v
- Presorted Key: p
- -> Index Scan using btg_p_v_idx on btg
-(6 rows)
-
-EXPLAIN (COSTS off)
-SELECT count(*) FROM btg GROUP BY v, p, c ORDER BY p, v;
- QUERY PLAN
--------------------------------------------------
- GroupAggregate
- Group Key: p, v, c
- -> Incremental Sort
- Sort Key: p, v, c
- Presorted Key: p, v
- -> Index Scan using btg_p_v_idx on btg
-(6 rows)
-
-EXPLAIN (COSTS off)
-SELECT count(*) FROM btg GROUP BY v, c, p, d;
- QUERY PLAN
--------------------------------------------------
- GroupAggregate
- Group Key: p, c, d, v
- -> Incremental Sort
- Sort Key: p, c, d, v
- Presorted Key: p
- -> Index Scan using btg_p_v_idx on btg
-(6 rows)
-
-EXPLAIN (COSTS off)
-SELECT count(*) FROM btg GROUP BY v, c, p, d ORDER BY p, v;
- QUERY PLAN
--------------------------------------------------
- GroupAggregate
- Group Key: p, v, c, d
- -> Incremental Sort
- Sort Key: p, v, c, d
- Presorted Key: p, v
- -> Index Scan using btg_p_v_idx on btg
-(6 rows)
-
-DROP TABLE btg;
-RESET enable_hashagg;
-RESET max_parallel_workers;
-RESET max_parallel_workers_per_gather;
-RESET enable_seqscan;
-RESET enable_bitmapscan;
-- Secondly test the case of a parallel aggregate combiner function
-- returning NULL. For that use normal transition function, but a
-- combiner function returning NULL.
diff --git a/src/test/regress/expected/incremental_sort.out b/src/test/regress/expected/incremental_sort.out
index 49953eaaded..0a631124c22 100644
--- a/src/test/regress/expected/incremental_sort.out
+++ b/src/test/regress/expected/incremental_sort.out
@@ -1439,7 +1439,7 @@ set parallel_setup_cost = 0;
set parallel_tuple_cost = 0;
set max_parallel_workers_per_gather = 2;
create table t (a int, b int, c int);
-insert into t select mod(i,10),mod(i,10),i from generate_series(1,60000) s(i);
+insert into t select mod(i,10),mod(i,10),i from generate_series(1,10000) s(i);
create index on t (a);
analyze t;
set enable_incremental_sort = off;
diff --git a/src/test/regress/expected/join.out b/src/test/regress/expected/join.out
index 2ed2e542a44..08334761ae6 100644
--- a/src/test/regress/expected/join.out
+++ b/src/test/regress/expected/join.out
@@ -1984,8 +1984,8 @@ USING (name);
------+----+----
bb | 12 | 13
cc | 22 | 23
- ee | 42 |
dd | | 33
+ ee | 42 |
(4 rows)
-- Cases with non-nullable expressions in subquery results;
@@ -2019,8 +2019,8 @@ NATURAL FULL JOIN
------+------+------+------+------
bb | 12 | 2 | 13 | 3
cc | 22 | 2 | 23 | 3
- ee | 42 | 2 | |
dd | | | 33 | 3
+ ee | 42 | 2 | |
(4 rows)
SELECT * FROM
@@ -4676,20 +4676,18 @@ select d.* from d left join (select * from b group by b.id, b.c_id) s
explain (costs off)
select d.* from d left join (select distinct * from b) s
on d.a = s.id;
- QUERY PLAN
----------------------------------------------
- Merge Left Join
- Merge Cond: (d.a = s.id)
+ QUERY PLAN
+--------------------------------------
+ Merge Right Join
+ Merge Cond: (b.id = d.a)
+ -> Unique
+ -> Sort
+ Sort Key: b.id, b.c_id
+ -> Seq Scan on b
-> Sort
Sort Key: d.a
-> Seq Scan on d
- -> Sort
- Sort Key: s.id
- -> Subquery Scan on s
- -> HashAggregate
- Group Key: b.id, b.c_id
- -> Seq Scan on b
-(11 rows)
+(9 rows)
-- check join removal works when uniqueness of the join condition is enforced
-- by a UNION
@@ -6399,39 +6397,44 @@ select * from j1 natural join j2;
explain (verbose, costs off)
select * from j1
inner join (select distinct id from j3) j3 on j1.id = j3.id;
- QUERY PLAN
------------------------------------
+ QUERY PLAN
+-----------------------------------------
Nested Loop
Output: j1.id, j3.id
Inner Unique: true
Join Filter: (j1.id = j3.id)
- -> HashAggregate
+ -> Unique
Output: j3.id
- Group Key: j3.id
- -> Seq Scan on public.j3
+ -> Sort
Output: j3.id
+ Sort Key: j3.id
+ -> Seq Scan on public.j3
+ Output: j3.id
-> Seq Scan on public.j1
Output: j1.id
-(11 rows)
+(13 rows)
-- ensure group by clause allows the inner to become unique
explain (verbose, costs off)
select * from j1
inner join (select id from j3 group by id) j3 on j1.id = j3.id;
- QUERY PLAN
------------------------------------
+ QUERY PLAN
+-----------------------------------------
Nested Loop
Output: j1.id, j3.id
Inner Unique: true
Join Filter: (j1.id = j3.id)
- -> HashAggregate
+ -> Group
Output: j3.id
Group Key: j3.id
- -> Seq Scan on public.j3
+ -> Sort
Output: j3.id
+ Sort Key: j3.id
+ -> Seq Scan on public.j3
+ Output: j3.id
-> Seq Scan on public.j1
Output: j1.id
-(11 rows)
+(14 rows)
drop table j1;
drop table j2;
diff --git a/src/test/regress/expected/merge.out b/src/test/regress/expected/merge.out
index 4047c3e761e..787af41dfe5 100644
--- a/src/test/regress/expected/merge.out
+++ b/src/test/regress/expected/merge.out
@@ -1460,15 +1460,18 @@ WHEN MATCHED AND t.a < 10 THEN
explain_merge
--------------------------------------------------------------------
Merge on ex_mtarget t (actual rows=0 loops=1)
- -> Hash Join (actual rows=0 loops=1)
- Hash Cond: (s.a = t.a)
- -> Seq Scan on ex_msource s (actual rows=1 loops=1)
- -> Hash (actual rows=0 loops=1)
- Buckets: xxx Batches: xxx Memory Usage: xxx
+ -> Merge Join (actual rows=0 loops=1)
+ Merge Cond: (t.a = s.a)
+ -> Sort (actual rows=0 loops=1)
+ Sort Key: t.a
+ Sort Method: quicksort Memory: xxx
-> Seq Scan on ex_mtarget t (actual rows=0 loops=1)
Filter: (a < '-1000'::integer)
Rows Removed by Filter: 54
-(9 rows)
+ -> Sort (never executed)
+ Sort Key: s.a
+ -> Seq Scan on ex_msource s (never executed)
+(12 rows)
DROP TABLE ex_msource, ex_mtarget;
DROP FUNCTION explain_merge(text);
diff --git a/src/test/regress/expected/partition_aggregate.out b/src/test/regress/expected/partition_aggregate.out
index db36e3a150a..a82b8fb8fb7 100644
--- a/src/test/regress/expected/partition_aggregate.out
+++ b/src/test/regress/expected/partition_aggregate.out
@@ -949,12 +949,12 @@ SET parallel_setup_cost = 0;
-- is not partial agg safe.
EXPLAIN (COSTS OFF)
SELECT a, sum(b), array_agg(distinct c), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER BY 1, 2, 3;
- QUERY PLAN
---------------------------------------------------------------------------------------------
- Gather Merge
- Workers Planned: 2
- -> Sort
- Sort Key: pagg_tab_ml.a, (sum(pagg_tab_ml.b)), (array_agg(DISTINCT pagg_tab_ml.c))
+ QUERY PLAN
+--------------------------------------------------------------------------------------
+ Sort
+ Sort Key: pagg_tab_ml.a, (sum(pagg_tab_ml.b)), (array_agg(DISTINCT pagg_tab_ml.c))
+ -> Gather
+ Workers Planned: 2
-> Parallel Append
-> GroupAggregate
Group Key: pagg_tab_ml.a
@@ -1381,26 +1381,28 @@ SELECT x, sum(y), avg(y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) <
-- When GROUP BY clause does not match; partial aggregation is performed for each partition.
EXPLAIN (COSTS OFF)
SELECT y, sum(x), avg(x), count(*) FROM pagg_tab_para GROUP BY y HAVING avg(x) < 12 ORDER BY 1, 2, 3;
- QUERY PLAN
--------------------------------------------------------------------------------------
+ QUERY PLAN
+-------------------------------------------------------------------------------------------
Sort
Sort Key: pagg_tab_para.y, (sum(pagg_tab_para.x)), (avg(pagg_tab_para.x))
- -> Finalize HashAggregate
+ -> Finalize GroupAggregate
Group Key: pagg_tab_para.y
Filter: (avg(pagg_tab_para.x) < '12'::numeric)
- -> Gather
+ -> Gather Merge
Workers Planned: 2
- -> Parallel Append
- -> Partial HashAggregate
- Group Key: pagg_tab_para.y
- -> Parallel Seq Scan on pagg_tab_para_p1 pagg_tab_para
- -> Partial HashAggregate
- Group Key: pagg_tab_para_1.y
- -> Parallel Seq Scan on pagg_tab_para_p2 pagg_tab_para_1
- -> Partial HashAggregate
- Group Key: pagg_tab_para_2.y
- -> Parallel Seq Scan on pagg_tab_para_p3 pagg_tab_para_2
-(17 rows)
+ -> Sort
+ Sort Key: pagg_tab_para.y
+ -> Parallel Append
+ -> Partial HashAggregate
+ Group Key: pagg_tab_para.y
+ -> Parallel Seq Scan on pagg_tab_para_p1 pagg_tab_para
+ -> Partial HashAggregate
+ Group Key: pagg_tab_para_1.y
+ -> Parallel Seq Scan on pagg_tab_para_p2 pagg_tab_para_1
+ -> Partial HashAggregate
+ Group Key: pagg_tab_para_2.y
+ -> Parallel Seq Scan on pagg_tab_para_p3 pagg_tab_para_2
+(19 rows)
SELECT y, sum(x), avg(x), count(*) FROM pagg_tab_para GROUP BY y HAVING avg(x) < 12 ORDER BY 1, 2, 3;
y | sum | avg | count
diff --git a/src/test/regress/expected/partition_join.out b/src/test/regress/expected/partition_join.out
index 03926a84138..bb5b7c47a45 100644
--- a/src/test/regress/expected/partition_join.out
+++ b/src/test/regress/expected/partition_join.out
@@ -466,41 +466,52 @@ EXPLAIN (COSTS OFF)
SELECT a, b FROM prt1 FULL JOIN prt2 p2(b,a,c) USING(a,b)
WHERE a BETWEEN 490 AND 510
GROUP BY 1, 2 ORDER BY 1, 2;
- QUERY PLAN
------------------------------------------------------------------------------------------------------------
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------
Group
Group Key: (COALESCE(prt1.a, p2.a)), (COALESCE(prt1.b, p2.b))
- -> Sort
+ -> Merge Append
Sort Key: (COALESCE(prt1.a, p2.a)), (COALESCE(prt1.b, p2.b))
- -> Append
- -> Merge Full Join
- Merge Cond: ((prt1_1.a = p2_1.a) AND (prt1_1.b = p2_1.b))
- Filter: ((COALESCE(prt1_1.a, p2_1.a) >= 490) AND (COALESCE(prt1_1.a, p2_1.a) <= 510))
- -> Sort
- Sort Key: prt1_1.a, prt1_1.b
- -> Seq Scan on prt1_p1 prt1_1
- -> Sort
- Sort Key: p2_1.a, p2_1.b
- -> Seq Scan on prt2_p1 p2_1
- -> Merge Full Join
- Merge Cond: ((prt1_2.a = p2_2.a) AND (prt1_2.b = p2_2.b))
- Filter: ((COALESCE(prt1_2.a, p2_2.a) >= 490) AND (COALESCE(prt1_2.a, p2_2.a) <= 510))
- -> Sort
- Sort Key: prt1_2.a, prt1_2.b
- -> Seq Scan on prt1_p2 prt1_2
- -> Sort
- Sort Key: p2_2.a, p2_2.b
- -> Seq Scan on prt2_p2 p2_2
- -> Merge Full Join
- Merge Cond: ((prt1_3.b = p2_3.b) AND (prt1_3.a = p2_3.a))
- Filter: ((COALESCE(prt1_3.a, p2_3.a) >= 490) AND (COALESCE(prt1_3.a, p2_3.a) <= 510))
- -> Sort
- Sort Key: prt1_3.b, prt1_3.a
- -> Seq Scan on prt1_p3 prt1_3
- -> Sort
- Sort Key: p2_3.b, p2_3.a
- -> Seq Scan on prt2_p3 p2_3
-(32 rows)
+ -> Group
+ Group Key: (COALESCE(prt1.a, p2.a)), (COALESCE(prt1.b, p2.b))
+ -> Sort
+ Sort Key: (COALESCE(prt1.a, p2.a)), (COALESCE(prt1.b, p2.b))
+ -> Merge Full Join
+ Merge Cond: ((prt1.a = p2.a) AND (prt1.b = p2.b))
+ Filter: ((COALESCE(prt1.a, p2.a) >= 490) AND (COALESCE(prt1.a, p2.a) <= 510))
+ -> Sort
+ Sort Key: prt1.a, prt1.b
+ -> Seq Scan on prt1_p1 prt1
+ -> Sort
+ Sort Key: p2.a, p2.b
+ -> Seq Scan on prt2_p1 p2
+ -> Group
+ Group Key: (COALESCE(prt1_1.a, p2_1.a)), (COALESCE(prt1_1.b, p2_1.b))
+ -> Sort
+ Sort Key: (COALESCE(prt1_1.a, p2_1.a)), (COALESCE(prt1_1.b, p2_1.b))
+ -> Merge Full Join
+ Merge Cond: ((prt1_1.a = p2_1.a) AND (prt1_1.b = p2_1.b))
+ Filter: ((COALESCE(prt1_1.a, p2_1.a) >= 490) AND (COALESCE(prt1_1.a, p2_1.a) <= 510))
+ -> Sort
+ Sort Key: prt1_1.a, prt1_1.b
+ -> Seq Scan on prt1_p2 prt1_1
+ -> Sort
+ Sort Key: p2_1.a, p2_1.b
+ -> Seq Scan on prt2_p2 p2_1
+ -> Group
+ Group Key: (COALESCE(prt1_2.a, p2_2.a)), (COALESCE(prt1_2.b, p2_2.b))
+ -> Sort
+ Sort Key: (COALESCE(prt1_2.a, p2_2.a)), (COALESCE(prt1_2.b, p2_2.b))
+ -> Merge Full Join
+ Merge Cond: ((prt1_2.a = p2_2.a) AND (prt1_2.b = p2_2.b))
+ Filter: ((COALESCE(prt1_2.a, p2_2.a) >= 490) AND (COALESCE(prt1_2.a, p2_2.a) <= 510))
+ -> Sort
+ Sort Key: prt1_2.a, prt1_2.b
+ -> Seq Scan on prt1_p3 prt1_2
+ -> Sort
+ Sort Key: p2_2.a, p2_2.b
+ -> Seq Scan on prt2_p3 p2_2
+(43 rows)
SELECT a, b FROM prt1 FULL JOIN prt2 p2(b,a,c) USING(a,b)
WHERE a BETWEEN 490 AND 510
diff --git a/src/test/regress/expected/sysviews.out b/src/test/regress/expected/sysviews.out
index 4e775af1758..579b861d84f 100644
--- a/src/test/regress/expected/sysviews.out
+++ b/src/test/regress/expected/sysviews.out
@@ -114,7 +114,6 @@ select name, setting from pg_settings where name like 'enable%';
enable_async_append | on
enable_bitmapscan | on
enable_gathermerge | on
- enable_group_by_reordering | on
enable_hashagg | on
enable_hashjoin | on
enable_incremental_sort | on
@@ -132,7 +131,7 @@ select name, setting from pg_settings where name like 'enable%';
enable_seqscan | on
enable_sort | on
enable_tidscan | on
-(21 rows)
+(20 rows)
-- Test that the pg_timezone_names and pg_timezone_abbrevs views are
-- more-or-less working. We can't test their contents in any great detail
diff --git a/src/test/regress/expected/union.out b/src/test/regress/expected/union.out
index 7ac4a9380e2..dece7310cfe 100644
--- a/src/test/regress/expected/union.out
+++ b/src/test/regress/expected/union.out
@@ -1303,22 +1303,24 @@ select distinct q1 from
union all
select distinct * from int8_tbl i82) ss
where q2 = q2;
- QUERY PLAN
-----------------------------------------------------
- HashAggregate
- Group Key: "*SELECT* 1".q1
- -> Append
+ QUERY PLAN
+----------------------------------------------------------
+ Unique
+ -> Merge Append
+ Sort Key: "*SELECT* 1".q1
-> Subquery Scan on "*SELECT* 1"
- -> HashAggregate
- Group Key: i81.q1, i81.q2
- -> Seq Scan on int8_tbl i81
- Filter: (q2 IS NOT NULL)
+ -> Unique
+ -> Sort
+ Sort Key: i81.q1, i81.q2
+ -> Seq Scan on int8_tbl i81
+ Filter: (q2 IS NOT NULL)
-> Subquery Scan on "*SELECT* 2"
- -> HashAggregate
- Group Key: i82.q1, i82.q2
- -> Seq Scan on int8_tbl i82
- Filter: (q2 IS NOT NULL)
-(13 rows)
+ -> Unique
+ -> Sort
+ Sort Key: i82.q1, i82.q2
+ -> Seq Scan on int8_tbl i82
+ Filter: (q2 IS NOT NULL)
+(15 rows)
select distinct q1 from
(select distinct * from int8_tbl i81
@@ -1337,22 +1339,24 @@ select distinct q1 from
union all
select distinct * from int8_tbl i82) ss
where -q1 = q2;
- QUERY PLAN
---------------------------------------------------
- HashAggregate
- Group Key: "*SELECT* 1".q1
- -> Append
+ QUERY PLAN
+--------------------------------------------------------
+ Unique
+ -> Merge Append
+ Sort Key: "*SELECT* 1".q1
-> Subquery Scan on "*SELECT* 1"
- -> HashAggregate
- Group Key: i81.q1, i81.q2
- -> Seq Scan on int8_tbl i81
- Filter: ((- q1) = q2)
+ -> Unique
+ -> Sort
+ Sort Key: i81.q1, i81.q2
+ -> Seq Scan on int8_tbl i81
+ Filter: ((- q1) = q2)
-> Subquery Scan on "*SELECT* 2"
- -> HashAggregate
- Group Key: i82.q1, i82.q2
- -> Seq Scan on int8_tbl i82
- Filter: ((- q1) = q2)
-(13 rows)
+ -> Unique
+ -> Sort
+ Sort Key: i82.q1, i82.q2
+ -> Seq Scan on int8_tbl i82
+ Filter: ((- q1) = q2)
+(15 rows)
select distinct q1 from
(select distinct * from int8_tbl i81
diff --git a/src/test/regress/sql/aggregates.sql b/src/test/regress/sql/aggregates.sql
index 4540a06f454..a4c00ff7a9d 100644
--- a/src/test/regress/sql/aggregates.sql
+++ b/src/test/regress/sql/aggregates.sql
@@ -1068,105 +1068,6 @@ SELECT balk(hundred) FROM tenk1;
ROLLBACK;
--- GROUP BY optimization by reorder columns
-
-SELECT
- i AS id,
- i/2 AS p,
- format('%60s', i%2) AS v,
- i/4 AS c,
- i/8 AS d,
- (random() * (10000/8))::int as e --the same as d but no correlation with p
- INTO btg
-FROM
- generate_series(1, 10000) i;
-
-VACUUM btg;
-ANALYZE btg;
-
--- GROUP BY optimization by reorder columns by frequency
-
-SET enable_hashagg=off;
-SET max_parallel_workers= 0;
-SET max_parallel_workers_per_gather = 0;
-
-EXPLAIN (COSTS off)
-SELECT count(*) FROM btg GROUP BY p, v;
-
-EXPLAIN (COSTS off)
-SELECT count(*) FROM btg GROUP BY v, p;
-
-EXPLAIN (COSTS off)
-SELECT count(*) FROM btg GROUP BY v, p, c;
-
-EXPLAIN (COSTS off)
-SELECT count(*) FROM btg GROUP BY v, p, c ORDER BY v, p, c;
-
-EXPLAIN (COSTS off)
-SELECT count(*) FROM btg GROUP BY v, p, d, c;
-
-EXPLAIN (COSTS off)
-SELECT count(*) FROM btg GROUP BY v, p, d, c ORDER BY v, p, d ,c;
-
-EXPLAIN (COSTS off)
-SELECT count(*) FROM btg GROUP BY v, p, d, c ORDER BY p, v, d ,c;
-
-EXPLAIN (COSTS off)
-SELECT count(*) FROM btg GROUP BY p, d, e;
-
-EXPLAIN (COSTS off)
-SELECT count(*) FROM btg GROUP BY p, e, d;
-
-CREATE STATISTICS btg_dep ON d, e, p FROM btg;
-ANALYZE btg;
-
-EXPLAIN (COSTS off)
-SELECT count(*) FROM btg GROUP BY p, d, e;
-
-EXPLAIN (COSTS off)
-SELECT count(*) FROM btg GROUP BY p, e, d;
-
-
--- GROUP BY optimization by reorder columns by index scan
-
-CREATE INDEX ON btg(p, v);
-SET enable_seqscan=off;
-SET enable_bitmapscan=off;
-VACUUM btg;
-
-EXPLAIN (COSTS off)
-SELECT count(*) FROM btg GROUP BY p, v;
-
-EXPLAIN (COSTS off)
-SELECT count(*) FROM btg GROUP BY p, v ORDER BY p, v;
-
-EXPLAIN (COSTS off)
-SELECT count(*) FROM btg GROUP BY v, p;
-
-EXPLAIN (COSTS off)
-SELECT count(*) FROM btg GROUP BY v, p ORDER BY p, v;
-
-EXPLAIN (COSTS off)
-SELECT count(*) FROM btg GROUP BY v, p, c;
-
-EXPLAIN (COSTS off)
-SELECT count(*) FROM btg GROUP BY v, p, c ORDER BY p, v;
-
-EXPLAIN (COSTS off)
-SELECT count(*) FROM btg GROUP BY v, c, p, d;
-
-EXPLAIN (COSTS off)
-SELECT count(*) FROM btg GROUP BY v, c, p, d ORDER BY p, v;
-
-DROP TABLE btg;
-
-RESET enable_hashagg;
-RESET max_parallel_workers;
-RESET max_parallel_workers_per_gather;
-RESET enable_seqscan;
-RESET enable_bitmapscan;
-
-
-- Secondly test the case of a parallel aggregate combiner function
-- returning NULL. For that use normal transition function, but a
-- combiner function returning NULL.
diff --git a/src/test/regress/sql/incremental_sort.sql b/src/test/regress/sql/incremental_sort.sql
index 6a0e87c7f64..284a354dbb7 100644
--- a/src/test/regress/sql/incremental_sort.sql
+++ b/src/test/regress/sql/incremental_sort.sql
@@ -213,7 +213,7 @@ set parallel_tuple_cost = 0;
set max_parallel_workers_per_gather = 2;
create table t (a int, b int, c int);
-insert into t select mod(i,10),mod(i,10),i from generate_series(1,60000) s(i);
+insert into t select mod(i,10),mod(i,10),i from generate_series(1,10000) s(i);
create index on t (a);
analyze t;