| author | David Rowley | 2021-04-01 00:33:23 +0000 |
|---|---|---|
| committer | David Rowley | 2021-04-01 00:33:23 +0000 |
| commit | 28b3e3905c982c42fb10ee800e6f881e9742c89d | |
| tree | 5368a84ddedf0c0da61fc9ec73ea148658821f0b /src/test/regress/sql | |
| parent | b6002a796dc0bfe721db5eaa54ba9d24fd9fd416 | |
Revert b6002a796
This removes "Add Result Cache executor node". It seems that something
weird is going on with the tracking of cache hits and misses as
highlighted by many buildfarm animals. It's not yet clear what the
problem is as other parts of the plan indicate that the cache did work
correctly; it's just the hits and misses that were being reported as 0.
This is an especially bad time to have the buildfarm so broken, so
reverting before too many more animals go red.
Discussion: https://postgr.es/m/CAApHDvq_hydhfovm4=izgWs+C5HqEeRScjMbOgbpC-jRAeK3Yw@mail.gmail.com
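For context, this is the shape of the check that was failing. The deleted resultcache.sql (shown in full in the last diff below) disabled hash joins to force a nested loop with a Result Cache node on the inner side, then inspected the node's cache statistics in EXPLAIN ANALYZE output. A minimal sketch of that setup, assuming a build that still carries the reverted patch and the standard tenk1 regression table:

```sql
-- Sketch only; query shape taken from the deleted test file below.
-- With hash joins disabled, the plan gets a Result Cache on the inner
-- side of the nested loop; its "Hits: ... Misses: ..." line carries the
-- counters that buildfarm animals were reporting as 0.
SET enable_hashjoin TO off;
EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF)
SELECT COUNT(*), AVG(t1.unique1) FROM tenk1 t1
INNER JOIN tenk1 t2 ON t1.unique1 = t2.twenty
WHERE t2.unique1 < 1000;
RESET enable_hashjoin;
```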
Diffstat (limited to 'src/test/regress/sql')
| -rw-r--r-- | src/test/regress/sql/aggregates.sql | 2 |
| -rw-r--r-- | src/test/regress/sql/join.sql | 2 |
| -rw-r--r-- | src/test/regress/sql/partition_prune.sql | 3 |
| -rw-r--r-- | src/test/regress/sql/resultcache.sql | 85 |
4 files changed, 0 insertions, 92 deletions
```diff
diff --git a/src/test/regress/sql/aggregates.sql b/src/test/regress/sql/aggregates.sql
index eb80a2fe063..eb53668299c 100644
--- a/src/test/regress/sql/aggregates.sql
+++ b/src/test/regress/sql/aggregates.sql
@@ -1098,11 +1098,9 @@ select v||'a', case when v||'a' = 'aa' then 1 else 0 end, count(*)
 -- Make sure that generation of HashAggregate for uniqification purposes
 -- does not lead to array overflow due to unexpected duplicate hash keys
 -- see CAFeeJoKKu0u+A_A9R9316djW-YW3-+Gtgvy3ju655qRHR3jtdA@mail.gmail.com
-set enable_resultcache to off;
 explain (costs off)
   select 1 from tenk1
   where (hundred, thousand) in (select twothousand, twothousand from onek);
-reset enable_resultcache;
 
 --
 -- Hash Aggregation Spill tests
diff --git a/src/test/regress/sql/join.sql b/src/test/regress/sql/join.sql
index 7f866c603b8..8164383fb53 100644
--- a/src/test/regress/sql/join.sql
+++ b/src/test/regress/sql/join.sql
@@ -550,7 +550,6 @@ reset enable_nestloop;
 
 set work_mem to '64kB';
 set enable_mergejoin to off;
-set enable_resultcache to off;
 
 explain (costs off)
 select count(*) from tenk1 a, tenk1 b
@@ -560,7 +559,6 @@ select count(*) from tenk1 a, tenk1 b
 
 reset work_mem;
 reset enable_mergejoin;
-reset enable_resultcache;
 
 --
 -- regression test for 8.2 bug with improper re-ordering of left joins
diff --git a/src/test/regress/sql/partition_prune.sql b/src/test/regress/sql/partition_prune.sql
index bd40779d31e..6ccb52ad1d6 100644
--- a/src/test/regress/sql/partition_prune.sql
+++ b/src/test/regress/sql/partition_prune.sql
@@ -464,9 +464,6 @@ begin
         ln := regexp_replace(ln, 'Workers Launched: \d+', 'Workers Launched: N');
         ln := regexp_replace(ln, 'actual rows=\d+ loops=\d+', 'actual rows=N loops=N');
         ln := regexp_replace(ln, 'Rows Removed by Filter: \d+', 'Rows Removed by Filter: N');
-        ln := regexp_replace(ln, 'Hits: \d+', 'Hits: N');
-        ln := regexp_replace(ln, 'Misses: \d+', 'Misses: N');
-        ln := regexp_replace(ln, 'Memory Usage: \d+', 'Memory Usage: N');
         return next ln;
     end loop;
 end;
```
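The three patterns dropped from the masking function above anonymized the machine-dependent cache statistics that the Result Cache node printed under EXPLAIN ANALYZE, so that expected test output compared stably across machines. A minimal sketch of what that masking did; the input line here is hypothetical, but the patterns are verbatim from the removed lines:

```sql
-- Sketch only: apply the three removed patterns to one assumed line of
-- EXPLAIN ANALYZE output.  Each counter value is replaced with 'N'.
SELECT regexp_replace(
         regexp_replace(
           regexp_replace('Hits: 980  Misses: 20  Memory Usage: 4kB',
                          'Hits: \d+', 'Hits: N'),
           'Misses: \d+', 'Misses: N'),
         'Memory Usage: \d+', 'Memory Usage: N');
-- Result: Hits: N  Misses: N  Memory Usage: NkB
```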
```diff
diff --git a/src/test/regress/sql/resultcache.sql b/src/test/regress/sql/resultcache.sql
deleted file mode 100644
index 150820449ce..00000000000
--- a/src/test/regress/sql/resultcache.sql
+++ /dev/null
@@ -1,85 +0,0 @@
--- Perform tests on the Result Cache node.
-
--- The cache hits/misses/evictions from the Result Cache node can vary between
--- machines.  Let's just replace the number with an 'N'.  In order to allow us
--- to perform validation when the measure was zero, we replace a zero value
--- with "Zero".  All other numbers are replaced with 'N'.
-create function explain_resultcache(query text, hide_hitmiss bool) returns setof text
-language plpgsql as
-$$
-declare
-    ln text;
-begin
-    for ln in
-        execute format('explain (analyze, costs off, summary off, timing off) %s',
-            query)
-    loop
-        if hide_hitmiss = true then
-                ln := regexp_replace(ln, 'Hits: 0', 'Hits: Zero');
-                ln := regexp_replace(ln, 'Hits: \d+', 'Hits: N');
-                ln := regexp_replace(ln, 'Misses: 0', 'Misses: Zero');
-                ln := regexp_replace(ln, 'Misses: \d+', 'Misses: N');
-        end if;
-        ln := regexp_replace(ln, 'Evictions: 0', 'Evictions: Zero');
-        ln := regexp_replace(ln, 'Evictions: \d+', 'Evictions: N');
-        ln := regexp_replace(ln, 'Memory Usage: \d+', 'Memory Usage: N');
-        return next ln;
-    end loop;
-end;
-$$;
-
--- Ensure we get a result cache on the inner side of the nested loop
-SET enable_hashjoin TO off;
-SELECT explain_resultcache('
-SELECT COUNT(*),AVG(t1.unique1) FROM tenk1 t1
-INNER JOIN tenk1 t2 ON t1.unique1 = t2.twenty
-WHERE t2.unique1 < 1000;', false);
-
--- And check we get the expected results.
-SELECT COUNT(*),AVG(t1.unique1) FROM tenk1 t1
-INNER JOIN tenk1 t2 ON t1.unique1 = t2.twenty
-WHERE t2.unique1 < 1000;
-
--- Try with LATERAL joins
-SELECT explain_resultcache('
-SELECT COUNT(*),AVG(t2.unique1) FROM tenk1 t1,
-LATERAL (SELECT t2.unique1 FROM tenk1 t2 WHERE t1.twenty = t2.unique1) t2
-WHERE t1.unique1 < 1000;', false);
-
--- And check we get the expected results.
-SELECT COUNT(*),AVG(t2.unique1) FROM tenk1 t1,
-LATERAL (SELECT t2.unique1 FROM tenk1 t2 WHERE t1.twenty = t2.unique1) t2
-WHERE t1.unique1 < 1000;
-
--- Reduce work_mem so that we see some cache evictions
-SET work_mem TO '64kB';
-SET enable_mergejoin TO off;
--- Ensure we get some evictions.  We're unable to validate the hits and misses
--- here as the number of entries that fit in the cache at once will vary
--- between different machines.
-SELECT explain_resultcache('
-SELECT COUNT(*),AVG(t1.unique1) FROM tenk1 t1
-INNER JOIN tenk1 t2 ON t1.unique1 = t2.thousand
-WHERE t2.unique1 < 800;', true);
-RESET enable_mergejoin;
-RESET work_mem;
-RESET enable_hashjoin;
-
--- Test parallel plans with Result Cache.
-SET min_parallel_table_scan_size TO 0;
-SET parallel_setup_cost TO 0;
-SET parallel_tuple_cost TO 0;
-
--- Ensure we get a parallel plan.
-EXPLAIN (COSTS OFF)
-SELECT COUNT(*),AVG(t2.unique1) FROM tenk1 t1,
-LATERAL (SELECT t2.unique1 FROM tenk1 t2 WHERE t1.twenty = t2.unique1) t2
-WHERE t1.unique1 < 1000;
-
--- And ensure the parallel plan gives us the correct results.
-SELECT COUNT(*),AVG(t2.unique1) FROM tenk1 t1,
-LATERAL (SELECT t2.unique1 FROM tenk1 t2 WHERE t1.twenty = t2.unique1) t2
-WHERE t1.unique1 < 1000;
-RESET parallel_tuple_cost;
-RESET parallel_setup_cost;
-RESET min_parallel_table_scan_size;
```
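A note on the Zero-versus-N convention the deleted file describes: if every number were masked as 'N', the expected output could never assert that a counter was exactly zero, so the helper rewrites a literal zero first and only then anonymizes the remaining digits. A small sketch of that two-step replacement, with assumed input lines and patterns verbatim from the deleted helper:

```sql
-- Sketch only: a zero counter stays recognizable as 'Zero', while any
-- nonzero (machine-dependent) value collapses to 'N'.
SELECT ln,
       regexp_replace(regexp_replace(ln, 'Misses: 0', 'Misses: Zero'),
                      'Misses: \d+', 'Misses: N') AS masked
FROM (VALUES ('Misses: 0'), ('Misses: 17')) AS t(ln);
-- Result: 'Misses: 0'  -> 'Misses: Zero'
--         'Misses: 17' -> 'Misses: N'
```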
