author     Bruce Momjian    2017-05-17 23:01:23 +0000
committer  Bruce Momjian    2017-05-17 23:01:23 +0000
commit     ce554810329b9b8e862eade08b598148931eb456 (patch)
tree       ddc702112bdabfcdf78d1c4648614b92fd11dadd /src/test
parent     a6fd7b7a5f7bf3a8aa3f3d076cf09d922c1c6dd2 (diff)
Post-PG 10 beta1 pgperltidy run
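
The changes below are whitespace and line-wrapping adjustments produced by PostgreSQL's perltidy wrapper over src/test. As an illustrative sketch of the kind of reformatting involved (it mirrors the 001_password.pl hunk further down; it is not an additional change), perltidy mainly wraps long call arguments onto continuation lines and normalizes spacing inside bracketed lists:

    # Before the pgperltidy run: the call sits on one long line
    $node->safe_psql('postgres', "SET password_encryption='md5'; CREATE ROLE md5_role LOGIN PASSWORD 'pass';");

    # After the run: arguments are wrapped to stay within the configured line width
    $node->safe_psql('postgres',
        "SET password_encryption='md5'; CREATE ROLE md5_role LOGIN PASSWORD 'pass';");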
Diffstat (limited to 'src/test')
-rw-r--r--  src/test/authentication/t/001_password.pl | 32
-rw-r--r--  src/test/authentication/t/002_saslprep.pl | 35
-rw-r--r--  src/test/modules/commit_ts/t/002_standby.pl | 3
-rw-r--r--  src/test/modules/commit_ts/t/004_restart.pl | 4
-rw-r--r--  src/test/modules/test_pg_dump/t/001_base.pl | 70
-rw-r--r--  src/test/perl/PostgresNode.pm | 116
-rw-r--r--  src/test/perl/TestLib.pm | 5
-rw-r--r--  src/test/recovery/t/001_stream_rep.pl | 122
-rw-r--r--  src/test/recovery/t/003_recovery_targets.pl | 10
-rw-r--r--  src/test/recovery/t/004_timeline_switch.pl | 6
-rw-r--r--  src/test/recovery/t/005_replay_delay.pl | 3
-rw-r--r--  src/test/recovery/t/006_logical_decoding.pl | 109
-rw-r--r--  src/test/recovery/t/007_sync_rep.pl | 18
-rw-r--r--  src/test/recovery/t/008_fsm_truncation.pl | 32
-rw-r--r--  src/test/recovery/t/009_twophase.pl | 100
-rw-r--r--  src/test/recovery/t/010_logical_decoding_timelines.pl | 59
-rw-r--r--  src/test/recovery/t/011_crash_recovery.pl | 26
-rw-r--r--  src/test/recovery/t/012_subtransactions.pl | 131
-rw-r--r--  src/test/ssl/ServerSetup.pm | 17
-rw-r--r--  src/test/subscription/t/001_rep_changes.pl | 132
-rw-r--r--  src/test/subscription/t/002_types.pl | 35
-rw-r--r--  src/test/subscription/t/003_constraints.pl | 31
-rw-r--r--  src/test/subscription/t/004_sync.pl | 54
-rw-r--r--  src/test/subscription/t/005_encoding.pl | 32
24 files changed, 704 insertions, 478 deletions
diff --git a/src/test/authentication/t/001_password.pl b/src/test/authentication/t/001_password.pl
index 928b36a2b20..2d3f674144e 100644
--- a/src/test/authentication/t/001_password.pl
+++ b/src/test/authentication/t/001_password.pl
@@ -11,7 +11,7 @@ use warnings;
use PostgresNode;
use TestLib;
use Test::More;
-if ($windows_os)
+if ($windows_os)
{
plan skip_all => "authentication tests cannot run on Windows";
}
@@ -25,7 +25,7 @@ else
# and then execute a reload to refresh it.
sub reset_pg_hba
{
- my $node = shift;
+ my $node = shift;
my $hba_method = shift;
unlink($node->data_dir . '/pg_hba.conf');
@@ -36,17 +36,18 @@ sub reset_pg_hba
# Test access for a single role, useful to wrap all tests into one.
sub test_role
{
- my $node = shift;
- my $role = shift;
- my $method = shift;
- my $expected_res = shift;
+ my $node = shift;
+ my $role = shift;
+ my $method = shift;
+ my $expected_res = shift;
my $status_string = 'failed';
$status_string = 'success' if ($expected_res eq 0);
- my $res = $node->psql('postgres', 'SELECT 1', extra_params => ['-U', $role]);
+ my $res =
+ $node->psql('postgres', 'SELECT 1', extra_params => [ '-U', $role ]);
is($res, $expected_res,
- "authentication $status_string for method $method, role $role");
+ "authentication $status_string for method $method, role $role");
}
# Initialize master node
@@ -56,27 +57,30 @@ $node->start;
# Create 3 roles with different password methods for each one. The same
# password is used for all of them.
-$node->safe_psql('postgres', "SET password_encryption='scram-sha-256'; CREATE ROLE scram_role LOGIN PASSWORD 'pass';");
-$node->safe_psql('postgres', "SET password_encryption='md5'; CREATE ROLE md5_role LOGIN PASSWORD 'pass';");
+$node->safe_psql('postgres',
+"SET password_encryption='scram-sha-256'; CREATE ROLE scram_role LOGIN PASSWORD 'pass';"
+);
+$node->safe_psql('postgres',
+"SET password_encryption='md5'; CREATE ROLE md5_role LOGIN PASSWORD 'pass';");
$ENV{"PGPASSWORD"} = 'pass';
# For "trust" method, all users should be able to connect.
reset_pg_hba($node, 'trust');
test_role($node, 'scram_role', 'trust', 0);
-test_role($node, 'md5_role', 'trust', 0);
+test_role($node, 'md5_role', 'trust', 0);
# For plain "password" method, all users should also be able to connect.
reset_pg_hba($node, 'password');
test_role($node, 'scram_role', 'password', 0);
-test_role($node, 'md5_role', 'password', 0);
+test_role($node, 'md5_role', 'password', 0);
# For "scram-sha-256" method, user "scram_role" should be able to connect.
reset_pg_hba($node, 'scram-sha-256');
test_role($node, 'scram_role', 'scram-sha-256', 0);
-test_role($node, 'md5_role', 'scram-sha-256', 2);
+test_role($node, 'md5_role', 'scram-sha-256', 2);
# For "md5" method, all users should be able to connect (SCRAM
# authentication will be performed for the user with a scram verifier.)
reset_pg_hba($node, 'md5');
test_role($node, 'scram_role', 'md5', 0);
-test_role($node, 'md5_role', 'md5', 0);
+test_role($node, 'md5_role', 'md5', 0);
diff --git a/src/test/authentication/t/002_saslprep.pl b/src/test/authentication/t/002_saslprep.pl
index 7cc701dc64c..df9f85d6a9f 100644
--- a/src/test/authentication/t/002_saslprep.pl
+++ b/src/test/authentication/t/002_saslprep.pl
@@ -8,7 +8,7 @@ use warnings;
use PostgresNode;
use TestLib;
use Test::More;
-if ($windows_os)
+if ($windows_os)
{
plan skip_all => "authentication tests cannot run on Windows";
}
@@ -21,7 +21,7 @@ else
# and then execute a reload to refresh it.
sub reset_pg_hba
{
- my $node = shift;
+ my $node = shift;
my $hba_method = shift;
unlink($node->data_dir . '/pg_hba.conf');
@@ -32,24 +32,26 @@ sub reset_pg_hba
# Test access for a single role, useful to wrap all tests into one.
sub test_login
{
- my $node = shift;
- my $role = shift;
- my $password = shift;
- my $expected_res = shift;
+ my $node = shift;
+ my $role = shift;
+ my $password = shift;
+ my $expected_res = shift;
my $status_string = 'failed';
$status_string = 'success' if ($expected_res eq 0);
$ENV{"PGPASSWORD"} = $password;
- my $res = $node->psql('postgres', 'SELECT 1', extra_params => ['-U', $role]);
+ my $res =
+ $node->psql('postgres', 'SELECT 1', extra_params => [ '-U', $role ]);
is($res, $expected_res,
- "authentication $status_string for role $role with password $password");
+ "authentication $status_string for role $role with password $password"
+ );
}
# Initialize master node. Force UTF-8 encoding, so that we can use non-ASCII
# characters in the passwords below.
my $node = get_new_node('master');
-$node->init(extra => ['--locale=C', '--encoding=UTF8']);
+$node->init(extra => [ '--locale=C', '--encoding=UTF8' ]);
$node->start;
# These tests are based on the example strings from RFC4013.txt,
@@ -66,8 +68,9 @@ $node->start;
# 7 <U+0627><U+0031> Error - bidirectional check
# Create test roles.
-$node->safe_psql('postgres',
-"SET password_encryption='scram-sha-256';
+$node->safe_psql(
+ 'postgres',
+ "SET password_encryption='scram-sha-256';
SET client_encoding='utf8';
CREATE ROLE saslpreptest1_role LOGIN PASSWORD 'IX';
CREATE ROLE saslpreptest4a_role LOGIN PASSWORD 'a';
@@ -80,23 +83,23 @@ CREATE ROLE saslpreptest7_role LOGIN PASSWORD E'foo\\u0627\\u0031bar';
reset_pg_hba($node, 'scram-sha-256');
# Check that #1 and #5 are treated the same as just 'IX'
-test_login($node, 'saslpreptest1_role', "I\xc2\xadX", 0);
+test_login($node, 'saslpreptest1_role', "I\xc2\xadX", 0);
test_login($node, 'saslpreptest1_role', "\xe2\x85\xa8", 0);
# but different from lower case 'ix'
test_login($node, 'saslpreptest1_role', "ix", 2);
# Check #4
-test_login($node, 'saslpreptest4a_role', "a", 0);
+test_login($node, 'saslpreptest4a_role', "a", 0);
test_login($node, 'saslpreptest4a_role', "\xc2\xaa", 0);
-test_login($node, 'saslpreptest4b_role', "a", 0);
+test_login($node, 'saslpreptest4b_role', "a", 0);
test_login($node, 'saslpreptest4b_role', "\xc2\xaa", 0);
# Check #6 and #7 - In PostgreSQL, contrary to the spec, if the password
# contains prohibited characters, we use it as is, without normalization.
test_login($node, 'saslpreptest6_role', "foo\x07bar", 0);
-test_login($node, 'saslpreptest6_role', "foobar", 2);
+test_login($node, 'saslpreptest6_role', "foobar", 2);
test_login($node, 'saslpreptest7_role', "foo\xd8\xa71bar", 0);
test_login($node, 'saslpreptest7_role', "foo1\xd8\xa7bar", 2);
-test_login($node, 'saslpreptest7_role', "foobar", 2);
+test_login($node, 'saslpreptest7_role', "foobar", 2);
diff --git a/src/test/modules/commit_ts/t/002_standby.pl b/src/test/modules/commit_ts/t/002_standby.pl
index e7221e982be..1437519aa19 100644
--- a/src/test/modules/commit_ts/t/002_standby.pl
+++ b/src/test/modules/commit_ts/t/002_standby.pl
@@ -44,8 +44,7 @@ is($master_ts, $standby_ts, "standby gives same value as master");
$master->append_conf('postgresql.conf', 'track_commit_timestamp = off');
$master->restart;
$master->safe_psql('postgres', 'checkpoint');
-$master_lsn =
- $master->safe_psql('postgres', 'select pg_current_wal_lsn()');
+$master_lsn = $master->safe_psql('postgres', 'select pg_current_wal_lsn()');
$standby->poll_query_until('postgres',
qq{SELECT '$master_lsn'::pg_lsn <= pg_last_wal_replay_lsn()})
or die "slave never caught up";
diff --git a/src/test/modules/commit_ts/t/004_restart.pl b/src/test/modules/commit_ts/t/004_restart.pl
index b686925a7e0..daf42d3a029 100644
--- a/src/test/modules/commit_ts/t/004_restart.pl
+++ b/src/test/modules/commit_ts/t/004_restart.pl
@@ -22,12 +22,12 @@ like(
($ret, $stdout, $stderr) =
$node_master->psql('postgres', qq[SELECT pg_xact_commit_timestamp('1');]);
-is($ret, 0, 'getting ts of BootstrapTransactionId succeeds');
+is($ret, 0, 'getting ts of BootstrapTransactionId succeeds');
is($stdout, '', 'timestamp of BootstrapTransactionId is null');
($ret, $stdout, $stderr) =
$node_master->psql('postgres', qq[SELECT pg_xact_commit_timestamp('2');]);
-is($ret, 0, 'getting ts of FrozenTransactionId succeeds');
+is($ret, 0, 'getting ts of FrozenTransactionId succeeds');
is($stdout, '', 'timestamp of FrozenTransactionId is null');
# Since FirstNormalTransactionId will've occurred during initdb, long before we
diff --git a/src/test/modules/test_pg_dump/t/001_base.pl b/src/test/modules/test_pg_dump/t/001_base.pl
index 3e45ccb005f..de70f4716be 100644
--- a/src/test/modules/test_pg_dump/t/001_base.pl
+++ b/src/test/modules/test_pg_dump/t/001_base.pl
@@ -41,12 +41,9 @@ my $tempdir_short = TestLib::tempdir_short;
my %pgdump_runs = (
binary_upgrade => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
- "--file=$tempdir/binary_upgrade.sql",
- '--schema-only',
- '--binary-upgrade',
- '--dbname=postgres', ], },
+ 'pg_dump', '--no-sync',
+ "--file=$tempdir/binary_upgrade.sql", '--schema-only',
+ '--binary-upgrade', '--dbname=postgres', ], },
clean => {
dump_cmd => [
'pg_dump', "--file=$tempdir/clean.sql",
@@ -63,19 +60,16 @@ my %pgdump_runs = (
'postgres', ], },
column_inserts => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
- "--file=$tempdir/column_inserts.sql",
- '-a',
- '--column-inserts',
- 'postgres', ], },
+ 'pg_dump', '--no-sync',
+ "--file=$tempdir/column_inserts.sql", '-a',
+ '--column-inserts', 'postgres', ], },
createdb => {
dump_cmd => [
'pg_dump',
'--no-sync',
"--file=$tempdir/createdb.sql",
'-C',
- '-R', # no-op, just for testing
+ '-R', # no-op, just for testing
'postgres', ], },
data_only => {
dump_cmd => [
@@ -83,7 +77,7 @@ my %pgdump_runs = (
'--no-sync',
"--file=$tempdir/data_only.sql",
'-a',
- '-v', # no-op, just make sure it works
+ '-v', # no-op, just make sure it works
'postgres', ], },
defaults => {
dump_cmd => [ 'pg_dump', '-f', "$tempdir/defaults.sql", 'postgres', ],
@@ -126,52 +120,35 @@ my %pgdump_runs = (
"$tempdir/defaults_tar_format.tar", ], },
pg_dumpall_globals => {
dump_cmd => [
- 'pg_dumpall',
- '--no-sync',
- "--file=$tempdir/pg_dumpall_globals.sql",
- '-g', ],
- },
+ 'pg_dumpall', '--no-sync',
+ "--file=$tempdir/pg_dumpall_globals.sql", '-g', ], },
no_privs => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
- "--file=$tempdir/no_privs.sql",
- '-x',
+ 'pg_dump', '--no-sync',
+ "--file=$tempdir/no_privs.sql", '-x',
'postgres', ], },
no_owner => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
- "--file=$tempdir/no_owner.sql",
- '-O',
+ 'pg_dump', '--no-sync',
+ "--file=$tempdir/no_owner.sql", '-O',
'postgres', ], },
schema_only => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
- "--file=$tempdir/schema_only.sql",
- '-s',
- 'postgres', ],
- },
+ 'pg_dump', '--no-sync', "--file=$tempdir/schema_only.sql",
+ '-s', 'postgres', ], },
section_pre_data => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
- "--file=$tempdir/section_pre_data.sql",
- '--section=pre-data',
+ 'pg_dump', '--no-sync',
+ "--file=$tempdir/section_pre_data.sql", '--section=pre-data',
'postgres', ], },
section_data => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
- "--file=$tempdir/section_data.sql",
- '--section=data',
+ 'pg_dump', '--no-sync',
+ "--file=$tempdir/section_data.sql", '--section=data',
'postgres', ], },
section_post_data => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
- "--file=$tempdir/section_post_data.sql",
+ 'pg_dump', '--no-sync', "--file=$tempdir/section_post_data.sql",
'--section=post-data', 'postgres', ], },);
###############################################################
@@ -492,9 +469,8 @@ my %tests = (
pg_dumpall_globals => 1,
section_post_data => 1, }, },
- 'GRANT SELECT(col2) ON regress_pg_dump_table TO regress_dump_test_role'
- => {
- create_order => 4,
+ 'GRANT SELECT(col2) ON regress_pg_dump_table TO regress_dump_test_role' =>
+ { create_order => 4,
create_sql => 'GRANT SELECT(col2) ON regress_pg_dump_table
TO regress_dump_test_role;',
regexp => qr/^
diff --git a/src/test/perl/PostgresNode.pm b/src/test/perl/PostgresNode.pm
index 61d2c5fdf5d..42e66edec93 100644
--- a/src/test/perl/PostgresNode.pm
+++ b/src/test/perl/PostgresNode.pm
@@ -729,7 +729,7 @@ sub restart
my $name = $self->name;
print "### Restarting node \"$name\"\n";
TestLib::system_or_bail('pg_ctl', '-D', $pgdata, '-l', $logfile,
- 'restart');
+ 'restart');
$self->_update_pid(1);
}
@@ -750,7 +750,7 @@ sub promote
my $name = $self->name;
print "### Promoting node \"$name\"\n";
TestLib::system_or_bail('pg_ctl', '-D', $pgdata, '-l', $logfile,
- 'promote');
+ 'promote');
}
# Internal routine to enable streaming replication on a standby node.
@@ -846,6 +846,7 @@ sub _update_pid
$self->{_pid} = undef;
print "# No postmaster PID for node \"$name\"\n";
+
# Complain if we expected to find a pidfile.
BAIL_OUT("postmaster.pid unexpectedly not present") if $is_running;
}
@@ -1140,10 +1141,12 @@ sub psql
my $exc_save = $@;
if ($exc_save)
{
+
# IPC::Run::run threw an exception. re-throw unless it's a
# timeout, which we'll handle by testing is_expired
die $exc_save
- if (blessed($exc_save) || $exc_save !~ /^\Q$timeout_exception\E/);
+ if (blessed($exc_save)
+ || $exc_save !~ /^\Q$timeout_exception\E/);
$ret = undef;
@@ -1191,7 +1194,8 @@ sub psql
if $ret == 1;
die "connection error: '$$stderr'\nwhile running '@psql_params'"
if $ret == 2;
- die "error running SQL: '$$stderr'\nwhile running '@psql_params' with sql '$sql'"
+ die
+"error running SQL: '$$stderr'\nwhile running '@psql_params' with sql '$sql'"
if $ret == 3;
die "psql returns $ret: '$$stderr'\nwhile running '@psql_params'";
}
@@ -1362,15 +1366,17 @@ mode must be specified.
sub lsn
{
my ($self, $mode) = @_;
- my %modes = ('insert' => 'pg_current_wal_insert_lsn()',
- 'flush' => 'pg_current_wal_flush_lsn()',
- 'write' => 'pg_current_wal_lsn()',
- 'receive' => 'pg_last_wal_receive_lsn()',
- 'replay' => 'pg_last_wal_replay_lsn()');
+ my %modes = (
+ 'insert' => 'pg_current_wal_insert_lsn()',
+ 'flush' => 'pg_current_wal_flush_lsn()',
+ 'write' => 'pg_current_wal_lsn()',
+ 'receive' => 'pg_last_wal_receive_lsn()',
+ 'replay' => 'pg_last_wal_replay_lsn()');
$mode = '<undef>' if !defined($mode);
- die "unknown mode for 'lsn': '$mode', valid modes are " . join(', ', keys %modes)
- if !defined($modes{$mode});
+ die "unknown mode for 'lsn': '$mode', valid modes are "
+ . join(', ', keys %modes)
+ if !defined($modes{$mode});
my $result = $self->safe_psql('postgres', "SELECT $modes{$mode}");
chomp($result);
@@ -1409,18 +1415,29 @@ sub wait_for_catchup
{
my ($self, $standby_name, $mode, $target_lsn) = @_;
$mode = defined($mode) ? $mode : 'replay';
- my %valid_modes = ( 'sent' => 1, 'write' => 1, 'flush' => 1, 'replay' => 1 );
- die "unknown mode $mode for 'wait_for_catchup', valid modes are " . join(', ', keys(%valid_modes)) unless exists($valid_modes{$mode});
+ my %valid_modes =
+ ('sent' => 1, 'write' => 1, 'flush' => 1, 'replay' => 1);
+ die "unknown mode $mode for 'wait_for_catchup', valid modes are "
+ . join(', ', keys(%valid_modes))
+ unless exists($valid_modes{$mode});
+
# Allow passing of a PostgresNode instance as shorthand
- if ( blessed( $standby_name ) && $standby_name->isa("PostgresNode") )
+ if (blessed($standby_name) && $standby_name->isa("PostgresNode"))
{
$standby_name = $standby_name->name;
}
die 'target_lsn must be specified' unless defined($target_lsn);
- print "Waiting for replication conn " . $standby_name . "'s " . $mode . "_lsn to pass " . $target_lsn . " on " . $self->name . "\n";
- my $query = qq[SELECT '$target_lsn' <= ${mode}_lsn FROM pg_catalog.pg_stat_replication WHERE application_name = '$standby_name';];
+ print "Waiting for replication conn "
+ . $standby_name . "'s "
+ . $mode
+ . "_lsn to pass "
+ . $target_lsn . " on "
+ . $self->name . "\n";
+ my $query =
+qq[SELECT '$target_lsn' <= ${mode}_lsn FROM pg_catalog.pg_stat_replication WHERE application_name = '$standby_name';];
$self->poll_query_until('postgres', $query)
- or die "timed out waiting for catchup, current location is " . ($self->safe_psql('postgres', $query) || '(unknown)');
+ or die "timed out waiting for catchup, current location is "
+ . ($self->safe_psql('postgres', $query) || '(unknown)');
print "done\n";
}
@@ -1453,10 +1470,17 @@ sub wait_for_slot_catchup
die "valid modes are restart, confirmed_flush";
}
die 'target lsn must be specified' unless defined($target_lsn);
- print "Waiting for replication slot " . $slot_name . "'s " . $mode . "_lsn to pass " . $target_lsn . " on " . $self->name . "\n";
- my $query = qq[SELECT '$target_lsn' <= ${mode}_lsn FROM pg_catalog.pg_replication_slots WHERE slot_name = '$slot_name';];
+ print "Waiting for replication slot "
+ . $slot_name . "'s "
+ . $mode
+ . "_lsn to pass "
+ . $target_lsn . " on "
+ . $self->name . "\n";
+ my $query =
+qq[SELECT '$target_lsn' <= ${mode}_lsn FROM pg_catalog.pg_replication_slots WHERE slot_name = '$slot_name';];
$self->poll_query_until('postgres', $query)
- or die "timed out waiting for catchup, current location is " . ($self->safe_psql('postgres', $query) || '(unknown)');
+ or die "timed out waiting for catchup, current location is "
+ . ($self->safe_psql('postgres', $query) || '(unknown)');
print "done\n";
}
@@ -1485,18 +1509,23 @@ null columns.
sub query_hash
{
my ($self, $dbname, $query, @columns) = @_;
- die 'calls in array context for multi-row results not supported yet' if (wantarray);
+ die 'calls in array context for multi-row results not supported yet'
+ if (wantarray);
+
# Replace __COLUMNS__ if found
- substr($query, index($query, '__COLUMNS__'), length('__COLUMNS__')) = join(', ', @columns)
- if index($query, '__COLUMNS__') >= 0;
+ substr($query, index($query, '__COLUMNS__'), length('__COLUMNS__')) =
+ join(', ', @columns)
+ if index($query, '__COLUMNS__') >= 0;
my $result = $self->safe_psql($dbname, $query);
+
# hash slice, see http://stackoverflow.com/a/16755894/398670 .
#
# Fills the hash with empty strings produced by x-operator element
# duplication if result is an empty row
#
my %val;
- @val{@columns} = $result ne '' ? split(qr/\|/, $result) : ('',) x scalar(@columns);
+ @val{@columns} =
+ $result ne '' ? split(qr/\|/, $result) : ('',) x scalar(@columns);
return \%val;
}
@@ -1518,8 +1547,14 @@ either.
sub slot
{
my ($self, $slot_name) = @_;
- my @columns = ('plugin', 'slot_type', 'datoid', 'database', 'active', 'active_pid', 'xmin', 'catalog_xmin', 'restart_lsn');
- return $self->query_hash('postgres', "SELECT __COLUMNS__ FROM pg_catalog.pg_replication_slots WHERE slot_name = '$slot_name'", @columns);
+ my @columns = (
+ 'plugin', 'slot_type', 'datoid', 'database',
+ 'active', 'active_pid', 'xmin', 'catalog_xmin',
+ 'restart_lsn');
+ return $self->query_hash(
+ 'postgres',
+"SELECT __COLUMNS__ FROM pg_catalog.pg_replication_slots WHERE slot_name = '$slot_name'",
+ @columns);
}
=pod
@@ -1543,29 +1578,36 @@ to check for timeout. retval is undef on timeout.
sub pg_recvlogical_upto
{
- my ($self, $dbname, $slot_name, $endpos, $timeout_secs, %plugin_options) = @_;
+ my ($self, $dbname, $slot_name, $endpos, $timeout_secs, %plugin_options) =
+ @_;
my ($stdout, $stderr);
my $timeout_exception = 'pg_recvlogical timed out';
die 'slot name must be specified' unless defined($slot_name);
- die 'endpos must be specified' unless defined($endpos);
+ die 'endpos must be specified' unless defined($endpos);
- my @cmd = ('pg_recvlogical', '-S', $slot_name, '--dbname', $self->connstr($dbname));
+ my @cmd = (
+ 'pg_recvlogical', '-S', $slot_name, '--dbname',
+ $self->connstr($dbname));
push @cmd, '--endpos', $endpos;
push @cmd, '-f', '-', '--no-loop', '--start';
while (my ($k, $v) = each %plugin_options)
{
- die "= is not permitted to appear in replication option name" if ($k =~ qr/=/);
+ die "= is not permitted to appear in replication option name"
+ if ($k =~ qr/=/);
push @cmd, "-o", "$k=$v";
}
my $timeout;
- $timeout = IPC::Run::timeout($timeout_secs, exception => $timeout_exception ) if $timeout_secs;
+ $timeout =
+ IPC::Run::timeout($timeout_secs, exception => $timeout_exception)
+ if $timeout_secs;
my $ret = 0;
- do {
+ do
+ {
local $@;
eval {
IPC::Run::run(\@cmd, ">", \$stdout, "2>", \$stderr, $timeout);
@@ -1574,6 +1616,7 @@ sub pg_recvlogical_upto
my $exc_save = $@;
if ($exc_save)
{
+
# IPC::Run::run threw an exception. re-throw unless it's a
# timeout, which we'll handle by testing is_expired
die $exc_save
@@ -1584,8 +1627,9 @@ sub pg_recvlogical_upto
die "Got timeout exception '$exc_save' but timer not expired?!"
unless $timeout->is_expired;
- die "$exc_save waiting for endpos $endpos with stdout '$stdout', stderr '$stderr'"
- unless wantarray;
+ die
+"$exc_save waiting for endpos $endpos with stdout '$stdout', stderr '$stderr'"
+ unless wantarray;
}
};
@@ -1598,7 +1642,9 @@ sub pg_recvlogical_upto
}
else
{
- die "pg_recvlogical exited with code '$ret', stdout '$stdout' and stderr '$stderr'" if $ret;
+ die
+"pg_recvlogical exited with code '$ret', stdout '$stdout' and stderr '$stderr'"
+ if $ret;
return $stdout;
}
}
diff --git a/src/test/perl/TestLib.pm b/src/test/perl/TestLib.pm
index da65c9287ae..fe09689fec2 100644
--- a/src/test/perl/TestLib.pm
+++ b/src/test/perl/TestLib.pm
@@ -17,6 +17,7 @@ use File::Spec;
use File::Temp ();
use IPC::Run;
use SimpleTee;
+
# specify a recent enough version of Test::More to support the note() function
use Test::More 0.82;
@@ -91,8 +92,8 @@ INIT
# Hijack STDOUT and STDERR to the log file
open(my $orig_stdout, '>&', \*STDOUT);
open(my $orig_stderr, '>&', \*STDERR);
- open(STDOUT, '>&', $testlog);
- open(STDERR, '>&', $testlog);
+ open(STDOUT, '>&', $testlog);
+ open(STDERR, '>&', $testlog);
# The test output (ok ...) needs to be printed to the original STDOUT so
# that the 'prove' program can parse it, and display it to the user in
diff --git a/src/test/recovery/t/001_stream_rep.pl b/src/test/recovery/t/001_stream_rep.pl
index 0ebe366a016..266d27c8a2a 100644
--- a/src/test/recovery/t/001_stream_rep.pl
+++ b/src/test/recovery/t/001_stream_rep.pl
@@ -40,8 +40,10 @@ $node_master->safe_psql('postgres',
"CREATE TABLE tab_int AS SELECT generate_series(1,1002) AS a");
# Wait for standbys to catch up
-$node_master->wait_for_catchup($node_standby_1, 'replay', $node_master->lsn('insert'));
-$node_standby_1->wait_for_catchup($node_standby_2, 'replay', $node_standby_1->lsn('replay'));
+$node_master->wait_for_catchup($node_standby_1, 'replay',
+ $node_master->lsn('insert'));
+$node_standby_1->wait_for_catchup($node_standby_2, 'replay',
+ $node_standby_1->lsn('replay'));
my $result =
$node_standby_1->safe_psql('postgres', "SELECT count(*) FROM tab_int");
@@ -66,11 +68,11 @@ note "testing connection parameter \"target_session_attrs\"";
# target_session_attrs with multiple nodes.
sub test_target_session_attrs
{
- my $node1 = shift;
- my $node2 = shift;
+ my $node1 = shift;
+ my $node2 = shift;
my $target_node = shift;
- my $mode = shift;
- my $status = shift;
+ my $mode = shift;
+ my $status = shift;
my $node1_host = $node1->host;
my $node1_port = $node1->port;
@@ -89,25 +91,32 @@ sub test_target_session_attrs
# The client used for the connection does not matter, only the backend
# point does.
my ($ret, $stdout, $stderr) =
- $node1->psql('postgres', 'SHOW port;', extra_params => ['-d', $connstr]);
- is($status == $ret && $stdout eq $target_node->port, 1,
- "connect to node $target_name if mode \"$mode\" and $node1_name,$node2_name listed");
+ $node1->psql('postgres', 'SHOW port;',
+ extra_params => [ '-d', $connstr ]);
+ is( $status == $ret && $stdout eq $target_node->port,
+ 1,
+"connect to node $target_name if mode \"$mode\" and $node1_name,$node2_name listed"
+ );
}
# Connect to master in "read-write" mode with master,standby1 list.
test_target_session_attrs($node_master, $node_standby_1, $node_master,
- "read-write", 0);
+ "read-write", 0);
+
# Connect to master in "read-write" mode with standby1,master list.
test_target_session_attrs($node_standby_1, $node_master, $node_master,
- "read-write", 0);
+ "read-write", 0);
+
# Connect to master in "any" mode with master,standby1 list.
-test_target_session_attrs($node_master, $node_standby_1, $node_master,
- "any", 0);
+test_target_session_attrs($node_master, $node_standby_1, $node_master, "any",
+ 0);
+
# Connect to standby1 in "any" mode with standby1,master list.
test_target_session_attrs($node_standby_1, $node_master, $node_standby_1,
- "any", 0);
+ "any", 0);
note "switching to physical replication slot";
+
# Switch to using a physical replication slot. We can do this without a new
# backup since physical slots can go backwards if needed. Do so on both
# standbys. Since we're going to be testing things that affect the slot state,
@@ -115,14 +124,26 @@ note "switching to physical replication slot";
my ($slotname_1, $slotname_2) = ('standby_1', 'standby_2');
$node_master->append_conf('postgresql.conf', "max_replication_slots = 4");
$node_master->restart;
-is($node_master->psql('postgres', qq[SELECT pg_create_physical_replication_slot('$slotname_1');]), 0, 'physical slot created on master');
-$node_standby_1->append_conf('recovery.conf', "primary_slot_name = $slotname_1");
-$node_standby_1->append_conf('postgresql.conf', "wal_receiver_status_interval = 1");
+is( $node_master->psql(
+ 'postgres',
+ qq[SELECT pg_create_physical_replication_slot('$slotname_1');]),
+ 0,
+ 'physical slot created on master');
+$node_standby_1->append_conf('recovery.conf',
+ "primary_slot_name = $slotname_1");
+$node_standby_1->append_conf('postgresql.conf',
+ "wal_receiver_status_interval = 1");
$node_standby_1->append_conf('postgresql.conf', "max_replication_slots = 4");
$node_standby_1->restart;
-is($node_standby_1->psql('postgres', qq[SELECT pg_create_physical_replication_slot('$slotname_2');]), 0, 'physical slot created on intermediate replica');
-$node_standby_2->append_conf('recovery.conf', "primary_slot_name = $slotname_2");
-$node_standby_2->append_conf('postgresql.conf', "wal_receiver_status_interval = 1");
+is( $node_standby_1->psql(
+ 'postgres',
+ qq[SELECT pg_create_physical_replication_slot('$slotname_2');]),
+ 0,
+ 'physical slot created on intermediate replica');
+$node_standby_2->append_conf('recovery.conf',
+ "primary_slot_name = $slotname_2");
+$node_standby_2->append_conf('postgresql.conf',
+ "wal_receiver_status_interval = 1");
$node_standby_2->restart;
sub get_slot_xmins
@@ -135,11 +156,11 @@ sub get_slot_xmins
# There's no hot standby feedback and there are no logical slots on either peer
# so xmin and catalog_xmin should be null on both slots.
my ($xmin, $catalog_xmin) = get_slot_xmins($node_master, $slotname_1);
-is($xmin, '', 'non-cascaded slot xmin null with no hs_feedback');
+is($xmin, '', 'non-cascaded slot xmin null with no hs_feedback');
is($catalog_xmin, '', 'non-cascaded slot xmin null with no hs_feedback');
($xmin, $catalog_xmin) = get_slot_xmins($node_standby_1, $slotname_2);
-is($xmin, '', 'cascaded slot xmin null with no hs_feedback');
+is($xmin, '', 'cascaded slot xmin null with no hs_feedback');
is($catalog_xmin, '', 'cascaded slot xmin null with no hs_feedback');
# Replication still works?
@@ -147,23 +168,32 @@ $node_master->safe_psql('postgres', 'CREATE TABLE replayed(val integer);');
sub replay_check
{
- my $newval = $node_master->safe_psql('postgres', 'INSERT INTO replayed(val) SELECT coalesce(max(val),0) + 1 AS newval FROM replayed RETURNING val');
- $node_master->wait_for_catchup($node_standby_1, 'replay', $node_master->lsn('insert'));
- $node_standby_1->wait_for_catchup($node_standby_2, 'replay', $node_standby_1->lsn('replay'));
- $node_standby_1->safe_psql('postgres', qq[SELECT 1 FROM replayed WHERE val = $newval])
- or die "standby_1 didn't replay master value $newval";
- $node_standby_2->safe_psql('postgres', qq[SELECT 1 FROM replayed WHERE val = $newval])
- or die "standby_2 didn't replay standby_1 value $newval";
+ my $newval = $node_master->safe_psql('postgres',
+'INSERT INTO replayed(val) SELECT coalesce(max(val),0) + 1 AS newval FROM replayed RETURNING val'
+ );
+ $node_master->wait_for_catchup($node_standby_1, 'replay',
+ $node_master->lsn('insert'));
+ $node_standby_1->wait_for_catchup($node_standby_2, 'replay',
+ $node_standby_1->lsn('replay'));
+ $node_standby_1->safe_psql('postgres',
+ qq[SELECT 1 FROM replayed WHERE val = $newval])
+ or die "standby_1 didn't replay master value $newval";
+ $node_standby_2->safe_psql('postgres',
+ qq[SELECT 1 FROM replayed WHERE val = $newval])
+ or die "standby_2 didn't replay standby_1 value $newval";
}
replay_check();
note "enabling hot_standby_feedback";
+
# Enable hs_feedback. The slot should gain an xmin. We set the status interval
# so we'll see the results promptly.
-$node_standby_1->safe_psql('postgres', 'ALTER SYSTEM SET hot_standby_feedback = on;');
+$node_standby_1->safe_psql('postgres',
+ 'ALTER SYSTEM SET hot_standby_feedback = on;');
$node_standby_1->reload;
-$node_standby_2->safe_psql('postgres', 'ALTER SYSTEM SET hot_standby_feedback = on;');
+$node_standby_2->safe_psql('postgres',
+ 'ALTER SYSTEM SET hot_standby_feedback = on;');
$node_standby_2->reload;
replay_check();
sleep(2);
@@ -177,7 +207,8 @@ isnt($xmin, '', 'cascaded slot xmin non-null with hs feedback');
is($catalog_xmin, '', 'cascaded slot xmin still null with hs_feedback');
note "doing some work to advance xmin";
-for my $i (10000..11000) {
+for my $i (10000 .. 11000)
+{
$node_master->safe_psql('postgres', qq[INSERT INTO tab_int VALUES ($i);]);
}
$node_master->safe_psql('postgres', 'VACUUM;');
@@ -186,38 +217,46 @@ $node_master->safe_psql('postgres', 'CHECKPOINT;');
my ($xmin2, $catalog_xmin2) = get_slot_xmins($node_master, $slotname_1);
note "new xmin $xmin2, old xmin $xmin";
isnt($xmin2, $xmin, 'non-cascaded slot xmin with hs feedback has changed');
-is($catalog_xmin2, '', 'non-cascaded slot xmin still null with hs_feedback unchanged');
+is($catalog_xmin2, '',
+ 'non-cascaded slot xmin still null with hs_feedback unchanged');
($xmin2, $catalog_xmin2) = get_slot_xmins($node_standby_1, $slotname_2);
note "new xmin $xmin2, old xmin $xmin";
isnt($xmin2, $xmin, 'cascaded slot xmin with hs feedback has changed');
-is($catalog_xmin2, '', 'cascaded slot xmin still null with hs_feedback unchanged');
+is($catalog_xmin2, '',
+ 'cascaded slot xmin still null with hs_feedback unchanged');
note "disabling hot_standby_feedback";
+
# Disable hs_feedback. Xmin should be cleared.
-$node_standby_1->safe_psql('postgres', 'ALTER SYSTEM SET hot_standby_feedback = off;');
+$node_standby_1->safe_psql('postgres',
+ 'ALTER SYSTEM SET hot_standby_feedback = off;');
$node_standby_1->reload;
-$node_standby_2->safe_psql('postgres', 'ALTER SYSTEM SET hot_standby_feedback = off;');
+$node_standby_2->safe_psql('postgres',
+ 'ALTER SYSTEM SET hot_standby_feedback = off;');
$node_standby_2->reload;
replay_check();
sleep(2);
($xmin, $catalog_xmin) = get_slot_xmins($node_master, $slotname_1);
is($xmin, '', 'non-cascaded slot xmin null with hs feedback reset');
-is($catalog_xmin, '', 'non-cascaded slot xmin still null with hs_feedback reset');
+is($catalog_xmin, '',
+ 'non-cascaded slot xmin still null with hs_feedback reset');
($xmin, $catalog_xmin) = get_slot_xmins($node_standby_1, $slotname_2);
-is($xmin, '', 'cascaded slot xmin null with hs feedback reset');
+is($xmin, '', 'cascaded slot xmin null with hs feedback reset');
is($catalog_xmin, '', 'cascaded slot xmin still null with hs_feedback reset');
note "re-enabling hot_standby_feedback and disabling while stopped";
-$node_standby_2->safe_psql('postgres', 'ALTER SYSTEM SET hot_standby_feedback = on;');
+$node_standby_2->safe_psql('postgres',
+ 'ALTER SYSTEM SET hot_standby_feedback = on;');
$node_standby_2->reload;
$node_master->safe_psql('postgres', qq[INSERT INTO tab_int VALUES (11000);]);
replay_check();
-$node_standby_2->safe_psql('postgres', 'ALTER SYSTEM SET hot_standby_feedback = off;');
+$node_standby_2->safe_psql('postgres',
+ 'ALTER SYSTEM SET hot_standby_feedback = off;');
$node_standby_2->stop;
($xmin, $catalog_xmin) = get_slot_xmins($node_standby_1, $slotname_2);
@@ -227,4 +266,5 @@ isnt($xmin, '', 'cascaded slot xmin non-null with postgres shut down');
$node_standby_2->start;
($xmin, $catalog_xmin) = get_slot_xmins($node_standby_1, $slotname_2);
-is($xmin, '', 'cascaded slot xmin reset after startup with hs feedback reset');
+is($xmin, '',
+ 'cascaded slot xmin reset after startup with hs feedback reset');
diff --git a/src/test/recovery/t/003_recovery_targets.pl b/src/test/recovery/t/003_recovery_targets.pl
index 66025cdbe3e..cc7c04b6cb9 100644
--- a/src/test/recovery/t/003_recovery_targets.pl
+++ b/src/test/recovery/t/003_recovery_targets.pl
@@ -22,8 +22,7 @@ sub test_recovery_standby
foreach my $param_item (@$recovery_params)
{
- $node_standby->append_conf(
- 'recovery.conf', qq($param_item));
+ $node_standby->append_conf('recovery.conf', qq($param_item));
}
$node_standby->start;
@@ -71,8 +70,8 @@ my ($lsn2, $recovery_txid) = split /\|/, $ret;
# More data, with recovery target timestamp
$node_master->safe_psql('postgres',
"INSERT INTO tab_int VALUES (generate_series(2001,3000))");
-$ret = $node_master->safe_psql('postgres',
- "SELECT pg_current_wal_lsn(), now();");
+$ret =
+ $node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn(), now();");
my ($lsn3, $recovery_time) = split /\|/, $ret;
# Even more data, this time with a recovery target name
@@ -87,7 +86,8 @@ $node_master->safe_psql('postgres',
# And now for a recovery target LSN
$node_master->safe_psql('postgres',
"INSERT INTO tab_int VALUES (generate_series(4001,5000))");
-my $recovery_lsn = $node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn()");
+my $recovery_lsn =
+ $node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn()");
my $lsn5 =
$node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn();");
diff --git a/src/test/recovery/t/004_timeline_switch.pl b/src/test/recovery/t/004_timeline_switch.pl
index 7c6587a961f..34ee3351296 100644
--- a/src/test/recovery/t/004_timeline_switch.pl
+++ b/src/test/recovery/t/004_timeline_switch.pl
@@ -34,7 +34,8 @@ $node_master->safe_psql('postgres',
"CREATE TABLE tab_int AS SELECT generate_series(1,1000) AS a");
# Wait until standby has replayed enough data on standby 1
-$node_master->wait_for_catchup($node_standby_1, 'replay', $node_master->lsn('write'));
+$node_master->wait_for_catchup($node_standby_1, 'replay',
+ $node_master->lsn('write'));
# Stop and remove master, and promote standby 1, switching it to a new timeline
$node_master->teardown_node;
@@ -55,7 +56,8 @@ $node_standby_2->restart;
# to ensure that the timeline switch has been done.
$node_standby_1->safe_psql('postgres',
"INSERT INTO tab_int VALUES (generate_series(1001,2000))");
-$node_standby_1->wait_for_catchup($node_standby_2, 'replay', $node_standby_1->lsn('write'));
+$node_standby_1->wait_for_catchup($node_standby_2, 'replay',
+ $node_standby_1->lsn('write'));
my $result =
$node_standby_2->safe_psql('postgres', "SELECT count(*) FROM tab_int");
diff --git a/src/test/recovery/t/005_replay_delay.pl b/src/test/recovery/t/005_replay_delay.pl
index 4185f58e0d8..94c49443a5d 100644
--- a/src/test/recovery/t/005_replay_delay.pl
+++ b/src/test/recovery/t/005_replay_delay.pl
@@ -50,8 +50,7 @@ while ($remaining-- > 0)
# Done waiting?
my $replay_status = $node_standby->safe_psql('postgres',
- "SELECT (pg_last_wal_replay_lsn() - '$until_lsn'::pg_lsn) >= 0"
- );
+ "SELECT (pg_last_wal_replay_lsn() - '$until_lsn'::pg_lsn) >= 0");
last if $replay_status eq 't';
# No, sleep some more.
diff --git a/src/test/recovery/t/006_logical_decoding.pl b/src/test/recovery/t/006_logical_decoding.pl
index 1430c12923f..72428be0bf8 100644
--- a/src/test/recovery/t/006_logical_decoding.pl
+++ b/src/test/recovery/t/006_logical_decoding.pl
@@ -14,21 +14,27 @@ use Config;
my $node_master = get_new_node('master');
$node_master->init(allows_streaming => 1);
$node_master->append_conf(
- 'postgresql.conf', qq(
+ 'postgresql.conf', qq(
wal_level = logical
));
$node_master->start;
my $backup_name = 'master_backup';
-$node_master->safe_psql('postgres', qq[CREATE TABLE decoding_test(x integer, y text);]);
+$node_master->safe_psql('postgres',
+ qq[CREATE TABLE decoding_test(x integer, y text);]);
-$node_master->safe_psql('postgres', qq[SELECT pg_create_logical_replication_slot('test_slot', 'test_decoding');]);
+$node_master->safe_psql('postgres',
+qq[SELECT pg_create_logical_replication_slot('test_slot', 'test_decoding');]);
-$node_master->safe_psql('postgres', qq[INSERT INTO decoding_test(x,y) SELECT s, s::text FROM generate_series(1,10) s;]);
+$node_master->safe_psql('postgres',
+qq[INSERT INTO decoding_test(x,y) SELECT s, s::text FROM generate_series(1,10) s;]
+);
# Basic decoding works
-my($result) = $node_master->safe_psql('postgres', qq[SELECT pg_logical_slot_get_changes('test_slot', NULL, NULL);]);
-is(scalar(my @foobar = split /^/m, $result), 12, 'Decoding produced 12 rows inc BEGIN/COMMIT');
+my ($result) = $node_master->safe_psql('postgres',
+ qq[SELECT pg_logical_slot_get_changes('test_slot', NULL, NULL);]);
+is(scalar(my @foobar = split /^/m, $result),
+ 12, 'Decoding produced 12 rows inc BEGIN/COMMIT');
# If we immediately crash the server we might lose the progress we just made
# and replay the same changes again. But a clean shutdown should never repeat
@@ -36,13 +42,16 @@ is(scalar(my @foobar = split /^/m, $result), 12, 'Decoding produced 12 rows inc
$node_master->restart('fast');
# There are no new writes, so the result should be empty.
-$result = $node_master->safe_psql('postgres', qq[SELECT pg_logical_slot_get_changes('test_slot', NULL, NULL);]);
+$result = $node_master->safe_psql('postgres',
+ qq[SELECT pg_logical_slot_get_changes('test_slot', NULL, NULL);]);
chomp($result);
is($result, '', 'Decoding after fast restart repeats no rows');
# Insert some rows and verify that we get the same results from pg_recvlogical
# and the SQL interface.
-$node_master->safe_psql('postgres', qq[INSERT INTO decoding_test(x,y) SELECT s, s::text FROM generate_series(1,4) s;]);
+$node_master->safe_psql('postgres',
+qq[INSERT INTO decoding_test(x,y) SELECT s, s::text FROM generate_series(1,4) s;]
+);
my $expected = q{BEGIN
table public.decoding_test: INSERT: x[integer]:1 y[text]:'1'
@@ -51,59 +60,91 @@ table public.decoding_test: INSERT: x[integer]:3 y[text]:'3'
table public.decoding_test: INSERT: x[integer]:4 y[text]:'4'
COMMIT};
-my $stdout_sql = $node_master->safe_psql('postgres', qq[SELECT data FROM pg_logical_slot_peek_changes('test_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');]);
+my $stdout_sql = $node_master->safe_psql('postgres',
+qq[SELECT data FROM pg_logical_slot_peek_changes('test_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');]
+);
is($stdout_sql, $expected, 'got expected output from SQL decoding session');
-my $endpos = $node_master->safe_psql('postgres', "SELECT lsn FROM pg_logical_slot_peek_changes('test_slot', NULL, NULL) ORDER BY lsn DESC LIMIT 1;");
+my $endpos = $node_master->safe_psql('postgres',
+"SELECT lsn FROM pg_logical_slot_peek_changes('test_slot', NULL, NULL) ORDER BY lsn DESC LIMIT 1;"
+);
print "waiting to replay $endpos\n";
-my $stdout_recv = $node_master->pg_recvlogical_upto('postgres', 'test_slot', $endpos, 10, 'include-xids' => '0', 'skip-empty-xacts' => '1');
+my $stdout_recv = $node_master->pg_recvlogical_upto(
+ 'postgres', 'test_slot', $endpos, 10,
+ 'include-xids' => '0',
+ 'skip-empty-xacts' => '1');
chomp($stdout_recv);
-is($stdout_recv, $expected, 'got same expected output from pg_recvlogical decoding session');
+is($stdout_recv, $expected,
+ 'got same expected output from pg_recvlogical decoding session');
-$stdout_recv = $node_master->pg_recvlogical_upto('postgres', 'test_slot', $endpos, 10, 'include-xids' => '0', 'skip-empty-xacts' => '1');
+$stdout_recv = $node_master->pg_recvlogical_upto(
+ 'postgres', 'test_slot', $endpos, 10,
+ 'include-xids' => '0',
+ 'skip-empty-xacts' => '1');
chomp($stdout_recv);
-is($stdout_recv, '', 'pg_recvlogical acknowledged changes, nothing pending on slot');
+is($stdout_recv, '',
+ 'pg_recvlogical acknowledged changes, nothing pending on slot');
$node_master->safe_psql('postgres', 'CREATE DATABASE otherdb');
-is($node_master->psql('otherdb', "SELECT lsn FROM pg_logical_slot_peek_changes('test_slot', NULL, NULL) ORDER BY lsn DESC LIMIT 1;"), 3,
+is( $node_master->psql(
+ 'otherdb',
+"SELECT lsn FROM pg_logical_slot_peek_changes('test_slot', NULL, NULL) ORDER BY lsn DESC LIMIT 1;"
+ ),
+ 3,
'replaying logical slot from another database fails');
-$node_master->safe_psql('otherdb', qq[SELECT pg_create_logical_replication_slot('otherdb_slot', 'test_decoding');]);
+$node_master->safe_psql('otherdb',
+qq[SELECT pg_create_logical_replication_slot('otherdb_slot', 'test_decoding');]
+);
# make sure you can't drop a slot while active
SKIP:
{
- # some Windows Perls at least don't like IPC::Run's start/kill_kill regime.
+
+ # some Windows Perls at least don't like IPC::Run's start/kill_kill regime.
skip "Test fails on Windows perl", 2 if $Config{osname} eq 'MSWin32';
- my $pg_recvlogical = IPC::Run::start(['pg_recvlogical', '-d', $node_master->connstr('otherdb'), '-S', 'otherdb_slot', '-f', '-', '--start']);
- $node_master->poll_query_until('otherdb', "SELECT EXISTS (SELECT 1 FROM pg_replication_slots WHERE slot_name = 'otherdb_slot' AND active_pid IS NOT NULL)");
- is($node_master->psql('postgres', 'DROP DATABASE otherdb'), 3,
- 'dropping a DB with inactive logical slots fails');
+ my $pg_recvlogical = IPC::Run::start(
+ [ 'pg_recvlogical', '-d', $node_master->connstr('otherdb'),
+ '-S', 'otherdb_slot', '-f', '-', '--start' ]);
+ $node_master->poll_query_until('otherdb',
+"SELECT EXISTS (SELECT 1 FROM pg_replication_slots WHERE slot_name = 'otherdb_slot' AND active_pid IS NOT NULL)"
+ );
+ is($node_master->psql('postgres', 'DROP DATABASE otherdb'),
+ 3, 'dropping a DB with inactive logical slots fails');
$pg_recvlogical->kill_kill;
- is($node_master->slot('otherdb_slot')->{'slot_name'}, undef,
- 'logical slot still exists');
+ is($node_master->slot('otherdb_slot')->{'slot_name'},
+ undef, 'logical slot still exists');
}
-$node_master->poll_query_until('otherdb', "SELECT EXISTS (SELECT 1 FROM pg_replication_slots WHERE slot_name = 'otherdb_slot' AND active_pid IS NULL)");
-is($node_master->psql('postgres', 'DROP DATABASE otherdb'), 0,
- 'dropping a DB with inactive logical slots succeeds');
-is($node_master->slot('otherdb_slot')->{'slot_name'}, undef,
- 'logical slot was actually dropped with DB');
+$node_master->poll_query_until('otherdb',
+"SELECT EXISTS (SELECT 1 FROM pg_replication_slots WHERE slot_name = 'otherdb_slot' AND active_pid IS NULL)"
+);
+is($node_master->psql('postgres', 'DROP DATABASE otherdb'),
+ 0, 'dropping a DB with inactive logical slots succeeds');
+is($node_master->slot('otherdb_slot')->{'slot_name'},
+ undef, 'logical slot was actually dropped with DB');
# Restarting a node with wal_level = logical that has existing
# slots must succeed, but decoding from those slots must fail.
$node_master->safe_psql('postgres', 'ALTER SYSTEM SET wal_level = replica');
-is($node_master->safe_psql('postgres', 'SHOW wal_level'), 'logical', 'wal_level is still logical before restart');
+is($node_master->safe_psql('postgres', 'SHOW wal_level'),
+ 'logical', 'wal_level is still logical before restart');
$node_master->restart;
-is($node_master->safe_psql('postgres', 'SHOW wal_level'), 'replica', 'wal_level is replica');
-isnt($node_master->slot('test_slot')->{'catalog_xmin'}, '0',
- 'restored slot catalog_xmin is nonzero');
-is($node_master->psql('postgres', qq[SELECT pg_logical_slot_get_changes('test_slot', NULL, NULL);]), 3,
+is($node_master->safe_psql('postgres', 'SHOW wal_level'),
+ 'replica', 'wal_level is replica');
+isnt($node_master->slot('test_slot')->{'catalog_xmin'},
+ '0', 'restored slot catalog_xmin is nonzero');
+is( $node_master->psql(
+ 'postgres',
+ qq[SELECT pg_logical_slot_get_changes('test_slot', NULL, NULL);]),
+ 3,
'reading from slot with wal_level < logical fails');
-is($node_master->psql('postgres', q[SELECT pg_drop_replication_slot('test_slot')]), 0,
+is( $node_master->psql(
+ 'postgres', q[SELECT pg_drop_replication_slot('test_slot')]),
+ 0,
'can drop logical slot while wal_level = replica');
is($node_master->slot('test_slot')->{'catalog_xmin'}, '', 'slot was dropped');
diff --git a/src/test/recovery/t/007_sync_rep.pl b/src/test/recovery/t/007_sync_rep.pl
index e56bbb9d86f..8e3cc5e42e1 100644
--- a/src/test/recovery/t/007_sync_rep.pl
+++ b/src/test/recovery/t/007_sync_rep.pl
@@ -176,20 +176,20 @@ standby4|1|potential),
# Check that standby1 and standby2 are chosen as sync standbys
# based on their priorities.
test_sync_state(
-$node_master, qq(standby1|1|sync
+ $node_master, qq(standby1|1|sync
standby2|2|sync
standby4|0|async),
-'priority-based sync replication specified by FIRST keyword',
-'FIRST 2(standby1, standby2)');
+ 'priority-based sync replication specified by FIRST keyword',
+ 'FIRST 2(standby1, standby2)');
# Check that all the listed standbys are considered as candidates
# for sync standbys in a quorum-based sync replication.
test_sync_state(
-$node_master, qq(standby1|1|quorum
+ $node_master, qq(standby1|1|quorum
standby2|1|quorum
standby4|0|async),
-'2 quorum and 1 async',
-'ANY 2(standby1, standby2)');
+ '2 quorum and 1 async',
+ 'ANY 2(standby1, standby2)');
# Start Standby3 which will be considered in 'quorum' state.
$node_standby_3->start;
@@ -197,9 +197,9 @@ $node_standby_3->start;
# Check that the setting of 'ANY 2(*)' chooses all standbys as
# candidates for quorum sync standbys.
test_sync_state(
-$node_master, qq(standby1|1|quorum
+ $node_master, qq(standby1|1|quorum
standby2|1|quorum
standby3|1|quorum
standby4|1|quorum),
-'all standbys are considered as candidates for quorum sync standbys',
-'ANY 2(*)');
+ 'all standbys are considered as candidates for quorum sync standbys',
+ 'ANY 2(*)');
diff --git a/src/test/recovery/t/008_fsm_truncation.pl b/src/test/recovery/t/008_fsm_truncation.pl
index 055cac324c5..56eecf722c5 100644
--- a/src/test/recovery/t/008_fsm_truncation.pl
+++ b/src/test/recovery/t/008_fsm_truncation.pl
@@ -12,7 +12,8 @@ use Test::More tests => 1;
my $node_master = get_new_node('master');
$node_master->init(allows_streaming => 1);
-$node_master->append_conf('postgresql.conf', qq{
+$node_master->append_conf(
+ 'postgresql.conf', qq{
fsync = on
wal_log_hints = on
max_prepared_transactions = 5
@@ -29,7 +30,8 @@ $node_standby->init_from_backup($node_master, 'master_backup',
has_streaming => 1);
$node_standby->start;
-$node_master->psql('postgres', qq{
+$node_master->psql(
+ 'postgres', qq{
create table testtab (a int, b char(100));
insert into testtab select generate_series(1,1000), 'foo';
insert into testtab select generate_series(1,1000), 'foo';
@@ -37,7 +39,8 @@ delete from testtab where ctid > '(8,0)';
});
# Take a lock on the table to prevent following vacuum from truncating it
-$node_master->psql('postgres', qq{
+$node_master->psql(
+ 'postgres', qq{
begin;
lock table testtab in row share mode;
prepare transaction 'p1';
@@ -51,7 +54,8 @@ $node_master->psql('postgres', 'checkpoint');
# Now do some more insert/deletes, another vacuum to ensure full-page writes
# are done
-$node_master->psql('postgres', qq{
+$node_master->psql(
+ 'postgres', qq{
insert into testtab select generate_series(1,1000), 'foo';
delete from testtab where ctid > '(8,0)';
vacuum verbose testtab;
@@ -61,25 +65,25 @@ vacuum verbose testtab;
$node_standby->psql('postgres', 'checkpoint');
# Release the lock, vacuum again which should lead to truncation
-$node_master->psql('postgres', qq{
+$node_master->psql(
+ 'postgres', qq{
rollback prepared 'p1';
vacuum verbose testtab;
});
$node_master->psql('postgres', 'checkpoint');
my $until_lsn =
- $node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn();");
+ $node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn();");
# Wait long enough for standby to receive and apply all WAL
my $caughtup_query =
- "SELECT '$until_lsn'::pg_lsn <= pg_last_wal_replay_lsn()";
+ "SELECT '$until_lsn'::pg_lsn <= pg_last_wal_replay_lsn()";
$node_standby->poll_query_until('postgres', $caughtup_query)
- or die "Timed out while waiting for standby to catch up";
+ or die "Timed out while waiting for standby to catch up";
# Promote the standby
$node_standby->promote;
-$node_standby->poll_query_until('postgres',
- "SELECT NOT pg_is_in_recovery()")
+$node_standby->poll_query_until('postgres', "SELECT NOT pg_is_in_recovery()")
or die "Timed out while waiting for promotion of standby";
$node_standby->psql('postgres', 'checkpoint');
@@ -87,6 +91,8 @@ $node_standby->psql('postgres', 'checkpoint');
$node_standby->restart;
# Insert should work on standby
-is($node_standby->psql('postgres',
- qq{insert into testtab select generate_series(1,1000), 'foo';}),
- 0, 'INSERT succeeds with truncated relation FSM');
+is( $node_standby->psql(
+ 'postgres',
+ qq{insert into testtab select generate_series(1,1000), 'foo';}),
+ 0,
+ 'INSERT succeeds with truncated relation FSM');
diff --git a/src/test/recovery/t/009_twophase.pl b/src/test/recovery/t/009_twophase.pl
index 73103252a72..13b4a042050 100644
--- a/src/test/recovery/t/009_twophase.pl
+++ b/src/test/recovery/t/009_twophase.pl
@@ -9,7 +9,8 @@ use Test::More tests => 12;
# Setup master node
my $node_master = get_new_node("master");
$node_master->init(allows_streaming => 1);
-$node_master->append_conf('postgresql.conf', qq(
+$node_master->append_conf(
+ 'postgresql.conf', qq(
max_prepared_transactions = 10
log_checkpoints = true
));
@@ -19,17 +20,19 @@ $node_master->psql('postgres', "CREATE TABLE t_009_tbl (id int)");
# Setup slave node
my $node_slave = get_new_node('slave');
-$node_slave->init_from_backup($node_master, 'master_backup', has_streaming => 1);
+$node_slave->init_from_backup($node_master, 'master_backup',
+ has_streaming => 1);
$node_slave->start;
# Switch to synchronous replication
-$node_master->append_conf('postgresql.conf', qq(
+$node_master->append_conf(
+ 'postgresql.conf', qq(
synchronous_standby_names = '*'
));
$node_master->psql('postgres', "SELECT pg_reload_conf()");
my $psql_out = '';
-my $psql_rc = '';
+my $psql_rc = '';
###############################################################################
# Check that we can commit and abort transaction after soft restart.
@@ -38,7 +41,8 @@ my $psql_rc = '';
# files.
###############################################################################
-$node_master->psql('postgres', "
+$node_master->psql(
+ 'postgres', "
BEGIN;
INSERT INTO t_009_tbl VALUES (42);
SAVEPOINT s1;
@@ -64,7 +68,8 @@ is($psql_rc, '0', 'Rollback prepared transaction after restart');
# transaction using dedicated WAL records.
###############################################################################
-$node_master->psql('postgres', "
+$node_master->psql(
+ 'postgres', "
CHECKPOINT;
BEGIN;
INSERT INTO t_009_tbl VALUES (42);
@@ -89,7 +94,8 @@ is($psql_rc, '0', 'Rollback prepared transaction after teardown');
# Check that WAL replay can handle several transactions with same GID name.
###############################################################################
-$node_master->psql('postgres', "
+$node_master->psql(
+ 'postgres', "
CHECKPOINT;
BEGIN;
INSERT INTO t_009_tbl VALUES (42);
@@ -113,7 +119,8 @@ is($psql_rc, '0', 'Replay several transactions with same GID');
# while replaying transaction commits.
###############################################################################
-$node_master->psql('postgres', "
+$node_master->psql(
+ 'postgres', "
BEGIN;
INSERT INTO t_009_tbl VALUES (42);
SAVEPOINT s1;
@@ -122,7 +129,8 @@ $node_master->psql('postgres', "
COMMIT PREPARED 'xact_009_1';");
$node_master->teardown_node;
$node_master->start;
-$psql_rc = $node_master->psql('postgres', "
+$psql_rc = $node_master->psql(
+ 'postgres', "
BEGIN;
INSERT INTO t_009_tbl VALUES (42);
SAVEPOINT s1;
@@ -138,24 +146,28 @@ $node_master->psql('postgres', "COMMIT PREPARED 'xact_009_1'");
# Check that WAL replay will cleanup its shared memory state on running slave.
###############################################################################
-$node_master->psql('postgres', "
+$node_master->psql(
+ 'postgres', "
BEGIN;
INSERT INTO t_009_tbl VALUES (42);
SAVEPOINT s1;
INSERT INTO t_009_tbl VALUES (43);
PREPARE TRANSACTION 'xact_009_1';
COMMIT PREPARED 'xact_009_1';");
-$node_slave->psql('postgres', "SELECT count(*) FROM pg_prepared_xacts",
- stdout => \$psql_out);
+$node_slave->psql(
+ 'postgres',
+ "SELECT count(*) FROM pg_prepared_xacts",
+ stdout => \$psql_out);
is($psql_out, '0',
- "Cleanup of shared memory state on running standby without checkpoint");
+ "Cleanup of shared memory state on running standby without checkpoint");
###############################################################################
# Same as in previous case, but let's force checkpoint on slave between
# prepare and commit to use on-disk twophase files.
###############################################################################
-$node_master->psql('postgres', "
+$node_master->psql(
+ 'postgres', "
BEGIN;
INSERT INTO t_009_tbl VALUES (42);
SAVEPOINT s1;
@@ -163,16 +175,19 @@ $node_master->psql('postgres', "
PREPARE TRANSACTION 'xact_009_1';");
$node_slave->psql('postgres', "CHECKPOINT");
$node_master->psql('postgres', "COMMIT PREPARED 'xact_009_1'");
-$node_slave->psql('postgres', "SELECT count(*) FROM pg_prepared_xacts",
- stdout => \$psql_out);
+$node_slave->psql(
+ 'postgres',
+ "SELECT count(*) FROM pg_prepared_xacts",
+ stdout => \$psql_out);
is($psql_out, '0',
- "Cleanup of shared memory state on running standby after checkpoint");
+ "Cleanup of shared memory state on running standby after checkpoint");
###############################################################################
# Check that prepared transactions can be committed on promoted slave.
###############################################################################
-$node_master->psql('postgres', "
+$node_master->psql(
+ 'postgres', "
BEGIN;
INSERT INTO t_009_tbl VALUES (42);
SAVEPOINT s1;
@@ -180,8 +195,7 @@ $node_master->psql('postgres', "
PREPARE TRANSACTION 'xact_009_1';");
$node_master->teardown_node;
$node_slave->promote;
-$node_slave->poll_query_until('postgres',
- "SELECT NOT pg_is_in_recovery()")
+$node_slave->poll_query_until('postgres', "SELECT NOT pg_is_in_recovery()")
or die "Timed out while waiting for promotion of standby";
$psql_rc = $node_slave->psql('postgres', "COMMIT PREPARED 'xact_009_1'");
@@ -190,7 +204,8 @@ is($psql_rc, '0', "Restore of prepared transaction on promoted slave");
# change roles
($node_master, $node_slave) = ($node_slave, $node_master);
$node_slave->enable_streaming($node_master);
-$node_slave->append_conf('recovery.conf', qq(
+$node_slave->append_conf(
+ 'recovery.conf', qq(
recovery_target_timeline='latest'
));
$node_slave->start;
@@ -202,7 +217,8 @@ $node_slave->start;
# consistent.
###############################################################################
-$node_master->psql('postgres', "
+$node_master->psql(
+ 'postgres', "
BEGIN;
INSERT INTO t_009_tbl VALUES (42);
SAVEPOINT s1;
@@ -211,19 +227,21 @@ $node_master->psql('postgres', "
$node_master->stop;
$node_slave->restart;
$node_slave->promote;
-$node_slave->poll_query_until('postgres',
- "SELECT NOT pg_is_in_recovery()")
+$node_slave->poll_query_until('postgres', "SELECT NOT pg_is_in_recovery()")
or die "Timed out while waiting for promotion of standby";
-$node_slave->psql('postgres', "SELECT count(*) FROM pg_prepared_xacts",
- stdout => \$psql_out);
+$node_slave->psql(
+ 'postgres',
+ "SELECT count(*) FROM pg_prepared_xacts",
+ stdout => \$psql_out);
is($psql_out, '1',
- "Restore prepared transactions from files with master down");
+ "Restore prepared transactions from files with master down");
# restore state
($node_master, $node_slave) = ($node_slave, $node_master);
$node_slave->enable_streaming($node_master);
-$node_slave->append_conf('recovery.conf', qq(
+$node_slave->append_conf(
+ 'recovery.conf', qq(
recovery_target_timeline='latest'
));
$node_slave->start;
@@ -234,7 +252,8 @@ $node_master->psql('postgres', "COMMIT PREPARED 'xact_009_1'");
# restart while master is down.
###############################################################################
-$node_master->psql('postgres', "
+$node_master->psql(
+ 'postgres', "
BEGIN;
INSERT INTO t_009_tbl VALUES (242);
SAVEPOINT s1;
@@ -245,19 +264,21 @@ $node_master->stop;
$node_slave->teardown_node;
$node_slave->start;
$node_slave->promote;
-$node_slave->poll_query_until('postgres',
- "SELECT NOT pg_is_in_recovery()")
+$node_slave->poll_query_until('postgres', "SELECT NOT pg_is_in_recovery()")
or die "Timed out while waiting for promotion of standby";
-$node_slave->psql('postgres', "SELECT count(*) FROM pg_prepared_xacts",
- stdout => \$psql_out);
+$node_slave->psql(
+ 'postgres',
+ "SELECT count(*) FROM pg_prepared_xacts",
+ stdout => \$psql_out);
is($psql_out, '1',
- "Restore prepared transactions from records with master down");
+ "Restore prepared transactions from records with master down");
# restore state
($node_master, $node_slave) = ($node_slave, $node_master);
$node_slave->enable_streaming($node_master);
-$node_slave->append_conf('recovery.conf', qq(
+$node_slave->append_conf(
+ 'recovery.conf', qq(
recovery_target_timeline='latest'
));
$node_slave->start;
@@ -269,7 +290,8 @@ $node_master->psql('postgres', "COMMIT PREPARED 'xact_009_1'");
# XLOG_STANDBY_LOCK wal record.
###############################################################################
-$node_master->psql('postgres', "
+$node_master->psql(
+ 'postgres', "
BEGIN;
CREATE TABLE t_009_tbl2 (id int);
SAVEPOINT s1;
@@ -280,6 +302,8 @@ $node_master->psql('postgres', "
CHECKPOINT;
COMMIT PREPARED 'xact_009_1';");
-$node_slave->psql('postgres', "SELECT count(*) FROM pg_prepared_xacts",
- stdout => \$psql_out);
+$node_slave->psql(
+ 'postgres',
+ "SELECT count(*) FROM pg_prepared_xacts",
+ stdout => \$psql_out);
is($psql_out, '0', "Replay prepared transaction with DDL");
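
The assertions in this file all follow the same shape: prepare a two-phase transaction on the primary, optionally force a checkpoint, restart or promotion, and then count rows in pg_prepared_xacts on the standby through psql() with a captured stdout. A minimal sketch of that shape, using the same PostgresNode calls as above (node, table and gid names are placeholders, not the ones from 009_twophase.pl):

use strict;
use warnings;
use PostgresNode;
use TestLib;
use Test::More tests => 1;

# Primary with two-phase transactions enabled, plus a streaming standby
# built from a base backup of it.
my $primary = get_new_node('primary');
$primary->init(allows_streaming => 1);
$primary->append_conf(
    'postgresql.conf', qq(
max_prepared_transactions = 10
));
$primary->start;
$primary->backup('primary_backup');

my $standby = get_new_node('standby');
$standby->init_from_backup($primary, 'primary_backup', has_streaming => 1);
$standby->start;

# Prepare, but do not yet commit, a transaction that includes DDL.
$primary->psql(
    'postgres', "
    BEGIN;
    CREATE TABLE t_sketch (id int);
    INSERT INTO t_sketch VALUES (1);
    PREPARE TRANSACTION 'xact_sketch';");

# Wait for the standby to replay it, then check its twophase state.
$primary->wait_for_catchup($standby, 'replay', $primary->lsn('insert'));

my $psql_out = '';
$standby->psql(
    'postgres',
    "SELECT count(*) FROM pg_prepared_xacts",
    stdout => \$psql_out);
is($psql_out, '1', 'prepared transaction visible on standby');

$primary->psql('postgres', "COMMIT PREPARED 'xact_sketch'");
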
diff --git a/src/test/recovery/t/010_logical_decoding_timelines.pl b/src/test/recovery/t/010_logical_decoding_timelines.pl
index e168cbc161a..65f6ba2fcad 100644
--- a/src/test/recovery/t/010_logical_decoding_timelines.pl
+++ b/src/test/recovery/t/010_logical_decoding_timelines.pl
@@ -34,7 +34,8 @@ my ($stdout, $stderr, $ret);
# Initialize master node
my $node_master = get_new_node('master');
$node_master->init(allows_streaming => 1, has_archiving => 1);
-$node_master->append_conf('postgresql.conf', q[
+$node_master->append_conf(
+ 'postgresql.conf', q[
wal_level = 'logical'
max_replication_slots = 3
max_wal_senders = 2
@@ -60,8 +61,7 @@ $node_master->safe_psql('postgres',
# the same physical copy trick, so:
$node_master->safe_psql('postgres', 'CREATE DATABASE dropme;');
$node_master->safe_psql('dropme',
-"SELECT pg_create_logical_replication_slot('dropme_slot', 'test_decoding');"
-);
+"SELECT pg_create_logical_replication_slot('dropme_slot', 'test_decoding');");
$node_master->safe_psql('postgres', 'CHECKPOINT;');
@@ -76,20 +76,23 @@ $node_replica->init_from_backup(
$node_master, $backup_name,
has_streaming => 1,
has_restoring => 1);
-$node_replica->append_conf(
- 'recovery.conf', q[primary_slot_name = 'phys_slot']);
+$node_replica->append_conf('recovery.conf',
+ q[primary_slot_name = 'phys_slot']);
$node_replica->start;
# If we drop 'dropme' on the master, the standby should drop the
# db and associated slot.
-is($node_master->psql('postgres', 'DROP DATABASE dropme'), 0,
- 'dropped DB with logical slot OK on master');
-$node_master->wait_for_catchup($node_replica, 'replay', $node_master->lsn('insert'));
-is($node_replica->safe_psql('postgres', q[SELECT 1 FROM pg_database WHERE datname = 'dropme']), '',
+is($node_master->psql('postgres', 'DROP DATABASE dropme'),
+ 0, 'dropped DB with logical slot OK on master');
+$node_master->wait_for_catchup($node_replica, 'replay',
+ $node_master->lsn('insert'));
+is( $node_replica->safe_psql(
+ 'postgres', q[SELECT 1 FROM pg_database WHERE datname = 'dropme']),
+ '',
'dropped DB dropme on standby');
-is($node_master->slot('dropme_slot')->{'slot_name'}, undef,
- 'logical slot was actually dropped on standby');
+is($node_master->slot('dropme_slot')->{'slot_name'},
+ undef, 'logical slot was actually dropped on standby');
# Back to testing failover...
$node_master->safe_psql('postgres',
@@ -109,19 +112,22 @@ is($stdout, 'before_basebackup',
# from the master to make sure its hot_standby_feedback
# has locked in a catalog_xmin on the physical slot, and that
# any xmin is < the catalog_xmin
-$node_master->poll_query_until('postgres', q[
+$node_master->poll_query_until(
+ 'postgres', q[
SELECT catalog_xmin IS NOT NULL
FROM pg_replication_slots
WHERE slot_name = 'phys_slot'
]);
my $phys_slot = $node_master->slot('phys_slot');
-isnt($phys_slot->{'xmin'}, '',
- 'xmin assigned on physical slot of master');
-isnt($phys_slot->{'catalog_xmin'}, '',
- 'catalog_xmin assigned on physical slot of master');
+isnt($phys_slot->{'xmin'}, '', 'xmin assigned on physical slot of master');
+isnt($phys_slot->{'catalog_xmin'},
+ '', 'catalog_xmin assigned on physical slot of master');
+
# Ignore wrap-around here, we're on a new cluster:
-cmp_ok($phys_slot->{'xmin'}, '>=', $phys_slot->{'catalog_xmin'},
- 'xmin on physical slot must not be lower than catalog_xmin');
+cmp_ok(
+ $phys_slot->{'xmin'}, '>=',
+ $phys_slot->{'catalog_xmin'},
+ 'xmin on physical slot must not be lower than catalog_xmin');
$node_master->safe_psql('postgres', 'CHECKPOINT');
@@ -162,23 +168,30 @@ COMMIT
BEGIN
table public.decoding: INSERT: blah[text]:'after failover'
COMMIT);
-is($stdout, $final_expected_output_bb, 'decoded expected data from slot before_basebackup');
+is($stdout, $final_expected_output_bb,
+ 'decoded expected data from slot before_basebackup');
is($stderr, '', 'replay from slot before_basebackup produces no stderr');
# So far we've peeked the slots, so when we fetch the same info over
# pg_recvlogical we should get complete results. First, find out the commit lsn
# of the last transaction. There's no max(pg_lsn), so:
-my $endpos = $node_replica->safe_psql('postgres', "SELECT lsn FROM pg_logical_slot_peek_changes('before_basebackup', NULL, NULL) ORDER BY lsn DESC LIMIT 1;");
+my $endpos = $node_replica->safe_psql('postgres',
+"SELECT lsn FROM pg_logical_slot_peek_changes('before_basebackup', NULL, NULL) ORDER BY lsn DESC LIMIT 1;"
+);
# now use the walsender protocol to peek the slot changes and make sure we see
# the same results.
-$stdout = $node_replica->pg_recvlogical_upto('postgres', 'before_basebackup',
- $endpos, 30, 'include-xids' => '0', 'skip-empty-xacts' => '1');
+$stdout = $node_replica->pg_recvlogical_upto(
+ 'postgres', 'before_basebackup',
+ $endpos, 30,
+ 'include-xids' => '0',
+ 'skip-empty-xacts' => '1');
# walsender likes to add a newline
chomp($stdout);
-is($stdout, $final_expected_output_bb, 'got same output from walsender via pg_recvlogical on before_basebackup');
+is($stdout, $final_expected_output_bb,
+ 'got same output from walsender via pg_recvlogical on before_basebackup');
$node_replica->teardown_node();
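
The comparison at the end of this test is worth spelling out on its own: the SQL-level peek of a logical slot and the walsender-protocol stream of the same slot must produce identical output, with the SQL peek also supplying the stop LSN for pg_recvlogical. A self-contained sketch of that comparison on a single node (slot and table names are placeholders, not the ones from 010_logical_decoding_timelines.pl):

use strict;
use warnings;
use PostgresNode;
use TestLib;
use Test::More tests => 1;

my $node = get_new_node('decoder');
$node->init(allows_streaming => 'logical');
$node->start;

$node->safe_psql('postgres',
    "SELECT pg_create_logical_replication_slot('sketch_slot', 'test_decoding');"
);
$node->safe_psql('postgres', "CREATE TABLE decoding_sketch (blah text);");
$node->safe_psql('postgres',
    "INSERT INTO decoding_sketch (blah) VALUES ('some data');");

# Peek the slot via SQL with the same plugin options pg_recvlogical will use.
my $expected = $node->safe_psql('postgres',
    "SELECT data FROM pg_logical_slot_peek_changes('sketch_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');"
);

# There is no max(pg_lsn), so fetch the stop point the same way as above.
my $endpos = $node->safe_psql('postgres',
    "SELECT lsn FROM pg_logical_slot_peek_changes('sketch_slot', NULL, NULL) ORDER BY lsn DESC LIMIT 1;"
);

# The walsender-protocol view of the slot should match the SQL peek;
# pg_recvlogical likes to add a trailing newline, so chomp it.
my $stdout = $node->pg_recvlogical_upto(
    'postgres', 'sketch_slot',
    $endpos, 30,
    'include-xids'     => '0',
    'skip-empty-xacts' => '1');
chomp($stdout);
is($stdout, $expected, 'SQL peek and pg_recvlogical output agree');

$node->teardown_node();
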
diff --git a/src/test/recovery/t/011_crash_recovery.pl b/src/test/recovery/t/011_crash_recovery.pl
index 6e924d69554..7afa94a8274 100644
--- a/src/test/recovery/t/011_crash_recovery.pl
+++ b/src/test/recovery/t/011_crash_recovery.pl
@@ -7,9 +7,10 @@ use PostgresNode;
use TestLib;
use Test::More;
use Config;
-if ($Config{osname} eq 'MSWin32')
+if ($Config{osname} eq 'MSWin32')
{
- # some Windows Perls at least don't like IPC::Run's start/kill_kill regime.
+
+ # some Windows Perls at least don't like IPC::Run's start/kill_kill regime.
plan skip_all => "Test fails on Windows perl";
}
else
@@ -28,8 +29,14 @@ my ($stdin, $stdout, $stderr) = ('', '', '');
# an xact to be in-progress when we crash and we need to know
# its xid.
my $tx = IPC::Run::start(
- ['psql', '-X', '-qAt', '-v', 'ON_ERROR_STOP=1', '-f', '-', '-d', $node->connstr('postgres')],
- '<', \$stdin, '>', \$stdout, '2>', \$stderr);
+ [ 'psql', '-X', '-qAt', '-v', 'ON_ERROR_STOP=1', '-f', '-', '-d',
+ $node->connstr('postgres') ],
+ '<',
+ \$stdin,
+ '>',
+ \$stdout,
+ '2>',
+ \$stderr);
$stdin .= q[
BEGIN;
CREATE TABLE mine(x integer);
@@ -41,16 +48,19 @@ $tx->pump until $stdout =~ /[[:digit:]]+[\r\n]$/;
my $xid = $stdout;
chomp($xid);
-is($node->safe_psql('postgres', qq[SELECT txid_status('$xid');]), 'in progress', 'own xid is in-progres');
+is($node->safe_psql('postgres', qq[SELECT txid_status('$xid');]),
+	'in progress', 'own xid is in-progress');
# Crash and restart the postmaster
$node->stop('immediate');
$node->start;
# Make sure we really got a new xid
-cmp_ok($node->safe_psql('postgres', 'SELECT txid_current()'), '>', $xid,
- 'new xid after restart is greater');
+cmp_ok($node->safe_psql('postgres', 'SELECT txid_current()'),
+ '>', $xid, 'new xid after restart is greater');
+
# and make sure we show the in-progress xact as aborted
-is($node->safe_psql('postgres', qq[SELECT txid_status('$xid');]), 'aborted', 'xid is aborted after crash');
+is($node->safe_psql('postgres', qq[SELECT txid_status('$xid');]),
+ 'aborted', 'xid is aborted after crash');
$tx->kill_kill;
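
The only non-obvious machinery in this test is the background psql session: IPC::Run::start keeps a psql process running with its stdin and stdout attached to Perl scalars, so a transaction can be held open, and its xid read back, while the test does other things to the server. A minimal sketch of just that piece (node name and test message are placeholders):

use strict;
use warnings;
use PostgresNode;
use TestLib;
use Test::More tests => 1;
use IPC::Run;

my $node = get_new_node('crash_sketch');
$node->init;
$node->start;

# Keep a psql session open in the background; -q suppresses command tags,
# so only the query output ends up in $stdout.
my ($stdin, $stdout, $stderr) = ('', '', '');
my $tx = IPC::Run::start(
    [ 'psql', '-X', '-qAt', '-v', 'ON_ERROR_STOP=1', '-f', '-', '-d',
        $node->connstr('postgres') ],
    '<',
    \$stdin,
    '>',
    \$stdout,
    '2>',
    \$stderr);

# Start a transaction and have the session print its xid, then pump until
# the xid has actually arrived on stdout.
$stdin .= q[
BEGIN;
SELECT txid_current();
];
$tx->pump until $stdout =~ /[[:digit:]]+[\r\n]$/;
my $xid = $stdout;
chomp($xid);

# From another connection the xid should show as still in progress.
is($node->safe_psql('postgres', qq[SELECT txid_status('$xid');]),
    'in progress', 'background transaction is in progress');

$tx->kill_kill;
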
diff --git a/src/test/recovery/t/012_subtransactions.pl b/src/test/recovery/t/012_subtransactions.pl
index 5e02c28cb88..30677e16752 100644
--- a/src/test/recovery/t/012_subtransactions.pl
+++ b/src/test/recovery/t/012_subtransactions.pl
@@ -9,7 +9,8 @@ use Test::More tests => 12;
# Setup master node
my $node_master = get_new_node("master");
$node_master->init(allows_streaming => 1);
-$node_master->append_conf('postgresql.conf', qq(
+$node_master->append_conf(
+ 'postgresql.conf', qq(
max_prepared_transactions = 10
log_checkpoints = true
));
@@ -19,24 +20,27 @@ $node_master->psql('postgres', "CREATE TABLE t_012_tbl (id int)");
# Setup slave node
my $node_slave = get_new_node('slave');
-$node_slave->init_from_backup($node_master, 'master_backup', has_streaming => 1);
+$node_slave->init_from_backup($node_master, 'master_backup',
+ has_streaming => 1);
$node_slave->start;
# Switch to synchronous replication
-$node_master->append_conf('postgresql.conf', qq(
+$node_master->append_conf(
+ 'postgresql.conf', qq(
synchronous_standby_names = '*'
));
$node_master->psql('postgres', "SELECT pg_reload_conf()");
my $psql_out = '';
-my $psql_rc = '';
+my $psql_rc = '';
###############################################################################
# Check that replay will correctly set SUBTRANS and properly advance nextXid
# so that it won't conflict with savepoint xids.
###############################################################################
-$node_master->psql('postgres', "
+$node_master->psql(
+ 'postgres', "
BEGIN;
DELETE FROM t_012_tbl;
INSERT INTO t_012_tbl VALUES (43);
@@ -55,7 +59,8 @@ $node_master->psql('postgres', "
$node_master->stop;
$node_master->start;
-$node_master->psql('postgres', "
+$node_master->psql(
+ 'postgres', "
-- here we can get xid of previous savepoint if nextXid
-- wasn't properly advanced
BEGIN;
@@ -63,8 +68,10 @@ $node_master->psql('postgres', "
ROLLBACK;
COMMIT PREPARED 'xact_012_1';");
-$node_master->psql('postgres', "SELECT count(*) FROM t_012_tbl",
- stdout => \$psql_out);
+$node_master->psql(
+ 'postgres',
+ "SELECT count(*) FROM t_012_tbl",
+ stdout => \$psql_out);
is($psql_out, '6', "Check nextXid handling for prepared subtransactions");
###############################################################################
@@ -75,7 +82,8 @@ is($psql_out, '6', "Check nextXid handling for prepared subtransactions");
$node_master->psql('postgres', "DELETE FROM t_012_tbl");
# Function borrowed from src/test/regress/sql/hs_primary_extremes.sql
-$node_master->psql('postgres', "
+$node_master->psql(
+ 'postgres', "
CREATE OR REPLACE FUNCTION hs_subxids (n integer)
RETURNS void
LANGUAGE plpgsql
@@ -87,39 +95,48 @@ $node_master->psql('postgres', "
RETURN;
EXCEPTION WHEN raise_exception THEN NULL; END;
\$\$;");
-$node_master->psql('postgres', "
+$node_master->psql(
+ 'postgres', "
BEGIN;
SELECT hs_subxids(127);
COMMIT;");
-$node_master->wait_for_catchup($node_slave, 'replay', $node_master->lsn('insert'));
-$node_slave->psql('postgres', "SELECT coalesce(sum(id),-1) FROM t_012_tbl",
- stdout => \$psql_out);
+$node_master->wait_for_catchup($node_slave, 'replay',
+ $node_master->lsn('insert'));
+$node_slave->psql(
+ 'postgres',
+ "SELECT coalesce(sum(id),-1) FROM t_012_tbl",
+ stdout => \$psql_out);
is($psql_out, '8128', "Visible");
$node_master->stop;
$node_slave->promote;
-$node_slave->poll_query_until('postgres',
- "SELECT NOT pg_is_in_recovery()")
+$node_slave->poll_query_until('postgres', "SELECT NOT pg_is_in_recovery()")
or die "Timed out while waiting for promotion of standby";
-$node_slave->psql('postgres', "SELECT coalesce(sum(id),-1) FROM t_012_tbl",
- stdout => \$psql_out);
+$node_slave->psql(
+ 'postgres',
+ "SELECT coalesce(sum(id),-1) FROM t_012_tbl",
+ stdout => \$psql_out);
is($psql_out, '8128', "Visible");
# restore state
($node_master, $node_slave) = ($node_slave, $node_master);
$node_slave->enable_streaming($node_master);
-$node_slave->append_conf('recovery.conf', qq(
+$node_slave->append_conf(
+ 'recovery.conf', qq(
recovery_target_timeline='latest'
));
$node_slave->start;
-$node_slave->psql('postgres', "SELECT coalesce(sum(id),-1) FROM t_012_tbl",
- stdout => \$psql_out);
+$node_slave->psql(
+ 'postgres',
+ "SELECT coalesce(sum(id),-1) FROM t_012_tbl",
+ stdout => \$psql_out);
is($psql_out, '8128', "Visible");
$node_master->psql('postgres', "DELETE FROM t_012_tbl");
# Function borrowed from src/test/regress/sql/hs_primary_extremes.sql
-$node_master->psql('postgres', "
+$node_master->psql(
+ 'postgres', "
CREATE OR REPLACE FUNCTION hs_subxids (n integer)
RETURNS void
LANGUAGE plpgsql
@@ -131,67 +148,87 @@ $node_master->psql('postgres', "
RETURN;
EXCEPTION WHEN raise_exception THEN NULL; END;
\$\$;");
-$node_master->psql('postgres', "
+$node_master->psql(
+ 'postgres', "
BEGIN;
SELECT hs_subxids(127);
PREPARE TRANSACTION 'xact_012_1';");
-$node_master->wait_for_catchup($node_slave, 'replay', $node_master->lsn('insert'));
-$node_slave->psql('postgres', "SELECT coalesce(sum(id),-1) FROM t_012_tbl",
- stdout => \$psql_out);
+$node_master->wait_for_catchup($node_slave, 'replay',
+ $node_master->lsn('insert'));
+$node_slave->psql(
+ 'postgres',
+ "SELECT coalesce(sum(id),-1) FROM t_012_tbl",
+ stdout => \$psql_out);
is($psql_out, '-1', "Not visible");
$node_master->stop;
$node_slave->promote;
-$node_slave->poll_query_until('postgres',
- "SELECT NOT pg_is_in_recovery()")
+$node_slave->poll_query_until('postgres', "SELECT NOT pg_is_in_recovery()")
or die "Timed out while waiting for promotion of standby";
-$node_slave->psql('postgres', "SELECT coalesce(sum(id),-1) FROM t_012_tbl",
- stdout => \$psql_out);
+$node_slave->psql(
+ 'postgres',
+ "SELECT coalesce(sum(id),-1) FROM t_012_tbl",
+ stdout => \$psql_out);
is($psql_out, '-1', "Not visible");
# restore state
($node_master, $node_slave) = ($node_slave, $node_master);
$node_slave->enable_streaming($node_master);
-$node_slave->append_conf('recovery.conf', qq(
+$node_slave->append_conf(
+ 'recovery.conf', qq(
recovery_target_timeline='latest'
));
$node_slave->start;
$psql_rc = $node_master->psql('postgres', "COMMIT PREPARED 'xact_012_1'");
-is($psql_rc, '0', "Restore of PGPROC_MAX_CACHED_SUBXIDS+ prepared transaction on promoted slave");
-
-$node_master->psql('postgres', "SELECT coalesce(sum(id),-1) FROM t_012_tbl",
- stdout => \$psql_out);
+is($psql_rc, '0',
+"Restore of PGPROC_MAX_CACHED_SUBXIDS+ prepared transaction on promoted slave"
+);
+
+$node_master->psql(
+ 'postgres',
+ "SELECT coalesce(sum(id),-1) FROM t_012_tbl",
+ stdout => \$psql_out);
is($psql_out, '8128', "Visible");
$node_master->psql('postgres', "DELETE FROM t_012_tbl");
-$node_master->psql('postgres', "
+$node_master->psql(
+ 'postgres', "
BEGIN;
SELECT hs_subxids(201);
PREPARE TRANSACTION 'xact_012_1';");
-$node_master->wait_for_catchup($node_slave, 'replay', $node_master->lsn('insert'));
-$node_slave->psql('postgres', "SELECT coalesce(sum(id),-1) FROM t_012_tbl",
- stdout => \$psql_out);
+$node_master->wait_for_catchup($node_slave, 'replay',
+ $node_master->lsn('insert'));
+$node_slave->psql(
+ 'postgres',
+ "SELECT coalesce(sum(id),-1) FROM t_012_tbl",
+ stdout => \$psql_out);
is($psql_out, '-1', "Not visible");
$node_master->stop;
$node_slave->promote;
-$node_slave->poll_query_until('postgres',
- "SELECT NOT pg_is_in_recovery()")
+$node_slave->poll_query_until('postgres', "SELECT NOT pg_is_in_recovery()")
or die "Timed out while waiting for promotion of standby";
-$node_slave->psql('postgres', "SELECT coalesce(sum(id),-1) FROM t_012_tbl",
- stdout => \$psql_out);
+$node_slave->psql(
+ 'postgres',
+ "SELECT coalesce(sum(id),-1) FROM t_012_tbl",
+ stdout => \$psql_out);
is($psql_out, '-1', "Not visible");
# restore state
($node_master, $node_slave) = ($node_slave, $node_master);
$node_slave->enable_streaming($node_master);
-$node_slave->append_conf('recovery.conf', qq(
+$node_slave->append_conf(
+ 'recovery.conf', qq(
recovery_target_timeline='latest'
));
$node_slave->start;
$psql_rc = $node_master->psql('postgres', "ROLLBACK PREPARED 'xact_012_1'");
-is($psql_rc, '0', "Rollback of PGPROC_MAX_CACHED_SUBXIDS+ prepared transaction on promoted slave");
-
-$node_master->psql('postgres', "SELECT coalesce(sum(id),-1) FROM t_012_tbl",
- stdout => \$psql_out);
+is($psql_rc, '0',
+"Rollback of PGPROC_MAX_CACHED_SUBXIDS+ prepared transaction on promoted slave"
+);
+
+$node_master->psql(
+ 'postgres',
+ "SELECT coalesce(sum(id),-1) FROM t_012_tbl",
+ stdout => \$psql_out);
is($psql_out, '-1', "Not visible");
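
Each of the scenarios above ends with the same promote-and-swap dance: promote the standby, wait for it to leave recovery, then turn the old master into a standby of the new one by re-enabling streaming and following the latest timeline. A sketch of that sequence factored into a helper (the subroutine name is purely illustrative; the test repeats the statements inline):

use strict;
use warnings;
use PostgresNode;

# Promote $slave, wait until it has left recovery, then re-point the
# already-stopped old master at it and follow the new timeline.
# Returns the pair with the roles swapped.
sub promote_and_swap
{
    my ($master, $slave) = @_;

    $slave->promote;
    $slave->poll_query_until('postgres', "SELECT NOT pg_is_in_recovery()")
      or die "Timed out while waiting for promotion of standby";

    $master->enable_streaming($slave);
    $master->append_conf(
        'recovery.conf', qq(
recovery_target_timeline='latest'
));
    $master->start;

    return ($slave, $master);
}

# Typical use, given the $node_master/$node_slave pair from this test:
#   $node_master->stop;
#   ($node_master, $node_slave) = promote_and_swap($node_master, $node_slave);
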
diff --git a/src/test/ssl/ServerSetup.pm b/src/test/ssl/ServerSetup.pm
index 72d60e8bc88..f63c81cfc67 100644
--- a/src/test/ssl/ServerSetup.pm
+++ b/src/test/ssl/ServerSetup.pm
@@ -74,12 +74,12 @@ sub configure_test_server_for_ssl
open my $sslconf, '>', "$pgdata/sslconfig.conf";
close $sslconf;
- # Copy all server certificates and keys, and client root cert, to the data dir
+# Copy all server certificates and keys, and client root cert, to the data dir
copy_files("ssl/server-*.crt", $pgdata);
copy_files("ssl/server-*.key", $pgdata);
chmod(0600, glob "$pgdata/server-*.key") or die $!;
copy_files("ssl/root+client_ca.crt", $pgdata);
- copy_files("ssl/root_ca.crt", $pgdata);
+ copy_files("ssl/root_ca.crt", $pgdata);
copy_files("ssl/root+client.crl", $pgdata);
# Stop and restart server to load new listen_addresses.
@@ -95,10 +95,11 @@ sub switch_server_cert
{
my $node = $_[0];
my $certfile = $_[1];
- my $cafile = $_[2] || "root+client_ca";
+ my $cafile = $_[2] || "root+client_ca";
my $pgdata = $node->data_dir;
- note "reloading server with certfile \"$certfile\" and cafile \"$cafile\"";
+ note
+ "reloading server with certfile \"$certfile\" and cafile \"$cafile\"";
open my $sslconf, '>', "$pgdata/sslconfig.conf";
print $sslconf "ssl=on\n";
@@ -117,10 +118,10 @@ sub configure_hba_for_ssl
my $serverhost = $_[1];
my $pgdata = $node->data_dir;
- # Only accept SSL connections from localhost. Our tests don't depend on this
- # but seems best to keep it as narrow as possible for security reasons.
- #
- # When connecting to certdb, also check the client certificate.
+ # Only accept SSL connections from localhost. Our tests don't depend on this
+ # but seems best to keep it as narrow as possible for security reasons.
+ #
+ # When connecting to certdb, also check the client certificate.
open my $hba, '>', "$pgdata/pg_hba.conf";
print $hba
"# TYPE DATABASE USER ADDRESS METHOD\n";
diff --git a/src/test/subscription/t/001_rep_changes.pl b/src/test/subscription/t/001_rep_changes.pl
index 1545a5c54aa..6d65388b225 100644
--- a/src/test/subscription/t/001_rep_changes.pl
+++ b/src/test/subscription/t/001_rep_changes.pl
@@ -26,19 +26,15 @@ $node_publisher->safe_psql('postgres',
"CREATE TABLE tab_rep (a int primary key)");
# Setup structure on subscriber
-$node_subscriber->safe_psql('postgres',
- "CREATE TABLE tab_notrep (a int)");
-$node_subscriber->safe_psql('postgres',
- "CREATE TABLE tab_ins (a int)");
-$node_subscriber->safe_psql('postgres',
- "CREATE TABLE tab_full (a int)");
+$node_subscriber->safe_psql('postgres', "CREATE TABLE tab_notrep (a int)");
+$node_subscriber->safe_psql('postgres', "CREATE TABLE tab_ins (a int)");
+$node_subscriber->safe_psql('postgres', "CREATE TABLE tab_full (a int)");
$node_subscriber->safe_psql('postgres',
"CREATE TABLE tab_rep (a int primary key)");
# Setup logical replication
my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
-$node_publisher->safe_psql('postgres',
- "CREATE PUBLICATION tap_pub");
+$node_publisher->safe_psql('postgres', "CREATE PUBLICATION tap_pub");
$node_publisher->safe_psql('postgres',
"CREATE PUBLICATION tap_pub_ins_only WITH (publish = insert)");
$node_publisher->safe_psql('postgres',
@@ -48,7 +44,8 @@ $node_publisher->safe_psql('postgres',
my $appname = 'tap_sub';
$node_subscriber->safe_psql('postgres',
- "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub, tap_pub_ins_only");
+"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub, tap_pub_ins_only"
+);
# Wait for subscriber to finish initialization
my $caughtup_query =
@@ -72,27 +69,23 @@ is($result, qq(1002), 'check initial data was copied to subscriber');
$node_publisher->safe_psql('postgres',
"INSERT INTO tab_ins SELECT generate_series(1,50)");
-$node_publisher->safe_psql('postgres',
- "DELETE FROM tab_ins WHERE a > 20");
-$node_publisher->safe_psql('postgres',
- "UPDATE tab_ins SET a = -a");
+$node_publisher->safe_psql('postgres', "DELETE FROM tab_ins WHERE a > 20");
+$node_publisher->safe_psql('postgres', "UPDATE tab_ins SET a = -a");
$node_publisher->safe_psql('postgres',
"INSERT INTO tab_rep SELECT generate_series(1,50)");
-$node_publisher->safe_psql('postgres',
- "DELETE FROM tab_rep WHERE a > 20");
-$node_publisher->safe_psql('postgres',
- "UPDATE tab_rep SET a = -a");
+$node_publisher->safe_psql('postgres', "DELETE FROM tab_rep WHERE a > 20");
+$node_publisher->safe_psql('postgres', "UPDATE tab_rep SET a = -a");
$node_publisher->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for subscriber to catch up";
-$result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*), min(a), max(a) FROM tab_ins");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(a), max(a) FROM tab_ins");
is($result, qq(1052|1|1002), 'check replicated inserts on subscriber');
-$result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*), min(a), max(a) FROM tab_rep");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(a), max(a) FROM tab_rep");
is($result, qq(20|-20|-1), 'check replicated changes on subscriber');
# insert some duplicate rows
@@ -110,107 +103,114 @@ $node_subscriber->safe_psql('postgres',
"ALTER TABLE tab_ins REPLICA IDENTITY FULL");
# and do the update
-$node_publisher->safe_psql('postgres',
- "UPDATE tab_full SET a = a * a");
+$node_publisher->safe_psql('postgres', "UPDATE tab_full SET a = a * a");
# Wait for subscription to catch up
$node_publisher->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for subscriber to catch up";
-$result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*), min(a), max(a) FROM tab_full");
-is($result, qq(20|1|100), 'update works with REPLICA IDENTITY FULL and duplicate tuples');
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(a), max(a) FROM tab_full");
+is($result, qq(20|1|100),
+ 'update works with REPLICA IDENTITY FULL and duplicate tuples');
# check that change of connection string and/or publication list causes
# restart of subscription workers. Not all of these are registered as tests
# as we need to poll for a change but the test suite will fail none the less
# when something goes wrong.
my $oldpid = $node_publisher->safe_psql('postgres',
- "SELECT pid FROM pg_stat_replication WHERE application_name = '$appname';");
+ "SELECT pid FROM pg_stat_replication WHERE application_name = '$appname';"
+);
$node_subscriber->safe_psql('postgres',
- "ALTER SUBSCRIPTION tap_sub CONNECTION 'application_name=$appname $publisher_connstr'");
+"ALTER SUBSCRIPTION tap_sub CONNECTION 'application_name=$appname $publisher_connstr'"
+);
$node_publisher->poll_query_until('postgres',
- "SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = '$appname';")
- or die "Timed out while waiting for apply to restart";
+"SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = '$appname';"
+) or die "Timed out while waiting for apply to restart";
$oldpid = $node_publisher->safe_psql('postgres',
- "SELECT pid FROM pg_stat_replication WHERE application_name = '$appname';");
+ "SELECT pid FROM pg_stat_replication WHERE application_name = '$appname';"
+);
$node_subscriber->safe_psql('postgres',
- "ALTER SUBSCRIPTION tap_sub SET PUBLICATION tap_pub_ins_only REFRESH WITH (copy_data = false)");
+"ALTER SUBSCRIPTION tap_sub SET PUBLICATION tap_pub_ins_only REFRESH WITH (copy_data = false)"
+);
$node_publisher->poll_query_until('postgres',
- "SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = '$appname';")
- or die "Timed out while waiting for apply to restart";
+"SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = '$appname';"
+) or die "Timed out while waiting for apply to restart";
$node_publisher->safe_psql('postgres',
"INSERT INTO tab_ins SELECT generate_series(1001,1100)");
-$node_publisher->safe_psql('postgres',
- "DELETE FROM tab_rep");
+$node_publisher->safe_psql('postgres', "DELETE FROM tab_rep");
$node_publisher->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for subscriber to catch up";
-$result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*), min(a), max(a) FROM tab_ins");
-is($result, qq(1152|1|1100), 'check replicated inserts after subscription publication change');
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(a), max(a) FROM tab_ins");
+is($result, qq(1152|1|1100),
+ 'check replicated inserts after subscription publication change');
-$result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*), min(a), max(a) FROM tab_rep");
-is($result, qq(20|-20|-1), 'check changes skipped after subscription publication change');
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(a), max(a) FROM tab_rep");
+is($result, qq(20|-20|-1),
+ 'check changes skipped after subscription publication change');
# check alter publication (relcache invalidation etc)
$node_publisher->safe_psql('postgres',
"ALTER PUBLICATION tap_pub_ins_only SET (publish = 'insert, delete')");
$node_publisher->safe_psql('postgres',
"ALTER PUBLICATION tap_pub_ins_only ADD TABLE tab_full");
-$node_publisher->safe_psql('postgres',
- "DELETE FROM tab_ins WHERE a > 0");
+$node_publisher->safe_psql('postgres', "DELETE FROM tab_ins WHERE a > 0");
$node_subscriber->safe_psql('postgres',
- "ALTER SUBSCRIPTION tap_sub REFRESH PUBLICATION WITH (copy_data = false)");
-$node_publisher->safe_psql('postgres',
- "INSERT INTO tab_full VALUES(0)");
+ "ALTER SUBSCRIPTION tap_sub REFRESH PUBLICATION WITH (copy_data = false)"
+);
+$node_publisher->safe_psql('postgres', "INSERT INTO tab_full VALUES(0)");
$node_publisher->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for subscriber to catch up";
# note that data are different on provider and subscriber
-$result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*), min(a), max(a) FROM tab_ins");
-is($result, qq(1052|1|1002), 'check replicated deletes after alter publication');
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(a), max(a) FROM tab_ins");
+is($result, qq(1052|1|1002),
+ 'check replicated deletes after alter publication');
-$result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*), min(a), max(a) FROM tab_full");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(a), max(a) FROM tab_full");
is($result, qq(21|0|100), 'check replicated insert after alter publication');
# check restart on rename
$oldpid = $node_publisher->safe_psql('postgres',
- "SELECT pid FROM pg_stat_replication WHERE application_name = '$appname';");
+ "SELECT pid FROM pg_stat_replication WHERE application_name = '$appname';"
+);
$node_subscriber->safe_psql('postgres',
"ALTER SUBSCRIPTION tap_sub RENAME TO tap_sub_renamed");
$node_publisher->poll_query_until('postgres',
- "SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = '$appname';")
- or die "Timed out while waiting for apply to restart";
+"SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = '$appname';"
+) or die "Timed out while waiting for apply to restart";
# check all the cleanup
$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub_renamed");
-$result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_subscription");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_subscription");
is($result, qq(0), 'check subscription was dropped on subscriber');
-$result =
- $node_publisher->safe_psql('postgres', "SELECT count(*) FROM pg_replication_slots");
+$result = $node_publisher->safe_psql('postgres',
+ "SELECT count(*) FROM pg_replication_slots");
is($result, qq(0), 'check replication slot was dropped on publisher');
-$result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_subscription_rel");
-is($result, qq(0), 'check subscription relation status was dropped on subscriber');
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_subscription_rel");
+is($result, qq(0),
+ 'check subscription relation status was dropped on subscriber');
-$result =
- $node_publisher->safe_psql('postgres', "SELECT count(*) FROM pg_replication_slots");
+$result = $node_publisher->safe_psql('postgres',
+ "SELECT count(*) FROM pg_replication_slots");
is($result, qq(0), 'check replication slot was dropped on publisher');
-$result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_replication_origin");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_replication_origin");
is($result, qq(0), 'check replication origin was dropped on subscriber');
$node_subscriber->stop('fast');
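
Stripped of the publication and column checks, the wait this whole file leans on is a single poll: the publisher's pg_current_wal_lsn() must be less than or equal to replay_lsn of the walsender serving the subscription's application_name. A self-contained sketch of a minimal publisher/subscriber pair built around that wait (table, publication and subscription names are placeholders, not the ones from 001_rep_changes.pl):

use strict;
use warnings;
use PostgresNode;
use TestLib;
use Test::More tests => 1;

my $node_publisher = get_new_node('publisher');
$node_publisher->init(allows_streaming => 'logical');
$node_publisher->start;

my $node_subscriber = get_new_node('subscriber');
$node_subscriber->init(allows_streaming => 'logical');
$node_subscriber->start;

$node_publisher->safe_psql('postgres', "CREATE TABLE tab_sketch (a int)");
$node_subscriber->safe_psql('postgres', "CREATE TABLE tab_sketch (a int)");

$node_publisher->safe_psql('postgres',
    "CREATE PUBLICATION sketch_pub FOR TABLE tab_sketch");

# copy_data = false keeps the sketch free of table-sync workers; everything
# arrives through the apply worker identified by $appname.
my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
my $appname           = 'sketch_sub';
$node_subscriber->safe_psql('postgres',
    "CREATE SUBSCRIPTION sketch_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION sketch_pub WITH (copy_data = false)"
);

$node_publisher->safe_psql('postgres',
    "INSERT INTO tab_sketch SELECT generate_series(1,10)");

# Wait until the apply worker has replayed everything written so far.
my $caughtup_query =
  "SELECT pg_current_wal_lsn() <= replay_lsn FROM pg_stat_replication WHERE application_name = '$appname';";
$node_publisher->poll_query_until('postgres', $caughtup_query)
  or die "Timed out while waiting for subscriber to catch up";

my $result =
  $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_sketch");
is($result, qq(10), 'rows replicated to subscriber');

$node_subscriber->stop('fast');
$node_publisher->stop('fast');
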
diff --git a/src/test/subscription/t/002_types.pl b/src/test/subscription/t/002_types.pl
index a9fa4af9205..3ca027ecb4b 100644
--- a/src/test/subscription/t/002_types.pl
+++ b/src/test/subscription/t/002_types.pl
@@ -17,7 +17,7 @@ $node_subscriber->init(allows_streaming => 'logical');
$node_subscriber->start;
# Create some preexisting content on publisher
-my $ddl = qq(
+my $ddl = qq(
CREATE EXTENSION hstore WITH SCHEMA public;
CREATE TABLE public.tst_one_array (
a INTEGER PRIMARY KEY,
@@ -103,7 +103,8 @@ $node_publisher->safe_psql('postgres',
my $appname = 'tap_sub';
$node_subscriber->safe_psql('postgres',
- "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub WITH (slot_name = tap_sub_slot)");
+"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub WITH (slot_name = tap_sub_slot)"
+);
# Wait for subscriber to finish initialization
my $caughtup_query =
@@ -118,7 +119,8 @@ $node_subscriber->poll_query_until('postgres', $synced_query)
or die "Timed out while waiting for subscriber to synchronize data";
# Insert initial test data
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
-- test_tbl_one_array_col
INSERT INTO tst_one_array (a, b) VALUES
(1, '{1, 2, 3}'),
@@ -248,7 +250,8 @@ $node_publisher->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for subscriber to catch up";
# Check the data on subscriber
-my $result = $node_subscriber->safe_psql('postgres', qq(
+my $result = $node_subscriber->safe_psql(
+ 'postgres', qq(
SET timezone = '+2';
SELECT a, b FROM tst_one_array ORDER BY a;
SELECT a, b, c, d FROM tst_arrays ORDER BY a;
@@ -266,7 +269,7 @@ my $result = $node_subscriber->safe_psql('postgres', qq(
SELECT a, b FROM tst_hstore ORDER BY a;
));
-is($result, '1|{1,2,3}
+is( $result, '1|{1,2,3}
2|{2,3,1}
3|{3,2,1}
4|{4,3,2}
@@ -331,10 +334,11 @@ e|{d,NULL}
2|"zzz"=>"foo"
3|"123"=>"321"
4|"yellow horse"=>"moaned"',
-'check replicated inserts on subscriber');
+ 'check replicated inserts on subscriber');
# Run batch of updates
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
UPDATE tst_one_array SET b = '{4, 5, 6}' WHERE a = 1;
UPDATE tst_one_array SET b = '{4, 5, 6, 1}' WHERE a > 3;
UPDATE tst_arrays SET b = '{"1a", "2b", "3c"}', c = '{1.0, 2.0, 3.0}', d = '{"1 day 1 second", "2 days 2 seconds", "3 days 3 second"}' WHERE a = '{1, 2, 3}';
@@ -368,7 +372,8 @@ $node_publisher->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for subscriber to catch up";
# Check the data on subscriber
-$result = $node_subscriber->safe_psql('postgres', qq(
+$result = $node_subscriber->safe_psql(
+ 'postgres', qq(
SET timezone = '+2';
SELECT a, b FROM tst_one_array ORDER BY a;
SELECT a, b, c, d FROM tst_arrays ORDER BY a;
@@ -386,7 +391,7 @@ $result = $node_subscriber->safe_psql('postgres', qq(
SELECT a, b FROM tst_hstore ORDER BY a;
));
-is($result, '1|{4,5,6}
+is( $result, '1|{4,5,6}
2|{2,3,1}
3|{3,2,1}
4|{4,5,6,1}
@@ -451,10 +456,11 @@ e|{e,d}
2|"updated"=>"value"
3|"also"=>"updated"
4|"yellow horse"=>"moaned"',
-'check replicated updates on subscriber');
+ 'check replicated updates on subscriber');
# Run batch of deletes
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
DELETE FROM tst_one_array WHERE a = 1;
DELETE FROM tst_one_array WHERE b = '{2, 3, 1}';
DELETE FROM tst_arrays WHERE a = '{1, 2, 3}';
@@ -487,7 +493,8 @@ $node_publisher->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for subscriber to catch up";
# Check the data on subscriber
-$result = $node_subscriber->safe_psql('postgres', qq(
+$result = $node_subscriber->safe_psql(
+ 'postgres', qq(
SET timezone = '+2';
SELECT a, b FROM tst_one_array ORDER BY a;
SELECT a, b, c, d FROM tst_arrays ORDER BY a;
@@ -505,7 +512,7 @@ $result = $node_subscriber->safe_psql('postgres', qq(
SELECT a, b FROM tst_hstore ORDER BY a;
));
-is($result, '3|{3,2,1}
+is( $result, '3|{3,2,1}
4|{4,5,6,1}
5|{4,5,6,1}
{3,1,2}|{c,a,b}|{3.3,1.1,2.2}|{"3 years","1 year","2 years"}
@@ -539,7 +546,7 @@ e|{e,d}
2|"updated"=>"value"
3|"also"=>"updated"
4|"yellow horse"=>"moaned"',
-'check replicated deletes on subscriber');
+ 'check replicated deletes on subscriber');
$node_subscriber->stop('fast');
$node_publisher->stop('fast');
diff --git a/src/test/subscription/t/003_constraints.pl b/src/test/subscription/t/003_constraints.pl
index b8282af2171..06863aef84a 100644
--- a/src/test/subscription/t/003_constraints.pl
+++ b/src/test/subscription/t/003_constraints.pl
@@ -19,13 +19,15 @@ $node_subscriber->start;
$node_publisher->safe_psql('postgres',
"CREATE TABLE tab_fk (bid int PRIMARY KEY);");
$node_publisher->safe_psql('postgres',
- "CREATE TABLE tab_fk_ref (id int PRIMARY KEY, bid int REFERENCES tab_fk (bid));");
+"CREATE TABLE tab_fk_ref (id int PRIMARY KEY, bid int REFERENCES tab_fk (bid));"
+);
# Setup structure on subscriber
$node_subscriber->safe_psql('postgres',
"CREATE TABLE tab_fk (bid int PRIMARY KEY);");
$node_subscriber->safe_psql('postgres',
- "CREATE TABLE tab_fk_ref (id int PRIMARY KEY, bid int REFERENCES tab_fk (bid));");
+"CREATE TABLE tab_fk_ref (id int PRIMARY KEY, bid int REFERENCES tab_fk (bid));"
+);
# Setup logical replication
my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
@@ -34,7 +36,8 @@ $node_publisher->safe_psql('postgres',
my $appname = 'tap_sub';
$node_subscriber->safe_psql('postgres',
- "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub WITH (copy_data = false)");
+"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub WITH (copy_data = false)"
+);
# Wait for subscriber to finish initialization
my $caughtup_query =
@@ -51,17 +54,16 @@ $node_publisher->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for subscriber to catch up";
# Check data on subscriber
-my $result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*), min(bid), max(bid) FROM tab_fk;");
+my $result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(bid), max(bid) FROM tab_fk;");
is($result, qq(1|1|1), 'check replicated tab_fk inserts on subscriber');
-$result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*), min(bid), max(bid) FROM tab_fk_ref;");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(bid), max(bid) FROM tab_fk_ref;");
is($result, qq(1|1|1), 'check replicated tab_fk_ref inserts on subscriber');
# Drop the fk on publisher
-$node_publisher->safe_psql('postgres',
- "DROP TABLE tab_fk CASCADE;");
+$node_publisher->safe_psql('postgres', "DROP TABLE tab_fk CASCADE;");
# Insert data
$node_publisher->safe_psql('postgres',
@@ -71,12 +73,13 @@ $node_publisher->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for subscriber to catch up";
# FK is not enforced on subscriber
-$result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*), min(bid), max(bid) FROM tab_fk_ref;");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(bid), max(bid) FROM tab_fk_ref;");
is($result, qq(2|1|2), 'check FK ignored on subscriber');
# Add replica trigger
-$node_subscriber->safe_psql('postgres', qq{
+$node_subscriber->safe_psql(
+ 'postgres', qq{
CREATE FUNCTION filter_basic_dml_fn() RETURNS TRIGGER AS \$\$
BEGIN
IF (TG_OP = 'INSERT') THEN
@@ -105,8 +108,8 @@ $node_publisher->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for subscriber to catch up";
# The row should be skipped on subscriber
-$result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*), min(bid), max(bid) FROM tab_fk_ref;");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(bid), max(bid) FROM tab_fk_ref;");
is($result, qq(2|1|2), 'check replica trigger applied on subscriber');
$node_subscriber->stop('fast');
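
The "replica trigger" step above depends on a detail that is easy to miss: the logical replication apply worker runs with session_replication_role = replica, so an ordinary trigger on the subscriber never fires during apply; it has to be switched to ENABLE REPLICA (or ALWAYS). A hedged sketch of that mechanism with a trivial row-skipping trigger; the function and trigger names here are hypothetical, unlike the filtering trigger this test installs, and the fragment assumes the $node_subscriber and tab_fk_ref set up earlier in the file:

$node_subscriber->safe_psql(
    'postgres', qq{
CREATE FUNCTION skip_row_fn() RETURNS TRIGGER AS \$\$
BEGIN
    RETURN NULL;  -- returning NULL from a BEFORE ROW trigger skips the row
END;
\$\$ LANGUAGE plpgsql;
CREATE TRIGGER skip_row BEFORE INSERT OR UPDATE ON tab_fk_ref
    FOR EACH ROW EXECUTE PROCEDURE skip_row_fn();
-- Without this, the trigger is ignored during logical replication apply.
ALTER TABLE tab_fk_ref ENABLE REPLICA TRIGGER skip_row;
});
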
diff --git a/src/test/subscription/t/004_sync.pl b/src/test/subscription/t/004_sync.pl
index fbc49e3a2ae..05fd2f0e6cc 100644
--- a/src/test/subscription/t/004_sync.pl
+++ b/src/test/subscription/t/004_sync.pl
@@ -13,7 +13,8 @@ $node_publisher->start;
# Create subscriber node
my $node_subscriber = get_new_node('subscriber');
$node_subscriber->init(allows_streaming => 'logical');
-$node_subscriber->append_conf('postgresql.conf', "wal_retrieve_retry_interval = 1ms");
+$node_subscriber->append_conf('postgresql.conf',
+ "wal_retrieve_retry_interval = 1ms");
$node_subscriber->start;
# Create some preexisting content on publisher
@@ -33,7 +34,8 @@ $node_publisher->safe_psql('postgres',
my $appname = 'tap_sub';
$node_subscriber->safe_psql('postgres',
- "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub");
+"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub"
+);
# Wait for subscriber to finish initialization
my $caughtup_query =
@@ -59,17 +61,16 @@ $node_publisher->safe_psql('postgres',
# recreate the subscription, it will try to do initial copy
$node_subscriber->safe_psql('postgres',
- "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub");
+"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub"
+);
# but it will be stuck on data copy as it will fail on constraint
-my $started_query =
-"SELECT srsubstate = 'd' FROM pg_subscription_rel;";
+my $started_query = "SELECT srsubstate = 'd' FROM pg_subscription_rel;";
$node_subscriber->poll_query_until('postgres', $started_query)
or die "Timed out while waiting for subscriber to start sync";
# remove the conflicting data
-$node_subscriber->safe_psql('postgres',
- "DELETE FROM tab_rep;");
+$node_subscriber->safe_psql('postgres', "DELETE FROM tab_rep;");
# wait for sync to finish this time
$node_subscriber->poll_query_until('postgres', $synced_query)
@@ -82,28 +83,30 @@ is($result, qq(20), 'initial data synced for second sub');
# now check another subscription for the same node pair
$node_subscriber->safe_psql('postgres',
- "CREATE SUBSCRIPTION tap_sub2 CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub WITH (copy_data = false)");
+"CREATE SUBSCRIPTION tap_sub2 CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub WITH (copy_data = false)"
+);
# wait for it to start
-$node_subscriber->poll_query_until('postgres', "SELECT pid IS NOT NULL FROM pg_stat_subscription WHERE subname = 'tap_sub2' AND relid IS NULL")
- or die "Timed out while waiting for subscriber to start";
+$node_subscriber->poll_query_until('postgres',
+"SELECT pid IS NOT NULL FROM pg_stat_subscription WHERE subname = 'tap_sub2' AND relid IS NULL"
+) or die "Timed out while waiting for subscriber to start";
# and drop both subscriptions
$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub");
$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub2");
# check subscriptions are removed
-$result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_subscription");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_subscription");
is($result, qq(0), 'second and third sub are dropped');
# remove the conflicting data
-$node_subscriber->safe_psql('postgres',
- "DELETE FROM tab_rep;");
+$node_subscriber->safe_psql('postgres', "DELETE FROM tab_rep;");
# recreate the subscription again
$node_subscriber->safe_psql('postgres',
- "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub");
+"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub"
+);
# and wait for data sync to finish again
$node_subscriber->poll_query_until('postgres', $synced_query)
@@ -115,8 +118,7 @@ $result =
is($result, qq(20), 'initial data synced for fourth sub');
# add new table on subscriber
-$node_subscriber->safe_psql('postgres',
- "CREATE TABLE tab_rep_next (a int)");
+$node_subscriber->safe_psql('postgres', "CREATE TABLE tab_rep_next (a int)");
# setup structure with existing data on publisher
$node_publisher->safe_psql('postgres',
@@ -126,8 +128,8 @@ $node_publisher->safe_psql('postgres',
$node_publisher->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for subscriber to catch up";
-$result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_rep_next");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM tab_rep_next");
is($result, qq(0), 'no data for table added after subscription initialized');
# ask for data sync
@@ -138,9 +140,10 @@ $node_subscriber->safe_psql('postgres',
$node_subscriber->poll_query_until('postgres', $synced_query)
or die "Timed out while waiting for subscriber to synchronize data";
-$result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_rep_next");
-is($result, qq(10), 'data for table added after subscription initialized are now synced');
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM tab_rep_next");
+is($result, qq(10),
+ 'data for table added after subscription initialized are now synced');
# Add some data
$node_publisher->safe_psql('postgres',
@@ -150,9 +153,10 @@ $node_publisher->safe_psql('postgres',
$node_publisher->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for subscriber to catch up";
-$result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_rep_next");
-is($result, qq(20), 'changes for table added after subscription initialized replicated');
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM tab_rep_next");
+is($result, qq(20),
+ 'changes for table added after subscription initialized replicated');
$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub");
diff --git a/src/test/subscription/t/005_encoding.pl b/src/test/subscription/t/005_encoding.pl
index 7a62e05cc6b..26a40c0b7f1 100644
--- a/src/test/subscription/t/005_encoding.pl
+++ b/src/test/subscription/t/005_encoding.pl
@@ -10,16 +10,20 @@ sub wait_for_caught_up
my ($node, $appname) = @_;
$node->poll_query_until('postgres',
- "SELECT pg_current_wal_lsn() <= replay_lsn FROM pg_stat_replication WHERE application_name = '$appname';")
- or die "Timed out while waiting for subscriber to catch up";
+"SELECT pg_current_wal_lsn() <= replay_lsn FROM pg_stat_replication WHERE application_name = '$appname';"
+ ) or die "Timed out while waiting for subscriber to catch up";
}
my $node_publisher = get_new_node('publisher');
-$node_publisher->init(allows_streaming => 'logical', extra => ['--locale=C', '--encoding=UTF8']);
+$node_publisher->init(
+ allows_streaming => 'logical',
+ extra => [ '--locale=C', '--encoding=UTF8' ]);
$node_publisher->start;
my $node_subscriber = get_new_node('subscriber');
-$node_subscriber->init(allows_streaming => 'logical', extra => ['--locale=C', '--encoding=LATIN1']);
+$node_subscriber->init(
+ allows_streaming => 'logical',
+ extra => [ '--locale=C', '--encoding=LATIN1' ]);
$node_subscriber->start;
my $ddl = "CREATE TABLE test1 (a int, b text);";
@@ -27,20 +31,26 @@ $node_publisher->safe_psql('postgres', $ddl);
$node_subscriber->safe_psql('postgres', $ddl);
my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
-my $appname = 'encoding_test';
+my $appname = 'encoding_test';
-$node_publisher->safe_psql('postgres', "CREATE PUBLICATION mypub FOR ALL TABLES;");
-$node_subscriber->safe_psql('postgres', "CREATE SUBSCRIPTION mysub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION mypub;");
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION mypub FOR ALL TABLES;");
+$node_subscriber->safe_psql('postgres',
+"CREATE SUBSCRIPTION mysub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION mypub;"
+);
wait_for_caught_up($node_publisher, $appname);
-$node_publisher->safe_psql('postgres', q{INSERT INTO test1 VALUES (1, E'Mot\xc3\xb6rhead')}); # hand-rolled UTF-8
+$node_publisher->safe_psql('postgres',
+ q{INSERT INTO test1 VALUES (1, E'Mot\xc3\xb6rhead')}); # hand-rolled UTF-8
wait_for_caught_up($node_publisher, $appname);
-is($node_subscriber->safe_psql('postgres', q{SELECT a FROM test1 WHERE b = E'Mot\xf6rhead'}), # LATIN1
- qq(1),
- 'data replicated to subscriber');
+is( $node_subscriber->safe_psql(
+ 'postgres', q{SELECT a FROM test1 WHERE b = E'Mot\xf6rhead'}
+ ), # LATIN1
+ qq(1),
+ 'data replicated to subscriber');
$node_subscriber->stop;
$node_publisher->stop;