Run pgperltidy
This is required before the creation of a new branch. pgindent is clean, as is reformat-dat-files. The perltidy version used is v20230309, as documented in pgindent's README.
parent 5450820917
commit 7dcc6f8e6d
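For context, pgperltidy re-runs perltidy over the tree's Perl files using the profile in src/tools/pgindent/perltidyrc, and the hunks below are the resulting mechanical re-wraps. As a rough, hypothetical sketch (the file path is chosen for illustration only, and this assumes perltidy/Perl::Tidy v20230309 is installed), roughly the same formatting can be reproduced on a single file with the Perl::Tidy module:

	# Hypothetical sketch: apply the pgindent perltidy profile to one file.
	# Assumes this is run from the top of a PostgreSQL source tree with
	# Perl::Tidy v20230309 installed; the target file is illustrative.
	use strict;
	use warnings;
	use Perl::Tidy;

	my $file = 'src/bin/pg_basebackup/t/010_pg_basebackup.pl';
	my $tidied;
	Perl::Tidy::perltidy(
		source      => $file,
		destination => \$tidied,
		perltidyrc  => 'src/tools/pgindent/perltidyrc',
	);
	print $tidied;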
@@ -289,10 +289,13 @@ $node_p->restart;
 # Create failover slot to test its removal
 my $fslotname = 'failover_slot';
 $node_p->safe_psql($db1,
-	"SELECT pg_create_logical_replication_slot('$fslotname', 'pgoutput', false, false, true)");
+	"SELECT pg_create_logical_replication_slot('$fslotname', 'pgoutput', false, false, true)"
+);
 $node_s->start;
 $node_s->safe_psql('postgres', "SELECT pg_sync_replication_slots()");
-my $result = $node_s->safe_psql('postgres', "SELECT slot_name FROM pg_replication_slots WHERE slot_name = '$fslotname' AND synced AND NOT temporary");
+my $result = $node_s->safe_psql('postgres',
+	"SELECT slot_name FROM pg_replication_slots WHERE slot_name = '$fslotname' AND synced AND NOT temporary"
+);
 is($result, 'failover_slot', 'failover slot is synced');
 $node_s->stop;
 
@@ -381,7 +384,8 @@ $node_s->wait_for_subscription_sync($node_p, $subnames[1]);
 
 # Confirm the failover slot has been removed
 $result = $node_s->safe_psql($db1,
-	"SELECT count(*) FROM pg_replication_slots WHERE slot_name = '$fslotname'");
+	"SELECT count(*) FROM pg_replication_slots WHERE slot_name = '$fslotname'"
+);
 is($result, qq(0), 'failover slot was removed');
 
 # Check result on database $db1
@@ -72,7 +72,8 @@ $node2->command_ok(
 
 # Restore the incremental backup and use it to create a new node.
 my $node3 = PostgreSQL::Test::Cluster->new('node3');
-$node3->init_from_backup($node1, 'backup3',
+$node3->init_from_backup(
+	$node1, 'backup3',
 	combine_with_prior => [ 'backup1', 'backup2' ],
 	combine_mode => $mode);
 $node3->start();
@@ -58,7 +58,8 @@ sub combine_and_test_one_backup
 combine_and_test_one_backup('nomanifest',
 	qr/could not open file.*backup_manifest/,
 	'--no-manifest');
-combine_and_test_one_backup('csum_none', undef, '--manifest-checksums=NONE', $mode);
+combine_and_test_one_backup('csum_none', undef, '--manifest-checksums=NONE',
+	$mode);
 combine_and_test_one_backup('csum_sha224',
 	undef, '--manifest-checksums=SHA224', $mode);
 
@@ -84,13 +84,19 @@ my $resultpath = $node1->backup_dir . '/result';
 
 # Can't combine 2 full backups.
 $node1->command_fails_like(
-	[ 'pg_combinebackup', $backup1path, $backup1path, '-o', $resultpath, $mode ],
+	[
+		'pg_combinebackup', $backup1path, $backup1path, '-o',
+		$resultpath, $mode
+	],
 	qr/is a full backup, but only the first backup should be a full backup/,
 	"can't combine full backups");
 
 # Can't combine 2 incremental backups.
 $node1->command_fails_like(
-	[ 'pg_combinebackup', $backup2path, $backup2path, '-o', $resultpath, $mode ],
+	[
+		'pg_combinebackup', $backup2path, $backup2path, '-o',
+		$resultpath, $mode
+	],
 	qr/is an incremental backup, but the first backup should be a full backup/,
 	"can't combine full backups");
 
@@ -121,7 +127,10 @@ move("$backup2path/backup_manifest.orig", "$backup2path/backup_manifest")
 
 # Can't omit a required backup.
 $node1->command_fails_like(
-	[ 'pg_combinebackup', $backup1path, $backup3path, '-o', $resultpath, $mode ],
+	[
+		'pg_combinebackup', $backup1path, $backup3path, '-o',
+		$resultpath, $mode
+	],
 	qr/starts at LSN.*but expected/,
 	"can't omit a required backup");
 
@@ -154,13 +163,21 @@ $node1->command_ok(
 
 # Can combine result of previous step with second incremental.
 $node1->command_ok(
-	[ 'pg_combinebackup', $synthetic12path, $backup3path, '-o', $resultpath, $mode ],
+	[
+		'pg_combinebackup', $synthetic12path,
+		$backup3path, '-o',
+		$resultpath, $mode
+	],
	"can combine synthetic backup with later incremental");
 rmtree($resultpath);
 
 # Can't combine result of 1+2 with 2.
 $node1->command_fails_like(
-	[ 'pg_combinebackup', $synthetic12path, $backup2path, '-o', $resultpath, $mode ],
+	[
+		'pg_combinebackup', $synthetic12path,
+		$backup2path, '-o',
+		$resultpath, $mode
+	],
 	qr/starts at LSN.*but expected/,
 	"can't combine synthetic backup with included incremental");
 
@@ -49,7 +49,8 @@ $primary->command_ok(
 
 # Recover the incremental backup.
 my $restore = PostgreSQL::Test::Cluster->new('restore');
-$restore->init_from_backup($primary, 'backup2',
+$restore->init_from_backup(
+	$primary, 'backup2',
 	combine_with_prior => ['backup1'],
 	combine_mode => $mode);
 $restore->start();
@@ -55,8 +55,8 @@ sub run_test
 	"$test_standby_datadir/tst_standby_dir/standby_subdir/standby_file4",
 	"in standby4";
 	# Skip testing .DS_Store files on macOS to avoid risk of side effects
-	append_to_file
-		"$test_standby_datadir/tst_standby_dir/.DS_Store", "macOS system file"
+	append_to_file "$test_standby_datadir/tst_standby_dir/.DS_Store",
+		"macOS system file"
 		unless ($Config{osname} eq 'darwin');
 
 	mkdir "$test_primary_datadir/tst_primary_dir";
@@ -108,8 +108,8 @@ for my $scenario (@scenario)
   SKIP:
 	{
 		skip "unix-style permissions not supported on Windows", 4
-			if ($scenario->{'skip_on_windows'} &&
-				($windows_os || $Config::Config{osname} eq 'cygwin'));
+			if ($scenario->{'skip_on_windows'}
+				&& ($windows_os || $Config::Config{osname} eq 'cygwin'));
 
 		# Take a backup and check that it verifies OK.
 		my $backup_path = $primary->backup_dir . '/' . $name;
@@ -330,32 +330,34 @@ $cur_primary->stop;
 $cur_standby->restart;
 
 # Acquire a snapshot in standby, before we commit the prepared transaction
-my $standby_session = $cur_standby->background_psql('postgres', on_error_die => 1);
+my $standby_session =
+	$cur_standby->background_psql('postgres', on_error_die => 1);
 $standby_session->query_safe("BEGIN ISOLATION LEVEL REPEATABLE READ");
-$psql_out = $standby_session->query_safe(
-	"SELECT count(*) FROM t_009_tbl_standby_mvcc");
+$psql_out =
+	$standby_session->query_safe("SELECT count(*) FROM t_009_tbl_standby_mvcc");
 is($psql_out, '0',
 	"Prepared transaction not visible in standby before commit");
 
 # Commit the transaction in primary
 $cur_primary->start;
-$cur_primary->psql('postgres', "
+$cur_primary->psql(
+	'postgres', "
 SET synchronous_commit='remote_apply'; -- To ensure the standby is caught up
 COMMIT PREPARED 'xact_009_standby_mvcc';
 ");
 
 # Still not visible to the old snapshot
-$psql_out = $standby_session->query_safe(
-	"SELECT count(*) FROM t_009_tbl_standby_mvcc");
+$psql_out =
+	$standby_session->query_safe("SELECT count(*) FROM t_009_tbl_standby_mvcc");
 is($psql_out, '0',
 	"Committed prepared transaction not visible to old snapshot in standby");
 
 # Is visible to a new snapshot
 $standby_session->query_safe("COMMIT");
-$psql_out = $standby_session->query_safe(
-	"SELECT count(*) FROM t_009_tbl_standby_mvcc");
+$psql_out =
+	$standby_session->query_safe("SELECT count(*) FROM t_009_tbl_standby_mvcc");
 is($psql_out, '2',
-	"Committed prepared transaction is visible to new snapshot in standby");
+	"Committed prepared transaction is visible to new snapshot in standby");
 $standby_session->quit;
 
 ###############################################################################