pg_probackup
3658 lines · 145.8 KB
1import unittest2import os3import re4from time import sleep, time5from .helpers.ptrack_helpers import base36enc, ProbackupTest, ProbackupException6import shutil7from distutils.dir_util import copy_tree8from testgres import ProcessType, QueryException9import subprocess10
11
12class BackupTest(ProbackupTest, unittest.TestCase):13
14def test_full_backup(self):15"""16Just test full backup with at least two segments
17"""
18node = self.make_simple_node(19base_dir=os.path.join(self.module_name, self.fname, 'node'),20initdb_params=['--data-checksums'],21# we need to write a lot. Lets speedup a bit.22pg_options={"fsync": "off", "synchronous_commit": "off"})23
24backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')25self.init_pb(backup_dir)26self.add_instance(backup_dir, 'node', node)27self.set_archiving(backup_dir, 'node', node)28node.slow_start()29
30# Fill with data31# Have to use scale=100 to create second segment.32node.pgbench_init(scale=100, no_vacuum=True)33
34# FULL35backup_id = self.backup_node(backup_dir, 'node', node)36
37out = self.validate_pb(backup_dir, 'node', backup_id)38self.assertIn(39"INFO: Backup {0} is valid".format(backup_id),40out)41
42def test_full_backup_stream(self):43"""44Just test full backup with at least two segments in stream mode
45"""
46node = self.make_simple_node(47base_dir=os.path.join(self.module_name, self.fname, 'node'),48initdb_params=['--data-checksums'],49# we need to write a lot. Lets speedup a bit.50pg_options={"fsync": "off", "synchronous_commit": "off"})51
52backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')53self.init_pb(backup_dir)54self.add_instance(backup_dir, 'node', node)55node.slow_start()56
57# Fill with data58# Have to use scale=100 to create second segment.59node.pgbench_init(scale=100, no_vacuum=True)60
61# FULL62backup_id = self.backup_node(backup_dir, 'node', node,63options=["--stream"])64
65out = self.validate_pb(backup_dir, 'node', backup_id)66self.assertIn(67"INFO: Backup {0} is valid".format(backup_id),68out)69
70# @unittest.skip("skip")71# @unittest.expectedFailure72# PGPRO-70773def test_backup_modes_archive(self):74"""standart backup modes with ARCHIVE WAL method"""75node = self.make_simple_node(76base_dir=os.path.join(self.module_name, self.fname, 'node'),77initdb_params=['--data-checksums'])78
79backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')80self.init_pb(backup_dir)81self.add_instance(backup_dir, 'node', node)82self.set_archiving(backup_dir, 'node', node)83node.slow_start()84
85full_backup_id = self.backup_node(backup_dir, 'node', node)86show_backup = self.show_pb(backup_dir, 'node')[0]87
88self.assertEqual(show_backup['status'], "OK")89self.assertEqual(show_backup['backup-mode'], "FULL")90
91# postmaster.pid and postmaster.opts shouldn't be copied92excluded = True93db_dir = os.path.join(94backup_dir, "backups", 'node', full_backup_id, "database")95
96for f in os.listdir(db_dir):97if (98os.path.isfile(os.path.join(db_dir, f)) and99(100f == "postmaster.pid" or101f == "postmaster.opts"102)103):104excluded = False105self.assertEqual(excluded, True)106
107# page backup mode108page_backup_id = self.backup_node(109backup_dir, 'node', node, backup_type="page")110
111show_backup_1 = self.show_pb(backup_dir, 'node')[1]112self.assertEqual(show_backup_1['status'], "OK")113self.assertEqual(show_backup_1['backup-mode'], "PAGE")114
115# delta backup mode116delta_backup_id = self.backup_node(117backup_dir, 'node', node, backup_type="delta")118
119show_backup_2 = self.show_pb(backup_dir, 'node')[2]120self.assertEqual(show_backup_2['status'], "OK")121self.assertEqual(show_backup_2['backup-mode'], "DELTA")122
123# Check parent backup124self.assertEqual(125full_backup_id,126self.show_pb(127backup_dir, 'node',128backup_id=show_backup_1['id'])["parent-backup-id"])129
130self.assertEqual(131page_backup_id,132self.show_pb(133backup_dir, 'node',134backup_id=show_backup_2['id'])["parent-backup-id"])135
136# @unittest.skip("skip")137def test_smooth_checkpoint(self):138"""full backup with smooth checkpoint"""139node = self.make_simple_node(140base_dir=os.path.join(self.module_name, self.fname, 'node'),141initdb_params=['--data-checksums'])142
143backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')144self.init_pb(backup_dir)145self.add_instance(backup_dir, 'node', node)146self.set_archiving(backup_dir, 'node', node)147node.slow_start()148
149self.backup_node(150backup_dir, 'node', node,151options=["-C"])152self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK")153node.stop()154
155# @unittest.skip("skip")156def test_incremental_backup_without_full(self):157"""page backup without validated full backup"""158node = self.make_simple_node(159base_dir=os.path.join(self.module_name, self.fname, 'node'),160initdb_params=['--data-checksums'])161
162backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')163self.init_pb(backup_dir)164self.add_instance(backup_dir, 'node', node)165self.set_archiving(backup_dir, 'node', node)166node.slow_start()167
168try:169self.backup_node(backup_dir, 'node', node, backup_type="page")170# we should die here because exception is what we expect to happen171self.assertEqual(1721, 0,173"Expecting Error because page backup should not be possible "174"without valid full backup.\n Output: {0} \n CMD: {1}".format(175repr(self.output), self.cmd))176except ProbackupException as e:177self.assertTrue(178"WARNING: Valid full backup on current timeline 1 is not found" in e.message and179"ERROR: Create new full backup before an incremental one" in e.message,180"\n Unexpected Error Message: {0}\n CMD: {1}".format(181repr(e.message), self.cmd))182
183self.assertEqual(184self.show_pb(backup_dir, 'node')[0]['status'],185"ERROR")186
187# @unittest.skip("skip")188def test_incremental_backup_corrupt_full(self):189"""page-level backup with corrupted full backup"""190node = self.make_simple_node(191base_dir=os.path.join(self.module_name, self.fname, 'node'),192initdb_params=['--data-checksums'])193
194backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')195self.init_pb(backup_dir)196self.add_instance(backup_dir, 'node', node)197self.set_archiving(backup_dir, 'node', node)198node.slow_start()199
200backup_id = self.backup_node(backup_dir, 'node', node)201file = os.path.join(202backup_dir, "backups", "node", backup_id,203"database", "postgresql.conf")204os.remove(file)205
206try:207self.validate_pb(backup_dir, 'node')208# we should die here because exception is what we expect to happen209self.assertEqual(2101, 0,211"Expecting Error because of validation of corrupted backup.\n"212" Output: {0} \n CMD: {1}".format(213repr(self.output), self.cmd))214except ProbackupException as e:215self.assertTrue(216"INFO: Validate backups of the instance 'node'" in e.message and217"WARNING: Backup file" in e.message and "is not found" in e.message and218"WARNING: Backup {0} data files are corrupted".format(219backup_id) in e.message and220"WARNING: Some backups are not valid" in e.message,221"\n Unexpected Error Message: {0}\n CMD: {1}".format(222repr(e.message), self.cmd))223
224try:225self.backup_node(backup_dir, 'node', node, backup_type="page")226# we should die here because exception is what we expect to happen227self.assertEqual(2281, 0,229"Expecting Error because page backup should not be possible "230"without valid full backup.\n Output: {0} \n CMD: {1}".format(231repr(self.output), self.cmd))232except ProbackupException as e:233self.assertTrue(234"WARNING: Valid full backup on current timeline 1 is not found" in e.message and235"ERROR: Create new full backup before an incremental one" in e.message,236"\n Unexpected Error Message: {0}\n CMD: {1}".format(237repr(e.message), self.cmd))238
239self.assertEqual(240self.show_pb(backup_dir, 'node', backup_id)['status'], "CORRUPT")241self.assertEqual(242self.show_pb(backup_dir, 'node')[1]['status'], "ERROR")243
244# @unittest.skip("skip")245def test_delta_threads_stream(self):246"""delta multi thread backup mode and stream"""247node = self.make_simple_node(248base_dir=os.path.join(self.module_name, self.fname, 'node'),249set_replication=True,250initdb_params=['--data-checksums'])251
252backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')253self.init_pb(backup_dir)254self.add_instance(backup_dir, 'node', node)255node.slow_start()256
257self.backup_node(258backup_dir, 'node', node, backup_type="full",259options=["-j", "4", "--stream"])260
261self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK")262self.backup_node(263backup_dir, 'node', node,264backup_type="delta", options=["-j", "4", "--stream"])265self.assertEqual(self.show_pb(backup_dir, 'node')[1]['status'], "OK")266
267# @unittest.skip("skip")268def test_page_detect_corruption(self):269"""make node, corrupt some page, check that backup failed"""270
271node = self.make_simple_node(272base_dir=os.path.join(self.module_name, self.fname, 'node'),273set_replication=True,274ptrack_enable=self.ptrack,275initdb_params=['--data-checksums'])276
277backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')278
279self.init_pb(backup_dir)280self.add_instance(backup_dir, 'node', node)281node.slow_start()282
283self.backup_node(284backup_dir, 'node', node,285backup_type="full", options=["-j", "4", "--stream"])286
287node.safe_psql(288"postgres",289"create table t_heap as select 1 as id, md5(i::text) as text, "290"md5(repeat(i::text,10))::tsvector as tsvector "291"from generate_series(0,1000) i")292
293node.safe_psql(294"postgres",295"CHECKPOINT")296
297heap_path = node.safe_psql(298"postgres",299"select pg_relation_filepath('t_heap')").decode('utf-8').rstrip()300
301path = os.path.join(node.data_dir, heap_path)302with open(path, "rb+", 0) as f:303f.seek(9000)304f.write(b"bla")305f.flush()306f.close307
308try:309self.backup_node(310backup_dir, 'node', node, backup_type="full",311options=["-j", "4", "--stream", "--log-level-file=VERBOSE"])312self.assertEqual(3131, 0,314"Expecting Error because data file is corrupted"315"\n Output: {0} \n CMD: {1}".format(316repr(self.output), self.cmd))317except ProbackupException as e:318self.assertTrue(319'ERROR: Corruption detected in file "{0}", '320'block 1: page verification failed, calculated checksum'.format(path),321e.message)322
323self.assertEqual(324self.show_pb(backup_dir, 'node')[1]['status'],325'ERROR',326"Backup Status should be ERROR")327
328# @unittest.skip("skip")329def test_backup_detect_corruption(self):330"""make node, corrupt some page, check that backup failed"""331node = self.make_simple_node(332base_dir=os.path.join(self.module_name, self.fname, 'node'),333set_replication=True,334ptrack_enable=self.ptrack,335initdb_params=['--data-checksums'])336
337backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')338
339self.init_pb(backup_dir)340self.add_instance(backup_dir, 'node', node)341self.set_archiving(backup_dir, 'node', node)342node.slow_start()343
344if self.ptrack:345node.safe_psql(346"postgres",347"create extension ptrack")348
349self.backup_node(350backup_dir, 'node', node,351backup_type="full", options=["-j", "4", "--stream"])352
353node.safe_psql(354"postgres",355"create table t_heap as select 1 as id, md5(i::text) as text, "356"md5(repeat(i::text,10))::tsvector as tsvector "357"from generate_series(0,10000) i")358
359heap_path = node.safe_psql(360"postgres",361"select pg_relation_filepath('t_heap')").decode('utf-8').rstrip()362
363self.backup_node(364backup_dir, 'node', node,365backup_type="full", options=["-j", "4", "--stream"])366
367node.safe_psql(368"postgres",369"select count(*) from t_heap")370
371node.safe_psql(372"postgres",373"update t_heap set id = id + 10000")374
375node.stop()376
377heap_fullpath = os.path.join(node.data_dir, heap_path)378
379with open(heap_fullpath, "rb+", 0) as f:380f.seek(9000)381f.write(b"bla")382f.flush()383f.close384
385node.slow_start()386
387try:388self.backup_node(389backup_dir, 'node', node,390backup_type="full", options=["-j", "4", "--stream"])391# we should die here because exception is what we expect to happen392self.assertEqual(3931, 0,394"Expecting Error because of block corruption"395"\n Output: {0} \n CMD: {1}".format(396repr(self.output), self.cmd))397except ProbackupException as e:398self.assertIn(399'ERROR: Corruption detected in file "{0}", block 1: '400'page verification failed, calculated checksum'.format(401heap_fullpath),402e.message,403'\n Unexpected Error Message: {0}\n CMD: {1}'.format(404repr(e.message), self.cmd))405
406sleep(1)407
408try:409self.backup_node(410backup_dir, 'node', node,411backup_type="delta", options=["-j", "4", "--stream"])412# we should die here because exception is what we expect to happen413self.assertEqual(4141, 0,415"Expecting Error because of block corruption"416"\n Output: {0} \n CMD: {1}".format(417repr(self.output), self.cmd))418except ProbackupException as e:419self.assertIn(420'ERROR: Corruption detected in file "{0}", block 1: '421'page verification failed, calculated checksum'.format(422heap_fullpath),423e.message,424'\n Unexpected Error Message: {0}\n CMD: {1}'.format(425repr(e.message), self.cmd))426
427sleep(1)428
429try:430self.backup_node(431backup_dir, 'node', node,432backup_type="page", options=["-j", "4", "--stream"])433# we should die here because exception is what we expect to happen434self.assertEqual(4351, 0,436"Expecting Error because of block corruption"437"\n Output: {0} \n CMD: {1}".format(438repr(self.output), self.cmd))439except ProbackupException as e:440self.assertIn(441'ERROR: Corruption detected in file "{0}", block 1: '442'page verification failed, calculated checksum'.format(443heap_fullpath),444e.message,445'\n Unexpected Error Message: {0}\n CMD: {1}'.format(446repr(e.message), self.cmd))447
448sleep(1)449
450if self.ptrack:451try:452self.backup_node(453backup_dir, 'node', node,454backup_type="ptrack", options=["-j", "4", "--stream"])455# we should die here because exception is what we expect to happen456self.assertEqual(4571, 0,458"Expecting Error because of block corruption"459"\n Output: {0} \n CMD: {1}".format(460repr(self.output), self.cmd))461except ProbackupException as e:462self.assertIn(463'ERROR: Corruption detected in file "{0}", block 1: '464'page verification failed, calculated checksum'.format(465heap_fullpath),466e.message,467'\n Unexpected Error Message: {0}\n CMD: {1}'.format(468repr(e.message), self.cmd))469
470# @unittest.skip("skip")471def test_backup_detect_invalid_block_header(self):472"""make node, corrupt some page, check that backup failed"""473node = self.make_simple_node(474base_dir=os.path.join(self.module_name, self.fname, 'node'),475set_replication=True,476ptrack_enable=self.ptrack,477initdb_params=['--data-checksums'])478
479backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')480
481self.init_pb(backup_dir)482self.add_instance(backup_dir, 'node', node)483self.set_archiving(backup_dir, 'node', node)484node.slow_start()485
486if self.ptrack:487node.safe_psql(488"postgres",489"create extension ptrack")490
491node.safe_psql(492"postgres",493"create table t_heap as select 1 as id, md5(i::text) as text, "494"md5(repeat(i::text,10))::tsvector as tsvector "495"from generate_series(0,10000) i")496
497heap_path = node.safe_psql(498"postgres",499"select pg_relation_filepath('t_heap')").decode('utf-8').rstrip()500
501self.backup_node(502backup_dir, 'node', node,503backup_type="full", options=["-j", "4", "--stream"])504
505node.safe_psql(506"postgres",507"select count(*) from t_heap")508
509node.safe_psql(510"postgres",511"update t_heap set id = id + 10000")512
513node.stop()514
515heap_fullpath = os.path.join(node.data_dir, heap_path)516with open(heap_fullpath, "rb+", 0) as f:517f.seek(8193)518f.write(b"blahblahblahblah")519f.flush()520f.close521
522node.slow_start()523
524# self.backup_node(
525# backup_dir, 'node', node,
526# backup_type="full", options=["-j", "4", "--stream"])
527
528try:529self.backup_node(530backup_dir, 'node', node,531backup_type="full", options=["-j", "4", "--stream"])532# we should die here because exception is what we expect to happen533self.assertEqual(5341, 0,535"Expecting Error because of block corruption"536"\n Output: {0} \n CMD: {1}".format(537repr(self.output), self.cmd))538except ProbackupException as e:539self.assertIn(540'ERROR: Corruption detected in file "{0}", block 1: '541'page header invalid, pd_lower'.format(heap_fullpath),542e.message,543'\n Unexpected Error Message: {0}\n CMD: {1}'.format(544repr(e.message), self.cmd))545
546sleep(1)547
548try:549self.backup_node(550backup_dir, 'node', node,551backup_type="delta", options=["-j", "4", "--stream"])552# we should die here because exception is what we expect to happen553self.assertEqual(5541, 0,555"Expecting Error because of block corruption"556"\n Output: {0} \n CMD: {1}".format(557repr(self.output), self.cmd))558except ProbackupException as e:559self.assertIn(560'ERROR: Corruption detected in file "{0}", block 1: '561'page header invalid, pd_lower'.format(heap_fullpath),562e.message,563'\n Unexpected Error Message: {0}\n CMD: {1}'.format(564repr(e.message), self.cmd))565
566sleep(1)567
568try:569self.backup_node(570backup_dir, 'node', node,571backup_type="page", options=["-j", "4", "--stream"])572# we should die here because exception is what we expect to happen573self.assertEqual(5741, 0,575"Expecting Error because of block corruption"576"\n Output: {0} \n CMD: {1}".format(577repr(self.output), self.cmd))578except ProbackupException as e:579self.assertIn(580'ERROR: Corruption detected in file "{0}", block 1: '581'page header invalid, pd_lower'.format(heap_fullpath),582e.message,583'\n Unexpected Error Message: {0}\n CMD: {1}'.format(584repr(e.message), self.cmd))585
586sleep(1)587
588if self.ptrack:589try:590self.backup_node(591backup_dir, 'node', node,592backup_type="ptrack", options=["-j", "4", "--stream"])593# we should die here because exception is what we expect to happen594self.assertEqual(5951, 0,596"Expecting Error because of block corruption"597"\n Output: {0} \n CMD: {1}".format(598repr(self.output), self.cmd))599except ProbackupException as e:600self.assertIn(601'ERROR: Corruption detected in file "{0}", block 1: '602'page header invalid, pd_lower'.format(heap_fullpath),603e.message,604'\n Unexpected Error Message: {0}\n CMD: {1}'.format(605repr(e.message), self.cmd))606
607# @unittest.skip("skip")608def test_backup_detect_missing_permissions(self):609"""make node, corrupt some page, check that backup failed"""610node = self.make_simple_node(611base_dir=os.path.join(self.module_name, self.fname, 'node'),612set_replication=True,613ptrack_enable=self.ptrack,614initdb_params=['--data-checksums'])615
616backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')617
618self.init_pb(backup_dir)619self.add_instance(backup_dir, 'node', node)620self.set_archiving(backup_dir, 'node', node)621node.slow_start()622
623if self.ptrack:624node.safe_psql(625"postgres",626"create extension ptrack")627
628node.safe_psql(629"postgres",630"create table t_heap as select 1 as id, md5(i::text) as text, "631"md5(repeat(i::text,10))::tsvector as tsvector "632"from generate_series(0,10000) i")633
634heap_path = node.safe_psql(635"postgres",636"select pg_relation_filepath('t_heap')").decode('utf-8').rstrip()637
638self.backup_node(639backup_dir, 'node', node,640backup_type="full", options=["-j", "4", "--stream"])641
642node.safe_psql(643"postgres",644"select count(*) from t_heap")645
646node.safe_psql(647"postgres",648"update t_heap set id = id + 10000")649
650node.stop()651
652heap_fullpath = os.path.join(node.data_dir, heap_path)653with open(heap_fullpath, "rb+", 0) as f:654f.seek(8193)655f.write(b"blahblahblahblah")656f.flush()657f.close658
659node.slow_start()660
661# self.backup_node(
662# backup_dir, 'node', node,
663# backup_type="full", options=["-j", "4", "--stream"])
664
665try:666self.backup_node(667backup_dir, 'node', node,668backup_type="full", options=["-j", "4", "--stream"])669# we should die here because exception is what we expect to happen670self.assertEqual(6711, 0,672"Expecting Error because of block corruption"673"\n Output: {0} \n CMD: {1}".format(674repr(self.output), self.cmd))675except ProbackupException as e:676self.assertIn(677'ERROR: Corruption detected in file "{0}", block 1: '678'page header invalid, pd_lower'.format(heap_fullpath),679e.message,680'\n Unexpected Error Message: {0}\n CMD: {1}'.format(681repr(e.message), self.cmd))682
683sleep(1)684
685try:686self.backup_node(687backup_dir, 'node', node,688backup_type="delta", options=["-j", "4", "--stream"])689# we should die here because exception is what we expect to happen690self.assertEqual(6911, 0,692"Expecting Error because of block corruption"693"\n Output: {0} \n CMD: {1}".format(694repr(self.output), self.cmd))695except ProbackupException as e:696self.assertIn(697'ERROR: Corruption detected in file "{0}", block 1: '698'page header invalid, pd_lower'.format(heap_fullpath),699e.message,700'\n Unexpected Error Message: {0}\n CMD: {1}'.format(701repr(e.message), self.cmd))702
703sleep(1)704
705try:706self.backup_node(707backup_dir, 'node', node,708backup_type="page", options=["-j", "4", "--stream"])709# we should die here because exception is what we expect to happen710self.assertEqual(7111, 0,712"Expecting Error because of block corruption"713"\n Output: {0} \n CMD: {1}".format(714repr(self.output), self.cmd))715except ProbackupException as e:716self.assertIn(717'ERROR: Corruption detected in file "{0}", block 1: '718'page header invalid, pd_lower'.format(heap_fullpath),719e.message,720'\n Unexpected Error Message: {0}\n CMD: {1}'.format(721repr(e.message), self.cmd))722
723sleep(1)724
725if self.ptrack:726try:727self.backup_node(728backup_dir, 'node', node,729backup_type="ptrack", options=["-j", "4", "--stream"])730# we should die here because exception is what we expect to happen731self.assertEqual(7321, 0,733"Expecting Error because of block corruption"734"\n Output: {0} \n CMD: {1}".format(735repr(self.output), self.cmd))736except ProbackupException as e:737self.assertIn(738'ERROR: Corruption detected in file "{0}", block 1: '739'page header invalid, pd_lower'.format(heap_fullpath),740e.message,741'\n Unexpected Error Message: {0}\n CMD: {1}'.format(742repr(e.message), self.cmd))743
744# @unittest.skip("skip")745def test_backup_truncate_misaligned(self):746"""747make node, truncate file to size not even to BLCKSIZE,
748take backup
749"""
750node = self.make_simple_node(751base_dir=os.path.join(self.module_name, self.fname, 'node'),752set_replication=True,753initdb_params=['--data-checksums'])754
755backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')756
757self.init_pb(backup_dir)758self.add_instance(backup_dir, 'node', node)759node.slow_start()760
761node.safe_psql(762"postgres",763"create table t_heap as select 1 as id, md5(i::text) as text, "764"md5(repeat(i::text,10))::tsvector as tsvector "765"from generate_series(0,100000) i")766
767node.safe_psql(768"postgres",769"CHECKPOINT;")770
771heap_path = node.safe_psql(772"postgres",773"select pg_relation_filepath('t_heap')").decode('utf-8').rstrip()774
775heap_size = node.safe_psql(776"postgres",777"select pg_relation_size('t_heap')")778
779with open(os.path.join(node.data_dir, heap_path), "rb+", 0) as f:780f.truncate(int(heap_size) - 4096)781f.flush()782f.close783
784output = self.backup_node(785backup_dir, 'node', node, backup_type="full",786options=["-j", "4", "--stream"], return_id=False)787
788self.assertIn("WARNING: File", output)789self.assertIn("invalid file size", output)790
791# @unittest.skip("skip")792def test_tablespace_in_pgdata_pgpro_1376(self):793"""PGPRO-1376 """794node = self.make_simple_node(795base_dir=os.path.join(self.module_name, self.fname, 'node'),796set_replication=True,797initdb_params=['--data-checksums'])798
799backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')800
801self.init_pb(backup_dir)802self.add_instance(backup_dir, 'node', node)803node.slow_start()804
805self.create_tblspace_in_node(806node, 'tblspace1',807tblspc_path=(808os.path.join(809node.data_dir, 'somedirectory', '100500'))810)811
812self.create_tblspace_in_node(813node, 'tblspace2',814tblspc_path=(os.path.join(node.data_dir))815)816
817node.safe_psql(818"postgres",819"create table t_heap1 tablespace tblspace1 as select 1 as id, "820"md5(i::text) as text, "821"md5(repeat(i::text,10))::tsvector as tsvector "822"from generate_series(0,1000) i")823
824node.safe_psql(825"postgres",826"create table t_heap2 tablespace tblspace2 as select 1 as id, "827"md5(i::text) as text, "828"md5(repeat(i::text,10))::tsvector as tsvector "829"from generate_series(0,1000) i")830
831backup_id_1 = self.backup_node(832backup_dir, 'node', node, backup_type="full",833options=["-j", "4", "--stream"])834
835node.safe_psql(836"postgres",837"drop table t_heap2")838node.safe_psql(839"postgres",840"drop tablespace tblspace2")841
842self.backup_node(843backup_dir, 'node', node, backup_type="full",844options=["-j", "4", "--stream"])845
846pgdata = self.pgdata_content(node.data_dir)847
848relfilenode = node.safe_psql(849"postgres",850"select 't_heap1'::regclass::oid"851).decode('utf-8').rstrip()852
853list = []854for root, dirs, files in os.walk(os.path.join(855backup_dir, 'backups', 'node', backup_id_1)):856for file in files:857if file == relfilenode:858path = os.path.join(root, file)859list = list + [path]860
861# We expect that relfilenode can be encountered only once862if len(list) > 1:863message = ""864for string in list:865message = message + string + "\n"866self.assertEqual(8671, 0,868"Following file copied twice by backup:\n {0}".format(869message)870)871
872node.cleanup()873
874self.restore_node(875backup_dir, 'node', node, options=["-j", "4"])876
877if self.paranoia:878pgdata_restored = self.pgdata_content(node.data_dir)879self.compare_pgdata(pgdata, pgdata_restored)880
    # @unittest.skip("skip")
    def test_basic_tablespace_handling(self):
        """
        make node, take full backup, check that restore with
        tablespace mapping will end with error, take page backup,
        check that restore with tablespace mapping will end with
        success
        """
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        # FULL backup taken BEFORE any tablespace exists: remapping it
        # later must fail with "has no tablespaceses".
        backup_id = self.backup_node(
            backup_dir, 'node', node, backup_type="full",
            options=["-j", "4", "--stream"])

        tblspace1_old_path = self.get_tblspace_path(node, 'tblspace1_old')
        tblspace2_old_path = self.get_tblspace_path(node, 'tblspace2_old')

        # A throwaway tablespace that will be dropped before the delta
        # backup, so it never appears in the restored cluster.
        self.create_tblspace_in_node(
            node, 'some_lame_tablespace')

        self.create_tblspace_in_node(
            node, 'tblspace1',
            tblspc_path=tblspace1_old_path)

        self.create_tblspace_in_node(
            node, 'tblspace2',
            tblspc_path=tblspace2_old_path)

        node.safe_psql(
            "postgres",
            "create table t_heap_lame tablespace some_lame_tablespace "
            "as select 1 as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,1000) i")

        node.safe_psql(
            "postgres",
            "create table t_heap2 tablespace tblspace2 as select 1 as id, "
            "md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,1000) i")

        tblspace1_new_path = self.get_tblspace_path(node, 'tblspace1_new')
        tblspace2_new_path = self.get_tblspace_path(node, 'tblspace2_new')

        node_restored = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
        node_restored.cleanup()

        # Restoring the tablespace-less FULL backup with -T mappings must
        # be rejected: there is nothing to remap.
        try:
            self.restore_node(
                backup_dir, 'node', node_restored,
                options=[
                    "-j", "4",
                    "-T", "{0}={1}".format(
                        tblspace1_old_path, tblspace1_new_path),
                    "-T", "{0}={1}".format(
                        tblspace2_old_path, tblspace2_new_path)])
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because tablespace mapping is incorrect"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'ERROR: Backup {0} has no tablespaceses, '
                'nothing to remap'.format(backup_id),
                e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        node.safe_psql(
            "postgres",
            "drop table t_heap_lame")

        node.safe_psql(
            "postgres",
            "drop tablespace some_lame_tablespace")

        # DELTA backup now records tblspace1/tblspace2, so remapping
        # them during restore must succeed.
        self.backup_node(
            backup_dir, 'node', node, backup_type="delta",
            options=["-j", "4", "--stream"])

        self.restore_node(
            backup_dir, 'node', node_restored,
            options=[
                "-j", "4",
                "-T", "{0}={1}".format(
                    tblspace1_old_path, tblspace1_new_path),
                "-T", "{0}={1}".format(
                    tblspace2_old_path, tblspace2_new_path)])

        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        if self.paranoia:
            pgdata_restored = self.pgdata_content(node_restored.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)
990# @unittest.skip("skip")991def test_tablespace_handling_1(self):992"""993make node with tablespace A, take full backup, check that restore with
994tablespace mapping of tablespace B will end with error
995"""
996node = self.make_simple_node(997base_dir=os.path.join(self.module_name, self.fname, 'node'),998set_replication=True,999initdb_params=['--data-checksums'])1000
1001backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')1002
1003self.init_pb(backup_dir)1004self.add_instance(backup_dir, 'node', node)1005node.slow_start()1006
1007tblspace1_old_path = self.get_tblspace_path(node, 'tblspace1_old')1008tblspace2_old_path = self.get_tblspace_path(node, 'tblspace2_old')1009
1010tblspace_new_path = self.get_tblspace_path(node, 'tblspace_new')1011
1012self.create_tblspace_in_node(1013node, 'tblspace1',1014tblspc_path=tblspace1_old_path)1015
1016self.backup_node(1017backup_dir, 'node', node, backup_type="full",1018options=["-j", "4", "--stream"])1019
1020node_restored = self.make_simple_node(1021base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))1022node_restored.cleanup()1023
1024try:1025self.restore_node(1026backup_dir, 'node', node_restored,1027options=[1028"-j", "4",1029"-T", "{0}={1}".format(1030tblspace2_old_path, tblspace_new_path)])1031# we should die here because exception is what we expect to happen1032self.assertEqual(10331, 0,1034"Expecting Error because tablespace mapping is incorrect"1035"\n Output: {0} \n CMD: {1}".format(1036repr(self.output), self.cmd))1037except ProbackupException as e:1038self.assertTrue(1039'ERROR: --tablespace-mapping option' in e.message and1040'have an entry in tablespace_map file' in e.message,1041'\n Unexpected Error Message: {0}\n CMD: {1}'.format(1042repr(e.message), self.cmd))1043
1044# @unittest.skip("skip")1045def test_tablespace_handling_2(self):1046"""1047make node without tablespaces, take full backup, check that restore with
1048tablespace mapping will end with error
1049"""
1050node = self.make_simple_node(1051base_dir=os.path.join(self.module_name, self.fname, 'node'),1052set_replication=True,1053initdb_params=['--data-checksums'])1054
1055backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')1056
1057self.init_pb(backup_dir)1058self.add_instance(backup_dir, 'node', node)1059node.slow_start()1060
1061tblspace1_old_path = self.get_tblspace_path(node, 'tblspace1_old')1062tblspace_new_path = self.get_tblspace_path(node, 'tblspace_new')1063
1064backup_id = self.backup_node(1065backup_dir, 'node', node, backup_type="full",1066options=["-j", "4", "--stream"])1067
1068node_restored = self.make_simple_node(1069base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))1070node_restored.cleanup()1071
1072try:1073self.restore_node(1074backup_dir, 'node', node_restored,1075options=[1076"-j", "4",1077"-T", "{0}={1}".format(1078tblspace1_old_path, tblspace_new_path)])1079# we should die here because exception is what we expect to happen1080self.assertEqual(10811, 0,1082"Expecting Error because tablespace mapping is incorrect"1083"\n Output: {0} \n CMD: {1}".format(1084repr(self.output), self.cmd))1085except ProbackupException as e:1086self.assertIn(1087'ERROR: Backup {0} has no tablespaceses, '1088'nothing to remap'.format(backup_id), e.message,1089'\n Unexpected Error Message: {0}\n CMD: {1}'.format(1090repr(e.message), self.cmd))1091
1092# @unittest.skip("skip")1093def test_drop_rel_during_full_backup(self):1094""""""1095self._check_gdb_flag_or_skip_test()1096
1097backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')1098node = self.make_simple_node(1099base_dir=os.path.join(self.module_name, self.fname, 'node'),1100set_replication=True,1101initdb_params=['--data-checksums'])1102
1103self.init_pb(backup_dir)1104self.add_instance(backup_dir, 'node', node)1105node.slow_start()1106
1107for i in range(1, 512):1108node.safe_psql(1109"postgres",1110"create table t_heap_{0} as select i"1111" as id from generate_series(0,100) i".format(i))1112
1113node.safe_psql(1114"postgres",1115"VACUUM")1116
1117node.pgbench_init(scale=10)1118
1119relative_path_1 = node.safe_psql(1120"postgres",1121"select pg_relation_filepath('t_heap_1')").decode('utf-8').rstrip()1122
1123relative_path_2 = node.safe_psql(1124"postgres",1125"select pg_relation_filepath('t_heap_1')").decode('utf-8').rstrip()1126
1127absolute_path_1 = os.path.join(node.data_dir, relative_path_1)1128absolute_path_2 = os.path.join(node.data_dir, relative_path_2)1129
1130# FULL backup1131gdb = self.backup_node(1132backup_dir, 'node', node,1133options=['--stream', '--log-level-file=LOG', '--log-level-console=LOG', '--progress'],1134gdb=True)1135
1136gdb.set_breakpoint('backup_files')1137gdb.run_until_break()1138
1139# REMOVE file1140for i in range(1, 512):1141node.safe_psql(1142"postgres",1143"drop table t_heap_{0}".format(i))1144
1145node.safe_psql(1146"postgres",1147"CHECKPOINT")1148
1149node.safe_psql(1150"postgres",1151"CHECKPOINT")1152
1153# File removed, we can proceed with backup1154gdb.continue_execution_until_exit()1155
1156pgdata = self.pgdata_content(node.data_dir)1157
1158#with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f:1159# log_content = f.read()1160# self.assertTrue(1161# 'LOG: File "{0}" is not found'.format(absolute_path) in log_content,1162# 'File "{0}" should be deleted but it`s not'.format(absolute_path))1163
1164node.cleanup()1165self.restore_node(backup_dir, 'node', node)1166
1167# Physical comparison1168pgdata_restored = self.pgdata_content(node.data_dir)1169self.compare_pgdata(pgdata, pgdata_restored)1170
1171@unittest.skip("skip")1172def test_drop_db_during_full_backup(self):1173""""""1174backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')1175node = self.make_simple_node(1176base_dir=os.path.join(self.module_name, self.fname, 'node'),1177set_replication=True,1178initdb_params=['--data-checksums'])1179
1180self.init_pb(backup_dir)1181self.add_instance(backup_dir, 'node', node)1182node.slow_start()1183
1184for i in range(1, 2):1185node.safe_psql(1186"postgres",1187"create database t_heap_{0}".format(i))1188
1189node.safe_psql(1190"postgres",1191"VACUUM")1192
1193# FULL backup1194gdb = self.backup_node(1195backup_dir, 'node', node, gdb=True,1196options=[1197'--stream', '--log-level-file=LOG',1198'--log-level-console=LOG', '--progress'])1199
1200gdb.set_breakpoint('backup_files')1201gdb.run_until_break()1202
1203# REMOVE file1204for i in range(1, 2):1205node.safe_psql(1206"postgres",1207"drop database t_heap_{0}".format(i))1208
1209node.safe_psql(1210"postgres",1211"CHECKPOINT")1212
1213node.safe_psql(1214"postgres",1215"CHECKPOINT")1216
1217# File removed, we can proceed with backup1218gdb.continue_execution_until_exit()1219
1220pgdata = self.pgdata_content(node.data_dir)1221
1222#with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f:1223# log_content = f.read()1224# self.assertTrue(1225# 'LOG: File "{0}" is not found'.format(absolute_path) in log_content,1226# 'File "{0}" should be deleted but it`s not'.format(absolute_path))1227
1228node.cleanup()1229self.restore_node(backup_dir, 'node', node)1230
1231# Physical comparison1232pgdata_restored = self.pgdata_content(node.data_dir)1233self.compare_pgdata(pgdata, pgdata_restored)1234
1235# @unittest.skip("skip")1236def test_drop_rel_during_backup_delta(self):1237""""""1238self._check_gdb_flag_or_skip_test()1239
1240backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')1241node = self.make_simple_node(1242base_dir=os.path.join(self.module_name, self.fname, 'node'),1243set_replication=True,1244initdb_params=['--data-checksums'])1245
1246self.init_pb(backup_dir)1247self.add_instance(backup_dir, 'node', node)1248self.set_archiving(backup_dir, 'node', node)1249node.slow_start()1250
1251node.pgbench_init(scale=10)1252
1253node.safe_psql(1254"postgres",1255"create table t_heap as select i"1256" as id from generate_series(0,100) i")1257
1258relative_path = node.safe_psql(1259"postgres",1260"select pg_relation_filepath('t_heap')").decode('utf-8').rstrip()1261
1262absolute_path = os.path.join(node.data_dir, relative_path)1263
1264# FULL backup1265self.backup_node(backup_dir, 'node', node, options=['--stream'])1266
1267# DELTA backup1268gdb = self.backup_node(1269backup_dir, 'node', node, backup_type='delta',1270gdb=True, options=['--log-level-file=LOG'])1271
1272gdb.set_breakpoint('backup_files')1273gdb.run_until_break()1274
1275# REMOVE file1276node.safe_psql(1277"postgres",1278"DROP TABLE t_heap")1279
1280node.safe_psql(1281"postgres",1282"CHECKPOINT")1283
1284# File removed, we can proceed with backup1285gdb.continue_execution_until_exit()1286
1287pgdata = self.pgdata_content(node.data_dir)1288
1289with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f:1290log_content = f.read()1291self.assertTrue(1292'LOG: File not found: "{0}"'.format(absolute_path) in log_content,1293'File "{0}" should be deleted but it`s not'.format(absolute_path))1294
1295node.cleanup()1296self.restore_node(backup_dir, 'node', node, options=["-j", "4"])1297
1298# Physical comparison1299pgdata_restored = self.pgdata_content(node.data_dir)1300self.compare_pgdata(pgdata, pgdata_restored)1301
1302# @unittest.skip("skip")1303def test_drop_rel_during_backup_page(self):1304""""""1305self._check_gdb_flag_or_skip_test()1306
1307backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')1308node = self.make_simple_node(1309base_dir=os.path.join(self.module_name, self.fname, 'node'),1310set_replication=True,1311initdb_params=['--data-checksums'])1312
1313self.init_pb(backup_dir)1314self.add_instance(backup_dir, 'node', node)1315self.set_archiving(backup_dir, 'node', node)1316node.slow_start()1317
1318node.safe_psql(1319"postgres",1320"create table t_heap as select i"1321" as id from generate_series(0,100) i")1322
1323relative_path = node.safe_psql(1324"postgres",1325"select pg_relation_filepath('t_heap')").decode('utf-8').rstrip()1326
1327absolute_path = os.path.join(node.data_dir, relative_path)1328
1329# FULL backup1330self.backup_node(backup_dir, 'node', node, options=['--stream'])1331
1332node.safe_psql(1333"postgres",1334"insert into t_heap select i"1335" as id from generate_series(101,102) i")1336
1337# PAGE backup1338gdb = self.backup_node(1339backup_dir, 'node', node, backup_type='page',1340gdb=True, options=['--log-level-file=LOG'])1341
1342gdb.set_breakpoint('backup_files')1343gdb.run_until_break()1344
1345# REMOVE file1346os.remove(absolute_path)1347
1348# File removed, we can proceed with backup1349gdb.continue_execution_until_exit()1350gdb.kill()1351
1352pgdata = self.pgdata_content(node.data_dir)1353
1354backup_id = self.show_pb(backup_dir, 'node')[1]['id']1355
1356filelist = self.get_backup_filelist(backup_dir, 'node', backup_id)1357self.assertNotIn(relative_path, filelist)1358
1359node.cleanup()1360self.restore_node(backup_dir, 'node', node, options=["-j", "4"])1361
1362# Physical comparison1363pgdata_restored = self.pgdata_content(node.data_dir)1364self.compare_pgdata(pgdata, pgdata_restored)1365
1366# @unittest.skip("skip")1367def test_persistent_slot_for_stream_backup(self):1368""""""1369backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')1370node = self.make_simple_node(1371base_dir=os.path.join(self.module_name, self.fname, 'node'),1372set_replication=True,1373initdb_params=['--data-checksums'],1374pg_options={1375'max_wal_size': '40MB'})1376
1377self.init_pb(backup_dir)1378self.add_instance(backup_dir, 'node', node)1379self.set_archiving(backup_dir, 'node', node)1380node.slow_start()1381
1382node.safe_psql(1383"postgres",1384"SELECT pg_create_physical_replication_slot('slot_1')")1385
1386# FULL backup1387self.backup_node(1388backup_dir, 'node', node,1389options=['--stream', '--slot=slot_1'])1390
1391# FULL backup1392self.backup_node(1393backup_dir, 'node', node,1394options=['--stream', '--slot=slot_1'])1395
1396# @unittest.skip("skip")1397def test_basic_temp_slot_for_stream_backup(self):1398""""""1399backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')1400node = self.make_simple_node(1401base_dir=os.path.join(self.module_name, self.fname, 'node'),1402set_replication=True,1403initdb_params=['--data-checksums'],1404pg_options={'max_wal_size': '40MB'})1405
1406if self.get_version(node) < self.version_to_num('10.0'):1407self.skipTest('You need PostgreSQL >= 10 for this test')1408
1409self.init_pb(backup_dir)1410self.add_instance(backup_dir, 'node', node)1411self.set_archiving(backup_dir, 'node', node)1412node.slow_start()1413
1414# FULL backup1415self.backup_node(1416backup_dir, 'node', node,1417options=['--stream', '--temp-slot'])1418
1419# FULL backup1420self.backup_node(1421backup_dir, 'node', node,1422options=['--stream', '--slot=slot_1', '--temp-slot'])1423
1424# @unittest.skip("skip")1425def test_backup_concurrent_drop_table(self):1426""""""1427self._check_gdb_flag_or_skip_test()1428
1429backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')1430node = self.make_simple_node(1431base_dir=os.path.join(self.module_name, self.fname, 'node'),1432set_replication=True,1433initdb_params=['--data-checksums'])1434
1435self.init_pb(backup_dir)1436self.add_instance(backup_dir, 'node', node)1437self.set_archiving(backup_dir, 'node', node)1438node.slow_start()1439
1440node.pgbench_init(scale=1)1441
1442# FULL backup1443gdb = self.backup_node(1444backup_dir, 'node', node,1445options=['--stream', '--compress'],1446gdb=True)1447
1448gdb.set_breakpoint('backup_data_file')1449gdb.run_until_break()1450
1451node.safe_psql(1452'postgres',1453'DROP TABLE pgbench_accounts')1454
1455# do checkpoint to guarantee filenode removal1456node.safe_psql(1457'postgres',1458'CHECKPOINT')1459
1460gdb.remove_all_breakpoints()1461gdb.continue_execution_until_exit()1462gdb.kill()1463
1464show_backup = self.show_pb(backup_dir, 'node')[0]1465
1466self.assertEqual(show_backup['status'], "OK")1467
1468# @unittest.skip("skip")1469def test_pg_11_adjusted_wal_segment_size(self):1470""""""1471if self.pg_config_version < self.version_to_num('11.0'):1472self.skipTest('You need PostgreSQL >= 11 for this test')1473
1474backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')1475node = self.make_simple_node(1476base_dir=os.path.join(self.module_name, self.fname, 'node'),1477set_replication=True,1478initdb_params=[1479'--data-checksums',1480'--wal-segsize=64'],1481pg_options={1482'min_wal_size': '128MB'})1483
1484self.init_pb(backup_dir)1485self.add_instance(backup_dir, 'node', node)1486self.set_archiving(backup_dir, 'node', node)1487node.slow_start()1488
1489node.pgbench_init(scale=5)1490
1491# FULL STREAM backup1492self.backup_node(1493backup_dir, 'node', node, options=['--stream'])1494
1495pgbench = node.pgbench(options=['-T', '5', '-c', '2'])1496pgbench.wait()1497
1498# PAGE STREAM backup1499self.backup_node(1500backup_dir, 'node', node,1501backup_type='page', options=['--stream'])1502
1503pgbench = node.pgbench(options=['-T', '5', '-c', '2'])1504pgbench.wait()1505
1506# DELTA STREAM backup1507self.backup_node(1508backup_dir, 'node', node,1509backup_type='delta', options=['--stream'])1510
1511pgbench = node.pgbench(options=['-T', '5', '-c', '2'])1512pgbench.wait()1513
1514# FULL ARCHIVE backup1515self.backup_node(backup_dir, 'node', node)1516
1517pgbench = node.pgbench(options=['-T', '5', '-c', '2'])1518pgbench.wait()1519
1520# PAGE ARCHIVE backup1521self.backup_node(backup_dir, 'node', node, backup_type='page')1522
1523pgbench = node.pgbench(options=['-T', '5', '-c', '2'])1524pgbench.wait()1525
1526# DELTA ARCHIVE backup1527backup_id = self.backup_node(backup_dir, 'node', node, backup_type='delta')1528pgdata = self.pgdata_content(node.data_dir)1529
1530# delete1531output = self.delete_pb(1532backup_dir, 'node',1533options=[1534'--expired',1535'--delete-wal',1536'--retention-redundancy=1'])1537
1538# validate1539self.validate_pb(backup_dir)1540
1541# merge1542self.merge_backup(backup_dir, 'node', backup_id=backup_id)1543
1544# restore1545node.cleanup()1546self.restore_node(1547backup_dir, 'node', node, backup_id=backup_id)1548
1549pgdata_restored = self.pgdata_content(node.data_dir)1550self.compare_pgdata(pgdata, pgdata_restored)1551
1552# @unittest.skip("skip")1553def test_sigint_handling(self):1554""""""1555self._check_gdb_flag_or_skip_test()1556
1557backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')1558node = self.make_simple_node(1559base_dir=os.path.join(self.module_name, self.fname, 'node'),1560set_replication=True,1561initdb_params=['--data-checksums'])1562
1563self.init_pb(backup_dir)1564self.add_instance(backup_dir, 'node', node)1565node.slow_start()1566
1567# FULL backup1568gdb = self.backup_node(1569backup_dir, 'node', node, gdb=True,1570options=['--stream', '--log-level-file=LOG'])1571
1572gdb.set_breakpoint('backup_non_data_file')1573gdb.run_until_break()1574
1575gdb.continue_execution_until_break(20)1576gdb.remove_all_breakpoints()1577
1578gdb._execute('signal SIGINT')1579gdb.continue_execution_until_error()1580gdb.kill()1581
1582backup_id = self.show_pb(backup_dir, 'node')[0]['id']1583
1584self.assertEqual(1585'ERROR',1586self.show_pb(backup_dir, 'node', backup_id)['status'],1587'Backup STATUS should be "ERROR"')1588
1589# @unittest.skip("skip")1590def test_sigterm_handling(self):1591""""""1592self._check_gdb_flag_or_skip_test()1593
1594backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')1595node = self.make_simple_node(1596base_dir=os.path.join(self.module_name, self.fname, 'node'),1597set_replication=True,1598initdb_params=['--data-checksums'])1599
1600self.init_pb(backup_dir)1601self.add_instance(backup_dir, 'node', node)1602node.slow_start()1603
1604# FULL backup1605gdb = self.backup_node(1606backup_dir, 'node', node, gdb=True,1607options=['--stream', '--log-level-file=LOG'])1608
1609gdb.set_breakpoint('backup_non_data_file')1610gdb.run_until_break()1611
1612gdb.continue_execution_until_break(20)1613gdb.remove_all_breakpoints()1614
1615gdb._execute('signal SIGTERM')1616gdb.continue_execution_until_error()1617
1618backup_id = self.show_pb(backup_dir, 'node')[0]['id']1619
1620self.assertEqual(1621'ERROR',1622self.show_pb(backup_dir, 'node', backup_id)['status'],1623'Backup STATUS should be "ERROR"')1624
1625# @unittest.skip("skip")1626def test_sigquit_handling(self):1627""""""1628self._check_gdb_flag_or_skip_test()1629
1630backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')1631node = self.make_simple_node(1632base_dir=os.path.join(self.module_name, self.fname, 'node'),1633set_replication=True,1634initdb_params=['--data-checksums'])1635
1636self.init_pb(backup_dir)1637self.add_instance(backup_dir, 'node', node)1638node.slow_start()1639
1640# FULL backup1641gdb = self.backup_node(1642backup_dir, 'node', node, gdb=True, options=['--stream'])1643
1644gdb.set_breakpoint('backup_non_data_file')1645gdb.run_until_break()1646
1647gdb.continue_execution_until_break(20)1648gdb.remove_all_breakpoints()1649
1650gdb._execute('signal SIGQUIT')1651gdb.continue_execution_until_error()1652
1653backup_id = self.show_pb(backup_dir, 'node')[0]['id']1654
1655self.assertEqual(1656'ERROR',1657self.show_pb(backup_dir, 'node', backup_id)['status'],1658'Backup STATUS should be "ERROR"')1659
1660# @unittest.skip("skip")1661def test_drop_table(self):1662""""""1663backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')1664node = self.make_simple_node(1665base_dir=os.path.join(self.module_name, self.fname, 'node'),1666set_replication=True,1667initdb_params=['--data-checksums'])1668
1669self.init_pb(backup_dir)1670self.add_instance(backup_dir, 'node', node)1671node.slow_start()1672
1673connect_1 = node.connect("postgres")1674connect_1.execute(1675"create table t_heap as select i"1676" as id from generate_series(0,100) i")1677connect_1.commit()1678
1679connect_2 = node.connect("postgres")1680connect_2.execute("SELECT * FROM t_heap")1681connect_2.commit()1682
1683# DROP table1684connect_2.execute("DROP TABLE t_heap")1685connect_2.commit()1686
1687# FULL backup1688self.backup_node(1689backup_dir, 'node', node, options=['--stream'])1690
1691# @unittest.skip("skip")1692def test_basic_missing_file_permissions(self):1693""""""1694if os.name == 'nt':1695self.skipTest('Skipped because it is POSIX only test')1696
1697backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')1698node = self.make_simple_node(1699base_dir=os.path.join(self.module_name, self.fname, 'node'),1700set_replication=True,1701initdb_params=['--data-checksums'])1702
1703self.init_pb(backup_dir)1704self.add_instance(backup_dir, 'node', node)1705node.slow_start()1706
1707relative_path = node.safe_psql(1708"postgres",1709"select pg_relation_filepath('pg_class')").decode('utf-8').rstrip()1710
1711full_path = os.path.join(node.data_dir, relative_path)1712
1713os.chmod(full_path, 000)1714
1715try:1716# FULL backup1717self.backup_node(1718backup_dir, 'node', node, options=['--stream'])1719# we should die here because exception is what we expect to happen1720self.assertEqual(17211, 0,1722"Expecting Error because of missing permissions"1723"\n Output: {0} \n CMD: {1}".format(1724repr(self.output), self.cmd))1725except ProbackupException as e:1726self.assertIn(1727'ERROR: Cannot open file',1728e.message,1729'\n Unexpected Error Message: {0}\n CMD: {1}'.format(1730repr(e.message), self.cmd))1731
1732os.chmod(full_path, 700)1733
1734# @unittest.skip("skip")1735def test_basic_missing_dir_permissions(self):1736""""""1737if os.name == 'nt':1738self.skipTest('Skipped because it is POSIX only test')1739
1740backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')1741node = self.make_simple_node(1742base_dir=os.path.join(self.module_name, self.fname, 'node'),1743set_replication=True,1744initdb_params=['--data-checksums'])1745
1746self.init_pb(backup_dir)1747self.add_instance(backup_dir, 'node', node)1748node.slow_start()1749
1750full_path = os.path.join(node.data_dir, 'pg_twophase')1751
1752os.chmod(full_path, 000)1753
1754try:1755# FULL backup1756self.backup_node(1757backup_dir, 'node', node, options=['--stream'])1758# we should die here because exception is what we expect to happen1759self.assertEqual(17601, 0,1761"Expecting Error because of missing permissions"1762"\n Output: {0} \n CMD: {1}".format(1763repr(self.output), self.cmd))1764except ProbackupException as e:1765self.assertIn(1766'ERROR: Cannot open directory',1767e.message,1768'\n Unexpected Error Message: {0}\n CMD: {1}'.format(1769repr(e.message), self.cmd))1770
1771os.rmdir(full_path)1772
    # @unittest.skip("skip")
    def test_backup_with_least_privileges_role(self):
        """
        Run every backup mode (FULL/PAGE/DELTA, stream and archive, plus
        PTRACK when enabled) as an unprivileged 'backup' role that was
        granted only the minimum catalog/function privileges pg_probackup
        needs for the server version under test.
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=self.ptrack,
            initdb_params=['--data-checksums'],
            pg_options={'archive_timeout': '30s'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # Dedicated database so the REVOKE/GRANT churn below does not
        # touch the default 'postgres' database.
        node.safe_psql(
            'postgres',
            'CREATE DATABASE backupdb')

        if self.ptrack:
            node.safe_psql(
                "backupdb",
                "CREATE SCHEMA ptrack; "
                "CREATE EXTENSION ptrack WITH SCHEMA ptrack")

        # PG 9.5
        # NOTE(review): a server of exactly 90600 matches neither this
        # branch (< 90600) nor the next (> 90600) and would fall through
        # to the >= 15 branch — confirm that gap is intentional.
        if self.get_version(node) < 90600:
            node.safe_psql(
                'backupdb',
                "REVOKE ALL ON DATABASE backupdb from PUBLIC; "
                "REVOKE ALL ON SCHEMA public from PUBLIC; "
                "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; "
                "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; "
                "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; "
                "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; "
                "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; "
                "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; "
                "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; "
                "REVOKE ALL ON SCHEMA information_schema from PUBLIC; "
                "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; "
                "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; "
                "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; "
                "CREATE ROLE backup WITH LOGIN REPLICATION; "
                "GRANT CONNECT ON DATABASE backupdb to backup; "
                "GRANT USAGE ON SCHEMA pg_catalog TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack
                "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;")
        # PG 9.6
        elif self.get_version(node) > 90600 and self.get_version(node) < 100000:
            node.safe_psql(
                'backupdb',
                "REVOKE ALL ON DATABASE backupdb from PUBLIC; "
                "REVOKE ALL ON SCHEMA public from PUBLIC; "
                "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; "
                "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; "
                "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; "
                "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; "
                "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; "
                "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; "
                "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; "
                "REVOKE ALL ON SCHEMA information_schema from PUBLIC; "
                "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; "
                "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; "
                "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; "
                "CREATE ROLE backup WITH LOGIN REPLICATION; "
                "GRANT CONNECT ON DATABASE backupdb to backup; "
                "GRANT USAGE ON SCHEMA pg_catalog TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack
                "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;"
            )
        # >= 10 && < 15
        elif self.get_version(node) >= 100000 and self.get_version(node) < 150000:
            node.safe_psql(
                'backupdb',
                "REVOKE ALL ON DATABASE backupdb from PUBLIC; "
                "REVOKE ALL ON SCHEMA public from PUBLIC; "
                "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; "
                "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; "
                "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; "
                "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; "
                "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; "
                "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; "
                "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; "
                "REVOKE ALL ON SCHEMA information_schema from PUBLIC; "
                "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; "
                "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; "
                "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; "
                "CREATE ROLE backup WITH LOGIN REPLICATION; "
                "GRANT CONNECT ON DATABASE backupdb to backup; "
                "GRANT USAGE ON SCHEMA pg_catalog TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "
                # NOTE(review): pg_extension is granted twice in this
                # branch — harmless duplicate, presumably a copy-paste.
                "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack
                "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;"
            )
        # >= 15
        else:
            node.safe_psql(
                'backupdb',
                "REVOKE ALL ON DATABASE backupdb from PUBLIC; "
                "REVOKE ALL ON SCHEMA public from PUBLIC; "
                "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; "
                "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; "
                "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; "
                "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; "
                "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; "
                "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; "
                "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; "
                "REVOKE ALL ON SCHEMA information_schema from PUBLIC; "
                "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; "
                "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; "
                "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; "
                "CREATE ROLE backup WITH LOGIN REPLICATION; "
                "GRANT CONNECT ON DATABASE backupdb to backup; "
                "GRANT USAGE ON SCHEMA pg_catalog TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "
                # NOTE(review): pg_extension is granted twice in this
                # branch — harmless duplicate, presumably a copy-paste.
                "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack
                "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; "
                # pg_start_backup/pg_stop_backup were renamed in PG 15.
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_start(text, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;"
            )

        # Extra grants needed for PTRACK incremental backups.
        if self.ptrack:
            node.safe_psql(
                "backupdb",
                "GRANT USAGE ON SCHEMA ptrack TO backup")

            node.safe_psql(
                "backupdb",
                "GRANT EXECUTE ON FUNCTION ptrack.ptrack_get_pagemapset(pg_lsn) TO backup; "
                "GRANT EXECUTE ON FUNCTION ptrack.ptrack_init_lsn() TO backup;")

        # Enterprise builds expose extra version functions pg_probackup calls.
        if ProbackupTest.enterprise:
            node.safe_psql(
                "backupdb",
                "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;")

        # FULL backup (stream and archive) as the restricted role.
        self.backup_node(
            backup_dir, 'node', node,
            datname='backupdb', options=['--stream', '-U', 'backup'])
        self.backup_node(
            backup_dir, 'node', node,
            datname='backupdb', options=['-U', 'backup'])

        # PAGE
        self.backup_node(
            backup_dir, 'node', node, backup_type='page',
            datname='backupdb', options=['-U', 'backup'])
        self.backup_node(
            backup_dir, 'node', node, backup_type='page', datname='backupdb',
            options=['--stream', '-U', 'backup'])

        # DELTA
        self.backup_node(
            backup_dir, 'node', node, backup_type='delta',
            datname='backupdb', options=['-U', 'backup'])
        self.backup_node(
            backup_dir, 'node', node, backup_type='delta',
            datname='backupdb', options=['--stream', '-U', 'backup'])

        # PTRACK
        if self.ptrack:
            self.backup_node(
                backup_dir, 'node', node, backup_type='ptrack',
                datname='backupdb', options=['-U', 'backup'])
            self.backup_node(
                backup_dir, 'node', node, backup_type='ptrack',
                datname='backupdb', options=['--stream', '-U', 'backup'])
1998# @unittest.skip("skip")1999def test_parent_choosing(self):2000"""2001PAGE3 <- RUNNING(parent should be FULL)
2002PAGE2 <- OK
2003PAGE1 <- CORRUPT
2004FULL
2005"""
2006backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')2007node = self.make_simple_node(2008base_dir=os.path.join(self.module_name, self.fname, 'node'),2009set_replication=True,2010initdb_params=['--data-checksums'])2011
2012self.init_pb(backup_dir)2013self.add_instance(backup_dir, 'node', node)2014self.set_archiving(backup_dir, 'node', node)2015node.slow_start()2016
2017full_id = self.backup_node(backup_dir, 'node', node)2018
2019# PAGE12020page1_id = self.backup_node(2021backup_dir, 'node', node, backup_type='page')2022
2023# PAGE22024page2_id = self.backup_node(2025backup_dir, 'node', node, backup_type='page')2026
2027# Change PAGE1 to ERROR2028self.change_backup_status(backup_dir, 'node', page1_id, 'ERROR')2029
2030# PAGE32031page3_id = self.backup_node(2032backup_dir, 'node', node,2033backup_type='page', options=['--log-level-file=LOG'])2034
2035log_file_path = os.path.join(backup_dir, 'log', 'pg_probackup.log')2036with open(log_file_path) as f:2037log_file_content = f.read()2038
2039self.assertIn(2040"WARNING: Backup {0} has invalid parent: {1}. "2041"Cannot be a parent".format(page2_id, page1_id),2042log_file_content)2043
2044self.assertIn(2045"WARNING: Backup {0} has status: ERROR. "2046"Cannot be a parent".format(page1_id),2047log_file_content)2048
2049self.assertIn(2050"Parent backup: {0}".format(full_id),2051log_file_content)2052
2053self.assertEqual(2054self.show_pb(2055backup_dir, 'node', backup_id=page3_id)['parent-backup-id'],2056full_id)2057
2058# @unittest.skip("skip")2059def test_parent_choosing_1(self):2060"""2061PAGE3 <- RUNNING(parent should be FULL)
2062PAGE2 <- OK
2063PAGE1 <- (missing)
2064FULL
2065"""
2066backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')2067node = self.make_simple_node(2068base_dir=os.path.join(self.module_name, self.fname, 'node'),2069set_replication=True,2070initdb_params=['--data-checksums'])2071
2072self.init_pb(backup_dir)2073self.add_instance(backup_dir, 'node', node)2074self.set_archiving(backup_dir, 'node', node)2075node.slow_start()2076
2077full_id = self.backup_node(backup_dir, 'node', node)2078
2079# PAGE12080page1_id = self.backup_node(2081backup_dir, 'node', node, backup_type='page')2082
2083# PAGE22084page2_id = self.backup_node(2085backup_dir, 'node', node, backup_type='page')2086
2087# Delete PAGE12088shutil.rmtree(2089os.path.join(backup_dir, 'backups', 'node', page1_id))2090
2091# PAGE32092page3_id = self.backup_node(2093backup_dir, 'node', node,2094backup_type='page', options=['--log-level-file=LOG'])2095
2096log_file_path = os.path.join(backup_dir, 'log', 'pg_probackup.log')2097with open(log_file_path) as f:2098log_file_content = f.read()2099
2100self.assertIn(2101"WARNING: Backup {0} has missing parent: {1}. "2102"Cannot be a parent".format(page2_id, page1_id),2103log_file_content)2104
2105self.assertIn(2106"Parent backup: {0}".format(full_id),2107log_file_content)2108
2109self.assertEqual(2110self.show_pb(2111backup_dir, 'node', backup_id=page3_id)['parent-backup-id'],2112full_id)2113
    # @unittest.skip("skip")
    def test_parent_choosing_2(self):
        """
        PAGE3 <- RUNNING(backup should fail)
        PAGE2 <- OK
        PAGE1 <- OK
        FULL <- (missing)

        With the only FULL backup deleted, a new PAGE backup has no valid
        ancestor on the current timeline and must fail outright.
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        full_id = self.backup_node(backup_dir, 'node', node)

        # PAGE1
        page1_id = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        # PAGE2
        page2_id = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        # Delete FULL: the whole incremental chain is now rootless
        shutil.rmtree(
            os.path.join(backup_dir, 'backups', 'node', full_id))

        # PAGE3 - expected to fail, not to silently pick a bad parent
        try:
            self.backup_node(
                backup_dir, 'node', node,
                backup_type='page', options=['--log-level-file=LOG'])
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because FULL backup is missing"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertTrue(
                'WARNING: Valid full backup on current timeline 1 is not found' in e.message and
                'ERROR: Create new full backup before an incremental one' in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        # index 2 is the failed PAGE3 entry (FULL was deleted from the catalog)
        self.assertEqual(
            self.show_pb(
                backup_dir, 'node')[2]['status'],
            'ERROR')
    # @unittest.skip("skip")
    def test_backup_with_less_privileges_role(self):
        """
        check permissions correctness from documentation:
        https://github.com/postgrespro/pg_probackup/blob/master/Documentation.md#configuring-the-database-cluster

        Creates a minimally-privileged 'backup' role per the documented
        GRANT lists (version-dependent) and verifies that every backup
        mode works with it, both from the primary and from a replica.
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=self.ptrack,
            initdb_params=['--data-checksums'],
            pg_options={
                'archive_timeout': '30s',
                'archive_mode': 'always',
                'checkpoint_timeout': '60s',
                'wal_level': 'logical'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_config(backup_dir, 'node', options=['--archive-timeout=60s'])
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            'postgres',
            'CREATE DATABASE backupdb')

        if self.ptrack:
            node.safe_psql(
                'backupdb',
                'CREATE EXTENSION ptrack')

        # The GRANT set differs per server version: backup control
        # functions were renamed/resignatured in 9.6, 10 and 15.
        # PG 9.5
        if self.get_version(node) < 90600:
            node.safe_psql(
                'backupdb',
                "CREATE ROLE backup WITH LOGIN; "
                "GRANT USAGE ON SCHEMA pg_catalog TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;")
        # PG 9.6
        elif self.get_version(node) > 90600 and self.get_version(node) < 100000:
            # NOTE(review): trailing "COMMIT;" has no matching "BEGIN;"
            # (unlike the >= 15 branch below) - confirm this is intentional
            node.safe_psql(
                'backupdb',
                "CREATE ROLE backup WITH LOGIN; "
                "GRANT USAGE ON SCHEMA pg_catalog TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; "
                "COMMIT;"
            )
        # >= 10 && < 15
        elif self.get_version(node) >= 100000 and self.get_version(node) < 150000:
            node.safe_psql(
                'backupdb',
                "CREATE ROLE backup WITH LOGIN; "
                "GRANT USAGE ON SCHEMA pg_catalog TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; "
                "COMMIT;"
            )
        # >= 15: pg_start_backup/pg_stop_backup renamed to
        # pg_backup_start/pg_backup_stop
        else:
            node.safe_psql(
                'backupdb',
                "BEGIN; "
                "CREATE ROLE backup WITH LOGIN; "
                "GRANT USAGE ON SCHEMA pg_catalog TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_start(text, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; "
                "COMMIT;"
            )

        # enable STREAM backup
        node.safe_psql(
            'backupdb',
            'ALTER ROLE backup WITH REPLICATION;')

        # FULL backup
        self.backup_node(
            backup_dir, 'node', node,
            datname='backupdb', options=['--stream', '-U', 'backup'])
        self.backup_node(
            backup_dir, 'node', node,
            datname='backupdb', options=['-U', 'backup'])

        # PAGE
        self.backup_node(
            backup_dir, 'node', node, backup_type='page',
            datname='backupdb', options=['-U', 'backup'])
        self.backup_node(
            backup_dir, 'node', node, backup_type='page', datname='backupdb',
            options=['--stream', '-U', 'backup'])

        # DELTA
        self.backup_node(
            backup_dir, 'node', node, backup_type='delta',
            datname='backupdb', options=['-U', 'backup'])
        self.backup_node(
            backup_dir, 'node', node, backup_type='delta',
            datname='backupdb', options=['--stream', '-U', 'backup'])

        # PTRACK
        if self.ptrack:
            self.backup_node(
                backup_dir, 'node', node, backup_type='ptrack',
                datname='backupdb', options=['-U', 'backup'])
            self.backup_node(
                backup_dir, 'node', node, backup_type='ptrack',
                datname='backupdb', options=['--stream', '-U', 'backup'])

        # replica support requires 9.6+; stop here on older servers
        if self.get_version(node) < 90600:
            return

        # Restore as replica
        replica = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'replica'))
        replica.cleanup()

        self.restore_node(backup_dir, 'node', replica)
        self.set_replica(node, replica)
        self.add_instance(backup_dir, 'replica', replica)
        self.set_config(
            backup_dir, 'replica',
            options=['--archive-timeout=120s', '--log-level-console=LOG'])
        self.set_archiving(backup_dir, 'replica', replica, replica=True)
        self.set_auto_conf(replica, {'hot_standby': 'on'})

        # freeze bgwriter to get rid of RUNNING XACTS records
        # bgwriter_pid = node.auxiliary_pids[ProcessType.BackgroundWriter][0]
        # gdb_checkpointer = self.gdb_attach(bgwriter_pid)

        # seed the replica's WAL archive with the primary's segments so
        # archive backups from the replica can find their WAL
        copy_tree(
            os.path.join(backup_dir, 'wal', 'node'),
            os.path.join(backup_dir, 'wal', 'replica'))

        replica.slow_start(replica=True)

        # self.switch_wal_segment(node)
        # self.switch_wal_segment(node)

        self.backup_node(
            backup_dir, 'replica', replica,
            datname='backupdb', options=['-U', 'backup'])

        # stream full backup from replica
        self.backup_node(
            backup_dir, 'replica', replica,
            datname='backupdb', options=['--stream', '-U', 'backup'])

        # self.switch_wal_segment(node)

        # PAGE backup from replica
        self.switch_wal_segment(node)
        self.backup_node(
            backup_dir, 'replica', replica, backup_type='page',
            datname='backupdb', options=['-U', 'backup', '--archive-timeout=30s'])

        self.backup_node(
            backup_dir, 'replica', replica, backup_type='page',
            datname='backupdb', options=['--stream', '-U', 'backup'])

        # DELTA backup from replica
        self.switch_wal_segment(node)
        self.backup_node(
            backup_dir, 'replica', replica, backup_type='delta',
            datname='backupdb', options=['-U', 'backup'])
        self.backup_node(
            backup_dir, 'replica', replica, backup_type='delta',
            datname='backupdb', options=['--stream', '-U', 'backup'])

        # PTRACK backup from replica
        if self.ptrack:
            self.switch_wal_segment(node)
            self.backup_node(
                backup_dir, 'replica', replica, backup_type='ptrack',
                datname='backupdb', options=['-U', 'backup'])
            self.backup_node(
                backup_dir, 'replica', replica, backup_type='ptrack',
                datname='backupdb', options=['--stream', '-U', 'backup'])
2382@unittest.skip("skip")2383def test_issue_132(self):2384"""2385https://github.com/postgrespro/pg_probackup/issues/132
2386"""
2387backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')2388node = self.make_simple_node(2389base_dir=os.path.join(self.module_name, self.fname, 'node'),2390set_replication=True,2391initdb_params=['--data-checksums'])2392
2393self.init_pb(backup_dir)2394self.add_instance(backup_dir, 'node', node)2395node.slow_start()2396
2397with node.connect("postgres") as conn:2398for i in range(50000):2399conn.execute(2400"CREATE TABLE t_{0} as select 1".format(i))2401conn.commit()2402
2403self.backup_node(2404backup_dir, 'node', node, options=['--stream'])2405
2406pgdata = self.pgdata_content(node.data_dir)2407
2408node.cleanup()2409self.restore_node(backup_dir, 'node', node)2410
2411pgdata_restored = self.pgdata_content(node.data_dir)2412self.compare_pgdata(pgdata, pgdata_restored)2413
2414exit(1)2415
    @unittest.skip("skip")
    def test_issue_132_1(self):
        """
        https://github.com/postgrespro/pg_probackup/issues/132

        Backups taken by an affected old binary are metadata-corrupt;
        a new binary must detect the corruption on validate/restore and
        still allow a forced restore with --force.
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        # TODO: check version of old binary, it should be 2.1.4, 2.1.5 or 2.2.1

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        # create a huge number of tiny relations to trigger the issue
        with node.connect("postgres") as conn:
            for i in range(30000):
                conn.execute(
                    "CREATE TABLE t_{0} as select 1".format(i))
                conn.commit()

        # both backups are taken with the old (buggy) binary
        full_id = self.backup_node(
            backup_dir, 'node', node, options=['--stream'], old_binary=True)

        delta_id = self.backup_node(
            backup_dir, 'node', node, backup_type='delta',
            options=['--stream'], old_binary=True)

        node.cleanup()

        # make sure that new binary can detect corruption
        try:
            self.validate_pb(backup_dir, 'node', backup_id=full_id)
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because FULL backup is CORRUPT"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'WARNING: Backup {0} is a victim of metadata corruption'.format(full_id),
                e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        # validating the DELTA child also reports the corrupt FULL parent
        try:
            self.validate_pb(backup_dir, 'node', backup_id=delta_id)
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because FULL backup is CORRUPT"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'WARNING: Backup {0} is a victim of metadata corruption'.format(full_id),
                e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        self.assertEqual(
            'CORRUPT', self.show_pb(backup_dir, 'node', full_id)['status'],
            'Backup STATUS should be "CORRUPT"')

        self.assertEqual(
            'ORPHAN', self.show_pb(backup_dir, 'node', delta_id)['status'],
            'Backup STATUS should be "ORPHAN"')

        # check that revalidation is working correctly
        try:
            self.restore_node(
                backup_dir, 'node', node, backup_id=delta_id)
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because FULL backup is CORRUPT"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'WARNING: Backup {0} is a victim of metadata corruption'.format(full_id),
                e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        self.assertEqual(
            'CORRUPT', self.show_pb(backup_dir, 'node', full_id)['status'],
            'Backup STATUS should be "CORRUPT"')

        self.assertEqual(
            'ORPHAN', self.show_pb(backup_dir, 'node', delta_id)['status'],
            'Backup STATUS should be "ORPHAN"')

        # check that '--no-validate' do not allow to restore ORPHAN backup
        # try:
        #     self.restore_node(
        #         backup_dir, 'node', node, backup_id=delta_id,
        #         options=['--no-validate'])
        #     # we should die here because exception is what we expect to happen
        #     self.assertEqual(
        #         1, 0,
        #         "Expecting Error because FULL backup is CORRUPT"
        #         "\n Output: {0} \n CMD: {1}".format(
        #             repr(self.output), self.cmd))
        # except ProbackupException as e:
        #     self.assertIn(
        #         'Insert data',
        #         e.message,
        #         '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
        #             repr(e.message), self.cmd))

        node.cleanup()

        # --force must allow restoring the corrupt FULL backup anyway
        output = self.restore_node(
            backup_dir, 'node', node, backup_id=full_id, options=['--force'])

        self.assertIn(
            'WARNING: Backup {0} has status: CORRUPT'.format(full_id),
            output)

        self.assertIn(
            'WARNING: Backup {0} is corrupt.'.format(full_id),
            output)

        self.assertIn(
            'WARNING: Backup {0} is not valid, restore is forced'.format(full_id),
            output)

        self.assertIn(
            'INFO: Restore of backup {0} completed.'.format(full_id),
            output)

        node.cleanup()

        # --force must also allow restoring the ORPHAN delta backup
        output = self.restore_node(
            backup_dir, 'node', node, backup_id=delta_id, options=['--force'])

        self.assertIn(
            'WARNING: Backup {0} is orphan.'.format(delta_id),
            output)

        self.assertIn(
            'WARNING: Backup {0} is not valid, restore is forced'.format(full_id),
            output)

        self.assertIn(
            'WARNING: Backup {0} is not valid, restore is forced'.format(delta_id),
            output)

        self.assertIn(
            'INFO: Restore of backup {0} completed.'.format(delta_id),
            output)
2572def test_note_sanity(self):2573"""2574test that adding note to backup works as expected
2575"""
2576backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')2577node = self.make_simple_node(2578base_dir=os.path.join(self.module_name, self.fname, 'node'),2579set_replication=True,2580initdb_params=['--data-checksums'])2581
2582self.init_pb(backup_dir)2583self.add_instance(backup_dir, 'node', node)2584self.set_archiving(backup_dir, 'node', node)2585node.slow_start()2586
2587# FULL backup2588backup_id = self.backup_node(2589backup_dir, 'node', node,2590options=['--stream', '--log-level-file=LOG', '--note=test_note'])2591
2592show_backups = self.show_pb(backup_dir, 'node')2593
2594print(self.show_pb(backup_dir, as_text=True, as_json=True))2595
2596self.assertEqual(show_backups[0]['note'], "test_note")2597
2598self.set_backup(backup_dir, 'node', backup_id, options=['--note=none'])2599
2600backup_meta = self.show_pb(backup_dir, 'node', backup_id)2601
2602self.assertNotIn(2603'note',2604backup_meta)2605
2606# @unittest.skip("skip")2607def test_parent_backup_made_by_newer_version(self):2608"""incremental backup with parent made by newer version"""2609node = self.make_simple_node(2610base_dir=os.path.join(self.module_name, self.fname, 'node'),2611initdb_params=['--data-checksums'])2612
2613backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')2614self.init_pb(backup_dir)2615self.add_instance(backup_dir, 'node', node)2616self.set_archiving(backup_dir, 'node', node)2617node.slow_start()2618
2619backup_id = self.backup_node(backup_dir, 'node', node)2620
2621control_file = os.path.join(2622backup_dir, "backups", "node", backup_id,2623"backup.control")2624
2625version = self.probackup_version2626fake_new_version = str(int(version.split('.')[0]) + 1) + '.0.0'2627
2628with open(control_file, 'r') as f:2629data = f.read();2630
2631data = data.replace(version, fake_new_version)2632
2633with open(control_file, 'w') as f:2634f.write(data);2635
2636try:2637self.backup_node(backup_dir, 'node', node, backup_type="page")2638# we should die here because exception is what we expect to happen2639self.assertEqual(26401, 0,2641"Expecting Error because incremental backup should not be possible "2642"if parent made by newer version.\n Output: {0} \n CMD: {1}".format(2643repr(self.output), self.cmd))2644except ProbackupException as e:2645self.assertIn(2646"pg_probackup do not guarantee to be forward compatible. "2647"Please upgrade pg_probackup binary.",2648e.message,2649"\n Unexpected Error Message: {0}\n CMD: {1}".format(2650repr(e.message), self.cmd))2651
2652self.assertEqual(2653self.show_pb(backup_dir, 'node')[1]['status'], "ERROR")2654
    # @unittest.skip("skip")
    def test_issue_289(self):
        """
        https://github.com/postgrespro/pg_probackup/issues/289

        A PAGE backup without any FULL backup must fail immediately with
        a clear error instead of waiting for WAL segments.
        """
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        # archiving is deliberately NOT configured here

        node.slow_start()

        try:
            self.backup_node(
                backup_dir, 'node', node,
                backup_type='page', options=['--archive-timeout=10s'])
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because full backup is missing"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            # must fail fast, not sit waiting for WAL
            self.assertNotIn(
                "INFO: Wait for WAL segment",
                e.message,
                "\n Unexpected Error Message: {0}\n CMD: {1}".format(
                    repr(e.message), self.cmd))

            self.assertIn(
                "ERROR: Create new full backup before an incremental one",
                e.message,
                "\n Unexpected Error Message: {0}\n CMD: {1}".format(
                    repr(e.message), self.cmd))

        self.assertEqual(
            self.show_pb(backup_dir, 'node')[0]['status'], "ERROR")
    # @unittest.skip("skip")
    def test_issue_290(self):
        """
        https://github.com/postgrespro/pg_probackup/issues/290

        If the instance's WAL archive directory is missing, an archive
        backup must fail immediately with a clear error instead of
        waiting for WAL segments.
        """
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)

        # simulate an inaccessible archive by removing the WAL directory
        os.rmdir(
            os.path.join(backup_dir, "wal", "node"))

        node.slow_start()

        try:
            self.backup_node(
                backup_dir, 'node', node,
                options=['--archive-timeout=10s'])
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because full backup is missing"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            # must fail fast, not sit waiting for WAL
            self.assertNotIn(
                "INFO: Wait for WAL segment",
                e.message,
                "\n Unexpected Error Message: {0}\n CMD: {1}".format(
                    repr(e.message), self.cmd))

            self.assertIn(
                "WAL archive directory is not accessible",
                e.message,
                "\n Unexpected Error Message: {0}\n CMD: {1}".format(
                    repr(e.message), self.cmd))

        self.assertEqual(
            self.show_pb(backup_dir, 'node')[0]['status'], "ERROR")
2741@unittest.skip("skip")2742def test_issue_203(self):2743"""2744https://github.com/postgrespro/pg_probackup/issues/203
2745"""
2746backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')2747node = self.make_simple_node(2748base_dir=os.path.join(self.module_name, self.fname, 'node'),2749set_replication=True,2750initdb_params=['--data-checksums'])2751
2752self.init_pb(backup_dir)2753self.add_instance(backup_dir, 'node', node)2754node.slow_start()2755
2756with node.connect("postgres") as conn:2757for i in range(1000000):2758conn.execute(2759"CREATE TABLE t_{0} as select 1".format(i))2760conn.commit()2761
2762full_id = self.backup_node(2763backup_dir, 'node', node, options=['--stream', '-j2'])2764
2765pgdata = self.pgdata_content(node.data_dir)2766
2767node_restored = self.make_simple_node(2768base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))2769node_restored.cleanup()2770
2771self.restore_node(backup_dir, 'node',2772node_restored, data_dir=node_restored.data_dir)2773
2774pgdata_restored = self.pgdata_content(node_restored.data_dir)2775self.compare_pgdata(pgdata, pgdata_restored)2776
    # @unittest.skip("skip")
    def test_issue_231(self):
        """
        https://github.com/postgrespro/pg_probackup/issues/231

        Two backups started within the same second must still receive
        distinct backup IDs (IDs are base36-encoded start timestamps).
        Backups here fail (the node is never started), but a failed
        attempt still allocates an ID that is reported in the error.
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'))

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)

        # NOTE(review): 'datadir' is assigned but never used below -
        # looks like a leftover; confirm before removing
        datadir = os.path.join(node.data_dir, '123')

        t0 = time()
        while True:
            with self.assertRaises(ProbackupException) as ctx:
                self.backup_node(backup_dir, 'node', node)
            # extract the allocated backup ID from the error message
            pb1 = re.search(r' backup ID: ([^\s,]+),', ctx.exception.message).groups()[0]

            t = time()
            # the ID is the base36-encoded start time; retry until we land
            # in the first half of a second so the next attempt can start
            # within the same wall-clock second
            if int(pb1, 36) == int(t) and t % 1 < 0.5:
                # ok, we have a chance to start next backup in same second
                break
            elif t - t0 > 20:
                # Oops, we are waiting for too long. Looks like this runner
                # is too slow. Lets skip the test.
                self.skipTest("runner is too slow")
            # sleep to the second's end so backup will not sleep for a second.
            sleep(1 - t % 1)

        with self.assertRaises(ProbackupException) as ctx:
            self.backup_node(backup_dir, 'node', node)
        pb2 = re.search(r' backup ID: ([^\s,]+),', ctx.exception.message).groups()[0]

        # same second, but the IDs still must differ
        self.assertNotEqual(pb1, pb2)
    def test_incr_backup_filenode_map(self):
        """
        https://github.com/postgrespro/pg_probackup/issues/320

        A delta backup taken after a REINDEX of a system catalog index
        (which changes the relation's filenode) must restore to a
        working cluster.
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # NOTE(review): 'node1' is created and cleaned up but never used
        # afterwards (the restore below goes into 'node', despite the
        # "restore into node1" comment) - confirm before removing
        node1 = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node1'),
            initdb_params=['--data-checksums'])
        node1.cleanup()

        node.pgbench_init(scale=5)

        # FULL backup
        backup_id = self.backup_node(backup_dir, 'node', node)

        # background load while the delta backups run; the process is
        # intentionally not waited on here
        pgbench = node.pgbench(
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
            options=['-T', '10', '-c', '1'])

        backup_id = self.backup_node(backup_dir, 'node', node, backup_type='delta')

        # REINDEX changes the filenode of a system catalog index
        node.safe_psql(
            'postgres',
            'reindex index pg_type_oid_index')

        backup_id = self.backup_node(
            backup_dir, 'node', node, backup_type='delta')

        # incremental restore into node1
        node.cleanup()

        self.restore_node(backup_dir, 'node', node)
        node.slow_start()

        # restored cluster must accept queries
        node.safe_psql(
            'postgres',
            'select 1')
2861# @unittest.skip("skip")2862def test_missing_wal_segment(self):2863""""""2864self._check_gdb_flag_or_skip_test()2865
2866backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')2867node = self.make_simple_node(2868base_dir=os.path.join(self.module_name, self.fname, 'node'),2869set_replication=True,2870ptrack_enable=self.ptrack,2871initdb_params=['--data-checksums'],2872pg_options={'archive_timeout': '30s'})2873
2874self.init_pb(backup_dir)2875self.add_instance(backup_dir, 'node', node)2876self.set_archiving(backup_dir, 'node', node)2877node.slow_start()2878
2879node.pgbench_init(scale=10)2880
2881node.safe_psql(2882'postgres',2883'CREATE DATABASE backupdb')2884
2885# get segments in pg_wal, sort then and remove all but the latest2886pg_wal_dir = os.path.join(node.data_dir, 'pg_wal')2887
2888if node.major_version >= 10:2889pg_wal_dir = os.path.join(node.data_dir, 'pg_wal')2890else:2891pg_wal_dir = os.path.join(node.data_dir, 'pg_xlog')2892
2893# Full backup in streaming mode2894gdb = self.backup_node(2895backup_dir, 'node', node, datname='backupdb',2896options=['--stream', '--log-level-file=INFO'], gdb=True)2897
2898# break at streaming start2899gdb.set_breakpoint('start_WAL_streaming')2900gdb.run_until_break()2901
2902# generate some more data2903node.pgbench_init(scale=3)2904
2905# remove redundant WAL segments in pg_wal2906files = os.listdir(pg_wal_dir)2907files.sort(reverse=True)2908
2909# leave first two files in list2910del files[:2]2911for filename in files:2912os.remove(os.path.join(pg_wal_dir, filename))2913
2914gdb.continue_execution_until_exit()2915
2916self.assertIn(2917'unexpected termination of replication stream: ERROR: requested WAL segment',2918gdb.output)2919
2920self.assertIn(2921'has already been removed',2922gdb.output)2923
2924self.assertIn(2925'ERROR: Interrupted during waiting for WAL streaming',2926gdb.output)2927
2928self.assertIn(2929'WARNING: A backup is in progress, stopping it',2930gdb.output)2931
2932# TODO: check the same for PAGE backup2933
2934# @unittest.skip("skip")2935def test_missing_replication_permission(self):2936""""""2937backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')2938node = self.make_simple_node(2939base_dir=os.path.join(self.module_name, self.fname, 'node'),2940set_replication=True,2941ptrack_enable=self.ptrack,2942initdb_params=['--data-checksums'])2943
2944self.init_pb(backup_dir)2945self.add_instance(backup_dir, 'node', node)2946# self.set_archiving(backup_dir, 'node', node)
2947node.slow_start()2948
2949# FULL backup2950self.backup_node(backup_dir, 'node', node, options=['--stream'])2951
2952# Create replica2953replica = self.make_simple_node(2954base_dir=os.path.join(self.module_name, self.fname, 'replica'))2955replica.cleanup()2956self.restore_node(backup_dir, 'node', replica)2957
2958# Settings for Replica2959self.set_replica(node, replica)2960replica.slow_start(replica=True)2961
2962node.safe_psql(2963'postgres',2964'CREATE DATABASE backupdb')2965
2966# PG 9.52967if self.get_version(node) < 90600:2968node.safe_psql(2969'backupdb',2970"CREATE ROLE backup WITH LOGIN; "2971"GRANT CONNECT ON DATABASE backupdb to backup; "2972"GRANT USAGE ON SCHEMA pg_catalog TO backup; "2973"GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "2974"GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; "2975"GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack2976"GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "2977"GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; "2978"GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; "2979"GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "2980"GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "2981"GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; "2982"GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; "2983"GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "2984"GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;")2985# PG 9.62986elif self.get_version(node) > 90600 and self.get_version(node) < 100000:2987node.safe_psql(2988'backupdb',2989"CREATE ROLE backup WITH LOGIN; "2990"GRANT CONNECT ON DATABASE backupdb to backup; "2991"GRANT USAGE ON SCHEMA pg_catalog TO backup; "2992"GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; "2993"GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "2994"GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack2995"GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "2996"GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; "2997"GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; "2998"GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "2999"GRANT EXECUTE ON FUNCTION 
pg_catalog.pg_is_in_recovery() TO backup; "3000"GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; "3001"GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; "3002"GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; "3003"GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "3004"GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; "3005"GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; "3006"GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "3007"GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;")3008# >= 10 && < 153009elif self.get_version(node) >= 100000 and self.get_version(node) < 150000:3010node.safe_psql(3011'backupdb',3012"CREATE ROLE backup WITH LOGIN; "3013"GRANT CONNECT ON DATABASE backupdb to backup; "3014"GRANT USAGE ON SCHEMA pg_catalog TO backup; "3015"GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; "3016"GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "3017"GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; "3018"GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack3019"GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "3020"GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "3021"GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "3022"GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; "3023"GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; "3024"GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; "3025"GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "3026"GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; "3027"GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; "3028"GRANT EXECUTE ON FUNCTION 
pg_catalog.txid_current_snapshot() TO backup; "3029"GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;"3030)3031# >= 153032else:3033node.safe_psql(3034'backupdb',3035"CREATE ROLE backup WITH LOGIN; "3036"GRANT CONNECT ON DATABASE backupdb to backup; "3037"GRANT USAGE ON SCHEMA pg_catalog TO backup; "3038"GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; "3039"GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "3040"GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; "3041"GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack3042"GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "3043"GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "3044"GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "3045"GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; "3046"GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_start(text, boolean) TO backup; "3047"GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) TO backup; "3048"GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "3049"GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; "3050"GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; "3051"GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "3052"GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;"3053)3054
3055if ProbackupTest.enterprise:3056node.safe_psql(3057"backupdb",3058"GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup; "3059"GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;")3060
3061sleep(2)3062replica.promote()3063
3064# Delta backup3065try:3066self.backup_node(3067backup_dir, 'node', replica, backup_type='delta',3068data_dir=replica.data_dir, datname='backupdb', options=['--stream', '-U', 'backup'])3069# we should die here because exception is what we expect to happen3070self.assertEqual(30711, 0,3072"Expecting Error because incremental backup should not be possible "3073"\n Output: {0} \n CMD: {1}".format(3074repr(self.output), self.cmd))3075except ProbackupException as e:3076# 9.5: ERROR: must be superuser or replication role to run a backup3077# >=9.6: FATAL: must be superuser or replication role to start walsender3078if self.pg_config_version < 160000:3079self.assertRegex(3080e.message,3081"ERROR: must be superuser or replication role to run a backup|"3082"FATAL: must be superuser or replication role to start walsender",3083"\n Unexpected Error Message: {0}\n CMD: {1}".format(3084repr(e.message), self.cmd))3085else:3086self.assertRegex(3087e.message,3088"FATAL: permission denied to start WAL sender\n"3089"DETAIL: Only roles with the REPLICATION",3090"\n Unexpected Error Message: {0}\n CMD: {1}".format(3091repr(e.message), self.cmd))3092
3093# @unittest.skip("skip")3094def test_missing_replication_permission_1(self):3095""""""3096backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')3097node = self.make_simple_node(3098base_dir=os.path.join(self.module_name, self.fname, 'node'),3099set_replication=True,3100ptrack_enable=self.ptrack,3101initdb_params=['--data-checksums'])3102
3103self.init_pb(backup_dir)3104self.add_instance(backup_dir, 'node', node)3105self.set_archiving(backup_dir, 'node', node)3106node.slow_start()3107
3108# FULL backup3109self.backup_node(backup_dir, 'node', node, options=['--stream'])3110
3111# Create replica3112replica = self.make_simple_node(3113base_dir=os.path.join(self.module_name, self.fname, 'replica'))3114replica.cleanup()3115self.restore_node(backup_dir, 'node', replica)3116
3117# Settings for Replica3118self.set_replica(node, replica)3119replica.slow_start(replica=True)3120
3121node.safe_psql(3122'postgres',3123'CREATE DATABASE backupdb')3124
3125# PG 9.53126if self.get_version(node) < 90600:3127node.safe_psql(3128'backupdb',3129"CREATE ROLE backup WITH LOGIN; "3130"GRANT CONNECT ON DATABASE backupdb to backup; "3131"GRANT USAGE ON SCHEMA pg_catalog TO backup; "3132"GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "3133"GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; "3134"GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack3135"GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "3136"GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; "3137"GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; "3138"GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "3139"GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "3140"GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; "3141"GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; "3142"GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "3143"GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;")3144# PG 9.63145elif self.get_version(node) > 90600 and self.get_version(node) < 100000:3146node.safe_psql(3147'backupdb',3148"CREATE ROLE backup WITH LOGIN; "3149"GRANT CONNECT ON DATABASE backupdb to backup; "3150"GRANT USAGE ON SCHEMA pg_catalog TO backup; "3151"GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; "3152"GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "3153"GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack3154"GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "3155"GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; "3156"GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; "3157"GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "3158"GRANT EXECUTE ON FUNCTION 
pg_catalog.pg_is_in_recovery() TO backup; "3159"GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; "3160"GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; "3161"GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; "3162"GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "3163"GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; "3164"GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; "3165"GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "3166"GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;"3167)3168# >= 10 && < 153169elif self.get_version(node) >= 100000 and self.get_version(node) < 150000:3170node.safe_psql(3171'backupdb',3172"CREATE ROLE backup WITH LOGIN; "3173"GRANT CONNECT ON DATABASE backupdb to backup; "3174"GRANT USAGE ON SCHEMA pg_catalog TO backup; "3175"GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; "3176"GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "3177"GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; "3178"GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack3179"GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "3180"GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "3181"GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "3182"GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; "3183"GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; "3184"GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; "3185"GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "3186"GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; "3187"GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; "3188"GRANT EXECUTE ON FUNCTION 
pg_catalog.txid_current_snapshot() TO backup; "3189"GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;"3190)3191# > 153192else:3193node.safe_psql(3194'backupdb',3195"CREATE ROLE backup WITH LOGIN; "3196"GRANT CONNECT ON DATABASE backupdb to backup; "3197"GRANT USAGE ON SCHEMA pg_catalog TO backup; "3198"GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; "3199"GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "3200"GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; "3201"GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack3202"GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "3203"GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "3204"GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "3205"GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; "3206"GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_start(text, boolean) TO backup; "3207"GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) TO backup; "3208"GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "3209"GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; "3210"GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; "3211"GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "3212"GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;"3213)3214
3215if ProbackupTest.enterprise:3216node.safe_psql(3217"backupdb",3218"GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup; "3219"GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;")3220
3221replica.promote()3222
3223# PAGE3224output = self.backup_node(3225backup_dir, 'node', replica, backup_type='page',3226data_dir=replica.data_dir, datname='backupdb', options=['-U', 'backup'],3227return_id=False)3228
3229self.assertIn(3230'WARNING: Valid full backup on current timeline 2 is not found, trying to look up on previous timelines',3231output)3232
3233# Messages before 143234# 'WARNING: could not connect to database backupdb: FATAL: must be superuser or replication role to start walsender'3235# Messages for >=143236# 'WARNING: could not connect to database backupdb: connection to server on socket "/tmp/.s.PGSQL.30983" failed: FATAL: must be superuser or replication role to start walsender'3237# 'WARNING: could not connect to database backupdb: connection to server at "localhost" (127.0.0.1), port 29732 failed: FATAL: must be superuser or replication role to start walsender'3238# OS-dependant messages:3239# 'WARNING: could not connect to database backupdb: connection to server at "localhost" (::1), port 12101 failed: Connection refused\n\tIs the server running on that host and accepting TCP/IP connections?\nconnection to server at "localhost" (127.0.0.1), port 12101 failed: FATAL: must be superuser or replication role to start walsender'3240
3241if self.pg_config_version < 160000:3242self.assertRegex(3243output,3244r'WARNING: could not connect to database backupdb:[\s\S]*?'3245r'FATAL: must be superuser or replication role to start walsender')3246else:3247self.assertRegex(3248output,3249r'WARNING: could not connect to database backupdb:[\s\S]*?'3250r'FATAL: permission denied to start WAL sender')3251
3252# @unittest.skip("skip")3253def test_basic_backup_default_transaction_read_only(self):3254""""""3255backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')3256node = self.make_simple_node(3257base_dir=os.path.join(self.module_name, self.fname, 'node'),3258set_replication=True,3259initdb_params=['--data-checksums'],3260pg_options={'default_transaction_read_only': 'on'})3261
3262self.init_pb(backup_dir)3263self.add_instance(backup_dir, 'node', node)3264self.set_archiving(backup_dir, 'node', node)3265node.slow_start()3266
3267try:3268node.safe_psql(3269'postgres',3270'create temp table t1()')3271# we should die here because exception is what we expect to happen3272self.assertEqual(32731, 0,3274"Expecting Error because incremental backup should not be possible "3275"\n Output: {0} \n CMD: {1}".format(3276repr(self.output), self.cmd))3277except QueryException as e:3278self.assertIn(3279"cannot execute CREATE TABLE in a read-only transaction",3280e.message,3281"\n Unexpected Error Message: {0}\n CMD: {1}".format(3282repr(e.message), self.cmd))3283
3284# FULL backup3285self.backup_node(3286backup_dir, 'node', node,3287options=['--stream'])3288
3289# DELTA backup3290self.backup_node(3291backup_dir, 'node', node, backup_type='delta', options=['--stream'])3292
3293# PAGE backup3294self.backup_node(backup_dir, 'node', node, backup_type='page')3295
3296# @unittest.skip("skip")3297def test_backup_atexit(self):3298""""""3299self._check_gdb_flag_or_skip_test()3300
3301backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')3302node = self.make_simple_node(3303base_dir=os.path.join(self.module_name, self.fname, 'node'),3304set_replication=True,3305ptrack_enable=self.ptrack,3306initdb_params=['--data-checksums'])3307
3308self.init_pb(backup_dir)3309self.add_instance(backup_dir, 'node', node)3310self.set_archiving(backup_dir, 'node', node)3311node.slow_start()3312
3313node.pgbench_init(scale=5)3314
3315# Full backup in streaming mode3316gdb = self.backup_node(3317backup_dir, 'node', node,3318options=['--stream', '--log-level-file=VERBOSE'], gdb=True)3319
3320# break at streaming start3321gdb.set_breakpoint('backup_data_file')3322gdb.run_until_break()3323
3324gdb.remove_all_breakpoints()3325gdb._execute('signal SIGINT')3326sleep(1)3327
3328self.assertEqual(3329self.show_pb(3330backup_dir, 'node')[0]['status'], 'ERROR')3331
3332with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f:3333log_content = f.read()3334#print(log_content)3335self.assertIn(3336'WARNING: A backup is in progress, stopping it.',3337log_content)3338
3339if self.get_version(node) < 150000:3340self.assertIn(3341'FROM pg_catalog.pg_stop_backup',3342log_content)3343else:3344self.assertIn(3345'FROM pg_catalog.pg_backup_stop',3346log_content)3347
3348self.assertIn(3349'setting its status to ERROR',3350log_content)3351
3352# @unittest.skip("skip")3353def test_pg_stop_backup_missing_permissions(self):3354""""""3355backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')3356node = self.make_simple_node(3357base_dir=os.path.join(self.module_name, self.fname, 'node'),3358set_replication=True,3359ptrack_enable=self.ptrack,3360initdb_params=['--data-checksums'])3361
3362self.init_pb(backup_dir)3363self.add_instance(backup_dir, 'node', node)3364self.set_archiving(backup_dir, 'node', node)3365node.slow_start()3366
3367node.pgbench_init(scale=5)3368
3369self.simple_bootstrap(node, 'backup')3370
3371if self.get_version(node) < 90600:3372node.safe_psql(3373'postgres',3374'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() FROM backup')3375elif self.get_version(node) > 90600 and self.get_version(node) < 100000:3376node.safe_psql(3377'postgres',3378'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) FROM backup')3379elif self.get_version(node) < 150000:3380node.safe_psql(3381'postgres',3382'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) FROM backup')3383else:3384node.safe_psql(3385'postgres',3386'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) FROM backup')3387
3388
3389# Full backup in streaming mode3390try:3391self.backup_node(3392backup_dir, 'node', node,3393options=['--stream', '-U', 'backup'])3394# we should die here because exception is what we expect to happen3395if self.get_version(node) < 150000:3396self.assertEqual(33971, 0,3398"Expecting Error because of missing permissions on pg_stop_backup "3399"\n Output: {0} \n CMD: {1}".format(3400repr(self.output), self.cmd))3401else:3402self.assertEqual(34031, 0,3404"Expecting Error because of missing permissions on pg_backup_stop "3405"\n Output: {0} \n CMD: {1}".format(3406repr(self.output), self.cmd))3407except ProbackupException as e:3408if self.get_version(node) < 150000:3409self.assertIn(3410"ERROR: permission denied for function pg_stop_backup",3411e.message,3412"\n Unexpected Error Message: {0}\n CMD: {1}".format(3413repr(e.message), self.cmd))3414else:3415self.assertIn(3416"ERROR: permission denied for function pg_backup_stop",3417e.message,3418"\n Unexpected Error Message: {0}\n CMD: {1}".format(3419repr(e.message), self.cmd))3420
3421self.assertIn(3422"query was: SELECT pg_catalog.txid_snapshot_xmax",3423e.message,3424"\n Unexpected Error Message: {0}\n CMD: {1}".format(3425repr(e.message), self.cmd))3426
3427# @unittest.skip("skip")3428def test_start_time(self):3429"""Test, that option --start-time allows to set backup_id and restore"""3430node = self.make_simple_node(3431base_dir=os.path.join(self.module_name, self.fname, 'node'),3432set_replication=True,3433ptrack_enable=self.ptrack,3434initdb_params=['--data-checksums'])3435
3436backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')3437self.init_pb(backup_dir)3438self.add_instance(backup_dir, 'node', node)3439self.set_archiving(backup_dir, 'node', node)3440node.slow_start()3441
3442# FULL backup3443startTime = int(time())3444self.backup_node(3445backup_dir, 'node', node, backup_type='full',3446options=['--stream', '--start-time={0}'.format(str(startTime))])3447# restore FULL backup by backup_id calculated from start-time3448self.restore_node(3449backup_dir, 'node',3450data_dir=os.path.join(self.tmp_path, self.module_name, self.fname, 'node_restored_full'),3451backup_id=base36enc(startTime))3452
3453#FULL backup with incorrect start time3454try:3455startTime = str(int(time()-100000))3456self.backup_node(3457backup_dir, 'node', node, backup_type='full',3458options=['--stream', '--start-time={0}'.format(startTime)])3459# we should die here because exception is what we expect to happen3460self.assertEqual(34611, 0,3462'Expecting Error because start time for new backup must be newer '3463'\n Output: {0} \n CMD: {1}'.format(3464repr(self.output), self.cmd))3465except ProbackupException as e:3466self.assertRegex(3467e.message,3468r"ERROR: Can't assign backup_id from requested start_time \(\w*\), this time must be later that backup \w*\n",3469"\n Unexpected Error Message: {0}\n CMD: {1}".format(3470repr(e.message), self.cmd))3471
3472# DELTA backup3473startTime = int(time())3474self.backup_node(3475backup_dir, 'node', node, backup_type='delta',3476options=['--stream', '--start-time={0}'.format(str(startTime))])3477# restore DELTA backup by backup_id calculated from start-time3478self.restore_node(3479backup_dir, 'node',3480data_dir=os.path.join(self.tmp_path, self.module_name, self.fname, 'node_restored_delta'),3481backup_id=base36enc(startTime))3482
3483# PAGE backup3484startTime = int(time())3485self.backup_node(3486backup_dir, 'node', node, backup_type='page',3487options=['--stream', '--start-time={0}'.format(str(startTime))])3488# restore PAGE backup by backup_id calculated from start-time3489self.restore_node(3490backup_dir, 'node',3491data_dir=os.path.join(self.tmp_path, self.module_name, self.fname, 'node_restored_page'),3492backup_id=base36enc(startTime))3493
3494# PTRACK backup3495if self.ptrack:3496node.safe_psql(3497'postgres',3498'create extension ptrack')3499
3500startTime = int(time())3501self.backup_node(3502backup_dir, 'node', node, backup_type='ptrack',3503options=['--stream', '--start-time={0}'.format(str(startTime))])3504# restore PTRACK backup by backup_id calculated from start-time3505self.restore_node(3506backup_dir, 'node',3507data_dir=os.path.join(self.tmp_path, self.module_name, self.fname, 'node_restored_ptrack'),3508backup_id=base36enc(startTime))3509
3510# @unittest.skip("skip")3511def test_start_time_few_nodes(self):3512"""Test, that we can synchronize backup_id's for different DBs"""3513node1 = self.make_simple_node(3514base_dir=os.path.join(self.module_name, self.fname, 'node1'),3515set_replication=True,3516ptrack_enable=self.ptrack,3517initdb_params=['--data-checksums'])3518
3519backup_dir1 = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup1')3520self.init_pb(backup_dir1)3521self.add_instance(backup_dir1, 'node1', node1)3522self.set_archiving(backup_dir1, 'node1', node1)3523node1.slow_start()3524
3525node2 = self.make_simple_node(3526base_dir=os.path.join(self.module_name, self.fname, 'node2'),3527set_replication=True,3528ptrack_enable=self.ptrack,3529initdb_params=['--data-checksums'])3530
3531backup_dir2 = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup2')3532self.init_pb(backup_dir2)3533self.add_instance(backup_dir2, 'node2', node2)3534self.set_archiving(backup_dir2, 'node2', node2)3535node2.slow_start()3536
3537# FULL backup3538startTime = str(int(time()))3539self.backup_node(3540backup_dir1, 'node1', node1, backup_type='full',3541options=['--stream', '--start-time={0}'.format(startTime)])3542self.backup_node(3543backup_dir2, 'node2', node2, backup_type='full',3544options=['--stream', '--start-time={0}'.format(startTime)])3545show_backup1 = self.show_pb(backup_dir1, 'node1')[0]3546show_backup2 = self.show_pb(backup_dir2, 'node2')[0]3547self.assertEqual(show_backup1['id'], show_backup2['id'])3548
3549# DELTA backup3550startTime = str(int(time()))3551self.backup_node(3552backup_dir1, 'node1', node1, backup_type='delta',3553options=['--stream', '--start-time={0}'.format(startTime)])3554self.backup_node(3555backup_dir2, 'node2', node2, backup_type='delta',3556options=['--stream', '--start-time={0}'.format(startTime)])3557show_backup1 = self.show_pb(backup_dir1, 'node1')[1]3558show_backup2 = self.show_pb(backup_dir2, 'node2')[1]3559self.assertEqual(show_backup1['id'], show_backup2['id'])3560
3561# PAGE backup3562startTime = str(int(time()))3563self.backup_node(3564backup_dir1, 'node1', node1, backup_type='page',3565options=['--stream', '--start-time={0}'.format(startTime)])3566self.backup_node(3567backup_dir2, 'node2', node2, backup_type='page',3568options=['--stream', '--start-time={0}'.format(startTime)])3569show_backup1 = self.show_pb(backup_dir1, 'node1')[2]3570show_backup2 = self.show_pb(backup_dir2, 'node2')[2]3571self.assertEqual(show_backup1['id'], show_backup2['id'])3572
3573# PTRACK backup3574if self.ptrack:3575node1.safe_psql(3576'postgres',3577'create extension ptrack')3578node2.safe_psql(3579'postgres',3580'create extension ptrack')3581
3582startTime = str(int(time()))3583self.backup_node(3584backup_dir1, 'node1', node1, backup_type='ptrack',3585options=['--stream', '--start-time={0}'.format(startTime)])3586self.backup_node(3587backup_dir2, 'node2', node2, backup_type='ptrack',3588options=['--stream', '--start-time={0}'.format(startTime)])3589show_backup1 = self.show_pb(backup_dir1, 'node1')[3]3590show_backup2 = self.show_pb(backup_dir2, 'node2')[3]3591self.assertEqual(show_backup1['id'], show_backup2['id'])3592
3593def test_regress_issue_585(self):3594"""https://github.com/postgrespro/pg_probackup/issues/585"""3595node = self.make_simple_node(3596base_dir=os.path.join(self.module_name, self.fname, 'node'),3597set_replication=True,3598initdb_params=['--data-checksums'])3599
3600backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')3601self.init_pb(backup_dir)3602self.add_instance(backup_dir, 'node', node)3603node.slow_start()3604
3605# create couple of files that looks like db files3606with open(os.path.join(node.data_dir, 'pg_multixact/offsets/1000'),'wb') as f:3607pass3608with open(os.path.join(node.data_dir, 'pg_multixact/members/1000'),'wb') as f:3609pass3610
3611self.backup_node(3612backup_dir, 'node', node, backup_type='full',3613options=['--stream'])3614
3615output = self.backup_node(3616backup_dir, 'node', node, backup_type='delta',3617options=['--stream'],3618return_id=False,3619)3620self.assertNotRegex(output, r'WARNING: [^\n]* was stored as .* but looks like')3621
3622node.cleanup()3623
3624output = self.restore_node(backup_dir, 'node', node)3625self.assertNotRegex(output, r'WARNING: [^\n]* was stored as .* but looks like')3626
3627def test_2_delta_backups(self):3628"""https://github.com/postgrespro/pg_probackup/issues/596"""3629node = self.make_simple_node('node',3630initdb_params=['--data-checksums'])3631
3632backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')3633
3634self.init_pb(backup_dir)3635self.add_instance(backup_dir, 'node', node)3636# self.set_archiving(backup_dir, 'node', node)3637node.slow_start()3638
3639# FULL3640full_backup_id = self.backup_node(backup_dir, 'node', node, options=["--stream"])3641
3642# delta backup mode3643delta_backup_id1 = self.backup_node(3644backup_dir, 'node', node, backup_type="delta", options=["--stream"])3645
3646delta_backup_id2 = self.backup_node(3647backup_dir, 'node', node, backup_type="delta", options=["--stream"])3648
3649# postgresql.conf and pg_hba.conf shouldn't be copied3650conf_file = os.path.join(backup_dir, 'backups', 'node', delta_backup_id1, 'database', 'postgresql.conf')3651self.assertFalse(3652os.path.exists(conf_file),3653"File should not exist: {0}".format(conf_file))3654conf_file = os.path.join(backup_dir, 'backups', 'node', delta_backup_id2, 'database', 'postgresql.conf')3655print(conf_file)3656self.assertFalse(3657os.path.exists(conf_file),3658"File should not exist: {0}".format(conf_file))3659