# backup_test.py, pg_probackup test suite (3658 lines, 145.8 KB)
import unittest
import os
import re
from time import sleep, time
from .helpers.ptrack_helpers import base36enc, ProbackupTest, ProbackupException
import shutil
from distutils.dir_util import copy_tree
from testgres import ProcessType, QueryException
import subprocess


class BackupTest(ProbackupTest, unittest.TestCase):

    def test_full_backup(self):
        """
        Just test full backup with at least two segments
        """
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            initdb_params=['--data-checksums'],
            # We need to write a lot, so let's speed things up a bit.
            pg_options={"fsync": "off", "synchronous_commit": "off"})

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # Fill with data.
        # Have to use scale=100 to create a second segment.
        node.pgbench_init(scale=100, no_vacuum=True)
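        # Note (added): PostgreSQL splits large relations into 1GB segment
        # files (e.g. 16384, 16384.1). pgbench scale=100 creates roughly ten
        # million pgbench_accounts rows (on the order of 1.3GB, an estimate,
        # not measured here), which pushes the table into a second segment.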

        # FULL
        backup_id = self.backup_node(backup_dir, 'node', node)

        out = self.validate_pb(backup_dir, 'node', backup_id)
        self.assertIn(
            "INFO: Backup {0} is valid".format(backup_id),
            out)

    def test_full_backup_stream(self):
        """
        Just test full backup with at least two segments in stream mode
        """
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            initdb_params=['--data-checksums'],
            # We need to write a lot, so let's speed things up a bit.
            pg_options={"fsync": "off", "synchronous_commit": "off"})

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        # Fill with data.
        # Have to use scale=100 to create a second segment.
        node.pgbench_init(scale=100, no_vacuum=True)

        # FULL
        backup_id = self.backup_node(backup_dir, 'node', node,
                                     options=["--stream"])

        out = self.validate_pb(backup_dir, 'node', backup_id)
        self.assertIn(
            "INFO: Backup {0} is valid".format(backup_id),
            out)

    # @unittest.skip("skip")
    # @unittest.expectedFailure
    # PGPRO-707
    def test_backup_modes_archive(self):
        """standard backup modes with ARCHIVE WAL method"""
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        full_backup_id = self.backup_node(backup_dir, 'node', node)
        show_backup = self.show_pb(backup_dir, 'node')[0]

        self.assertEqual(show_backup['status'], "OK")
        self.assertEqual(show_backup['backup-mode'], "FULL")

        # postmaster.pid and postmaster.opts shouldn't be copied
        db_dir = os.path.join(
            backup_dir, "backups", 'node', full_backup_id, "database")

        for f in os.listdir(db_dir):
            if os.path.isfile(os.path.join(db_dir, f)):
                self.assertNotIn(
                    f, ("postmaster.pid", "postmaster.opts"),
                    "{0} should not be copied by backup".format(f))
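        # Note (added): pg_probackup deliberately skips postmaster.pid and
        # postmaster.opts; restoring them could make a freshly restored data
        # directory look like it belongs to a running server.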

        # page backup mode
        page_backup_id = self.backup_node(
            backup_dir, 'node', node, backup_type="page")

        show_backup_1 = self.show_pb(backup_dir, 'node')[1]
        self.assertEqual(show_backup_1['status'], "OK")
        self.assertEqual(show_backup_1['backup-mode'], "PAGE")

        # delta backup mode
        delta_backup_id = self.backup_node(
            backup_dir, 'node', node, backup_type="delta")

        show_backup_2 = self.show_pb(backup_dir, 'node')[2]
        self.assertEqual(show_backup_2['status'], "OK")
        self.assertEqual(show_backup_2['backup-mode'], "DELTA")

        # Check parent backup
        self.assertEqual(
            full_backup_id,
            self.show_pb(
                backup_dir, 'node',
                backup_id=show_backup_1['id'])["parent-backup-id"])

        self.assertEqual(
            page_backup_id,
            self.show_pb(
                backup_dir, 'node',
                backup_id=show_backup_2['id'])["parent-backup-id"])
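        # Note (added): each incremental backup records its direct parent, so
        # the expected chain here is FULL <- PAGE <- DELTA.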

    # @unittest.skip("skip")
    def test_smooth_checkpoint(self):
        """full backup with smooth checkpoint"""
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        self.backup_node(
            backup_dir, 'node', node,
            options=["-C"])
        self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK")
        node.stop()

    # @unittest.skip("skip")
    def test_incremental_backup_without_full(self):
        """page backup without validated full backup"""
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        try:
            self.backup_node(backup_dir, 'node', node, backup_type="page")
            # this point should be unreachable: the backup above must fail
            self.assertEqual(
                1, 0,
                "Expecting Error because page backup should not be possible "
                "without valid full backup.\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertTrue(
                "WARNING: Valid full backup on current timeline 1 is not found" in e.message and
                "ERROR: Create new full backup before an incremental one" in e.message,
                "\n Unexpected Error Message: {0}\n CMD: {1}".format(
                    repr(e.message), self.cmd))
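        # Note (added): the failed PAGE attempt still leaves an entry in the
        # backup catalog, just with status ERROR, which is what the assertion
        # below checks.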

        self.assertEqual(
            self.show_pb(backup_dir, 'node')[0]['status'],
            "ERROR")

    # @unittest.skip("skip")
    def test_incremental_backup_corrupt_full(self):
        """page-level backup with corrupted full backup"""
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        backup_id = self.backup_node(backup_dir, 'node', node)
        file = os.path.join(
            backup_dir, "backups", "node", backup_id,
            "database", "postgresql.conf")
        os.remove(file)

        try:
            self.validate_pb(backup_dir, 'node')
            # this point should be unreachable: the validation above must fail
            self.assertEqual(
                1, 0,
                "Expecting Error because of validation of corrupted backup.\n"
                " Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertTrue(
                "INFO: Validate backups of the instance 'node'" in e.message and
                "WARNING: Backup file" in e.message and "is not found" in e.message and
                "WARNING: Backup {0} data files are corrupted".format(
                    backup_id) in e.message and
                "WARNING: Some backups are not valid" in e.message,
                "\n Unexpected Error Message: {0}\n CMD: {1}".format(
                    repr(e.message), self.cmd))

        try:
            self.backup_node(backup_dir, 'node', node, backup_type="page")
            # this point should be unreachable: the backup above must fail
            self.assertEqual(
                1, 0,
                "Expecting Error because page backup should not be possible "
                "without valid full backup.\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertTrue(
                "WARNING: Valid full backup on current timeline 1 is not found" in e.message and
                "ERROR: Create new full backup before an incremental one" in e.message,
                "\n Unexpected Error Message: {0}\n CMD: {1}".format(
                    repr(e.message), self.cmd))

        self.assertEqual(
            self.show_pb(backup_dir, 'node', backup_id)['status'], "CORRUPT")
        self.assertEqual(
            self.show_pb(backup_dir, 'node')[1]['status'], "ERROR")

    # @unittest.skip("skip")
    def test_delta_threads_stream(self):
        """multi-threaded delta backup in stream mode"""
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        self.backup_node(
            backup_dir, 'node', node, backup_type="full",
            options=["-j", "4", "--stream"])

        self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK")
        self.backup_node(
            backup_dir, 'node', node,
            backup_type="delta", options=["-j", "4", "--stream"])
        self.assertEqual(self.show_pb(backup_dir, 'node')[1]['status'], "OK")

    # @unittest.skip("skip")
    def test_page_detect_corruption(self):
        """make node, corrupt some page, check that backup failed"""

        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=self.ptrack,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        self.backup_node(
            backup_dir, 'node', node,
            backup_type="full", options=["-j", "4", "--stream"])

        node.safe_psql(
            "postgres",
            "create table t_heap as select 1 as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,1000) i")

        node.safe_psql(
            "postgres",
            "CHECKPOINT")

        heap_path = node.safe_psql(
            "postgres",
            "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip()

        path = os.path.join(node.data_dir, heap_path)
        with open(path, "rb+", 0) as f:
            f.seek(9000)
            f.write(b"bla")
            f.flush()
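        # Note (added): with the default BLCKSZ of 8192 bytes, offset 9000
        # falls inside block 1 (bytes 8192..16383), so this small overwrite is
        # enough to invalidate that page's checksum.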

        try:
            self.backup_node(
                backup_dir, 'node', node, backup_type="full",
                options=["-j", "4", "--stream", "--log-level-file=VERBOSE"])
            self.assertEqual(
                1, 0,
                "Expecting Error because data file is corrupted"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'ERROR: Corruption detected in file "{0}", '
                'block 1: page verification failed, calculated checksum'.format(path),
                e.message)

        self.assertEqual(
            self.show_pb(backup_dir, 'node')[1]['status'],
            'ERROR',
            "Backup Status should be ERROR")

    # @unittest.skip("skip")
    def test_backup_detect_corruption(self):
        """make node, corrupt some page, check that backup failed"""
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=self.ptrack,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        if self.ptrack:
            node.safe_psql(
                "postgres",
                "create extension ptrack")

        self.backup_node(
            backup_dir, 'node', node,
            backup_type="full", options=["-j", "4", "--stream"])

        node.safe_psql(
            "postgres",
            "create table t_heap as select 1 as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,10000) i")

        heap_path = node.safe_psql(
            "postgres",
            "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip()

        self.backup_node(
            backup_dir, 'node', node,
            backup_type="full", options=["-j", "4", "--stream"])

        node.safe_psql(
            "postgres",
            "select count(*) from t_heap")

        node.safe_psql(
            "postgres",
            "update t_heap set id = id + 10000")

        node.stop()
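        # Note (added): stopping the node before corrupting the file ensures
        # the damaged page cannot be served from (clean) shared buffers; after
        # restart the backup has to read it from disk and hit the checksum
        # mismatch.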

        heap_fullpath = os.path.join(node.data_dir, heap_path)

        with open(heap_fullpath, "rb+", 0) as f:
            f.seek(9000)
            f.write(b"bla")
            f.flush()

        node.slow_start()

        try:
            self.backup_node(
                backup_dir, 'node', node,
                backup_type="full", options=["-j", "4", "--stream"])
            # this point should be unreachable: the backup above must fail
            self.assertEqual(
                1, 0,
                "Expecting Error because of block corruption"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'ERROR: Corruption detected in file "{0}", block 1: '
                'page verification failed, calculated checksum'.format(
                    heap_fullpath),
                e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        sleep(1)

        try:
            self.backup_node(
                backup_dir, 'node', node,
                backup_type="delta", options=["-j", "4", "--stream"])
            # this point should be unreachable: the backup above must fail
            self.assertEqual(
                1, 0,
                "Expecting Error because of block corruption"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'ERROR: Corruption detected in file "{0}", block 1: '
                'page verification failed, calculated checksum'.format(
                    heap_fullpath),
                e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        sleep(1)

        try:
            self.backup_node(
                backup_dir, 'node', node,
                backup_type="page", options=["-j", "4", "--stream"])
            # this point should be unreachable: the backup above must fail
            self.assertEqual(
                1, 0,
                "Expecting Error because of block corruption"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'ERROR: Corruption detected in file "{0}", block 1: '
                'page verification failed, calculated checksum'.format(
                    heap_fullpath),
                e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        sleep(1)

        if self.ptrack:
            try:
                self.backup_node(
                    backup_dir, 'node', node,
                    backup_type="ptrack", options=["-j", "4", "--stream"])
                # this point should be unreachable: the backup above must fail
                self.assertEqual(
                    1, 0,
                    "Expecting Error because of block corruption"
                    "\n Output: {0} \n CMD: {1}".format(
                        repr(self.output), self.cmd))
            except ProbackupException as e:
                self.assertIn(
                    'ERROR: Corruption detected in file "{0}", block 1: '
                    'page verification failed, calculated checksum'.format(
                        heap_fullpath),
                    e.message,
                    '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                        repr(e.message), self.cmd))

    # @unittest.skip("skip")
    def test_backup_detect_invalid_block_header(self):
        """make node, corrupt some page, check that backup failed"""
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=self.ptrack,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        if self.ptrack:
            node.safe_psql(
                "postgres",
                "create extension ptrack")

        node.safe_psql(
            "postgres",
            "create table t_heap as select 1 as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,10000) i")

        heap_path = node.safe_psql(
            "postgres",
            "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip()

        self.backup_node(
            backup_dir, 'node', node,
            backup_type="full", options=["-j", "4", "--stream"])

        node.safe_psql(
            "postgres",
            "select count(*) from t_heap")

        node.safe_psql(
            "postgres",
            "update t_heap set id = id + 10000")

        node.stop()

        heap_fullpath = os.path.join(node.data_dir, heap_path)
        with open(heap_fullpath, "rb+", 0) as f:
            f.seek(8193)
            f.write(b"blahblahblahblah")
            f.flush()
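        # Note (added): offset 8193 is byte 1 of block 1, i.e. inside the
        # 24-byte page header (pd_lsn, pd_checksum, pd_flags, pd_lower, ...),
        # so page validation fails on the header sanity check ("page header
        # invalid, pd_lower") before any checksum is computed.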

        node.slow_start()

#        self.backup_node(
#            backup_dir, 'node', node,
#            backup_type="full", options=["-j", "4", "--stream"])

        try:
            self.backup_node(
                backup_dir, 'node', node,
                backup_type="full", options=["-j", "4", "--stream"])
            # this point should be unreachable: the backup above must fail
            self.assertEqual(
                1, 0,
                "Expecting Error because of block corruption"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'ERROR: Corruption detected in file "{0}", block 1: '
                'page header invalid, pd_lower'.format(heap_fullpath),
                e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        sleep(1)

        try:
            self.backup_node(
                backup_dir, 'node', node,
                backup_type="delta", options=["-j", "4", "--stream"])
            # this point should be unreachable: the backup above must fail
            self.assertEqual(
                1, 0,
                "Expecting Error because of block corruption"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'ERROR: Corruption detected in file "{0}", block 1: '
                'page header invalid, pd_lower'.format(heap_fullpath),
                e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        sleep(1)

        try:
            self.backup_node(
                backup_dir, 'node', node,
                backup_type="page", options=["-j", "4", "--stream"])
            # this point should be unreachable: the backup above must fail
            self.assertEqual(
                1, 0,
                "Expecting Error because of block corruption"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'ERROR: Corruption detected in file "{0}", block 1: '
                'page header invalid, pd_lower'.format(heap_fullpath),
                e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        sleep(1)

        if self.ptrack:
            try:
                self.backup_node(
                    backup_dir, 'node', node,
                    backup_type="ptrack", options=["-j", "4", "--stream"])
                # this point should be unreachable: the backup above must fail
                self.assertEqual(
                    1, 0,
                    "Expecting Error because of block corruption"
                    "\n Output: {0} \n CMD: {1}".format(
                        repr(self.output), self.cmd))
            except ProbackupException as e:
                self.assertIn(
                    'ERROR: Corruption detected in file "{0}", block 1: '
                    'page header invalid, pd_lower'.format(heap_fullpath),
                    e.message,
                    '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                        repr(e.message), self.cmd))

    # @unittest.skip("skip")
    def test_backup_detect_missing_permissions(self):
        """make node, corrupt some page, check that backup failed"""
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=self.ptrack,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        if self.ptrack:
            node.safe_psql(
                "postgres",
                "create extension ptrack")

        node.safe_psql(
            "postgres",
            "create table t_heap as select 1 as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,10000) i")

        heap_path = node.safe_psql(
            "postgres",
            "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip()

        self.backup_node(
            backup_dir, 'node', node,
            backup_type="full", options=["-j", "4", "--stream"])

        node.safe_psql(
            "postgres",
            "select count(*) from t_heap")

        node.safe_psql(
            "postgres",
            "update t_heap set id = id + 10000")

        node.stop()

        heap_fullpath = os.path.join(node.data_dir, heap_path)
        with open(heap_fullpath, "rb+", 0) as f:
            f.seek(8193)
            f.write(b"blahblahblahblah")
            f.flush()

        node.slow_start()

#        self.backup_node(
#            backup_dir, 'node', node,
#            backup_type="full", options=["-j", "4", "--stream"])

        try:
            self.backup_node(
                backup_dir, 'node', node,
                backup_type="full", options=["-j", "4", "--stream"])
            # this point should be unreachable: the backup above must fail
            self.assertEqual(
                1, 0,
                "Expecting Error because of block corruption"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'ERROR: Corruption detected in file "{0}", block 1: '
                'page header invalid, pd_lower'.format(heap_fullpath),
                e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        sleep(1)

        try:
            self.backup_node(
                backup_dir, 'node', node,
                backup_type="delta", options=["-j", "4", "--stream"])
            # this point should be unreachable: the backup above must fail
            self.assertEqual(
                1, 0,
                "Expecting Error because of block corruption"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'ERROR: Corruption detected in file "{0}", block 1: '
                'page header invalid, pd_lower'.format(heap_fullpath),
                e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        sleep(1)

        try:
            self.backup_node(
                backup_dir, 'node', node,
                backup_type="page", options=["-j", "4", "--stream"])
            # this point should be unreachable: the backup above must fail
            self.assertEqual(
                1, 0,
                "Expecting Error because of block corruption"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'ERROR: Corruption detected in file "{0}", block 1: '
                'page header invalid, pd_lower'.format(heap_fullpath),
                e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        sleep(1)

        if self.ptrack:
            try:
                self.backup_node(
                    backup_dir, 'node', node,
                    backup_type="ptrack", options=["-j", "4", "--stream"])
                # this point should be unreachable: the backup above must fail
                self.assertEqual(
                    1, 0,
                    "Expecting Error because of block corruption"
                    "\n Output: {0} \n CMD: {1}".format(
                        repr(self.output), self.cmd))
            except ProbackupException as e:
                self.assertIn(
                    'ERROR: Corruption detected in file "{0}", block 1: '
                    'page header invalid, pd_lower'.format(heap_fullpath),
                    e.message,
                    '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                        repr(e.message), self.cmd))

    # @unittest.skip("skip")
    def test_backup_truncate_misaligned(self):
        """
        make node, truncate file to a size that is not a multiple of BLCKSZ,
        take backup
        """
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "create table t_heap as select 1 as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,100000) i")

        node.safe_psql(
            "postgres",
            "CHECKPOINT;")

        heap_path = node.safe_psql(
            "postgres",
            "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip()

        heap_size = node.safe_psql(
            "postgres",
            "select pg_relation_size('t_heap')")

        with open(os.path.join(node.data_dir, heap_path), "rb+", 0) as f:
            f.truncate(int(heap_size) - 4096)
            f.flush()
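        # Note (added): pg_relation_size() returns a multiple of BLCKSZ
        # (8192), so cutting 4096 bytes off the end leaves a file whose size
        # is not block-aligned; the backup below is expected to warn about the
        # invalid file size rather than fail outright.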

        output = self.backup_node(
            backup_dir, 'node', node, backup_type="full",
            options=["-j", "4", "--stream"], return_id=False)

        self.assertIn("WARNING: File", output)
        self.assertIn("invalid file size", output)

    # @unittest.skip("skip")
    def test_tablespace_in_pgdata_pgpro_1376(self):
        """PGPRO-1376"""
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        self.create_tblspace_in_node(
            node, 'tblspace1',
            tblspc_path=(
                os.path.join(
                    node.data_dir, 'somedirectory', '100500'))
            )

        self.create_tblspace_in_node(
            node, 'tblspace2',
            tblspc_path=(os.path.join(node.data_dir))
            )
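        # Note (added): both tablespaces live physically inside PGDATA, which
        # is the PGPRO-1376 scenario: a naive backup could copy the same
        # relation files twice, once via PGDATA and once via the tablespace.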

        node.safe_psql(
            "postgres",
            "create table t_heap1 tablespace tblspace1 as select 1 as id, "
            "md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,1000) i")

        node.safe_psql(
            "postgres",
            "create table t_heap2 tablespace tblspace2 as select 1 as id, "
            "md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,1000) i")

        backup_id_1 = self.backup_node(
            backup_dir, 'node', node, backup_type="full",
            options=["-j", "4", "--stream"])

        node.safe_psql(
            "postgres",
            "drop table t_heap2")
        node.safe_psql(
            "postgres",
            "drop tablespace tblspace2")

        self.backup_node(
            backup_dir, 'node', node, backup_type="full",
            options=["-j", "4", "--stream"])

        pgdata = self.pgdata_content(node.data_dir)

        relfilenode = node.safe_psql(
            "postgres",
            "select 't_heap1'::regclass::oid"
            ).decode('utf-8').rstrip()

        found = []
        for root, dirs, files in os.walk(os.path.join(
                backup_dir, 'backups', 'node', backup_id_1)):
            for file in files:
                if file == relfilenode:
                    found.append(os.path.join(root, file))

        # We expect that the relfilenode is encountered only once
        if len(found) > 1:
            self.assertEqual(
                1, 0,
                "Following file copied twice by backup:\n {0}".format(
                    "\n".join(found)))

        node.cleanup()

        self.restore_node(
            backup_dir, 'node', node, options=["-j", "4"])

        if self.paranoia:
            pgdata_restored = self.pgdata_content(node.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)

    # @unittest.skip("skip")
    def test_basic_tablespace_handling(self):
        """
        make node, take full backup, check that restore with
        tablespace mapping will end with error, take delta backup,
        check that restore with tablespace mapping will end with
        success
        """
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        backup_id = self.backup_node(
            backup_dir, 'node', node, backup_type="full",
            options=["-j", "4", "--stream"])

        tblspace1_old_path = self.get_tblspace_path(node, 'tblspace1_old')
        tblspace2_old_path = self.get_tblspace_path(node, 'tblspace2_old')

        self.create_tblspace_in_node(
            node, 'some_lame_tablespace')

        self.create_tblspace_in_node(
            node, 'tblspace1',
            tblspc_path=tblspace1_old_path)

        self.create_tblspace_in_node(
            node, 'tblspace2',
            tblspc_path=tblspace2_old_path)

        node.safe_psql(
            "postgres",
            "create table t_heap_lame tablespace some_lame_tablespace "
            "as select 1 as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,1000) i")

        node.safe_psql(
            "postgres",
            "create table t_heap2 tablespace tblspace2 as select 1 as id, "
            "md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,1000) i")

        tblspace1_new_path = self.get_tblspace_path(node, 'tblspace1_new')
        tblspace2_new_path = self.get_tblspace_path(node, 'tblspace2_new')

        node_restored = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
        node_restored.cleanup()

        try:
            self.restore_node(
                backup_dir, 'node', node_restored,
                options=[
                    "-j", "4",
                    "-T", "{0}={1}".format(
                        tblspace1_old_path, tblspace1_new_path),
                    "-T", "{0}={1}".format(
                        tblspace2_old_path, tblspace2_new_path)])
            # this point should be unreachable: the restore above must fail
            self.assertEqual(
                1, 0,
                "Expecting Error because tablespace mapping is incorrect"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'ERROR: Backup {0} has no tablespaceses, '
                'nothing to remap'.format(backup_id),
                e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))
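        # Note (added): the remap fails because backup_id was taken before any
        # tablespace existed; -T can only remap tablespaces actually recorded
        # in the backup being restored. (The "tablespaceses" spelling matches
        # the error text emitted by pg_probackup.)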

        node.safe_psql(
            "postgres",
            "drop table t_heap_lame")

        node.safe_psql(
            "postgres",
            "drop tablespace some_lame_tablespace")

        self.backup_node(
            backup_dir, 'node', node, backup_type="delta",
            options=["-j", "4", "--stream"])

        self.restore_node(
            backup_dir, 'node', node_restored,
            options=[
                "-j", "4",
                "-T", "{0}={1}".format(
                    tblspace1_old_path, tblspace1_new_path),
                "-T", "{0}={1}".format(
                    tblspace2_old_path, tblspace2_new_path)])

        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        if self.paranoia:
            pgdata_restored = self.pgdata_content(node_restored.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)

    # @unittest.skip("skip")
    def test_tablespace_handling_1(self):
        """
        make node with tablespace A, take full backup, check that restore with
        tablespace mapping of tablespace B will end with error
        """
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        tblspace1_old_path = self.get_tblspace_path(node, 'tblspace1_old')
        tblspace2_old_path = self.get_tblspace_path(node, 'tblspace2_old')

        tblspace_new_path = self.get_tblspace_path(node, 'tblspace_new')

        self.create_tblspace_in_node(
            node, 'tblspace1',
            tblspc_path=tblspace1_old_path)

        self.backup_node(
            backup_dir, 'node', node, backup_type="full",
            options=["-j", "4", "--stream"])

        node_restored = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
        node_restored.cleanup()

        try:
            self.restore_node(
                backup_dir, 'node', node_restored,
                options=[
                    "-j", "4",
                    "-T", "{0}={1}".format(
                        tblspace2_old_path, tblspace_new_path)])
            # this point should be unreachable: the restore above must fail
            self.assertEqual(
                1, 0,
                "Expecting Error because tablespace mapping is incorrect"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertTrue(
                'ERROR: --tablespace-mapping option' in e.message and
                'have an entry in tablespace_map file' in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

    # @unittest.skip("skip")
    def test_tablespace_handling_2(self):
        """
        make node without tablespaces, take full backup, check that restore
        with tablespace mapping will end with error
        """
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        tblspace1_old_path = self.get_tblspace_path(node, 'tblspace1_old')
        tblspace_new_path = self.get_tblspace_path(node, 'tblspace_new')

        backup_id = self.backup_node(
            backup_dir, 'node', node, backup_type="full",
            options=["-j", "4", "--stream"])

        node_restored = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
        node_restored.cleanup()

        try:
            self.restore_node(
                backup_dir, 'node', node_restored,
                options=[
                    "-j", "4",
                    "-T", "{0}={1}".format(
                        tblspace1_old_path, tblspace_new_path)])
            # this point should be unreachable: the restore above must fail
            self.assertEqual(
                1, 0,
                "Expecting Error because tablespace mapping is incorrect"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'ERROR: Backup {0} has no tablespaceses, '
                'nothing to remap'.format(backup_id), e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

    # @unittest.skip("skip")
    def test_drop_rel_during_full_backup(self):
        """"""
        self._check_gdb_flag_or_skip_test()

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        for i in range(1, 512):
            node.safe_psql(
                "postgres",
                "create table t_heap_{0} as select i"
                " as id from generate_series(0,100) i".format(i))

        node.safe_psql(
            "postgres",
            "VACUUM")

        node.pgbench_init(scale=10)

        relative_path_1 = node.safe_psql(
            "postgres",
            "select pg_relation_filepath('t_heap_1')").decode('utf-8').rstrip()

        relative_path_2 = node.safe_psql(
            "postgres",
            "select pg_relation_filepath('t_heap_2')").decode('utf-8').rstrip()

        absolute_path_1 = os.path.join(node.data_dir, relative_path_1)
        absolute_path_2 = os.path.join(node.data_dir, relative_path_2)

        # FULL backup
        gdb = self.backup_node(
            backup_dir, 'node', node,
            options=['--stream', '--log-level-file=LOG', '--log-level-console=LOG', '--progress'],
            gdb=True)

        gdb.set_breakpoint('backup_files')
        gdb.run_until_break()
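        # Note (added): the breakpoint on backup_files, the pg_probackup
        # routine that copies the listed files, freezes the backup after the
        # file list was built but before the copy, so the tables dropped below
        # disappear from disk while still being scheduled for copying.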

        # REMOVE file
        for i in range(1, 512):
            node.safe_psql(
                "postgres",
                "drop table t_heap_{0}".format(i))

        node.safe_psql(
            "postgres",
            "CHECKPOINT")

        node.safe_psql(
            "postgres",
            "CHECKPOINT")

        # File removed, we can proceed with backup
        gdb.continue_execution_until_exit()

        pgdata = self.pgdata_content(node.data_dir)

        #with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f:
        #    log_content = f.read()
        #    self.assertTrue(
        #        'LOG: File "{0}" is not found'.format(absolute_path) in log_content,
        #        'File "{0}" should be deleted but it is not'.format(absolute_path))

        node.cleanup()
        self.restore_node(backup_dir, 'node', node)

        # Physical comparison
        pgdata_restored = self.pgdata_content(node.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)

    @unittest.skip("skip")
    def test_drop_db_during_full_backup(self):
        """"""
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        for i in range(1, 2):
            node.safe_psql(
                "postgres",
                "create database t_heap_{0}".format(i))

        node.safe_psql(
            "postgres",
            "VACUUM")

        # FULL backup
        gdb = self.backup_node(
            backup_dir, 'node', node, gdb=True,
            options=[
                '--stream', '--log-level-file=LOG',
                '--log-level-console=LOG', '--progress'])

        gdb.set_breakpoint('backup_files')
        gdb.run_until_break()

        # REMOVE file
        for i in range(1, 2):
            node.safe_psql(
                "postgres",
                "drop database t_heap_{0}".format(i))

        node.safe_psql(
            "postgres",
            "CHECKPOINT")

        node.safe_psql(
            "postgres",
            "CHECKPOINT")

        # File removed, we can proceed with backup
        gdb.continue_execution_until_exit()

        pgdata = self.pgdata_content(node.data_dir)

        #with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f:
        #    log_content = f.read()
        #    self.assertTrue(
        #        'LOG: File "{0}" is not found'.format(absolute_path) in log_content,
        #        'File "{0}" should be deleted but it is not'.format(absolute_path))

        node.cleanup()
        self.restore_node(backup_dir, 'node', node)

        # Physical comparison
        pgdata_restored = self.pgdata_content(node.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)

    # @unittest.skip("skip")
    def test_drop_rel_during_backup_delta(self):
        """"""
        self._check_gdb_flag_or_skip_test()

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.pgbench_init(scale=10)

        node.safe_psql(
            "postgres",
            "create table t_heap as select i"
            " as id from generate_series(0,100) i")

        relative_path = node.safe_psql(
            "postgres",
            "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip()

        absolute_path = os.path.join(node.data_dir, relative_path)

        # FULL backup
        self.backup_node(backup_dir, 'node', node, options=['--stream'])

        # DELTA backup
        gdb = self.backup_node(
            backup_dir, 'node', node, backup_type='delta',
            gdb=True, options=['--log-level-file=LOG'])

        gdb.set_breakpoint('backup_files')
        gdb.run_until_break()

        # REMOVE file
        node.safe_psql(
            "postgres",
            "DROP TABLE t_heap")

        node.safe_psql(
            "postgres",
            "CHECKPOINT")

        # File removed, we can proceed with backup
        gdb.continue_execution_until_exit()

        pgdata = self.pgdata_content(node.data_dir)

        with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f:
            log_content = f.read()
            self.assertTrue(
                'LOG: File not found: "{0}"'.format(absolute_path) in log_content,
                'File "{0}" should be deleted but it is not'.format(absolute_path))

        node.cleanup()
        self.restore_node(backup_dir, 'node', node, options=["-j", "4"])

        # Physical comparison
        pgdata_restored = self.pgdata_content(node.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)

    # @unittest.skip("skip")
    def test_drop_rel_during_backup_page(self):
        """"""
        self._check_gdb_flag_or_skip_test()

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "create table t_heap as select i"
            " as id from generate_series(0,100) i")

        relative_path = node.safe_psql(
            "postgres",
            "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip()

        absolute_path = os.path.join(node.data_dir, relative_path)

        # FULL backup
        self.backup_node(backup_dir, 'node', node, options=['--stream'])

        node.safe_psql(
            "postgres",
            "insert into t_heap select i"
            " as id from generate_series(101,102) i")

        # PAGE backup
        gdb = self.backup_node(
            backup_dir, 'node', node, backup_type='page',
            gdb=True, options=['--log-level-file=LOG'])

        gdb.set_breakpoint('backup_files')
        gdb.run_until_break()

        # REMOVE file
        os.remove(absolute_path)

        # File removed, we can proceed with backup
        gdb.continue_execution_until_exit()
        gdb.kill()

        pgdata = self.pgdata_content(node.data_dir)

        backup_id = self.show_pb(backup_dir, 'node')[1]['id']

        filelist = self.get_backup_filelist(backup_dir, 'node', backup_id)
        self.assertNotIn(relative_path, filelist)
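        # Note (added): get_backup_filelist is the test helper that returns
        # the file list stored with the backup (presumably from
        # backup_content.control); the dropped relation must be absent from
        # it, and the physical comparison below verifies the restore.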

        node.cleanup()
        self.restore_node(backup_dir, 'node', node, options=["-j", "4"])

        # Physical comparison
        pgdata_restored = self.pgdata_content(node.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)

    # @unittest.skip("skip")
    def test_persistent_slot_for_stream_backup(self):
        """"""
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'max_wal_size': '40MB'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "SELECT pg_create_physical_replication_slot('slot_1')")
1385

1386
        # FULL backup
1387
        self.backup_node(
1388
            backup_dir, 'node', node,
1389
            options=['--stream', '--slot=slot_1'])
1390

1391
        # FULL backup
1392
        self.backup_node(
1393
            backup_dir, 'node', node,
1394
            options=['--stream', '--slot=slot_1'])
1395

    # @unittest.skip("skip")
    def test_basic_temp_slot_for_stream_backup(self):
        """
        Check stream backups with a temporary replication slot
        (PostgreSQL 10+)
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'max_wal_size': '40MB'})

        if self.get_version(node) < self.version_to_num('10.0'):
            self.skipTest('You need PostgreSQL >= 10 for this test')

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

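        # With '--temp-slot' the replication slot exists only for the
        # duration of the backup, so back-to-back backups leave no slot
        # behind that would keep pinning WAL on the server.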
        # FULL backup
        self.backup_node(
            backup_dir, 'node', node,
            options=['--stream', '--temp-slot'])

        # FULL backup
        self.backup_node(
            backup_dir, 'node', node,
            options=['--stream', '--slot=slot_1', '--temp-slot'])

    # @unittest.skip("skip")
    def test_backup_concurrent_drop_table(self):
        """
        Check that a FULL backup stays OK when a table is dropped
        and checkpointed away mid-backup
        """
        self._check_gdb_flag_or_skip_test()

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.pgbench_init(scale=1)

        # FULL backup
        gdb = self.backup_node(
            backup_dir, 'node', node,
            options=['--stream', '--compress'],
            gdb=True)

        gdb.set_breakpoint('backup_data_file')
        gdb.run_until_break()

        node.safe_psql(
            'postgres',
            'DROP TABLE pgbench_accounts')

        # do checkpoint to guarantee filenode removal
        node.safe_psql(
            'postgres',
            'CHECKPOINT')

        gdb.remove_all_breakpoints()
        gdb.continue_execution_until_exit()
        gdb.kill()

        show_backup = self.show_pb(backup_dir, 'node')[0]

        self.assertEqual(show_backup['status'], "OK")

    # @unittest.skip("skip")
    def test_pg_11_adjusted_wal_segment_size(self):
        """
        Check all backup modes on a cluster initialized with a
        non-default WAL segment size (PostgreSQL 11+)
        """
        if self.pg_config_version < self.version_to_num('11.0'):
            self.skipTest('You need PostgreSQL >= 11 for this test')

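        # initdb is called with --wal-segsize=64 below, so WAL segments are
        # 64MB instead of the default 16MB; every backup mode, plus delete,
        # validate, merge and restore, must handle that geometry.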
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=[
                '--data-checksums',
                '--wal-segsize=64'],
            pg_options={
                'min_wal_size': '128MB'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.pgbench_init(scale=5)

        # FULL STREAM backup
        self.backup_node(
            backup_dir, 'node', node, options=['--stream'])

        pgbench = node.pgbench(options=['-T', '5', '-c', '2'])
        pgbench.wait()

        # PAGE STREAM backup
        self.backup_node(
            backup_dir, 'node', node,
            backup_type='page', options=['--stream'])

        pgbench = node.pgbench(options=['-T', '5', '-c', '2'])
        pgbench.wait()

        # DELTA STREAM backup
        self.backup_node(
            backup_dir, 'node', node,
            backup_type='delta', options=['--stream'])

        pgbench = node.pgbench(options=['-T', '5', '-c', '2'])
        pgbench.wait()

        # FULL ARCHIVE backup
        self.backup_node(backup_dir, 'node', node)

        pgbench = node.pgbench(options=['-T', '5', '-c', '2'])
        pgbench.wait()

        # PAGE ARCHIVE backup
        self.backup_node(backup_dir, 'node', node, backup_type='page')

        pgbench = node.pgbench(options=['-T', '5', '-c', '2'])
        pgbench.wait()

        # DELTA ARCHIVE backup
        backup_id = self.backup_node(backup_dir, 'node', node, backup_type='delta')
        pgdata = self.pgdata_content(node.data_dir)

        # delete
        output = self.delete_pb(
            backup_dir, 'node',
            options=[
                '--expired',
                '--delete-wal',
                '--retention-redundancy=1'])

        # validate
        self.validate_pb(backup_dir)

        # merge
        self.merge_backup(backup_dir, 'node', backup_id=backup_id)

        # restore
        node.cleanup()
        self.restore_node(
            backup_dir, 'node', node, backup_id=backup_id)

        pgdata_restored = self.pgdata_content(node.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)

    # @unittest.skip("skip")
    def test_sigint_handling(self):
        """
        Check that a backup interrupted by SIGINT ends up in ERROR state
        """
        self._check_gdb_flag_or_skip_test()

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        # FULL backup
        gdb = self.backup_node(
            backup_dir, 'node', node, gdb=True,
            options=['--stream', '--log-level-file=LOG'])

        gdb.set_breakpoint('backup_non_data_file')
        gdb.run_until_break()

        gdb.continue_execution_until_break(20)
        gdb.remove_all_breakpoints()

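        # Deliver SIGINT while the backup is paused mid-copy: pg_probackup
        # is expected to abort cleanly and record the backup as ERROR,
        # never leave it RUNNING or mark it OK.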
        gdb._execute('signal SIGINT')
        gdb.continue_execution_until_error()
        gdb.kill()

        backup_id = self.show_pb(backup_dir, 'node')[0]['id']

        self.assertEqual(
            'ERROR',
            self.show_pb(backup_dir, 'node', backup_id)['status'],
            'Backup STATUS should be "ERROR"')

    # @unittest.skip("skip")
    def test_sigterm_handling(self):
        """
        Check that a backup terminated by SIGTERM ends up in ERROR state
        """
        self._check_gdb_flag_or_skip_test()

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        # FULL backup
        gdb = self.backup_node(
            backup_dir, 'node', node, gdb=True,
            options=['--stream', '--log-level-file=LOG'])

        gdb.set_breakpoint('backup_non_data_file')
        gdb.run_until_break()

        gdb.continue_execution_until_break(20)
        gdb.remove_all_breakpoints()

        gdb._execute('signal SIGTERM')
        gdb.continue_execution_until_error()

        backup_id = self.show_pb(backup_dir, 'node')[0]['id']

        self.assertEqual(
            'ERROR',
            self.show_pb(backup_dir, 'node', backup_id)['status'],
            'Backup STATUS should be "ERROR"')

    # @unittest.skip("skip")
    def test_sigquit_handling(self):
        """
        Check that a backup killed by SIGQUIT ends up in ERROR state
        """
        self._check_gdb_flag_or_skip_test()

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        # FULL backup
        gdb = self.backup_node(
            backup_dir, 'node', node, gdb=True, options=['--stream'])

        gdb.set_breakpoint('backup_non_data_file')
        gdb.run_until_break()

        gdb.continue_execution_until_break(20)
        gdb.remove_all_breakpoints()

        gdb._execute('signal SIGQUIT')
        gdb.continue_execution_until_error()

        backup_id = self.show_pb(backup_dir, 'node')[0]['id']

        self.assertEqual(
            'ERROR',
            self.show_pb(backup_dir, 'node', backup_id)['status'],
            'Backup STATUS should be "ERROR"')

    # @unittest.skip("skip")
    def test_drop_table(self):
        """
        Check that a FULL backup succeeds after a table was created,
        read and dropped by concurrent connections
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        connect_1 = node.connect("postgres")
        connect_1.execute(
            "create table t_heap as select i"
            " as id from generate_series(0,100) i")
        connect_1.commit()

        connect_2 = node.connect("postgres")
        connect_2.execute("SELECT * FROM t_heap")
        connect_2.commit()

        # DROP table
        connect_2.execute("DROP TABLE t_heap")
        connect_2.commit()

        # FULL backup
        self.backup_node(
            backup_dir, 'node', node, options=['--stream'])

    # @unittest.skip("skip")
    def test_basic_missing_file_permissions(self):
        """
        Check that a backup fails cleanly when a data file cannot be opened
        """
        if os.name == 'nt':
            self.skipTest('Skipped because it is a POSIX-only test')

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        relative_path = node.safe_psql(
            "postgres",
            "select pg_relation_filepath('pg_class')").decode('utf-8').rstrip()

        full_path = os.path.join(node.data_dir, relative_path)

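        # Strip every permission bit from pg_class's relation file so that
        # the backup's attempt to open it must fail.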
        os.chmod(full_path, 0o000)

        try:
            # FULL backup
            self.backup_node(
                backup_dir, 'node', node, options=['--stream'])
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because of missing permissions"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'ERROR: Cannot open file',
                e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        # restore permissions (0o700, not decimal 700) so cleanup can proceed
        os.chmod(full_path, 0o700)

    # @unittest.skip("skip")
    def test_basic_missing_dir_permissions(self):
        """
        Check that a backup fails cleanly when a data directory cannot be opened
        """
        if os.name == 'nt':
            self.skipTest('Skipped because it is a POSIX-only test')

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        full_path = os.path.join(node.data_dir, 'pg_twophase')

        os.chmod(full_path, 0o000)

        try:
            # FULL backup
            self.backup_node(
                backup_dir, 'node', node, options=['--stream'])
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because of missing permissions"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'ERROR: Cannot open directory',
                e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        os.rmdir(full_path)

    # @unittest.skip("skip")
    def test_backup_with_least_privileges_role(self):
        """
        Take backups of every type under a 'backup' role granted only the
        minimum privileges required for each PostgreSQL version
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=self.ptrack,
            initdb_params=['--data-checksums'],
            pg_options={'archive_timeout': '30s'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            'postgres',
            'CREATE DATABASE backupdb')

        if self.ptrack:
            node.safe_psql(
                "backupdb",
                "CREATE SCHEMA ptrack; "
                "CREATE EXTENSION ptrack WITH SCHEMA ptrack")

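        # Each version branch below first revokes the default PUBLIC grants,
        # then grants the 'backup' role only the catalog reads and
        # administrative functions pg_probackup calls on that server version.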
        # PG 9.5
        if self.get_version(node) < 90600:
            node.safe_psql(
                'backupdb',
                "REVOKE ALL ON DATABASE backupdb from PUBLIC; "
                "REVOKE ALL ON SCHEMA public from PUBLIC; "
                "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; "
                "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; "
                "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; "
                "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; "
                "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; "
                "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; "
                "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; "
                "REVOKE ALL ON SCHEMA information_schema from PUBLIC; "
                "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; "
                "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; "
                "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; "
                "CREATE ROLE backup WITH LOGIN REPLICATION; "
                "GRANT CONNECT ON DATABASE backupdb to backup; "
                "GRANT USAGE ON SCHEMA pg_catalog TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack
                "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;")
        # PG 9.6
        elif self.get_version(node) >= 90600 and self.get_version(node) < 100000:
            node.safe_psql(
                'backupdb',
                "REVOKE ALL ON DATABASE backupdb from PUBLIC; "
                "REVOKE ALL ON SCHEMA public from PUBLIC; "
                "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; "
                "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; "
                "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; "
                "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; "
                "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; "
                "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; "
                "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; "
                "REVOKE ALL ON SCHEMA information_schema from PUBLIC; "
                "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; "
                "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; "
                "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; "
                "CREATE ROLE backup WITH LOGIN REPLICATION; "
                "GRANT CONNECT ON DATABASE backupdb to backup; "
                "GRANT USAGE ON SCHEMA pg_catalog TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack
                "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;"
            )
        # >= 10 && < 15
        elif self.get_version(node) >= 100000 and self.get_version(node) < 150000:
            node.safe_psql(
                'backupdb',
                "REVOKE ALL ON DATABASE backupdb from PUBLIC; "
                "REVOKE ALL ON SCHEMA public from PUBLIC; "
                "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; "
                "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; "
                "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; "
                "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; "
                "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; "
                "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; "
                "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; "
                "REVOKE ALL ON SCHEMA information_schema from PUBLIC; "
                "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; "
                "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; "
                "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; "
                "CREATE ROLE backup WITH LOGIN REPLICATION; "
                "GRANT CONNECT ON DATABASE backupdb to backup; "
                "GRANT USAGE ON SCHEMA pg_catalog TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack
                "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;"
            )
        # >= 15
        else:
            node.safe_psql(
                'backupdb',
                "REVOKE ALL ON DATABASE backupdb from PUBLIC; "
                "REVOKE ALL ON SCHEMA public from PUBLIC; "
                "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; "
                "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; "
                "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; "
                "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; "
                "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; "
                "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; "
                "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; "
                "REVOKE ALL ON SCHEMA information_schema from PUBLIC; "
                "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; "
                "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; "
                "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; "
                "CREATE ROLE backup WITH LOGIN REPLICATION; "
                "GRANT CONNECT ON DATABASE backupdb to backup; "
                "GRANT USAGE ON SCHEMA pg_catalog TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack
                "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_start(text, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;"
            )

        if self.ptrack:
            node.safe_psql(
                "backupdb",
                "GRANT USAGE ON SCHEMA ptrack TO backup")

            node.safe_psql(
                "backupdb",
                "GRANT EXECUTE ON FUNCTION ptrack.ptrack_get_pagemapset(pg_lsn) TO backup; "
                "GRANT EXECUTE ON FUNCTION ptrack.ptrack_init_lsn() TO backup;")

        if ProbackupTest.enterprise:
            node.safe_psql(
                "backupdb",
                "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;")

        # FULL backup
        self.backup_node(
            backup_dir, 'node', node,
            datname='backupdb', options=['--stream', '-U', 'backup'])
        self.backup_node(
            backup_dir, 'node', node,
            datname='backupdb', options=['-U', 'backup'])

        # PAGE
        self.backup_node(
            backup_dir, 'node', node, backup_type='page',
            datname='backupdb', options=['-U', 'backup'])
        self.backup_node(
            backup_dir, 'node', node, backup_type='page', datname='backupdb',
            options=['--stream', '-U', 'backup'])

        # DELTA
        self.backup_node(
            backup_dir, 'node', node, backup_type='delta',
            datname='backupdb', options=['-U', 'backup'])
        self.backup_node(
            backup_dir, 'node', node, backup_type='delta',
            datname='backupdb', options=['--stream', '-U', 'backup'])

        # PTRACK
        if self.ptrack:
            self.backup_node(
                backup_dir, 'node', node, backup_type='ptrack',
                datname='backupdb', options=['-U', 'backup'])
            self.backup_node(
                backup_dir, 'node', node, backup_type='ptrack',
                datname='backupdb', options=['--stream', '-U', 'backup'])

    # @unittest.skip("skip")
    def test_parent_choosing(self):
        """
        PAGE3 <- RUNNING(parent should be FULL)
        PAGE2 <- OK
        PAGE1 <- ERROR
        FULL
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        full_id = self.backup_node(backup_dir, 'node', node)

        # PAGE1
        page1_id = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        # PAGE2
        page2_id = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        # Change PAGE1 to ERROR
        self.change_backup_status(backup_dir, 'node', page1_id, 'ERROR')

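        # PAGE2's chain now passes through the broken PAGE1, so PAGE2
        # cannot serve as a parent either; the next PAGE backup is
        # expected to fall back to the FULL backup.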
        # PAGE3
        page3_id = self.backup_node(
            backup_dir, 'node', node,
            backup_type='page', options=['--log-level-file=LOG'])

        log_file_path = os.path.join(backup_dir, 'log', 'pg_probackup.log')
        with open(log_file_path) as f:
            log_file_content = f.read()

        self.assertIn(
            "WARNING: Backup {0} has invalid parent: {1}. "
            "Cannot be a parent".format(page2_id, page1_id),
            log_file_content)

        self.assertIn(
            "WARNING: Backup {0} has status: ERROR. "
            "Cannot be a parent".format(page1_id),
            log_file_content)

        self.assertIn(
            "Parent backup: {0}".format(full_id),
            log_file_content)

        self.assertEqual(
            self.show_pb(
                backup_dir, 'node', backup_id=page3_id)['parent-backup-id'],
            full_id)

    # @unittest.skip("skip")
    def test_parent_choosing_1(self):
        """
        PAGE3 <- RUNNING(parent should be FULL)
        PAGE2 <- OK
        PAGE1 <- (missing)
        FULL
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        full_id = self.backup_node(backup_dir, 'node', node)

        # PAGE1
        page1_id = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        # PAGE2
        page2_id = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        # Delete PAGE1
        shutil.rmtree(
            os.path.join(backup_dir, 'backups', 'node', page1_id))

        # PAGE3
        page3_id = self.backup_node(
            backup_dir, 'node', node,
            backup_type='page', options=['--log-level-file=LOG'])

        log_file_path = os.path.join(backup_dir, 'log', 'pg_probackup.log')
        with open(log_file_path) as f:
            log_file_content = f.read()

        self.assertIn(
            "WARNING: Backup {0} has missing parent: {1}. "
            "Cannot be a parent".format(page2_id, page1_id),
            log_file_content)

        self.assertIn(
            "Parent backup: {0}".format(full_id),
            log_file_content)

        self.assertEqual(
            self.show_pb(
                backup_dir, 'node', backup_id=page3_id)['parent-backup-id'],
            full_id)

    # @unittest.skip("skip")
    def test_parent_choosing_2(self):
        """
        PAGE3 <- RUNNING(backup should fail)
        PAGE2 <- OK
        PAGE1 <- OK
        FULL  <- (missing)
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        full_id = self.backup_node(backup_dir, 'node', node)

        # PAGE1
        page1_id = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        # PAGE2
        page2_id = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        # Delete FULL
        shutil.rmtree(
            os.path.join(backup_dir, 'backups', 'node', full_id))

        # PAGE3
        try:
            self.backup_node(
                backup_dir, 'node', node,
                backup_type='page', options=['--log-level-file=LOG'])
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because FULL backup is missing"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertTrue(
                'WARNING: Valid full backup on current timeline 1 is not found' in e.message and
                'ERROR: Create new full backup before an incremental one' in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        self.assertEqual(
            self.show_pb(
                backup_dir, 'node')[2]['status'],
            'ERROR')

    # @unittest.skip("skip")
    def test_backup_with_less_privileges_role(self):
        """
        Check that the permissions described in the documentation are sufficient:
        https://github.com/postgrespro/pg_probackup/blob/master/Documentation.md#configuring-the-database-cluster
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=self.ptrack,
            initdb_params=['--data-checksums'],
            pg_options={
                'archive_timeout': '30s',
                'archive_mode': 'always',
                'checkpoint_timeout': '60s',
                'wal_level': 'logical'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_config(backup_dir, 'node', options=['--archive-timeout=60s'])
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            'postgres',
            'CREATE DATABASE backupdb')

        if self.ptrack:
            node.safe_psql(
                'backupdb',
                'CREATE EXTENSION ptrack')

        # PG 9.5
        if self.get_version(node) < 90600:
            node.safe_psql(
                'backupdb',
                "CREATE ROLE backup WITH LOGIN; "
                "GRANT USAGE ON SCHEMA pg_catalog TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;")
        # PG 9.6
        elif self.get_version(node) >= 90600 and self.get_version(node) < 100000:
            node.safe_psql(
                'backupdb',
                "CREATE ROLE backup WITH LOGIN; "
                "GRANT USAGE ON SCHEMA pg_catalog TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; "
                "COMMIT;"
            )
        # >= 10 && < 15
        elif self.get_version(node) >= 100000 and self.get_version(node) < 150000:
            node.safe_psql(
                'backupdb',
                "CREATE ROLE backup WITH LOGIN; "
                "GRANT USAGE ON SCHEMA pg_catalog TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; "
                "COMMIT;"
            )
        # >= 15
        else:
            node.safe_psql(
                'backupdb',
                "BEGIN; "
                "CREATE ROLE backup WITH LOGIN; "
                "GRANT USAGE ON SCHEMA pg_catalog TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_start(text, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; "
                "COMMIT;"
            )

        # enable STREAM backup
        node.safe_psql(
            'backupdb',
            'ALTER ROLE backup WITH REPLICATION;')

        # FULL backup
        self.backup_node(
            backup_dir, 'node', node,
            datname='backupdb', options=['--stream', '-U', 'backup'])
        self.backup_node(
            backup_dir, 'node', node,
            datname='backupdb', options=['-U', 'backup'])

        # PAGE
        self.backup_node(
            backup_dir, 'node', node, backup_type='page',
            datname='backupdb', options=['-U', 'backup'])
        self.backup_node(
            backup_dir, 'node', node, backup_type='page', datname='backupdb',
            options=['--stream', '-U', 'backup'])

        # DELTA
        self.backup_node(
            backup_dir, 'node', node, backup_type='delta',
            datname='backupdb', options=['-U', 'backup'])
        self.backup_node(
            backup_dir, 'node', node, backup_type='delta',
            datname='backupdb', options=['--stream', '-U', 'backup'])

        # PTRACK
        if self.ptrack:
            self.backup_node(
                backup_dir, 'node', node, backup_type='ptrack',
                datname='backupdb', options=['-U', 'backup'])
            self.backup_node(
                backup_dir, 'node', node, backup_type='ptrack',
                datname='backupdb', options=['--stream', '-U', 'backup'])

        if self.get_version(node) < 90600:
            return

        # Restore as replica
        replica = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'replica'))
        replica.cleanup()

        self.restore_node(backup_dir, 'node', replica)
        self.set_replica(node, replica)
        self.add_instance(backup_dir, 'replica', replica)
        self.set_config(
            backup_dir, 'replica',
            options=['--archive-timeout=120s', '--log-level-console=LOG'])
        self.set_archiving(backup_dir, 'replica', replica, replica=True)
        self.set_auto_conf(replica, {'hot_standby': 'on'})

        # freeze bgwriter to get rid of RUNNING XACTS records
        # bgwriter_pid = node.auxiliary_pids[ProcessType.BackgroundWriter][0]
        # gdb_checkpointer = self.gdb_attach(bgwriter_pid)

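        # Seed the replica's WAL archive with the primary's segments so the
        # replica instance already has the history it needs before its own
        # archiving takes over.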
        copy_tree(
            os.path.join(backup_dir, 'wal', 'node'),
            os.path.join(backup_dir, 'wal', 'replica'))

        replica.slow_start(replica=True)

        # self.switch_wal_segment(node)
        # self.switch_wal_segment(node)

        self.backup_node(
            backup_dir, 'replica', replica,
            datname='backupdb', options=['-U', 'backup'])

        # stream full backup from replica
        self.backup_node(
            backup_dir, 'replica', replica,
            datname='backupdb', options=['--stream', '-U', 'backup'])

#        self.switch_wal_segment(node)

        # PAGE backup from replica
        self.switch_wal_segment(node)
        self.backup_node(
            backup_dir, 'replica', replica, backup_type='page',
            datname='backupdb', options=['-U', 'backup', '--archive-timeout=30s'])

        self.backup_node(
            backup_dir, 'replica', replica, backup_type='page',
            datname='backupdb', options=['--stream', '-U', 'backup'])

        # DELTA backup from replica
        self.switch_wal_segment(node)
        self.backup_node(
            backup_dir, 'replica', replica, backup_type='delta',
            datname='backupdb', options=['-U', 'backup'])
        self.backup_node(
            backup_dir, 'replica', replica, backup_type='delta',
            datname='backupdb', options=['--stream', '-U', 'backup'])

        # PTRACK backup from replica
        if self.ptrack:
            self.switch_wal_segment(node)
            self.backup_node(
                backup_dir, 'replica', replica, backup_type='ptrack',
                datname='backupdb', options=['-U', 'backup'])
            self.backup_node(
                backup_dir, 'replica', replica, backup_type='ptrack',
                datname='backupdb', options=['--stream', '-U', 'backup'])

    @unittest.skip("skip")
    def test_issue_132(self):
        """
        https://github.com/postgrespro/pg_probackup/issues/132
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        with node.connect("postgres") as conn:
            for i in range(50000):
                conn.execute(
                    "CREATE TABLE t_{0} as select 1".format(i))
                conn.commit()

        self.backup_node(
            backup_dir, 'node', node, options=['--stream'])

        pgdata = self.pgdata_content(node.data_dir)

        node.cleanup()
        self.restore_node(backup_dir, 'node', node)

        pgdata_restored = self.pgdata_content(node.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)

        exit(1)

    @unittest.skip("skip")
    def test_issue_132_1(self):
        """
        https://github.com/postgrespro/pg_probackup/issues/132
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        # TODO: check version of old binary, it should be 2.1.4, 2.1.5 or 2.2.1

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        with node.connect("postgres") as conn:
            for i in range(30000):
                conn.execute(
                    "CREATE TABLE t_{0} as select 1".format(i))
                conn.commit()

        full_id = self.backup_node(
            backup_dir, 'node', node, options=['--stream'], old_binary=True)

        delta_id = self.backup_node(
            backup_dir, 'node', node, backup_type='delta',
            options=['--stream'], old_binary=True)

        node.cleanup()

        # make sure that new binary can detect corruption
        try:
            self.validate_pb(backup_dir, 'node', backup_id=full_id)
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because FULL backup is CORRUPT"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'WARNING: Backup {0} is a victim of metadata corruption'.format(full_id),
                e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        try:
            self.validate_pb(backup_dir, 'node', backup_id=delta_id)
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because FULL backup is CORRUPT"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'WARNING: Backup {0} is a victim of metadata corruption'.format(full_id),
                e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        self.assertEqual(
            'CORRUPT', self.show_pb(backup_dir, 'node', full_id)['status'],
            'Backup STATUS should be "CORRUPT"')

        self.assertEqual(
            'ORPHAN', self.show_pb(backup_dir, 'node', delta_id)['status'],
            'Backup STATUS should be "ORPHAN"')

        # check that revalidation is working correctly
        try:
            self.restore_node(
                backup_dir, 'node', node, backup_id=delta_id)
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because FULL backup is CORRUPT"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'WARNING: Backup {0} is a victim of metadata corruption'.format(full_id),
                e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        self.assertEqual(
            'CORRUPT', self.show_pb(backup_dir, 'node', full_id)['status'],
            'Backup STATUS should be "CORRUPT"')

        self.assertEqual(
            'ORPHAN', self.show_pb(backup_dir, 'node', delta_id)['status'],
            'Backup STATUS should be "ORPHAN"')

        # check that '--no-validate' does not allow to restore ORPHAN backup
#        try:
#            self.restore_node(
#                backup_dir, 'node', node, backup_id=delta_id,
#                options=['--no-validate'])
#            # we should die here because exception is what we expect to happen
#            self.assertEqual(
#                1, 0,
#                "Expecting Error because FULL backup is CORRUPT"
#                "\n Output: {0} \n CMD: {1}".format(
#                    repr(self.output), self.cmd))
#        except ProbackupException as e:
#            self.assertIn(
#                'Insert data',
#                e.message,
#                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
#                    repr(e.message), self.cmd))

        node.cleanup()

        output = self.restore_node(
            backup_dir, 'node', node, backup_id=full_id, options=['--force'])

        self.assertIn(
            'WARNING: Backup {0} has status: CORRUPT'.format(full_id),
            output)

        self.assertIn(
            'WARNING: Backup {0} is corrupt.'.format(full_id),
            output)

        self.assertIn(
            'WARNING: Backup {0} is not valid, restore is forced'.format(full_id),
            output)

        self.assertIn(
            'INFO: Restore of backup {0} completed.'.format(full_id),
            output)

        node.cleanup()

        output = self.restore_node(
            backup_dir, 'node', node, backup_id=delta_id, options=['--force'])

        self.assertIn(
            'WARNING: Backup {0} is orphan.'.format(delta_id),
            output)

        self.assertIn(
            'WARNING: Backup {0} is not valid, restore is forced'.format(full_id),
            output)

        self.assertIn(
            'WARNING: Backup {0} is not valid, restore is forced'.format(delta_id),
            output)

        self.assertIn(
            'INFO: Restore of backup {0} completed.'.format(delta_id),
            output)

    def test_note_sanity(self):
        """
        Test that adding a note to a backup works as expected
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # FULL backup
        backup_id = self.backup_node(
            backup_dir, 'node', node,
            options=['--stream', '--log-level-file=LOG', '--note=test_note'])

        show_backups = self.show_pb(backup_dir, 'node')

        print(self.show_pb(backup_dir, as_text=True, as_json=True))

        self.assertEqual(show_backups[0]['note'], "test_note")

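        # Setting '--note=none' clears the note; afterwards the 'note' key
        # is expected to be absent from the backup metadata entirely.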
        self.set_backup(backup_dir, 'node', backup_id, options=['--note=none'])

        backup_meta = self.show_pb(backup_dir, 'node', backup_id)

        self.assertNotIn(
            'note',
            backup_meta)

2606
    # @unittest.skip("skip")
2607
    def test_parent_backup_made_by_newer_version(self):
2608
        """incremental backup with parent made by newer version"""
2609
        node = self.make_simple_node(
2610
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
2611
            initdb_params=['--data-checksums'])
2612

2613
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
2614
        self.init_pb(backup_dir)
2615
        self.add_instance(backup_dir, 'node', node)
2616
        self.set_archiving(backup_dir, 'node', node)
2617
        node.slow_start()
2618

2619
        backup_id = self.backup_node(backup_dir, 'node', node)
2620

2621
        control_file = os.path.join(
2622
            backup_dir, "backups", "node", backup_id,
2623
            "backup.control")
2624

2625
        version = self.probackup_version
2626
        fake_new_version = str(int(version.split('.')[0]) + 1) + '.0.0'
2627

2628
        with open(control_file, 'r') as f:
2629
            data = f.read();
2630

2631
        data = data.replace(version, fake_new_version)
2632

2633
        with open(control_file, 'w') as f:
2634
            f.write(data);
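
        # backup.control records the version of the pg_probackup binary
        # that took the backup; rewriting it with a bumped major version
        # makes the FULL parent look like it was made by a newer binary,
        # so the PAGE backup below must be refused.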

        try:
            self.backup_node(backup_dir, 'node', node, backup_type="page")
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because incremental backup should not be possible "
                "if parent made by newer version.\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                "pg_probackup do not guarantee to be forward compatible. "
                "Please upgrade pg_probackup binary.",
                e.message,
                "\n Unexpected Error Message: {0}\n CMD: {1}".format(
                    repr(e.message), self.cmd))

        self.assertEqual(
            self.show_pb(backup_dir, 'node')[1]['status'], "ERROR")

    # @unittest.skip("skip")
    def test_issue_289(self):
        """
        https://github.com/postgrespro/pg_probackup/issues/289
        """
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)

        node.slow_start()

        try:
            self.backup_node(
                backup_dir, 'node', node,
                backup_type='page', options=['--archive-timeout=10s'])
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because full backup is missing"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertNotIn(
                "INFO: Wait for WAL segment",
                e.message,
                "\n Unexpected Error Message: {0}\n CMD: {1}".format(
                    repr(e.message), self.cmd))

            self.assertIn(
                "ERROR: Create new full backup before an incremental one",
                e.message,
                "\n Unexpected Error Message: {0}\n CMD: {1}".format(
                    repr(e.message), self.cmd))

        self.assertEqual(
            self.show_pb(backup_dir, 'node')[0]['status'], "ERROR")

    # @unittest.skip("skip")
    def test_issue_290(self):
        """
        https://github.com/postgrespro/pg_probackup/issues/290
        """
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)

        os.rmdir(
            os.path.join(backup_dir, "wal", "node"))
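
        # Removing backup_dir/wal/node leaves the instance without its WAL
        # archive directory; the backup below must fail fast with an
        # archive accessibility error instead of idling until
        # --archive-timeout expires.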

        node.slow_start()

        try:
            self.backup_node(
                backup_dir, 'node', node,
                options=['--archive-timeout=10s'])
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because WAL archive directory is missing"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertNotIn(
                "INFO: Wait for WAL segment",
                e.message,
                "\n Unexpected Error Message: {0}\n CMD: {1}".format(
                    repr(e.message), self.cmd))

            self.assertIn(
                "WAL archive directory is not accessible",
                e.message,
                "\n Unexpected Error Message: {0}\n CMD: {1}".format(
                    repr(e.message), self.cmd))

        self.assertEqual(
            self.show_pb(backup_dir, 'node')[0]['status'], "ERROR")

    @unittest.skip("skip")
    def test_issue_203(self):
        """
        https://github.com/postgrespro/pg_probackup/issues/203
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        with node.connect("postgres") as conn:
            for i in range(1000000):
                conn.execute(
                    "CREATE TABLE t_{0} as select 1".format(i))
                conn.commit()

        full_id = self.backup_node(
            backup_dir, 'node', node, options=['--stream', '-j2'])

        pgdata = self.pgdata_content(node.data_dir)

        node_restored = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
        node_restored.cleanup()

        self.restore_node(backup_dir, 'node',
            node_restored, data_dir=node_restored.data_dir)

        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)

    # @unittest.skip("skip")
    def test_issue_231(self):
        """
        https://github.com/postgrespro/pg_probackup/issues/231
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'))

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)

        datadir = os.path.join(node.data_dir, '123')

        t0 = time()
        while True:
            with self.assertRaises(ProbackupException) as ctx:
                self.backup_node(backup_dir, 'node', node)
            pb1 = re.search(r' backup ID: ([^\s,]+),', ctx.exception.message).groups()[0]

            t = time()
            if int(pb1, 36) == int(t) and t % 1 < 0.5:
                # ok, we have a chance to start the next backup in the same second
                break
            elif t - t0 > 20:
                # Oops, we have been waiting too long. Looks like this
                # runner is too slow. Let's skip the test.
                self.skipTest("runner is too slow")
            # sleep to the end of this second so the next backup does not
            # have to sleep out a whole second itself
            sleep(1 - t % 1)
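
        # Backup IDs are base36-encoded start timestamps with one-second
        # resolution: int(pb1, 36) above recovers the epoch second of the
        # failed backup, and breaking early in a second gives the next
        # attempt a realistic chance to start within the same second --
        # exactly the ID-collision scenario of issue #231.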

        with self.assertRaises(ProbackupException) as ctx:
            self.backup_node(backup_dir, 'node', node)
        pb2 = re.search(r' backup ID: ([^\s,]+),', ctx.exception.message).groups()[0]

        self.assertNotEqual(pb1, pb2)

    def test_incr_backup_filenode_map(self):
        """
        https://github.com/postgrespro/pg_probackup/issues/320
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node1 = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node1'),
            initdb_params=['--data-checksums'])
        node1.cleanup()

        node.pgbench_init(scale=5)

        # FULL backup
        backup_id = self.backup_node(backup_dir, 'node', node)

        pgbench = node.pgbench(
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
            options=['-T', '10', '-c', '1'])

        backup_id = self.backup_node(backup_dir, 'node', node, backup_type='delta')

        node.safe_psql(
            'postgres',
            'reindex index pg_type_oid_index')
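
        # REINDEX gives pg_type_oid_index a new relfilenode, so the next
        # DELTA backup must pick up the changed oid-to-file mapping; a
        # stale filenode map here is what issue #320 is about.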

        backup_id = self.backup_node(
            backup_dir, 'node', node, backup_type='delta')

        # restore the whole chain back into node
        node.cleanup()

        self.restore_node(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            'postgres',
            'select 1')

    # @unittest.skip("skip")
    def test_missing_wal_segment(self):
        """"""
        self._check_gdb_flag_or_skip_test()

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=self.ptrack,
            initdb_params=['--data-checksums'],
            pg_options={'archive_timeout': '30s'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.pgbench_init(scale=10)

        node.safe_psql(
            'postgres',
            'CREATE DATABASE backupdb')

        # get segments in pg_wal, sort them and remove all but the two newest
        if node.major_version >= 10:
            pg_wal_dir = os.path.join(node.data_dir, 'pg_wal')
        else:
            pg_wal_dir = os.path.join(node.data_dir, 'pg_xlog')

        # Full backup in streaming mode
        gdb = self.backup_node(
            backup_dir, 'node', node, datname='backupdb',
            options=['--stream', '--log-level-file=INFO'], gdb=True)

        # break at streaming start
        gdb.set_breakpoint('start_WAL_streaming')
        gdb.run_until_break()

        # generate some more data
        node.pgbench_init(scale=3)

        # remove redundant WAL segments in pg_wal
        files = os.listdir(pg_wal_dir)
        files.sort(reverse=True)

        # keep the two newest files on disk; delete everything older
        del files[:2]
        for filename in files:
            os.remove(os.path.join(pg_wal_dir, filename))
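
        # The backup is paused at start_WAL_streaming, so the segments just
        # deleted are the very ones the walsender will be asked to stream;
        # resuming execution must fail with "requested WAL segment ... has
        # already been removed".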

        gdb.continue_execution_until_exit()

        self.assertIn(
            'unexpected termination of replication stream: ERROR:  requested WAL segment',
            gdb.output)

        self.assertIn(
            'has already been removed',
            gdb.output)

        self.assertIn(
            'ERROR: Interrupted during waiting for WAL streaming',
            gdb.output)

        self.assertIn(
            'WARNING: A backup is in progress, stopping it',
            gdb.output)

        # TODO: check the same for PAGE backup

    # @unittest.skip("skip")
    def test_missing_replication_permission(self):
        """"""
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=self.ptrack,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
#        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # FULL backup
        self.backup_node(backup_dir, 'node', node, options=['--stream'])

        # Create replica
        replica = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'replica'))
        replica.cleanup()
        self.restore_node(backup_dir, 'node', replica)

        # Settings for Replica
        self.set_replica(node, replica)
        replica.slow_start(replica=True)

        node.safe_psql(
            'postgres',
            'CREATE DATABASE backupdb')

        # PG 9.5
        if self.get_version(node) < 90600:
            node.safe_psql(
                'backupdb',
                "CREATE ROLE backup WITH LOGIN; "
                "GRANT CONNECT ON DATABASE backupdb to backup; "
                "GRANT USAGE ON SCHEMA pg_catalog TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack
                "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;")
        # PG 9.6
        elif self.get_version(node) >= 90600 and self.get_version(node) < 100000:
            node.safe_psql(
                'backupdb',
                "CREATE ROLE backup WITH LOGIN; "
                "GRANT CONNECT ON DATABASE backupdb to backup; "
                "GRANT USAGE ON SCHEMA pg_catalog TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack
                "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;")
        # >= 10 && < 15
        elif self.get_version(node) >= 100000 and self.get_version(node) < 150000:
            node.safe_psql(
                'backupdb',
                "CREATE ROLE backup WITH LOGIN; "
                "GRANT CONNECT ON DATABASE backupdb to backup; "
                "GRANT USAGE ON SCHEMA pg_catalog TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack
                "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;"
            )
        # >= 15
        else:
            node.safe_psql(
                'backupdb',
                "CREATE ROLE backup WITH LOGIN; "
                "GRANT CONNECT ON DATABASE backupdb to backup; "
                "GRANT USAGE ON SCHEMA pg_catalog TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack
                "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_start(text, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;"
            )

        if ProbackupTest.enterprise:
            node.safe_psql(
                "backupdb",
                "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;")

        sleep(2)
        replica.promote()

        # Delta backup
        try:
            self.backup_node(
                backup_dir, 'node', replica, backup_type='delta',
                data_dir=replica.data_dir, datname='backupdb', options=['--stream', '-U', 'backup'])
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because incremental backup should not be possible "
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            # 9.5: ERROR:  must be superuser or replication role to run a backup
            # >=9.6: FATAL:  must be superuser or replication role to start walsender
            if self.pg_config_version < 160000:
                self.assertRegex(
                    e.message,
                    "ERROR:  must be superuser or replication role to run a backup|"
                    "FATAL:  must be superuser or replication role to start walsender",
                    "\n Unexpected Error Message: {0}\n CMD: {1}".format(
                        repr(e.message), self.cmd))
            else:
                self.assertRegex(
                    e.message,
                    "FATAL:  permission denied to start WAL sender\n"
                    "DETAIL:  Only roles with the REPLICATION",
                    "\n Unexpected Error Message: {0}\n CMD: {1}".format(
                        repr(e.message), self.cmd))

    # @unittest.skip("skip")
    def test_missing_replication_permission_1(self):
        """"""
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=self.ptrack,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # FULL backup
        self.backup_node(backup_dir, 'node', node, options=['--stream'])

        # Create replica
        replica = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'replica'))
        replica.cleanup()
        self.restore_node(backup_dir, 'node', replica)

        # Settings for Replica
        self.set_replica(node, replica)
        replica.slow_start(replica=True)

        node.safe_psql(
            'postgres',
            'CREATE DATABASE backupdb')

        # PG 9.5
        if self.get_version(node) < 90600:
            node.safe_psql(
                'backupdb',
                "CREATE ROLE backup WITH LOGIN; "
                "GRANT CONNECT ON DATABASE backupdb to backup; "
                "GRANT USAGE ON SCHEMA pg_catalog TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack
                "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;")
        # PG 9.6
        elif self.get_version(node) >= 90600 and self.get_version(node) < 100000:
            node.safe_psql(
                'backupdb',
                "CREATE ROLE backup WITH LOGIN; "
                "GRANT CONNECT ON DATABASE backupdb to backup; "
                "GRANT USAGE ON SCHEMA pg_catalog TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack
                "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;"
            )
        # >= 10 && < 15
        elif self.get_version(node) >= 100000 and self.get_version(node) < 150000:
            node.safe_psql(
                'backupdb',
                "CREATE ROLE backup WITH LOGIN; "
                "GRANT CONNECT ON DATABASE backupdb to backup; "
                "GRANT USAGE ON SCHEMA pg_catalog TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack
                "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;"
            )
        # >= 15
        else:
            node.safe_psql(
                'backupdb',
                "CREATE ROLE backup WITH LOGIN; "
                "GRANT CONNECT ON DATABASE backupdb to backup; "
                "GRANT USAGE ON SCHEMA pg_catalog TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack
                "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_start(text, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;"
            )

        if ProbackupTest.enterprise:
            node.safe_psql(
                "backupdb",
                "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;")

        replica.promote()

        # PAGE
        output = self.backup_node(
            backup_dir, 'node', replica, backup_type='page',
            data_dir=replica.data_dir, datname='backupdb', options=['-U', 'backup'],
            return_id=False)

        self.assertIn(
            'WARNING: Valid full backup on current timeline 2 is not found, trying to look up on previous timelines',
            output)

        # Messages before 14
        # 'WARNING: could not connect to database backupdb: FATAL:  must be superuser or replication role to start walsender'
        # Messages for >=14
        # 'WARNING: could not connect to database backupdb: connection to server on socket "/tmp/.s.PGSQL.30983" failed: FATAL:  must be superuser or replication role to start walsender'
        # 'WARNING: could not connect to database backupdb: connection to server at "localhost" (127.0.0.1), port 29732 failed: FATAL:  must be superuser or replication role to start walsender'
        # OS-dependent messages:
        # 'WARNING: could not connect to database backupdb: connection to server at "localhost" (::1), port 12101 failed: Connection refused\n\tIs the server running on that host and accepting TCP/IP connections?\nconnection to server at "localhost" (127.0.0.1), port 12101 failed: FATAL:  must be superuser or replication role to start walsender'

        if self.pg_config_version < 160000:
            self.assertRegex(
                output,
                r'WARNING: could not connect to database backupdb:[\s\S]*?'
                r'FATAL:  must be superuser or replication role to start walsender')
        else:
            self.assertRegex(
                output,
                r'WARNING: could not connect to database backupdb:[\s\S]*?'
                r'FATAL:  permission denied to start WAL sender')

    # @unittest.skip("skip")
    def test_basic_backup_default_transaction_read_only(self):
        """"""
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'default_transaction_read_only': 'on'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        try:
            node.safe_psql(
                'postgres',
                'create temp table t1()')
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because CREATE TABLE is not possible "
                "in a read-only transaction.\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except QueryException as e:
            self.assertIn(
                "cannot execute CREATE TABLE in a read-only transaction",
                e.message,
                "\n Unexpected Error Message: {0}\n CMD: {1}".format(
                    repr(e.message), self.cmd))

        # FULL backup
        self.backup_node(
            backup_dir, 'node', node,
            options=['--stream'])

        # DELTA backup
        self.backup_node(
            backup_dir, 'node', node, backup_type='delta', options=['--stream'])

        # PAGE backup
        self.backup_node(backup_dir, 'node', node, backup_type='page')

    # @unittest.skip("skip")
    def test_backup_atexit(self):
        """"""
        self._check_gdb_flag_or_skip_test()

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=self.ptrack,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.pgbench_init(scale=5)

        # Full backup in streaming mode
        gdb = self.backup_node(
            backup_dir, 'node', node,
            options=['--stream', '--log-level-file=VERBOSE'], gdb=True)

        # break at datafile copy
        gdb.set_breakpoint('backup_data_file')
        gdb.run_until_break()

        gdb.remove_all_breakpoints()
        gdb._execute('signal SIGINT')
        sleep(1)
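
        # SIGINT triggers pg_probackup's exit handler, which is expected to
        # stop the backup on the server side and mark the catalog entry
        # ERROR; the status check and log assertions below verify both.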

        self.assertEqual(
            self.show_pb(
                backup_dir, 'node')[0]['status'], 'ERROR')

        with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f:
            log_content = f.read()
            #print(log_content)
            self.assertIn(
                'WARNING: A backup is in progress, stopping it.',
                log_content)

            if self.get_version(node) < 150000:
                self.assertIn(
                    'FROM pg_catalog.pg_stop_backup',
                    log_content)
            else:
                self.assertIn(
                    'FROM pg_catalog.pg_backup_stop',
                    log_content)

            self.assertIn(
                'setting its status to ERROR',
                log_content)

    # @unittest.skip("skip")
    def test_pg_stop_backup_missing_permissions(self):
        """"""
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=self.ptrack,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.pgbench_init(scale=5)

        self.simple_bootstrap(node, 'backup')

        if self.get_version(node) < 90600:
            node.safe_psql(
                'postgres',
                'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() FROM backup')
        elif self.get_version(node) >= 90600 and self.get_version(node) < 100000:
            node.safe_psql(
                'postgres',
                'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) FROM backup')
        elif self.get_version(node) < 150000:
            node.safe_psql(
                'postgres',
                'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) FROM backup')
        else:
            node.safe_psql(
                'postgres',
                'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) FROM backup')
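
        # Only the stop-backup function is revoked, so the backup starts
        # normally and fails only at the very end, with an error naming the
        # pg_stop_backup/pg_backup_stop signature of this server version.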

        # Full backup in streaming mode
        try:
            self.backup_node(
                backup_dir, 'node', node,
                options=['--stream', '-U', 'backup'])
            # we should die here because exception is what we expect to happen
            if self.get_version(node) < 150000:
                self.assertEqual(
                    1, 0,
                    "Expecting Error because of missing permissions on pg_stop_backup "
                    "\n Output: {0} \n CMD: {1}".format(
                        repr(self.output), self.cmd))
            else:
                self.assertEqual(
                    1, 0,
                    "Expecting Error because of missing permissions on pg_backup_stop "
                    "\n Output: {0} \n CMD: {1}".format(
                        repr(self.output), self.cmd))
        except ProbackupException as e:
            if self.get_version(node) < 150000:
                self.assertIn(
                    "ERROR:  permission denied for function pg_stop_backup",
                    e.message,
                    "\n Unexpected Error Message: {0}\n CMD: {1}".format(
                        repr(e.message), self.cmd))
            else:
                self.assertIn(
                    "ERROR:  permission denied for function pg_backup_stop",
                    e.message,
                    "\n Unexpected Error Message: {0}\n CMD: {1}".format(
                        repr(e.message), self.cmd))

            self.assertIn(
                "query was: SELECT pg_catalog.txid_snapshot_xmax",
                e.message,
                "\n Unexpected Error Message: {0}\n CMD: {1}".format(
                    repr(e.message), self.cmd))

    # @unittest.skip("skip")
    def test_start_time(self):
        """Test that the --start-time option sets the backup_id and allows restoring by it"""
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=self.ptrack,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # FULL backup
        startTime = int(time())
        self.backup_node(
            backup_dir, 'node', node, backup_type='full',
            options=['--stream', '--start-time={0}'.format(str(startTime))])
        # restore FULL backup by backup_id calculated from start-time
        self.restore_node(
            backup_dir, 'node',
            data_dir=os.path.join(self.tmp_path, self.module_name, self.fname, 'node_restored_full'),
            backup_id=base36enc(startTime))
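
        # --start-time takes a Unix timestamp, and the backup ID is its
        # base36 encoding, so base36enc(startTime) reproduces the ID
        # without reading the catalog.  A minimal sanity sketch (assuming
        # the helper's uppercase base36 alphabet):
        #
        #     assert base36enc(1600000000) == 'QGLJWG'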

        # FULL backup with incorrect start time
        try:
            startTime = str(int(time() - 100000))
            self.backup_node(
                backup_dir, 'node', node, backup_type='full',
                options=['--stream', '--start-time={0}'.format(startTime)])
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                'Expecting Error because start time for new backup must be newer '
                '\n Output: {0} \n CMD: {1}'.format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertRegex(
                e.message,
                r"ERROR: Can't assign backup_id from requested start_time \(\w*\), this time must be later that backup \w*\n",
                "\n Unexpected Error Message: {0}\n CMD: {1}".format(
                    repr(e.message), self.cmd))

        # DELTA backup
        startTime = int(time())
        self.backup_node(
            backup_dir, 'node', node, backup_type='delta',
            options=['--stream', '--start-time={0}'.format(str(startTime))])
        # restore DELTA backup by backup_id calculated from start-time
        self.restore_node(
            backup_dir, 'node',
            data_dir=os.path.join(self.tmp_path, self.module_name, self.fname, 'node_restored_delta'),
            backup_id=base36enc(startTime))

        # PAGE backup
        startTime = int(time())
        self.backup_node(
            backup_dir, 'node', node, backup_type='page',
            options=['--stream', '--start-time={0}'.format(str(startTime))])
        # restore PAGE backup by backup_id calculated from start-time
        self.restore_node(
            backup_dir, 'node',
            data_dir=os.path.join(self.tmp_path, self.module_name, self.fname, 'node_restored_page'),
            backup_id=base36enc(startTime))

        # PTRACK backup
        if self.ptrack:
            node.safe_psql(
                'postgres',
                'create extension ptrack')

            startTime = int(time())
            self.backup_node(
                backup_dir, 'node', node, backup_type='ptrack',
                options=['--stream', '--start-time={0}'.format(str(startTime))])
            # restore PTRACK backup by backup_id calculated from start-time
            self.restore_node(
                backup_dir, 'node',
                data_dir=os.path.join(self.tmp_path, self.module_name, self.fname, 'node_restored_ptrack'),
                backup_id=base36enc(startTime))

    # @unittest.skip("skip")
    def test_start_time_few_nodes(self):
        """Test that backup_ids can be synchronized across different DBs"""
        node1 = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node1'),
            set_replication=True,
            ptrack_enable=self.ptrack,
            initdb_params=['--data-checksums'])

        backup_dir1 = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup1')
        self.init_pb(backup_dir1)
        self.add_instance(backup_dir1, 'node1', node1)
        self.set_archiving(backup_dir1, 'node1', node1)
        node1.slow_start()

        node2 = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node2'),
            set_replication=True,
            ptrack_enable=self.ptrack,
            initdb_params=['--data-checksums'])

        backup_dir2 = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup2')
        self.init_pb(backup_dir2)
        self.add_instance(backup_dir2, 'node2', node2)
        self.set_archiving(backup_dir2, 'node2', node2)
        node2.slow_start()

        # FULL backup
        startTime = str(int(time()))
        self.backup_node(
            backup_dir1, 'node1', node1, backup_type='full',
            options=['--stream', '--start-time={0}'.format(startTime)])
        self.backup_node(
            backup_dir2, 'node2', node2, backup_type='full',
            options=['--stream', '--start-time={0}'.format(startTime)])
        show_backup1 = self.show_pb(backup_dir1, 'node1')[0]
        show_backup2 = self.show_pb(backup_dir2, 'node2')[0]
        self.assertEqual(show_backup1['id'], show_backup2['id'])
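
        # Both instances were given the same --start-time, so their backup
        # IDs are derived from the same timestamp and must match across the
        # two independent catalogs; the same check repeats per backup mode.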

        # DELTA backup
        startTime = str(int(time()))
        self.backup_node(
            backup_dir1, 'node1', node1, backup_type='delta',
            options=['--stream', '--start-time={0}'.format(startTime)])
        self.backup_node(
            backup_dir2, 'node2', node2, backup_type='delta',
            options=['--stream', '--start-time={0}'.format(startTime)])
        show_backup1 = self.show_pb(backup_dir1, 'node1')[1]
        show_backup2 = self.show_pb(backup_dir2, 'node2')[1]
        self.assertEqual(show_backup1['id'], show_backup2['id'])

        # PAGE backup
        startTime = str(int(time()))
        self.backup_node(
            backup_dir1, 'node1', node1, backup_type='page',
            options=['--stream', '--start-time={0}'.format(startTime)])
        self.backup_node(
            backup_dir2, 'node2', node2, backup_type='page',
            options=['--stream', '--start-time={0}'.format(startTime)])
        show_backup1 = self.show_pb(backup_dir1, 'node1')[2]
        show_backup2 = self.show_pb(backup_dir2, 'node2')[2]
        self.assertEqual(show_backup1['id'], show_backup2['id'])

        # PTRACK backup
        if self.ptrack:
            node1.safe_psql(
                'postgres',
                'create extension ptrack')
            node2.safe_psql(
                'postgres',
                'create extension ptrack')

            startTime = str(int(time()))
            self.backup_node(
                backup_dir1, 'node1', node1, backup_type='ptrack',
                options=['--stream', '--start-time={0}'.format(startTime)])
            self.backup_node(
                backup_dir2, 'node2', node2, backup_type='ptrack',
                options=['--stream', '--start-time={0}'.format(startTime)])
            show_backup1 = self.show_pb(backup_dir1, 'node1')[3]
            show_backup2 = self.show_pb(backup_dir2, 'node2')[3]
            self.assertEqual(show_backup1['id'], show_backup2['id'])

    def test_regress_issue_585(self):
        """https://github.com/postgrespro/pg_probackup/issues/585"""
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        # create a couple of files that look like db files
        with open(os.path.join(node.data_dir, 'pg_multixact/offsets/1000'), 'wb') as f:
            pass
        with open(os.path.join(node.data_dir, 'pg_multixact/members/1000'), 'wb') as f:
            pass
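
        # pg_multixact files are not relation data, but their all-digit
        # names resemble relfilenodes; issue #585 was pg_probackup
        # misclassifying such files and emitting "was stored as ... but
        # looks like" warnings on DELTA backup and restore.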

        self.backup_node(
            backup_dir, 'node', node, backup_type='full',
            options=['--stream'])

        output = self.backup_node(
            backup_dir, 'node', node, backup_type='delta',
            options=['--stream'],
            return_id=False,
        )
        self.assertNotRegex(output, r'WARNING: [^\n]* was stored as .* but looks like')

        node.cleanup()

        output = self.restore_node(backup_dir, 'node', node)
        self.assertNotRegex(output, r'WARNING: [^\n]* was stored as .* but looks like')

    def test_2_delta_backups(self):
        """https://github.com/postgrespro/pg_probackup/issues/596"""
        node = self.make_simple_node('node',
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        # self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # FULL
        full_backup_id = self.backup_node(backup_dir, 'node', node, options=["--stream"])

        # delta backup mode
        delta_backup_id1 = self.backup_node(
            backup_dir, 'node', node, backup_type="delta", options=["--stream"])

        delta_backup_id2 = self.backup_node(
            backup_dir, 'node', node, backup_type="delta", options=["--stream"])

        # postgresql.conf and pg_hba.conf shouldn't be copied
        conf_file = os.path.join(backup_dir, 'backups', 'node', delta_backup_id1, 'database', 'postgresql.conf')
        self.assertFalse(
            os.path.exists(conf_file),
            "File should not exist: {0}".format(conf_file))
        conf_file = os.path.join(backup_dir, 'backups', 'node', delta_backup_id2, 'database', 'postgresql.conf')
        self.assertFalse(
            os.path.exists(conf_file),
            "File should not exist: {0}".format(conf_file))