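"""
Compatibility tests for pg_probackup.

CrossCompatibilityTest exercises catchup with a pg_probackup agent built for a
different PostgreSQL major version; CompatibilityTest takes backups with an
old pg_probackup binary and restores, merges and validates them with the new
binary.
"""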
import unittest
import subprocess
import os
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
from sys import exit
import shutil


def check_manual_tests_enabled():
    return 'PGPROBACKUP_MANUAL' in os.environ and os.environ['PGPROBACKUP_MANUAL'] == 'ON'


def check_ssh_agent_path_exists():
    return 'PGPROBACKUP_SSH_AGENT_PATH' in os.environ
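
# Both checks above read environment variables, so the manual cross-version
# test below is enabled roughly like this (a sketch; the exact invocation
# depends on how you drive the test suite):
#   PGPROBACKUP_MANUAL=ON \
#   PGPROBACKUP_SSH_AGENT_PATH=/path/to/old/postgres/bin/ \
#   python -m unittest compatibility_test.CrossCompatibilityTest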


class CrossCompatibilityTest(ProbackupTest, unittest.TestCase):
    @unittest.skipUnless(check_manual_tests_enabled(), 'skip manual test')
    @unittest.skipUnless(check_ssh_agent_path_exists(), 'skip: no ssh agent path set')
    # @unittest.skip("skip")
    def test_catchup_with_different_remote_major_pg(self):
        """
        Description in jira issue PBCKP-236
        This test reproduces the ticket's error using pg_probackup builds for both PGPROEE11 and PGPROEE9_6

        Prerequisites:
        - pg_probackup git tag for PBCKP 2.5.1
        - master pg_probackup build should be made for PGPROEE11
        - agent pg_probackup build should be made for PGPROEE9_6

        Calling the PGPROEE9_6 pg_probackup agent from the PGPROEE11 pg_probackup master for a DELTA
        backup causes the PBCKP-236 problem

        Set the env variables PGPROBACKUP_MANUAL=ON;PGPROBACKUP_SSH_AGENT_PATH=<pg_probackup_ssh_agent_path>
        for the test

        PGPROBACKUP_SSH_AGENT_PATH must point to the agent's bin directory, e.g.
        '/home/avaness/postgres/postgres.build.ee.9.6/bin/',
        without the pg_probackup executable name
        """

        self.verbose = True
        self.remote = True
        # please use your own local path like
        # pgprobackup_ssh_agent_path = '/home/avaness/postgres/postgres.build.clean/bin/'
        pgprobackup_ssh_agent_path = os.environ['PGPROBACKUP_SSH_AGENT_PATH']

        src_pg = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'src'),
            set_replication=True,
            )
        src_pg.slow_start()
        src_pg.safe_psql(
            "postgres",
            "CREATE TABLE ultimate_question AS SELECT 42 AS answer")

        # do full catchup
        dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst'))
        self.catchup_node(
            backup_mode='FULL',
            source_pgdata=src_pg.data_dir,
            destination_node=dst_pg,
            options=['-d', 'postgres', '-p', str(src_pg.port), '--stream']
            )

        dst_options = {'port': str(dst_pg.port)}
        self.set_auto_conf(dst_pg, dst_options)
        dst_pg.slow_start()
        dst_pg.stop()

        src_pg.safe_psql(
            "postgres",
            "CREATE TABLE ultimate_question2 AS SELECT 42 AS answer")

        # do delta catchup with a remote pg_probackup agent built for another PostgreSQL major version
        # this DELTA backup should fail without the PBCKP-236 patch.
        self.catchup_node(
            backup_mode='DELTA',
            source_pgdata=src_pg.data_dir,
            destination_node=dst_pg,
            # --remote-path substitutes a pg_probackup agent compiled with another postgres major version
            options=['-d', 'postgres', '-p', str(src_pg.port), '--stream', '--remote-path=' + pgprobackup_ssh_agent_path]
            )
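
        # For reference: the DELTA catchup above corresponds roughly to this
        # CLI call (a sketch; the test harness assembles the exact command line):
        #   pg_probackup catchup -b DELTA \
        #       --source-pgdata=<src_pg.data_dir> --destination-pgdata=<dst_pg.data_dir> \
        #       -d postgres -p <src port> --stream --remote-path=<agent bin dir>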


class CompatibilityTest(ProbackupTest, unittest.TestCase):

    def setUp(self):
        super().setUp()
        if not self.probackup_old_path:
            self.skipTest('PGPROBACKUPBIN_OLD is not set')
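
    # The tests below follow one pattern: take backups with the OLD binary
    # (old_binary=True; the harness resolves it from the PGPROBACKUPBIN_OLD
    # environment variable named in the skipTest message above), then restore,
    # merge and validate them with the NEW binary, physically comparing pgdata
    # contents when self.paranoia is enabled. For example (the path is an
    # assumption about your environment, not part of the harness):
    #   export PGPROBACKUPBIN_OLD=/opt/pg_probackup-2.4/pg_probackup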

    # @unittest.expectedFailure
    # @unittest.skip("skip")
    def test_backward_compatibility_page(self):
        """Description in jira issue PGPRO-434"""
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir, old_binary=True)
        self.show_pb(backup_dir)

        self.add_instance(backup_dir, 'node', node, old_binary=True)
        self.show_pb(backup_dir)

        self.set_archiving(backup_dir, 'node', node, old_binary=True)
        node.slow_start()

        node.pgbench_init(scale=10)

        # FULL backup with old binary
        self.backup_node(
            backup_dir, 'node', node, old_binary=True)

        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        self.show_pb(backup_dir)

        self.validate_pb(backup_dir)

        # RESTORE old FULL with new binary
        node_restored = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))

        node_restored.cleanup()

        self.restore_node(
            backup_dir, 'node', node_restored, options=["-j", "4"])

        if self.paranoia:
            pgdata_restored = self.pgdata_content(node_restored.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)

        # Page BACKUP with old binary
        pgbench = node.pgbench(
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            options=["-c", "4", "-T", "20"]
        )
        pgbench.wait()
        pgbench.stdout.close()

        self.backup_node(
            backup_dir, 'node', node, backup_type='page',
            old_binary=True)

        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        node_restored.cleanup()
        self.restore_node(
            backup_dir, 'node', node_restored, options=["-j", "4"])

        if self.paranoia:
            pgdata_restored = self.pgdata_content(node_restored.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)

        # Page BACKUP with new binary
        pgbench = node.pgbench(
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            options=["-c", "4", "-T", "20"])

        pgbench.wait()
        pgbench.stdout.close()

        self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        node_restored.cleanup()

        self.restore_node(
            backup_dir, 'node', node_restored, options=["-j", "4"])

        if self.paranoia:
            pgdata_restored = self.pgdata_content(node_restored.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)

        node.safe_psql(
            'postgres',
            'create table tmp as select * from pgbench_accounts where aid < 1000')

        node.safe_psql(
            'postgres',
            'delete from pgbench_accounts')

        node.safe_psql(
            'postgres',
            'VACUUM')

        self.backup_node(backup_dir, 'node', node, backup_type='page')

        pgdata = self.pgdata_content(node.data_dir)

        node_restored.cleanup()
        self.restore_node(
            backup_dir, 'node', node_restored, options=["-j", "4"])

        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)

        node.safe_psql(
            'postgres',
            'insert into pgbench_accounts select * from pgbench_accounts')

        self.backup_node(backup_dir, 'node', node, backup_type='page')

        pgdata = self.pgdata_content(node.data_dir)

        node_restored.cleanup()
        self.restore_node(
            backup_dir, 'node', node_restored, options=["-j", "4"])

        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)

    # @unittest.expectedFailure
    # @unittest.skip("skip")
    def test_backward_compatibility_delta(self):
        """Description in jira issue PGPRO-434"""
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir, old_binary=True)
        self.show_pb(backup_dir)

        self.add_instance(backup_dir, 'node', node, old_binary=True)
        self.show_pb(backup_dir)

        self.set_archiving(backup_dir, 'node', node, old_binary=True)
        node.slow_start()

        node.pgbench_init(scale=10)

        # FULL backup with old binary
        self.backup_node(
            backup_dir, 'node', node, old_binary=True)

        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        self.show_pb(backup_dir)

        self.validate_pb(backup_dir)

        # RESTORE old FULL with new binary
        node_restored = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))

        node_restored.cleanup()

        self.restore_node(
            backup_dir, 'node', node_restored, options=["-j", "4"])

        if self.paranoia:
            pgdata_restored = self.pgdata_content(node_restored.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)

        # Delta BACKUP with old binary
        pgbench = node.pgbench(
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            options=["-c", "4", "-T", "20"]
        )
        pgbench.wait()
        pgbench.stdout.close()

        self.backup_node(
            backup_dir, 'node', node, backup_type='delta',
            old_binary=True)

        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        node_restored.cleanup()
        self.restore_node(
            backup_dir, 'node', node_restored, options=["-j", "4"])

        if self.paranoia:
            pgdata_restored = self.pgdata_content(node_restored.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)

        # Delta BACKUP with new binary
        pgbench = node.pgbench(
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            options=["-c", "4", "-T", "20"]
        )
        pgbench.wait()
        pgbench.stdout.close()

        self.backup_node(backup_dir, 'node', node, backup_type='delta')

        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        node_restored.cleanup()

        self.restore_node(
            backup_dir, 'node', node_restored, options=["-j", "4"])

        if self.paranoia:
            pgdata_restored = self.pgdata_content(node_restored.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)

        node.safe_psql(
            'postgres',
            'create table tmp as select * from pgbench_accounts where aid < 1000')

        node.safe_psql(
            'postgres',
            'delete from pgbench_accounts')

        node.safe_psql(
            'postgres',
            'VACUUM')

        self.backup_node(backup_dir, 'node', node, backup_type='delta')

        pgdata = self.pgdata_content(node.data_dir)

        node_restored.cleanup()
        self.restore_node(
            backup_dir, 'node', node_restored, options=["-j", "4"])

        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)

        node.safe_psql(
            'postgres',
            'insert into pgbench_accounts select * from pgbench_accounts')

        self.backup_node(backup_dir, 'node', node, backup_type='delta')

        pgdata = self.pgdata_content(node.data_dir)

        node_restored.cleanup()
        self.restore_node(
            backup_dir, 'node', node_restored, options=["-j", "4"])

        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)

    # @unittest.expectedFailure
    # @unittest.skip("skip")
    def test_backward_compatibility_ptrack(self):
        """Description in jira issue PGPRO-434"""

        if not self.ptrack:
            self.skipTest('Skipped because ptrack support is disabled')

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir, old_binary=True)
        self.show_pb(backup_dir)

        self.add_instance(backup_dir, 'node', node, old_binary=True)
        self.show_pb(backup_dir)

        self.set_archiving(backup_dir, 'node', node, old_binary=True)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

        node.pgbench_init(scale=10)

        # FULL backup with old binary
        self.backup_node(
            backup_dir, 'node', node, old_binary=True)

        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        self.show_pb(backup_dir)

        self.validate_pb(backup_dir)

        # RESTORE old FULL with new binary
        node_restored = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))

        node_restored.cleanup()

        self.restore_node(
            backup_dir, 'node', node_restored, options=["-j", "4"])

        if self.paranoia:
            pgdata_restored = self.pgdata_content(node_restored.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)

        # ptrack BACKUP with old binary
        pgbench = node.pgbench(
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            options=["-c", "4", "-T", "20"]
        )
        pgbench.wait()
        pgbench.stdout.close()

        self.backup_node(
            backup_dir, 'node', node, backup_type='ptrack',
            old_binary=True)

        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        node_restored.cleanup()
        self.restore_node(
            backup_dir, 'node', node_restored,
            options=[
                "-j", "4",
                "--recovery-target=latest",
                "--recovery-target-action=promote"])

        if self.paranoia:
            pgdata_restored = self.pgdata_content(node_restored.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)

        # Ptrack BACKUP with new binary
        pgbench = node.pgbench(
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            options=["-c", "4", "-T", "20"]
        )
        pgbench.wait()
        pgbench.stdout.close()

        self.backup_node(
            backup_dir, 'node', node, backup_type='ptrack')

        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        node_restored.cleanup()

        self.restore_node(
            backup_dir, 'node', node_restored,
            options=[
                "-j", "4",
                "--recovery-target=latest",
                "--recovery-target-action=promote"])

        if self.paranoia:
            pgdata_restored = self.pgdata_content(node_restored.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)

    # @unittest.expectedFailure
    # @unittest.skip("skip")
    def test_backward_compatibility_compression(self):
        """Description in jira issue PGPRO-434"""
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir, old_binary=True)
        self.add_instance(backup_dir, 'node', node, old_binary=True)

        self.set_archiving(backup_dir, 'node', node, old_binary=True)
        node.slow_start()

        node.pgbench_init(scale=10)

        # FULL backup with OLD binary
        backup_id = self.backup_node(
            backup_dir, 'node', node,
            old_binary=True,
            options=['--compress'])

        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        # restore OLD FULL with new binary
        node_restored = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))

        node_restored.cleanup()

        self.restore_node(
            backup_dir, 'node', node_restored,
            options=["-j", "4"])

        if self.paranoia:
            pgdata_restored = self.pgdata_content(node_restored.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)

        # PAGE backup with OLD binary
        pgbench = node.pgbench(
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            options=["-c", "4", "-T", "10"])
        pgbench.wait()
        pgbench.stdout.close()

        self.backup_node(
            backup_dir, 'node', node,
            backup_type='page',
            old_binary=True,
            options=['--compress'])

        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        node_restored.cleanup()
        self.restore_node(
            backup_dir, 'node', node_restored,
            options=["-j", "4"])

        if self.paranoia:
            pgdata_restored = self.pgdata_content(node_restored.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)

        # PAGE backup with new binary
        pgbench = node.pgbench(
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            options=["-c", "4", "-T", "10"])
        pgbench.wait()
        pgbench.stdout.close()

        self.backup_node(
            backup_dir, 'node', node,
            backup_type='page',
            options=['--compress'])

        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        node_restored.cleanup()

        self.restore_node(
            backup_dir, 'node', node_restored,
            options=["-j", "4"])

        if self.paranoia:
            pgdata_restored = self.pgdata_content(node_restored.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)

        # Delta backup with old binary
        self.delete_pb(backup_dir, 'node', backup_id)

        self.backup_node(
            backup_dir, 'node', node,
            old_binary=True,
            options=['--compress'])

        pgbench = node.pgbench(
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            options=["-c", "4", "-T", "10"])

        pgbench.wait()
        pgbench.stdout.close()

        self.backup_node(
            backup_dir, 'node', node,
            backup_type='delta',
            options=['--compress'],
            old_binary=True)

        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        node_restored.cleanup()

        self.restore_node(
            backup_dir, 'node', node_restored,
            options=["-j", "4"])

        if self.paranoia:
            pgdata_restored = self.pgdata_content(node_restored.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)

        # Delta backup with new binary
        pgbench = node.pgbench(
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            options=["-c", "4", "-T", "10"])

        pgbench.wait()
        pgbench.stdout.close()

        self.backup_node(
            backup_dir, 'node', node,
            backup_type='delta',
            options=['--compress'])

        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        node_restored.cleanup()

        self.restore_node(
            backup_dir, 'node', node_restored,
            options=["-j", "4"])

        if self.paranoia:
            pgdata_restored = self.pgdata_content(node_restored.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)

    # @unittest.expectedFailure
    # @unittest.skip("skip")
    def test_backward_compatibility_merge(self):
        """
        Create node, take FULL and PAGE backups with old binary,
        merge them with new binary
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir, old_binary=True)
        self.add_instance(backup_dir, 'node', node, old_binary=True)

        self.set_archiving(backup_dir, 'node', node, old_binary=True)
        node.slow_start()

        # FULL backup with OLD binary
        self.backup_node(
            backup_dir, 'node', node,
            old_binary=True)

        node.pgbench_init(scale=1)

        # PAGE backup with OLD binary
        backup_id = self.backup_node(
            backup_dir, 'node', node,
            backup_type='page', old_binary=True)

        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        self.merge_backup(backup_dir, "node", backup_id)
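
        # merge_backup consolidated the FULL + PAGE chain into a single full
        # backup (in recent pg_probackup versions the merged backup keeps the
        # PAGE backup's ID), so the restore below runs against the merged backup.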

        self.show_pb(backup_dir, as_text=True, as_json=False)

        # restore OLD FULL with new binary
        node_restored = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))

        node_restored.cleanup()

        self.restore_node(
            backup_dir, 'node', node_restored, options=["-j", "4"])

        if self.paranoia:
            pgdata_restored = self.pgdata_content(node_restored.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)

    # @unittest.expectedFailure
    # @unittest.skip("skip")
    def test_backward_compatibility_merge_1(self):
        """
        Create node, take FULL and PAGE backups with old binary,
        merge them with new binary.
        old binary version must be <= 2.2.7
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir, old_binary=True)
        self.add_instance(backup_dir, 'node', node, old_binary=True)

        self.set_archiving(backup_dir, 'node', node, old_binary=True)
        node.slow_start()

        node.pgbench_init(scale=20)

        # FULL backup with OLD binary
        self.backup_node(backup_dir, 'node', node, old_binary=True)

        pgbench = node.pgbench(
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            options=["-c", "1", "-T", "10", "--no-vacuum"])
        pgbench.wait()
        pgbench.stdout.close()

        # PAGE1 backup with OLD binary
        self.backup_node(
            backup_dir, 'node', node, backup_type='page', old_binary=True)

        node.safe_psql(
            'postgres',
            'DELETE from pgbench_accounts')

        node.safe_psql(
            'postgres',
            'VACUUM pgbench_accounts')

        # PAGE2 backup with OLD binary
        backup_id = self.backup_node(
            backup_dir, 'node', node, backup_type='page', old_binary=True)

        pgdata = self.pgdata_content(node.data_dir)

        # merge chain created by old binary with new binary
        output = self.merge_backup(backup_dir, "node", backup_id)

        # check that in-place merge is disabled
        self.assertIn(
            "WARNING: In-place merge is disabled "
            "because of storage format incompatibility", output)

        # restore merged backup
        node_restored = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
        node_restored.cleanup()

        self.restore_node(backup_dir, 'node', node_restored)

        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)

    # @unittest.expectedFailure
    # @unittest.skip("skip")
    def test_backward_compatibility_merge_2(self):
        """
        Create node, take FULL and PAGE backups with old binary
        and a final PAGE backup with new binary,
        merge them one by one with new binary.
        old binary version must be <= 2.2.7
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir, old_binary=True)
        self.add_instance(backup_dir, 'node', node, old_binary=True)

        self.set_archiving(backup_dir, 'node', node, old_binary=True)
        node.slow_start()

        node.pgbench_init(scale=50)

        node.safe_psql(
            'postgres',
            'VACUUM pgbench_accounts')

        node_restored = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))

        # FULL backup with OLD binary
        self.backup_node(backup_dir, 'node', node, old_binary=True)

        pgbench = node.pgbench(
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            options=["-c", "1", "-T", "10", "--no-vacuum"])
        pgbench.wait()
        pgbench.stdout.close()

        # PAGE1 backup with OLD binary
        page1 = self.backup_node(
            backup_dir, 'node', node,
            backup_type='page', old_binary=True)

        pgdata1 = self.pgdata_content(node.data_dir)

        node.safe_psql(
            'postgres',
            "DELETE from pgbench_accounts where ctid > '(10,1)'")

        # PAGE2 backup with OLD binary
        page2 = self.backup_node(
            backup_dir, 'node', node,
            backup_type='page', old_binary=True)

        pgdata2 = self.pgdata_content(node.data_dir)

        # PAGE3 backup with OLD binary
        page3 = self.backup_node(
            backup_dir, 'node', node,
            backup_type='page', old_binary=True)

        pgdata3 = self.pgdata_content(node.data_dir)

        pgbench = node.pgbench(
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            options=["-c", "1", "-T", "10", "--no-vacuum"])
        pgbench.wait()
        pgbench.stdout.close()

        # PAGE4 backup with NEW binary
        page4 = self.backup_node(
            backup_dir, 'node', node, backup_type='page')
        pgdata4 = self.pgdata_content(node.data_dir)

        # merge backups one by one and check data correctness
        # merge PAGE1
        self.merge_backup(
            backup_dir, "node", page1, options=['--log-level-file=VERBOSE'])

        # check data correctness for PAGE1
        node_restored.cleanup()
        self.restore_node(
            backup_dir, 'node', node_restored, backup_id=page1,
            options=['--log-level-file=VERBOSE'])
        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        self.compare_pgdata(pgdata1, pgdata_restored)

        # merge PAGE2
        self.merge_backup(backup_dir, "node", page2)

        # check data correctness for PAGE2
        node_restored.cleanup()
        self.restore_node(backup_dir, 'node', node_restored, backup_id=page2)
        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        self.compare_pgdata(pgdata2, pgdata_restored)

        # merge PAGE3
        self.show_pb(backup_dir, 'node', page3)
        self.merge_backup(backup_dir, "node", page3)
        self.show_pb(backup_dir, 'node', page3)

        # check data correctness for PAGE3
        node_restored.cleanup()
        self.restore_node(backup_dir, 'node', node_restored, backup_id=page3)
        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        self.compare_pgdata(pgdata3, pgdata_restored)

        # merge PAGE4
        self.merge_backup(backup_dir, "node", page4)

        # check data correctness for PAGE4
        node_restored.cleanup()
        self.restore_node(backup_dir, 'node', node_restored, backup_id=page4)
        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        self.compare_pgdata(pgdata4, pgdata_restored)

    # @unittest.expectedFailure
    # @unittest.skip("skip")
    def test_backward_compatibility_merge_3(self):
        """
        Same as test_backward_compatibility_merge_2,
        but with compressed backups.
        old binary version must be <= 2.2.7
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir, old_binary=True)
        self.add_instance(backup_dir, 'node', node, old_binary=True)

        self.set_archiving(backup_dir, 'node', node, old_binary=True)
        node.slow_start()

        node.pgbench_init(scale=50)

        node.safe_psql(
            'postgres',
            'VACUUM pgbench_accounts')

        node_restored = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))

        # FULL backup with OLD binary
        self.backup_node(
            backup_dir, 'node', node, old_binary=True, options=['--compress'])

        pgbench = node.pgbench(
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            options=["-c", "1", "-T", "10", "--no-vacuum"])
        pgbench.wait()
        pgbench.stdout.close()

        # PAGE1 backup with OLD binary
        page1 = self.backup_node(
            backup_dir, 'node', node,
            backup_type='page', old_binary=True, options=['--compress'])

        pgdata1 = self.pgdata_content(node.data_dir)

        node.safe_psql(
            'postgres',
            "DELETE from pgbench_accounts where ctid > '(10,1)'")

        # PAGE2 backup with OLD binary
        page2 = self.backup_node(
            backup_dir, 'node', node,
            backup_type='page', old_binary=True, options=['--compress'])

        pgdata2 = self.pgdata_content(node.data_dir)

        # PAGE3 backup with OLD binary
        page3 = self.backup_node(
            backup_dir, 'node', node,
            backup_type='page', old_binary=True, options=['--compress'])

        pgdata3 = self.pgdata_content(node.data_dir)

        pgbench = node.pgbench(
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            options=["-c", "1", "-T", "10", "--no-vacuum"])
        pgbench.wait()
        pgbench.stdout.close()

        # PAGE4 backup with NEW binary
        page4 = self.backup_node(
            backup_dir, 'node', node, backup_type='page', options=['--compress'])
        pgdata4 = self.pgdata_content(node.data_dir)

        # merge backups one by one and check data correctness
        # merge PAGE1
        self.merge_backup(
            backup_dir, "node", page1, options=['--log-level-file=VERBOSE'])

        # check data correctness for PAGE1
        node_restored.cleanup()
        self.restore_node(
            backup_dir, 'node', node_restored, backup_id=page1,
            options=['--log-level-file=VERBOSE'])
        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        self.compare_pgdata(pgdata1, pgdata_restored)

        # merge PAGE2
        self.merge_backup(backup_dir, "node", page2)

        # check data correctness for PAGE2
        node_restored.cleanup()
        self.restore_node(backup_dir, 'node', node_restored, backup_id=page2)
        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        self.compare_pgdata(pgdata2, pgdata_restored)

        # merge PAGE3
        self.show_pb(backup_dir, 'node', page3)
        self.merge_backup(backup_dir, "node", page3)
        self.show_pb(backup_dir, 'node', page3)

        # check data correctness for PAGE3
        node_restored.cleanup()
        self.restore_node(backup_dir, 'node', node_restored, backup_id=page3)
        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        self.compare_pgdata(pgdata3, pgdata_restored)

        # merge PAGE4
        self.merge_backup(backup_dir, "node", page4)

        # check data correctness for PAGE4
        node_restored.cleanup()
        self.restore_node(backup_dir, 'node', node_restored, backup_id=page4)
        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        self.compare_pgdata(pgdata4, pgdata_restored)

    # @unittest.expectedFailure
    # @unittest.skip("skip")
    def test_backward_compatibility_merge_4(self):
        """
        Start merge between minor versions, crash and retry it.
        old binary version must be <= 2.4.0
        """
        if self.version_to_num(self.old_probackup_version) > self.version_to_num('2.4.0'):
            self.assertTrue(
                False, 'You need pg_probackup old_binary <= 2.4.0 for this test')

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir, old_binary=True)
        self.add_instance(backup_dir, 'node', node, old_binary=True)

        self.set_archiving(backup_dir, 'node', node, old_binary=True)
        node.slow_start()

        node.pgbench_init(scale=20)

        node.safe_psql(
            'postgres',
            'VACUUM pgbench_accounts')

        node_restored = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))

        # FULL backup with OLD binary
        self.backup_node(
            backup_dir, 'node', node, old_binary=True, options=['--compress'])

        pgbench = node.pgbench(
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            options=["-c", "1", "-T", "20", "--no-vacuum"])
        pgbench.wait()
        pgbench.stdout.close()

        # PAGE backup with NEW binary
        page_id = self.backup_node(
            backup_dir, 'node', node, backup_type='page', options=['--compress'])
        pgdata = self.pgdata_content(node.data_dir)

        # start merging the PAGE backup under gdb
        gdb = self.merge_backup(backup_dir, "node", page_id, gdb=True)

        # break on file rename, let the merge progress for a while,
        # then kill the process to simulate a crash mid-merge
        gdb.set_breakpoint('rename')
        gdb.run_until_break()
        gdb.continue_execution_until_break(500)
        gdb._execute('signal SIGKILL')

        try:
            self.merge_backup(backup_dir, "node", page_id)
            self.assertEqual(
                1, 0,
                "Expecting Error because of format changes.\n "
                "Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                "ERROR: Retry of failed merge for backups with different "
                "between minor versions is forbidden to avoid data corruption "
                "because of storage format changes introduced in 2.4.0 version, "
                "please take a new full backup",
                e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

    # @unittest.expectedFailure
    # @unittest.skip("skip")
    def test_backward_compatibility_merge_5(self):
        """
        Create node, take FULL and PAGE backups with old binary,
        merge them with new binary.
        old binary version must be >= STORAGE_FORMAT_VERSION (2.4.4)
        """
        if self.version_to_num(self.old_probackup_version) < self.version_to_num('2.4.4'):
            self.assertTrue(
                False, 'OLD pg_probackup binary must be >= 2.4.4 for this test')

        self.assertNotEqual(
            self.version_to_num(self.old_probackup_version),
            self.version_to_num(self.probackup_version))

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir, old_binary=True)
        self.add_instance(backup_dir, 'node', node, old_binary=True)

        self.set_archiving(backup_dir, 'node', node, old_binary=True)
        node.slow_start()

        node.pgbench_init(scale=20)

        # FULL backup with OLD binary
        self.backup_node(backup_dir, 'node', node, old_binary=True)

        pgbench = node.pgbench(
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            options=["-c", "1", "-T", "10", "--no-vacuum"])
        pgbench.wait()
        pgbench.stdout.close()

        # PAGE1 backup with OLD binary
        self.backup_node(
            backup_dir, 'node', node, backup_type='page', old_binary=True)

        node.safe_psql(
            'postgres',
            'DELETE from pgbench_accounts')

        node.safe_psql(
            'postgres',
            'VACUUM pgbench_accounts')

        # PAGE2 backup with OLD binary
        backup_id = self.backup_node(
            backup_dir, 'node', node, backup_type='page', old_binary=True)

        pgdata = self.pgdata_content(node.data_dir)

        # merge chain created by old binary with new binary
        output = self.merge_backup(backup_dir, "node", backup_id)

        # check that in-place merge is NOT disabled (formats are compatible)
        self.assertNotIn(
            "WARNING: In-place merge is disabled "
            "because of storage format incompatibility", output)

        # restore merged backup
        node_restored = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
        node_restored.cleanup()

        self.restore_node(backup_dir, 'node', node_restored)

        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)

    # @unittest.skip("skip")
    def test_page_vacuum_truncate(self):
        """
        make node, create table, take full backup,
        delete all data, vacuum relation,
        take page backup, insert some data,
        take second page backup,
        restore each backup using new binary
        and check data correctness
        old binary should be 2.2.x version
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir, old_binary=True)
        self.add_instance(backup_dir, 'node', node, old_binary=True)
        self.set_archiving(backup_dir, 'node', node, old_binary=True)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "create sequence t_seq; "
            "create table t_heap as select i as id, "
            "md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,1024) i")

        node.safe_psql(
            "postgres",
            "vacuum t_heap")

        id1 = self.backup_node(backup_dir, 'node', node, old_binary=True)
        pgdata1 = self.pgdata_content(node.data_dir)

        node.safe_psql(
            "postgres",
            "delete from t_heap")

        node.safe_psql(
            "postgres",
            "vacuum t_heap")

        id2 = self.backup_node(
            backup_dir, 'node', node, backup_type='page', old_binary=True)
        pgdata2 = self.pgdata_content(node.data_dir)

        node.safe_psql(
            "postgres",
            "insert into t_heap select i as id, "
            "md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,1) i")

        id3 = self.backup_node(
            backup_dir, 'node', node, backup_type='page', old_binary=True)
        pgdata3 = self.pgdata_content(node.data_dir)

        node_restored = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
        node_restored.cleanup()

        self.restore_node(
            backup_dir, 'node', node_restored,
            data_dir=node_restored.data_dir, backup_id=id1)

        # Physical comparison
        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        self.compare_pgdata(pgdata1, pgdata_restored)

        self.set_auto_conf(node_restored, {'port': node_restored.port})
        node_restored.slow_start()
        node_restored.cleanup()

        self.restore_node(
            backup_dir, 'node', node_restored,
            data_dir=node_restored.data_dir, backup_id=id2)

        # Physical comparison
        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        self.compare_pgdata(pgdata2, pgdata_restored)

        self.set_auto_conf(node_restored, {'port': node_restored.port})
        node_restored.slow_start()
        node_restored.cleanup()

        self.restore_node(
            backup_dir, 'node', node_restored,
            data_dir=node_restored.data_dir, backup_id=id3)

        # Physical comparison
        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        self.compare_pgdata(pgdata3, pgdata_restored)

        self.set_auto_conf(node_restored, {'port': node_restored.port})
        node_restored.slow_start()
        node_restored.cleanup()

    # @unittest.skip("skip")
    def test_page_vacuum_truncate_compression(self):
        """
        make node, create table, take full backup,
        delete all data, vacuum relation,
        take page backup, insert some data,
        take second page backup,
        restore latest page backup using new binary
        and check data correctness
        old binary should be 2.2.x version
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir, old_binary=True)
        self.add_instance(backup_dir, 'node', node, old_binary=True)
        self.set_archiving(backup_dir, 'node', node, old_binary=True)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "create sequence t_seq; "
            "create table t_heap as select i as id, "
            "md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,1024) i")

        node.safe_psql(
            "postgres",
            "vacuum t_heap")

        self.backup_node(
            backup_dir, 'node', node, old_binary=True, options=['--compress'])

        node.safe_psql(
            "postgres",
            "delete from t_heap")

        node.safe_psql(
            "postgres",
            "vacuum t_heap")

        self.backup_node(
            backup_dir, 'node', node, backup_type='page',
            old_binary=True, options=['--compress'])

        node.safe_psql(
            "postgres",
            "insert into t_heap select i as id, "
            "md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,1) i")

        self.backup_node(
            backup_dir, 'node', node, backup_type='page',
            old_binary=True, options=['--compress'])

        pgdata = self.pgdata_content(node.data_dir)

        node_restored = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
        node_restored.cleanup()

        self.restore_node(backup_dir, 'node', node_restored)

        # Physical comparison
        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)

        self.set_auto_conf(node_restored, {'port': node_restored.port})
        node_restored.slow_start()

    # @unittest.skip("skip")
    def test_page_vacuum_truncate_compressed_1(self):
        """
        make node, create table, take full backup,
        delete all data, vacuum relation,
        take page backup, insert some data,
        take second page backup,
        restore each backup using new binary
        and check data correctness
        old binary should be 2.2.x version
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir, old_binary=True)
        self.add_instance(backup_dir, 'node', node, old_binary=True)
        self.set_archiving(backup_dir, 'node', node, old_binary=True)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "create sequence t_seq; "
            "create table t_heap as select i as id, "
            "md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,1024) i")

        node.safe_psql(
            "postgres",
            "vacuum t_heap")

        id1 = self.backup_node(
            backup_dir, 'node', node,
            old_binary=True, options=['--compress'])
        pgdata1 = self.pgdata_content(node.data_dir)

        node.safe_psql(
            "postgres",
            "delete from t_heap")

        node.safe_psql(
            "postgres",
            "vacuum t_heap")

        id2 = self.backup_node(
            backup_dir, 'node', node, backup_type='page',
            old_binary=True, options=['--compress'])
        pgdata2 = self.pgdata_content(node.data_dir)

        node.safe_psql(
            "postgres",
            "insert into t_heap select i as id, "
            "md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,1) i")

        id3 = self.backup_node(
            backup_dir, 'node', node, backup_type='page',
            old_binary=True, options=['--compress'])
        pgdata3 = self.pgdata_content(node.data_dir)

        node_restored = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
        node_restored.cleanup()

        self.restore_node(
            backup_dir, 'node', node_restored,
            data_dir=node_restored.data_dir, backup_id=id1)

        # Physical comparison
        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        self.compare_pgdata(pgdata1, pgdata_restored)

        self.set_auto_conf(node_restored, {'port': node_restored.port})
        node_restored.slow_start()
        node_restored.cleanup()

        self.restore_node(
            backup_dir, 'node', node_restored,
            data_dir=node_restored.data_dir, backup_id=id2)

        # Physical comparison
        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        self.compare_pgdata(pgdata2, pgdata_restored)

        self.set_auto_conf(node_restored, {'port': node_restored.port})
        node_restored.slow_start()
        node_restored.cleanup()

        self.restore_node(
            backup_dir, 'node', node_restored,
            data_dir=node_restored.data_dir, backup_id=id3)

        # Physical comparison
        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        self.compare_pgdata(pgdata3, pgdata_restored)

        self.set_auto_conf(node_restored, {'port': node_restored.port})
        node_restored.slow_start()
        node_restored.cleanup()

    # @unittest.skip("skip")
    def test_hidden_files(self):
        """
        old_version should be < 2.3.0
        Create a hidden file in pgdata, take a backup
        with the old binary, then try to delete the backup
        with the new binary
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir, old_binary=True)
        self.add_instance(backup_dir, 'node', node, old_binary=True)
        node.slow_start()

        open(os.path.join(node.data_dir, ".hidden_stuff"), 'a').close()

        backup_id = self.backup_node(
            backup_dir, 'node', node, old_binary=True, options=['--stream'])

        self.delete_pb(backup_dir, 'node', backup_id)

    # @unittest.skip("skip")
    def test_compatibility_tablespace(self):
        """
        https://github.com/postgrespro/pg_probackup/issues/348
        """
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node, old_binary=True)
        node.slow_start()

        backup_id = self.backup_node(
            backup_dir, 'node', node, backup_type="full",
            options=["-j", "4", "--stream"], old_binary=True)

        tblspace_old_path = self.get_tblspace_path(node, 'tblspace_old')

        self.create_tblspace_in_node(
            node, 'tblspace',
            tblspc_path=tblspace_old_path)

        node.safe_psql(
            "postgres",
            "create table t_heap_lame tablespace tblspace "
            "as select 1 as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,1000) i")

        tblspace_new_path = self.get_tblspace_path(node, 'tblspace_new')

        node_restored = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
        node_restored.cleanup()

        try:
            self.restore_node(
                backup_dir, 'node', node_restored,
                options=[
                    "-j", "4",
                    "-T", "{0}={1}".format(
                        tblspace_old_path, tblspace_new_path)])
            # we should not get here: the restore above is expected to raise
            self.assertEqual(
                1, 0,
                "Expecting Error because tablespace mapping is incorrect"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            # the misspelling below matches the actual message emitted by the binary
            self.assertIn(
                'ERROR: Backup {0} has no tablespaceses, '
                'nothing to remap'.format(backup_id),
                e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        self.backup_node(
            backup_dir, 'node', node, backup_type="delta",
            options=["-j", "4", "--stream"], old_binary=True)

        self.restore_node(
            backup_dir, 'node', node_restored,
            options=[
                "-j", "4",
                "-T", "{0}={1}".format(
                    tblspace_old_path, tblspace_new_path)])

        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)
            pgdata_restored = self.pgdata_content(node_restored.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)