import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
from testgres import QueryException
from datetime import datetime, timedelta
import subprocess
import gzip
import shutil
import time

class PageTest(ProbackupTest, unittest.TestCase):

    # @unittest.skip("skip")
    def test_basic_page_vacuum_truncate(self):
        """
        make node, create table, take full backup,
        delete last 3 pages, vacuum relation,
        take page backup, take second page backup,
        restore last page backup and check data correctness
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'checkpoint_timeout': '300s'})

        node_restored = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node_restored.cleanup()
        node.slow_start()
        self.create_tblspace_in_node(node, 'somedata')

        node.safe_psql(
            "postgres",
            "create sequence t_seq; "
            "create table t_heap tablespace somedata as select i as id, "
            "md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,1024) i;")

        node.safe_psql(
            "postgres",
            "vacuum t_heap")

        self.backup_node(backup_dir, 'node', node)

        # TODO: make it dynamic
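        # Deleting every tuple from block 11 onward empties the trailing heap
        # pages, so the VACUUM below can truncate the relation; the PAGE
        # backups that follow must capture that truncation.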
        node.safe_psql(
            "postgres",
            "delete from t_heap where ctid >= '(11,0)'")
        node.safe_psql(
            "postgres",
            "vacuum t_heap")

        self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        old_tablespace = self.get_tblspace_path(node, 'somedata')
        new_tablespace = self.get_tblspace_path(node_restored, 'somedata_new')

        self.restore_node(
            backup_dir, 'node', node_restored,
            options=[
                "-j", "4",
                "-T", "{0}={1}".format(old_tablespace, new_tablespace)])

        # Physical comparison
        if self.paranoia:
            pgdata_restored = self.pgdata_content(node_restored.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)

        self.set_auto_conf(node_restored, {'port': node_restored.port})
        node_restored.slow_start()

        # Logical comparison
        result1 = node.table_checksum("t_heap")
        result2 = node_restored.table_checksum("t_heap")

        self.assertEqual(result1, result2)

    # @unittest.skip("skip")
    def test_page_vacuum_truncate_1(self):
        """
        make node, create table, take full backup,
        delete all data, vacuum relation,
        take page backup, insert some data,
        take second page backup and check data correctness
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "create sequence t_seq; "
            "create table t_heap as select i as id, "
            "md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,1024) i")

        node.safe_psql(
            "postgres",
            "vacuum t_heap")

        self.backup_node(backup_dir, 'node', node)

        node.safe_psql(
            "postgres",
            "delete from t_heap")

        node.safe_psql(
            "postgres",
            "vacuum t_heap")

        self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        node.safe_psql(
            "postgres",
            "insert into t_heap select i as id, "
            "md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,1) i")

        self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        pgdata = self.pgdata_content(node.data_dir)

        node_restored = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
        node_restored.cleanup()

        self.restore_node(backup_dir, 'node', node_restored)

        # Physical comparison
        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)

        self.set_auto_conf(node_restored, {'port': node_restored.port})
        node_restored.slow_start()

    # @unittest.skip("skip")
    def test_page_stream(self):
        """
        make archive node, take full and page stream backups,
        restore them and check data correctness
        """
        self.maxDiff = None
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'checkpoint_timeout': '30s'}
            )

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # FULL BACKUP
        node.safe_psql(
            "postgres",
            "create table t_heap as select i as id, md5(i::text) as text, "
            "md5(i::text)::tsvector as tsvector "
            "from generate_series(0,100) i")

        full_result = node.table_checksum("t_heap")
        full_backup_id = self.backup_node(
            backup_dir, 'node', node,
            backup_type='full', options=['--stream'])
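        # --stream: WAL needed by the backup is pulled over a replication
        # connection and stored inside the backup itself, instead of being
        # fetched from the WAL archive (set_replication=True enables this).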

        # PAGE BACKUP
        node.safe_psql(
            "postgres",
            "insert into t_heap select i as id, md5(i::text) as text, "
            "md5(i::text)::tsvector as tsvector "
            "from generate_series(100,200) i")
        page_result = node.table_checksum("t_heap")
        page_backup_id = self.backup_node(
            backup_dir, 'node', node,
            backup_type='page', options=['--stream', '-j', '4'])

        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        # Drop Node
        node.cleanup()

        # Check full backup
        self.assertIn(
            "INFO: Restore of backup {0} completed.".format(full_backup_id),
            self.restore_node(
                backup_dir, 'node', node,
                backup_id=full_backup_id, options=["-j", "4"]),
            '\n Unexpected Error Message: {0}\n'
            ' CMD: {1}'.format(repr(self.output), self.cmd))

        node.slow_start()
        full_result_new = node.table_checksum("t_heap")
        self.assertEqual(full_result, full_result_new)
        node.cleanup()

        # Check page backup
        self.assertIn(
            "INFO: Restore of backup {0} completed.".format(page_backup_id),
            self.restore_node(
                backup_dir, 'node', node,
                backup_id=page_backup_id, options=["-j", "4"]),
            '\n Unexpected Error Message: {0}\n'
            ' CMD: {1}'.format(repr(self.output), self.cmd))

        # GET RESTORED PGDATA AND COMPARE
        if self.paranoia:
            pgdata_restored = self.pgdata_content(node.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)

        node.slow_start()
        page_result_new = node.table_checksum("t_heap")
        self.assertEqual(page_result, page_result_new)
        node.cleanup()

    # @unittest.skip("skip")
    def test_page_archive(self):
        """
        make archive node, take full and page archive backups,
        restore them and check data correctness
        """
        self.maxDiff = None
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'checkpoint_timeout': '30s'}
            )

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # FULL BACKUP
        node.safe_psql(
            "postgres",
            "create table t_heap as select i as id, md5(i::text) as text, "
            "md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
        full_result = node.table_checksum("t_heap")
        full_backup_id = self.backup_node(
            backup_dir, 'node', node, backup_type='full')

        # PAGE BACKUP
        node.safe_psql(
            "postgres",
            "insert into t_heap select i as id, "
            "md5(i::text) as text, md5(i::text)::tsvector as tsvector "
            "from generate_series(100, 200) i")
        page_result = node.table_checksum("t_heap")
        page_backup_id = self.backup_node(
            backup_dir, 'node', node,
            backup_type='page', options=["-j", "4"])

        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        # Drop Node
        node.cleanup()

        # Restore and check full backup
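        # --immediate stops recovery as soon as a consistent state is reached;
        # --recovery-target-action=promote then ends recovery so the node can
        # be started and queried.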
        self.assertIn("INFO: Restore of backup {0} completed.".format(
            full_backup_id),
            self.restore_node(
                backup_dir, 'node', node,
                backup_id=full_backup_id,
                options=[
                    "-j", "4",
                    "--immediate",
                    "--recovery-target-action=promote"]),
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(self.output), self.cmd))

        node.slow_start()

        full_result_new = node.table_checksum("t_heap")
        self.assertEqual(full_result, full_result_new)
        node.cleanup()

        # Restore and check page backup
        self.assertIn(
            "INFO: Restore of backup {0} completed.".format(page_backup_id),
            self.restore_node(
                backup_dir, 'node', node,
                backup_id=page_backup_id,
                options=[
                    "-j", "4",
                    "--immediate",
                    "--recovery-target-action=promote"]),
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(self.output), self.cmd))

        # GET RESTORED PGDATA AND COMPARE
        if self.paranoia:
            pgdata_restored = self.pgdata_content(node.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)

        node.slow_start()

        page_result_new = node.table_checksum("t_heap")
        self.assertEqual(page_result, page_result_new)
        node.cleanup()

    # @unittest.skip("skip")
    def test_page_multiple_segments(self):
        """
        Make node, create table with multiple segments,
        write some data to it, check page and data correctness
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'fsync': 'off',
                'shared_buffers': '1GB',
                'maintenance_work_mem': '1GB',
                'full_page_writes': 'off'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        self.create_tblspace_in_node(node, 'somedata')

        # CREATE TABLE
        node.pgbench_init(scale=100, options=['--tablespace=somedata'])
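        # scale=100 gives pgbench_accounts ten million rows (over 1 GB of heap),
        # so the relation spans more than one 1 GB segment file on disk.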
        # FULL BACKUP
        self.backup_node(backup_dir, 'node', node)

        # PGBENCH STUFF
        pgbench = node.pgbench(options=['-T', '50', '-c', '1', '--no-vacuum'])
        pgbench.wait()

        # GET LOGICAL CONTENT FROM NODE
        result = node.table_checksum("pgbench_accounts")
        # PAGE BACKUP
        self.backup_node(backup_dir, 'node', node, backup_type='page')

        # GET PHYSICAL CONTENT FROM NODE
        pgdata = self.pgdata_content(node.data_dir)

        # RESTORE NODE
        restored_node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'restored_node'))
        restored_node.cleanup()
        tblspc_path = self.get_tblspace_path(node, 'somedata')
        tblspc_path_new = self.get_tblspace_path(
            restored_node, 'somedata_restored')

        self.restore_node(
            backup_dir, 'node', restored_node,
            options=[
                "-j", "4",
                "-T", "{0}={1}".format(tblspc_path, tblspc_path_new)])

        # GET PHYSICAL CONTENT FROM NODE_RESTORED
        pgdata_restored = self.pgdata_content(restored_node.data_dir)

        # START RESTORED NODE
        self.set_auto_conf(restored_node, {'port': restored_node.port})
        restored_node.slow_start()

        result_new = restored_node.table_checksum("pgbench_accounts")

        # COMPARE RESTORED FILES
        self.assertEqual(result, result_new, 'data is lost')

        if self.paranoia:
            self.compare_pgdata(pgdata, pgdata_restored)

    # @unittest.skip("skip")
    def test_page_delete(self):
        """
        Make node, create tablespace with table, take full backup,
        delete everything from table, vacuum table, take page backup,
        restore page backup, compare.
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True, initdb_params=['--data-checksums'],
            pg_options={
                'checkpoint_timeout': '30s',
            }
        )

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        self.create_tblspace_in_node(node, 'somedata')
        # FULL backup
        self.backup_node(backup_dir, 'node', node)
        node.safe_psql(
            "postgres",
            "create table t_heap tablespace somedata as select i as id,"
            " md5(i::text) as text, md5(i::text)::tsvector as tsvector"
            " from generate_series(0,100) i")

        node.safe_psql(
            "postgres",
            "delete from t_heap")

        node.safe_psql(
            "postgres",
            "vacuum t_heap")

        # PAGE BACKUP
        self.backup_node(
            backup_dir, 'node', node, backup_type='page')
        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        # RESTORE
        node_restored = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
        node_restored.cleanup()

        self.restore_node(
            backup_dir, 'node', node_restored,
            options=[
                "-j", "4",
                "-T", "{0}={1}".format(
                    self.get_tblspace_path(node, 'somedata'),
                    self.get_tblspace_path(node_restored, 'somedata'))
            ]
        )

        # GET RESTORED PGDATA AND COMPARE
        if self.paranoia:
            pgdata_restored = self.pgdata_content(node_restored.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)

        # START RESTORED NODE
        self.set_auto_conf(node_restored, {'port': node_restored.port})
        node_restored.slow_start()

    # @unittest.skip("skip")
    def test_page_delete_1(self):
        """
        Make node, create tablespace with table, take full backup,
        delete everything from table, vacuum table, take page backup,
        restore page backup, compare.
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'checkpoint_timeout': '30s',
            }
        )

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        self.create_tblspace_in_node(node, 'somedata')

        node.safe_psql(
            "postgres",
            "create table t_heap tablespace somedata as select i as id,"
            " md5(i::text) as text, md5(i::text)::tsvector as tsvector"
            " from generate_series(0,100) i"
        )
        # FULL backup
        self.backup_node(backup_dir, 'node', node)

        node.safe_psql(
            "postgres",
            "delete from t_heap"
        )

        node.safe_psql(
            "postgres",
            "vacuum t_heap"
        )

        # PAGE BACKUP
        self.backup_node(
            backup_dir, 'node', node, backup_type='page')
        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        # RESTORE
        node_restored = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored')
        )
        node_restored.cleanup()

        self.restore_node(
            backup_dir, 'node', node_restored,
            options=[
                "-j", "4",
                "-T", "{0}={1}".format(
                    self.get_tblspace_path(node, 'somedata'),
                    self.get_tblspace_path(node_restored, 'somedata'))
            ]
        )

        # GET RESTORED PGDATA AND COMPARE
        if self.paranoia:
            pgdata_restored = self.pgdata_content(node_restored.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)

        # START RESTORED NODE
        self.set_auto_conf(node_restored, {'port': node_restored.port})
        node_restored.slow_start()

    def test_parallel_pagemap(self):
        """
        Test for parallel WAL segments reading, during which pagemap is built
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')

        # Initialize instance and backup directory
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            initdb_params=['--data-checksums'],
            pg_options={
                "hot_standby": "on"
            }
        )
        node_restored = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'),
        )

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node_restored.cleanup()
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # Do full backup
        self.backup_node(backup_dir, 'node', node)
        show_backup = self.show_pb(backup_dir, 'node')[0]

        self.assertEqual(show_backup['status'], "OK")
        self.assertEqual(show_backup['backup-mode'], "FULL")

        # Fill instance with data and make several WAL segments ...
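        # Each switch_wal_segment() call below closes out the current WAL
        # segment, so the PAGE backup has several archived segments to read
        # in parallel while building its pagemap.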
        with node.connect() as conn:
            conn.execute("create table test (id int)")
            for x in range(0, 8):
                conn.execute(
                    "insert into test select i from generate_series(1,100) s(i)")
                conn.commit()
                self.switch_wal_segment(conn)
            count1 = conn.execute("select count(*) from test")

        # ... and do page backup with parallel pagemap
        self.backup_node(
            backup_dir, 'node', node, backup_type="page", options=["-j", "4"])
        show_backup = self.show_pb(backup_dir, 'node')[1]

        self.assertEqual(show_backup['status'], "OK")
        self.assertEqual(show_backup['backup-mode'], "PAGE")

        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        # Restore it
        self.restore_node(backup_dir, 'node', node_restored)

        # Physical comparison
        if self.paranoia:
            pgdata_restored = self.pgdata_content(node_restored.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)

        self.set_auto_conf(node_restored, {'port': node_restored.port})
        node_restored.slow_start()

        # Check restored node
        count2 = node_restored.execute("postgres", "select count(*) from test")

        self.assertEqual(count1, count2)

        # Clean after yourself
        node.cleanup()
        node_restored.cleanup()

    def test_parallel_pagemap_1(self):
        """
        Test for parallel WAL segments reading, during which pagemap is built
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')

        # Initialize instance and backup directory
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            initdb_params=['--data-checksums'],
            pg_options={}
        )

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # Do full backup
        self.backup_node(backup_dir, 'node', node)
        show_backup = self.show_pb(backup_dir, 'node')[0]

        self.assertEqual(show_backup['status'], "OK")
        self.assertEqual(show_backup['backup-mode'], "FULL")

        # Fill instance with data and make several WAL segments ...
        node.pgbench_init(scale=10)

        # do page backup in single thread
        page_id = self.backup_node(
            backup_dir, 'node', node, backup_type="page")

        self.delete_pb(backup_dir, 'node', page_id)

        # ... and do page backup with parallel pagemap
        self.backup_node(
            backup_dir, 'node', node, backup_type="page", options=["-j", "4"])
        show_backup = self.show_pb(backup_dir, 'node')[1]

        self.assertEqual(show_backup['status'], "OK")
        self.assertEqual(show_backup['backup-mode'], "PAGE")

        # Drop node and restore it
        node.cleanup()
        self.restore_node(backup_dir, 'node', node)
        node.slow_start()

        # Clean after yourself
        node.cleanup()

    # @unittest.skip("skip")
    def test_page_backup_with_lost_wal_segment(self):
        """
        make node with archiving
        make archive backup, then generate some wals with pgbench,
        delete latest archived wal segment
        run page backup, expecting error because of missing wal segment
        make sure that backup status is 'ERROR'
        """
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        self.backup_node(backup_dir, 'node', node)

        # make some wals
        node.pgbench_init(scale=3)

        # delete last wal segment
        wals_dir = os.path.join(backup_dir, 'wal', 'node')
        wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(
            wals_dir, f)) and not f.endswith('.backup') and not f.endswith('.part')]
        wals = map(str, wals)
        file = os.path.join(wals_dir, max(wals))
        os.remove(file)
        if self.archive_compress:
            file = file[:-3]
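        # The newest archived segment is now gone, leaving a gap in the WAL
        # between the previous backup and the current position; PAGE backup
        # reads that WAL to build its pagemap and therefore must fail.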

        # Single-thread PAGE backup
        try:
            self.backup_node(
                backup_dir, 'node', node, backup_type='page')
            self.assertEqual(
                1, 0,
                "Expecting Error because of wal segment disappearance.\n "
                "Output: {0} \n CMD: {1}".format(
                    self.output, self.cmd))
        except ProbackupException as e:
            self.assertTrue(
                'Could not read WAL record at' in e.message and
                'is absent' in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        self.assertEqual(
            'ERROR',
            self.show_pb(backup_dir, 'node')[1]['status'],
            'Backup {0} should have STATUS "ERROR"')

        # Multi-thread PAGE backup
        try:
            self.backup_node(
                backup_dir, 'node', node,
                backup_type='page',
                options=["-j", "4"])
            self.assertEqual(
                1, 0,
                "Expecting Error because of wal segment disappearance.\n "
                "Output: {0} \n CMD: {1}".format(
                    self.output, self.cmd))
        except ProbackupException as e:
            self.assertTrue(
                'Could not read WAL record at' in e.message and
                'is absent' in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        self.assertEqual(
            'ERROR',
            self.show_pb(backup_dir, 'node')[2]['status'],
            'Backup {0} should have STATUS "ERROR"')

    # @unittest.skip("skip")
    def test_page_backup_with_corrupted_wal_segment(self):
        """
        make node with archiving
        make archive backup, then generate some wals with pgbench,
        corrupt latest archived wal segment
        run page backup, expecting error because of corrupted wal segment
        make sure that backup status is 'ERROR'
        """
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        self.backup_node(backup_dir, 'node', node)

        # make some wals
        node.pgbench_init(scale=10)

        # pick the wal segment to corrupt
        wals_dir = os.path.join(backup_dir, 'wal', 'node')
        wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(
            wals_dir, f)) and not f.endswith('.backup')]
        wals = map(str, wals)
        # file = os.path.join(wals_dir, max(wals))

        if self.archive_compress:
            original_file = os.path.join(wals_dir, '000000010000000000000004.gz')
            tmp_file = os.path.join(backup_dir, '000000010000000000000004')

            with gzip.open(original_file, 'rb') as f_in, open(tmp_file, 'wb') as f_out:
                shutil.copyfileobj(f_in, f_out)

            # drop healthy file
            os.remove(original_file)
            file = tmp_file

        else:
            file = os.path.join(wals_dir, '000000010000000000000004')

        # corrupt file
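        # Overwriting a few bytes near the start of the segment invalidates
        # the WAL page header and record checksums, so any attempt to read
        # records from this segment fails.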
        print(file)
        with open(file, "rb+", 0) as f:
            f.seek(42)
            f.write(b"blah")
            f.flush()
            f.close()

        if self.archive_compress:
            # compress the corrupted file and put it back in place of the original
            with open(file, 'rb') as f_in, gzip.open(original_file, 'wb', compresslevel=1) as f_out:
                shutil.copyfileobj(f_in, f_out)

            file = os.path.join(wals_dir, '000000010000000000000004.gz')

        #if self.archive_compress:
        #    file = file[:-3]

        # Single-thread PAGE backup
        try:
            self.backup_node(
                backup_dir, 'node', node, backup_type='page')
            self.assertEqual(
                1, 0,
                "Expecting Error because of wal segment corruption.\n "
                "Output: {0} \n CMD: {1}".format(
                    self.output, self.cmd))
        except ProbackupException as e:
            self.assertTrue(
                'Could not read WAL record at' in e.message and
                'Possible WAL corruption. Error has occured during reading WAL segment' in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        self.assertEqual(
            'ERROR',
            self.show_pb(backup_dir, 'node')[1]['status'],
            'Backup {0} should have STATUS "ERROR"')

        # Multi-thread PAGE backup
        try:
            self.backup_node(
                backup_dir, 'node', node,
                backup_type='page', options=["-j", "4"])
            self.assertEqual(
                1, 0,
                "Expecting Error because of wal segment corruption.\n "
                "Output: {0} \n CMD: {1}".format(
                    self.output, self.cmd))
        except ProbackupException as e:
            self.assertTrue(
                'Could not read WAL record at' in e.message and
                'Possible WAL corruption. Error has occured during reading WAL segment "{0}"'.format(
                    file) in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        self.assertEqual(
            'ERROR',
            self.show_pb(backup_dir, 'node')[2]['status'],
            'Backup {0} should have STATUS "ERROR"')

    # @unittest.skip("skip")
    def test_page_backup_with_alien_wal_segment(self):
        """
        make two nodes with archiving
        take archive full backup from both nodes,
        generate some wals with pgbench on both nodes,
        move latest archived wal segment from second node to first node's archive
        run page backup on first node
        expecting error because of alien wal segment
        make sure that backup status is 'ERROR'
        """
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        alien_node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'alien_node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        self.add_instance(backup_dir, 'alien_node', alien_node)
        self.set_archiving(backup_dir, 'alien_node', alien_node)
        alien_node.slow_start()

        self.backup_node(
            backup_dir, 'node', node, options=['--stream'])
        self.backup_node(
            backup_dir, 'alien_node', alien_node, options=['--stream'])

        # make some wals
        node.safe_psql(
            "postgres",
            "create sequence t_seq; "
            "create table t_heap as select i as id, "
            "md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,10000) i;")

        alien_node.safe_psql(
            "postgres",
            "create database alien")

        alien_node.safe_psql(
            "alien",
            "create sequence t_seq; "
            "create table t_heap_alien as select i as id, "
            "md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,10000) i;")

        # copy latest wal segment
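        # Swap the newest segment in node's archive for the same-named segment
        # from alien_node; its database system identifier will not match this
        # cluster's pg_control, which the PAGE backup must detect.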
        wals_dir = os.path.join(backup_dir, 'wal', 'alien_node')
        wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(
            wals_dir, f)) and not f.endswith('.backup')]
        wals = map(str, wals)
        filename = max(wals)
        file = os.path.join(wals_dir, filename)
        file_destination = os.path.join(
            os.path.join(backup_dir, 'wal', 'node'), filename)
        start = time.time()
        while not os.path.exists(file_destination) and time.time() - start < 20:
            time.sleep(0.1)
        os.remove(file_destination)
        os.rename(file, file_destination)

        # Single-thread PAGE backup
        try:
            self.backup_node(
                backup_dir, 'node', node,
                backup_type='page')
            self.assertEqual(
                1, 0,
                "Expecting Error because of alien wal segment.\n "
                "Output: {0} \n CMD: {1}".format(
                    self.output, self.cmd))
        except ProbackupException as e:
            self.assertTrue(
                'Could not read WAL record at' in e.message and
                'Possible WAL corruption. Error has occured during reading WAL segment' in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        self.assertEqual(
            'ERROR',
            self.show_pb(backup_dir, 'node')[1]['status'],
            'Backup {0} should have STATUS "ERROR"')

        # Multi-thread PAGE backup
        try:
            self.backup_node(
                backup_dir, 'node', node,
                backup_type='page', options=["-j", "4"])
            self.assertEqual(
                1, 0,
                "Expecting Error because of alien wal segment.\n "
                "Output: {0} \n CMD: {1}".format(
                    self.output, self.cmd))
        except ProbackupException as e:
            self.assertIn('Could not read WAL record at', e.message)
            self.assertIn('WAL file is from different database system: '
                'WAL file database system identifier is', e.message)
            self.assertIn('pg_control database system identifier is', e.message)
            self.assertIn('Possible WAL corruption. Error has occured '
                'during reading WAL segment', e.message)

        self.assertEqual(
            'ERROR',
            self.show_pb(backup_dir, 'node')[2]['status'],
            'Backup {0} should have STATUS "ERROR"')

    # @unittest.skip("skip")
    def test_multithread_page_backup_with_toast(self):
        """
        make node, create toast, do multithread PAGE backup
        """
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        self.backup_node(backup_dir, 'node', node)

        # make some wals
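        # repeat() inflates each value far past the ~2 KB TOAST threshold,
        # so the rows are stored out of line in the table's TOAST relation.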
989
        node.safe_psql(
990
            "postgres",
991
            "create table t3 as select i, "
992
            "repeat(md5(i::text),5006056) as fat_attr "
993
            "from generate_series(0,70) i")
994

995
        # Multi-thread PAGE backup
996
        self.backup_node(
997
            backup_dir, 'node', node,
998
            backup_type='page', options=["-j", "4"])
999

1000
    # @unittest.skip("skip")
1001
    def test_page_create_db(self):
1002
        """
1003
        Make node, take full backup, create database db1, take page backup,
1004
        restore database and check it presense
1005
        """
1006
        self.maxDiff = None
1007
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
1008
        node = self.make_simple_node(
1009
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
1010
            set_replication=True,
1011
            initdb_params=['--data-checksums'],
1012
            pg_options={
1013
                'max_wal_size': '10GB',
1014
                'checkpoint_timeout': '5min',
1015
            }
1016
        )
1017

1018
        self.init_pb(backup_dir)
1019
        self.add_instance(backup_dir, 'node', node)
1020
        self.set_archiving(backup_dir, 'node', node)
1021
        node.slow_start()
1022

1023
        # FULL BACKUP
1024
        node.safe_psql(
1025
            "postgres",
1026
            "create table t_heap as select i as id, md5(i::text) as text, "
1027
            "md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
1028

1029
        self.backup_node(
1030
            backup_dir, 'node', node)
1031

1032
        # CREATE DATABASE DB1
1033
        node.safe_psql("postgres", "create database db1")
1034
        node.safe_psql(
1035
            "db1",
1036
            "create table t_heap as select i as id, md5(i::text) as text, "
1037
            "md5(i::text)::tsvector as tsvector from generate_series(0,1000) i")
1038

1039
        # PAGE BACKUP
1040
        backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page')
1041

1042
        if self.paranoia:
1043
            pgdata = self.pgdata_content(node.data_dir)
1044

1045
        # RESTORE
1046
        node_restored = self.make_simple_node(
1047
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
1048

1049
        node_restored.cleanup()
1050
        self.restore_node(
1051
            backup_dir, 'node', node_restored,
1052
            backup_id=backup_id, options=["-j", "4"])
1053

1054
        # COMPARE PHYSICAL CONTENT
1055
        if self.paranoia:
1056
            pgdata_restored = self.pgdata_content(node_restored.data_dir)
1057
            self.compare_pgdata(pgdata, pgdata_restored)
1058

1059
        # START RESTORED NODE
1060
        self.set_auto_conf(node_restored, {'port': node_restored.port})
1061
        node_restored.slow_start()
1062

1063
        node_restored.safe_psql('db1', 'select 1')
1064
        node_restored.cleanup()
1065

1066
        # DROP DATABASE DB1
1067
        node.safe_psql(
1068
            "postgres", "drop database db1")
1069
        # SECOND PAGE BACKUP
1070
        backup_id = self.backup_node(
1071
            backup_dir, 'node', node, backup_type='page')
1072

1073
        if self.paranoia:
1074
            pgdata = self.pgdata_content(node.data_dir)
1075

1076
        # RESTORE SECOND PAGE BACKUP
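        # db1 was dropped before this backup was taken, so the restored
        # cluster must not contain it; the connection attempt below must fail.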
        self.restore_node(
            backup_dir, 'node', node_restored,
            backup_id=backup_id, options=["-j", "4"]
        )

        # COMPARE PHYSICAL CONTENT
        if self.paranoia:
            pgdata_restored = self.pgdata_content(
                node_restored.data_dir, ignore_ptrack=False)
            self.compare_pgdata(pgdata, pgdata_restored)

        # START RESTORED NODE
        self.set_auto_conf(node_restored, {'port': node_restored.port})
        node_restored.slow_start()

        try:
            node_restored.safe_psql('db1', 'select 1')
            # we should fail here: an exception is what we expect
            self.assertEqual(
                1, 0,
                "Expecting Error because we are connecting to deleted database"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd)
            )
        except QueryException as e:
            self.assertTrue(
                'FATAL:  database "db1" does not exist' in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd)
            )

    # @unittest.skip("skip")
    # @unittest.expectedFailure
    def test_multi_timeline_page(self):
        """
        Check that a backup in PAGE mode chooses
        its parent backup correctly:
        t12        /---P-->
        ...
        t3      /---->
        t2   /---->
        t1 -F-----D->

        P must have F as parent
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql("postgres", "create extension pageinspect")

        try:
            node.safe_psql(
                "postgres",
                "create extension amcheck")
        except QueryException as e:
            node.safe_psql(
                "postgres",
                "create extension amcheck_next")

        node.pgbench_init(scale=20)
        full_id = self.backup_node(backup_dir, 'node', node)

        pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum'])
        pgbench.wait()

        self.backup_node(backup_dir, 'node', node, backup_type='delta')

        node.cleanup()
        self.restore_node(
            backup_dir, 'node', node, backup_id=full_id,
            options=[
                '--recovery-target=immediate',
                '--recovery-target-action=promote'])

        node.slow_start()

        pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum'])
        pgbench.wait()

        # create timelines
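        # Each restore with --recovery-target-timeline={i} followed by promote
        # forks a new timeline, reproducing the branching history from the
        # docstring diagram.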
        for i in range(2, 7):
            node.cleanup()
            self.restore_node(
                backup_dir, 'node', node,
                options=[
                    '--recovery-target=latest',
                    '--recovery-target-action=promote',
                    '--recovery-target-timeline={0}'.format(i)])
            node.slow_start()

            # at this point timeline i+1 exists
            pgbench = node.pgbench(options=['-T', '20', '-c', '1', '--no-vacuum'])
            pgbench.wait()

            # create backups on timelines 2, 4 and 6
            if i % 2 == 0:
                self.backup_node(backup_dir, 'node', node, backup_type='page')

        page_id = self.backup_node(
            backup_dir, 'node', node, backup_type='page',
            options=['--log-level-file=VERBOSE'])

        pgdata = self.pgdata_content(node.data_dir)

        result = node.table_checksum("pgbench_accounts")

        node_restored = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
        node_restored.cleanup()

        self.restore_node(backup_dir, 'node', node_restored)
        pgdata_restored = self.pgdata_content(node_restored.data_dir)

        self.set_auto_conf(node_restored, {'port': node_restored.port})
        node_restored.slow_start()

        result_new = node_restored.table_checksum("pgbench_accounts")

        self.assertEqual(result, result_new)

        self.compare_pgdata(pgdata, pgdata_restored)

        self.checkdb_node(
            backup_dir,
            'node',
            options=[
                '--amcheck',
                '-d', 'postgres', '-p', str(node.port)])

        self.checkdb_node(
            backup_dir,
            'node',
            options=[
                '--amcheck',
                '-d', 'postgres', '-p', str(node_restored.port)])

        backup_list = self.show_pb(backup_dir, 'node')

        self.assertEqual(
            backup_list[2]['parent-backup-id'],
            backup_list[0]['id'])
        self.assertEqual(backup_list[2]['current-tli'], 3)

        self.assertEqual(
            backup_list[3]['parent-backup-id'],
            backup_list[2]['id'])
        self.assertEqual(backup_list[3]['current-tli'], 5)

        self.assertEqual(
            backup_list[4]['parent-backup-id'],
            backup_list[3]['id'])
        self.assertEqual(backup_list[4]['current-tli'], 7)

        self.assertEqual(
            backup_list[5]['parent-backup-id'],
            backup_list[4]['id'])
        self.assertEqual(backup_list[5]['current-tli'], 7)

    # @unittest.skip("skip")
    # @unittest.expectedFailure
    def test_multitimeline_page_1(self):
        """
        Check that a backup in PAGE mode chooses
        its parent backup correctly:
        t2        /---->
        t1 -F--P---D->

        P must have F as parent
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'wal_log_hints': 'on'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql("postgres", "create extension pageinspect")

        try:
            node.safe_psql(
                "postgres",
                "create extension amcheck")
        except QueryException as e:
            node.safe_psql(
                "postgres",
                "create extension amcheck_next")

        node.pgbench_init(scale=20)
        full_id = self.backup_node(backup_dir, 'node', node)

        pgbench = node.pgbench(options=['-T', '20', '-c', '1'])
        pgbench.wait()

        page1 = self.backup_node(backup_dir, 'node', node, backup_type='page')

        pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum'])
        pgbench.wait()

        page1 = self.backup_node(backup_dir, 'node', node, backup_type='delta')

        node.cleanup()
        self.restore_node(
            backup_dir, 'node', node, backup_id=page1,
            options=[
                '--recovery-target=immediate',
                '--recovery-target-action=promote'])

        node.slow_start()

        pgbench = node.pgbench(options=['-T', '20', '-c', '1', '--no-vacuum'])
        pgbench.wait()

        print(self.backup_node(
            backup_dir, 'node', node, backup_type='page',
            options=['--log-level-console=LOG'], return_id=False))

        pgdata = self.pgdata_content(node.data_dir)

        node_restored = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
        node_restored.cleanup()

        self.restore_node(backup_dir, 'node', node_restored)
        pgdata_restored = self.pgdata_content(node_restored.data_dir)

        self.set_auto_conf(node_restored, {'port': node_restored.port})
        node_restored.slow_start()

        self.compare_pgdata(pgdata, pgdata_restored)

    @unittest.skip("skip")
    # @unittest.expectedFailure
    def test_page_pg_resetxlog(self):
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'shared_buffers': '512MB',
                'max_wal_size': '3GB'})

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # Create table
        node.safe_psql(
            "postgres",
            "create extension bloom; create sequence t_seq; "
            "create table t_heap "
            "as select nextval('t_seq')::int as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
#            "from generate_series(0,25600) i")
            "from generate_series(0,2560) i")

        self.backup_node(backup_dir, 'node', node)

        node.safe_psql(
            'postgres',
            "update t_heap set id = nextval('t_seq'), text = md5(text), "
            "tsvector = md5(repeat(tsvector::text, 10))::tsvector")

        self.switch_wal_segment(node)

        # kill the bastard
        if self.verbose:
            print('Killing postmaster. Losing Ptrack changes')
        node.stop(['-m', 'immediate', '-D', node.data_dir])

        # now smack it with sledgehammer
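        # pg_resetwal -f rewrites the control file and resets the WAL, so the
        # cluster's WAL history no longer lines up with the archive; the PAGE
        # backup below must therefore be rejected.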
        if node.major_version >= 10:
            pg_resetxlog_path = self.get_bin_path('pg_resetwal')
            wal_dir = 'pg_wal'
        else:
            pg_resetxlog_path = self.get_bin_path('pg_resetxlog')
            wal_dir = 'pg_xlog'

        self.run_binary(
            [
                pg_resetxlog_path,
                '-D',
                node.data_dir,
                '-o 42',
                '-f'
            ],
            asynchronous=False)

        if not node.status():
            node.slow_start()
        else:
            print("Die! Die! Why won't you die?... Why won't you die?")
            exit(1)

        # take page backup
#        self.backup_node(
#                backup_dir, 'node', node,
#                backup_type='page', options=['--stream'])

        try:
            self.backup_node(
                backup_dir, 'node', node, backup_type='page')
            # we should fail here: an exception is what we expect
            self.assertEqual(
                1, 0,
                "Expecting Error because instance was brutalized by pg_resetxlog"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd)
            )
        except ProbackupException as e:
            self.assertIn(
                'Insert error message',
                e.message,
                '\n Unexpected Error Message: {0}\n'
                ' CMD: {1}'.format(repr(e.message), self.cmd))

#        pgdata = self.pgdata_content(node.data_dir)
#
#        node_restored = self.make_simple_node(
#            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
#        node_restored.cleanup()
#
#        self.restore_node(
#            backup_dir, 'node', node_restored)
#
#        pgdata_restored = self.pgdata_content(node_restored.data_dir)
#        self.compare_pgdata(pgdata, pgdata_restored)
