# ptrack_test.py — pg_probackup ptrack test suite
# (full source listing: 4397 lines, 163.8 KB)
1
import os
2
import unittest
3
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, idx_ptrack
4
from datetime import datetime, timedelta
5
import subprocess
6
from testgres import QueryException, StartNodeException
7
import shutil
8
import sys
9
from time import sleep
10
from threading import Thread
11

12

13
class PtrackTest(ProbackupTest, unittest.TestCase):
14
    def setUp(self):
        """Skip every test on PostgreSQL < 11 and cache the short test name."""
        minimal_version = self.version_to_num('11.0')
        if self.pg_config_version < minimal_version:
            self.skipTest('You need PostgreSQL >= 11 for this test')
        # self.id() looks like 'tests.ptrack_test.PtrackTest.test_xxx';
        # element [3] is the bare test method name.
        self.fname = self.id().split('.')[3]
    # @unittest.skip("skip")
    def test_drop_rel_during_backup_ptrack(self):
        """
        Drop a relation while a PTRACK backup is paused inside
        backup_files(): the backup must log the missing file, finish
        successfully and restore to a physically identical data dir.
        """
        self._check_gdb_flag_or_skip_test()

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=self.ptrack,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

        node.safe_psql(
            "postgres",
            "create table t_heap as select i"
            " as id from generate_series(0,100) i")

        relative_path = node.safe_psql(
            "postgres",
            "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip()

        absolute_path = os.path.join(node.data_dir, relative_path)

        # FULL backup
        self.backup_node(backup_dir, 'node', node, options=['--stream'])

        # PTRACK backup, stopped right before file copying begins
        gdb = self.backup_node(
            backup_dir, 'node', node, backup_type='ptrack',
            gdb=True, options=['--log-level-file=LOG'])

        gdb.set_breakpoint('backup_files')
        gdb.run_until_break()

        # REMOVE file while the backup is suspended
        os.remove(absolute_path)

        # File removed, we can proceed with backup
        gdb.continue_execution_until_exit()

        pgdata = self.pgdata_content(node.data_dir)

        with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f:
            log_content = f.read()
            # assertIn gives a readable diff on failure,
            # unlike assertTrue(needle in haystack)
            self.assertIn(
                'LOG: File not found: "{0}"'.format(absolute_path),
                log_content,
                'File "{0}" should be deleted but it\'s not'.format(absolute_path))

        node.cleanup()
        self.restore_node(backup_dir, 'node', node, options=["-j", "4"])

        # Physical comparison
        pgdata_restored = self.pgdata_content(node.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)
    # @unittest.skip("skip")
    def test_ptrack_without_full(self):
        """ptrack backup without validated full backup must fail"""
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            initdb_params=['--data-checksums'],
            ptrack_enable=True)

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

        # An incremental backup with no FULL parent must be rejected
        try:
            self.backup_node(backup_dir, 'node', node, backup_type="ptrack")
            # we should die here because exception is what we expect to happen
            self.fail(
                "Expecting Error because page backup should not be possible "
                "without valid full backup.\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertTrue(
                "WARNING: Valid full backup on current timeline 1 is not found" in e.message and
                "ERROR: Create new full backup before an incremental one" in e.message,
                "\n Unexpected Error Message: {0}\n CMD: {1}".format(
                    repr(e.message), self.cmd))

        # The failed attempt must be recorded with status ERROR
        self.assertEqual(
            self.show_pb(backup_dir, 'node')[0]['status'],
            "ERROR")
    # @unittest.skip("skip")
    def test_ptrack_threads(self):
        """ptrack multi thread backup mode"""
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            initdb_params=['--data-checksums'],
            ptrack_enable=True)

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

        # Take a 4-thread FULL backup and then a 4-thread PTRACK backup;
        # each one must complete with status OK.
        for mode in ("full", "ptrack"):
            self.backup_node(
                backup_dir, 'node', node,
                backup_type=mode, options=["-j", "4"])
            self.assertEqual(
                self.show_pb(backup_dir, 'node')[0]['status'], "OK")
    # @unittest.skip("skip")
    def test_ptrack_stop_pg(self):
        """
        Take a full backup, restart the node and verify that a ptrack
        backup can still be taken afterwards (the ptrack map must
        survive a clean shutdown/start cycle).
        """
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'])
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

        node.pgbench_init(scale=1)

        # FULL backup
        self.backup_node(backup_dir, 'node', node, options=['--stream'])

        # Bounce the server before the incremental backup
        node.stop()
        node.slow_start()

        # PTRACK backup must succeed after the restart
        self.backup_node(
            backup_dir, 'node', node,
            backup_type='ptrack', options=['--stream'])

    # @unittest.skip("skip")
    def test_ptrack_multi_timeline_backup(self):
        """
        Take a PTRACK backup on a new timeline created by a PITR restore
        of the FULL backup, then verify restore correctness:

        t2            /------P2
        t1 ------F---*-----P1
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

        node.pgbench_init(scale=5)

        # FULL backup (return value intentionally unused)
        self.backup_node(backup_dir, 'node', node)

        pgbench = node.pgbench(options=['-T', '30', '-c', '1', '--no-vacuum'])
        sleep(15)

        # Grab an xid from the middle of the pgbench run as the PITR target
        xid = node.safe_psql(
            'postgres',
            'SELECT txid_current()').decode('utf-8').rstrip()
        pgbench.wait()

        # P1 on timeline 1
        self.backup_node(backup_dir, 'node', node, backup_type='ptrack')

        node.cleanup()

        # Restore from full backup to create Timeline 2
        print(self.restore_node(
            backup_dir, 'node', node,
            options=[
                '--recovery-target-xid={0}'.format(xid),
                '--recovery-target-action=promote']))

        node.slow_start()

        pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum'])
        pgbench.wait()

        # P2 on timeline 2
        self.backup_node(backup_dir, 'node', node, backup_type='ptrack')

        pgdata = self.pgdata_content(node.data_dir)

        node.cleanup()

        self.restore_node(backup_dir, 'node', node)

        # Physical comparison
        pgdata_restored = self.pgdata_content(node.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)

        node.slow_start()

        # pgbench invariant: tellers - branches + accounts - history == 0
        balance = node.safe_psql(
            'postgres',
            'select (select sum(tbalance) from pgbench_tellers) - '
            '( select sum(bbalance) from pgbench_branches) + '
            '( select sum(abalance) from pgbench_accounts ) - '
            '(select sum(delta) from pgbench_history) as must_be_zero').decode('utf-8').rstrip()

        self.assertEqual('0', balance)

    # @unittest.skip("skip")
    def test_ptrack_multi_timeline_backup_1(self):
        """
        Take a new PTRACK backup on timeline 2 after the previous PTRACK
        backup (its would-be parent on timeline 1) has been deleted:

        t2              /------
        t1 ---F---P1---*

        # delete P1
        t2              /------P2
        t1 ---F--------*
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

        node.pgbench_init(scale=5)

        # FULL backup (return value intentionally unused)
        self.backup_node(backup_dir, 'node', node)

        pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum'])
        pgbench.wait()

        # P1 on timeline 1
        ptrack_id = self.backup_node(backup_dir, 'node', node, backup_type='ptrack')
        node.cleanup()

        # Restore to create timeline 2
        self.restore_node(backup_dir, 'node', node)

        node.slow_start()

        pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum'])
        pgbench.wait()

        # delete old PTRACK backup
        self.delete_pb(backup_dir, 'node', backup_id=ptrack_id)

        # take new PTRACK backup
        self.backup_node(backup_dir, 'node', node, backup_type='ptrack')

        pgdata = self.pgdata_content(node.data_dir)

        node.cleanup()

        self.restore_node(backup_dir, 'node', node)

        # Physical comparison
        pgdata_restored = self.pgdata_content(node.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)

        node.slow_start()

        # pgbench invariant: tellers - branches + accounts - history == 0
        balance = node.safe_psql(
            'postgres',
            'select (select sum(tbalance) from pgbench_tellers) - '
            '( select sum(bbalance) from pgbench_branches) + '
            '( select sum(abalance) from pgbench_accounts ) - '
            '(select sum(delta) from pgbench_history) as must_be_zero').decode('utf-8').rstrip()

        self.assertEqual('0', balance)
    # @unittest.skip("skip")
    def test_ptrack_eat_my_data(self):
        """
        PGPRO-4051

        Repeated PTRACK backups taken while pgbench keeps writing must
        not lose data: check the pgbench balance invariant and compare a
        table checksum between source and restored nodes.
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

        node.pgbench_init(scale=50)

        # FULL backup as the base of the incremental chain
        self.backup_node(backup_dir, 'node', node)

        node_restored = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))

        # Long-running write load in the background (-T 300 outlives the loop)
        pgbench = node.pgbench(options=['-T', '300', '-c', '1', '--no-vacuum'])

        # Take 10 PTRACK backups under concurrent load, 2 seconds apart
        for i in range(10):
            print("Iteration: {0}".format(i))

            sleep(2)

            self.backup_node(backup_dir, 'node', node, backup_type='ptrack')
#            pgdata = self.pgdata_content(node.data_dir)
#
#            node_restored.cleanup()
#
#            self.restore_node(backup_dir, 'node', node_restored)
#            pgdata_restored = self.pgdata_content(node_restored.data_dir)
#
#            self.compare_pgdata(pgdata, pgdata_restored)

        # Stop the background load before the final verification
        pgbench.terminate()
        pgbench.wait()

        self.switch_wal_segment(node)

        # Reference checksum taken from the source node
        result = node.table_checksum("pgbench_accounts")

        node_restored.cleanup()
        self.restore_node(backup_dir, 'node', node_restored)
        self.set_auto_conf(
            node_restored, {'port': node_restored.port})

        node_restored.slow_start()

        # pgbench invariant: tellers - branches + accounts - history == 0
        balance = node_restored.safe_psql(
            'postgres',
            'select (select sum(tbalance) from pgbench_tellers) - '
            '( select sum(bbalance) from pgbench_branches) + '
            '( select sum(abalance) from pgbench_accounts ) - '
            '(select sum(delta) from pgbench_history) as must_be_zero').decode('utf-8').rstrip()

        self.assertEqual('0', balance)

        # Logical comparison
        self.assertEqual(
            result,
            node.table_checksum("pgbench_accounts"),
            'Data loss')
    # @unittest.skip("skip")
    def test_ptrack_simple(self):
        """Take FULL and two PTRACK stream backups, restore the latest
        one and verify physical and logical data correctness."""
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

        # FULL stream backup
        self.backup_node(backup_dir, 'node', node, options=['--stream'])

        node.safe_psql(
            "postgres",
            "create table t_heap as select i"
            " as id from generate_series(0,1) i")

        # First PTRACK backup picks up the new table
        self.backup_node(
            backup_dir, 'node', node, backup_type='ptrack',
            options=['--stream'])

        node.safe_psql(
            "postgres",
            "update t_heap set id = 100500")

        # Second PTRACK backup picks up the updated rows
        self.backup_node(
            backup_dir, 'node', node,
            backup_type='ptrack', options=['--stream'])

        pgdata = self.pgdata_content(node.data_dir) if self.paranoia else None

        result = node.table_checksum("t_heap")

        node_restored = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
        node_restored.cleanup()

        self.restore_node(
            backup_dir, 'node', node_restored, options=["-j", "4"])

        # Physical comparison
        if self.paranoia:
            pgdata_restored = self.pgdata_content(
                node_restored.data_dir, ignore_ptrack=False)
            self.compare_pgdata(pgdata, pgdata_restored)

        self.set_auto_conf(
            node_restored, {'port': node_restored.port})

        node_restored.slow_start()

        # Logical comparison
        self.assertEqual(
            result,
            node_restored.table_checksum("t_heap"))
    # @unittest.skip("skip")
    def test_ptrack_unprivileged(self):
        """
        FULL and PTRACK backups must work for a minimally privileged
        'backup' role; the exact set of required grants depends on the
        server version.
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        # self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "CREATE DATABASE backupdb")

        # PG 9.5
        # NOTE(review): setUp skips PG < 11, so the 9.5/9.6 branches are
        # effectively dead code kept for symmetry with other test modules.
        if self.get_version(node) < 90600:
            node.safe_psql(
                'backupdb',
                "REVOKE ALL ON DATABASE backupdb from PUBLIC; "
                "REVOKE ALL ON SCHEMA public from PUBLIC; "
                "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; "
                "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; "
                "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; "
                "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; "
                "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; "
                "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; "
                "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; "
                "REVOKE ALL ON SCHEMA information_schema from PUBLIC; "
                "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; "
                "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; "
                "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; "
                "CREATE ROLE backup WITH LOGIN REPLICATION; "
                "GRANT CONNECT ON DATABASE backupdb to backup; "
                "GRANT USAGE ON SCHEMA pg_catalog TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack
                "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;")
        # PG 9.6
        # BUGFIX: was '> 90600', which sent exactly 9.6.0 (90600) to the
        # final (>= 15) branch; '>= 90600' matches the comment's intent.
        elif self.get_version(node) >= 90600 and self.get_version(node) < 100000:
            node.safe_psql(
                'backupdb',
                "REVOKE ALL ON DATABASE backupdb from PUBLIC; "
                "REVOKE ALL ON SCHEMA public from PUBLIC; "
                "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; "
                "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; "
                "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; "
                "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; "
                "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; "
                "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; "
                "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; "
                "REVOKE ALL ON SCHEMA information_schema from PUBLIC; "
                "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; "
                "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; "
                "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; "
                "CREATE ROLE backup WITH LOGIN REPLICATION; "
                "GRANT CONNECT ON DATABASE backupdb to backup; "
                "GRANT USAGE ON SCHEMA pg_catalog TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack
                "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;"
            )
        # >= 10 && < 15
        elif self.get_version(node) >= 100000 and self.get_version(node) < 150000:
            node.safe_psql(
                'backupdb',
                "REVOKE ALL ON DATABASE backupdb from PUBLIC; "
                "REVOKE ALL ON SCHEMA public from PUBLIC; "
                "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; "
                "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; "
                "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; "
                "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; "
                "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; "
                "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; "
                "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; "
                "REVOKE ALL ON SCHEMA information_schema from PUBLIC; "
                "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; "
                "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; "
                "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; "
                "CREATE ROLE backup WITH LOGIN REPLICATION; "
                "GRANT CONNECT ON DATABASE backupdb to backup; "
                "GRANT USAGE ON SCHEMA pg_catalog TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack
                "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;"
            )
        # >= 15: pg_start_backup/pg_stop_backup were renamed to
        # pg_backup_start/pg_backup_stop
        else:
            node.safe_psql(
                'backupdb',
                "REVOKE ALL ON DATABASE backupdb from PUBLIC; "
                "REVOKE ALL ON SCHEMA public from PUBLIC; "
                "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; "
                "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; "
                "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; "
                "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; "
                "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; "
                "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; "
                "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; "
                "REVOKE ALL ON SCHEMA information_schema from PUBLIC; "
                "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; "
                "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; "
                "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; "
                "CREATE ROLE backup WITH LOGIN REPLICATION; "
                "GRANT CONNECT ON DATABASE backupdb to backup; "
                "GRANT USAGE ON SCHEMA pg_catalog TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "
                "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack
                "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_start(text, boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
                "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;"
            )

        # Install the ptrack extension in its own schema and let the
        # backup role use it
        node.safe_psql(
            "backupdb",
            "CREATE SCHEMA ptrack")
        node.safe_psql(
            "backupdb",
            "CREATE EXTENSION ptrack WITH SCHEMA ptrack")
        node.safe_psql(
            "backupdb",
            "GRANT USAGE ON SCHEMA ptrack TO backup")

        node.safe_psql(
            "backupdb",
            "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup")

        if ProbackupTest.enterprise:
            node.safe_psql(
                "backupdb",
                "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup; "
                'GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;')

        # FULL backup as the unprivileged role
        self.backup_node(
            backup_dir, 'node', node,
            datname='backupdb', options=['--stream', "-U", "backup"])

        # PTRACK backup as the unprivileged role
        self.backup_node(
            backup_dir, 'node', node, datname='backupdb',
            backup_type='ptrack', options=['--stream', "-U", "backup"])
    # @unittest.skip("skip")
    # @unittest.expectedFailure
    def test_ptrack_enable(self):
        """
        The ptrack library is preloaded and the extension is created,
        but ptrack.map_size is left at its default, so ptrack is
        disabled: a ptrack backup must fail with 'Ptrack is disabled'.
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True, initdb_params=['--data-checksums'],
            pg_options={
                'checkpoint_timeout': '30s',
                'shared_preload_libraries': 'ptrack'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

        # PTRACK BACKUP
        try:
            self.backup_node(
                backup_dir, 'node', node,
                backup_type='ptrack', options=["--stream"]
            )
            # we should die here because exception is what we expect to happen
            self.fail(
                "Expecting Error because ptrack disabled.\n"
                " Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd
                )
            )
        except ProbackupException as e:
            self.assertIn(
                'ERROR: Ptrack is disabled\n',
                e.message,
                '\n Unexpected Error Message: {0}\n'
                ' CMD: {1}'.format(repr(e.message), self.cmd)
            )
    # @unittest.skip("skip")
703
    # @unittest.expectedFailure
704
    def test_ptrack_disable(self):
        """
        Take full backup, disable ptrack restart postgresql,
        enable ptrack, restart postgresql, take ptrack backup
        which should fail
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'],
            pg_options={'checkpoint_timeout': '30s'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

        # FULL BACKUP
        self.backup_node(backup_dir, 'node', node, options=['--stream'])

        # DISABLE PTRACK (map_size = 0 turns tracking off; takes effect on restart)
        node.safe_psql('postgres', "alter system set ptrack.map_size to 0")
        node.stop()
        node.slow_start()

        # ENABLE PTRACK again — the ptrack map was lost while it was disabled,
        # so the interval since the FULL backup is no longer covered
        node.safe_psql('postgres', "alter system set ptrack.map_size to '128'")
        node.safe_psql('postgres', "alter system set shared_preload_libraries to 'ptrack'")
        node.stop()
        node.slow_start()

        # PTRACK BACKUP
        try:
            self.backup_node(
                backup_dir, 'node', node,
                backup_type='ptrack', options=["--stream"]
            )
            # we should die here because exception is what we expect to happen
            self.fail(
                "Expecting Error because ptrack_enable was set to OFF at some"
                " point after previous backup.\n"
                " Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd
                )
            )
        except ProbackupException as e:
            self.assertIn(
                'ERROR: LSN from ptrack_control',
                e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd
                )
            )
763

764
    # @unittest.skip("skip")
765
    def test_ptrack_uncommitted_xact(self):
        """make ptrack backup while there is uncommitted open transaction"""
        backup_dir = os.path.join(
            self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql("postgres", "CREATE EXTENSION ptrack")

        self.backup_node(backup_dir, 'node', node, options=['--stream'])

        # Open a transaction and deliberately leave it uncommitted while
        # the ptrack backup runs.
        connection = node.connect("postgres")
        connection.execute(
            "create table t_heap as select i"
            " as id from generate_series(0,1) i")

        self.backup_node(
            backup_dir, 'node', node, backup_type='ptrack',
            options=['--stream'])

        pgdata = self.pgdata_content(node.data_dir) if self.paranoia else None

        restored = self.make_simple_node(
            base_dir=os.path.join(
                self.module_name, self.fname, 'node_restored'))
        restored.cleanup()

        self.restore_node(
            backup_dir, 'node', restored,
            restored.data_dir, options=["-j", "4"])

        pgdata_restored = None
        if self.paranoia:
            pgdata_restored = self.pgdata_content(
                restored.data_dir, ignore_ptrack=False)

        self.set_auto_conf(restored, {'port': restored.port})

        restored.slow_start()

        # Physical comparison
        if self.paranoia:
            self.compare_pgdata(pgdata, pgdata_restored)
818

819
    # @unittest.skip("skip")
820
    def test_ptrack_vacuum_full(self):
        """make node, make full and ptrack stream backups,
          restore them and check data correctness"""
        # Requires gdb support: VACUUM FULL is frozen mid-rewrite so ptrack
        # backups run while the relation is being rewritten concurrently.
        self._check_gdb_flag_or_skip_test()

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        self.create_tblspace_in_node(node, 'somedata')

        node.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

        # FULL (stream) backup before any rewrite activity.
        self.backup_node(backup_dir, 'node', node, options=['--stream'])

        node.safe_psql(
            "postgres",
            "create table t_heap tablespace somedata as select i"
            " as id from generate_series(0,1000000) i"
            )

        # Autocommit connection: VACUUM FULL cannot run inside a transaction block.
        pg_connect = node.connect("postgres", autocommit=True)

        # Attach gdb to the backend and break inside the VACUUM FULL
        # tuple-rewrite path, so the backend can be paused mid-rewrite.
        gdb = self.gdb_attach(pg_connect.pid)
        gdb.set_breakpoint('reform_and_rewrite_tuple')

        gdb.continue_execution_until_running()

        # Kick off VACUUM FULL from a separate thread; it will stop at the
        # breakpoint set above.
        process = Thread(
            target=pg_connect.execute, args=["VACUUM FULL t_heap"])
        process.start()

        # Wait until the backend actually hits the breakpoint.
        while not gdb.stopped_in_breakpoint:
            sleep(1)

        # Let the rewrite advance a bit, then leave the backend suspended.
        gdb.continue_execution_until_break(20)

        # Two consecutive ptrack backups taken while VACUUM FULL is paused.
        self.backup_node(
            backup_dir, 'node', node, backup_type='ptrack', options=['--stream'])

        self.backup_node(
            backup_dir, 'node', node, backup_type='ptrack', options=['--stream'])

        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        # Release the backend and let VACUUM FULL finish before restoring.
        gdb.remove_all_breakpoints()
        gdb._execute('detach')
        process.join()

        node_restored = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
        node_restored.cleanup()

        old_tablespace = self.get_tblspace_path(node, 'somedata')
        new_tablespace = self.get_tblspace_path(node_restored, 'somedata_new')

        # Restore the last ptrack backup with tablespace remapping (-T old=new).
        self.restore_node(
            backup_dir, 'node', node_restored,
            options=["-j", "4", "-T", "{0}={1}".format(
                old_tablespace, new_tablespace)]
        )

        # Physical comparison
        if self.paranoia:
            pgdata_restored = self.pgdata_content(
                node_restored.data_dir, ignore_ptrack=False)
            self.compare_pgdata(pgdata, pgdata_restored)

        self.set_auto_conf(
            node_restored, {'port': node_restored.port})

        node_restored.slow_start()
902

903
    # @unittest.skip("skip")
904
    def test_ptrack_vacuum_truncate(self):
        """make node, create table, take full backup,
           delete last 3 pages, vacuum relation,
           take ptrack backup, take second ptrack backup,
           restore last ptrack backup and check data correctness"""
        backup_dir = os.path.join(
            self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        self.create_tblspace_in_node(node, 'somedata')

        node.safe_psql("postgres", "CREATE EXTENSION ptrack")

        node.safe_psql(
            "postgres",
            "create sequence t_seq; "
            "create table t_heap tablespace somedata as select i as id, "
            "md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,1024) i;")

        node.safe_psql("postgres", "vacuum t_heap")

        # FULL backup of the fully populated relation.
        self.backup_node(backup_dir, 'node', node, options=['--stream'])

        # Drop the relation's tail and vacuum so the file gets truncated.
        node.safe_psql(
            "postgres",
            "delete from t_heap where ctid >= '(11,0)'")

        node.safe_psql("postgres", "vacuum t_heap")

        # Two consecutive ptrack backups after the truncation.
        for _ in range(2):
            self.backup_node(
                backup_dir, 'node', node,
                backup_type='ptrack', options=['--stream'])

        pgdata = self.pgdata_content(node.data_dir) if self.paranoia else None

        dst_node = self.make_simple_node(
            base_dir=os.path.join(
                self.module_name, self.fname, 'node_restored'))
        dst_node.cleanup()

        src_tblspc = self.get_tblspace_path(node, 'somedata')
        dst_tblspc = self.get_tblspace_path(dst_node, 'somedata_new')

        # Restore the last ptrack backup, remapping the tablespace.
        self.restore_node(
            backup_dir, 'node', dst_node,
            options=["-j", "4",
                     "-T", "{0}={1}".format(src_tblspc, dst_tblspc)])

        # Physical comparison
        if self.paranoia:
            pgdata_restored = self.pgdata_content(
                dst_node.data_dir,
                ignore_ptrack=False
                )
            self.compare_pgdata(pgdata, pgdata_restored)

        self.set_auto_conf(dst_node, {'port': dst_node.port})

        dst_node.slow_start()
982

983
    # @unittest.skip("skip")
984
    def test_ptrack_get_block(self):
        """
        make node, make full and ptrack stream backups,
        restore them and check data correctness
        """
        # Requires gdb support: a ptrack backup is suspended mid-flight while
        # the table is updated underneath it.
        self._check_gdb_flag_or_skip_test()

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

        node.safe_psql(
            "postgres",
            "create table t_heap as select i"
            " as id from generate_series(0,1) i")

        self.backup_node(backup_dir, 'node', node, options=['--stream'])
        # Launch a ptrack backup under gdb and freeze it right before the
        # page map is built from the ptrack map.
        gdb = self.backup_node(
            backup_dir, 'node', node, backup_type='ptrack',
            options=['--stream'],
            gdb=True)

        gdb.set_breakpoint('make_pagemap_from_ptrack_2')
        gdb.run_until_break()

        # Modify the table while the backup sits at the breakpoint.
        node.safe_psql(
            "postgres",
            "update t_heap set id = 100500")

        # Resume the suspended backup and let it run to completion.
        gdb.continue_execution_until_exit()

        # A follow-up ptrack backup must pick up the concurrent update.
        self.backup_node(
            backup_dir, 'node', node,
            backup_type='ptrack', options=['--stream'])

        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        # Checksum taken before the node is destroyed and restored.
        result = node.table_checksum("t_heap")
        node.cleanup()
        self.restore_node(backup_dir, 'node', node, options=["-j", "4"])

        # Physical comparison
        if self.paranoia:
            pgdata_restored = self.pgdata_content(
                node.data_dir, ignore_ptrack=False)
            self.compare_pgdata(pgdata, pgdata_restored)

        node.slow_start()
        # Logical comparison
        self.assertEqual(
            result,
            node.table_checksum("t_heap"))
1048

1049
    # @unittest.skip("skip")
1050
    def test_ptrack_stream(self):
        """make node, make full and ptrack stream backups,
         restore them and check data correctness"""
        backup_dir = os.path.join(
            self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'],
            pg_options={'checkpoint_timeout': '30s'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql("postgres", "CREATE EXTENSION ptrack")

        # FULL BACKUP
        node.safe_psql("postgres", "create sequence t_seq")
        node.safe_psql(
            "postgres",
            "create table t_heap as select i as id, nextval('t_seq')"
            " as t_seq, md5(i::text) as text, md5(i::text)::tsvector"
            " as tsvector from generate_series(0,100) i")

        full_checksum = node.table_checksum("t_heap")
        full_backup_id = self.backup_node(
            backup_dir, 'node', node, options=['--stream'])

        # PTRACK BACKUP
        node.safe_psql(
            "postgres",
            "insert into t_heap select i as id, nextval('t_seq') as t_seq,"
            " md5(i::text) as text, md5(i::text)::tsvector as tsvector"
            " from generate_series(100,200) i")

        ptrack_checksum = node.table_checksum("t_heap")
        ptrack_backup_id = self.backup_node(
            backup_dir, 'node', node,
            backup_type='ptrack', options=['--stream'])

        pgdata = self.pgdata_content(node.data_dir) if self.paranoia else None

        # Drop Node
        node.cleanup()

        # Restore and check full backup
        restore_log = self.restore_node(
            backup_dir, 'node', node,
            backup_id=full_backup_id, options=["-j", "4"])
        self.assertIn(
            "INFO: Restore of backup {0} completed.".format(full_backup_id),
            restore_log,
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(self.output), self.cmd)
            )
        node.slow_start()
        self.assertEqual(full_checksum, node.table_checksum("t_heap"))
        node.cleanup()

        # Restore and check ptrack backup
        restore_log = self.restore_node(
            backup_dir, 'node', node,
            backup_id=ptrack_backup_id, options=["-j", "4"])
        self.assertIn(
            "INFO: Restore of backup {0} completed.".format(ptrack_backup_id),
            restore_log,
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(self.output), self.cmd))

        if self.paranoia:
            pgdata_restored = self.pgdata_content(
                node.data_dir, ignore_ptrack=False)
            self.compare_pgdata(pgdata, pgdata_restored)

        node.slow_start()
        self.assertEqual(ptrack_checksum, node.table_checksum("t_heap"))
1133

1134
    # @unittest.skip("skip")
1135
    def test_ptrack_archive(self):
        """make archive node, make full and ptrack backups,
            check data correctness in restored instance"""
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'checkpoint_timeout': '30s'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

        # FULL BACKUP
        node.safe_psql(
            "postgres",
            "create table t_heap as"
            " select i as id,"
            " md5(i::text) as text,"
            " md5(i::text)::tsvector as tsvector"
            " from generate_series(0,100) i")

        full_result = node.table_checksum("t_heap")
        full_backup_id = self.backup_node(backup_dir, 'node', node)
        # Remember each backup's recovery time; used below as a PITR target.
        full_target_time = self.show_pb(
            backup_dir, 'node', full_backup_id)['recovery-time']

        # PTRACK BACKUP
        node.safe_psql(
            "postgres",
            "insert into t_heap select i as id,"
            " md5(i::text) as text,"
            " md5(i::text)::tsvector as tsvector"
            " from generate_series(100,200) i")

        ptrack_result = node.table_checksum("t_heap")
        ptrack_backup_id = self.backup_node(
            backup_dir, 'node', node, backup_type='ptrack')
        ptrack_target_time = self.show_pb(
            backup_dir, 'node', ptrack_backup_id)['recovery-time']
        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        # Extra rows inserted after the ptrack backup: these must NOT appear
        # after a restore targeted at ptrack_target_time.
        node.safe_psql(
            "postgres",
            "insert into t_heap select i as id,"
            " md5(i::text) as text,"
            " md5(i::text)::tsvector as tsvector"
            " from generate_series(200, 300) i")

        # Drop Node
        node.cleanup()

        # Check full backup (point-in-time restore to the full backup's time)
        self.assertIn(
            "INFO: Restore of backup {0} completed.".format(full_backup_id),
            self.restore_node(
                backup_dir, 'node', node,
                backup_id=full_backup_id,
                options=[
                    "-j", "4", "--recovery-target-action=promote",
                    "--time={0}".format(full_target_time)]
            ),
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(self.output), self.cmd)
        )
        node.slow_start()

        full_result_new = node.table_checksum("t_heap")
        self.assertEqual(full_result, full_result_new)
        node.cleanup()

        # Check ptrack backup (point-in-time restore to the ptrack backup's time)
        self.assertIn(
            "INFO: Restore of backup {0} completed.".format(ptrack_backup_id),
            self.restore_node(
                backup_dir, 'node', node,
                backup_id=ptrack_backup_id,
                options=[
                    "-j", "4",
                    "--time={0}".format(ptrack_target_time),
                    "--recovery-target-action=promote"]
            ),
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(self.output), self.cmd)
        )

        if self.paranoia:
            pgdata_restored = self.pgdata_content(
                node.data_dir, ignore_ptrack=False)
            self.compare_pgdata(pgdata, pgdata_restored)

        node.slow_start()
        ptrack_result_new = node.table_checksum("t_heap")
        self.assertEqual(ptrack_result, ptrack_result_new)

        node.cleanup()
1240

1241
    @unittest.skip("skip")
1242
    def test_ptrack_pgpro417(self):
        """
        Make  node, take full backup, take ptrack backup,
        delete ptrack backup. Try to take ptrack backup,
        which should fail. Actual only for PTRACK 1.x
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'checkpoint_timeout': '30s'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        # FULL BACKUP
        node.safe_psql(
            "postgres",
            "create table t_heap as select i as id, md5(i::text) as text, "
            "md5(i::text)::tsvector as tsvector from generate_series(0,100) i")

        self.backup_node(
            backup_dir, 'node', node,
            backup_type='full', options=["--stream"])

        # PTRACK BACKUP
        node.safe_psql(
            "postgres",
            "insert into t_heap select i as id, md5(i::text) as text, "
            "md5(i::text)::tsvector as tsvector "
            "from generate_series(100,200) i")
        node.table_checksum("t_heap")
        backup_id = self.backup_node(
            backup_dir, 'node', node,
            backup_type='ptrack', options=["--stream"])

        # Deleting the ptrack backup breaks the incremental chain
        self.delete_pb(backup_dir, 'node', backup_id)

        # SECOND PTRACK BACKUP
        node.safe_psql(
            "postgres",
            "insert into t_heap select i as id, md5(i::text) as text, "
            "md5(i::text)::tsvector as tsvector "
            "from generate_series(200,300) i")

        try:
            self.backup_node(
                backup_dir, 'node', node,
                backup_type='ptrack', options=["--stream"])
            # we should die here because exception is what we expect to happen
            self.fail(
                "Expecting Error because of LSN mismatch from ptrack_control "
                "and previous backup start_lsn.\n"
                " Output: {0} \n CMD: {1}".format(repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'ERROR: LSN from ptrack_control',
                e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))
1312

1313
    @unittest.skip("skip")
1314
    def test_page_pgpro417(self):
        """
        Make archive node, take full backup, take page backup,
        delete page backup. Try to take ptrack backup, which should fail.
        Actual only for PTRACK 1.x
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'checkpoint_timeout': '30s'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # FULL BACKUP
        # NOTE(review): despite the comment, no full backup is actually taken
        # here — only the table is created. Test is skipped; confirm intent
        # before re-enabling.
        node.safe_psql(
            "postgres",
            "create table t_heap as select i as id, md5(i::text) as text, "
            "md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
        node.table_checksum("t_heap")

        # PAGE BACKUP
        node.safe_psql(
            "postgres",
            "insert into t_heap select i as id, md5(i::text) as text, "
            "md5(i::text)::tsvector as tsvector "
            "from generate_series(100,200) i")
        node.table_checksum("t_heap")
        backup_id = self.backup_node(
            backup_dir, 'node', node, backup_type='page')

        # Deleting the page backup breaks the incremental chain
        self.delete_pb(backup_dir, 'node', backup_id)

        # PTRACK BACKUP
        node.safe_psql(
            "postgres",
            "insert into t_heap select i as id, md5(i::text) as text, "
            "md5(i::text)::tsvector as tsvector "
            "from generate_series(200,300) i")

        try:
            self.backup_node(backup_dir, 'node', node, backup_type='ptrack')
            # we should die here because exception is what we expect to happen
            self.fail(
                "Expecting Error because of LSN mismatch from ptrack_control "
                "and previous backup start_lsn.\n "
                "Output: {0}\n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'ERROR: LSN from ptrack_control',
                e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))
1375

1376
    @unittest.skip("skip")
1377
    def test_full_pgpro417(self):
        """
        Make node, take two full backups, delete full second backup.
        Try to take ptrack backup, which should fail.
        Relevant only for PTRACK 1.x
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'checkpoint_timeout': '30s'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        # FULL BACKUP
        node.safe_psql(
            "postgres",
            "create table t_heap as select i as id, md5(i::text) as text,"
            " md5(i::text)::tsvector as tsvector "
            " from generate_series(0,100) i"
        )
        node.table_checksum("t_heap")
        self.backup_node(backup_dir, 'node', node, options=["--stream"])

        # SECOND FULL BACKUP
        node.safe_psql(
            "postgres",
            "insert into t_heap select i as id, md5(i::text) as text,"
            " md5(i::text)::tsvector as tsvector"
            " from generate_series(100,200) i"
        )
        node.table_checksum("t_heap")
        backup_id = self.backup_node(
            backup_dir, 'node', node, options=["--stream"])

        # Deleting the latest full backup breaks the incremental chain
        self.delete_pb(backup_dir, 'node', backup_id)

        # PTRACK BACKUP
        node.safe_psql(
            "postgres",
            "insert into t_heap select i as id, md5(i::text) as text, "
            "md5(i::text)::tsvector as tsvector "
            "from generate_series(200,300) i")
        try:
            self.backup_node(
                backup_dir, 'node', node,
                backup_type='ptrack', options=["--stream"])
            # we should die here because exception is what we expect to happen
            self.fail(
                "Expecting Error because of LSN mismatch from ptrack_control "
                "and previous backup start_lsn.\n "
                "Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd)
            )
        except ProbackupException as e:
            # Both fragments must be present in the error message.
            error_details = '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(e.message), self.cmd)
            self.assertIn(
                "ERROR: LSN from ptrack_control", e.message, error_details)
            self.assertIn(
                "Create new full backup before an incremental one",
                e.message, error_details)
1444

1445
    # @unittest.skip("skip")
1446
    def test_create_db(self):
1447
        """
1448
        Make node, take full backup, create database db1, take ptrack backup,
1449
        restore database and check it presense
1450
        """
1451
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
1452
        node = self.make_simple_node(
1453
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
1454
            set_replication=True,
1455
            ptrack_enable=True,
1456
            initdb_params=['--data-checksums'],
1457
            pg_options={
1458
                'max_wal_size': '10GB'})
1459

1460
        self.init_pb(backup_dir)
1461
        self.add_instance(backup_dir, 'node', node)
1462
        node.slow_start()
1463

1464
        node.safe_psql(
1465
            "postgres",
1466
            "CREATE EXTENSION ptrack")
1467

1468
        # FULL BACKUP
1469
        node.safe_psql(
1470
            "postgres",
1471
            "create table t_heap as select i as id, md5(i::text) as text, "
1472
            "md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
1473

1474
        node.table_checksum("t_heap")
1475
        self.backup_node(
1476
            backup_dir, 'node', node,
1477
            options=["--stream"])
1478

1479
        # CREATE DATABASE DB1
1480
        node.safe_psql("postgres", "create database db1")
1481
        node.safe_psql(
1482
            "db1",
1483
            "create table t_heap as select i as id, md5(i::text) as text, "
1484
            "md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
1485

1486
        # PTRACK BACKUP
1487
        backup_id = self.backup_node(
1488
            backup_dir, 'node', node,
1489
            backup_type='ptrack', options=["--stream"])
1490

1491
        if self.paranoia:
1492
            pgdata = self.pgdata_content(node.data_dir)
1493

1494
        # RESTORE
1495
        node_restored = self.make_simple_node(
1496
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
1497

1498
        node_restored.cleanup()
1499
        self.restore_node(
1500
            backup_dir, 'node', node_restored,
1501
            backup_id=backup_id, options=["-j", "4"])
1502

1503
        # COMPARE PHYSICAL CONTENT
1504
        if self.paranoia:
1505
            pgdata_restored = self.pgdata_content(
1506
                node_restored.data_dir, ignore_ptrack=False)
1507
            self.compare_pgdata(pgdata, pgdata_restored)
1508

1509
        # START RESTORED NODE
1510
        self.set_auto_conf(
1511
            node_restored, {'port': node_restored.port})
1512
        node_restored.slow_start()
1513

1514
        # DROP DATABASE DB1
1515
        node.safe_psql(
1516
            "postgres", "drop database db1")
1517
        # SECOND PTRACK BACKUP
1518
        backup_id = self.backup_node(
1519
            backup_dir, 'node', node,
1520
            backup_type='ptrack', options=["--stream"]
1521
        )
1522

1523
        if self.paranoia:
1524
            pgdata = self.pgdata_content(node.data_dir)
1525

1526
        # RESTORE SECOND PTRACK BACKUP
1527
        node_restored.cleanup()
1528
        self.restore_node(
1529
            backup_dir, 'node', node_restored,
1530
            backup_id=backup_id, options=["-j", "4"])
1531

1532
        # COMPARE PHYSICAL CONTENT
1533
        if self.paranoia:
1534
            pgdata_restored = self.pgdata_content(
1535
                node_restored.data_dir, ignore_ptrack=False)
1536
            self.compare_pgdata(pgdata, pgdata_restored)
1537

1538
        # START RESTORED NODE
1539
        self.set_auto_conf(
1540
            node_restored, {'port': node_restored.port})
1541
        node_restored.slow_start()
1542

1543
        try:
1544
            node_restored.safe_psql('db1', 'select 1')
1545
            # we should die here because exception is what we expect to happen
1546
            self.assertEqual(
1547
                1, 0,
1548
                "Expecting Error because we are connecting to deleted database"
1549
                "\n Output: {0} \n CMD: {1}".format(
1550
                    repr(self.output), self.cmd))
1551
        except QueryException as e:
1552
            self.assertTrue(
1553
                'FATAL:  database "db1" does not exist' in e.message,
1554
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
1555
                    repr(e.message), self.cmd))
1556

1557
    # @unittest.skip("skip")
    def test_create_db_on_replica(self):
        """
        Make node, take full backup, create replica from it,
        take full backup from replica,
        create database db1, take ptrack backup from replica,
        restore database and check its presence
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'checkpoint_timeout': '30s'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

        # FULL BACKUP
        node.safe_psql(
            "postgres",
            "create table t_heap as select i as id, md5(i::text) as text, "
            "md5(i::text)::tsvector as tsvector from generate_series(0,100) i")

        replica = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'replica'))
        replica.cleanup()

        self.backup_node(
            backup_dir, 'node', node, options=['-j10', '--stream'])

        self.restore_node(backup_dir, 'node', replica)

        # Add replica as a separate backup instance and start it in
        # synchronous standby mode so commits on master wait for it
        self.add_instance(backup_dir, 'replica', replica)
        self.set_replica(node, replica, 'replica', synchronous=True)
        replica.slow_start(replica=True)

        # FULL backup taken from the replica; master connection options are
        # needed so pg_probackup can run pg_start_backup() on the master
        self.backup_node(
            backup_dir, 'replica', replica,
            options=[
                '-j10',
                '--master-host=localhost',
                '--master-db=postgres',
                '--master-port={0}'.format(node.port),
                '--stream'
                ]
            )

        # CREATE DATABASE DB1
        node.safe_psql("postgres", "create database db1")
        node.safe_psql(
            "db1",
            "create table t_heap as select i as id, md5(i::text) as text, "
            "md5(i::text)::tsvector as tsvector from generate_series(0,100) i")

        # Wait until replica catches up with master, then checkpoint so the
        # new database's files are flushed on the replica before backup
        self.wait_until_replica_catch_with_master(node, replica)
        replica.safe_psql('postgres', 'checkpoint')

        # PTRACK BACKUP taken from the replica
        backup_id = self.backup_node(
            backup_dir, 'replica',
            replica, backup_type='ptrack',
            options=[
                '-j10',
                '--stream',
                '--master-host=localhost',
                '--master-db=postgres',
                '--master-port={0}'.format(node.port)
                ]
            )

        if self.paranoia:
            pgdata = self.pgdata_content(replica.data_dir)

        # RESTORE the ptrack backup into a fresh node
        node_restored = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
        node_restored.cleanup()

        self.restore_node(
            backup_dir, 'replica', node_restored,
            backup_id=backup_id, options=["-j", "4"])

        # COMPARE PHYSICAL CONTENT of replica and restored node
        if self.paranoia:
            pgdata_restored = self.pgdata_content(
                node_restored.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)
1655
    # @unittest.skip("skip")
    def test_alter_table_set_tablespace_ptrack(self):
        """Take a FULL backup of a table living in a tablespace, move the
        table to a freshly created tablespace, take a PTRACK backup and
        verify that a restore (with both tablespaces remapped) produces a
        physically identical cluster."""
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'],
            pg_options={'checkpoint_timeout': '30s'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

        # Populate a table inside tablespace 'somedata', then take the FULL backup
        self.create_tblspace_in_node(node, 'somedata')
        node.safe_psql(
            "postgres",
            "create table t_heap tablespace somedata as select i as id,"
            " md5(i::text) as text, md5(i::text)::tsvector as tsvector"
            " from generate_series(0,100) i")
        self.backup_node(backup_dir, 'node', node, options=["--stream"])

        # Relocate the table into a brand-new tablespace
        self.create_tblspace_in_node(node, 'somedata_new')
        node.safe_psql(
            "postgres",
            "alter table t_heap set tablespace somedata_new")

        # PTRACK backup must pick up the relocated relation
        self.backup_node(
            backup_dir, 'node', node,
            backup_type='ptrack',
            options=["--stream"])
        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        # Restore into a fresh node, remapping both tablespaces
        node_restored = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
        node_restored.cleanup()

        tblspc_remap = [
            "-T", "{0}={1}".format(
                self.get_tblspace_path(node, 'somedata'),
                self.get_tblspace_path(node_restored, 'somedata')
            ),
            "-T", "{0}={1}".format(
                self.get_tblspace_path(node, 'somedata_new'),
                self.get_tblspace_path(node_restored, 'somedata_new')
            )]
        self.restore_node(
            backup_dir, 'node', node_restored,
            options=["-j", "4"] + tblspc_remap)

        # Physically compare original and restored data directories
        if self.paranoia:
            pgdata_restored = self.pgdata_content(
                node_restored.data_dir, ignore_ptrack=False)
            self.compare_pgdata(pgdata, pgdata_restored)

        # Bring the restored node up on its own port
        self.set_auto_conf(
            node_restored, {'port': node_restored.port})
        node_restored.slow_start()
1740
    # @unittest.skip("skip")
    def test_alter_database_set_tablespace_ptrack(self):
        """Make node, create tablespace, move a whole database into it,
        take full backup, alter tablespace location,
        take ptrack backup, restore database.
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'checkpoint_timeout': '30s'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

        # FULL BACKUP
        self.backup_node(backup_dir, 'node', node, options=["--stream"])

        # CREATE TABLESPACE
        self.create_tblspace_in_node(node, 'somedata')

        # ALTER DATABASE: run from template1 because the database being
        # moved ('postgres') cannot have active connections
        node.safe_psql(
            "template1",
            "alter database postgres set tablespace somedata")

        # PTRACK BACKUP
        self.backup_node(
            backup_dir, 'node', node, backup_type='ptrack',
            options=["--stream"])

        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)
        node.stop()

        # RESTORE into a fresh node, remapping the tablespace directory
        node_restored = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
        node_restored.cleanup()
        self.restore_node(
            backup_dir, 'node',
            node_restored,
            options=[
                "-j", "4",
                "-T", "{0}={1}".format(
                    self.get_tblspace_path(node, 'somedata'),
                    self.get_tblspace_path(node_restored, 'somedata'))])

        # GET PHYSICAL CONTENT and COMPARE PHYSICAL CONTENT
        if self.paranoia:
            pgdata_restored = self.pgdata_content(
                node_restored.data_dir, ignore_ptrack=False)
            self.compare_pgdata(pgdata, pgdata_restored)

        # START RESTORED NODE — reuse the original node's port, which is
        # free because the original node was stopped above
        node_restored.port = node.port
        node_restored.slow_start()
1805
    # @unittest.skip("skip")
    def test_drop_tablespace(self):
        """
        Make node, create table, move it into a tablespace and back,
        drop the tablespace, taking a ptrack backup after each step;
        then restore and verify the tablespace is gone and the data intact.
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'checkpoint_timeout': '30s'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

        self.create_tblspace_in_node(node, 'somedata')

        # CREATE TABLE
        node.safe_psql(
            "postgres",
            "create table t_heap as select i as id, md5(i::text) as text, "
            "md5(i::text)::tsvector as tsvector from generate_series(0,100) i")

        # Logical checksum taken before any relocation; compared after restore
        result = node.table_checksum("t_heap")
        # FULL BACKUP
        self.backup_node(backup_dir, 'node', node, options=["--stream"])

        # Move table to tablespace 'somedata'
        node.safe_psql(
            "postgres", "alter table t_heap set tablespace somedata")
        # PTRACK BACKUP
        self.backup_node(
            backup_dir, 'node', node,
            backup_type='ptrack', options=["--stream"])

        # Move table back to default tablespace
        node.safe_psql(
            "postgres", "alter table t_heap set tablespace pg_default")
        # SECOND PTRACK BACKUP
        self.backup_node(
            backup_dir, 'node', node,
            backup_type='ptrack', options=["--stream"])

        # DROP TABLESPACE 'somedata'
        node.safe_psql(
            "postgres", "drop tablespace somedata")
        # THIRD PTRACK BACKUP
        self.backup_node(
            backup_dir, 'node', node,
            backup_type='ptrack', options=["--stream"])

        if self.paranoia:
            pgdata = self.pgdata_content(
                node.data_dir, ignore_ptrack=True)

        # Wipe the cluster and the (already dropped) tablespace directory,
        # then restore the latest ptrack backup in place
        tblspace = self.get_tblspace_path(node, 'somedata')
        node.cleanup()
        shutil.rmtree(tblspace, ignore_errors=True)
        self.restore_node(backup_dir, 'node', node, options=["-j", "4"])

        if self.paranoia:
            pgdata_restored = self.pgdata_content(
                node.data_dir, ignore_ptrack=True)

        node.slow_start()

        # BUGFIX: safe_psql() returns bytes, so the previous comparison
        # b"t..." == 't' could never be true and the check was dead.
        # Decode first (same pattern as other tests in this file).
        tblspc_exist = node.safe_psql(
            "postgres",
            "select exists(select 1 from "
            "pg_tablespace where spcname = 'somedata')").decode('utf-8').rstrip()

        if tblspc_exist == 't':
            self.assertEqual(
                1, 0,
                "Expecting Error because "
                "tablespace 'somedata' should not be present")

        # Data must survive the move/drop/restore round-trip
        result_new = node.table_checksum("t_heap")
        self.assertEqual(result, result_new)

        if self.paranoia:
            self.compare_pgdata(pgdata, pgdata_restored)
1896
    # @unittest.skip("skip")
    def test_ptrack_alter_tablespace(self):
        """
        Make node, create table, alter table tablespace, take ptrack backup,
        move table from tablespace, take ptrack backup; after each ptrack
        backup restore it and compare both physical and logical content.
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'checkpoint_timeout': '30s'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

        self.create_tblspace_in_node(node, 'somedata')
        tblspc_path = self.get_tblspace_path(node, 'somedata')

        # CREATE TABLE
        node.safe_psql(
            "postgres",
            "create table t_heap as select i as id, md5(i::text) as text, "
            "md5(i::text)::tsvector as tsvector from generate_series(0,100) i")

        # FULL BACKUP
        # (the checksum formerly taken here was a dead store: it was
        # overwritten below before ever being used, so it was removed)
        self.backup_node(backup_dir, 'node', node, options=["--stream"])

        # Move table to separate tablespace
        node.safe_psql(
            "postgres",
            "alter table t_heap set tablespace somedata")
        # GET LOGICAL CONTENT FROM NODE
        result = node.table_checksum("t_heap")

        # FIRST PTRACK BACKUP
        self.backup_node(
            backup_dir, 'node', node, backup_type='ptrack',
            options=["--stream"])

        # GET PHYSICAL CONTENT FROM NODE
        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        # Restore ptrack backup into a fresh node, remapping the tablespace
        restored_node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'restored_node'))
        restored_node.cleanup()
        tblspc_path_new = self.get_tblspace_path(
            restored_node, 'somedata_restored')
        self.restore_node(backup_dir, 'node', restored_node, options=[
            "-j", "4", "-T", "{0}={1}".format(tblspc_path, tblspc_path_new)])

        # GET PHYSICAL CONTENT FROM RESTORED NODE and COMPARE PHYSICAL CONTENT
        if self.paranoia:
            pgdata_restored = self.pgdata_content(
                restored_node.data_dir, ignore_ptrack=False)
            self.compare_pgdata(pgdata, pgdata_restored)

        # START RESTORED NODE
        self.set_auto_conf(
            restored_node, {'port': restored_node.port})
        restored_node.slow_start()

        # COMPARE LOGICAL CONTENT
        result_new = restored_node.table_checksum("t_heap")
        self.assertEqual(result, result_new)

        # Tear the restored node down so it can be reused for the next restore
        restored_node.cleanup()
        shutil.rmtree(tblspc_path_new, ignore_errors=True)

        # Move table back to default tablespace
        node.safe_psql(
            "postgres", "alter table t_heap set tablespace pg_default")
        # SECOND PTRACK BACKUP
        self.backup_node(
            backup_dir, 'node', node, backup_type='ptrack',
            options=["--stream"])

        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        # Restore second ptrack backup and check table consistency
        self.restore_node(
            backup_dir, 'node', restored_node,
            options=[
                "-j", "4", "-T", "{0}={1}".format(tblspc_path, tblspc_path_new)])

        # GET PHYSICAL CONTENT FROM RESTORED NODE and COMPARE PHYSICAL CONTENT
        if self.paranoia:
            pgdata_restored = self.pgdata_content(
                restored_node.data_dir, ignore_ptrack=False)
            self.compare_pgdata(pgdata, pgdata_restored)

        # START RESTORED NODE
        self.set_auto_conf(
            restored_node, {'port': restored_node.port})
        restored_node.slow_start()

        result_new = restored_node.table_checksum("t_heap")
        self.assertEqual(result, result_new)
2006
    # @unittest.skip("skip")
    def test_ptrack_multiple_segments(self):
        """
        Make node, init pgbench in a tablespace so relations span multiple
        segments, take full backup, run pgbench, take ptrack backup,
        restore it and compare physical and logical content.
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'full_page_writes': 'off'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

        self.create_tblspace_in_node(node, 'somedata')

        # CREATE TABLE: scale 100 makes pgbench_accounts large enough to
        # span multiple 1GB segment files
        node.pgbench_init(scale=100, options=['--tablespace=somedata'])
        # FULL BACKUP
        # (the checksum formerly taken here was a dead store: it was
        # overwritten below before ever being used, so it was removed)
        self.backup_node(backup_dir, 'node', node, options=['--stream'])

        # PTRACK STUFF: manual ptrack-map sanity checks, PTRACK 1.x only
        if node.major_version < 11:
            idx_ptrack = {'type': 'heap'}
            idx_ptrack['path'] = self.get_fork_path(node, 'pgbench_accounts')
            idx_ptrack['old_size'] = self.get_fork_size(node, 'pgbench_accounts')
            idx_ptrack['old_pages'] = self.get_md5_per_page_for_fork(
                idx_ptrack['path'], idx_ptrack['old_size'])

        pgbench = node.pgbench(
            options=['-T', '30', '-c', '1', '--no-vacuum'])
        pgbench.wait()

        node.safe_psql("postgres", "checkpoint")

        if node.major_version < 11:
            idx_ptrack['new_size'] = self.get_fork_size(
                node,
                'pgbench_accounts')

            idx_ptrack['new_pages'] = self.get_md5_per_page_for_fork(
                idx_ptrack['path'],
                idx_ptrack['new_size'])

            idx_ptrack['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                node,
                idx_ptrack['path'])

            if not self.check_ptrack_sanity(idx_ptrack):
                self.assertTrue(
                    False, 'Ptrack has failed to register changes in data files')

        # GET LOGICAL CONTENT FROM NODE
        # it`s stupid, because hint`s are ignored by ptrack
        result = node.table_checksum("pgbench_accounts")
        # FIRST PTRACK BACKUP
        self.backup_node(
            backup_dir, 'node', node, backup_type='ptrack', options=['--stream'])

        # GET PHYSICAL CONTENT FROM NODE
        pgdata = self.pgdata_content(node.data_dir)

        # RESTORE NODE into a fresh directory, remapping the tablespace
        restored_node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'restored_node'))
        restored_node.cleanup()
        tblspc_path = self.get_tblspace_path(node, 'somedata')
        tblspc_path_new = self.get_tblspace_path(
            restored_node,
            'somedata_restored')

        self.restore_node(
            backup_dir, 'node', restored_node,
            options=[
                "-j", "4", "-T", "{0}={1}".format(
                    tblspc_path, tblspc_path_new)])

        # GET PHYSICAL CONTENT FROM NODE_RESTORED
        if self.paranoia:
            pgdata_restored = self.pgdata_content(
                restored_node.data_dir, ignore_ptrack=False)

        # START RESTORED NODE
        self.set_auto_conf(
            restored_node, {'port': restored_node.port})
        restored_node.slow_start()

        result_new = restored_node.table_checksum("pgbench_accounts")

        # COMPARE RESTORED FILES
        self.assertEqual(result, result_new, 'data is lost')

        if self.paranoia:
            self.compare_pgdata(pgdata, pgdata_restored)
2111
    @unittest.skip("skip")
    def test_atexit_fail(self):
        """
        Force a ptrack backup to fail by opening more connections than the
        server allows, and check the failed backup released its state.
        Relevant only for PTRACK 1.x
        """
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'max_connections': '15'})

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        # Take FULL backup to clean every ptrack
        self.backup_node(
            backup_dir, 'node', node, options=['--stream'])

        try:
            # 30 parallel jobs against max_connections=15 must fail
            self.backup_node(
                backup_dir, 'node', node, backup_type='ptrack',
                options=["--stream", "-j 30"])

            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because we are opening too many connections"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd)
            )
        except ProbackupException as e:
            self.assertIn(
                'setting its status to ERROR',
                e.message,
                '\n Unexpected Error Message: {0}\n'
                ' CMD: {1}'.format(repr(e.message), self.cmd)
            )

        # BUGFIX: safe_psql() returns bytes; the previous comparison of
        # bytes against the str "f" would always fail even when the server
        # had correctly left backup mode. Decode before comparing.
        self.assertEqual(
            node.safe_psql(
                "postgres",
                "select * from pg_is_in_backup()").decode('utf-8').rstrip(),
            "f")
2160
    @unittest.skip("skip")
    # @unittest.expectedFailure
    def test_ptrack_clean(self):
        """
        Take backups of every available types and check that PTRACK is clean
        Relevant only for PTRACK 1.x
        """
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        self.create_tblspace_in_node(node, 'somedata')

        # Create table and indexes; idx_ptrack is the module-level catalog
        # of relations/index types imported from ptrack_helpers
        node.safe_psql(
            "postgres",
            "create extension bloom; create sequence t_seq; "
            "create table t_heap tablespace somedata "
            "as select i as id, nextval('t_seq') as t_seq, "
            "md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,2560) i")
        for i in idx_ptrack:
            if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
                node.safe_psql(
                    "postgres",
                    "create index {0} on {1} using {2}({3}) "
                    "tablespace somedata".format(
                        i, idx_ptrack[i]['relation'],
                        idx_ptrack[i]['type'],
                        idx_ptrack[i]['column']))

        # Take FULL backup to clean every ptrack
        self.backup_node(
            backup_dir, 'node', node,
            options=['-j10', '--stream'])
        node.safe_psql('postgres', 'checkpoint')

        for i in idx_ptrack:
            # get fork size and calculate it in pages
            idx_ptrack[i]['size'] = self.get_fork_size(node, i)
            # get path to heap and index files
            idx_ptrack[i]['path'] = self.get_fork_path(node, i)
            # get ptrack for every idx
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                node, idx_ptrack[i]['path'], [idx_ptrack[i]['size']])
            self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])

        # Update everything and vacuum it
        node.safe_psql(
            'postgres',
            "update t_heap set t_seq = nextval('t_seq'), "
            "text = md5(text), "
            "tsvector = md5(repeat(tsvector::text, 10))::tsvector;")
        node.safe_psql('postgres', 'vacuum t_heap')

        # Take PTRACK backup to clean every ptrack
        # NOTE(review): backup_id is captured but never used below
        backup_id = self.backup_node(
            backup_dir, 'node', node, backup_type='ptrack', options=['-j10', '--stream'])

        node.safe_psql('postgres', 'checkpoint')

        for i in idx_ptrack:
            # get new size of heap and indexes and calculate it in pages
            idx_ptrack[i]['size'] = self.get_fork_size(node, i)
            # update path to heap and index files in case they`ve changed
            idx_ptrack[i]['path'] = self.get_fork_path(node, i)
            # # get ptrack for every idx
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                node, idx_ptrack[i]['path'], [idx_ptrack[i]['size']])
            # check that ptrack bits are cleaned
            self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])

        # Update everything and vacuum it
        node.safe_psql(
            'postgres',
            "update t_heap set t_seq = nextval('t_seq'), "
            "text = md5(text), "
            "tsvector = md5(repeat(tsvector::text, 10))::tsvector;")
        node.safe_psql('postgres', 'vacuum t_heap')

        # Take PAGE backup to clean every ptrack
        self.backup_node(
            backup_dir, 'node', node,
            backup_type='page', options=['-j10', '--stream'])
        node.safe_psql('postgres', 'checkpoint')

        for i in idx_ptrack:
            # get new size of heap and indexes and calculate it in pages
            idx_ptrack[i]['size'] = self.get_fork_size(node, i)
            # update path to heap and index files in case they`ve changed
            idx_ptrack[i]['path'] = self.get_fork_path(node, i)
            # # get ptrack for every idx
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                node, idx_ptrack[i]['path'], [idx_ptrack[i]['size']])
            # check that ptrack bits are cleaned
            self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])
2265
    @unittest.skip("skip")
    def test_ptrack_clean_replica(self):
        """
        Take every available backup type from the master and verify that
        the PTRACK map on the replica is clean after each one.
        Relevant only for PTRACK 1.x
        """
        master = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'master'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'archive_timeout': '30s'})

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'master', master)
        master.slow_start()

        self.backup_node(backup_dir, 'master', master, options=['--stream'])

        replica = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'replica'))
        replica.cleanup()

        self.restore_node(backup_dir, 'master', replica)

        self.add_instance(backup_dir, 'replica', replica)
        self.set_replica(master, replica, synchronous=True)
        replica.slow_start(replica=True)

        # Populate a table plus one index per supported access method
        master.safe_psql(
            "postgres",
            "create extension bloom; create sequence t_seq; "
            "create table t_heap as select i as id, "
            "nextval('t_seq') as t_seq, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,2560) i")
        for name in idx_ptrack:
            if idx_ptrack[name]['type'] not in ('heap', 'seq'):
                master.safe_psql(
                    "postgres",
                    "create index {0} on {1} using {2}({3})".format(
                        name, idx_ptrack[name]['relation'],
                        idx_ptrack[name]['type'],
                        idx_ptrack[name]['column']))

        # A FULL backup resets every ptrack bit
        self.backup_node(
            backup_dir, 'replica', replica,
            options=[
                '-j10', '--stream',
                '--master-host=localhost',
                '--master-db=postgres',
                '--master-port={0}'.format(master.port)])
        master.safe_psql('postgres', 'checkpoint')

        for name in idx_ptrack:
            entry = idx_ptrack[name]
            # fork size, in pages
            entry['size'] = self.get_fork_size(replica, name)
            # current on-disk location of the fork
            entry['path'] = self.get_fork_path(replica, name)
            # ptrack bitmap for the whole fork
            entry['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                replica, entry['path'], [entry['size']])
            self.check_ptrack_clean(entry, entry['size'])

        # Touch every row, then vacuum
        master.safe_psql(
            'postgres',
            "update t_heap set t_seq = nextval('t_seq'), "
            "text = md5(text), "
            "tsvector = md5(repeat(tsvector::text, 10))::tsvector;")
        master.safe_psql('postgres', 'vacuum t_heap')

        # A PTRACK backup must leave the map clean as well
        self.backup_node(
            backup_dir, 'replica', replica,
            backup_type='ptrack',
            options=[
                '-j10', '--stream',
                '--master-host=localhost',
                '--master-db=postgres',
                '--master-port={0}'.format(master.port)])
        master.safe_psql('postgres', 'checkpoint')

        for name in idx_ptrack:
            entry = idx_ptrack[name]
            # refresh size and path in case the relfilenode changed
            entry['size'] = self.get_fork_size(replica, name)
            entry['path'] = self.get_fork_path(replica, name)
            entry['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                replica, entry['path'], [entry['size']])
            # map must be clean after the PTRACK backup
            self.check_ptrack_clean(entry, entry['size'])

        # Touch every row again, vacuum and checkpoint
        master.safe_psql(
            'postgres',
            "update t_heap set t_seq = nextval('t_seq'), text = md5(text), "
            "tsvector = md5(repeat(tsvector::text, 10))::tsvector;")
        master.safe_psql('postgres', 'vacuum t_heap')
        master.safe_psql('postgres', 'checkpoint')

        # A PAGE backup: same expectation, clean map afterwards
        self.backup_node(
            backup_dir, 'replica', replica,
            backup_type='page',
            options=[
                '-j10', '--master-host=localhost',
                '--master-db=postgres',
                '--master-port={0}'.format(master.port),
                '--stream'])
        master.safe_psql('postgres', 'checkpoint')

        for name in idx_ptrack:
            entry = idx_ptrack[name]
            entry['size'] = self.get_fork_size(replica, name)
            entry['path'] = self.get_fork_path(replica, name)
            entry['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                replica, entry['path'], [entry['size']])
            self.check_ptrack_clean(entry, entry['size'])
    # @unittest.skip("skip")
    # @unittest.expectedFailure
    def test_ptrack_cluster_on_btree(self):
        """
        CLUSTER the test table using its btree index and verify that
        ptrack tracked the rewrite: map sanity on PTRACK 1.x, plus a
        PTRACK backup / restore / compare cycle (previously the test
        performed no verification at all on PG >= 11).
        """
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

        self.create_tblspace_in_node(node, 'somedata')

        # Create table and indexes
        node.safe_psql(
            "postgres",
            "create extension bloom; create sequence t_seq; "
            "create table t_heap tablespace somedata "
            "as select i as id, nextval('t_seq') as t_seq, "
            "md5(i::text) as text, md5(repeat(i::text,10))::tsvector "
            "as tsvector from generate_series(0,2560) i")
        for i in idx_ptrack:
            if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
                node.safe_psql(
                    "postgres",
                    "create index {0} on {1} using {2}({3}) "
                    "tablespace somedata".format(
                        i, idx_ptrack[i]['relation'],
                        idx_ptrack[i]['type'], idx_ptrack[i]['column']))

        node.safe_psql('postgres', 'vacuum t_heap')
        node.safe_psql('postgres', 'checkpoint')

        # PTRACK 1.x only: remember fork sizes, paths and page checksums
        if node.major_version < 11:
            for i in idx_ptrack:
                # get size of heap and indexes. size calculated in pages
                idx_ptrack[i]['old_size'] = self.get_fork_size(node, i)
                # get path to heap and index files
                idx_ptrack[i]['path'] = self.get_fork_path(node, i)
                # calculate md5sums of pages
                idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
                    idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

        self.backup_node(
            backup_dir, 'node', node, options=['-j10', '--stream'])

        node.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
        node.safe_psql('postgres', 'cluster t_heap using t_btree')
        node.safe_psql('postgres', 'checkpoint')

        # CHECK PTRACK SANITY (PTRACK 1.x map is directly inspectable)
        if node.major_version < 11:
            self.check_ptrack_map_sanity(node, idx_ptrack)

        # FIX: previously the test ended here, so on PG >= 11 nothing was
        # verified.  Mirror test_ptrack_cluster_on_gist: take a PTRACK
        # backup, restore it and compare the data directories.
        self.backup_node(
            backup_dir, 'node', node,
            backup_type='ptrack', options=['-j10', '--stream'])

        pgdata = self.pgdata_content(node.data_dir)

        node.cleanup()
        # the tablespace directory lives outside data_dir and must be
        # emptied too, as done in test_ptrack_truncate
        shutil.rmtree(
            self.get_tblspace_path(node, 'somedata'),
            ignore_errors=True)

        self.restore_node(backup_dir, 'node', node)

        pgdata_restored = self.pgdata_content(node.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)
    # @unittest.skip("skip")
    def test_ptrack_cluster_on_gist(self):
        """
        CLUSTER t_heap by its gist index, then verify that a PTRACK
        backup restores to an identical data directory.
        """
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

        # Seed a table plus one index per supported access method
        node.safe_psql(
            "postgres",
            "create extension bloom; create sequence t_seq; "
            "create table t_heap as select i as id, "
            "nextval('t_seq') as t_seq, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,2560) i")
        for idx in idx_ptrack:
            if idx_ptrack[idx]['type'] not in ('heap', 'seq'):
                node.safe_psql(
                    "postgres",
                    "create index {0} on {1} using {2}({3})".format(
                        idx, idx_ptrack[idx]['relation'],
                        idx_ptrack[idx]['type'], idx_ptrack[idx]['column']))

        node.safe_psql('postgres', 'vacuum t_heap')
        node.safe_psql('postgres', 'checkpoint')

        for idx in idx_ptrack:
            entry = idx_ptrack[idx]
            # fork size, in pages
            entry['old_size'] = self.get_fork_size(node, idx)
            # on-disk location of the fork
            entry['path'] = self.get_fork_path(node, idx)
            # per-page md5 fingerprints
            entry['old_pages'] = self.get_md5_per_page_for_fork(
                entry['path'], entry['old_size'])

        self.backup_node(
            backup_dir, 'node', node, options=['-j10', '--stream'])

        node.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
        node.safe_psql('postgres', 'cluster t_heap using t_gist')
        node.safe_psql('postgres', 'checkpoint')

        # PTRACK 1.x keeps a per-fork map we can sanity-check directly
        if node.major_version < 11:
            self.check_ptrack_map_sanity(node, idx_ptrack)

        self.backup_node(
            backup_dir, 'node', node,
            backup_type='ptrack', options=['-j10', '--stream'])

        pgdata = self.pgdata_content(node.data_dir)
        node.cleanup()

        self.restore_node(backup_dir, 'node', node)

        pgdata_restored = self.pgdata_content(node.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)
    # @unittest.skip("skip")
    def test_ptrack_cluster_on_btree_replica(self):
        """
        CLUSTER t_heap on the master using its btree index, take a PTRACK
        backup from the replica, restore it into a fresh node and compare
        the data directories.
        """
        master = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'master'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'master', master)
        master.slow_start()

        if master.major_version >= 11:
            master.safe_psql(
                "postgres",
                "CREATE EXTENSION ptrack")

        self.backup_node(backup_dir, 'master', master, options=['--stream'])

        replica = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'replica'))
        replica.cleanup()

        self.restore_node(backup_dir, 'master', replica)

        self.add_instance(backup_dir, 'replica', replica)
        self.set_replica(master, replica, synchronous=True)
        replica.slow_start(replica=True)

        # Create table and indexes
        master.safe_psql(
            "postgres",
            "create extension bloom; create sequence t_seq; "
            "create table t_heap as select i as id, "
            "nextval('t_seq') as t_seq, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,2560) i")

        for i in idx_ptrack:
            if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
                master.safe_psql(
                    "postgres",
                    "create index {0} on {1} using {2}({3})".format(
                        i, idx_ptrack[i]['relation'],
                        idx_ptrack[i]['type'],
                        idx_ptrack[i]['column']))

        master.safe_psql('postgres', 'vacuum t_heap')
        master.safe_psql('postgres', 'checkpoint')

        self.backup_node(
            backup_dir, 'replica', replica, options=[
                '-j10', '--stream', '--master-host=localhost',
                '--master-db=postgres', '--master-port={0}'.format(
                    master.port)])

        for i in idx_ptrack:
            # get size of heap and indexes. size calculated in pages
            idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i)
            # get path to heap and index files
            idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
            # calculate md5sums of pages
            idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
                idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

        master.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
        master.safe_psql('postgres', 'cluster t_heap using t_btree')
        master.safe_psql('postgres', 'checkpoint')

        # Sync master and replica
        self.wait_until_replica_catch_with_master(master, replica)
        replica.safe_psql('postgres', 'checkpoint')

        # CHECK PTRACK SANITY (PTRACK 1.x only)
        if master.major_version < 11:
            self.check_ptrack_map_sanity(replica, idx_ptrack)

        self.backup_node(
            backup_dir, 'replica', replica,
            backup_type='ptrack', options=['-j10', '--stream'])

        pgdata = self.pgdata_content(replica.data_dir)

        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'))
        node.cleanup()

        self.restore_node(backup_dir, 'replica', node)

        # BUGFIX: the restored cluster lives in 'node', not 'replica';
        # reading replica.data_dir here compared the replica with itself,
        # which made the final check pass unconditionally.
        pgdata_restored = self.pgdata_content(node.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)
    # @unittest.skip("skip")
    def test_ptrack_cluster_on_gist_replica(self):
        """
        CLUSTER t_heap on the master using its gist index, take a PTRACK
        backup from the replica, restore it into a fresh node and compare
        the data directories.
        """
        master = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'master'),
            set_replication=True,
            ptrack_enable=True)

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'master', master)
        master.slow_start()

        if master.major_version >= 11:
            master.safe_psql(
                "postgres",
                "CREATE EXTENSION ptrack")

        self.backup_node(backup_dir, 'master', master, options=['--stream'])

        replica = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'replica'))
        replica.cleanup()

        self.restore_node(backup_dir, 'master', replica)

        self.add_instance(backup_dir, 'replica', replica)
        self.set_replica(master, replica, 'replica', synchronous=True)
        replica.slow_start(replica=True)

        # Create table and indexes
        master.safe_psql(
            "postgres",
            "create extension bloom; create sequence t_seq; "
            "create table t_heap as select i as id, "
            "nextval('t_seq') as t_seq, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,2560) i")

        for i in idx_ptrack:
            if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
                master.safe_psql(
                    "postgres",
                    "create index {0} on {1} using {2}({3})".format(
                        i, idx_ptrack[i]['relation'],
                        idx_ptrack[i]['type'],
                        idx_ptrack[i]['column']))

        master.safe_psql('postgres', 'vacuum t_heap')
        master.safe_psql('postgres', 'checkpoint')

        # Sync master and replica
        self.wait_until_replica_catch_with_master(master, replica)
        replica.safe_psql('postgres', 'checkpoint')

        self.backup_node(
            backup_dir, 'replica', replica, options=[
                '-j10', '--stream', '--master-host=localhost',
                '--master-db=postgres', '--master-port={0}'.format(
                    master.port)])

        for i in idx_ptrack:
            # get size of heap and indexes. size calculated in pages
            idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i)
            # get path to heap and index files
            idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
            # calculate md5sums of pages
            idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
                idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

        master.safe_psql('postgres', 'DELETE FROM t_heap WHERE id%2 = 1')
        master.safe_psql('postgres', 'CLUSTER t_heap USING t_gist')

        if master.major_version < 11:
            master.safe_psql('postgres', 'CHECKPOINT')

        # Sync master and replica
        self.wait_until_replica_catch_with_master(master, replica)

        if master.major_version < 11:
            replica.safe_psql('postgres', 'CHECKPOINT')
            self.check_ptrack_map_sanity(replica, idx_ptrack)

        self.backup_node(
            backup_dir, 'replica', replica,
            backup_type='ptrack', options=['-j10', '--stream'])

        if self.paranoia:
            pgdata = self.pgdata_content(replica.data_dir)

        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'))
        node.cleanup()

        self.restore_node(backup_dir, 'replica', node)

        if self.paranoia:
            # BUGFIX: compare the restored 'node', not the replica with
            # itself (the original read replica.data_dir, so the check
            # passed unconditionally).
            pgdata_restored = self.pgdata_content(node.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)
    # @unittest.skip("skip")
    # @unittest.expectedFailure
    def test_ptrack_empty(self):
        """
        Back up a table with no rows and verify that the PTRACK-based
        incremental backup restores to an identical data directory.
        """
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

        self.create_tblspace_in_node(node, 'somedata')

        # An empty table: no heap pages at all
        node.safe_psql(
            "postgres",
            "create extension bloom; create sequence t_seq; "
            "create table t_heap "
            "(id int DEFAULT nextval('t_seq'), text text, tsvector tsvector) "
            "tablespace somedata")

        # FULL backup resets every ptrack bit
        self.backup_node(
            backup_dir, 'node', node,
            options=['-j10', '--stream'])

        # Indexes created after the FULL backup are the ptrack delta
        for idx in idx_ptrack:
            if idx_ptrack[idx]['type'] not in ('heap', 'seq'):
                node.safe_psql(
                    "postgres",
                    "create index {0} on {1} using {2}({3}) "
                    "tablespace somedata".format(
                        idx, idx_ptrack[idx]['relation'],
                        idx_ptrack[idx]['type'],
                        idx_ptrack[idx]['column']))

        node.safe_psql('postgres', 'checkpoint')

        node_restored = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
        node_restored.cleanup()

        src_tblspace = self.get_tblspace_path(node, 'somedata')
        dst_tblspace = self.get_tblspace_path(node_restored, 'somedata')

        # Incremental PTRACK backup on top of the FULL one
        backup_id = self.backup_node(
            backup_dir, 'node', node, backup_type='ptrack',
            options=['-j10', '--stream'])

        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        self.restore_node(
            backup_dir, 'node', node_restored,
            backup_id=backup_id,
            options=[
                "-j", "4",
                "-T{0}={1}".format(src_tblspace, dst_tblspace)])

        if self.paranoia:
            pgdata_restored = self.pgdata_content(node_restored.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)
    # @unittest.skip("skip")
    # @unittest.expectedFailure
    def test_ptrack_empty_replica(self):
        """
        Back up an empty table through a replica and verify that the
        PTRACK incremental backup restores to an identical data directory.
        """
        master = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'master'),
            set_replication=True,
            initdb_params=['--data-checksums'],
            ptrack_enable=True)

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'master', master)
        master.slow_start()

        if master.major_version >= 11:
            master.safe_psql(
                "postgres",
                "CREATE EXTENSION ptrack")

        self.backup_node(backup_dir, 'master', master, options=['--stream'])

        replica = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'replica'))
        replica.cleanup()

        self.restore_node(backup_dir, 'master', replica)

        self.add_instance(backup_dir, 'replica', replica)
        self.set_replica(master, replica, synchronous=True)
        replica.slow_start(replica=True)

        # Rowless table: nothing but catalog changes so far
        master.safe_psql(
            "postgres",
            "create extension bloom; create sequence t_seq; "
            "create table t_heap "
            "(id int DEFAULT nextval('t_seq'), text text, tsvector tsvector)")
        self.wait_until_replica_catch_with_master(master, replica)

        # FULL backup taken from the replica
        self.backup_node(
            backup_dir, 'replica', replica,
            options=[
                '-j10', '--stream',
                '--master-host=localhost',
                '--master-db=postgres',
                '--master-port={0}'.format(master.port)])

        # Indexes created after the FULL backup are the ptrack delta
        for idx in idx_ptrack:
            if idx_ptrack[idx]['type'] not in ('heap', 'seq'):
                master.safe_psql(
                    "postgres",
                    "create index {0} on {1} using {2}({3})".format(
                        idx, idx_ptrack[idx]['relation'],
                        idx_ptrack[idx]['type'],
                        idx_ptrack[idx]['column']))

        self.wait_until_replica_catch_with_master(master, replica)

        # Incremental PTRACK backup from the replica
        backup_id = self.backup_node(
            backup_dir, 'replica', replica,
            backup_type='ptrack',
            options=[
                '-j1', '--stream',
                '--master-host=localhost',
                '--master-db=postgres',
                '--master-port={0}'.format(master.port)])

        if self.paranoia:
            pgdata = self.pgdata_content(replica.data_dir)

        node_restored = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
        node_restored.cleanup()

        self.restore_node(
            backup_dir, 'replica', node_restored,
            backup_id=backup_id, options=["-j", "4"])

        if self.paranoia:
            pgdata_restored = self.pgdata_content(node_restored.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)
    # @unittest.skip("skip")
    # @unittest.expectedFailure
    def test_ptrack_truncate(self):
        """
        TRUNCATE the table between the FULL and PTRACK backups and verify
        that the PTRACK backup restores to an identical data directory.
        """
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

        self.create_tblspace_in_node(node, 'somedata')

        # Seed the table inside the custom tablespace
        node.safe_psql(
            "postgres",
            "create extension bloom; create sequence t_seq; "
            "create table t_heap tablespace somedata "
            "as select i as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,2560) i")

        # PTRACK 1.x only: the secondary indexes are needed for the map checks
        if node.major_version < 11:
            for idx in idx_ptrack:
                if idx_ptrack[idx]['type'] not in ('heap', 'seq'):
                    node.safe_psql(
                        "postgres",
                        "create index {0} on {1} using {2}({3}) "
                        "tablespace somedata".format(
                            idx, idx_ptrack[idx]['relation'],
                            idx_ptrack[idx]['type'], idx_ptrack[idx]['column']))

        self.backup_node(
            backup_dir, 'node', node, options=['--stream'])

        node.safe_psql('postgres', 'truncate t_heap')
        node.safe_psql('postgres', 'checkpoint')

        if node.major_version < 11:
            for idx in idx_ptrack:
                entry = idx_ptrack[idx]
                # fork size, in pages
                entry['old_size'] = self.get_fork_size(node, idx)
                # on-disk location of the fork
                entry['path'] = self.get_fork_path(node, idx)
                # per-page md5 fingerprints
                entry['old_pages'] = self.get_md5_per_page_for_fork(
                    entry['path'], entry['old_size'])

        # The PTRACK backup resets the map again
        self.backup_node(
            backup_dir, 'node', node,
            backup_type='ptrack', options=['-j10', '--stream'])

        pgdata = self.pgdata_content(node.data_dir)

        if node.major_version < 11:
            for idx in idx_ptrack:
                entry = idx_ptrack[idx]
                entry['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                    node, entry['path'], [entry['old_size']])
                self.check_ptrack_clean(entry, entry['old_size'])

        node.cleanup()
        shutil.rmtree(
            self.get_tblspace_path(node, 'somedata'),
            ignore_errors=True)

        self.restore_node(backup_dir, 'node', node)

        pgdata_restored = self.pgdata_content(node.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)
    # @unittest.skip("skip")
    def test_basic_ptrack_truncate_replica(self):
        """
        TRUNCATE propagated to a standby between FULL and PTRACK backups.

        Take a FULL backup from the replica, TRUNCATE the table on the
        master, pause WAL replay on the replica and take a PTRACK backup
        from it; restore that backup into a fresh node and verify the node
        starts and answers a trivial query.
        """
        master = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'master'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'max_wal_size': '32MB',
                'archive_timeout': '10s',
                'checkpoint_timeout': '5min'})

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'master', master)
        master.slow_start()

        if master.major_version >= 11:
            master.safe_psql(
                "postgres",
                "CREATE EXTENSION ptrack")

        self.backup_node(backup_dir, 'master', master, options=['--stream'])

        # Spin up a physical standby from the master's FULL backup.
        replica = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'replica'))
        replica.cleanup()

        self.restore_node(backup_dir, 'master', replica)

        self.add_instance(backup_dir, 'replica', replica)
        self.set_replica(master, replica, 'replica', synchronous=True)
        replica.slow_start(replica=True)

        # Create table and indexes
        master.safe_psql(
            "postgres",
            "create extension bloom; create sequence t_seq; "
            "create table t_heap "
            "as select i as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,2560) i")

        for i in idx_ptrack:
            if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
                master.safe_psql(
                    "postgres",
                    "create index {0} on {1} using {2}({3}) ".format(
                        i, idx_ptrack[i]['relation'],
                        idx_ptrack[i]['type'], idx_ptrack[i]['column']))

        # Sync master and replica
        self.wait_until_replica_catch_with_master(master, replica)
        replica.safe_psql('postgres', 'checkpoint')

        # Pre-11 only: record fork sizes/paths and per-page md5 sums so
        # ptrack cleanliness can be verified after the FULL backup.
        if replica.major_version < 11:
            for i in idx_ptrack:
                # get fork size and calculate it in pages
                idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i)
                # get path to heap and index files
                idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
                # calculate md5sums for every page of this fork
                idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
                    idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

        # Make backup to clean every ptrack
        self.backup_node(
            backup_dir, 'replica', replica,
            options=[
                '-j10',
                '--stream',
                '--master-host=localhost',
                '--master-db=postgres',
                '--master-port={0}'.format(master.port)])

        if replica.major_version < 11:
            for i in idx_ptrack:
                idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                    replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']])
                self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['old_size'])

        master.safe_psql('postgres', 'truncate t_heap')

        # Sync master and replica
        self.wait_until_replica_catch_with_master(master, replica)

        # Pause WAL replay on the standby before taking the PTRACK backup
        # (function name differs before PostgreSQL 10).
        if replica.major_version < 10:
            replica.safe_psql(
                "postgres",
                "select pg_xlog_replay_pause()")
        else:
            replica.safe_psql(
                "postgres",
                "select pg_wal_replay_pause()")

        self.backup_node(
            backup_dir, 'replica', replica, backup_type='ptrack',
            options=[
                '-j10',
                '--stream',
                '--master-host=localhost',
                '--master-db=postgres',
                '--master-port={0}'.format(master.port)])

        pgdata = self.pgdata_content(replica.data_dir)

        # Restore the PTRACK backup into a brand-new node and compare.
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'))
        node.cleanup()

        self.restore_node(backup_dir, 'replica', node, data_dir=node.data_dir)

        pgdata_restored = self.pgdata_content(node.data_dir)

        if self.paranoia:
            self.compare_pgdata(pgdata, pgdata_restored)

        self.set_auto_conf(node, {'port': node.port})

        node.slow_start()

        # Smoke check: the restored node must accept queries.
        node.safe_psql(
            'postgres',
            'select 1')
3089

3090
    # @unittest.skip("skip")
    # @unittest.expectedFailure
    def test_ptrack_vacuum(self):
        """
        Plain VACUUM between FULL and PTRACK backups.

        Build a table plus one index per supported access method in
        tablespace 'somedata', take a FULL backup, delete half the rows and
        VACUUM, then take a PTRACK backup and check the restored data
        directory matches the backed-up one (modulo known-bug exclusions).
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

        self.create_tblspace_in_node(node, 'somedata')

        # Populate a heap table and cover it with an index per access method.
        node.safe_psql(
            "postgres",
            "create extension bloom; create sequence t_seq; "
            "create table t_heap tablespace somedata "
            "as select i as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,2560) i")

        for idx_name in idx_ptrack:
            if idx_ptrack[idx_name]['type'] not in ('heap', 'seq'):
                node.safe_psql(
                    "postgres",
                    "create index {0} on {1} using {2}({3}) "
                    "tablespace somedata".format(
                        idx_name,
                        idx_ptrack[idx_name]['relation'],
                        idx_ptrack[idx_name]['type'],
                        idx_ptrack[idx_name]['column']))

        exclusion_dict = self.get_known_bugs_comparision_exclusion_dict(node)

        node.safe_psql('postgres', 'vacuum t_heap')
        node.safe_psql('postgres', 'checkpoint')

        # The FULL backup resets every ptrack map.
        self.backup_node(
            backup_dir, 'node', node, options=['-j10', '--stream'])

        if node.major_version < 11:
            for idx_name in idx_ptrack:
                # Remember fork size (in pages), file path and per-page md5
                # sums, then assert the ptrack map is clean after the backup.
                idx_ptrack[idx_name]['old_size'] = self.get_fork_size(node, idx_name)
                idx_ptrack[idx_name]['path'] = self.get_fork_path(node, idx_name)
                idx_ptrack[idx_name]['old_pages'] = self.get_md5_per_page_for_fork(
                    idx_ptrack[idx_name]['path'], idx_ptrack[idx_name]['old_size'])
                idx_ptrack[idx_name]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                    node, idx_ptrack[idx_name]['path'], [idx_ptrack[idx_name]['old_size']])
                self.check_ptrack_clean(idx_ptrack[idx_name], idx_ptrack[idx_name]['old_size'])

        # Delete every odd row, vacuum and checkpoint so pages change on disk.
        node.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
        node.safe_psql('postgres', 'vacuum t_heap')
        node.safe_psql('postgres', 'checkpoint')

        # Verify the ptrack map reflects the modified pages (pre-11 only).
        if node.major_version < 11:
            self.check_ptrack_map_sanity(node, idx_ptrack)

        self.backup_node(
            backup_dir, 'node', node,
            backup_type='ptrack', options=['-j10', '--stream'])

        content_before = self.pgdata_content(node.data_dir)
        node.cleanup()

        # Wipe the tablespace too, so the restore recreates it from scratch.
        shutil.rmtree(
            self.get_tblspace_path(node, 'somedata'),
            ignore_errors=True)

        self.restore_node(backup_dir, 'node', node)

        content_after = self.pgdata_content(node.data_dir)
        self.compare_pgdata(content_before, content_after, exclusion_dict)
3174

3175
    # @unittest.skip("skip")
    def test_ptrack_vacuum_replica(self):
        """
        Plain VACUUM between FULL and PTRACK backups taken from a standby.

        Changes are made on the master, synced to the replica, and both
        backups are taken from the replica; the PTRACK backup is then
        restored into a fresh node and compared against the replica's
        data directory.
        """
        master = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'master'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'checkpoint_timeout': '30'})

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'master', master)
        master.slow_start()

        if master.major_version >= 11:
            master.safe_psql(
                "postgres",
                "CREATE EXTENSION ptrack")

        self.backup_node(backup_dir, 'master', master, options=['--stream'])

        # Spin up a physical standby from the master's FULL backup.
        replica = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'replica'))
        replica.cleanup()

        self.restore_node(backup_dir, 'master', replica)

        self.add_instance(backup_dir, 'replica', replica)
        self.set_replica(master, replica, 'replica', synchronous=True)
        replica.slow_start(replica=True)

        # Create table and indexes
        master.safe_psql(
            "postgres",
            "create extension bloom; create sequence t_seq; "
            "create table t_heap as select i as id, "
            "md5(i::text) as text, md5(repeat(i::text,10))::tsvector "
            "as tsvector from generate_series(0,2560) i")

        for i in idx_ptrack:
            if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
                master.safe_psql(
                    "postgres",
                    "create index {0} on {1} using {2}({3})".format(
                        i, idx_ptrack[i]['relation'],
                        idx_ptrack[i]['type'], idx_ptrack[i]['column']))

        master.safe_psql('postgres', 'vacuum t_heap')
        master.safe_psql('postgres', 'checkpoint')

        # Sync master and replica
        self.wait_until_replica_catch_with_master(master, replica)
        replica.safe_psql('postgres', 'checkpoint')

        # Make FULL backup to clean every ptrack
        self.backup_node(
            backup_dir, 'replica', replica, options=[
                '-j10', '--master-host=localhost',
                '--master-db=postgres',
                '--master-port={0}'.format(master.port),
                '--stream'])

        # Pre-11 only: snapshot fork metadata and verify the ptrack map
        # is clean right after the FULL backup.
        if replica.major_version < 11:
            for i in idx_ptrack:
                # get fork size and calculate it in pages
                idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i)
                # get path to heap and index files
                idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
                # calculate md5sums for every page of this fork
                idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
                    idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
                idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                    replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']])
                self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['old_size'])

        # Delete some rows, vacuum it and make checkpoint
        master.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
        master.safe_psql('postgres', 'vacuum t_heap')
        master.safe_psql('postgres', 'checkpoint')

        # Sync master and replica
        self.wait_until_replica_catch_with_master(master, replica)
        replica.safe_psql('postgres', 'checkpoint')

        # CHECK PTRACK SANITY
        # NOTE(review): the sanity check is run against *master* even though
        # the backup is taken from the replica — presumably intentional since
        # the changes originate on the master; confirm against sibling tests.
        if replica.major_version < 11:
            self.check_ptrack_map_sanity(master, idx_ptrack)

        self.backup_node(
            backup_dir, 'replica', replica,
            backup_type='ptrack', options=['-j10', '--stream'])

        pgdata = self.pgdata_content(replica.data_dir)

        # Restore the PTRACK backup into a fresh node and compare contents.
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'))
        node.cleanup()

        self.restore_node(backup_dir, 'replica', node, data_dir=node.data_dir)

        pgdata_restored = self.pgdata_content(node.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)
3278

3279
    # @unittest.skip("skip")
    # @unittest.expectedFailure
    def test_ptrack_vacuum_bits_frozen(self):
        """
        VACUUM FREEZE between FULL and PTRACK backups.

        After the FULL backup, VACUUM FREEZE the table (rewrites visibility
        /frozen information on heap pages), take a PTRACK backup, then
        restore it and verify the data directory matches the original
        (modulo known-bug exclusions).
        """
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

        self.create_tblspace_in_node(node, 'somedata')

        # Create table and indexes.
        # The query result is intentionally discarded (the original code
        # bound it to an unused local 'res').
        node.safe_psql(
            "postgres",
            "create extension bloom; create sequence t_seq; "
            "create table t_heap tablespace somedata "
            "as select i as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,2560) i")
        for i in idx_ptrack:
            if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
                node.safe_psql(
                    "postgres",
                    "create index {0} on {1} using {2}({3}) "
                    "tablespace somedata".format(
                        i, idx_ptrack[i]['relation'],
                        idx_ptrack[i]['type'],
                        idx_ptrack[i]['column']))

        comparision_exclusion = self.get_known_bugs_comparision_exclusion_dict(node)
        node.safe_psql('postgres', 'checkpoint')

        # FULL backup resets every ptrack map.
        self.backup_node(
            backup_dir, 'node', node, options=['-j10', '--stream'])

        node.safe_psql('postgres', 'vacuum freeze t_heap')
        node.safe_psql('postgres', 'checkpoint')

        if node.major_version < 11:
            for i in idx_ptrack:
                # get size of heap and indexes. size calculated in pages
                idx_ptrack[i]['old_size'] = self.get_fork_size(node, i)
                # get path to heap and index files
                idx_ptrack[i]['path'] = self.get_fork_path(node, i)
                # calculate md5sums of pages
                idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
                    idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

        # CHECK PTRACK SANITY
        if node.major_version < 11:
            self.check_ptrack_map_sanity(node, idx_ptrack)

        self.backup_node(
            backup_dir, 'node', node,
            backup_type='ptrack', options=['-j10', '--stream'])

        pgdata = self.pgdata_content(node.data_dir)
        node.cleanup()
        # Remove the tablespace too, so restore must recreate it.
        shutil.rmtree(
            self.get_tblspace_path(node, 'somedata'),
            ignore_errors=True)

        self.restore_node(backup_dir, 'node', node)

        pgdata_restored = self.pgdata_content(node.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored, comparision_exclusion)
3354

3355
    # @unittest.skip("skip")
    def test_ptrack_vacuum_bits_frozen_replica(self):
        """
        VACUUM FREEZE on the master between FULL and PTRACK backups taken
        from a standby; the PTRACK backup is restored back into the
        replica's own directory and compared against the pre-restore state.
        """
        master = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'master'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'master', master)
        master.slow_start()

        if master.major_version >= 11:
            master.safe_psql(
                "postgres",
                "CREATE EXTENSION ptrack")

        self.backup_node(backup_dir, 'master', master, options=['--stream'])

        # Spin up a physical standby from the master's FULL backup.
        replica = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'replica'))
        replica.cleanup()

        self.restore_node(backup_dir, 'master', replica)

        self.add_instance(backup_dir, 'replica', replica)
        # NOTE(review): unlike sibling replica tests, set_replica() is called
        # here without the 'replica' name argument — presumably the helper's
        # default applies; verify against ptrack_helpers.
        self.set_replica(master, replica, synchronous=True)
        replica.slow_start(replica=True)

        # Create table and indexes
        master.safe_psql(
            "postgres",
            "create extension bloom; create sequence t_seq; "
            "create table t_heap as select i as id, "
            "md5(i::text) as text, md5(repeat(i::text,10))::tsvector "
            "as tsvector from generate_series(0,2560) i")
        for i in idx_ptrack:
            if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
                master.safe_psql(
                    "postgres",
                    "create index {0} on {1} using {2}({3})".format(
                        i, idx_ptrack[i]['relation'],
                        idx_ptrack[i]['type'],
                        idx_ptrack[i]['column']))

        master.safe_psql('postgres', 'checkpoint')

        # Sync master and replica
        self.wait_until_replica_catch_with_master(master, replica)
        replica.safe_psql('postgres', 'checkpoint')

        # Take backup to clean every ptrack
        self.backup_node(
            backup_dir, 'replica', replica,
            options=[
                '-j10',
                '--master-host=localhost',
                '--master-db=postgres',
                '--master-port={0}'.format(master.port),
                '--stream'])

        # Pre-11 only: snapshot fork metadata for the later sanity check.
        if replica.major_version < 11:
            for i in idx_ptrack:
                # get size of heap and indexes. size calculated in pages
                idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i)
                # get path to heap and index files
                idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
                # calculate md5sums of pages
                idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
                    idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

        master.safe_psql('postgres', 'vacuum freeze t_heap')
        master.safe_psql('postgres', 'checkpoint')

        # Sync master and replica
        self.wait_until_replica_catch_with_master(master, replica)
        replica.safe_psql('postgres', 'checkpoint')

        # CHECK PTRACK SANITY
        if replica.major_version < 11:
            self.check_ptrack_map_sanity(master, idx_ptrack)

        self.backup_node(
            backup_dir, 'replica', replica, backup_type='ptrack',
            options=['-j10', '--stream'])

        # Restore the PTRACK backup in place and compare data directories.
        pgdata = self.pgdata_content(replica.data_dir)
        replica.cleanup()

        self.restore_node(backup_dir, 'replica', replica)

        pgdata_restored = self.pgdata_content(replica.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)
3449

3450
    # @unittest.skip("skip")
    # @unittest.expectedFailure
    def test_ptrack_vacuum_bits_visibility(self):
        """
        VACUUM (visibility-map updates) between FULL and PTRACK backups.

        After the FULL backup, VACUUM the table, take a PTRACK backup,
        then restore it and verify the data directory matches the original
        (modulo known-bug exclusions).
        """
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

        self.create_tblspace_in_node(node, 'somedata')

        # Create table and indexes.
        # The query result is intentionally discarded (the original code
        # bound it to an unused local 'res').
        node.safe_psql(
            "postgres",
            "create extension bloom; create sequence t_seq; "
            "create table t_heap tablespace somedata "
            "as select i as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,2560) i")

        for i in idx_ptrack:
            if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
                node.safe_psql(
                    "postgres",
                    "create index {0} on {1} using {2}({3}) "
                    "tablespace somedata".format(
                        i, idx_ptrack[i]['relation'],
                        idx_ptrack[i]['type'], idx_ptrack[i]['column']))

        comparision_exclusion = self.get_known_bugs_comparision_exclusion_dict(node)
        node.safe_psql('postgres', 'checkpoint')

        # FULL backup resets every ptrack map.
        self.backup_node(
            backup_dir, 'node', node, options=['-j10', '--stream'])

        if node.major_version < 11:
            for i in idx_ptrack:
                # get size of heap and indexes. size calculated in pages
                idx_ptrack[i]['old_size'] = self.get_fork_size(node, i)
                # get path to heap and index files
                idx_ptrack[i]['path'] = self.get_fork_path(node, i)
                # calculate md5sums of pages
                idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
                    idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

        node.safe_psql('postgres', 'vacuum t_heap')
        node.safe_psql('postgres', 'checkpoint')

        # CHECK PTRACK SANITY
        if node.major_version < 11:
            self.check_ptrack_map_sanity(node, idx_ptrack)

        self.backup_node(
            backup_dir, 'node', node,
            backup_type='ptrack', options=['-j10', '--stream'])

        pgdata = self.pgdata_content(node.data_dir)
        node.cleanup()
        # Remove the tablespace too, so restore must recreate it.
        shutil.rmtree(
            self.get_tblspace_path(node, 'somedata'),
            ignore_errors=True)

        self.restore_node(backup_dir, 'node', node)

        pgdata_restored = self.pgdata_content(node.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored, comparision_exclusion)
3525

3526
    # @unittest.skip("skip")
    # @unittest.expectedFailure
    def test_ptrack_vacuum_full_2(self):
        """
        VACUUM FULL between FULL and PTRACK backups.

        VACUUM FULL rewrites the relation into new relfilenodes; the PTRACK
        backup must still produce a restorable, identical data directory.

        NOTE(review): unlike sibling tests this node is initialized without
        '--data-checksums' and relies on 'wal_log_hints' instead —
        presumably deliberate to exercise the hint-bit code path; confirm.
        """
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            pg_options={ 'wal_log_hints': 'on' })

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

        self.create_tblspace_in_node(node, 'somedata')

        # Create table and indexes.
        # The query result is intentionally discarded (the original code
        # bound it to an unused local 'res').
        node.safe_psql(
            "postgres",
            "create extension bloom; create sequence t_seq; "
            "create table t_heap tablespace somedata "
            "as select i as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,2560) i")
        for i in idx_ptrack:
            if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
                node.safe_psql(
                    "postgres", "create index {0} on {1} "
                    "using {2}({3}) tablespace somedata".format(
                        i, idx_ptrack[i]['relation'],
                        idx_ptrack[i]['type'], idx_ptrack[i]['column']))

        node.safe_psql('postgres', 'vacuum t_heap')
        node.safe_psql('postgres', 'checkpoint')

        # FULL backup resets every ptrack map.
        self.backup_node(
            backup_dir, 'node', node, options=['-j10', '--stream'])

        if node.major_version < 11:
            for i in idx_ptrack:
                # get size of heap and indexes. size calculated in pages
                idx_ptrack[i]['old_size'] = self.get_fork_size(node, i)
                # get path to heap and index files
                idx_ptrack[i]['path'] = self.get_fork_path(node, i)
                # calculate md5sums of pages
                idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
                    idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

        node.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
        node.safe_psql('postgres', 'vacuum full t_heap')
        node.safe_psql('postgres', 'checkpoint')

        if node.major_version < 11:
            self.check_ptrack_map_sanity(node, idx_ptrack)

        self.backup_node(
            backup_dir, 'node', node,
            backup_type='ptrack', options=['-j10', '--stream'])

        pgdata = self.pgdata_content(node.data_dir)
        node.cleanup()

        # Remove the tablespace too, so restore must recreate it.
        shutil.rmtree(
            self.get_tblspace_path(node, 'somedata'),
            ignore_errors=True)

        self.restore_node(backup_dir, 'node', node)

        pgdata_restored = self.pgdata_content(node.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)
3600

3601
    # @unittest.skip("skip")
    # @unittest.expectedFailure
    def test_ptrack_vacuum_full_replica(self):
        """
        VACUUM FULL on the master between FULL and PTRACK backups taken
        from a standby; the PTRACK backup is restored back into the
        replica's own directory and compared against the pre-restore state.
        """
        master = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'master'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'master', master)
        master.slow_start()

        if master.major_version >= 11:
            master.safe_psql(
                "postgres",
                "CREATE EXTENSION ptrack")

        self.backup_node(backup_dir, 'master', master, options=['--stream'])
        # Spin up a physical standby from the master's FULL backup.
        replica = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'replica'))
        replica.cleanup()

        self.restore_node(backup_dir, 'master', replica)

        self.add_instance(backup_dir, 'replica', replica)
        self.set_replica(master, replica, 'replica', synchronous=True)
        replica.slow_start(replica=True)

        # Create table and indexes
        master.safe_psql(
            "postgres",
            "create extension bloom; create sequence t_seq; "
            "create table t_heap as select i as id, "
            "md5(i::text) as text, md5(repeat(i::text,10))::tsvector as "
            "tsvector from generate_series(0,256000) i")

        # NOTE(review): indexes are only created when major_version < 11,
        # but setUp() skips the whole class for PostgreSQL < 11, so this
        # branch never runs here — looks inverted compared to sibling
        # tests, which create the indexes unconditionally; confirm intent.
        if master.major_version < 11:
            for i in idx_ptrack:
                if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
                    master.safe_psql(
                        "postgres",
                        "create index {0} on {1} using {2}({3})".format(
                            i, idx_ptrack[i]['relation'],
                            idx_ptrack[i]['type'],
                            idx_ptrack[i]['column']))

        master.safe_psql('postgres', 'vacuum t_heap')
        master.safe_psql('postgres', 'checkpoint')

        # Sync master and replica
        self.wait_until_replica_catch_with_master(master, replica)
        replica.safe_psql('postgres', 'checkpoint')

        # Take FULL backup to clean every ptrack
        self.backup_node(
            backup_dir, 'replica', replica,
            options=[
                '-j10',
                '--master-host=localhost',
                '--master-db=postgres',
                '--master-port={0}'.format(master.port),
                '--stream'])

        # Pre-11 only: snapshot fork metadata for the later sanity check.
        if replica.major_version < 11:
            for i in idx_ptrack:
                # get size of heap and indexes. size calculated in pages
                idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i)
                # get path to heap and index files
                idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
                # calculate md5sums of pages
                idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
                    idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

        # VACUUM FULL rewrites the table into new relfilenodes on the master.
        master.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
        master.safe_psql('postgres', 'vacuum full t_heap')
        master.safe_psql('postgres', 'checkpoint')

        # Sync master and replica
        self.wait_until_replica_catch_with_master(master, replica)
        replica.safe_psql('postgres', 'checkpoint')

        if replica.major_version < 11:
            self.check_ptrack_map_sanity(master, idx_ptrack)

        self.backup_node(
            backup_dir, 'replica', replica,
            backup_type='ptrack', options=['-j10', '--stream'])

        # Restore the PTRACK backup in place and compare data directories.
        pgdata = self.pgdata_content(replica.data_dir)
        replica.cleanup()

        self.restore_node(backup_dir, 'replica', replica)

        pgdata_restored = self.pgdata_content(replica.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)
3698

3699
    # @unittest.skip("skip")
    # @unittest.expectedFailure
    def test_ptrack_vacuum_truncate_2(self):
        """
        Take a FULL backup, truncate the tail of t_heap (DELETE + VACUUM so
        the relation is physically shortened), take a PTRACK backup and check
        that the restored pgdata is identical to the source pgdata.
        """
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

        # Create table and indexes
        node.safe_psql(
            "postgres",
            "create extension bloom; create sequence t_seq; "
            "create table t_heap "
            "as select i as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,2560) i")

        # PTRACK 1.x (PG < 11) needs per-index sanity bookkeeping
        if node.major_version < 11:
            for i in idx_ptrack:
                if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
                    node.safe_psql(
                        "postgres", "create index {0} on {1} using {2}({3})".format(
                            i, idx_ptrack[i]['relation'],
                            idx_ptrack[i]['type'], idx_ptrack[i]['column']))

        node.safe_psql('postgres', 'VACUUM t_heap')

        self.backup_node(
            backup_dir, 'node', node, options=['-j10', '--stream'])

        if node.major_version < 11:
            for i in idx_ptrack:
                # get size of heap and indexes. size calculated in pages
                idx_ptrack[i]['old_size'] = self.get_fork_size(node, i)
                # get path to heap and index files
                idx_ptrack[i]['path'] = self.get_fork_path(node, i)
                # calculate md5sums of pages
                idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
                    idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

        # Shrink the relation so PTRACK has truncation to track
        node.safe_psql('postgres', 'DELETE FROM t_heap WHERE id > 128')
        node.safe_psql('postgres', 'VACUUM t_heap')
        node.safe_psql('postgres', 'CHECKPOINT')

        # CHECK PTRACK SANITY
        if node.major_version < 11:
            self.check_ptrack_map_sanity(node, idx_ptrack)

        self.backup_node(
            backup_dir, 'node', node,
            backup_type='ptrack', options=['--stream'])

        pgdata = self.pgdata_content(node.data_dir)

        node_restored = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
        node_restored.cleanup()

        self.restore_node(backup_dir, 'node', node_restored)

        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)
3772
    # @unittest.skip("skip")
    # @unittest.expectedFailure
    def test_ptrack_vacuum_truncate_replica(self):
        """
        Same as test_ptrack_vacuum_truncate_2, but backups are taken from a
        synchronous replica: truncate t_heap on master, sync the replica,
        take a PTRACK backup from the replica and verify restored pgdata.
        """
        master = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'master'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'master', master)
        master.slow_start()

        if master.major_version >= 11:
            master.safe_psql(
                "postgres",
                "CREATE EXTENSION ptrack")

        self.backup_node(backup_dir, 'master', master, options=['--stream'])

        # Spin up the replica from the master's FULL backup
        replica = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'replica'))
        replica.cleanup()

        self.restore_node(backup_dir, 'master', replica)

        self.add_instance(backup_dir, 'replica', replica)
        self.set_replica(master, replica, 'replica', synchronous=True)
        replica.slow_start(replica=True)

        # Create table and indexes
        master.safe_psql(
            "postgres",
            "create extension bloom; create sequence t_seq; "
            "create table t_heap as select i as id, "
            "md5(i::text) as text, md5(repeat(i::text,10))::tsvector "
            "as tsvector from generate_series(0,2560) i")

        for i in idx_ptrack:
            if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
                master.safe_psql(
                    "postgres", "create index {0} on {1} "
                    "using {2}({3})".format(
                        i, idx_ptrack[i]['relation'],
                        idx_ptrack[i]['type'], idx_ptrack[i]['column']))

        master.safe_psql('postgres', 'vacuum t_heap')
        master.safe_psql('postgres', 'checkpoint')

        # Take FULL backup to clean every ptrack
        self.backup_node(
            backup_dir, 'replica', replica,
            options=[
                '-j10',
                '--stream',
                '--master-host=localhost',
                '--master-db=postgres',
                '--master-port={0}'.format(master.port)
                ]
            )

        # PTRACK 1.x (PG < 11) needs per-index sanity bookkeeping
        if master.major_version < 11:
            for i in idx_ptrack:
                # get size of heap and indexes. size calculated in pages
                idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i)
                # get path to heap and index files
                idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
                # calculate md5sums of pages
                idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
                    idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

        # Shrink the relation on master
        master.safe_psql('postgres', 'DELETE FROM t_heap WHERE id > 128;')
        master.safe_psql('postgres', 'VACUUM t_heap')
        master.safe_psql('postgres', 'CHECKPOINT')

        # Sync master and replica
        self.wait_until_replica_catch_with_master(master, replica)
        replica.safe_psql('postgres', 'CHECKPOINT')

        # CHECK PTRACK SANITY
        if master.major_version < 11:
            self.check_ptrack_map_sanity(master, idx_ptrack)

        self.backup_node(
            backup_dir, 'replica', replica, backup_type='ptrack',
            options=[
                '--stream',
                '--log-level-file=INFO',
                '--archive-timeout=30'])

        pgdata = self.pgdata_content(replica.data_dir)

        node_restored = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
        node_restored.cleanup()

        self.restore_node(backup_dir, 'replica', node_restored)

        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)
3874
    @unittest.skip("skip")
    def test_ptrack_recovery(self):
        """
        Check that ptrack map contain correct bits after recovery.
        Actual only for PTRACK 1.x
        """
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        self.create_tblspace_in_node(node, 'somedata')

        # Create table
        node.safe_psql(
            "postgres",
            "create extension bloom; create sequence t_seq; "
            "create table t_heap tablespace somedata "
            "as select i as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,2560) i")

        # Create indexes
        for i in idx_ptrack:
            if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
                node.safe_psql(
                    "postgres", "create index {0} on {1} using {2}({3}) "
                    "tablespace somedata".format(
                        i, idx_ptrack[i]['relation'],
                        idx_ptrack[i]['type'], idx_ptrack[i]['column']))

            # get size of heap and indexes. size calculated in pages
            idx_ptrack[i]['size'] = int(self.get_fork_size(node, i))
            # get path to heap and index files
            idx_ptrack[i]['path'] = self.get_fork_path(node, i)

        # Kill the postmaster without a checkpoint so in-memory
        # ptrack changes are lost and must be rebuilt during recovery.
        if self.verbose:
            print('Killing postmaster. Losing Ptrack changes')
        node.stop(['-m', 'immediate', '-D', node.data_dir])
        if node.status():
            # original code called exit(1) here, which would abort the
            # whole test runner; fail just this test instead
            self.fail("Node is still running after immediate shutdown")
        node.slow_start()

        for i in idx_ptrack:
            # get ptrack for every idx
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                node, idx_ptrack[i]['path'], [idx_ptrack[i]['size']])
            # check that ptrack has correct bits after recovery
            self.check_ptrack_recovery(idx_ptrack[i])
3932
    # @unittest.skip("skip")
    # @unittest.expectedFailure
    def test_ptrack_recovery_1(self):
        """
        Kill the postmaster with an immediate shutdown so that in-memory
        ptrack changes are lost, then verify that the PTRACK backup taken
        after crash recovery still produces a pgdata identical to the source.
        """
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'shared_buffers': '512MB',
                'max_wal_size': '3GB'})

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

        # Create table
        node.safe_psql(
            "postgres",
            "create extension bloom; create sequence t_seq; "
            "create table t_heap "
            "as select nextval('t_seq')::int as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,2560) i")

        self.backup_node(
            backup_dir, 'node', node, options=['--stream'])

        # Create indexes
        for i in idx_ptrack:
            if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
                node.safe_psql(
                    "postgres",
                    "CREATE INDEX {0} ON {1} USING {2}({3})".format(
                        i, idx_ptrack[i]['relation'],
                        idx_ptrack[i]['type'], idx_ptrack[i]['column']))

        # Dirty every page of t_heap and its indexes
        node.safe_psql(
            'postgres',
            "update t_heap set id = nextval('t_seq'), text = md5(text), "
            "tsvector = md5(repeat(tsvector::text, 10))::tsvector")

        node.safe_psql(
            'postgres',
            "create extension pg_buffercache")

        # Crash the server: dirty buffers and in-memory ptrack are lost
        if self.verbose:
            print('Killing postmaster. Losing Ptrack changes')
        node.stop(['-m', 'immediate', '-D', node.data_dir])

        if node.status():
            # original code called exit(1) here, which would abort the
            # whole test runner; fail just this test instead
            self.fail("Node is still running after immediate shutdown")
        node.slow_start()

        self.backup_node(
            backup_dir, 'node', node,
            backup_type='ptrack', options=['--stream'])

        pgdata = self.pgdata_content(node.data_dir)

        node_restored = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
        node_restored.cleanup()

        self.restore_node(
            backup_dir, 'node', node_restored)

        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)
4014
    # @unittest.skip("skip")
    # @unittest.expectedFailure
    def test_ptrack_zero_changes(self):
        """
        Take a PTRACK backup immediately after a FULL backup, with no data
        changes in between, and verify the restored pgdata is identical.
        """
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

        # Create table
        node.safe_psql(
            "postgres",
            "create table t_heap "
            "as select i as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,2560) i")

        self.backup_node(
            backup_dir, 'node', node, options=['--stream'])

        # No changes between the backups: the PTRACK backup should be empty
        self.backup_node(
            backup_dir, 'node', node,
            backup_type='ptrack', options=['--stream'])

        pgdata = self.pgdata_content(node.data_dir)
        node.cleanup()

        self.restore_node(backup_dir, 'node', node)

        pgdata_restored = self.pgdata_content(node.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)
4055
    # @unittest.skip("skip")
    # @unittest.expectedFailure
    def test_ptrack_pg_resetxlog(self):
        """
        Run pg_resetwal/pg_resetxlog on a crashed cluster and check that a
        subsequent PTRACK backup is rejected: the LSN stored in ptrack_control
        must be detected as being ahead of the previous backup's Start LSN.
        """
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'shared_buffers': '512MB',
                'max_wal_size': '3GB'})

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

        # Create table
        node.safe_psql(
            "postgres",
            "create extension bloom; create sequence t_seq; "
            "create table t_heap "
            "as select nextval('t_seq')::int as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,2560) i")

        self.backup_node(
            backup_dir, 'node', node, options=['--stream'])

        # Create indexes
        for i in idx_ptrack:
            if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
                node.safe_psql(
                    "postgres",
                    "CREATE INDEX {0} ON {1} USING {2}({3})".format(
                        i, idx_ptrack[i]['relation'],
                        idx_ptrack[i]['type'], idx_ptrack[i]['column']))

        node.safe_psql(
            'postgres',
            "update t_heap set id = nextval('t_seq'), text = md5(text), "
            "tsvector = md5(repeat(tsvector::text, 10))::tsvector")

        # kill the bastard
        if self.verbose:
            print('Killing postmaster. Losing Ptrack changes')
        node.stop(['-m', 'immediate', '-D', node.data_dir])

        # now smack it with sledgehammer: reset WAL, faking history
        if node.major_version >= 10:
            pg_resetxlog_path = self.get_bin_path('pg_resetwal')
            wal_dir = 'pg_wal'
        else:
            pg_resetxlog_path = self.get_bin_path('pg_resetxlog')
            wal_dir = 'pg_xlog'

        self.run_binary(
            [
                pg_resetxlog_path,
                '-D',
                node.data_dir,
                '-o 42',
                '-f'
            ],
            asynchronous=False)

        if node.status():
            # original code called exit(1) here, which would abort the
            # whole test runner; fail just this test instead
            self.fail("Node is still running after immediate shutdown")
        node.slow_start()

        # take ptrack backup: must be rejected because WAL history was reset
        try:
            self.backup_node(
                backup_dir, 'node', node,
                backup_type='ptrack', options=['--stream'])
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because instance was brutalized by pg_resetxlog"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd)
            )
        except ProbackupException as e:
            self.assertTrue(
                'ERROR: LSN from ptrack_control ' in e.message and
                'is greater than Start LSN of previous backup' in e.message,
                '\n Unexpected Error Message: {0}\n'
                ' CMD: {1}'.format(repr(e.message), self.cmd))
4175
    # @unittest.skip("skip")
    # @unittest.expectedFailure
    def test_corrupt_ptrack_map(self):
        """
        Corrupt ptrack.map on disk and verify the full recovery story:
        ptrack >= 2.3 logs a checksum warning and starts; older versions
        refuse to start. After disabling and re-enabling ptrack.map_size,
        a PTRACK backup is rejected (map LSN is "from the future") until a
        fresh DELTA backup re-anchors the chain, after which PTRACK works
        and the restored pgdata matches.
        """
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

        ptrack_version = self.get_ptrack_version(node)

        # Create table
        node.safe_psql(
            "postgres",
            "create extension bloom; create sequence t_seq; "
            "create table t_heap "
            "as select nextval('t_seq')::int as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,2560) i")

        self.backup_node(
            backup_dir, 'node', node, options=['--stream'])

        node.safe_psql(
            'postgres',
            "update t_heap set id = nextval('t_seq'), text = md5(text), "
            "tsvector = md5(repeat(tsvector::text, 10))::tsvector")

        # kill the bastard
        if self.verbose:
            print('Killing postmaster. Losing Ptrack changes')

        node.stop(['-m', 'immediate', '-D', node.data_dir])

        ptrack_map = os.path.join(node.data_dir, 'global', 'ptrack.map')

        # Let`s do index corruption. ptrack.map
        # NOTE: original code had a no-op `f.close` (missing parentheses);
        # the with-statement closes the file, so the line is dropped.
        with open(ptrack_map, "rb+", 0) as f:
            f.seek(42)
            f.write(b"blablahblahs")
            f.flush()

        if self.verbose:
            print('Ptrack version:', ptrack_version)
        if ptrack_version >= self.version_to_num("2.3"):
            # modern ptrack only warns about the bad checksum and starts
            node.slow_start()

            log_file = os.path.join(node.logs_dir, 'postgresql.log')
            with open(log_file, 'r') as f:
                log_content = f.read()

            self.assertIn(
                'WARNING:  ptrack read map: incorrect checksum of file "{0}"'.format(ptrack_map),
                log_content)

            node.stop(['-D', node.data_dir])
        else:
            # older ptrack refuses to start on a corrupted map
            try:
                node.slow_start()
                # we should die here because exception is what we expect to happen
                self.assertEqual(
                    1, 0,
                    "Expecting Error because ptrack.map is corrupted"
                    "\n Output: {0} \n CMD: {1}".format(
                        repr(self.output), self.cmd))
            except StartNodeException as e:
                self.assertIn(
                    'Cannot start node',
                    e.message,
                    '\n Unexpected Error Message: {0}\n'
                    ' CMD: {1}'.format(repr(e.message), self.cmd))

            log_file = os.path.join(node.logs_dir, 'postgresql.log')
            with open(log_file, 'r') as f:
                log_content = f.read()

            self.assertIn(
                'FATAL:  ptrack init: incorrect checksum of file "{0}"'.format(ptrack_map),
                log_content)

        # With the map disabled, PTRACK backups must be refused outright
        self.set_auto_conf(node, {'ptrack.map_size': '0'})
        node.slow_start()

        try:
            self.backup_node(
                backup_dir, 'node', node,
                backup_type='ptrack', options=['--stream'])
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because instance ptrack is disabled"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'ERROR: Ptrack is disabled',
                e.message,
                '\n Unexpected Error Message: {0}\n'
                ' CMD: {1}'.format(repr(e.message), self.cmd))

        node.safe_psql(
            'postgres',
            "update t_heap set id = nextval('t_seq'), text = md5(text), "
            "tsvector = md5(repeat(tsvector::text, 10))::tsvector")

        node.stop(['-m', 'immediate', '-D', node.data_dir])

        # Re-enable the map: its LSN is now ahead of the last backup
        self.set_auto_conf(node, {'ptrack.map_size': '32', 'shared_preload_libraries': 'ptrack'})
        node.slow_start()

        try:
            self.backup_node(
                backup_dir, 'node', node,
                backup_type='ptrack', options=['--stream'])
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because ptrack map is from future"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'ERROR: LSN from ptrack_control',
                e.message,
                '\n Unexpected Error Message: {0}\n'
                ' CMD: {1}'.format(repr(e.message), self.cmd))

        # A DELTA backup re-anchors the chain so PTRACK can work again
        self.backup_node(
            backup_dir, 'node', node,
            backup_type='delta', options=['--stream'])

        node.safe_psql(
            'postgres',
            "update t_heap set id = nextval('t_seq'), text = md5(text), "
            "tsvector = md5(repeat(tsvector::text, 10))::tsvector")

        self.backup_node(
            backup_dir, 'node', node,
            backup_type='ptrack', options=['--stream'])

        pgdata = self.pgdata_content(node.data_dir)

        node.cleanup()

        self.restore_node(backup_dir, 'node', node)

        pgdata_restored = self.pgdata_content(node.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)
4336
    # @unittest.skip("skip")
    def test_horizon_lsn_ptrack(self):
        """
        https://github.com/postgrespro/pg_probackup/pull/386

        Take a FULL backup with an old (<= 2.4.15) binary, then compare the
        data size of a DELTA backup and a PTRACK backup (current binary)
        taken over the same changes: they must be byte-identical, proving
        the PTRACK horizon LSN is computed correctly.
        """
        if not self.probackup_old_path:
            self.skipTest("You must specify PGPROBACKUPBIN_OLD"
                          " for run this test")
        self.assertLessEqual(
            self.version_to_num(self.old_probackup_version),
            self.version_to_num('2.4.15'),
            'You need pg_probackup old_binary =< 2.4.15 for this test')

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

        self.assertGreaterEqual(
            self.get_ptrack_version(node),
            self.version_to_num("2.1"),
            "You need ptrack >=2.1 for this test")

        # set map_size to a minimal value
        self.set_auto_conf(node, {'ptrack.map_size': '1'})
        node.restart()

        node.pgbench_init(scale=100)

        # FULL backup
        full_id = self.backup_node(backup_dir, 'node', node, options=['--stream'], old_binary=True)

        # enable archiving so the WAL size to do interfere with data bytes comparison later
        self.set_archiving(backup_dir, 'node', node)
        node.restart()

        # change data
        pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum'])
        pgbench.wait()

        # DELTA is exemplar
        delta_id = self.backup_node(
            backup_dir, 'node', node, backup_type='delta')
        delta_bytes = self.show_pb(backup_dir, 'node', backup_id=delta_id)["data-bytes"]
        self.delete_pb(backup_dir, 'node', backup_id=delta_id)

        # PTRACK with current binary
        ptrack_id = self.backup_node(backup_dir, 'node', node, backup_type='ptrack')
        ptrack_bytes = self.show_pb(backup_dir, 'node', backup_id=ptrack_id)["data-bytes"]

        # make sure that backup size is exactly the same
        self.assertEqual(delta_bytes, ptrack_bytes)
Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.