pg_probackup
4397 lines · 163.8 KB
1import os
2import unittest
3from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, idx_ptrack
4from datetime import datetime, timedelta
5import subprocess
6from testgres import QueryException, StartNodeException
7import shutil
8import sys
9from time import sleep
10from threading import Thread
11
12
13class PtrackTest(ProbackupTest, unittest.TestCase):
14def setUp(self):
15if self.pg_config_version < self.version_to_num('11.0'):
16self.skipTest('You need PostgreSQL >= 11 for this test')
17self.fname = self.id().split('.')[3]
18
19# @unittest.skip("skip")
    def test_drop_rel_during_backup_ptrack(self):
        """
        Drop a relation's file while a PTRACK backup is paused inside
        backup_files(); the backup must tolerate the vanished file, log it,
        and still produce a backup that restores to the post-drop state.
        """
        # Needs a gdb-capable environment to freeze the backup mid-flight.
        self._check_gdb_flag_or_skip_test()

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=self.ptrack,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

        node.safe_psql(
            "postgres",
            "create table t_heap as select i"
            " as id from generate_series(0,100) i")

        # Resolve the on-disk file backing t_heap so it can be removed later.
        relative_path = node.safe_psql(
            "postgres",
            "select pg_relation_filepath('t_heap')").decode('utf-8').rstrip()

        absolute_path = os.path.join(node.data_dir, relative_path)

        # FULL backup (parent for the incremental below)
        self.backup_node(backup_dir, 'node', node, options=['--stream'])

        # PTRACK backup, run under gdb so we can pause it
        gdb = self.backup_node(
            backup_dir, 'node', node, backup_type='ptrack',
            gdb=True, options=['--log-level-file=LOG'])

        # Stop right before the data files are copied.
        gdb.set_breakpoint('backup_files')
        gdb.run_until_break()

        # REMOVE file while the backup is frozen
        os.remove(absolute_path)

        # File removed, we can proceed with backup
        gdb.continue_execution_until_exit()

        pgdata = self.pgdata_content(node.data_dir)

        # The backup log must explicitly acknowledge the missing file.
        with open(os.path.join(backup_dir, 'log', 'pg_probackup.log')) as f:
            log_content = f.read()
            self.assertTrue(
                'LOG: File not found: "{0}"'.format(absolute_path) in log_content,
                'File "{0}" should be deleted but it`s not'.format(absolute_path))

        node.cleanup()
        self.restore_node(backup_dir, 'node', node, options=["-j", "4"])

        # Physical comparison
        pgdata_restored = self.pgdata_content(node.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)
84
85# @unittest.skip("skip")
    def test_ptrack_without_full(self):
        """ptrack backup without validated full backup must be rejected"""
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            initdb_params=['--data-checksums'],
            ptrack_enable=True)

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

        # An incremental backup with no FULL parent on this timeline must fail.
        try:
            self.backup_node(backup_dir, 'node', node, backup_type="ptrack")
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because page backup should not be possible "
                "without valid full backup.\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertTrue(
                "WARNING: Valid full backup on current timeline 1 is not found" in e.message and
                "ERROR: Create new full backup before an incremental one" in e.message,
                "\n Unexpected Error Message: {0}\n CMD: {1}".format(
                    repr(e.message), self.cmd))

        # The failed attempt must still be recorded, with ERROR status.
        self.assertEqual(
            self.show_pb(backup_dir, 'node')[0]['status'],
            "ERROR")
121
122# @unittest.skip("skip")
123def test_ptrack_threads(self):
124"""ptrack multi thread backup mode"""
125node = self.make_simple_node(
126base_dir=os.path.join(self.module_name, self.fname, 'node'),
127initdb_params=['--data-checksums'],
128ptrack_enable=True)
129
130backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
131self.init_pb(backup_dir)
132self.add_instance(backup_dir, 'node', node)
133self.set_archiving(backup_dir, 'node', node)
134node.slow_start()
135
136node.safe_psql(
137"postgres",
138"CREATE EXTENSION ptrack")
139
140self.backup_node(
141backup_dir, 'node', node,
142backup_type="full", options=["-j", "4"])
143self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK")
144
145self.backup_node(
146backup_dir, 'node', node,
147backup_type="ptrack", options=["-j", "4"])
148self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK")
149
150# @unittest.skip("skip")
151def test_ptrack_stop_pg(self):
152"""
153create node, take full backup,
154restart node, check that ptrack backup
155can be taken
156"""
157backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
158node = self.make_simple_node(
159base_dir=os.path.join(self.module_name, self.fname, 'node'),
160set_replication=True,
161ptrack_enable=True,
162initdb_params=['--data-checksums'])
163
164self.init_pb(backup_dir)
165self.add_instance(backup_dir, 'node', node)
166node.slow_start()
167
168node.safe_psql(
169"postgres",
170"CREATE EXTENSION ptrack")
171
172node.pgbench_init(scale=1)
173
174# FULL backup
175self.backup_node(backup_dir, 'node', node, options=['--stream'])
176
177node.stop()
178node.slow_start()
179
180self.backup_node(
181backup_dir, 'node', node,
182backup_type='ptrack', options=['--stream'])
183
184# @unittest.skip("skip")
185def test_ptrack_multi_timeline_backup(self):
186"""
187t2 /------P2
188t1 ------F---*-----P1
189"""
190backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
191node = self.make_simple_node(
192base_dir=os.path.join(self.module_name, self.fname, 'node'),
193set_replication=True,
194ptrack_enable=True,
195initdb_params=['--data-checksums'])
196
197self.init_pb(backup_dir)
198self.add_instance(backup_dir, 'node', node)
199self.set_archiving(backup_dir, 'node', node)
200node.slow_start()
201
202node.safe_psql(
203"postgres",
204"CREATE EXTENSION ptrack")
205
206node.pgbench_init(scale=5)
207
208# FULL backup
209full_id = self.backup_node(backup_dir, 'node', node)
210
211pgbench = node.pgbench(options=['-T', '30', '-c', '1', '--no-vacuum'])
212sleep(15)
213
214xid = node.safe_psql(
215'postgres',
216'SELECT txid_current()').decode('utf-8').rstrip()
217pgbench.wait()
218
219self.backup_node(backup_dir, 'node', node, backup_type='ptrack')
220
221node.cleanup()
222
223# Restore from full backup to create Timeline 2
224print(self.restore_node(
225backup_dir, 'node', node,
226options=[
227'--recovery-target-xid={0}'.format(xid),
228'--recovery-target-action=promote']))
229
230node.slow_start()
231
232pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum'])
233pgbench.wait()
234
235self.backup_node(backup_dir, 'node', node, backup_type='ptrack')
236
237pgdata = self.pgdata_content(node.data_dir)
238
239node.cleanup()
240
241self.restore_node(backup_dir, 'node', node)
242
243pgdata_restored = self.pgdata_content(node.data_dir)
244self.compare_pgdata(pgdata, pgdata_restored)
245
246node.slow_start()
247
248balance = node.safe_psql(
249'postgres',
250'select (select sum(tbalance) from pgbench_tellers) - '
251'( select sum(bbalance) from pgbench_branches) + '
252'( select sum(abalance) from pgbench_accounts ) - '
253'(select sum(delta) from pgbench_history) as must_be_zero').decode('utf-8').rstrip()
254
255self.assertEqual('0', balance)
256
257# @unittest.skip("skip")
258def test_ptrack_multi_timeline_backup_1(self):
259"""
260t2 /------
261t1 ---F---P1---*
262
263# delete P1
264t2 /------P2
265t1 ---F--------*
266"""
267backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
268node = self.make_simple_node(
269base_dir=os.path.join(self.module_name, self.fname, 'node'),
270set_replication=True,
271ptrack_enable=True,
272initdb_params=['--data-checksums'])
273
274self.init_pb(backup_dir)
275self.add_instance(backup_dir, 'node', node)
276self.set_archiving(backup_dir, 'node', node)
277node.slow_start()
278
279node.safe_psql(
280"postgres",
281"CREATE EXTENSION ptrack")
282
283node.pgbench_init(scale=5)
284
285# FULL backup
286full_id = self.backup_node(backup_dir, 'node', node)
287
288pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum'])
289pgbench.wait()
290
291ptrack_id = self.backup_node(backup_dir, 'node', node, backup_type='ptrack')
292node.cleanup()
293
294self.restore_node(backup_dir, 'node', node)
295
296node.slow_start()
297
298pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum'])
299pgbench.wait()
300
301# delete old PTRACK backup
302self.delete_pb(backup_dir, 'node', backup_id=ptrack_id)
303
304# take new PTRACK backup
305self.backup_node(backup_dir, 'node', node, backup_type='ptrack')
306
307pgdata = self.pgdata_content(node.data_dir)
308
309node.cleanup()
310
311self.restore_node(backup_dir, 'node', node)
312
313pgdata_restored = self.pgdata_content(node.data_dir)
314self.compare_pgdata(pgdata, pgdata_restored)
315
316node.slow_start()
317
318balance = node.safe_psql(
319'postgres',
320'select (select sum(tbalance) from pgbench_tellers) - '
321'( select sum(bbalance) from pgbench_branches) + '
322'( select sum(abalance) from pgbench_accounts ) - '
323'(select sum(delta) from pgbench_history) as must_be_zero').\
324decode('utf-8').rstrip()
325
326self.assertEqual('0', balance)
327
328# @unittest.skip("skip")
329def test_ptrack_eat_my_data(self):
330"""
331PGPRO-4051
332"""
333backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
334node = self.make_simple_node(
335base_dir=os.path.join(self.module_name, self.fname, 'node'),
336set_replication=True,
337ptrack_enable=True,
338initdb_params=['--data-checksums'])
339
340self.init_pb(backup_dir)
341self.add_instance(backup_dir, 'node', node)
342self.set_archiving(backup_dir, 'node', node)
343node.slow_start()
344
345node.safe_psql(
346"postgres",
347"CREATE EXTENSION ptrack")
348
349node.pgbench_init(scale=50)
350
351self.backup_node(backup_dir, 'node', node)
352
353node_restored = self.make_simple_node(
354base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
355
356pgbench = node.pgbench(options=['-T', '300', '-c', '1', '--no-vacuum'])
357
358for i in range(10):
359print("Iteration: {0}".format(i))
360
361sleep(2)
362
363self.backup_node(backup_dir, 'node', node, backup_type='ptrack')
364# pgdata = self.pgdata_content(node.data_dir)
365#
366# node_restored.cleanup()
367#
368# self.restore_node(backup_dir, 'node', node_restored)
369# pgdata_restored = self.pgdata_content(node_restored.data_dir)
370#
371# self.compare_pgdata(pgdata, pgdata_restored)
372
373pgbench.terminate()
374pgbench.wait()
375
376self.switch_wal_segment(node)
377
378result = node.table_checksum("pgbench_accounts")
379
380node_restored.cleanup()
381self.restore_node(backup_dir, 'node', node_restored)
382self.set_auto_conf(
383node_restored, {'port': node_restored.port})
384
385node_restored.slow_start()
386
387balance = node_restored.safe_psql(
388'postgres',
389'select (select sum(tbalance) from pgbench_tellers) - '
390'( select sum(bbalance) from pgbench_branches) + '
391'( select sum(abalance) from pgbench_accounts ) - '
392'(select sum(delta) from pgbench_history) as must_be_zero').decode('utf-8').rstrip()
393
394self.assertEqual('0', balance)
395
396# Logical comparison
397self.assertEqual(
398result,
399node.table_checksum("pgbench_accounts"),
400'Data loss')
401
402# @unittest.skip("skip")
403def test_ptrack_simple(self):
404"""make node, make full and ptrack stream backups,"
405" restore them and check data correctness"""
406backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
407node = self.make_simple_node(
408base_dir=os.path.join(self.module_name, self.fname, 'node'),
409set_replication=True,
410ptrack_enable=True,
411initdb_params=['--data-checksums'])
412
413self.init_pb(backup_dir)
414self.add_instance(backup_dir, 'node', node)
415node.slow_start()
416
417node.safe_psql(
418"postgres",
419"CREATE EXTENSION ptrack")
420
421self.backup_node(backup_dir, 'node', node, options=['--stream'])
422
423node.safe_psql(
424"postgres",
425"create table t_heap as select i"
426" as id from generate_series(0,1) i")
427
428self.backup_node(
429backup_dir, 'node', node, backup_type='ptrack',
430options=['--stream'])
431
432node.safe_psql(
433"postgres",
434"update t_heap set id = 100500")
435
436self.backup_node(
437backup_dir, 'node', node,
438backup_type='ptrack', options=['--stream'])
439
440if self.paranoia:
441pgdata = self.pgdata_content(node.data_dir)
442
443result = node.table_checksum("t_heap")
444
445node_restored = self.make_simple_node(
446base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
447node_restored.cleanup()
448
449self.restore_node(
450backup_dir, 'node', node_restored, options=["-j", "4"])
451
452# Physical comparison
453if self.paranoia:
454pgdata_restored = self.pgdata_content(
455node_restored.data_dir, ignore_ptrack=False)
456self.compare_pgdata(pgdata, pgdata_restored)
457
458self.set_auto_conf(
459node_restored, {'port': node_restored.port})
460
461node_restored.slow_start()
462
463# Logical comparison
464self.assertEqual(
465result,
466node_restored.table_checksum("t_heap"))
467
468# @unittest.skip("skip")
469def test_ptrack_unprivileged(self):
470""""""
471backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
472node = self.make_simple_node(
473base_dir=os.path.join(self.module_name, self.fname, 'node'),
474set_replication=True,
475ptrack_enable=True,
476initdb_params=['--data-checksums'])
477
478self.init_pb(backup_dir)
479self.add_instance(backup_dir, 'node', node)
480# self.set_archiving(backup_dir, 'node', node)
481node.slow_start()
482
483node.safe_psql(
484"postgres",
485"CREATE DATABASE backupdb")
486
487# PG 9.5
488if self.get_version(node) < 90600:
489node.safe_psql(
490'backupdb',
491"REVOKE ALL ON DATABASE backupdb from PUBLIC; "
492"REVOKE ALL ON SCHEMA public from PUBLIC; "
493"REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; "
494"REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; "
495"REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; "
496"REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; "
497"REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; "
498"REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; "
499"REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; "
500"REVOKE ALL ON SCHEMA information_schema from PUBLIC; "
501"REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; "
502"REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; "
503"REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; "
504"CREATE ROLE backup WITH LOGIN REPLICATION; "
505"GRANT CONNECT ON DATABASE backupdb to backup; "
506"GRANT USAGE ON SCHEMA pg_catalog TO backup; "
507"GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "
508"GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack
509"GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; "
510"GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "
511"GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; "
512"GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; "
513"GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
514"GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; "
515"GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
516"GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; "
517"GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; "
518"GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
519"GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;")
520# PG 9.6
521elif self.get_version(node) > 90600 and self.get_version(node) < 100000:
522node.safe_psql(
523'backupdb',
524"REVOKE ALL ON DATABASE backupdb from PUBLIC; "
525"REVOKE ALL ON SCHEMA public from PUBLIC; "
526"REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; "
527"REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; "
528"REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; "
529"REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; "
530"REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; "
531"REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; "
532"REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; "
533"REVOKE ALL ON SCHEMA information_schema from PUBLIC; "
534"REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; "
535"REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; "
536"REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; "
537"CREATE ROLE backup WITH LOGIN REPLICATION; "
538"GRANT CONNECT ON DATABASE backupdb to backup; "
539"GRANT USAGE ON SCHEMA pg_catalog TO backup; "
540"GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "
541"GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack
542"GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; "
543"GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "
544"GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; "
545"GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; "
546"GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
547"GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; "
548"GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
549"GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; "
550"GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; "
551"GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; "
552"GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
553"GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; "
554"GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; "
555"GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
556"GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;"
557)
558# >= 10 && < 15
559elif self.get_version(node) >= 100000 and self.get_version(node) < 150000:
560node.safe_psql(
561'backupdb',
562"REVOKE ALL ON DATABASE backupdb from PUBLIC; "
563"REVOKE ALL ON SCHEMA public from PUBLIC; "
564"REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; "
565"REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; "
566"REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; "
567"REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; "
568"REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; "
569"REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; "
570"REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; "
571"REVOKE ALL ON SCHEMA information_schema from PUBLIC; "
572"REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; "
573"REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; "
574"REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; "
575"CREATE ROLE backup WITH LOGIN REPLICATION; "
576"GRANT CONNECT ON DATABASE backupdb to backup; "
577"GRANT USAGE ON SCHEMA pg_catalog TO backup; "
578"GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "
579"GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack
580"GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; "
581"GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "
582"GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
583"GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; "
584"GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
585"GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; "
586"GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; "
587"GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; "
588"GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
589"GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; "
590"GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; "
591"GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
592"GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;"
593)
594# >= 15
595else:
596node.safe_psql(
597'backupdb',
598"REVOKE ALL ON DATABASE backupdb from PUBLIC; "
599"REVOKE ALL ON SCHEMA public from PUBLIC; "
600"REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; "
601"REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; "
602"REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; "
603"REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; "
604"REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; "
605"REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; "
606"REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; "
607"REVOKE ALL ON SCHEMA information_schema from PUBLIC; "
608"REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; "
609"REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; "
610"REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; "
611"CREATE ROLE backup WITH LOGIN REPLICATION; "
612"GRANT CONNECT ON DATABASE backupdb to backup; "
613"GRANT USAGE ON SCHEMA pg_catalog TO backup; "
614"GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; "
615"GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack
616"GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; "
617"GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; "
618"GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; "
619"GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; "
620"GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; "
621"GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; "
622"GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_start(text, boolean) TO backup; "
623"GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) TO backup; "
624"GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; "
625"GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; "
626"GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; "
627"GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; "
628"GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;"
629)
630
631node.safe_psql(
632"backupdb",
633"CREATE SCHEMA ptrack")
634node.safe_psql(
635"backupdb",
636"CREATE EXTENSION ptrack WITH SCHEMA ptrack")
637node.safe_psql(
638"backupdb",
639"GRANT USAGE ON SCHEMA ptrack TO backup")
640
641node.safe_psql(
642"backupdb",
643"GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup")
644
645if ProbackupTest.enterprise:
646node.safe_psql(
647"backupdb",
648"GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup; "
649'GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;')
650
651self.backup_node(
652backup_dir, 'node', node,
653datname='backupdb', options=['--stream', "-U", "backup"])
654
655self.backup_node(
656backup_dir, 'node', node, datname='backupdb',
657backup_type='ptrack', options=['--stream', "-U", "backup"])
658
659
660# @unittest.skip("skip")
661# @unittest.expectedFailure
    def test_ptrack_enable(self):
        """
        Taking a PTRACK backup while ptrack.map_size is unset (the library
        is preloaded but tracking is disabled) must fail with
        'Ptrack is disabled'.
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        # Note: shared_preload_libraries loads ptrack, but ptrack.map_size
        # is never set, so the map — and therefore tracking — is off.
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True, initdb_params=['--data-checksums'],
            pg_options={
                'checkpoint_timeout': '30s',
                'shared_preload_libraries': 'ptrack'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

        # PTRACK BACKUP
        try:
            self.backup_node(
                backup_dir, 'node', node,
                backup_type='ptrack', options=["--stream"]
            )
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because ptrack disabled.\n"
                " Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd
                )
            )
        except ProbackupException as e:
            self.assertIn(
                'ERROR: Ptrack is disabled\n',
                e.message,
                '\n Unexpected Error Message: {0}\n'
                ' CMD: {1}'.format(repr(e.message), self.cmd)
            )
701
702# @unittest.skip("skip")
703# @unittest.expectedFailure
    def test_ptrack_disable(self):
        """
        Take full backup, disable ptrack restart postgresql,
        enable ptrack, restart postgresql, take ptrack backup
        which should fail: the disable/enable cycle wipes the ptrack map,
        so the change history since the FULL backup is gone.
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'],
            pg_options={'checkpoint_timeout': '30s'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

        # FULL BACKUP
        self.backup_node(backup_dir, 'node', node, options=['--stream'])

        # DISABLE PTRACK
        # map_size = 0 drops the ptrack map on restart; tracked history is lost.
        node.safe_psql('postgres', "alter system set ptrack.map_size to 0")
        node.stop()
        node.slow_start()

        # ENABLE PTRACK
        # Re-enabling creates a fresh, empty map — useless for the gap
        # between the FULL backup and now.
        node.safe_psql('postgres', "alter system set ptrack.map_size to '128'")
        node.safe_psql('postgres', "alter system set shared_preload_libraries to 'ptrack'")
        node.stop()
        node.slow_start()

        # PTRACK BACKUP
        try:
            self.backup_node(
                backup_dir, 'node', node,
                backup_type='ptrack', options=["--stream"]
            )
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because ptrack_enable was set to OFF at some"
                " point after previous backup.\n"
                " Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd
                )
            )
        except ProbackupException as e:
            # Backup must be refused because the ptrack map's LSN no longer
            # covers the interval since the parent backup.
            self.assertIn(
                'ERROR: LSN from ptrack_control',
                e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd
                )
            )
763
764# @unittest.skip("skip")
    def test_ptrack_uncommitted_xact(self):
        """make ptrack backup while there is uncommitted open transaction"""
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'wal_level': 'replica'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

        # Parent FULL backup.
        self.backup_node(backup_dir, 'node', node, options=['--stream'])

        # Open a transaction and deliberately leave it uncommitted (no
        # commit/close) for the duration of the PTRACK backup below.
        con = node.connect("postgres")
        con.execute(
            "create table t_heap as select i"
            " as id from generate_series(0,1) i")

        self.backup_node(
            backup_dir, 'node', node, backup_type='ptrack',
            options=['--stream'])

        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        node_restored = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
        node_restored.cleanup()

        self.restore_node(
            backup_dir, 'node', node_restored,
            node_restored.data_dir, options=["-j", "4"])

        if self.paranoia:
            pgdata_restored = self.pgdata_content(
                node_restored.data_dir, ignore_ptrack=False)

        self.set_auto_conf(
            node_restored, {'port': node_restored.port})

        node_restored.slow_start()

        # Physical comparison
        if self.paranoia:
            self.compare_pgdata(pgdata, pgdata_restored)
818
819# @unittest.skip("skip")
    def test_ptrack_vacuum_full(self):
        """
        Freeze a VACUUM FULL mid-rewrite with gdb, take two PTRACK backups
        while the rewrite is stalled, then restore and compare the data
        directories for physical equality.
        """
        self._check_gdb_flag_or_skip_test()

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        self.create_tblspace_in_node(node, 'somedata')

        node.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

        # Parent FULL backup.
        self.backup_node(backup_dir, 'node', node, options=['--stream'])

        node.safe_psql(
            "postgres",
            "create table t_heap tablespace somedata as select i"
            " as id from generate_series(0,1000000) i"
        )

        # Dedicated backend whose pid we attach gdb to; autocommit so the
        # VACUUM FULL below runs outside an explicit transaction block.
        pg_connect = node.connect("postgres", autocommit=True)

        gdb = self.gdb_attach(pg_connect.pid)
        gdb.set_breakpoint('reform_and_rewrite_tuple')

        gdb.continue_execution_until_running()

        # VACUUM FULL must run in a separate thread: the backend is about
        # to be frozen at the breakpoint and the call would block the test.
        process = Thread(
            target=pg_connect.execute, args=["VACUUM FULL t_heap"])
        process.start()

        # Wait until the backend actually hits the rewrite breakpoint.
        while not gdb.stopped_in_breakpoint:
            sleep(1)

        # Let the rewrite advance a bit, then leave it stalled mid-flight.
        gdb.continue_execution_until_break(20)

        # Two PTRACK backups taken while the relation rewrite is in limbo.
        self.backup_node(
            backup_dir, 'node', node, backup_type='ptrack', options=['--stream'])

        self.backup_node(
            backup_dir, 'node', node, backup_type='ptrack', options=['--stream'])

        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        # Release the backend and let VACUUM FULL run to completion.
        gdb.remove_all_breakpoints()
        gdb._execute('detach')
        process.join()

        node_restored = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
        node_restored.cleanup()

        # Restore with the tablespace remapped to a fresh location.
        old_tablespace = self.get_tblspace_path(node, 'somedata')
        new_tablespace = self.get_tblspace_path(node_restored, 'somedata_new')

        self.restore_node(
            backup_dir, 'node', node_restored,
            options=["-j", "4", "-T", "{0}={1}".format(
                old_tablespace, new_tablespace)]
        )

        # Physical comparison
        if self.paranoia:
            pgdata_restored = self.pgdata_content(
                node_restored.data_dir, ignore_ptrack=False)
            self.compare_pgdata(pgdata, pgdata_restored)

        self.set_auto_conf(
            node_restored, {'port': node_restored.port})

        node_restored.slow_start()
902
903# @unittest.skip("skip")
904def test_ptrack_vacuum_truncate(self):
905"""make node, create table, take full backup,
906delete last 3 pages, vacuum relation,
907take ptrack backup, take second ptrack backup,
908restore last ptrack backup and check data correctness"""
909backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
910node = self.make_simple_node(
911base_dir=os.path.join(self.module_name, self.fname, 'node'),
912set_replication=True,
913ptrack_enable=True,
914initdb_params=['--data-checksums'])
915
916self.init_pb(backup_dir)
917self.add_instance(backup_dir, 'node', node)
918node.slow_start()
919
920self.create_tblspace_in_node(node, 'somedata')
921
922node.safe_psql(
923"postgres",
924"CREATE EXTENSION ptrack")
925
926node.safe_psql(
927"postgres",
928"create sequence t_seq; "
929"create table t_heap tablespace somedata as select i as id, "
930"md5(i::text) as text, "
931"md5(repeat(i::text,10))::tsvector as tsvector "
932"from generate_series(0,1024) i;")
933
934node.safe_psql(
935"postgres",
936"vacuum t_heap")
937
938self.backup_node(backup_dir, 'node', node, options=['--stream'])
939
940node.safe_psql(
941"postgres",
942"delete from t_heap where ctid >= '(11,0)'")
943
944node.safe_psql(
945"postgres",
946"vacuum t_heap")
947
948self.backup_node(
949backup_dir, 'node', node, backup_type='ptrack', options=['--stream'])
950
951self.backup_node(
952backup_dir, 'node', node, backup_type='ptrack', options=['--stream'])
953
954if self.paranoia:
955pgdata = self.pgdata_content(node.data_dir)
956
957node_restored = self.make_simple_node(
958base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
959node_restored.cleanup()
960
961old_tablespace = self.get_tblspace_path(node, 'somedata')
962new_tablespace = self.get_tblspace_path(node_restored, 'somedata_new')
963
964self.restore_node(
965backup_dir, 'node', node_restored,
966options=["-j", "4", "-T", "{0}={1}".format(
967old_tablespace, new_tablespace)]
968)
969
970# Physical comparison
971if self.paranoia:
972pgdata_restored = self.pgdata_content(
973node_restored.data_dir,
974ignore_ptrack=False
975)
976self.compare_pgdata(pgdata, pgdata_restored)
977
978self.set_auto_conf(
979node_restored, {'port': node_restored.port})
980
981node_restored.slow_start()
982
    # @unittest.skip("skip")
    def test_ptrack_get_block(self):
        """
        make node, make full and ptrack stream backups,
        restore them and check data correctness

        The first ptrack backup is suspended under gdb right before the
        ptrack page map is built; the table is updated while the backup
        is paused, checking that a concurrent change neither breaks the
        paused backup nor is lost by the following ptrack backup.
        """
        self._check_gdb_flag_or_skip_test()

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

        # Tiny table: two rows are enough to exercise the page map.
        node.safe_psql(
            "postgres",
            "create table t_heap as select i"
            " as id from generate_series(0,1) i")

        self.backup_node(backup_dir, 'node', node, options=['--stream'])
        gdb = self.backup_node(
            backup_dir, 'node', node, backup_type='ptrack',
            options=['--stream'],
            gdb=True)

        # Pause the backup just before the ptrack page map is assembled.
        gdb.set_breakpoint('make_pagemap_from_ptrack_2')
        gdb.run_until_break()

        # Modify the table while the backup process is stopped.
        node.safe_psql(
            "postgres",
            "update t_heap set id = 100500")

        gdb.continue_execution_until_exit()

        # A second ptrack backup must pick up the concurrent update.
        self.backup_node(
            backup_dir, 'node', node,
            backup_type='ptrack', options=['--stream'])

        if self.paranoia:
            pgdata = self.pgdata_content(node.data_dir)

        result = node.table_checksum("t_heap")
        node.cleanup()
        self.restore_node(backup_dir, 'node', node, options=["-j", "4"])

        # Physical comparison
        if self.paranoia:
            pgdata_restored = self.pgdata_content(
                node.data_dir, ignore_ptrack=False)
            self.compare_pgdata(pgdata, pgdata_restored)

        node.slow_start()
        # Logical comparison
        self.assertEqual(
            result,
            node.table_checksum("t_heap"))
1048
1049# @unittest.skip("skip")
1050def test_ptrack_stream(self):
1051"""make node, make full and ptrack stream backups,
1052restore them and check data correctness"""
1053backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
1054node = self.make_simple_node(
1055base_dir=os.path.join(self.module_name, self.fname, 'node'),
1056set_replication=True,
1057ptrack_enable=True,
1058initdb_params=['--data-checksums'],
1059pg_options={
1060'checkpoint_timeout': '30s'})
1061
1062self.init_pb(backup_dir)
1063self.add_instance(backup_dir, 'node', node)
1064node.slow_start()
1065
1066node.safe_psql(
1067"postgres",
1068"CREATE EXTENSION ptrack")
1069
1070# FULL BACKUP
1071node.safe_psql("postgres", "create sequence t_seq")
1072node.safe_psql(
1073"postgres",
1074"create table t_heap as select i as id, nextval('t_seq')"
1075" as t_seq, md5(i::text) as text, md5(i::text)::tsvector"
1076" as tsvector from generate_series(0,100) i")
1077
1078full_result = node.table_checksum("t_heap")
1079full_backup_id = self.backup_node(
1080backup_dir, 'node', node, options=['--stream'])
1081
1082# PTRACK BACKUP
1083node.safe_psql(
1084"postgres",
1085"insert into t_heap select i as id, nextval('t_seq') as t_seq,"
1086" md5(i::text) as text, md5(i::text)::tsvector as tsvector"
1087" from generate_series(100,200) i")
1088
1089ptrack_result = node.table_checksum("t_heap")
1090ptrack_backup_id = self.backup_node(
1091backup_dir, 'node', node,
1092backup_type='ptrack', options=['--stream'])
1093
1094if self.paranoia:
1095pgdata = self.pgdata_content(node.data_dir)
1096
1097# Drop Node
1098node.cleanup()
1099
1100# Restore and check full backup
1101self.assertIn(
1102"INFO: Restore of backup {0} completed.".format(full_backup_id),
1103self.restore_node(
1104backup_dir, 'node', node,
1105backup_id=full_backup_id, options=["-j", "4"]
1106),
1107'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
1108repr(self.output), self.cmd)
1109)
1110node.slow_start()
1111full_result_new = node.table_checksum("t_heap")
1112self.assertEqual(full_result, full_result_new)
1113node.cleanup()
1114
1115# Restore and check ptrack backup
1116self.assertIn(
1117"INFO: Restore of backup {0} completed.".format(ptrack_backup_id),
1118self.restore_node(
1119backup_dir, 'node', node,
1120backup_id=ptrack_backup_id, options=["-j", "4"]
1121),
1122'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
1123repr(self.output), self.cmd))
1124
1125if self.paranoia:
1126pgdata_restored = self.pgdata_content(
1127node.data_dir, ignore_ptrack=False)
1128self.compare_pgdata(pgdata, pgdata_restored)
1129
1130node.slow_start()
1131ptrack_result_new = node.table_checksum("t_heap")
1132self.assertEqual(ptrack_result, ptrack_result_new)
1133
1134# @unittest.skip("skip")
1135def test_ptrack_archive(self):
1136"""make archive node, make full and ptrack backups,
1137check data correctness in restored instance"""
1138backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
1139node = self.make_simple_node(
1140base_dir=os.path.join(self.module_name, self.fname, 'node'),
1141set_replication=True,
1142ptrack_enable=True,
1143initdb_params=['--data-checksums'],
1144pg_options={
1145'checkpoint_timeout': '30s'})
1146
1147self.init_pb(backup_dir)
1148self.add_instance(backup_dir, 'node', node)
1149self.set_archiving(backup_dir, 'node', node)
1150node.slow_start()
1151
1152node.safe_psql(
1153"postgres",
1154"CREATE EXTENSION ptrack")
1155
1156# FULL BACKUP
1157node.safe_psql(
1158"postgres",
1159"create table t_heap as"
1160" select i as id,"
1161" md5(i::text) as text,"
1162" md5(i::text)::tsvector as tsvector"
1163" from generate_series(0,100) i")
1164
1165full_result = node.table_checksum("t_heap")
1166full_backup_id = self.backup_node(backup_dir, 'node', node)
1167full_target_time = self.show_pb(
1168backup_dir, 'node', full_backup_id)['recovery-time']
1169
1170# PTRACK BACKUP
1171node.safe_psql(
1172"postgres",
1173"insert into t_heap select i as id,"
1174" md5(i::text) as text,"
1175" md5(i::text)::tsvector as tsvector"
1176" from generate_series(100,200) i")
1177
1178ptrack_result = node.table_checksum("t_heap")
1179ptrack_backup_id = self.backup_node(
1180backup_dir, 'node', node, backup_type='ptrack')
1181ptrack_target_time = self.show_pb(
1182backup_dir, 'node', ptrack_backup_id)['recovery-time']
1183if self.paranoia:
1184pgdata = self.pgdata_content(node.data_dir)
1185
1186node.safe_psql(
1187"postgres",
1188"insert into t_heap select i as id,"
1189" md5(i::text) as text,"
1190" md5(i::text)::tsvector as tsvector"
1191" from generate_series(200, 300) i")
1192
1193# Drop Node
1194node.cleanup()
1195
1196# Check full backup
1197self.assertIn(
1198"INFO: Restore of backup {0} completed.".format(full_backup_id),
1199self.restore_node(
1200backup_dir, 'node', node,
1201backup_id=full_backup_id,
1202options=[
1203"-j", "4", "--recovery-target-action=promote",
1204"--time={0}".format(full_target_time)]
1205),
1206'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
1207repr(self.output), self.cmd)
1208)
1209node.slow_start()
1210
1211full_result_new = node.table_checksum("t_heap")
1212self.assertEqual(full_result, full_result_new)
1213node.cleanup()
1214
1215# Check ptrack backup
1216self.assertIn(
1217"INFO: Restore of backup {0} completed.".format(ptrack_backup_id),
1218self.restore_node(
1219backup_dir, 'node', node,
1220backup_id=ptrack_backup_id,
1221options=[
1222"-j", "4",
1223"--time={0}".format(ptrack_target_time),
1224"--recovery-target-action=promote"]
1225),
1226'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
1227repr(self.output), self.cmd)
1228)
1229
1230if self.paranoia:
1231pgdata_restored = self.pgdata_content(
1232node.data_dir, ignore_ptrack=False)
1233self.compare_pgdata(pgdata, pgdata_restored)
1234
1235node.slow_start()
1236ptrack_result_new = node.table_checksum("t_heap")
1237self.assertEqual(ptrack_result, ptrack_result_new)
1238
1239node.cleanup()
1240
1241@unittest.skip("skip")
1242def test_ptrack_pgpro417(self):
1243"""
1244Make node, take full backup, take ptrack backup,
1245delete ptrack backup. Try to take ptrack backup,
1246which should fail. Actual only for PTRACK 1.x
1247"""
1248backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
1249node = self.make_simple_node(
1250base_dir=os.path.join(self.module_name, self.fname, 'node'),
1251set_replication=True,
1252ptrack_enable=True,
1253initdb_params=['--data-checksums'],
1254pg_options={
1255'checkpoint_timeout': '30s'})
1256
1257self.init_pb(backup_dir)
1258self.add_instance(backup_dir, 'node', node)
1259node.slow_start()
1260
1261# FULL BACKUP
1262node.safe_psql(
1263"postgres",
1264"create table t_heap as select i as id, md5(i::text) as text, "
1265"md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
1266
1267backup_id = self.backup_node(
1268backup_dir, 'node', node,
1269backup_type='full', options=["--stream"])
1270
1271start_lsn_full = self.show_pb(
1272backup_dir, 'node', backup_id)['start-lsn']
1273
1274# PTRACK BACKUP
1275node.safe_psql(
1276"postgres",
1277"insert into t_heap select i as id, md5(i::text) as text, "
1278"md5(i::text)::tsvector as tsvector "
1279"from generate_series(100,200) i")
1280node.table_checksum("t_heap")
1281backup_id = self.backup_node(
1282backup_dir, 'node', node,
1283backup_type='ptrack', options=["--stream"])
1284
1285start_lsn_ptrack = self.show_pb(
1286backup_dir, 'node', backup_id)['start-lsn']
1287
1288self.delete_pb(backup_dir, 'node', backup_id)
1289
1290# SECOND PTRACK BACKUP
1291node.safe_psql(
1292"postgres",
1293"insert into t_heap select i as id, md5(i::text) as text, "
1294"md5(i::text)::tsvector as tsvector "
1295"from generate_series(200,300) i")
1296
1297try:
1298self.backup_node(
1299backup_dir, 'node', node,
1300backup_type='ptrack', options=["--stream"])
1301# we should die here because exception is what we expect to happen
1302self.assertEqual(
13031, 0,
1304"Expecting Error because of LSN mismatch from ptrack_control "
1305"and previous backup start_lsn.\n"
1306" Output: {0} \n CMD: {1}".format(repr(self.output), self.cmd))
1307except ProbackupException as e:
1308self.assertTrue(
1309'ERROR: LSN from ptrack_control' in e.message,
1310'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
1311repr(e.message), self.cmd))
1312
1313@unittest.skip("skip")
1314def test_page_pgpro417(self):
1315"""
1316Make archive node, take full backup, take page backup,
1317delete page backup. Try to take ptrack backup, which should fail.
1318Actual only for PTRACK 1.x
1319"""
1320backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
1321node = self.make_simple_node(
1322base_dir=os.path.join(self.module_name, self.fname, 'node'),
1323set_replication=True,
1324ptrack_enable=True,
1325initdb_params=['--data-checksums'],
1326pg_options={
1327'checkpoint_timeout': '30s'})
1328
1329self.init_pb(backup_dir)
1330self.add_instance(backup_dir, 'node', node)
1331self.set_archiving(backup_dir, 'node', node)
1332node.slow_start()
1333
1334# FULL BACKUP
1335node.safe_psql(
1336"postgres",
1337"create table t_heap as select i as id, md5(i::text) as text, "
1338"md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
1339node.table_checksum("t_heap")
1340
1341# PAGE BACKUP
1342node.safe_psql(
1343"postgres",
1344"insert into t_heap select i as id, md5(i::text) as text, "
1345"md5(i::text)::tsvector as tsvector "
1346"from generate_series(100,200) i")
1347node.table_checksum("t_heap")
1348backup_id = self.backup_node(
1349backup_dir, 'node', node, backup_type='page')
1350
1351self.delete_pb(backup_dir, 'node', backup_id)
1352# sys.exit(1)
1353
1354# PTRACK BACKUP
1355node.safe_psql(
1356"postgres",
1357"insert into t_heap select i as id, md5(i::text) as text, "
1358"md5(i::text)::tsvector as tsvector "
1359"from generate_series(200,300) i")
1360
1361try:
1362self.backup_node(backup_dir, 'node', node, backup_type='ptrack')
1363# we should die here because exception is what we expect to happen
1364self.assertEqual(
13651, 0,
1366"Expecting Error because of LSN mismatch from ptrack_control "
1367"and previous backup start_lsn.\n "
1368"Output: {0}\n CMD: {1}".format(
1369repr(self.output), self.cmd))
1370except ProbackupException as e:
1371self.assertTrue(
1372'ERROR: LSN from ptrack_control' in e.message,
1373'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
1374repr(e.message), self.cmd))
1375
1376@unittest.skip("skip")
1377def test_full_pgpro417(self):
1378"""
1379Make node, take two full backups, delete full second backup.
1380Try to take ptrack backup, which should fail.
1381Relevant only for PTRACK 1.x
1382"""
1383backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
1384node = self.make_simple_node(
1385base_dir=os.path.join(self.module_name, self.fname, 'node'),
1386set_replication=True,
1387ptrack_enable=True,
1388initdb_params=['--data-checksums'],
1389pg_options={
1390'checkpoint_timeout': '30s'})
1391
1392self.init_pb(backup_dir)
1393self.add_instance(backup_dir, 'node', node)
1394node.slow_start()
1395
1396# FULL BACKUP
1397node.safe_psql(
1398"postgres",
1399"create table t_heap as select i as id, md5(i::text) as text,"
1400" md5(i::text)::tsvector as tsvector "
1401" from generate_series(0,100) i"
1402)
1403node.table_checksum("t_heap")
1404self.backup_node(backup_dir, 'node', node, options=["--stream"])
1405
1406# SECOND FULL BACKUP
1407node.safe_psql(
1408"postgres",
1409"insert into t_heap select i as id, md5(i::text) as text,"
1410" md5(i::text)::tsvector as tsvector"
1411" from generate_series(100,200) i"
1412)
1413node.table_checksum("t_heap")
1414backup_id = self.backup_node(
1415backup_dir, 'node', node, options=["--stream"])
1416
1417self.delete_pb(backup_dir, 'node', backup_id)
1418
1419# PTRACK BACKUP
1420node.safe_psql(
1421"postgres",
1422"insert into t_heap select i as id, md5(i::text) as text, "
1423"md5(i::text)::tsvector as tsvector "
1424"from generate_series(200,300) i")
1425try:
1426self.backup_node(
1427backup_dir, 'node', node,
1428backup_type='ptrack', options=["--stream"])
1429# we should die here because exception is what we expect to happen
1430self.assertEqual(
14311, 0,
1432"Expecting Error because of LSN mismatch from ptrack_control "
1433"and previous backup start_lsn.\n "
1434"Output: {0} \n CMD: {1}".format(
1435repr(self.output), self.cmd)
1436)
1437except ProbackupException as e:
1438self.assertTrue(
1439"ERROR: LSN from ptrack_control" in e.message and
1440"Create new full backup before "
1441"an incremental one" in e.message,
1442'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
1443repr(e.message), self.cmd))
1444
1445# @unittest.skip("skip")
1446def test_create_db(self):
1447"""
1448Make node, take full backup, create database db1, take ptrack backup,
1449restore database and check it presense
1450"""
1451backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
1452node = self.make_simple_node(
1453base_dir=os.path.join(self.module_name, self.fname, 'node'),
1454set_replication=True,
1455ptrack_enable=True,
1456initdb_params=['--data-checksums'],
1457pg_options={
1458'max_wal_size': '10GB'})
1459
1460self.init_pb(backup_dir)
1461self.add_instance(backup_dir, 'node', node)
1462node.slow_start()
1463
1464node.safe_psql(
1465"postgres",
1466"CREATE EXTENSION ptrack")
1467
1468# FULL BACKUP
1469node.safe_psql(
1470"postgres",
1471"create table t_heap as select i as id, md5(i::text) as text, "
1472"md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
1473
1474node.table_checksum("t_heap")
1475self.backup_node(
1476backup_dir, 'node', node,
1477options=["--stream"])
1478
1479# CREATE DATABASE DB1
1480node.safe_psql("postgres", "create database db1")
1481node.safe_psql(
1482"db1",
1483"create table t_heap as select i as id, md5(i::text) as text, "
1484"md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
1485
1486# PTRACK BACKUP
1487backup_id = self.backup_node(
1488backup_dir, 'node', node,
1489backup_type='ptrack', options=["--stream"])
1490
1491if self.paranoia:
1492pgdata = self.pgdata_content(node.data_dir)
1493
1494# RESTORE
1495node_restored = self.make_simple_node(
1496base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
1497
1498node_restored.cleanup()
1499self.restore_node(
1500backup_dir, 'node', node_restored,
1501backup_id=backup_id, options=["-j", "4"])
1502
1503# COMPARE PHYSICAL CONTENT
1504if self.paranoia:
1505pgdata_restored = self.pgdata_content(
1506node_restored.data_dir, ignore_ptrack=False)
1507self.compare_pgdata(pgdata, pgdata_restored)
1508
1509# START RESTORED NODE
1510self.set_auto_conf(
1511node_restored, {'port': node_restored.port})
1512node_restored.slow_start()
1513
1514# DROP DATABASE DB1
1515node.safe_psql(
1516"postgres", "drop database db1")
1517# SECOND PTRACK BACKUP
1518backup_id = self.backup_node(
1519backup_dir, 'node', node,
1520backup_type='ptrack', options=["--stream"]
1521)
1522
1523if self.paranoia:
1524pgdata = self.pgdata_content(node.data_dir)
1525
1526# RESTORE SECOND PTRACK BACKUP
1527node_restored.cleanup()
1528self.restore_node(
1529backup_dir, 'node', node_restored,
1530backup_id=backup_id, options=["-j", "4"])
1531
1532# COMPARE PHYSICAL CONTENT
1533if self.paranoia:
1534pgdata_restored = self.pgdata_content(
1535node_restored.data_dir, ignore_ptrack=False)
1536self.compare_pgdata(pgdata, pgdata_restored)
1537
1538# START RESTORED NODE
1539self.set_auto_conf(
1540node_restored, {'port': node_restored.port})
1541node_restored.slow_start()
1542
1543try:
1544node_restored.safe_psql('db1', 'select 1')
1545# we should die here because exception is what we expect to happen
1546self.assertEqual(
15471, 0,
1548"Expecting Error because we are connecting to deleted database"
1549"\n Output: {0} \n CMD: {1}".format(
1550repr(self.output), self.cmd))
1551except QueryException as e:
1552self.assertTrue(
1553'FATAL: database "db1" does not exist' in e.message,
1554'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
1555repr(e.message), self.cmd))
1556
    # @unittest.skip("skip")
    def test_create_db_on_replica(self):
        """
        Make node, take full backup, create replica from it,
        take full backup from replica,
        create database db1, take ptrack backup from replica,
        restore database and check it presense
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'checkpoint_timeout': '30s'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

        # FULL BACKUP
        node.safe_psql(
            "postgres",
            "create table t_heap as select i as id, md5(i::text) as text, "
            "md5(i::text)::tsvector as tsvector from generate_series(0,100) i")

        replica = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'replica'))
        replica.cleanup()

        self.backup_node(
            backup_dir, 'node', node, options=['-j10', '--stream'])

        # Seed the replica's data dir from the master's backup.
        self.restore_node(backup_dir, 'node', replica)

        # Add replica
        self.add_instance(backup_dir, 'replica', replica)
        # synchronous=True so commits on the master wait for the replica,
        # keeping the two nodes in lockstep for this test.
        self.set_replica(node, replica, 'replica', synchronous=True)
        replica.slow_start(replica=True)

        # FULL backup taken from the standby; the --master-* options let
        # pg_probackup reach the master for the helper queries it needs.
        self.backup_node(
            backup_dir, 'replica', replica,
            options=[
                '-j10',
                '--master-host=localhost',
                '--master-db=postgres',
                '--master-port={0}'.format(node.port),
                '--stream'
                ]
            )

        # CREATE DATABASE DB1
        node.safe_psql("postgres", "create database db1")
        node.safe_psql(
            "db1",
            "create table t_heap as select i as id, md5(i::text) as text, "
            "md5(i::text)::tsvector as tsvector from generate_series(0,100) i")

        # Wait until replica catch up with master
        self.wait_until_replica_catch_with_master(node, replica)
        replica.safe_psql('postgres', 'checkpoint')

        # PTRACK BACKUP
        backup_id = self.backup_node(
            backup_dir, 'replica',
            replica, backup_type='ptrack',
            options=[
                '-j10',
                '--stream',
                '--master-host=localhost',
                '--master-db=postgres',
                '--master-port={0}'.format(node.port)
                ]
            )

        if self.paranoia:
            pgdata = self.pgdata_content(replica.data_dir)

        # RESTORE
        node_restored = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
        node_restored.cleanup()

        self.restore_node(
            backup_dir, 'replica', node_restored,
            backup_id=backup_id, options=["-j", "4"])

        # COMPARE PHYSICAL CONTENT
        if self.paranoia:
            pgdata_restored = self.pgdata_content(
                node_restored.data_dir)
            self.compare_pgdata(pgdata, pgdata_restored)
1654
1655# @unittest.skip("skip")
1656def test_alter_table_set_tablespace_ptrack(self):
1657"""Make node, create tablespace with table, take full backup,
1658alter tablespace location, take ptrack backup, restore database."""
1659backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
1660node = self.make_simple_node(
1661base_dir=os.path.join(self.module_name, self.fname, 'node'),
1662set_replication=True,
1663ptrack_enable=True,
1664initdb_params=['--data-checksums'],
1665pg_options={
1666'checkpoint_timeout': '30s'})
1667
1668self.init_pb(backup_dir)
1669self.add_instance(backup_dir, 'node', node)
1670node.slow_start()
1671
1672node.safe_psql(
1673"postgres",
1674"CREATE EXTENSION ptrack")
1675
1676# FULL BACKUP
1677self.create_tblspace_in_node(node, 'somedata')
1678node.safe_psql(
1679"postgres",
1680"create table t_heap tablespace somedata as select i as id,"
1681" md5(i::text) as text, md5(i::text)::tsvector as tsvector"
1682" from generate_series(0,100) i")
1683# FULL backup
1684self.backup_node(backup_dir, 'node', node, options=["--stream"])
1685
1686# ALTER TABLESPACE
1687self.create_tblspace_in_node(node, 'somedata_new')
1688node.safe_psql(
1689"postgres",
1690"alter table t_heap set tablespace somedata_new")
1691
1692# sys.exit(1)
1693# PTRACK BACKUP
1694#result = node.table_checksum("t_heap")
1695self.backup_node(
1696backup_dir, 'node', node,
1697backup_type='ptrack',
1698options=["--stream"]
1699)
1700if self.paranoia:
1701pgdata = self.pgdata_content(node.data_dir)
1702# node.stop()
1703# node.cleanup()
1704
1705# RESTORE
1706node_restored = self.make_simple_node(
1707base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
1708node_restored.cleanup()
1709
1710self.restore_node(
1711backup_dir, 'node', node_restored,
1712options=[
1713"-j", "4",
1714"-T", "{0}={1}".format(
1715self.get_tblspace_path(node, 'somedata'),
1716self.get_tblspace_path(node_restored, 'somedata')
1717),
1718"-T", "{0}={1}".format(
1719self.get_tblspace_path(node, 'somedata_new'),
1720self.get_tblspace_path(node_restored, 'somedata_new')
1721)
1722]
1723)
1724
1725# GET RESTORED PGDATA AND COMPARE
1726if self.paranoia:
1727pgdata_restored = self.pgdata_content(
1728node_restored.data_dir, ignore_ptrack=False)
1729self.compare_pgdata(pgdata, pgdata_restored)
1730
1731# START RESTORED NODE
1732self.set_auto_conf(
1733node_restored, {'port': node_restored.port})
1734node_restored.slow_start()
1735
1736# result_new = node_restored.table_checksum("t_heap")
1737#
1738# self.assertEqual(result, result_new, 'lost some data after restore')
1739
1740# @unittest.skip("skip")
1741def test_alter_database_set_tablespace_ptrack(self):
1742"""Make node, create tablespace with database,"
1743" take full backup, alter tablespace location,"
1744" take ptrack backup, restore database."""
1745backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
1746node = self.make_simple_node(
1747base_dir=os.path.join(self.module_name, self.fname, 'node'),
1748set_replication=True,
1749ptrack_enable=True,
1750initdb_params=['--data-checksums'],
1751pg_options={
1752'checkpoint_timeout': '30s'})
1753
1754self.init_pb(backup_dir)
1755self.add_instance(backup_dir, 'node', node)
1756node.slow_start()
1757
1758node.safe_psql(
1759"postgres",
1760"CREATE EXTENSION ptrack")
1761
1762# FULL BACKUP
1763self.backup_node(backup_dir, 'node', node, options=["--stream"])
1764
1765# CREATE TABLESPACE
1766self.create_tblspace_in_node(node, 'somedata')
1767
1768# ALTER DATABASE
1769node.safe_psql(
1770"template1",
1771"alter database postgres set tablespace somedata")
1772
1773# PTRACK BACKUP
1774self.backup_node(
1775backup_dir, 'node', node, backup_type='ptrack',
1776options=["--stream"])
1777
1778if self.paranoia:
1779pgdata = self.pgdata_content(node.data_dir)
1780node.stop()
1781
1782# RESTORE
1783node_restored = self.make_simple_node(
1784base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
1785node_restored.cleanup()
1786self.restore_node(
1787backup_dir, 'node',
1788node_restored,
1789options=[
1790"-j", "4",
1791"-T", "{0}={1}".format(
1792self.get_tblspace_path(node, 'somedata'),
1793self.get_tblspace_path(node_restored, 'somedata'))])
1794
1795# GET PHYSICAL CONTENT and COMPARE PHYSICAL CONTENT
1796if self.paranoia:
1797pgdata_restored = self.pgdata_content(
1798node_restored.data_dir, ignore_ptrack=False)
1799self.compare_pgdata(pgdata, pgdata_restored)
1800
1801# START RESTORED NODE
1802node_restored.port = node.port
1803node_restored.slow_start()
1804
1805# @unittest.skip("skip")
1806def test_drop_tablespace(self):
1807"""
1808Make node, create table, alter table tablespace, take ptrack backup,
1809move table from tablespace, take ptrack backup
1810"""
1811backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
1812node = self.make_simple_node(
1813base_dir=os.path.join(self.module_name, self.fname, 'node'),
1814set_replication=True,
1815ptrack_enable=True,
1816initdb_params=['--data-checksums'],
1817pg_options={
1818'checkpoint_timeout': '30s'})
1819
1820self.init_pb(backup_dir)
1821self.add_instance(backup_dir, 'node', node)
1822node.slow_start()
1823
1824node.safe_psql(
1825"postgres",
1826"CREATE EXTENSION ptrack")
1827
1828self.create_tblspace_in_node(node, 'somedata')
1829
1830# CREATE TABLE
1831node.safe_psql(
1832"postgres",
1833"create table t_heap as select i as id, md5(i::text) as text, "
1834"md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
1835
1836result = node.table_checksum("t_heap")
1837# FULL BACKUP
1838self.backup_node(backup_dir, 'node', node, options=["--stream"])
1839
1840# Move table to tablespace 'somedata'
1841node.safe_psql(
1842"postgres", "alter table t_heap set tablespace somedata")
1843# PTRACK BACKUP
1844self.backup_node(
1845backup_dir, 'node', node,
1846backup_type='ptrack', options=["--stream"])
1847
1848# Move table back to default tablespace
1849node.safe_psql(
1850"postgres", "alter table t_heap set tablespace pg_default")
1851# SECOND PTRACK BACKUP
1852self.backup_node(
1853backup_dir, 'node', node,
1854backup_type='ptrack', options=["--stream"])
1855
1856# DROP TABLESPACE 'somedata'
1857node.safe_psql(
1858"postgres", "drop tablespace somedata")
1859# THIRD PTRACK BACKUP
1860self.backup_node(
1861backup_dir, 'node', node,
1862backup_type='ptrack', options=["--stream"])
1863
1864if self.paranoia:
1865pgdata = self.pgdata_content(
1866node.data_dir, ignore_ptrack=True)
1867
1868tblspace = self.get_tblspace_path(node, 'somedata')
1869node.cleanup()
1870shutil.rmtree(tblspace, ignore_errors=True)
1871self.restore_node(backup_dir, 'node', node, options=["-j", "4"])
1872
1873if self.paranoia:
1874pgdata_restored = self.pgdata_content(
1875node.data_dir, ignore_ptrack=True)
1876
1877node.slow_start()
1878
1879tblspc_exist = node.safe_psql(
1880"postgres",
1881"select exists(select 1 from "
1882"pg_tablespace where spcname = 'somedata')")
1883
1884if tblspc_exist.rstrip() == 't':
1885self.assertEqual(
18861, 0,
1887"Expecting Error because "
1888"tablespace 'somedata' should not be present")
1889
1890result_new = node.table_checksum("t_heap")
1891self.assertEqual(result, result_new)
1892
1893if self.paranoia:
1894self.compare_pgdata(pgdata, pgdata_restored)
1895
1896# @unittest.skip("skip")
1897def test_ptrack_alter_tablespace(self):
1898"""
1899Make node, create table, alter table tablespace, take ptrack backup,
1900move table from tablespace, take ptrack backup
1901"""
1902backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
1903node = self.make_simple_node(
1904base_dir=os.path.join(self.module_name, self.fname, 'node'),
1905set_replication=True,
1906ptrack_enable=True,
1907initdb_params=['--data-checksums'],
1908pg_options={
1909'checkpoint_timeout': '30s'})
1910
1911self.init_pb(backup_dir)
1912self.add_instance(backup_dir, 'node', node)
1913node.slow_start()
1914
1915node.safe_psql(
1916"postgres",
1917"CREATE EXTENSION ptrack")
1918
1919self.create_tblspace_in_node(node, 'somedata')
1920tblspc_path = self.get_tblspace_path(node, 'somedata')
1921
1922# CREATE TABLE
1923node.safe_psql(
1924"postgres",
1925"create table t_heap as select i as id, md5(i::text) as text, "
1926"md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
1927
1928result = node.table_checksum("t_heap")
1929# FULL BACKUP
1930self.backup_node(backup_dir, 'node', node, options=["--stream"])
1931
1932# Move table to separate tablespace
1933node.safe_psql(
1934"postgres",
1935"alter table t_heap set tablespace somedata")
1936# GET LOGICAL CONTENT FROM NODE
1937result = node.table_checksum("t_heap")
1938
1939# FIRTS PTRACK BACKUP
1940self.backup_node(
1941backup_dir, 'node', node, backup_type='ptrack',
1942options=["--stream"])
1943
1944# GET PHYSICAL CONTENT FROM NODE
1945if self.paranoia:
1946pgdata = self.pgdata_content(node.data_dir)
1947
1948# Restore ptrack backup
1949restored_node = self.make_simple_node(
1950base_dir=os.path.join(self.module_name, self.fname, 'restored_node'))
1951restored_node.cleanup()
1952tblspc_path_new = self.get_tblspace_path(
1953restored_node, 'somedata_restored')
1954self.restore_node(backup_dir, 'node', restored_node, options=[
1955"-j", "4", "-T", "{0}={1}".format(tblspc_path, tblspc_path_new)])
1956
1957# GET PHYSICAL CONTENT FROM RESTORED NODE and COMPARE PHYSICAL CONTENT
1958if self.paranoia:
1959pgdata_restored = self.pgdata_content(
1960restored_node.data_dir, ignore_ptrack=False)
1961self.compare_pgdata(pgdata, pgdata_restored)
1962
1963# START RESTORED NODE
1964self.set_auto_conf(
1965restored_node, {'port': restored_node.port})
1966restored_node.slow_start()
1967
1968# COMPARE LOGICAL CONTENT
1969result_new = restored_node.table_checksum("t_heap")
1970self.assertEqual(result, result_new)
1971
1972restored_node.cleanup()
1973shutil.rmtree(tblspc_path_new, ignore_errors=True)
1974
1975# Move table to default tablespace
1976node.safe_psql(
1977"postgres", "alter table t_heap set tablespace pg_default")
1978# SECOND PTRACK BACKUP
1979self.backup_node(
1980backup_dir, 'node', node, backup_type='ptrack',
1981options=["--stream"])
1982
1983if self.paranoia:
1984pgdata = self.pgdata_content(node.data_dir)
1985
1986# Restore second ptrack backup and check table consistency
1987self.restore_node(
1988backup_dir, 'node', restored_node,
1989options=[
1990"-j", "4", "-T", "{0}={1}".format(tblspc_path, tblspc_path_new)])
1991
1992# GET PHYSICAL CONTENT FROM RESTORED NODE and COMPARE PHYSICAL CONTENT
1993if self.paranoia:
1994pgdata_restored = self.pgdata_content(
1995restored_node.data_dir, ignore_ptrack=False)
1996self.compare_pgdata(pgdata, pgdata_restored)
1997
1998# START RESTORED NODE
1999self.set_auto_conf(
2000restored_node, {'port': restored_node.port})
2001restored_node.slow_start()
2002
2003result_new = restored_node.table_checksum("t_heap")
2004self.assertEqual(result, result_new)
2005
# @unittest.skip("skip")
def test_ptrack_multiple_segments(self):
    """
    Make node, create a multi-segment pgbench table in a tablespace,
    take a FULL backup, run pgbench to dirty many segments, take a
    PTRACK backup, then restore with a tablespace remap and verify
    logical content (and physical content under --paranoia).
    """
    backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
    node = self.make_simple_node(
        base_dir=os.path.join(self.module_name, self.fname, 'node'),
        set_replication=True,
        ptrack_enable=True,
        initdb_params=['--data-checksums'],
        pg_options={
            'full_page_writes': 'off'})

    self.init_pb(backup_dir)
    self.add_instance(backup_dir, 'node', node)
    node.slow_start()

    node.safe_psql(
        "postgres",
        "CREATE EXTENSION ptrack")

    self.create_tblspace_in_node(node, 'somedata')

    # CREATE TABLE (scale=100 makes pgbench_accounts span multiple 1GB segments)
    node.pgbench_init(scale=100, options=['--tablespace=somedata'])
    result = node.table_checksum("pgbench_accounts")
    # FULL BACKUP
    self.backup_node(backup_dir, 'node', node, options=['--stream'])

    # PTRACK STUFF
    # NOTE(review): dead branch — setUp() skips the whole class on
    # PostgreSQL < 11; the local dict also shadows the module-level
    # idx_ptrack import while it lives.
    if node.major_version < 11:
        idx_ptrack = {'type': 'heap'}
        idx_ptrack['path'] = self.get_fork_path(node, 'pgbench_accounts')
        idx_ptrack['old_size'] = self.get_fork_size(node, 'pgbench_accounts')
        idx_ptrack['old_pages'] = self.get_md5_per_page_for_fork(
            idx_ptrack['path'], idx_ptrack['old_size'])

    # Dirty pages across many segments, then force them to disk.
    pgbench = node.pgbench(
        options=['-T', '30', '-c', '1', '--no-vacuum'])
    pgbench.wait()

    node.safe_psql("postgres", "checkpoint")

    if node.major_version < 11:
        idx_ptrack['new_size'] = self.get_fork_size(
            node,
            'pgbench_accounts')

        idx_ptrack['new_pages'] = self.get_md5_per_page_for_fork(
            idx_ptrack['path'],
            idx_ptrack['new_size'])

        idx_ptrack['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
            node,
            idx_ptrack['path'])

        if not self.check_ptrack_sanity(idx_ptrack):
            self.assertTrue(
                False, 'Ptrack has failed to register changes in data files')

    # GET LOGICAL CONTENT FROM NODE
    # it`s stupid, because hint`s are ignored by ptrack
    result = node.table_checksum("pgbench_accounts")
    # FIRST PTRACK BACKUP
    self.backup_node(
        backup_dir, 'node', node, backup_type='ptrack', options=['--stream'])

    # GET PHYSICAL CONTENT FROM NODE
    pgdata = self.pgdata_content(node.data_dir)

    # RESTORE NODE with the tablespace remapped to a fresh location
    restored_node = self.make_simple_node(
        base_dir=os.path.join(self.module_name, self.fname, 'restored_node'))
    restored_node.cleanup()
    tblspc_path = self.get_tblspace_path(node, 'somedata')
    tblspc_path_new = self.get_tblspace_path(
        restored_node,
        'somedata_restored')

    self.restore_node(
        backup_dir, 'node', restored_node,
        options=[
            "-j", "4", "-T", "{0}={1}".format(
                tblspc_path, tblspc_path_new)])

    # GET PHYSICAL CONTENT FROM NODE_RESTORED
    if self.paranoia:
        pgdata_restored = self.pgdata_content(
            restored_node.data_dir, ignore_ptrack=False)

    # START RESTORED NODE
    self.set_auto_conf(
        restored_node, {'port': restored_node.port})
    restored_node.slow_start()

    result_new = restored_node.table_checksum("pgbench_accounts")

    # COMPARE RESTORED FILES
    self.assertEqual(result, result_new, 'data is lost')

    if self.paranoia:
        self.compare_pgdata(pgdata, pgdata_restored)
2110
@unittest.skip("skip")
def test_atexit_fail(self):
    """
    Start a PTRACK backup with more workers than max_connections allows,
    check that the backup fails and is set to ERROR, and that the server
    is left with no backup mode active afterwards.
    Relevant only for PTRACK 1.x
    """
    node = self.make_simple_node(
        base_dir=os.path.join(self.module_name, self.fname, 'node'),
        set_replication=True,
        ptrack_enable=True,
        initdb_params=['--data-checksums'],
        pg_options={
            'max_connections': '15'})

    backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
    self.init_pb(backup_dir)
    self.add_instance(backup_dir, 'node', node)
    node.slow_start()

    # Take FULL backup to clean every ptrack
    self.backup_node(
        backup_dir, 'node', node, options=['--stream'])

    try:
        # 30 workers exceed max_connections=15, so the backup must fail.
        self.backup_node(
            backup_dir, 'node', node, backup_type='ptrack',
            options=["--stream", "-j 30"])

        # we should die here because exception is what we expect to happen
        self.assertEqual(
            1, 0,
            "Expecting Error because we are opening too many connections"
            "\n Output: {0} \n CMD: {1}".format(
                repr(self.output), self.cmd)
            )
    except ProbackupException as e:
        self.assertIn(
            'setting its status to ERROR',
            e.message,
            '\n Unexpected Error Message: {0}\n'
            ' CMD: {1}'.format(repr(e.message), self.cmd)
            )

    # BUGFIX: safe_psql() returns bytes (see the .decode('utf-8') usage
    # elsewhere in this file), so decode before comparing with the str
    # literal "f" — the old bytes-vs-str comparison could never be equal.
    self.assertEqual(
        node.safe_psql(
            "postgres",
            "select * from pg_is_in_backup()").decode('utf-8').rstrip(),
        "f")
2159
@unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_clean(self):
    """
    Take a FULL, a PTRACK and a PAGE backup in sequence and verify
    after each one that every ptrack bit has been reset.
    Relevant only for PTRACK 1.x
    """
    node = self.make_simple_node(
        base_dir=os.path.join(self.module_name, self.fname, 'node'),
        set_replication=True,
        ptrack_enable=True,
        initdb_params=['--data-checksums'])

    backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
    self.init_pb(backup_dir)
    self.add_instance(backup_dir, 'node', node)
    node.slow_start()

    self.create_tblspace_in_node(node, 'somedata')

    # Populate a table plus one index per supported access method.
    node.safe_psql(
        "postgres",
        "create extension bloom; create sequence t_seq; "
        "create table t_heap tablespace somedata "
        "as select i as id, nextval('t_seq') as t_seq, "
        "md5(i::text) as text, "
        "md5(repeat(i::text,10))::tsvector as tsvector "
        "from generate_series(0,2560) i")
    for idx_name, meta in idx_ptrack.items():
        if meta['type'] in ('heap', 'seq'):
            continue
        node.safe_psql(
            "postgres",
            "create index {0} on {1} using {2}({3}) "
            "tablespace somedata".format(
                idx_name, meta['relation'], meta['type'], meta['column']))

    def assert_all_ptrack_clean():
        # Refresh size/path of every relation (they may change between
        # backups) and verify that no ptrack bit is left set.
        for idx_name in idx_ptrack:
            idx_ptrack[idx_name]['size'] = self.get_fork_size(node, idx_name)
            idx_ptrack[idx_name]['path'] = self.get_fork_path(node, idx_name)
            idx_ptrack[idx_name]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                node, idx_ptrack[idx_name]['path'], [idx_ptrack[idx_name]['size']])
            self.check_ptrack_clean(idx_ptrack[idx_name], idx_ptrack[idx_name]['size'])

    def churn_and_vacuum():
        # Touch every row, then vacuum, so the next backup sees dirty pages.
        node.safe_psql(
            'postgres',
            "update t_heap set t_seq = nextval('t_seq'), "
            "text = md5(text), "
            "tsvector = md5(repeat(tsvector::text, 10))::tsvector;")
        node.safe_psql('postgres', 'vacuum t_heap')

    # FULL backup must reset every ptrack bit.
    self.backup_node(
        backup_dir, 'node', node,
        options=['-j10', '--stream'])
    node.safe_psql('postgres', 'checkpoint')
    assert_all_ptrack_clean()

    churn_and_vacuum()

    # PTRACK backup must reset every ptrack bit as well.
    self.backup_node(
        backup_dir, 'node', node, backup_type='ptrack', options=['-j10', '--stream'])
    node.safe_psql('postgres', 'checkpoint')
    assert_all_ptrack_clean()

    churn_and_vacuum()

    # ...and so must a PAGE backup.
    self.backup_node(
        backup_dir, 'node', node,
        backup_type='page', options=['-j10', '--stream'])
    node.safe_psql('postgres', 'checkpoint')
    assert_all_ptrack_clean()
2264
@unittest.skip("skip")
def test_ptrack_clean_replica(self):
    """
    Take backups of every available types from
    master and check that PTRACK on replica is clean.
    Relevant only for PTRACK 1.x
    """
    master = self.make_simple_node(
        base_dir=os.path.join(self.module_name, self.fname, 'master'),
        set_replication=True,
        ptrack_enable=True,
        initdb_params=['--data-checksums'],
        pg_options={
            'archive_timeout': '30s'})

    backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
    self.init_pb(backup_dir)
    self.add_instance(backup_dir, 'master', master)
    master.slow_start()

    self.backup_node(backup_dir, 'master', master, options=['--stream'])

    # Spawn a synchronous physical standby from the master's FULL backup.
    replica = self.make_simple_node(
        base_dir=os.path.join(self.module_name, self.fname, 'replica'))
    replica.cleanup()

    self.restore_node(backup_dir, 'master', replica)

    self.add_instance(backup_dir, 'replica', replica)
    self.set_replica(master, replica, synchronous=True)
    replica.slow_start(replica=True)

    # Create table and indexes
    master.safe_psql(
        "postgres",
        "create extension bloom; create sequence t_seq; "
        "create table t_heap as select i as id, "
        "nextval('t_seq') as t_seq, md5(i::text) as text, "
        "md5(repeat(i::text,10))::tsvector as tsvector "
        "from generate_series(0,2560) i")
    for i in idx_ptrack:
        if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
            master.safe_psql(
                "postgres",
                "create index {0} on {1} using {2}({3})".format(
                    i, idx_ptrack[i]['relation'],
                    idx_ptrack[i]['type'],
                    idx_ptrack[i]['column']))

    # Take FULL backup to clean every ptrack
    self.backup_node(
        backup_dir,
        'replica',
        replica,
        options=[
            '-j10', '--stream',
            '--master-host=localhost',
            '--master-db=postgres',
            '--master-port={0}'.format(master.port)])
    master.safe_psql('postgres', 'checkpoint')

    for i in idx_ptrack:
        # get fork size and calculate it in pages
        idx_ptrack[i]['size'] = self.get_fork_size(replica, i)
        # get path to heap and index files
        idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
        # get ptrack for every idx
        idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
            replica, idx_ptrack[i]['path'], [idx_ptrack[i]['size']])
        self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])

    # Update everything and vacuum it
    master.safe_psql(
        'postgres',
        "update t_heap set t_seq = nextval('t_seq'), "
        "text = md5(text), "
        "tsvector = md5(repeat(tsvector::text, 10))::tsvector;")
    master.safe_psql('postgres', 'vacuum t_heap')

    # Take PTRACK backup to clean every ptrack
    backup_id = self.backup_node(
        backup_dir,
        'replica',
        replica,
        backup_type='ptrack',
        options=[
            '-j10', '--stream',
            '--master-host=localhost',
            '--master-db=postgres',
            '--master-port={0}'.format(master.port)])
    master.safe_psql('postgres', 'checkpoint')

    for i in idx_ptrack:
        # get new size of heap and indexes and calculate it in pages
        idx_ptrack[i]['size'] = self.get_fork_size(replica, i)
        # update path to heap and index files in case they`ve changed
        idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
        # # get ptrack for every idx
        idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
            replica, idx_ptrack[i]['path'], [idx_ptrack[i]['size']])
        # check that ptrack bits are cleaned
        self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])

    # Update everything and vacuum it
    master.safe_psql(
        'postgres',
        "update t_heap set t_seq = nextval('t_seq'), text = md5(text), "
        "tsvector = md5(repeat(tsvector::text, 10))::tsvector;")
    master.safe_psql('postgres', 'vacuum t_heap')
    master.safe_psql('postgres', 'checkpoint')

    # Take PAGE backup to clean every ptrack
    self.backup_node(
        backup_dir,
        'replica',
        replica,
        backup_type='page',
        options=[
            '-j10', '--master-host=localhost',
            '--master-db=postgres',
            '--master-port={0}'.format(master.port),
            '--stream'])
    master.safe_psql('postgres', 'checkpoint')

    for i in idx_ptrack:
        # get new size of heap and indexes and calculate it in pages
        idx_ptrack[i]['size'] = self.get_fork_size(replica, i)
        # update path to heap and index files in case they`ve changed
        idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
        # # get ptrack for every idx
        idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
            replica, idx_ptrack[i]['path'], [idx_ptrack[i]['size']])
        # check that ptrack bits are cleaned
        self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])
2399
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_cluster_on_btree(self):
    """
    Run CLUSTER over a btree index after a FULL backup; on pre-11
    servers verify that the ptrack map registered the rewrite.
    """
    node = self.make_simple_node(
        base_dir=os.path.join(self.module_name, self.fname, 'node'),
        set_replication=True,
        ptrack_enable=True,
        initdb_params=['--data-checksums'])

    backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
    self.init_pb(backup_dir)
    self.add_instance(backup_dir, 'node', node)
    node.slow_start()

    node.safe_psql(
        "postgres",
        "CREATE EXTENSION ptrack")

    self.create_tblspace_in_node(node, 'somedata')

    # Build the test relation plus one index per supported access method.
    node.safe_psql(
        "postgres",
        "create extension bloom; create sequence t_seq; "
        "create table t_heap tablespace somedata "
        "as select i as id, nextval('t_seq') as t_seq, "
        "md5(i::text) as text, md5(repeat(i::text,10))::tsvector "
        "as tsvector from generate_series(0,2560) i")
    for idx_name, meta in idx_ptrack.items():
        if meta['type'] in ('heap', 'seq'):
            continue
        node.safe_psql(
            "postgres",
            "create index {0} on {1} using {2}({3}) "
            "tablespace somedata".format(
                idx_name, meta['relation'], meta['type'], meta['column']))

    node.safe_psql('postgres', 'vacuum t_heap')
    node.safe_psql('postgres', 'checkpoint')

    # Pre-11 only: remember size, path and per-page md5 of every fork.
    if node.major_version < 11:
        for idx_name in idx_ptrack:
            entry = idx_ptrack[idx_name]
            entry['old_size'] = self.get_fork_size(node, idx_name)
            entry['path'] = self.get_fork_path(node, idx_name)
            entry['old_pages'] = self.get_md5_per_page_for_fork(
                entry['path'], entry['old_size'])

    self.backup_node(
        backup_dir, 'node', node, options=['-j10', '--stream'])

    # Rewrite the table (and all of its indexes) via CLUSTER.
    node.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
    node.safe_psql('postgres', 'cluster t_heap using t_btree')
    node.safe_psql('postgres', 'checkpoint')

    # Ptrack-map sanity checks exist only for ptrack 1.x (PostgreSQL < 11).
    if node.major_version < 11:
        self.check_ptrack_map_sanity(node, idx_ptrack)
2460
# @unittest.skip("skip")
def test_ptrack_cluster_on_gist(self):
    """
    A CLUSTER over a GiST index must be fully captured by the
    incremental PTRACK backup: back up, CLUSTER, take a PTRACK
    backup, restore it and compare the data directories.
    """
    node = self.make_simple_node(
        base_dir=os.path.join(self.module_name, self.fname, 'node'),
        set_replication=True,
        ptrack_enable=True,
        initdb_params=['--data-checksums'])

    backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
    self.init_pb(backup_dir)
    self.add_instance(backup_dir, 'node', node)
    node.slow_start()

    node.safe_psql(
        "postgres",
        "CREATE EXTENSION ptrack")

    # Build the test relation plus one index per supported access method.
    node.safe_psql(
        "postgres",
        "create extension bloom; create sequence t_seq; "
        "create table t_heap as select i as id, "
        "nextval('t_seq') as t_seq, md5(i::text) as text, "
        "md5(repeat(i::text,10))::tsvector as tsvector "
        "from generate_series(0,2560) i")
    for idx_name, meta in idx_ptrack.items():
        if meta['type'] in ('heap', 'seq'):
            continue
        node.safe_psql(
            "postgres",
            "create index {0} on {1} using {2}({3})".format(
                idx_name, meta['relation'], meta['type'], meta['column']))

    node.safe_psql('postgres', 'vacuum t_heap')
    node.safe_psql('postgres', 'checkpoint')

    # Remember pre-backup size, path and per-page md5 of every fork.
    for idx_name in idx_ptrack:
        entry = idx_ptrack[idx_name]
        entry['old_size'] = self.get_fork_size(node, idx_name)
        entry['path'] = self.get_fork_path(node, idx_name)
        entry['old_pages'] = self.get_md5_per_page_for_fork(
            entry['path'], entry['old_size'])

    self.backup_node(
        backup_dir, 'node', node, options=['-j10', '--stream'])

    # Rewrite the table (and all of its indexes) via CLUSTER.
    node.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
    node.safe_psql('postgres', 'cluster t_heap using t_gist')
    node.safe_psql('postgres', 'checkpoint')

    # Ptrack-map sanity checks exist only for ptrack 1.x (PostgreSQL < 11).
    if node.major_version < 11:
        self.check_ptrack_map_sanity(node, idx_ptrack)

    self.backup_node(
        backup_dir, 'node', node,
        backup_type='ptrack', options=['-j10', '--stream'])

    pgdata = self.pgdata_content(node.data_dir)
    node.cleanup()

    self.restore_node(backup_dir, 'node', node)

    pgdata_restored = self.pgdata_content(node.data_dir)
    self.compare_pgdata(pgdata, pgdata_restored)
2528
# @unittest.skip("skip")
def test_ptrack_cluster_on_btree_replica(self):
    """
    Check that a PTRACK backup taken from a standby captures a CLUSTER
    (over a btree index) performed on the master: back up the replica,
    restore the backup into a fresh node and compare data directories.
    """
    master = self.make_simple_node(
        base_dir=os.path.join(self.module_name, self.fname, 'master'),
        set_replication=True,
        ptrack_enable=True,
        initdb_params=['--data-checksums'])

    backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
    self.init_pb(backup_dir)
    self.add_instance(backup_dir, 'master', master)
    master.slow_start()

    if master.major_version >= 11:
        master.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

    self.backup_node(backup_dir, 'master', master, options=['--stream'])

    # Spawn a synchronous physical standby from the master's FULL backup.
    replica = self.make_simple_node(
        base_dir=os.path.join(self.module_name, self.fname, 'replica'))
    replica.cleanup()

    self.restore_node(backup_dir, 'master', replica)

    self.add_instance(backup_dir, 'replica', replica)
    self.set_replica(master, replica, synchronous=True)
    replica.slow_start(replica=True)

    # Create table and indexes on the master; they stream to the replica.
    master.safe_psql(
        "postgres",
        "create extension bloom; create sequence t_seq; "
        "create table t_heap as select i as id, "
        "nextval('t_seq') as t_seq, md5(i::text) as text, "
        "md5(repeat(i::text,10))::tsvector as tsvector "
        "from generate_series(0,2560) i")

    for i in idx_ptrack:
        if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
            master.safe_psql(
                "postgres",
                "create index {0} on {1} using {2}({3})".format(
                    i, idx_ptrack[i]['relation'],
                    idx_ptrack[i]['type'],
                    idx_ptrack[i]['column']))

    master.safe_psql('postgres', 'vacuum t_heap')
    master.safe_psql('postgres', 'checkpoint')

    # FULL backup of the replica, fetching required info from the master.
    self.backup_node(
        backup_dir, 'replica', replica, options=[
            '-j10', '--stream', '--master-host=localhost',
            '--master-db=postgres', '--master-port={0}'.format(
                master.port)])

    # Remember pre-change size, path and per-page md5 of every fork.
    for i in idx_ptrack:
        # get size of heap and indexes. size calculated in pages
        idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i)
        # get path to heap and index files
        idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
        # calculate md5sums of pages
        idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
            idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

    # Rewrite the table on the master via CLUSTER.
    master.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
    master.safe_psql('postgres', 'cluster t_heap using t_btree')
    master.safe_psql('postgres', 'checkpoint')

    # Sync master and replica
    self.wait_until_replica_catch_with_master(master, replica)
    replica.safe_psql('postgres', 'checkpoint')

    # CHECK PTRACK SANITY (ptrack-map checks exist only for 1.x, i.e. < 11)
    if master.major_version < 11:
        self.check_ptrack_map_sanity(replica, idx_ptrack)

    self.backup_node(
        backup_dir, 'replica', replica,
        backup_type='ptrack', options=['-j10', '--stream'])

    pgdata = self.pgdata_content(replica.data_dir)

    node = self.make_simple_node(
        base_dir=os.path.join(self.module_name, self.fname, 'node'))
    node.cleanup()

    self.restore_node(backup_dir, 'replica', node)

    # BUGFIX: compare the backed-up replica against the *restored* node;
    # the previous code re-read replica.data_dir here, comparing the
    # replica with itself and validating nothing.
    pgdata_restored = self.pgdata_content(node.data_dir)
    self.compare_pgdata(pgdata, pgdata_restored)
2621
# @unittest.skip("skip")
def test_ptrack_cluster_on_gist_replica(self):
    """
    Check that a PTRACK backup taken from a standby captures a CLUSTER
    (over a GiST index) performed on the master: back up the replica,
    restore into a fresh node and, under --paranoia, compare the
    data directories.
    """
    master = self.make_simple_node(
        base_dir=os.path.join(self.module_name, self.fname, 'master'),
        set_replication=True,
        ptrack_enable=True)

    backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
    self.init_pb(backup_dir)
    self.add_instance(backup_dir, 'master', master)
    master.slow_start()

    if master.major_version >= 11:
        master.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

    self.backup_node(backup_dir, 'master', master, options=['--stream'])

    # Spawn a synchronous physical standby from the master's FULL backup.
    replica = self.make_simple_node(
        base_dir=os.path.join(self.module_name, self.fname, 'replica'))
    replica.cleanup()

    self.restore_node(backup_dir, 'master', replica)

    self.add_instance(backup_dir, 'replica', replica)
    self.set_replica(master, replica, 'replica', synchronous=True)
    replica.slow_start(replica=True)

    # Create table and indexes on the master; they stream to the replica.
    master.safe_psql(
        "postgres",
        "create extension bloom; create sequence t_seq; "
        "create table t_heap as select i as id, "
        "nextval('t_seq') as t_seq, md5(i::text) as text, "
        "md5(repeat(i::text,10))::tsvector as tsvector "
        "from generate_series(0,2560) i")

    for i in idx_ptrack:
        if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
            master.safe_psql(
                "postgres",
                "create index {0} on {1} using {2}({3})".format(
                    i, idx_ptrack[i]['relation'],
                    idx_ptrack[i]['type'],
                    idx_ptrack[i]['column']))

    master.safe_psql('postgres', 'vacuum t_heap')
    master.safe_psql('postgres', 'checkpoint')

    # Sync master and replica
    self.wait_until_replica_catch_with_master(master, replica)
    replica.safe_psql('postgres', 'checkpoint')

    self.backup_node(
        backup_dir, 'replica', replica, options=[
            '-j10', '--stream', '--master-host=localhost',
            '--master-db=postgres', '--master-port={0}'.format(
                master.port)])

    # Remember pre-change size, path and per-page md5 of every fork.
    for i in idx_ptrack:
        # get size of heap and indexes. size calculated in pages
        idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i)
        # get path to heap and index files
        idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
        # calculate md5sums of pages
        idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
            idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

    # Rewrite the table on the master via CLUSTER.
    master.safe_psql('postgres', 'DELETE FROM t_heap WHERE id%2 = 1')
    master.safe_psql('postgres', 'CLUSTER t_heap USING t_gist')

    if master.major_version < 11:
        master.safe_psql('postgres', 'CHECKPOINT')

    # Sync master and replica
    self.wait_until_replica_catch_with_master(master, replica)

    if master.major_version < 11:
        replica.safe_psql('postgres', 'CHECKPOINT')
        self.check_ptrack_map_sanity(replica, idx_ptrack)

    self.backup_node(
        backup_dir, 'replica', replica,
        backup_type='ptrack', options=['-j10', '--stream'])

    if self.paranoia:
        pgdata = self.pgdata_content(replica.data_dir)

    node = self.make_simple_node(
        base_dir=os.path.join(self.module_name, self.fname, 'node'))
    node.cleanup()

    self.restore_node(backup_dir, 'replica', node)

    if self.paranoia:
        # BUGFIX: compare the backed-up replica against the *restored*
        # node; the previous code re-read replica.data_dir here,
        # comparing the replica with itself and validating nothing.
        pgdata_restored = self.pgdata_content(node.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)
2720
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_empty(self):
    """Take backups of every available types and check that PTRACK is clean"""
    node = self.make_simple_node(
        base_dir=os.path.join(self.module_name, self.fname, 'node'),
        set_replication=True,
        ptrack_enable=True,
        initdb_params=['--data-checksums'])

    backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
    self.init_pb(backup_dir)
    self.add_instance(backup_dir, 'node', node)
    node.slow_start()

    node.safe_psql(
        "postgres",
        "CREATE EXTENSION ptrack")

    self.create_tblspace_in_node(node, 'somedata')

    # An empty table: no heap pages exist yet.
    node.safe_psql(
        "postgres",
        "create extension bloom; create sequence t_seq; "
        "create table t_heap "
        "(id int DEFAULT nextval('t_seq'), text text, tsvector tsvector) "
        "tablespace somedata")

    # FULL backup resets every ptrack bit.
    self.backup_node(
        backup_dir, 'node', node,
        options=['-j10', '--stream'])

    # Index DDL after the FULL backup is the change PTRACK must pick up.
    for idx_name, meta in idx_ptrack.items():
        if meta['type'] in ('heap', 'seq'):
            continue
        node.safe_psql(
            "postgres",
            "create index {0} on {1} using {2}({3}) "
            "tablespace somedata".format(
                idx_name, meta['relation'], meta['type'], meta['column']))

    node.safe_psql('postgres', 'checkpoint')

    node_restored = self.make_simple_node(
        base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
    node_restored.cleanup()

    src_tblspc = self.get_tblspace_path(node, 'somedata')
    dst_tblspc = self.get_tblspace_path(node_restored, 'somedata')

    # Incremental PTRACK backup of the (still empty) relations.
    backup_id = self.backup_node(
        backup_dir, 'node', node, backup_type='ptrack',
        options=['-j10', '--stream'])

    if self.paranoia:
        pgdata = self.pgdata_content(node.data_dir)

    self.restore_node(
        backup_dir, 'node', node_restored,
        backup_id=backup_id,
        options=[
            "-j", "4",
            "-T{0}={1}".format(src_tblspc, dst_tblspc)])

    if self.paranoia:
        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)
2793
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_empty_replica(self):
    """
    Take backups of every available types from master
    and check that PTRACK on replica is clean
    """
    master = self.make_simple_node(
        base_dir=os.path.join(self.module_name, self.fname, 'master'),
        set_replication=True,
        initdb_params=['--data-checksums'],
        ptrack_enable=True)

    backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
    self.init_pb(backup_dir)
    self.add_instance(backup_dir, 'master', master)
    master.slow_start()

    if master.major_version >= 11:
        master.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

    self.backup_node(backup_dir, 'master', master, options=['--stream'])

    # Spawn a synchronous physical standby from the master's FULL backup.
    replica = self.make_simple_node(
        base_dir=os.path.join(self.module_name, self.fname, 'replica'))
    replica.cleanup()

    self.restore_node(backup_dir, 'master', replica)

    self.add_instance(backup_dir, 'replica', replica)
    self.set_replica(master, replica, synchronous=True)
    replica.slow_start(replica=True)

    # Create table (empty: no heap pages yet)
    master.safe_psql(
        "postgres",
        "create extension bloom; create sequence t_seq; "
        "create table t_heap "
        "(id int DEFAULT nextval('t_seq'), text text, tsvector tsvector)")
    self.wait_until_replica_catch_with_master(master, replica)

    # Take FULL backup
    self.backup_node(
        backup_dir,
        'replica',
        replica,
        options=[
            '-j10', '--stream',
            '--master-host=localhost',
            '--master-db=postgres',
            '--master-port={0}'.format(master.port)])

    # Create indexes — the only change between the FULL and PTRACK backups
    for i in idx_ptrack:
        if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
            master.safe_psql(
                "postgres",
                "create index {0} on {1} using {2}({3})".format(
                    i, idx_ptrack[i]['relation'],
                    idx_ptrack[i]['type'],
                    idx_ptrack[i]['column']))

    self.wait_until_replica_catch_with_master(master, replica)

    # Take PTRACK backup
    backup_id = self.backup_node(
        backup_dir,
        'replica',
        replica,
        backup_type='ptrack',
        options=[
            '-j1', '--stream',
            '--master-host=localhost',
            '--master-db=postgres',
            '--master-port={0}'.format(master.port)])

    if self.paranoia:
        pgdata = self.pgdata_content(replica.data_dir)

    node_restored = self.make_simple_node(
        base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
    node_restored.cleanup()

    self.restore_node(
        backup_dir, 'replica', node_restored,
        backup_id=backup_id, options=["-j", "4"])

    if self.paranoia:
        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)
2886
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_truncate(self):
    """
    TRUNCATE between a FULL and a PTRACK backup: the PTRACK backup must
    capture the truncation, leave every ptrack bit clean (pre-11 only)
    and restore to a data directory identical to the backed-up one.
    """
    node = self.make_simple_node(
        base_dir=os.path.join(self.module_name, self.fname, 'node'),
        set_replication=True,
        ptrack_enable=True,
        initdb_params=['--data-checksums'])

    backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
    self.init_pb(backup_dir)
    self.add_instance(backup_dir, 'node', node)
    node.slow_start()

    node.safe_psql(
        "postgres",
        "CREATE EXTENSION ptrack")

    self.create_tblspace_in_node(node, 'somedata')

    # Create table and indexes
    node.safe_psql(
        "postgres",
        "create extension bloom; create sequence t_seq; "
        "create table t_heap tablespace somedata "
        "as select i as id, md5(i::text) as text, "
        "md5(repeat(i::text,10))::tsvector as tsvector "
        "from generate_series(0,2560) i")

    # Pre-11 only: ptrack 1.x helpers need the extra per-index relations.
    if node.major_version < 11:
        for i in idx_ptrack:
            if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
                node.safe_psql(
                    "postgres",
                    "create index {0} on {1} using {2}({3}) "
                    "tablespace somedata".format(
                        i, idx_ptrack[i]['relation'],
                        idx_ptrack[i]['type'], idx_ptrack[i]['column']))

    self.backup_node(
        backup_dir, 'node', node, options=['--stream'])

    node.safe_psql('postgres', 'truncate t_heap')
    node.safe_psql('postgres', 'checkpoint')

    if node.major_version < 11:
        for i in idx_ptrack:
            # get fork size and calculate it in pages
            idx_ptrack[i]['old_size'] = self.get_fork_size(node, i)
            # get path to heap and index files
            idx_ptrack[i]['path'] = self.get_fork_path(node, i)
            # calculate md5sums for every page of this fork
            idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
                idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

    # Make backup to clean every ptrack
    self.backup_node(
        backup_dir, 'node', node,
        backup_type='ptrack', options=['-j10', '--stream'])

    pgdata = self.pgdata_content(node.data_dir)

    if node.major_version < 11:
        for i in idx_ptrack:
            idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']])
            self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['old_size'])

    node.cleanup()
    # Drop the old tablespace directory so restore can recreate it cleanly.
    shutil.rmtree(
        self.get_tblspace_path(node, 'somedata'),
        ignore_errors=True)

    self.restore_node(backup_dir, 'node', node)

    pgdata_restored = self.pgdata_content(node.data_dir)
    self.compare_pgdata(pgdata, pgdata_restored)
2964
2965# @unittest.skip("skip")
    def test_basic_ptrack_truncate_replica(self):
        """
        TRUNCATE a table on the master, replay it on a synchronous replica,
        then take a PTRACK backup from the replica while WAL replay is
        paused, and verify the backup restores to a working cluster.
        """
        master = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'master'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'max_wal_size': '32MB',
                'archive_timeout': '10s',
                'checkpoint_timeout': '5min'})

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'master', master)
        master.slow_start()

        if master.major_version >= 11:
            master.safe_psql(
                "postgres",
                "CREATE EXTENSION ptrack")

        # FULL backup of the master used to seed the replica
        self.backup_node(backup_dir, 'master', master, options=['--stream'])

        replica = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'replica'))
        replica.cleanup()

        self.restore_node(backup_dir, 'master', replica)

        self.add_instance(backup_dir, 'replica', replica)
        self.set_replica(master, replica, 'replica', synchronous=True)
        replica.slow_start(replica=True)

        # Create table and indexes
        master.safe_psql(
            "postgres",
            "create extension bloom; create sequence t_seq; "
            "create table t_heap "
            "as select i as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,2560) i")

        for i in idx_ptrack:
            if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
                master.safe_psql(
                    "postgres",
                    "create index {0} on {1} using {2}({3}) ".format(
                        i, idx_ptrack[i]['relation'],
                        idx_ptrack[i]['type'], idx_ptrack[i]['column']))

        # Sync master and replica
        self.wait_until_replica_catch_with_master(master, replica)
        replica.safe_psql('postgres', 'checkpoint')

        # NOTE(review): setUp skips PostgreSQL < 11, so these `< 11`
        # branches look dead; kept for parity with ptrack 1.x tests.
        if replica.major_version < 11:
            for i in idx_ptrack:
                # get fork size and calculate it in pages
                idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i)
                # get path to heap and index files
                idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
                # calculate md5sums for every page of this fork
                idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
                    idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

        # Make backup to clean every ptrack
        self.backup_node(
            backup_dir, 'replica', replica,
            options=[
                '-j10',
                '--stream',
                '--master-host=localhost',
                '--master-db=postgres',
                '--master-port={0}'.format(master.port)])

        if replica.major_version < 11:
            for i in idx_ptrack:
                idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                    replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']])
                self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['old_size'])

        master.safe_psql('postgres', 'truncate t_heap')

        # Sync master and replica
        self.wait_until_replica_catch_with_master(master, replica)

        # Pause WAL replay on the replica before the PTRACK backup
        # (pg_xlog_* was renamed to pg_wal_* in PostgreSQL 10)
        if replica.major_version < 10:
            replica.safe_psql(
                "postgres",
                "select pg_xlog_replay_pause()")
        else:
            replica.safe_psql(
                "postgres",
                "select pg_wal_replay_pause()")

        self.backup_node(
            backup_dir, 'replica', replica, backup_type='ptrack',
            options=[
                '-j10',
                '--stream',
                '--master-host=localhost',
                '--master-db=postgres',
                '--master-port={0}'.format(master.port)])

        pgdata = self.pgdata_content(replica.data_dir)

        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'))
        node.cleanup()

        self.restore_node(backup_dir, 'replica', node, data_dir=node.data_dir)

        pgdata_restored = self.pgdata_content(node.data_dir)

        if self.paranoia:
            self.compare_pgdata(pgdata, pgdata_restored)

        self.set_auto_conf(node, {'port': node.port})

        node.slow_start()

        # Sanity: the restored node must be able to answer a trivial query
        node.safe_psql(
            'postgres',
            'select 1')
3089
3090# @unittest.skip("skip")
3091# @unittest.expectedFailure
    def test_ptrack_vacuum(self):
        """
        Delete half the rows of a table in a dedicated tablespace, VACUUM
        it, take a PTRACK backup and verify the restored data directory
        matches the original (modulo known-bug exclusions).
        """
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        node.safe_psql(
            "postgres",
            "CREATE EXTENSION ptrack")

        self.create_tblspace_in_node(node, 'somedata')

        # Create table and indexes
        node.safe_psql(
            "postgres",
            "create extension bloom; create sequence t_seq; "
            "create table t_heap tablespace somedata "
            "as select i as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,2560) i")
        # One index per access method listed in idx_ptrack (skip the heap
        # and sequence pseudo-entries)
        for i in idx_ptrack:
            if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
                node.safe_psql(
                    "postgres",
                    "create index {0} on {1} using {2}({3}) "
                    "tablespace somedata".format(
                        i, idx_ptrack[i]['relation'],
                        idx_ptrack[i]['type'],
                        idx_ptrack[i]['column']))

        # Files excluded from comparison because of known server-side bugs
        comparision_exclusion = self.get_known_bugs_comparision_exclusion_dict(node)

        node.safe_psql('postgres', 'vacuum t_heap')
        node.safe_psql('postgres', 'checkpoint')

        # Make full backup to clean every ptrack
        self.backup_node(
            backup_dir, 'node', node, options=['-j10', '--stream'])

        # NOTE(review): setUp skips PostgreSQL < 11, so this branch looks
        # dead; kept for parity with ptrack 1.x tests.
        if node.major_version < 11:
            for i in idx_ptrack:
                # get fork size and calculate it in pages
                idx_ptrack[i]['old_size'] = self.get_fork_size(node, i)
                # get path to heap and index files
                idx_ptrack[i]['path'] = self.get_fork_path(node, i)
                # calculate md5sums for every page of this fork
                idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
                    idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
                idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                    node, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']])
                self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['old_size'])

        # Delete some rows, vacuum it and make checkpoint
        node.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
        node.safe_psql('postgres', 'vacuum t_heap')
        node.safe_psql('postgres', 'checkpoint')

        # CHECK PTRACK SANITY
        if node.major_version < 11:
            self.check_ptrack_map_sanity(node, idx_ptrack)

        self.backup_node(
            backup_dir, 'node', node,
            backup_type='ptrack', options=['-j10', '--stream'])

        pgdata = self.pgdata_content(node.data_dir)
        node.cleanup()

        shutil.rmtree(
            self.get_tblspace_path(node, 'somedata'),
            ignore_errors=True)

        self.restore_node(backup_dir, 'node', node)

        pgdata_restored = self.pgdata_content(node.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored, comparision_exclusion)
3174
3175# @unittest.skip("skip")
    def test_ptrack_vacuum_replica(self):
        """
        Run DELETE + VACUUM on the master, replay on a synchronous replica,
        take a PTRACK backup from the replica and verify it restores to a
        data directory identical to the replica's.
        """
        master = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'master'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'checkpoint_timeout': '30'})

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'master', master)
        master.slow_start()

        if master.major_version >= 11:
            master.safe_psql(
                "postgres",
                "CREATE EXTENSION ptrack")

        # FULL backup of the master used to seed the replica
        self.backup_node(backup_dir, 'master', master, options=['--stream'])

        replica = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'replica'))
        replica.cleanup()

        self.restore_node(backup_dir, 'master', replica)

        self.add_instance(backup_dir, 'replica', replica)
        self.set_replica(master, replica, 'replica', synchronous=True)
        replica.slow_start(replica=True)

        # Create table and indexes
        master.safe_psql(
            "postgres",
            "create extension bloom; create sequence t_seq; "
            "create table t_heap as select i as id, "
            "md5(i::text) as text, md5(repeat(i::text,10))::tsvector "
            "as tsvector from generate_series(0,2560) i")

        for i in idx_ptrack:
            if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
                master.safe_psql(
                    "postgres",
                    "create index {0} on {1} using {2}({3})".format(
                        i, idx_ptrack[i]['relation'],
                        idx_ptrack[i]['type'], idx_ptrack[i]['column']))

        master.safe_psql('postgres', 'vacuum t_heap')
        master.safe_psql('postgres', 'checkpoint')

        # Sync master and replica
        self.wait_until_replica_catch_with_master(master, replica)
        replica.safe_psql('postgres', 'checkpoint')

        # Make FULL backup to clean every ptrack
        self.backup_node(
            backup_dir, 'replica', replica, options=[
                '-j10', '--master-host=localhost',
                '--master-db=postgres',
                '--master-port={0}'.format(master.port),
                '--stream'])

        # NOTE(review): setUp skips PostgreSQL < 11, so these `< 11`
        # branches look dead; kept for parity with ptrack 1.x tests.
        if replica.major_version < 11:
            for i in idx_ptrack:
                # get fork size and calculate it in pages
                idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i)
                # get path to heap and index files
                idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
                # calculate md5sums for every page of this fork
                idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
                    idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
                idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
                    replica, idx_ptrack[i]['path'], [idx_ptrack[i]['old_size']])
                self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['old_size'])

        # Delete some rows, vacuum it and make checkpoint
        master.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
        master.safe_psql('postgres', 'vacuum t_heap')
        master.safe_psql('postgres', 'checkpoint')

        # Sync master and replica
        self.wait_until_replica_catch_with_master(master, replica)
        replica.safe_psql('postgres', 'checkpoint')

        # CHECK PTRACK SANITY
        if replica.major_version < 11:
            self.check_ptrack_map_sanity(master, idx_ptrack)

        self.backup_node(
            backup_dir, 'replica', replica,
            backup_type='ptrack', options=['-j10', '--stream'])

        pgdata = self.pgdata_content(replica.data_dir)

        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'))
        node.cleanup()

        self.restore_node(backup_dir, 'replica', node, data_dir=node.data_dir)

        pgdata_restored = self.pgdata_content(node.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)
3278
3279# @unittest.skip("skip")
3280# @unittest.expectedFailure
3281def test_ptrack_vacuum_bits_frozen(self):
3282node = self.make_simple_node(
3283base_dir=os.path.join(self.module_name, self.fname, 'node'),
3284set_replication=True,
3285ptrack_enable=True,
3286initdb_params=['--data-checksums'])
3287
3288backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
3289self.init_pb(backup_dir)
3290self.add_instance(backup_dir, 'node', node)
3291node.slow_start()
3292
3293node.safe_psql(
3294"postgres",
3295"CREATE EXTENSION ptrack")
3296
3297self.create_tblspace_in_node(node, 'somedata')
3298
3299# Create table and indexes
3300res = node.safe_psql(
3301"postgres",
3302"create extension bloom; create sequence t_seq; "
3303"create table t_heap tablespace somedata "
3304"as select i as id, md5(i::text) as text, "
3305"md5(repeat(i::text,10))::tsvector as tsvector "
3306"from generate_series(0,2560) i")
3307for i in idx_ptrack:
3308if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
3309node.safe_psql(
3310"postgres",
3311"create index {0} on {1} using {2}({3}) "
3312"tablespace somedata".format(
3313i, idx_ptrack[i]['relation'],
3314idx_ptrack[i]['type'],
3315idx_ptrack[i]['column']))
3316
3317comparision_exclusion = self.get_known_bugs_comparision_exclusion_dict(node)
3318node.safe_psql('postgres', 'checkpoint')
3319
3320self.backup_node(
3321backup_dir, 'node', node, options=['-j10', '--stream'])
3322
3323node.safe_psql('postgres', 'vacuum freeze t_heap')
3324node.safe_psql('postgres', 'checkpoint')
3325
3326if node.major_version < 11:
3327for i in idx_ptrack:
3328# get size of heap and indexes. size calculated in pages
3329idx_ptrack[i]['old_size'] = self.get_fork_size(node, i)
3330# get path to heap and index files
3331idx_ptrack[i]['path'] = self.get_fork_path(node, i)
3332# calculate md5sums of pages
3333idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
3334idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
3335
3336# CHECK PTRACK SANITY
3337if node.major_version < 11:
3338self.check_ptrack_map_sanity(node, idx_ptrack)
3339
3340self.backup_node(
3341backup_dir, 'node', node,
3342backup_type='ptrack', options=['-j10', '--stream'])
3343
3344pgdata = self.pgdata_content(node.data_dir)
3345node.cleanup()
3346shutil.rmtree(
3347self.get_tblspace_path(node, 'somedata'),
3348ignore_errors=True)
3349
3350self.restore_node(backup_dir, 'node', node)
3351
3352pgdata_restored = self.pgdata_content(node.data_dir)
3353self.compare_pgdata(pgdata, pgdata_restored, comparision_exclusion)
3354
3355# @unittest.skip("skip")
    def test_ptrack_vacuum_bits_frozen_replica(self):
        """
        VACUUM FREEZE on the master, replay on a synchronous replica, take
        a PTRACK backup from the replica and verify it restores to an
        identical data directory.
        """
        master = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'master'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'master', master)
        master.slow_start()

        if master.major_version >= 11:
            master.safe_psql(
                "postgres",
                "CREATE EXTENSION ptrack")

        # FULL backup of the master used to seed the replica
        self.backup_node(backup_dir, 'master', master, options=['--stream'])

        replica = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'replica'))
        replica.cleanup()

        self.restore_node(backup_dir, 'master', replica)

        self.add_instance(backup_dir, 'replica', replica)
        # NOTE(review): unlike the sibling replica tests this call omits the
        # explicit replica name argument — confirm the default matches the
        # 'replica' instance registered above.
        self.set_replica(master, replica, synchronous=True)
        replica.slow_start(replica=True)

        # Create table and indexes
        master.safe_psql(
            "postgres",
            "create extension bloom; create sequence t_seq; "
            "create table t_heap as select i as id, "
            "md5(i::text) as text, md5(repeat(i::text,10))::tsvector "
            "as tsvector from generate_series(0,2560) i")
        for i in idx_ptrack:
            if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
                master.safe_psql(
                    "postgres",
                    "create index {0} on {1} using {2}({3})".format(
                        i, idx_ptrack[i]['relation'],
                        idx_ptrack[i]['type'],
                        idx_ptrack[i]['column']))

        master.safe_psql('postgres', 'checkpoint')

        # Sync master and replica
        self.wait_until_replica_catch_with_master(master, replica)
        replica.safe_psql('postgres', 'checkpoint')

        # Take backup to clean every ptrack
        self.backup_node(
            backup_dir, 'replica', replica,
            options=[
                '-j10',
                '--master-host=localhost',
                '--master-db=postgres',
                '--master-port={0}'.format(master.port),
                '--stream'])

        # NOTE(review): setUp skips PostgreSQL < 11, so this branch looks
        # dead; kept for parity with ptrack 1.x tests.
        if replica.major_version < 11:
            for i in idx_ptrack:
                # get size of heap and indexes. size calculated in pages
                idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i)
                # get path to heap and index files
                idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
                # calculate md5sums of pages
                idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
                    idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

        master.safe_psql('postgres', 'vacuum freeze t_heap')
        master.safe_psql('postgres', 'checkpoint')

        # Sync master and replica
        self.wait_until_replica_catch_with_master(master, replica)
        replica.safe_psql('postgres', 'checkpoint')

        # CHECK PTRACK SANITY
        if replica.major_version < 11:
            self.check_ptrack_map_sanity(master, idx_ptrack)

        self.backup_node(
            backup_dir, 'replica', replica, backup_type='ptrack',
            options=['-j10', '--stream'])

        pgdata = self.pgdata_content(replica.data_dir)
        replica.cleanup()

        self.restore_node(backup_dir, 'replica', replica)

        pgdata_restored = self.pgdata_content(replica.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)
3449
3450# @unittest.skip("skip")
3451# @unittest.expectedFailure
3452def test_ptrack_vacuum_bits_visibility(self):
3453node = self.make_simple_node(
3454base_dir=os.path.join(self.module_name, self.fname, 'node'),
3455set_replication=True,
3456ptrack_enable=True,
3457initdb_params=['--data-checksums'])
3458
3459backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
3460self.init_pb(backup_dir)
3461self.add_instance(backup_dir, 'node', node)
3462node.slow_start()
3463
3464node.safe_psql(
3465"postgres",
3466"CREATE EXTENSION ptrack")
3467
3468self.create_tblspace_in_node(node, 'somedata')
3469
3470# Create table and indexes
3471res = node.safe_psql(
3472"postgres",
3473"create extension bloom; create sequence t_seq; "
3474"create table t_heap tablespace somedata "
3475"as select i as id, md5(i::text) as text, "
3476"md5(repeat(i::text,10))::tsvector as tsvector "
3477"from generate_series(0,2560) i")
3478
3479for i in idx_ptrack:
3480if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
3481node.safe_psql(
3482"postgres",
3483"create index {0} on {1} using {2}({3}) "
3484"tablespace somedata".format(
3485i, idx_ptrack[i]['relation'],
3486idx_ptrack[i]['type'], idx_ptrack[i]['column']))
3487
3488comparision_exclusion = self.get_known_bugs_comparision_exclusion_dict(node)
3489node.safe_psql('postgres', 'checkpoint')
3490
3491self.backup_node(
3492backup_dir, 'node', node, options=['-j10', '--stream'])
3493
3494if node.major_version < 11:
3495for i in idx_ptrack:
3496# get size of heap and indexes. size calculated in pages
3497idx_ptrack[i]['old_size'] = self.get_fork_size(node, i)
3498# get path to heap and index files
3499idx_ptrack[i]['path'] = self.get_fork_path(node, i)
3500# calculate md5sums of pages
3501idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
3502idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
3503
3504node.safe_psql('postgres', 'vacuum t_heap')
3505node.safe_psql('postgres', 'checkpoint')
3506
3507# CHECK PTRACK SANITY
3508if node.major_version < 11:
3509self.check_ptrack_map_sanity(node, idx_ptrack)
3510
3511self.backup_node(
3512backup_dir, 'node', node,
3513backup_type='ptrack', options=['-j10', '--stream'])
3514
3515pgdata = self.pgdata_content(node.data_dir)
3516node.cleanup()
3517shutil.rmtree(
3518self.get_tblspace_path(node, 'somedata'),
3519ignore_errors=True)
3520
3521self.restore_node(backup_dir, 'node', node)
3522
3523pgdata_restored = self.pgdata_content(node.data_dir)
3524self.compare_pgdata(pgdata, pgdata_restored, comparision_exclusion)
3525
3526# @unittest.skip("skip")
3527# @unittest.expectedFailure
3528def test_ptrack_vacuum_full_2(self):
3529node = self.make_simple_node(
3530base_dir=os.path.join(self.module_name, self.fname, 'node'),
3531set_replication=True,
3532ptrack_enable=True,
3533pg_options={ 'wal_log_hints': 'on' })
3534
3535backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
3536self.init_pb(backup_dir)
3537self.add_instance(backup_dir, 'node', node)
3538node.slow_start()
3539
3540node.safe_psql(
3541"postgres",
3542"CREATE EXTENSION ptrack")
3543
3544self.create_tblspace_in_node(node, 'somedata')
3545
3546# Create table and indexes
3547res = node.safe_psql(
3548"postgres",
3549"create extension bloom; create sequence t_seq; "
3550"create table t_heap tablespace somedata "
3551"as select i as id, md5(i::text) as text, "
3552"md5(repeat(i::text,10))::tsvector as tsvector "
3553"from generate_series(0,2560) i")
3554for i in idx_ptrack:
3555if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
3556node.safe_psql(
3557"postgres", "create index {0} on {1} "
3558"using {2}({3}) tablespace somedata".format(
3559i, idx_ptrack[i]['relation'],
3560idx_ptrack[i]['type'], idx_ptrack[i]['column']))
3561
3562node.safe_psql('postgres', 'vacuum t_heap')
3563node.safe_psql('postgres', 'checkpoint')
3564
3565self.backup_node(
3566backup_dir, 'node', node, options=['-j10', '--stream'])
3567
3568if node.major_version < 11:
3569for i in idx_ptrack:
3570# get size of heap and indexes. size calculated in pages
3571idx_ptrack[i]['old_size'] = self.get_fork_size(node, i)
3572# get path to heap and index files
3573idx_ptrack[i]['path'] = self.get_fork_path(node, i)
3574# calculate md5sums of pages
3575idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
3576idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
3577
3578node.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
3579node.safe_psql('postgres', 'vacuum full t_heap')
3580node.safe_psql('postgres', 'checkpoint')
3581
3582if node.major_version < 11:
3583self.check_ptrack_map_sanity(node, idx_ptrack)
3584
3585self.backup_node(
3586backup_dir, 'node', node,
3587backup_type='ptrack', options=['-j10', '--stream'])
3588
3589pgdata = self.pgdata_content(node.data_dir)
3590node.cleanup()
3591
3592shutil.rmtree(
3593self.get_tblspace_path(node, 'somedata'),
3594ignore_errors=True)
3595
3596self.restore_node(backup_dir, 'node', node)
3597
3598pgdata_restored = self.pgdata_content(node.data_dir)
3599self.compare_pgdata(pgdata, pgdata_restored)
3600
3601# @unittest.skip("skip")
3602# @unittest.expectedFailure
    def test_ptrack_vacuum_full_replica(self):
        """
        DELETE + VACUUM FULL on the master (relation rewrite), replay on a
        synchronous replica, take a PTRACK backup from the replica and
        verify it restores to an identical data directory.
        """
        master = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'master'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'master', master)
        master.slow_start()

        if master.major_version >= 11:
            master.safe_psql(
                "postgres",
                "CREATE EXTENSION ptrack")

        # FULL backup of the master used to seed the replica
        self.backup_node(backup_dir, 'master', master, options=['--stream'])
        replica = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'replica'))
        replica.cleanup()

        self.restore_node(backup_dir, 'master', replica)

        self.add_instance(backup_dir, 'replica', replica)
        self.set_replica(master, replica, 'replica', synchronous=True)
        replica.slow_start(replica=True)

        # Create table and indexes.  This test uses a much larger table
        # (256000 rows) so VACUUM FULL produces a substantial rewrite.
        master.safe_psql(
            "postgres",
            "create extension bloom; create sequence t_seq; "
            "create table t_heap as select i as id, "
            "md5(i::text) as text, md5(repeat(i::text,10))::tsvector as "
            "tsvector from generate_series(0,256000) i")

        # NOTE(review): index creation is gated on the *master* version
        # here, while sibling tests gate measurements on the replica —
        # they run the same binaries, but confirm if versions may differ.
        if master.major_version < 11:
            for i in idx_ptrack:
                if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
                    master.safe_psql(
                        "postgres",
                        "create index {0} on {1} using {2}({3})".format(
                            i, idx_ptrack[i]['relation'],
                            idx_ptrack[i]['type'],
                            idx_ptrack[i]['column']))

        master.safe_psql('postgres', 'vacuum t_heap')
        master.safe_psql('postgres', 'checkpoint')

        # Sync master and replica
        self.wait_until_replica_catch_with_master(master, replica)
        replica.safe_psql('postgres', 'checkpoint')

        # Take FULL backup to clean every ptrack
        self.backup_node(
            backup_dir, 'replica', replica,
            options=[
                '-j10',
                '--master-host=localhost',
                '--master-db=postgres',
                '--master-port={0}'.format(master.port),
                '--stream'])

        if replica.major_version < 11:
            for i in idx_ptrack:
                # get size of heap and indexes. size calculated in pages
                idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i)
                # get path to heap and index files
                idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
                # calculate md5sums of pages
                idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
                    idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

        master.safe_psql('postgres', 'delete from t_heap where id%2 = 1')
        master.safe_psql('postgres', 'vacuum full t_heap')
        master.safe_psql('postgres', 'checkpoint')

        # Sync master and replica
        self.wait_until_replica_catch_with_master(master, replica)
        replica.safe_psql('postgres', 'checkpoint')

        if replica.major_version < 11:
            self.check_ptrack_map_sanity(master, idx_ptrack)

        self.backup_node(
            backup_dir, 'replica', replica,
            backup_type='ptrack', options=['-j10', '--stream'])

        pgdata = self.pgdata_content(replica.data_dir)
        replica.cleanup()

        self.restore_node(backup_dir, 'replica', replica)

        pgdata_restored = self.pgdata_content(replica.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)
3698
3699# @unittest.skip("skip")
3700# @unittest.expectedFailure
3701def test_ptrack_vacuum_truncate_2(self):
3702node = self.make_simple_node(
3703base_dir=os.path.join(self.module_name, self.fname, 'node'),
3704set_replication=True,
3705ptrack_enable=True,
3706initdb_params=['--data-checksums'])
3707
3708backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
3709self.init_pb(backup_dir)
3710self.add_instance(backup_dir, 'node', node)
3711node.slow_start()
3712
3713node.safe_psql(
3714"postgres",
3715"CREATE EXTENSION ptrack")
3716
3717# Create table and indexes
3718res = node.safe_psql(
3719"postgres",
3720"create extension bloom; create sequence t_seq; "
3721"create table t_heap "
3722"as select i as id, md5(i::text) as text, "
3723"md5(repeat(i::text,10))::tsvector as tsvector "
3724"from generate_series(0,2560) i")
3725
3726if node.major_version < 11:
3727for i in idx_ptrack:
3728if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
3729node.safe_psql(
3730"postgres", "create index {0} on {1} using {2}({3})".format(
3731i, idx_ptrack[i]['relation'],
3732idx_ptrack[i]['type'], idx_ptrack[i]['column']))
3733
3734node.safe_psql('postgres', 'VACUUM t_heap')
3735
3736self.backup_node(
3737backup_dir, 'node', node, options=['-j10', '--stream'])
3738
3739if node.major_version < 11:
3740for i in idx_ptrack:
3741# get size of heap and indexes. size calculated in pages
3742idx_ptrack[i]['old_size'] = self.get_fork_size(node, i)
3743# get path to heap and index files
3744idx_ptrack[i]['path'] = self.get_fork_path(node, i)
3745# calculate md5sums of pages
3746idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
3747idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])
3748
3749node.safe_psql('postgres', 'DELETE FROM t_heap WHERE id > 128')
3750node.safe_psql('postgres', 'VACUUM t_heap')
3751node.safe_psql('postgres', 'CHECKPOINT')
3752
3753# CHECK PTRACK SANITY
3754if node.major_version < 11:
3755self.check_ptrack_map_sanity(node, idx_ptrack)
3756
3757self.backup_node(
3758backup_dir, 'node', node,
3759backup_type='ptrack', options=['--stream'])
3760
3761pgdata = self.pgdata_content(node.data_dir)
3762
3763node_restored = self.make_simple_node(
3764base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
3765node_restored.cleanup()
3766
3767self.restore_node(backup_dir, 'node', node_restored)
3768
3769pgdata_restored = self.pgdata_content(node_restored.data_dir)
3770self.compare_pgdata(pgdata, pgdata_restored)
3771
3772# @unittest.skip("skip")
3773# @unittest.expectedFailure
    def test_ptrack_vacuum_truncate_replica(self):
        """
        DELETE the tail of a table and VACUUM on the master (relation
        truncation), replay on a synchronous replica, take a PTRACK backup
        from the replica and verify it restores to an identical data
        directory.
        """
        master = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'master'),
            set_replication=True,
            ptrack_enable=True,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'master', master)
        master.slow_start()

        if master.major_version >= 11:
            master.safe_psql(
                "postgres",
                "CREATE EXTENSION ptrack")

        # FULL backup of the master used to seed the replica
        self.backup_node(backup_dir, 'master', master, options=['--stream'])

        replica = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'replica'))
        replica.cleanup()

        self.restore_node(backup_dir, 'master', replica)

        self.add_instance(backup_dir, 'replica', replica)
        self.set_replica(master, replica, 'replica', synchronous=True)
        replica.slow_start(replica=True)

        # Create table and indexes
        master.safe_psql(
            "postgres",
            "create extension bloom; create sequence t_seq; "
            "create table t_heap as select i as id, "
            "md5(i::text) as text, md5(repeat(i::text,10))::tsvector "
            "as tsvector from generate_series(0,2560) i")

        for i in idx_ptrack:
            if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
                master.safe_psql(
                    "postgres", "create index {0} on {1} "
                    "using {2}({3})".format(
                        i, idx_ptrack[i]['relation'],
                        idx_ptrack[i]['type'], idx_ptrack[i]['column']))

        master.safe_psql('postgres', 'vacuum t_heap')
        master.safe_psql('postgres', 'checkpoint')

        # Take FULL backup to clean every ptrack
        self.backup_node(
            backup_dir, 'replica', replica,
            options=[
                '-j10',
                '--stream',
                '--master-host=localhost',
                '--master-db=postgres',
                '--master-port={0}'.format(master.port)
            ]
        )

        # NOTE(review): the replica-side measurements below are gated on
        # master.major_version, while the sibling tests gate on
        # replica.major_version — same binaries in practice, but confirm
        # the intent. setUp skips PostgreSQL < 11, so the branches look
        # dead anyway.
        if master.major_version < 11:
            for i in idx_ptrack:
                # get size of heap and indexes. size calculated in pages
                idx_ptrack[i]['old_size'] = self.get_fork_size(replica, i)
                # get path to heap and index files
                idx_ptrack[i]['path'] = self.get_fork_path(replica, i)
                # calculate md5sums of pages
                idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork(
                    idx_ptrack[i]['path'], idx_ptrack[i]['old_size'])

        # Delete the tail of the table so VACUUM can truncate the relation
        master.safe_psql('postgres', 'DELETE FROM t_heap WHERE id > 128;')
        master.safe_psql('postgres', 'VACUUM t_heap')
        master.safe_psql('postgres', 'CHECKPOINT')

        # Sync master and replica
        self.wait_until_replica_catch_with_master(master, replica)
        replica.safe_psql('postgres', 'CHECKPOINT')

        # CHECK PTRACK SANITY
        if master.major_version < 11:
            self.check_ptrack_map_sanity(master, idx_ptrack)

        self.backup_node(
            backup_dir, 'replica', replica, backup_type='ptrack',
            options=[
                '--stream',
                '--log-level-file=INFO',
                '--archive-timeout=30'])

        pgdata = self.pgdata_content(replica.data_dir)

        node_restored = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
        node_restored.cleanup()

        self.restore_node(backup_dir, 'replica', node_restored)

        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)
3873
@unittest.skip("skip")
def test_ptrack_recovery(self):
    """
    Check that ptrack map contain correct bits after recovery.
    Actual only for PTRACK 1.x

    Kills the postmaster with an immediate shutdown, restarts the node,
    then verifies that the ptrack bits recorded for every relation fork
    are correct after crash recovery.
    """
    node = self.make_simple_node(
        base_dir=os.path.join(self.module_name, self.fname, 'node'),
        set_replication=True,
        ptrack_enable=True,
        initdb_params=['--data-checksums'])

    backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
    self.init_pb(backup_dir)
    self.add_instance(backup_dir, 'node', node)
    node.slow_start()

    self.create_tblspace_in_node(node, 'somedata')

    # Create table
    node.safe_psql(
        "postgres",
        "create extension bloom; create sequence t_seq; "
        "create table t_heap tablespace somedata "
        "as select i as id, md5(i::text) as text, "
        "md5(repeat(i::text,10))::tsvector as tsvector "
        "from generate_series(0,2560) i")

    # Create indexes
    for i in idx_ptrack:
        if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
            node.safe_psql(
                "postgres", "create index {0} on {1} using {2}({3}) "
                "tablespace somedata".format(
                    i, idx_ptrack[i]['relation'],
                    idx_ptrack[i]['type'], idx_ptrack[i]['column']))

        # get size of heap and indexes. size calculated in pages
        idx_ptrack[i]['size'] = int(self.get_fork_size(node, i))
        # get path to heap and index files
        idx_ptrack[i]['path'] = self.get_fork_path(node, i)

    if self.verbose:
        print('Killing postmaster. Losing Ptrack changes')
    # Immediate shutdown == crash; recovery must rebuild correct ptrack bits.
    node.stop(['-m', 'immediate', '-D', node.data_dir])
    if not node.status():
        node.slow_start()
    else:
        print("Die! Die! Why won't you die?... Why won't you die?")
        # sys.exit instead of the interactive-only builtin exit()
        sys.exit(1)

    for i in idx_ptrack:
        # get ptrack for every idx
        idx_ptrack[i]['ptrack'] = self.get_ptrack_bits_per_page_for_fork(
            node, idx_ptrack[i]['path'], [idx_ptrack[i]['size']])
        # check that ptrack has correct bits after recovery
        self.check_ptrack_recovery(idx_ptrack[i])
3931
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_recovery_1(self):
    """
    Check that a PTRACK backup taken right after crash recovery is
    consistent: kill the postmaster with an immediate shutdown (losing
    dirty ptrack changes), restart, take a ptrack backup and compare
    the restored data directory with the original one.
    """
    node = self.make_simple_node(
        base_dir=os.path.join(self.module_name, self.fname, 'node'),
        set_replication=True,
        ptrack_enable=True,
        initdb_params=['--data-checksums'],
        pg_options={
            'shared_buffers': '512MB',
            'max_wal_size': '3GB'})

    backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
    self.init_pb(backup_dir)
    self.add_instance(backup_dir, 'node', node)
    node.slow_start()

    node.safe_psql(
        "postgres",
        "CREATE EXTENSION ptrack")

    # Create table
    node.safe_psql(
        "postgres",
        "create extension bloom; create sequence t_seq; "
        "create table t_heap "
        "as select nextval('t_seq')::int as id, md5(i::text) as text, "
        "md5(repeat(i::text,10))::tsvector as tsvector "
        # "from generate_series(0,25600) i")
        "from generate_series(0,2560) i")

    # FULL backup serves as the parent for the later PTRACK backup
    self.backup_node(
        backup_dir, 'node', node, options=['--stream'])

    # Create indexes
    for i in idx_ptrack:
        if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
            node.safe_psql(
                "postgres",
                "CREATE INDEX {0} ON {1} USING {2}({3})".format(
                    i, idx_ptrack[i]['relation'],
                    idx_ptrack[i]['type'], idx_ptrack[i]['column']))

    # Dirty pages between the FULL backup and the crash
    node.safe_psql(
        'postgres',
        "update t_heap set id = nextval('t_seq'), text = md5(text), "
        "tsvector = md5(repeat(tsvector::text, 10))::tsvector")

    node.safe_psql(
        'postgres',
        "create extension pg_buffercache")

    #print(node.safe_psql(
    #    'postgres',
    #    "SELECT count(*) FROM pg_buffercache WHERE isdirty"))

    if self.verbose:
        print('Killing postmaster. Losing Ptrack changes')
    node.stop(['-m', 'immediate', '-D', node.data_dir])

    if not node.status():
        node.slow_start()
    else:
        print("Die! Die! Why won't you die?... Why won't you die?")
        # sys.exit instead of the interactive-only builtin exit()
        sys.exit(1)

    self.backup_node(
        backup_dir, 'node', node,
        backup_type='ptrack', options=['--stream'])

    pgdata = self.pgdata_content(node.data_dir)

    node_restored = self.make_simple_node(
        base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
    node_restored.cleanup()

    self.restore_node(
        backup_dir, 'node', node_restored)

    # Restored cluster must be byte-identical to the source cluster
    pgdata_restored = self.pgdata_content(node_restored.data_dir)
    self.compare_pgdata(pgdata, pgdata_restored)
4013
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_zero_changes(self):
    """
    Take a PTRACK backup immediately after a FULL backup, with no data
    changes in between, and verify that restoring it reproduces the
    data directory exactly.
    """
    backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
    node = self.make_simple_node(
        base_dir=os.path.join(self.module_name, self.fname, 'node'),
        set_replication=True,
        ptrack_enable=True,
        initdb_params=['--data-checksums'])

    self.init_pb(backup_dir)
    self.add_instance(backup_dir, 'node', node)
    node.slow_start()

    node.safe_psql(
        "postgres",
        "CREATE EXTENSION ptrack")

    # Populate a table once; nothing is modified afterwards
    node.safe_psql(
        "postgres",
        "create table t_heap "
        "as select i as id, md5(i::text) as text, "
        "md5(repeat(i::text,10))::tsvector as tsvector "
        "from generate_series(0,2560) i")

    # FULL backup followed directly by a "zero-change" PTRACK backup
    self.backup_node(backup_dir, 'node', node, options=['--stream'])
    self.backup_node(
        backup_dir, 'node', node, backup_type='ptrack',
        options=['--stream'])

    # Snapshot data dir, wipe the node, restore from the ptrack backup
    pgdata = self.pgdata_content(node.data_dir)
    node.cleanup()
    self.restore_node(backup_dir, 'node', node)

    # Restored contents must match the snapshot exactly
    pgdata_restored = self.pgdata_content(node.data_dir)
    self.compare_pgdata(pgdata, pgdata_restored)
4054
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_pg_resetxlog(self):
    """
    After the instance is mangled with pg_resetwal/pg_resetxlog, a PTRACK
    backup must be refused, because the LSN stored in ptrack_control is
    now ahead of the Start LSN of the previous backup.
    """
    node = self.make_simple_node(
        base_dir=os.path.join(self.module_name, self.fname, 'node'),
        set_replication=True,
        ptrack_enable=True,
        initdb_params=['--data-checksums'],
        pg_options={
            'shared_buffers': '512MB',
            'max_wal_size': '3GB'})

    backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
    self.init_pb(backup_dir)
    self.add_instance(backup_dir, 'node', node)
    node.slow_start()

    node.safe_psql(
        "postgres",
        "CREATE EXTENSION ptrack")

    # Create table
    node.safe_psql(
        "postgres",
        "create extension bloom; create sequence t_seq; "
        "create table t_heap "
        "as select nextval('t_seq')::int as id, md5(i::text) as text, "
        "md5(repeat(i::text,10))::tsvector as tsvector "
        # "from generate_series(0,25600) i")
        "from generate_series(0,2560) i")

    # FULL backup -- the parent the later PTRACK attempt will compare LSNs against
    self.backup_node(
        backup_dir, 'node', node, options=['--stream'])

    # Create indexes
    for i in idx_ptrack:
        if idx_ptrack[i]['type'] != 'heap' and idx_ptrack[i]['type'] != 'seq':
            node.safe_psql(
                "postgres",
                "CREATE INDEX {0} ON {1} USING {2}({3})".format(
                    i, idx_ptrack[i]['relation'],
                    idx_ptrack[i]['type'], idx_ptrack[i]['column']))

    node.safe_psql(
        'postgres',
        "update t_heap set id = nextval('t_seq'), text = md5(text), "
        "tsvector = md5(repeat(tsvector::text, 10))::tsvector")

    # node.safe_psql(
    #     'postgres',
    #     "create extension pg_buffercache")
    #
    # print(node.safe_psql(
    #     'postgres',
    #     "SELECT count(*) FROM pg_buffercache WHERE isdirty"))

    # kill the bastard
    if self.verbose:
        print('Killing postmaster. Losing Ptrack changes')
    node.stop(['-m', 'immediate', '-D', node.data_dir])

    # now smack it with sledgehammer
    # binary and WAL dir were renamed in PostgreSQL 10
    if node.major_version >= 10:
        pg_resetxlog_path = self.get_bin_path('pg_resetwal')
        wal_dir = 'pg_wal'
    else:
        pg_resetxlog_path = self.get_bin_path('pg_resetxlog')
        wal_dir = 'pg_xlog'

    self.run_binary(
        [
            pg_resetxlog_path,
            '-D',
            node.data_dir,
            '-o 42',
            '-f'
        ],
        asynchronous=False)

    if not node.status():
        node.slow_start()
    else:
        print("Die! Die! Why won't you die?... Why won't you die?")
        # sys.exit instead of the interactive-only builtin exit()
        sys.exit(1)

    # take ptrack backup
    # self.backup_node(
    #     backup_dir, 'node', node,
    #     backup_type='ptrack', options=['--stream'])

    try:
        self.backup_node(
            backup_dir, 'node', node,
            backup_type='ptrack', options=['--stream'])
        # we should die here because exception is what we expect to happen
        self.assertEqual(
            1, 0,
            "Expecting Error because instance was brutalized by pg_resetxlog"
            "\n Output: {0} \n CMD: {1}".format(
                repr(self.output), self.cmd)
        )
    except ProbackupException as e:
        self.assertTrue(
            'ERROR: LSN from ptrack_control ' in e.message and
            'is greater than Start LSN of previous backup' in e.message,
            '\n Unexpected Error Message: {0}\n'
            ' CMD: {1}'.format(repr(e.message), self.cmd))

    # pgdata = self.pgdata_content(node.data_dir)
    #
    # node_restored = self.make_simple_node(
    #     base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
    # node_restored.cleanup()
    #
    # self.restore_node(
    #     backup_dir, 'node', node_restored)
    #
    # pgdata_restored = self.pgdata_content(node_restored.data_dir)
    # self.compare_pgdata(pgdata, pgdata_restored)
4174
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_corrupt_ptrack_map(self):
    """
    Corrupt ptrack.map on disk and verify recovery behavior:
    PTRACK >= 2.3 only warns about the bad checksum and starts with the
    map discarded; older versions refuse to start. Then check that a
    PTRACK backup is refused while ptrack is disabled, refused again
    after re-enabling (map LSN is "from the future"), and finally works
    after a fresh DELTA backup re-establishes a valid parent.
    """
    node = self.make_simple_node(
        base_dir=os.path.join(self.module_name, self.fname, 'node'),
        set_replication=True,
        ptrack_enable=True,
        initdb_params=['--data-checksums'])

    backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
    self.init_pb(backup_dir)
    self.add_instance(backup_dir, 'node', node)
    node.slow_start()

    node.safe_psql(
        "postgres",
        "CREATE EXTENSION ptrack")

    ptrack_version = self.get_ptrack_version(node)

    # Create table
    node.safe_psql(
        "postgres",
        "create extension bloom; create sequence t_seq; "
        "create table t_heap "
        "as select nextval('t_seq')::int as id, md5(i::text) as text, "
        "md5(repeat(i::text,10))::tsvector as tsvector "
        "from generate_series(0,2560) i")

    self.backup_node(
        backup_dir, 'node', node, options=['--stream'])

    node.safe_psql(
        'postgres',
        "update t_heap set id = nextval('t_seq'), text = md5(text), "
        "tsvector = md5(repeat(tsvector::text, 10))::tsvector")

    # kill the bastard
    if self.verbose:
        print('Killing postmaster. Losing Ptrack changes')

    node.stop(['-m', 'immediate', '-D', node.data_dir])

    ptrack_map = os.path.join(node.data_dir, 'global', 'ptrack.map')

    # Let`s do index corruption. ptrack.map
    # (the 'with' block closes the file; the original also had a stray
    # 'f.close' without parentheses, which was a no-op and is removed)
    with open(ptrack_map, "rb+", 0) as f:
        f.seek(42)
        f.write(b"blablahblahs")
        f.flush()

    # os.remove(os.path.join(node.logs_dir, node.pg_log_name))

    if self.verbose:
        print('Ptrack version:', ptrack_version)
    if ptrack_version >= self.version_to_num("2.3"):
        # modern ptrack: bad checksum is only a warning, node starts
        node.slow_start()

        log_file = os.path.join(node.logs_dir, 'postgresql.log')
        with open(log_file, 'r') as f:
            log_content = f.read()

        self.assertIn(
            'WARNING: ptrack read map: incorrect checksum of file "{0}"'.format(ptrack_map),
            log_content)

        node.stop(['-D', node.data_dir])
    else:
        # old ptrack: bad checksum is fatal, startup must fail
        try:
            node.slow_start()
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because ptrack.map is corrupted"
                "\n Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except StartNodeException as e:
            self.assertIn(
                'Cannot start node',
                e.message,
                '\n Unexpected Error Message: {0}\n'
                ' CMD: {1}'.format(repr(e.message), self.cmd))

        log_file = os.path.join(node.logs_dir, 'postgresql.log')
        with open(log_file, 'r') as f:
            log_content = f.read()

        self.assertIn(
            'FATAL: ptrack init: incorrect checksum of file "{0}"'.format(ptrack_map),
            log_content)

    # disable ptrack; a ptrack backup must now be rejected
    self.set_auto_conf(node, {'ptrack.map_size': '0'})
    node.slow_start()

    try:
        self.backup_node(
            backup_dir, 'node', node,
            backup_type='ptrack', options=['--stream'])
        # we should die here because exception is what we expect to happen
        self.assertEqual(
            1, 0,
            "Expecting Error because instance ptrack is disabled"
            "\n Output: {0} \n CMD: {1}".format(
                repr(self.output), self.cmd))
    except ProbackupException as e:
        self.assertIn(
            'ERROR: Ptrack is disabled',
            e.message,
            '\n Unexpected Error Message: {0}\n'
            ' CMD: {1}'.format(repr(e.message), self.cmd))

    node.safe_psql(
        'postgres',
        "update t_heap set id = nextval('t_seq'), text = md5(text), "
        "tsvector = md5(repeat(tsvector::text, 10))::tsvector")

    node.stop(['-m', 'immediate', '-D', node.data_dir])

    # re-enable ptrack: map was rebuilt, its LSN is ahead of the last backup
    self.set_auto_conf(node, {'ptrack.map_size': '32', 'shared_preload_libraries': 'ptrack'})
    node.slow_start()

    try:
        self.backup_node(
            backup_dir, 'node', node,
            backup_type='ptrack', options=['--stream'])
        # we should die here because exception is what we expect to happen
        self.assertEqual(
            1, 0,
            "Expecting Error because ptrack map is from future"
            "\n Output: {0} \n CMD: {1}".format(
                repr(self.output), self.cmd))
    except ProbackupException as e:
        self.assertIn(
            'ERROR: LSN from ptrack_control',
            e.message,
            '\n Unexpected Error Message: {0}\n'
            ' CMD: {1}'.format(repr(e.message), self.cmd))

    # DELTA backup re-establishes a valid parent for PTRACK
    self.backup_node(
        backup_dir, 'node', node,
        backup_type='delta', options=['--stream'])

    node.safe_psql(
        'postgres',
        "update t_heap set id = nextval('t_seq'), text = md5(text), "
        "tsvector = md5(repeat(tsvector::text, 10))::tsvector")

    self.backup_node(
        backup_dir, 'node', node,
        backup_type='ptrack', options=['--stream'])

    pgdata = self.pgdata_content(node.data_dir)

    node.cleanup()

    self.restore_node(backup_dir, 'node', node)

    pgdata_restored = self.pgdata_content(node.data_dir)
    self.compare_pgdata(pgdata, pgdata_restored)
4335
# @unittest.skip("skip")
def test_horizon_lsn_ptrack(self):
    """
    https://github.com/postgrespro/pg_probackup/pull/386

    Take a FULL backup with an old (<= 2.4.15) binary, then verify that
    a PTRACK backup made with the current binary copies exactly the same
    amount of data as a DELTA backup of the same change set.
    """
    if not self.probackup_old_path:
        self.skipTest("You must specify PGPROBACKUPBIN_OLD"
                      " for run this test")
    self.assertLessEqual(
        self.version_to_num(self.old_probackup_version),
        self.version_to_num('2.4.15'),
        # fixed message typo: '=<' -> '<='
        'You need pg_probackup old_binary <= 2.4.15 for this test')

    backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
    node = self.make_simple_node(
        base_dir=os.path.join(self.module_name, self.fname, 'node'),
        set_replication=True,
        ptrack_enable=True,
        initdb_params=['--data-checksums'])

    self.init_pb(backup_dir)
    self.add_instance(backup_dir, 'node', node)
    node.slow_start()

    node.safe_psql(
        "postgres",
        "CREATE EXTENSION ptrack")

    self.assertGreaterEqual(
        self.get_ptrack_version(node),
        self.version_to_num("2.1"),
        "You need ptrack >=2.1 for this test")

    # set map_size to a minimal value
    self.set_auto_conf(node, {'ptrack.map_size': '1'})
    node.restart()

    node.pgbench_init(scale=100)

    # FULL backup
    full_id = self.backup_node(backup_dir, 'node', node, options=['--stream'], old_binary=True)

    # enable archiving so the WAL size to do interfere with data bytes comparison later
    self.set_archiving(backup_dir, 'node', node)
    node.restart()

    # change data
    pgbench = node.pgbench(options=['-T', '10', '-c', '1', '--no-vacuum'])
    pgbench.wait()

    # DELTA is exemplar
    delta_id = self.backup_node(
        backup_dir, 'node', node, backup_type='delta')
    delta_bytes = self.show_pb(backup_dir, 'node', backup_id=delta_id)["data-bytes"]
    self.delete_pb(backup_dir, 'node', backup_id=delta_id)

    # PTRACK with current binary
    ptrack_id = self.backup_node(backup_dir, 'node', node, backup_type='ptrack')
    ptrack_bytes = self.show_pb(backup_dir, 'node', backup_id=ptrack_id)["data-bytes"]

    # make sure that backup size is exactly the same
    self.assertEqual(delta_bytes, ptrack_bytes)
4398