# pg_probackup backward-compatibility test suite (original file: 1503 lines, 50.5 KB)
1import unittest
2import subprocess
3import os
4from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
5from sys import exit
6import shutil
7
8
def check_manual_tests_enabled():
    """Return True when manual tests are explicitly enabled.

    Manual tests run only when the PGPROBACKUP_MANUAL environment
    variable is set to exactly 'ON'.
    """
    # .get() avoids the double lookup of `in` followed by indexing.
    return os.environ.get('PGPROBACKUP_MANUAL') == 'ON'
11
12
def check_ssh_agent_path_exists():
    """Report whether PGPROBACKUP_SSH_AGENT_PATH is present in the environment."""
    return os.environ.get('PGPROBACKUP_SSH_AGENT_PATH') is not None
15
16
class CrossCompatibilityTest(ProbackupTest, unittest.TestCase):
    @unittest.skipUnless(check_manual_tests_enabled(), 'skip manual test')
    @unittest.skipUnless(check_ssh_agent_path_exists(), 'skip no ssh agent path exist')
    # @unittest.skip("skip")
    def test_catchup_with_different_remote_major_pg(self):
        """
        Description in jira issue PBCKP-236
        This test exposes the ticket error using pg_probackup builds for both PGPROEE11 and PGPROEE9_6

        Prerequisites:
        - pg_probackup git tag for PBCKP 2.5.1
        - master pg_probackup build should be made for PGPROEE11
        - agent pg_probackup build should be made for PGPROEE9_6

        Calling probackup PGPROEE9_6 pg_probackup agent from PGPROEE11 pg_probackup master for DELTA backup causes
        the PBCKP-236 problem

        Please give env variables PGPROBACKUP_MANUAL=ON;PGPROBACKUP_SSH_AGENT_PATH=<pg_probackup_ssh_agent_path>
        for the test

        Please make path for agent's pgprobackup_ssh_agent_path = '/home/avaness/postgres/postgres.build.ee.9.6/bin/'
        without pg_probackup executable
        """

        # run in verbose remote mode so the ssh agent path below is used
        self.verbose = True
        self.remote = True
        # please use your own local path like
        # pgprobackup_ssh_agent_path = '/home/avaness/postgres/postgres.build.clean/bin/'
        pgprobackup_ssh_agent_path = os.environ['PGPROBACKUP_SSH_AGENT_PATH']

        src_pg = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'src'),
            set_replication=True,
            )
        src_pg.slow_start()
        src_pg.safe_psql(
            "postgres",
            "CREATE TABLE ultimate_question AS SELECT 42 AS answer")

        # do full catchup
        dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst'))
        self.catchup_node(
            backup_mode='FULL',
            source_pgdata=src_pg.data_dir,
            destination_node=dst_pg,
            options=['-d', 'postgres', '-p', str(src_pg.port), '--stream']
            )

        dst_options = {'port': str(dst_pg.port)}
        self.set_auto_conf(dst_pg, dst_options)
        # start/stop once so the destination is a valid, cleanly shut down cluster
        dst_pg.slow_start()
        dst_pg.stop()

        src_pg.safe_psql(
            "postgres",
            "CREATE TABLE ultimate_question2 AS SELECT 42 AS answer")

        # do delta catchup with remote pg_probackup agent with another postgres major version
        # this DELTA backup should fail without PBCKP-236 patch.
        self.catchup_node(
            backup_mode='DELTA',
            source_pgdata=src_pg.data_dir,
            destination_node=dst_pg,
            # here's substitution of --remote-path pg_probackup agent compiled with another postgres version
            options=['-d', 'postgres', '-p', str(src_pg.port), '--stream', '--remote-path=' + pgprobackup_ssh_agent_path]
            )
83
84
85class CompatibilityTest(ProbackupTest, unittest.TestCase):
86
def setUp(self):
    """Skip every test in this class when no old pg_probackup binary is configured.

    self.probackup_old_path is expected to come from the
    PGPROBACKUPBIN_OLD environment variable (see the skip message);
    without it, backward-compatibility chains cannot be built.
    """
    super().setUp()
    if not self.probackup_old_path:
        self.skipTest('PGPROBACKUPBIN_OLD is not set')
91
92# @unittest.expectedFailure
93# @unittest.skip("skip")
def test_backward_compatibility_page(self):
    """Description in jira issue PGPRO-434

    Take FULL and PAGE backups with the OLD binary, restore and
    validate them with the NEW binary, then extend the chain with
    PAGE backups taken by the new binary; after each step the
    restored data directory is compared against the source (when
    self.paranoia is enabled, and unconditionally for the last steps).
    """
    backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
    node = self.make_simple_node(
        base_dir=os.path.join(self.module_name, self.fname, 'node'),
        set_replication=True,
        initdb_params=['--data-checksums'])

    # catalog, instance and archiving are all set up with the OLD binary
    self.init_pb(backup_dir, old_binary=True)
    self.show_pb(backup_dir)

    self.add_instance(backup_dir, 'node', node, old_binary=True)
    self.show_pb(backup_dir)

    self.set_archiving(backup_dir, 'node', node, old_binary=True)
    node.slow_start()

    node.pgbench_init(scale=10)

    # FULL backup with old binary
    self.backup_node(
        backup_dir, 'node', node, old_binary=True)

    if self.paranoia:
        pgdata = self.pgdata_content(node.data_dir)

    self.show_pb(backup_dir)

    # validate backups made by the old binary using the new binary
    self.validate_pb(backup_dir)

    # RESTORE old FULL with new binary
    node_restored = self.make_simple_node(
        base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))

    node_restored.cleanup()

    self.restore_node(
        backup_dir, 'node', node_restored, options=["-j", "4"])

    if self.paranoia:
        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)

    # Page BACKUP with old binary
    pgbench = node.pgbench(
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        options=["-c", "4", "-T", "20"]
        )
    pgbench.wait()
    pgbench.stdout.close()

    self.backup_node(
        backup_dir, 'node', node, backup_type='page',
        old_binary=True)

    if self.paranoia:
        pgdata = self.pgdata_content(node.data_dir)

    node_restored.cleanup()
    self.restore_node(
        backup_dir, 'node', node_restored, options=["-j", "4"])

    if self.paranoia:
        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)

    # Page BACKUP with new binary
    pgbench = node.pgbench(
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        options=["-c", "4", "-T", "20"])

    pgbench.wait()
    pgbench.stdout.close()

    self.backup_node(
        backup_dir, 'node', node, backup_type='page')

    if self.paranoia:
        pgdata = self.pgdata_content(node.data_dir)

    node_restored.cleanup()

    self.restore_node(
        backup_dir, 'node', node_restored, options=["-j", "4"])

    if self.paranoia:
        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)

    # Heavily rewrite the data (copy rows out, delete everything,
    # VACUUM) before the next PAGE backup with the new binary.
    node.safe_psql(
        'postgres',
        'create table tmp as select * from pgbench_accounts where aid < 1000')

    node.safe_psql(
        'postgres',
        'delete from pgbench_accounts')

    node.safe_psql(
        'postgres',
        'VACUUM')

    self.backup_node(backup_dir, 'node', node, backup_type='page')

    pgdata = self.pgdata_content(node.data_dir)

    node_restored.cleanup()
    self.restore_node(
        backup_dir, 'node', node_restored, options=["-j", "4"])

    pgdata_restored = self.pgdata_content(node_restored.data_dir)
    self.compare_pgdata(pgdata, pgdata_restored)

    # Grow the table back and take one more PAGE backup with the new binary.
    node.safe_psql(
        'postgres',
        'insert into pgbench_accounts select * from pgbench_accounts')

    self.backup_node(backup_dir, 'node', node, backup_type='page')

    pgdata = self.pgdata_content(node.data_dir)

    node_restored.cleanup()
    self.restore_node(
        backup_dir, 'node', node_restored, options=["-j", "4"])

    pgdata_restored = self.pgdata_content(node_restored.data_dir)
    self.compare_pgdata(pgdata, pgdata_restored)
222
223# @unittest.expectedFailure
224# @unittest.skip("skip")
def test_backward_compatibility_delta(self):
    """Description in jira issue PGPRO-434

    Same chain as the PAGE variant but with DELTA backups: FULL with
    the OLD binary, restore/validate with the NEW binary, then DELTA
    backups taken with both binaries, restoring and comparing the data
    directory after each step.
    """
    backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
    node = self.make_simple_node(
        base_dir=os.path.join(self.module_name, self.fname, 'node'),
        set_replication=True,
        initdb_params=['--data-checksums'])

    # catalog, instance and archiving are all set up with the OLD binary
    self.init_pb(backup_dir, old_binary=True)
    self.show_pb(backup_dir)

    self.add_instance(backup_dir, 'node', node, old_binary=True)
    self.show_pb(backup_dir)

    self.set_archiving(backup_dir, 'node', node, old_binary=True)
    node.slow_start()

    node.pgbench_init(scale=10)

    # FULL backup with old binary
    self.backup_node(
        backup_dir, 'node', node, old_binary=True)

    if self.paranoia:
        pgdata = self.pgdata_content(node.data_dir)

    self.show_pb(backup_dir)

    # validate old-binary backups with the new binary
    self.validate_pb(backup_dir)

    # RESTORE old FULL with new binary
    node_restored = self.make_simple_node(
        base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))

    node_restored.cleanup()

    self.restore_node(
        backup_dir, 'node', node_restored, options=["-j", "4"])

    if self.paranoia:
        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)

    # Delta BACKUP with old binary
    pgbench = node.pgbench(
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        options=["-c", "4", "-T", "20"]
        )
    pgbench.wait()
    pgbench.stdout.close()

    self.backup_node(
        backup_dir, 'node', node, backup_type='delta',
        old_binary=True)

    if self.paranoia:
        pgdata = self.pgdata_content(node.data_dir)

    node_restored.cleanup()
    self.restore_node(
        backup_dir, 'node', node_restored, options=["-j", "4"])

    if self.paranoia:
        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)

    # Delta BACKUP with new binary
    pgbench = node.pgbench(
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        options=["-c", "4", "-T", "20"]
        )
    pgbench.wait()
    pgbench.stdout.close()

    self.backup_node(backup_dir, 'node', node, backup_type='delta')

    if self.paranoia:
        pgdata = self.pgdata_content(node.data_dir)

    node_restored.cleanup()

    self.restore_node(
        backup_dir, 'node', node_restored, options=["-j", "4"])

    if self.paranoia:
        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)

    # Heavily rewrite the data (copy rows out, delete everything,
    # VACUUM) before the next DELTA backup with the new binary.
    node.safe_psql(
        'postgres',
        'create table tmp as select * from pgbench_accounts where aid < 1000')

    node.safe_psql(
        'postgres',
        'delete from pgbench_accounts')

    node.safe_psql(
        'postgres',
        'VACUUM')

    self.backup_node(backup_dir, 'node', node, backup_type='delta')

    pgdata = self.pgdata_content(node.data_dir)

    node_restored.cleanup()
    self.restore_node(
        backup_dir, 'node', node_restored, options=["-j", "4"])

    pgdata_restored = self.pgdata_content(node_restored.data_dir)
    self.compare_pgdata(pgdata, pgdata_restored)

    # Grow the table back and take a final DELTA backup.
    node.safe_psql(
        'postgres',
        'insert into pgbench_accounts select * from pgbench_accounts')

    self.backup_node(backup_dir, 'node', node, backup_type='delta')

    pgdata = self.pgdata_content(node.data_dir)

    node_restored.cleanup()
    self.restore_node(
        backup_dir, 'node', node_restored, options=["-j", "4"])

    pgdata_restored = self.pgdata_content(node_restored.data_dir)
    self.compare_pgdata(pgdata, pgdata_restored)
352
353# @unittest.expectedFailure
354# @unittest.skip("skip")
def test_backward_compatibility_ptrack(self):
    """Description in jira issue PGPRO-434

    PTRACK variant of the backward-compatibility chain: FULL with the
    OLD binary, then PTRACK backups with old and new binaries; each is
    restored with recovery target 'latest' and the restored data
    directory is compared against the source.
    """

    # PTRACK tests are only meaningful when ptrack support is built in.
    if not self.ptrack:
        self.skipTest('Skipped because ptrack support is disabled')

    backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
    node = self.make_simple_node(
        base_dir=os.path.join(self.module_name, self.fname, 'node'),
        set_replication=True,
        ptrack_enable=True,
        initdb_params=['--data-checksums'])

    # catalog, instance and archiving are all set up with the OLD binary
    self.init_pb(backup_dir, old_binary=True)
    self.show_pb(backup_dir)

    self.add_instance(backup_dir, 'node', node, old_binary=True)
    self.show_pb(backup_dir)

    self.set_archiving(backup_dir, 'node', node, old_binary=True)
    node.slow_start()

    # ptrack extension is required for ptrack incremental backups
    node.safe_psql(
        "postgres",
        "CREATE EXTENSION ptrack")

    node.pgbench_init(scale=10)

    # FULL backup with old binary
    self.backup_node(
        backup_dir, 'node', node, old_binary=True)

    if self.paranoia:
        pgdata = self.pgdata_content(node.data_dir)

    self.show_pb(backup_dir)

    # validate old-binary backups with the new binary
    self.validate_pb(backup_dir)

    # RESTORE old FULL with new binary
    node_restored = self.make_simple_node(
        base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))

    node_restored.cleanup()

    self.restore_node(
        backup_dir, 'node', node_restored, options=["-j", "4"])

    if self.paranoia:
        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)

    # ptrack BACKUP with old binary
    pgbench = node.pgbench(
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        options=["-c", "4", "-T", "20"]
        )
    pgbench.wait()
    pgbench.stdout.close()

    self.backup_node(
        backup_dir, 'node', node, backup_type='ptrack',
        old_binary=True)

    if self.paranoia:
        pgdata = self.pgdata_content(node.data_dir)

    node_restored.cleanup()
    self.restore_node(
        backup_dir, 'node', node_restored,
        options=[
            "-j", "4",
            "--recovery-target=latest",
            "--recovery-target-action=promote"])

    if self.paranoia:
        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)

    # Ptrack BACKUP with new binary
    pgbench = node.pgbench(
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        options=["-c", "4", "-T", "20"]
        )
    pgbench.wait()
    pgbench.stdout.close()

    self.backup_node(
        backup_dir, 'node', node, backup_type='ptrack')

    if self.paranoia:
        pgdata = self.pgdata_content(node.data_dir)

    node_restored.cleanup()

    self.restore_node(
        backup_dir, 'node', node_restored,
        options=[
            "-j", "4",
            "--recovery-target=latest",
            "--recovery-target-action=promote"])

    if self.paranoia:
        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)
462
463# @unittest.expectedFailure
464# @unittest.skip("skip")
def test_backward_compatibility_compression(self):
    """Description in jira issue PGPRO-434

    Backward-compatibility chain where every backup is taken with
    --compress: FULL and PAGE with old/new binaries, then the original
    FULL is deleted and a fresh compressed chain (FULL + DELTA with
    old/new binaries) is built; restored pgdata is compared after each
    step when self.paranoia is enabled.
    """
    backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
    node = self.make_simple_node(
        base_dir=os.path.join(self.module_name, self.fname, 'node'),
        set_replication=True,
        initdb_params=['--data-checksums'])

    self.init_pb(backup_dir, old_binary=True)
    self.add_instance(backup_dir, 'node', node, old_binary=True)

    self.set_archiving(backup_dir, 'node', node, old_binary=True)
    node.slow_start()

    node.pgbench_init(scale=10)

    # FULL backup with OLD binary
    backup_id = self.backup_node(
        backup_dir, 'node', node,
        old_binary=True,
        options=['--compress'])

    if self.paranoia:
        pgdata = self.pgdata_content(node.data_dir)

    # restore OLD FULL with new binary
    node_restored = self.make_simple_node(
        base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))

    node_restored.cleanup()

    self.restore_node(
        backup_dir, 'node', node_restored,
        options=["-j", "4"])

    if self.paranoia:
        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)

    # PAGE backup with OLD binary
    pgbench = node.pgbench(
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        options=["-c", "4", "-T", "10"])
    pgbench.wait()
    pgbench.stdout.close()

    self.backup_node(
        backup_dir, 'node', node,
        backup_type='page',
        old_binary=True,
        options=['--compress'])

    if self.paranoia:
        pgdata = self.pgdata_content(node.data_dir)

    node_restored.cleanup()
    self.restore_node(
        backup_dir, 'node', node_restored,
        options=["-j", "4"])

    if self.paranoia:
        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)

    # PAGE backup with new binary
    pgbench = node.pgbench(
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        options=["-c", "4", "-T", "10"])
    pgbench.wait()
    pgbench.stdout.close()

    self.backup_node(
        backup_dir, 'node', node,
        backup_type='page',
        options=['--compress'])

    if self.paranoia:
        pgdata = self.pgdata_content(node.data_dir)

    node_restored.cleanup()

    self.restore_node(
        backup_dir, 'node', node_restored,
        options=["-j", "4"])

    if self.paranoia:
        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)

    # Delta backup with old binary: first delete the initial FULL and
    # take a fresh compressed FULL with the OLD binary to base it on.
    self.delete_pb(backup_dir, 'node', backup_id)

    self.backup_node(
        backup_dir, 'node', node,
        old_binary=True,
        options=['--compress'])

    pgbench = node.pgbench(
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        options=["-c", "4", "-T", "10"])

    pgbench.wait()
    pgbench.stdout.close()

    self.backup_node(
        backup_dir, 'node', node,
        backup_type='delta',
        options=['--compress'],
        old_binary=True)

    if self.paranoia:
        pgdata = self.pgdata_content(node.data_dir)

    node_restored.cleanup()

    self.restore_node(
        backup_dir, 'node', node_restored,
        options=["-j", "4"])

    if self.paranoia:
        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)

    # Delta backup with new binary
    pgbench = node.pgbench(
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        options=["-c", "4", "-T", "10"])

    pgbench.wait()
    pgbench.stdout.close()

    self.backup_node(
        backup_dir, 'node', node,
        backup_type='delta',
        options=['--compress'])

    if self.paranoia:
        pgdata = self.pgdata_content(node.data_dir)

    node_restored.cleanup()

    self.restore_node(
        backup_dir, 'node', node_restored,
        options=["-j", "4"])

    if self.paranoia:
        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)
617
618# @unittest.expectedFailure
619# @unittest.skip("skip")
def test_backward_compatibility_merge(self):
    """
    Create node, take FULL and PAGE backups with old binary,
    merge them with new binary
    """
    backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
    node = self.make_simple_node(
        base_dir=os.path.join(self.module_name, self.fname, 'node'),
        set_replication=True,
        initdb_params=['--data-checksums'])

    self.init_pb(backup_dir, old_binary=True)
    self.add_instance(backup_dir, 'node', node, old_binary=True)

    self.set_archiving(backup_dir, 'node', node, old_binary=True)
    node.slow_start()

    # FULL backup with OLD binary
    self.backup_node(
        backup_dir, 'node', node,
        old_binary=True)

    node.pgbench_init(scale=1)

    # PAGE backup with OLD binary
    backup_id = self.backup_node(
        backup_dir, 'node', node,
        backup_type='page', old_binary=True)

    if self.paranoia:
        pgdata = self.pgdata_content(node.data_dir)

    # merge the old-binary chain using the NEW binary
    self.merge_backup(backup_dir, "node", backup_id)

    self.show_pb(backup_dir, as_text=True, as_json=False)

    # restore OLD FULL with new binary
    node_restored = self.make_simple_node(
        base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))

    node_restored.cleanup()

    self.restore_node(
        backup_dir, 'node', node_restored, options=["-j", "4"])

    if self.paranoia:
        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)
668
669# @unittest.expectedFailure
670# @unittest.skip("skip")
def test_backward_compatibility_merge_1(self):
    """
    Create node, take FULL and PAGE backups with old binary,
    merge them with new binary.
    old binary version =< 2.2.7

    The new binary must refuse an in-place merge of the old-format
    chain (the test asserts the warning) and the merged backup must
    still restore to identical pgdata.
    """
    backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
    node = self.make_simple_node(
        base_dir=os.path.join(self.module_name, self.fname, 'node'),
        set_replication=True,
        initdb_params=['--data-checksums'])

    self.init_pb(backup_dir, old_binary=True)
    self.add_instance(backup_dir, 'node', node, old_binary=True)

    self.set_archiving(backup_dir, 'node', node, old_binary=True)
    node.slow_start()

    node.pgbench_init(scale=20)

    # FULL backup with OLD binary
    self.backup_node(backup_dir, 'node', node, old_binary=True)

    pgbench = node.pgbench(
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        options=["-c", "1", "-T", "10", "--no-vacuum"])
    pgbench.wait()
    pgbench.stdout.close()

    # PAGE1 backup with OLD binary
    self.backup_node(
        backup_dir, 'node', node, backup_type='page', old_binary=True)

    # wipe and vacuum the accounts table so PAGE2 captures truncation
    node.safe_psql(
        'postgres',
        'DELETE from pgbench_accounts')

    node.safe_psql(
        'postgres',
        'VACUUM pgbench_accounts')

    # PAGE2 backup with OLD binary
    backup_id = self.backup_node(
        backup_dir, 'node', node, backup_type='page', old_binary=True)

    pgdata = self.pgdata_content(node.data_dir)

    # merge chain created by old binary with new binary
    output = self.merge_backup(backup_dir, "node", backup_id)

    # check that in-place is disabled
    self.assertIn(
        "WARNING: In-place merge is disabled "
        "because of storage format incompatibility", output)

    # restore merged backup
    node_restored = self.make_simple_node(
        base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
    node_restored.cleanup()

    self.restore_node(backup_dir, 'node', node_restored)

    pgdata_restored = self.pgdata_content(node_restored.data_dir)
    self.compare_pgdata(pgdata, pgdata_restored)
736
737# @unittest.expectedFailure
738# @unittest.skip("skip")
def test_backward_compatibility_merge_2(self):
    """
    Create node, take FULL and PAGE backups with old binary,
    merge them with new binary.
    old binary version =< 2.2.7

    Builds a FULL + PAGE1..PAGE3 chain with the OLD binary and PAGE4
    with the NEW binary, then merges the pages one by one with the new
    binary, restoring and comparing pgdata after every merge.
    """
    backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
    node = self.make_simple_node(
        base_dir=os.path.join(self.module_name, self.fname, 'node'),
        set_replication=True,
        initdb_params=['--data-checksums'])

    self.init_pb(backup_dir, old_binary=True)
    self.add_instance(backup_dir, 'node', node, old_binary=True)

    self.set_archiving(backup_dir, 'node', node, old_binary=True)
    node.slow_start()

    node.pgbench_init(scale=50)

    node.safe_psql(
        'postgres',
        'VACUUM pgbench_accounts')

    node_restored = self.make_simple_node(
        base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))

    # FULL backup with OLD binary
    self.backup_node(backup_dir, 'node', node, old_binary=True)

    pgbench = node.pgbench(
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        options=["-c", "1", "-T", "10", "--no-vacuum"])
    pgbench.wait()
    pgbench.stdout.close()

    # PAGE1 backup with OLD binary
    page1 = self.backup_node(
        backup_dir, 'node', node,
        backup_type='page', old_binary=True)

    pgdata1 = self.pgdata_content(node.data_dir)

    # shrink the table so later pages differ from PAGE1
    node.safe_psql(
        'postgres',
        "DELETE from pgbench_accounts where ctid > '(10,1)'")

    # PAGE2 backup with OLD binary
    page2 = self.backup_node(
        backup_dir, 'node', node,
        backup_type='page', old_binary=True)

    pgdata2 = self.pgdata_content(node.data_dir)

    # PAGE3 backup with OLD binary
    page3 = self.backup_node(
        backup_dir, 'node', node,
        backup_type='page', old_binary=True)

    pgdata3 = self.pgdata_content(node.data_dir)

    pgbench = node.pgbench(
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        options=["-c", "1", "-T", "10", "--no-vacuum"])
    pgbench.wait()
    pgbench.stdout.close()

    # PAGE4 backup with NEW binary
    page4 = self.backup_node(
        backup_dir, 'node', node, backup_type='page')
    pgdata4 = self.pgdata_content(node.data_dir)

    # merge backups one by one and check data correctness
    # merge PAGE1
    self.merge_backup(
        backup_dir, "node", page1, options=['--log-level-file=VERBOSE'])

    # check data correctness for PAGE1
    node_restored.cleanup()
    self.restore_node(
        backup_dir, 'node', node_restored, backup_id=page1,
        options=['--log-level-file=VERBOSE'])
    pgdata_restored = self.pgdata_content(node_restored.data_dir)
    self.compare_pgdata(pgdata1, pgdata_restored)

    # merge PAGE2
    self.merge_backup(backup_dir, "node", page2)

    # check data correctness for PAGE2
    node_restored.cleanup()
    self.restore_node(backup_dir, 'node', node_restored, backup_id=page2)
    pgdata_restored = self.pgdata_content(node_restored.data_dir)
    self.compare_pgdata(pgdata2, pgdata_restored)

    # merge PAGE3
    self.show_pb(backup_dir, 'node', page3)
    self.merge_backup(backup_dir, "node", page3)
    self.show_pb(backup_dir, 'node', page3)

    # check data correctness for PAGE3
    node_restored.cleanup()
    self.restore_node(backup_dir, 'node', node_restored, backup_id=page3)
    pgdata_restored = self.pgdata_content(node_restored.data_dir)
    self.compare_pgdata(pgdata3, pgdata_restored)

    # merge PAGE4
    self.merge_backup(backup_dir, "node", page4)

    # check data correctness for PAGE4
    node_restored.cleanup()
    self.restore_node(backup_dir, 'node', node_restored, backup_id=page4)
    pgdata_restored = self.pgdata_content(node_restored.data_dir)
    self.compare_pgdata(pgdata4, pgdata_restored)
854
855# @unittest.expectedFailure
856# @unittest.skip("skip")
def test_backward_compatibility_merge_3(self):
    """
    Create node, take FULL and PAGE backups with old binary,
    merge them with new binary.
    old binary version =< 2.2.7

    Same scenario as test_backward_compatibility_merge_2 but every
    backup is taken with --compress.
    """
    backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
    node = self.make_simple_node(
        base_dir=os.path.join(self.module_name, self.fname, 'node'),
        set_replication=True,
        initdb_params=['--data-checksums'])

    self.init_pb(backup_dir, old_binary=True)
    self.add_instance(backup_dir, 'node', node, old_binary=True)

    self.set_archiving(backup_dir, 'node', node, old_binary=True)
    node.slow_start()

    node.pgbench_init(scale=50)

    node.safe_psql(
        'postgres',
        'VACUUM pgbench_accounts')

    node_restored = self.make_simple_node(
        base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))

    # FULL backup with OLD binary
    self.backup_node(
        backup_dir, 'node', node, old_binary=True, options=['--compress'])

    pgbench = node.pgbench(
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        options=["-c", "1", "-T", "10", "--no-vacuum"])
    pgbench.wait()
    pgbench.stdout.close()

    # PAGE1 backup with OLD binary
    page1 = self.backup_node(
        backup_dir, 'node', node,
        backup_type='page', old_binary=True, options=['--compress'])

    pgdata1 = self.pgdata_content(node.data_dir)

    # shrink the table so later pages differ from PAGE1
    node.safe_psql(
        'postgres',
        "DELETE from pgbench_accounts where ctid > '(10,1)'")

    # PAGE2 backup with OLD binary
    page2 = self.backup_node(
        backup_dir, 'node', node,
        backup_type='page', old_binary=True, options=['--compress'])

    pgdata2 = self.pgdata_content(node.data_dir)

    # PAGE3 backup with OLD binary
    page3 = self.backup_node(
        backup_dir, 'node', node,
        backup_type='page', old_binary=True, options=['--compress'])

    pgdata3 = self.pgdata_content(node.data_dir)

    pgbench = node.pgbench(
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        options=["-c", "1", "-T", "10", "--no-vacuum"])
    pgbench.wait()
    pgbench.stdout.close()

    # PAGE4 backup with NEW binary
    page4 = self.backup_node(
        backup_dir, 'node', node, backup_type='page', options=['--compress'])
    pgdata4 = self.pgdata_content(node.data_dir)

    # merge backups one by one and check data correctness
    # merge PAGE1
    self.merge_backup(
        backup_dir, "node", page1, options=['--log-level-file=VERBOSE'])

    # check data correctness for PAGE1
    node_restored.cleanup()
    self.restore_node(
        backup_dir, 'node', node_restored, backup_id=page1,
        options=['--log-level-file=VERBOSE'])
    pgdata_restored = self.pgdata_content(node_restored.data_dir)
    self.compare_pgdata(pgdata1, pgdata_restored)

    # merge PAGE2
    self.merge_backup(backup_dir, "node", page2)

    # check data correctness for PAGE2
    node_restored.cleanup()
    self.restore_node(backup_dir, 'node', node_restored, backup_id=page2)
    pgdata_restored = self.pgdata_content(node_restored.data_dir)
    self.compare_pgdata(pgdata2, pgdata_restored)

    # merge PAGE3
    self.show_pb(backup_dir, 'node', page3)
    self.merge_backup(backup_dir, "node", page3)
    self.show_pb(backup_dir, 'node', page3)

    # check data correctness for PAGE3
    node_restored.cleanup()
    self.restore_node(backup_dir, 'node', node_restored, backup_id=page3)
    pgdata_restored = self.pgdata_content(node_restored.data_dir)
    self.compare_pgdata(pgdata3, pgdata_restored)

    # merge PAGE4
    self.merge_backup(backup_dir, "node", page4)

    # check data correctness for PAGE4
    node_restored.cleanup()
    self.restore_node(backup_dir, 'node', node_restored, backup_id=page4)
    pgdata_restored = self.pgdata_content(node_restored.data_dir)
    self.compare_pgdata(pgdata4, pgdata_restored)
973
974# @unittest.expectedFailure
975# @unittest.skip("skip")
def test_backward_compatibility_merge_4(self):
    """
    Start merge between minor version, crash and retry it.
    old binary version =< 2.4.0

    A merge interrupted mid-way (SIGKILL under gdb inside a 'rename'
    call) must NOT be retryable by the new binary when the chain was
    started by a binary =< 2.4.0: the retry has to fail with an
    explicit error instead of risking corruption from the 2.4.0
    storage format changes.
    """
    # Guard: this scenario is only valid against an old binary =< 2.4.0.
    # self.fail() is the idiomatic unittest way to abort unconditionally
    # (replaces the old assertTrue(False, ...) pattern).
    if self.version_to_num(self.old_probackup_version) > self.version_to_num('2.4.0'):
        self.fail('You need pg_probackup old_binary =< 2.4.0 for this test')

    backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
    node = self.make_simple_node(
        base_dir=os.path.join(self.module_name, self.fname, 'node'),
        set_replication=True,
        initdb_params=['--data-checksums'])

    self.init_pb(backup_dir, old_binary=True)
    self.add_instance(backup_dir, 'node', node, old_binary=True)

    self.set_archiving(backup_dir, 'node', node, old_binary=True)
    node.slow_start()

    node.pgbench_init(scale=20)

    node.safe_psql(
        'postgres',
        'VACUUM pgbench_accounts')

    node_restored = self.make_simple_node(
        base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))

    # FULL backup with OLD binary
    self.backup_node(
        backup_dir, 'node', node, old_binary=True, options=['--compress'])

    pgbench = node.pgbench(
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        options=["-c", "1", "-T", "20", "--no-vacuum"])
    pgbench.wait()
    pgbench.stdout.close()

    # PAGE backup with NEW binary
    page_id = self.backup_node(
        backup_dir, 'node', node, backup_type='page', options=['--compress'])
    pgdata = self.pgdata_content(node.data_dir)

    # Start the merge under gdb and SIGKILL the process while it is
    # inside a 'rename' call, leaving a half-finished merge behind.
    gdb = self.merge_backup(backup_dir, "node", page_id, gdb=True)

    gdb.set_breakpoint('rename')
    gdb.run_until_break()
    gdb.continue_execution_until_break(500)
    gdb._execute('signal SIGKILL')

    # Retrying the interrupted merge with the new binary must be rejected.
    try:
        self.merge_backup(backup_dir, "node", page_id)
        self.fail(
            "Expecting Error because of format changes.\n "
            "Output: {0} \n CMD: {1}".format(
                repr(self.output), self.cmd))
    except ProbackupException as e:
        self.assertIn(
            "ERROR: Retry of failed merge for backups with different "
            "between minor versions is forbidden to avoid data corruption "
            "because of storage format changes introduced in 2.4.0 version, "
            "please take a new full backup",
            e.message,
            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                repr(e.message), self.cmd))
1046
1047# @unittest.expectedFailure
1048# @unittest.skip("skip")
def test_backward_compatibility_merge_5(self):
    """
    Create node, take FULL and PAGE backups with old binary,
    merge them with new binary.
    old binary version >= STORAGE_FORMAT_VERSION (2.4.4)

    Since both binaries share the storage format, the merge must run
    in place: the "In-place merge is disabled" warning must NOT appear
    and the merged backup must restore to identical pgdata.
    """
    # Guard: the old binary must already use the current storage format.
    # self.fail() is the idiomatic unittest way to abort unconditionally
    # (replaces the old assertTrue(False, ...) pattern).
    if self.version_to_num(self.old_probackup_version) < self.version_to_num('2.4.4'):
        self.fail('OLD pg_probackup binary must be >= 2.4.4 for this test')

    # Sanity check: old and new binaries must actually be different versions.
    self.assertNotEqual(
        self.version_to_num(self.old_probackup_version),
        self.version_to_num(self.probackup_version))

    backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
    node = self.make_simple_node(
        base_dir=os.path.join(self.module_name, self.fname, 'node'),
        set_replication=True,
        initdb_params=['--data-checksums'])

    self.init_pb(backup_dir, old_binary=True)
    self.add_instance(backup_dir, 'node', node, old_binary=True)

    self.set_archiving(backup_dir, 'node', node, old_binary=True)
    node.slow_start()

    node.pgbench_init(scale=20)

    # FULL backup with OLD binary
    self.backup_node(backup_dir, 'node', node, old_binary=True)

    pgbench = node.pgbench(
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        options=["-c", "1", "-T", "10", "--no-vacuum"])
    pgbench.wait()
    pgbench.stdout.close()

    # PAGE1 backup with OLD binary
    self.backup_node(
        backup_dir, 'node', node, backup_type='page', old_binary=True)

    # wipe and vacuum the accounts table so PAGE2 captures truncation
    node.safe_psql(
        'postgres',
        'DELETE from pgbench_accounts')

    node.safe_psql(
        'postgres',
        'VACUUM pgbench_accounts')

    # PAGE2 backup with OLD binary
    backup_id = self.backup_node(
        backup_dir, 'node', node, backup_type='page', old_binary=True)

    pgdata = self.pgdata_content(node.data_dir)

    # merge chain created by old binary with new binary
    output = self.merge_backup(backup_dir, "node", backup_id)

    # check that in-place merge was used (no incompatibility warning)
    self.assertNotIn(
        "WARNING: In-place merge is disabled "
        "because of storage format incompatibility", output)

    # restore merged backup
    node_restored = self.make_simple_node(
        base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
    node_restored.cleanup()

    self.restore_node(backup_dir, 'node', node_restored)

    pgdata_restored = self.pgdata_content(node_restored.data_dir)
    self.compare_pgdata(pgdata, pgdata_restored)
1122
1123# @unittest.skip("skip")
1124def test_page_vacuum_truncate(self):
1125"""
1126make node, create table, take full backup,
1127delete all data, vacuum relation,
1128take page backup, insert some data,
1129take second page backup,
1130restore latest page backup using new binary
1131and check data correctness
1132old binary should be 2.2.x version
1133"""
1134backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
1135node = self.make_simple_node(
1136base_dir=os.path.join(self.module_name, self.fname, 'node'),
1137set_replication=True,
1138initdb_params=['--data-checksums'])
1139
1140self.init_pb(backup_dir, old_binary=True)
1141self.add_instance(backup_dir, 'node', node, old_binary=True)
1142self.set_archiving(backup_dir, 'node', node, old_binary=True)
1143node.slow_start()
1144
1145node.safe_psql(
1146"postgres",
1147"create sequence t_seq; "
1148"create table t_heap as select i as id, "
1149"md5(i::text) as text, "
1150"md5(repeat(i::text,10))::tsvector as tsvector "
1151"from generate_series(0,1024) i")
1152
1153node.safe_psql(
1154"postgres",
1155"vacuum t_heap")
1156
1157id1 = self.backup_node(backup_dir, 'node', node, old_binary=True)
1158pgdata1 = self.pgdata_content(node.data_dir)
1159
1160node.safe_psql(
1161"postgres",
1162"delete from t_heap")
1163
1164node.safe_psql(
1165"postgres",
1166"vacuum t_heap")
1167
1168id2 = self.backup_node(
1169backup_dir, 'node', node, backup_type='page', old_binary=True)
1170pgdata2 = self.pgdata_content(node.data_dir)
1171
1172node.safe_psql(
1173"postgres",
1174"insert into t_heap select i as id, "
1175"md5(i::text) as text, "
1176"md5(repeat(i::text,10))::tsvector as tsvector "
1177"from generate_series(0,1) i")
1178
1179id3 = self.backup_node(
1180backup_dir, 'node', node, backup_type='page', old_binary=True)
1181pgdata3 = self.pgdata_content(node.data_dir)
1182
1183node_restored = self.make_simple_node(
1184base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
1185node_restored.cleanup()
1186
1187self.restore_node(
1188backup_dir, 'node', node_restored,
1189data_dir=node_restored.data_dir, backup_id=id1)
1190
1191# Physical comparison
1192pgdata_restored = self.pgdata_content(node_restored.data_dir)
1193self.compare_pgdata(pgdata1, pgdata_restored)
1194
1195self.set_auto_conf(node_restored, {'port': node_restored.port})
1196node_restored.slow_start()
1197node_restored.cleanup()
1198
1199self.restore_node(
1200backup_dir, 'node', node_restored,
1201data_dir=node_restored.data_dir, backup_id=id2)
1202
1203# Physical comparison
1204pgdata_restored = self.pgdata_content(node_restored.data_dir)
1205self.compare_pgdata(pgdata2, pgdata_restored)
1206
1207self.set_auto_conf(node_restored, {'port': node_restored.port})
1208node_restored.slow_start()
1209node_restored.cleanup()
1210
1211self.restore_node(
1212backup_dir, 'node', node_restored,
1213data_dir=node_restored.data_dir, backup_id=id3)
1214
1215# Physical comparison
1216pgdata_restored = self.pgdata_content(node_restored.data_dir)
1217self.compare_pgdata(pgdata3, pgdata_restored)
1218
1219self.set_auto_conf(node_restored, {'port': node_restored.port})
1220node_restored.slow_start()
1221node_restored.cleanup()
1222
1223# @unittest.skip("skip")
1224def test_page_vacuum_truncate_compression(self):
1225"""
1226make node, create table, take full backup,
1227delete all data, vacuum relation,
1228take page backup, insert some data,
1229take second page backup,
1230restore latest page backup using new binary
1231and check data correctness
1232old binary should be 2.2.x version
1233"""
1234backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
1235node = self.make_simple_node(
1236base_dir=os.path.join(self.module_name, self.fname, 'node'),
1237set_replication=True,
1238initdb_params=['--data-checksums'])
1239
1240self.init_pb(backup_dir, old_binary=True)
1241self.add_instance(backup_dir, 'node', node, old_binary=True)
1242self.set_archiving(backup_dir, 'node', node, old_binary=True)
1243node.slow_start()
1244
1245node.safe_psql(
1246"postgres",
1247"create sequence t_seq; "
1248"create table t_heap as select i as id, "
1249"md5(i::text) as text, "
1250"md5(repeat(i::text,10))::tsvector as tsvector "
1251"from generate_series(0,1024) i")
1252
1253node.safe_psql(
1254"postgres",
1255"vacuum t_heap")
1256
1257self.backup_node(
1258backup_dir, 'node',node, old_binary=True, options=['--compress'])
1259
1260node.safe_psql(
1261"postgres",
1262"delete from t_heap")
1263
1264node.safe_psql(
1265"postgres",
1266"vacuum t_heap")
1267
1268self.backup_node(
1269backup_dir, 'node', node, backup_type='page',
1270old_binary=True, options=['--compress'])
1271
1272node.safe_psql(
1273"postgres",
1274"insert into t_heap select i as id, "
1275"md5(i::text) as text, "
1276"md5(repeat(i::text,10))::tsvector as tsvector "
1277"from generate_series(0,1) i")
1278
1279self.backup_node(
1280backup_dir, 'node', node, backup_type='page',
1281old_binary=True, options=['--compress'])
1282
1283pgdata = self.pgdata_content(node.data_dir)
1284
1285node_restored = self.make_simple_node(
1286base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
1287node_restored.cleanup()
1288
1289self.restore_node(backup_dir, 'node', node_restored)
1290
1291# Physical comparison
1292pgdata_restored = self.pgdata_content(node_restored.data_dir)
1293self.compare_pgdata(pgdata, pgdata_restored)
1294
1295self.set_auto_conf(node_restored, {'port': node_restored.port})
1296node_restored.slow_start()
1297
1298# @unittest.skip("skip")
1299def test_page_vacuum_truncate_compressed_1(self):
1300"""
1301make node, create table, take full backup,
1302delete all data, vacuum relation,
1303take page backup, insert some data,
1304take second page backup,
1305restore latest page backup using new binary
1306and check data correctness
1307old binary should be 2.2.x version
1308"""
1309backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
1310node = self.make_simple_node(
1311base_dir=os.path.join(self.module_name, self.fname, 'node'),
1312set_replication=True,
1313initdb_params=['--data-checksums'])
1314
1315self.init_pb(backup_dir, old_binary=True)
1316self.add_instance(backup_dir, 'node', node, old_binary=True)
1317self.set_archiving(backup_dir, 'node', node, old_binary=True)
1318node.slow_start()
1319
1320node.safe_psql(
1321"postgres",
1322"create sequence t_seq; "
1323"create table t_heap as select i as id, "
1324"md5(i::text) as text, "
1325"md5(repeat(i::text,10))::tsvector as tsvector "
1326"from generate_series(0,1024) i")
1327
1328node.safe_psql(
1329"postgres",
1330"vacuum t_heap")
1331
1332id1 = self.backup_node(
1333backup_dir, 'node', node,
1334old_binary=True, options=['--compress'])
1335pgdata1 = self.pgdata_content(node.data_dir)
1336
1337node.safe_psql(
1338"postgres",
1339"delete from t_heap")
1340
1341node.safe_psql(
1342"postgres",
1343"vacuum t_heap")
1344
1345id2 = self.backup_node(
1346backup_dir, 'node', node, backup_type='page',
1347old_binary=True, options=['--compress'])
1348pgdata2 = self.pgdata_content(node.data_dir)
1349
1350node.safe_psql(
1351"postgres",
1352"insert into t_heap select i as id, "
1353"md5(i::text) as text, "
1354"md5(repeat(i::text,10))::tsvector as tsvector "
1355"from generate_series(0,1) i")
1356
1357id3 = self.backup_node(
1358backup_dir, 'node', node, backup_type='page',
1359old_binary=True, options=['--compress'])
1360pgdata3 = self.pgdata_content(node.data_dir)
1361
1362node_restored = self.make_simple_node(
1363base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
1364node_restored.cleanup()
1365
1366self.restore_node(
1367backup_dir, 'node', node_restored,
1368data_dir=node_restored.data_dir, backup_id=id1)
1369
1370# Physical comparison
1371pgdata_restored = self.pgdata_content(node_restored.data_dir)
1372self.compare_pgdata(pgdata1, pgdata_restored)
1373
1374self.set_auto_conf(node_restored, {'port': node_restored.port})
1375node_restored.slow_start()
1376node_restored.cleanup()
1377
1378self.restore_node(
1379backup_dir, 'node', node_restored,
1380data_dir=node_restored.data_dir, backup_id=id2)
1381
1382# Physical comparison
1383pgdata_restored = self.pgdata_content(node_restored.data_dir)
1384self.compare_pgdata(pgdata2, pgdata_restored)
1385
1386self.set_auto_conf(node_restored, {'port': node_restored.port})
1387node_restored.slow_start()
1388node_restored.cleanup()
1389
1390self.restore_node(
1391backup_dir, 'node', node_restored,
1392data_dir=node_restored.data_dir, backup_id=id3)
1393
1394# Physical comparison
1395pgdata_restored = self.pgdata_content(node_restored.data_dir)
1396self.compare_pgdata(pgdata3, pgdata_restored)
1397
1398self.set_auto_conf(node_restored, {'port': node_restored.port})
1399node_restored.slow_start()
1400node_restored.cleanup()
1401
1402# @unittest.skip("skip")
1403def test_hidden_files(self):
1404"""
1405old_version should be < 2.3.0
1406Create hidden file in pgdata, take backup
1407with old binary, then try to delete backup
1408with new binary
1409"""
1410backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
1411node = self.make_simple_node(
1412base_dir=os.path.join(self.module_name, self.fname, 'node'),
1413set_replication=True,
1414initdb_params=['--data-checksums'])
1415
1416self.init_pb(backup_dir, old_binary=True)
1417self.add_instance(backup_dir, 'node', node, old_binary=True)
1418node.slow_start()
1419
1420open(os.path.join(node.data_dir, ".hidden_stuff"), 'a').close()
1421
1422backup_id = self.backup_node(
1423backup_dir, 'node',node, old_binary=True, options=['--stream'])
1424
1425self.delete_pb(backup_dir, 'node', backup_id)
1426
1427# @unittest.skip("skip")
1428def test_compatibility_tablespace(self):
1429"""
1430https://github.com/postgrespro/pg_probackup/issues/348
1431"""
1432node = self.make_simple_node(
1433base_dir=os.path.join(self.module_name, self.fname, 'node'),
1434set_replication=True,
1435initdb_params=['--data-checksums'])
1436
1437backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
1438
1439self.init_pb(backup_dir)
1440self.add_instance(backup_dir, 'node', node, old_binary=True)
1441node.slow_start()
1442
1443backup_id = self.backup_node(
1444backup_dir, 'node', node, backup_type="full",
1445options=["-j", "4", "--stream"], old_binary=True)
1446
1447tblspace_old_path = self.get_tblspace_path(node, 'tblspace_old')
1448
1449self.create_tblspace_in_node(
1450node, 'tblspace',
1451tblspc_path=tblspace_old_path)
1452
1453node.safe_psql(
1454"postgres",
1455"create table t_heap_lame tablespace tblspace "
1456"as select 1 as id, md5(i::text) as text, "
1457"md5(repeat(i::text,10))::tsvector as tsvector "
1458"from generate_series(0,1000) i")
1459
1460tblspace_new_path = self.get_tblspace_path(node, 'tblspace_new')
1461
1462node_restored = self.make_simple_node(
1463base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
1464node_restored.cleanup()
1465
1466try:
1467self.restore_node(
1468backup_dir, 'node', node_restored,
1469options=[
1470"-j", "4",
1471"-T", "{0}={1}".format(
1472tblspace_old_path, tblspace_new_path)])
1473# we should die here because exception is what we expect to happen
1474self.assertEqual(
14751, 0,
1476"Expecting Error because tablespace mapping is incorrect"
1477"\n Output: {0} \n CMD: {1}".format(
1478repr(self.output), self.cmd))
1479except ProbackupException as e:
1480self.assertIn(
1481'ERROR: Backup {0} has no tablespaceses, '
1482'nothing to remap'.format(backup_id),
1483e.message,
1484'\n Unexpected Error Message: {0}\n CMD: {1}'.format(
1485repr(e.message), self.cmd))
1486
1487self.backup_node(
1488backup_dir, 'node', node, backup_type="delta",
1489options=["-j", "4", "--stream"], old_binary=True)
1490
1491self.restore_node(
1492backup_dir, 'node', node_restored,
1493options=[
1494"-j", "4",
1495"-T", "{0}={1}".format(
1496tblspace_old_path, tblspace_new_path)])
1497
1498if self.paranoia:
1499pgdata = self.pgdata_content(node.data_dir)
1500
1501if self.paranoia:
1502pgdata_restored = self.pgdata_content(node_restored.data_dir)
1503self.compare_pgdata(pgdata, pgdata_restored)
1504