pg_probackup

Форк
0
/
false_positive_test.py 
337 строк · 12.2 Кб
1
import unittest
2
import os
3
from time import sleep
4

5
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
6
from datetime import datetime, timedelta
7
import subprocess
8

9

10
class FalsePositive(ProbackupTest, unittest.TestCase):
    """Tests documenting known false positives / bugs in pg_probackup.

    Every test here is decorated with @unittest.expectedFailure: each one
    demonstrates a known defect and is expected to fail until the
    corresponding bug is fixed.
    """

    # @unittest.skip("skip")
    @unittest.expectedFailure
    def test_validate_wal_lost_segment(self):
        """
        Loose segment located between backups. ExpectedFailure. This is BUG
        """
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        self.backup_node(backup_dir, 'node', node)

        # make some wals
        node.pgbench_init(scale=5)

        # delete last wal segment
        wals_dir = os.path.join(backup_dir, "wal", 'node')
        wals = [f for f in os.listdir(wals_dir) if os.path.isfile(
            os.path.join(wals_dir, f)) and not f.endswith('.backup')]
        # WAL segment file names are fixed-width hexadecimal strings, so the
        # lexicographically greatest name is the newest segment.  (The old
        # `map(int, wals)` approach raised ValueError on names containing
        # hex digits a-f and lost the zero padding when reassembling the
        # file name with '0000000' + str(...).)
        os.remove(os.path.join(wals_dir, max(wals)))

        # We just lost a wal segment and know nothing about it
        self.backup_node(backup_dir, 'node', node)
        self.assertTrue(
            'validation completed successfully' in self.validate_pb(
                backup_dir, 'node'))
        ########

    @unittest.expectedFailure
    # Need to force validation of ancestor-chain
    def test_incremental_backup_corrupt_full_1(self):
        """page-level backup with corrupted full backup"""
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        backup_id = self.backup_node(backup_dir, 'node', node)
        # Corrupt the FULL backup by deleting one of its files.
        # NOTE(review): backup_id is decoded here while other tests in this
        # file use it as str directly -- confirm what backup_node() returns.
        conf_path = os.path.join(
            backup_dir, "backups", "node",
            backup_id.decode("utf-8"), "database", "postgresql.conf")
        os.remove(conf_path)

        # A page backup on top of a corrupted FULL must be rejected.
        # (The original code additionally called assertFalse(True, ...)
        # inside the except clause -- forcing failure even when the expected
        # error arrived -- and carried an unreachable duplicate
        # `except ProbackupException` handler; both were copy/paste defects
        # and have been removed.)
        try:
            self.backup_node(backup_dir, 'node', node, backup_type="page")
            # we should die here because exception is what we expect to happen
            self.assertEqual(
                1, 0,
                "Expecting Error because page backup should not be "
                "possible without valid full backup.\n "
                "Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertEqual(
                e.message,
                'ERROR: Valid full backup on current timeline is not found. '
                'Create new FULL backup before an incremental one.\n',
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))

        # NOTE(review): sibling tests read the lowercase 'status' key;
        # confirm which spelling show_pb() actually emits here.
        self.assertEqual(
            self.show_pb(backup_dir, 'node')[0]['Status'], "ERROR")

    # @unittest.skip("skip")
    @unittest.expectedFailure
    def test_pg_10_waldir(self):
        """
        Test that a WAL directory created via initdb --waldir (PG >= 10)
        is restored as the pg_wal symlink it originally was.
        """
        if self.pg_config_version < self.version_to_num('10.0'):
            self.skipTest('You need PostgreSQL >= 10 for this test')

        wal_dir = os.path.join(
            os.path.join(self.tmp_path, self.module_name, self.fname), 'wal_dir')
        import shutil
        shutil.rmtree(wal_dir, ignore_errors=True)
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=[
                '--data-checksums',
                '--waldir={0}'.format(wal_dir)])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        # take FULL backup
        self.backup_node(
            backup_dir, 'node', node, options=['--stream'])

        pgdata = self.pgdata_content(node.data_dir)

        # restore backup
        node_restored = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
        node_restored.cleanup()

        self.restore_node(
            backup_dir, 'node', node_restored)

        # compare pgdata permissions
        pgdata_restored = self.pgdata_content(node_restored.data_dir)
        self.compare_pgdata(pgdata, pgdata_restored)

        self.assertTrue(
            os.path.islink(os.path.join(node_restored.data_dir, 'pg_wal')),
            'pg_wal should be symlink')

    @unittest.expectedFailure
    # @unittest.skip("skip")
    def test_recovery_target_time_backup_victim(self):
        """
        Check that for validation to recovery target
        probackup chooses valid backup
        https://github.com/postgrespro/pg_probackup/issues/104
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # FULL backup
        self.backup_node(backup_dir, 'node', node)

        node.safe_psql(
            "postgres",
            "create table t_heap as select 1 as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,10000) i")

        target_time = node.safe_psql(
            "postgres",
            "select now()").rstrip()

        node.safe_psql(
            "postgres",
            "create table t_heap1 as select 1 as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,100) i")

        gdb = self.backup_node(backup_dir, 'node', node, gdb=True)

        # Attention! This breakpoint is set to a probackup internal function, not a postgres core one
        gdb.set_breakpoint('pg_stop_backup')
        gdb.run_until_break()
        gdb.remove_all_breakpoints()
        # Kill the backup mid-flight so it ends up in ERROR state.
        gdb._execute('signal SIGINT')
        gdb.continue_execution_until_error()

        backup_id = self.show_pb(backup_dir, 'node')[1]['id']

        self.assertEqual(
            'ERROR',
            self.show_pb(backup_dir, 'node', backup_id)['status'],
            'Backup STATUS should be "ERROR"')

        # Validation must pick the valid FULL backup, not the broken one.
        self.validate_pb(
            backup_dir, 'node',
            options=['--recovery-target-time={0}'.format(target_time)])

    @unittest.expectedFailure
    # @unittest.skip("skip")
    def test_recovery_target_lsn_backup_victim(self):
        """
        Check that for validation to recovery target
        probackup chooses valid backup
        https://github.com/postgrespro/pg_probackup/issues/104
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'])

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.slow_start()

        # FULL backup
        self.backup_node(backup_dir, 'node', node)

        node.safe_psql(
            "postgres",
            "create table t_heap as select 1 as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,10000) i")

        node.safe_psql(
            "postgres",
            "create table t_heap1 as select 1 as id, md5(i::text) as text, "
            "md5(repeat(i::text,10))::tsvector as tsvector "
            "from generate_series(0,100) i")

        gdb = self.backup_node(
            backup_dir, 'node', node,
            options=['--log-level-console=LOG'], gdb=True)

        # Attention! This breakpoint is set to a probackup internal function, not a postgres core one
        gdb.set_breakpoint('pg_stop_backup')
        gdb.run_until_break()
        gdb.remove_all_breakpoints()
        # Kill the backup mid-flight so it ends up in ERROR state.
        gdb._execute('signal SIGINT')
        gdb.continue_execution_until_error()

        backup_id = self.show_pb(backup_dir, 'node')[1]['id']

        self.assertEqual(
            'ERROR',
            self.show_pb(backup_dir, 'node', backup_id)['status'],
            'Backup STATUS should be "ERROR"')

        self.switch_wal_segment(node)

        target_lsn = self.show_pb(backup_dir, 'node', backup_id)['start-lsn']

        # Validation must pick the valid FULL backup, not the broken one.
        self.validate_pb(
            backup_dir, 'node',
            options=['--recovery-target-lsn={0}'.format(target_lsn)])

    # @unittest.skip("skip")
    @unittest.expectedFailure
    def test_streaming_timeout(self):
        """
        Illustrate the problem of losing the exact error
        message because our WAL streaming engine is "borrowed"
        from pg_receivexlog
        """
        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={
                'checkpoint_timeout': '1h',
                'wal_sender_timeout': '5s'})

        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        node.slow_start()

        # FULL backup
        gdb = self.backup_node(
            backup_dir, 'node', node, gdb=True,
            options=['--stream', '--log-level-file=LOG'])

        # Attention! This breakpoint is set to a probackup internal function, not a postgres core one
        gdb.set_breakpoint('pg_stop_backup')
        gdb.run_until_break()

        # Stall longer than wal_sender_timeout so the server drops the
        # replication connection while the backup is paused in gdb.
        sleep(10)
        gdb.continue_execution_until_error()
        gdb._execute('detach')
        sleep(2)

        log_file_path = os.path.join(backup_dir, 'log', 'pg_probackup.log')
        with open(log_file_path) as f:
            log_content = f.read()

        self.assertIn(
            'could not receive data from WAL stream',
            log_content)

        self.assertIn(
            'ERROR: Problem in receivexlog',
            log_content)

    # @unittest.skip("skip")
    @unittest.expectedFailure
    def test_validate_all_empty_catalog(self):
        """
        Validation of an empty backup catalog must fail with a clear error.
        """
        node = self.make_simple_node(
            base_dir=os.path.join(self.module_name, self.fname, 'node'),
            initdb_params=['--data-checksums'])

        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
        self.init_pb(backup_dir)

        try:
            self.validate_pb(backup_dir)
            self.assertEqual(
                1, 0,
                "Expecting Error because backup_dir is empty.\n "
                "Output: {0} \n CMD: {1}".format(
                    repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertIn(
                'ERROR: This backup catalog contains no backup instances',
                e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                    repr(e.message), self.cmd))
Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.