git

Форк
0
/
stack_test.c 
1105 строк · 27.8 Кб
1
/*
2
Copyright 2020 Google LLC
3

4
Use of this source code is governed by a BSD-style
5
license that can be found in the LICENSE file or at
6
https://developers.google.com/open-source/licenses/bsd
7
*/
8

9
#include "stack.h"
10

11
#include "system.h"
12

13
#include "reftable-reader.h"
14
#include "merged.h"
15
#include "basics.h"
16
#include "record.h"
17
#include "test_framework.h"
18
#include "reftable-tests.h"
19
#include "reader.h"
20

21
#include <sys/types.h>
22
#include <dirent.h>
23

24
static void clear_dir(const char *dirname)
25
{
26
	struct strbuf path = STRBUF_INIT;
27
	strbuf_addstr(&path, dirname);
28
	remove_dir_recursively(&path, 0);
29
	strbuf_release(&path);
30
}
31

32
static int count_dir_entries(const char *dirname)
33
{
34
	DIR *dir = opendir(dirname);
35
	int len = 0;
36
	struct dirent *d;
37
	if (!dir)
38
		return 0;
39

40
	while ((d = readdir(dir))) {
41
		/*
42
		 * Besides skipping over "." and "..", we also need to
43
		 * skip over other files that have a leading ".". This
44
		 * is due to behaviour of NFS, which will rename files
45
		 * to ".nfs*" to emulate delete-on-last-close.
46
		 *
47
		 * In any case this should be fine as the reftable
48
		 * library will never write files with leading dots
49
		 * anyway.
50
		 */
51
		if (starts_with(d->d_name, "."))
52
			continue;
53
		len++;
54
	}
55
	closedir(dir);
56
	return len;
57
}
58

59
/*
 * Work linenumber into the tempdir, so we can see which tests forget to
 * cleanup.
 *
 * Returns a pointer to a static buffer, so the result is only valid
 * until the next call.
 */
static char *get_tmp_template(int linenumber)
{
	const char *tmp = getenv("TMPDIR");
	static char template[1024];

	/*
	 * snprintf() already bounds its output to the given size
	 * including the NUL terminator, so pass the full buffer size;
	 * the previous `sizeof(template) - 1` needlessly gave up a byte.
	 */
	snprintf(template, sizeof(template), "%s/stack_test-%d.XXXXXX",
		 tmp ? tmp : "/tmp", linenumber);
	return template;
}
71

72
/* Create and return a fresh temporary directory tagged with `linenumber`. */
static char *get_tmp_dir(int linenumber)
{
	char *tmpdir = get_tmp_template(linenumber);

	EXPECT(mkdtemp(tmpdir));
	return tmpdir;
}
78

79
/*
 * Check that read_lines() splits a file on newlines and drops empty
 * lines.
 */
static void test_read_file(void)
{
	char *fn = get_tmp_template(__LINE__);
	int fd = mkstemp(fn);
	char out[1024] = "line1\n\nline2\nline3";
	int n, err;
	char **names = NULL;
	const char *want[] = { "line1", "line2", "line3" };
	int i = 0;

	/* 0 is a valid file descriptor; only negative values indicate error. */
	EXPECT(fd >= 0);
	n = write_in_full(fd, out, strlen(out));
	EXPECT(n == strlen(out));
	err = close(fd);
	EXPECT(err >= 0);

	err = read_lines(fn, &names);
	EXPECT_ERR(err);

	/* The empty line must have been skipped, the rest kept in order. */
	for (i = 0; names[i]; i++) {
		EXPECT(0 == strcmp(want[i], names[i]));
	}
	free_names(names);
	(void) remove(fn);
}
104

105
static int write_test_ref(struct reftable_writer *wr, void *arg)
106
{
107
	struct reftable_ref_record *ref = arg;
108
	reftable_writer_set_limits(wr, ref->update_index, ref->update_index);
109
	return reftable_writer_add_ref(wr, ref);
110
}
111

112
static void write_n_ref_tables(struct reftable_stack *st,
113
			       size_t n)
114
{
115
	struct strbuf buf = STRBUF_INIT;
116
	int disable_auto_compact;
117
	int err;
118

119
	disable_auto_compact = st->opts.disable_auto_compact;
120
	st->opts.disable_auto_compact = 1;
121

122
	for (size_t i = 0; i < n; i++) {
123
		struct reftable_ref_record ref = {
124
			.update_index = reftable_stack_next_update_index(st),
125
			.value_type = REFTABLE_REF_VAL1,
126
		};
127

128
		strbuf_addf(&buf, "refs/heads/branch-%04u", (unsigned) i);
129
		ref.refname = buf.buf;
130
		set_test_hash(ref.value.val1, i);
131

132
		err = reftable_stack_add(st, &write_test_ref, &ref);
133
		EXPECT_ERR(err);
134
	}
135

136
	st->opts.disable_auto_compact = disable_auto_compact;
137
	strbuf_release(&buf);
138
}
139

140
/* Bundles a log record with the update index it should be written at. */
struct write_log_arg {
	struct reftable_log_record *log;
	uint64_t update_index;
};
144

145
static int write_test_log(struct reftable_writer *wr, void *arg)
146
{
147
	struct write_log_arg *wla = arg;
148

149
	reftable_writer_set_limits(wr, wla->update_index, wla->update_index);
150
	return reftable_writer_add_log(wr, wla->log);
151
}
152

153
/*
 * Add a single symref to a fresh stack, read it back, and verify that
 * both "tables.list" and the written table honor
 * opts.default_permissions despite a restrictive umask.
 */
static void test_reftable_stack_add_one(void)
{
	char *dir = get_tmp_dir(__LINE__);
	struct strbuf scratch = STRBUF_INIT;
	/* Set a umask that would strip group-write if permissions were
	 * not applied explicitly; restored at the end. */
	int mask = umask(002);
	struct reftable_write_options opts = {
		.default_permissions = 0660,
	};
	struct reftable_stack *st = NULL;
	int err;
	struct reftable_ref_record ref = {
		.refname = (char *) "HEAD",
		.update_index = 1,
		.value_type = REFTABLE_REF_SYMREF,
		.value.symref = (char *) "master",
	};
	struct reftable_ref_record dest = { NULL };
	struct stat stat_result = { 0 };
	err = reftable_new_stack(&st, dir, &opts);
	EXPECT_ERR(err);

	err = reftable_stack_add(st, &write_test_ref, &ref);
	EXPECT_ERR(err);

	err = reftable_stack_read_ref(st, ref.refname, &dest);
	EXPECT_ERR(err);
	EXPECT(0 == strcmp("master", dest.value.symref));
	EXPECT(st->readers_len > 0);

#ifndef GIT_WINDOWS_NATIVE
	/* Verify that "tables.list" was created with the requested mode. */
	strbuf_addstr(&scratch, dir);
	strbuf_addstr(&scratch, "/tables.list");
	err = stat(scratch.buf, &stat_result);
	EXPECT(!err);
	EXPECT((stat_result.st_mode & 0777) == opts.default_permissions);

	/* Same check for the table file itself. */
	strbuf_reset(&scratch);
	strbuf_addstr(&scratch, dir);
	strbuf_addstr(&scratch, "/");
	/* do not try at home; not an external API for reftable. */
	strbuf_addstr(&scratch, st->readers[0]->name);
	err = stat(scratch.buf, &stat_result);
	EXPECT(!err);
	EXPECT((stat_result.st_mode & 0777) == opts.default_permissions);
#else
	(void) stat_result;
#endif

	reftable_ref_record_release(&dest);
	reftable_stack_destroy(st);
	strbuf_release(&scratch);
	clear_dir(dir);
	umask(mask);
}
207

208
/*
 * A write through a stack that has not seen a concurrent writer's
 * update must fail with REFTABLE_OUTDATED_ERROR and succeed after a
 * reload.
 */
static void test_reftable_stack_uptodate(void)
{
	struct reftable_write_options opts = { 0 };
	struct reftable_stack *st1 = NULL;
	struct reftable_stack *st2 = NULL;
	char *dir = get_tmp_dir(__LINE__);

	int err;
	struct reftable_ref_record ref1 = {
		.refname = (char *) "HEAD",
		.update_index = 1,
		.value_type = REFTABLE_REF_SYMREF,
		.value.symref = (char *) "master",
	};
	struct reftable_ref_record ref2 = {
		.refname = (char *) "branch2",
		.update_index = 2,
		.value_type = REFTABLE_REF_SYMREF,
		.value.symref = (char *) "master",
	};


	/* simulate multi-process access to the same stack
	   by creating two stacks for the same directory.
	 */
	err = reftable_new_stack(&st1, dir, &opts);
	EXPECT_ERR(err);

	err = reftable_new_stack(&st2, dir, &opts);
	EXPECT_ERR(err);

	err = reftable_stack_add(st1, &write_test_ref, &ref1);
	EXPECT_ERR(err);

	/* st2 has not seen st1's addition yet, so its write must fail. */
	err = reftable_stack_add(st2, &write_test_ref, &ref2);
	EXPECT(err == REFTABLE_OUTDATED_ERROR);

	err = reftable_stack_reload(st2);
	EXPECT_ERR(err);

	/* After reloading, the same write succeeds. */
	err = reftable_stack_add(st2, &write_test_ref, &ref2);
	EXPECT_ERR(err);
	reftable_stack_destroy(st1);
	reftable_stack_destroy(st2);
	clear_dir(dir);
}
254

255
/*
 * Exercise the explicit transaction API: new_addition / add / commit,
 * then read the written ref back.
 */
static void test_reftable_stack_transaction_api(void)
{
	char *dir = get_tmp_dir(__LINE__);
	struct reftable_write_options opts = { 0 };
	struct reftable_stack *st = NULL;
	int err;
	struct reftable_addition *add = NULL;

	struct reftable_ref_record ref = {
		.refname = (char *) "HEAD",
		.update_index = 1,
		.value_type = REFTABLE_REF_SYMREF,
		.value.symref = (char *) "master",
	};
	struct reftable_ref_record dest = { NULL };

	err = reftable_new_stack(&st, dir, &opts);
	EXPECT_ERR(err);

	/* Destroying a NULL addition must be a harmless no-op. */
	reftable_addition_destroy(add);

	err = reftable_stack_new_addition(&add, st);
	EXPECT_ERR(err);

	err = reftable_addition_add(add, &write_test_ref, &ref);
	EXPECT_ERR(err);

	err = reftable_addition_commit(add);
	EXPECT_ERR(err);

	reftable_addition_destroy(add);

	/* The committed ref must now be visible through the stack. */
	err = reftable_stack_read_ref(st, ref.refname, &dest);
	EXPECT_ERR(err);
	EXPECT(REFTABLE_REF_SYMREF == dest.value_type);
	EXPECT(0 == strcmp("master", dest.value.symref));

	reftable_ref_record_release(&dest);
	reftable_stack_destroy(st);
	clear_dir(dir);
}
296

297
/*
 * Committing via the transaction API must trigger auto-compaction when
 * it is enabled: the stack grows by one table per commit while
 * compaction is disabled and collapses to a single table once enabled.
 */
static void test_reftable_stack_transaction_api_performs_auto_compaction(void)
{
	char *dir = get_tmp_dir(__LINE__);
	struct reftable_write_options opts = {0};
	struct reftable_addition *add = NULL;
	struct reftable_stack *st = NULL;
	int i, n = 20, err;

	err = reftable_new_stack(&st, dir, &opts);
	EXPECT_ERR(err);

	for (i = 0; i <= n; i++) {
		struct reftable_ref_record ref = {
			.update_index = reftable_stack_next_update_index(st),
			.value_type = REFTABLE_REF_SYMREF,
			.value.symref = (char *) "master",
		};
		char name[100];

		snprintf(name, sizeof(name), "branch%04d", i);
		ref.refname = name;

		/*
		 * Disable auto-compaction for all but the last runs. Like this
		 * we can ensure that we indeed honor this setting and have
		 * better control over when exactly auto compaction runs.
		 */
		st->opts.disable_auto_compact = i != n;

		err = reftable_stack_new_addition(&add, st);
		EXPECT_ERR(err);

		err = reftable_addition_add(add, &write_test_ref, &ref);
		EXPECT_ERR(err);

		err = reftable_addition_commit(add);
		EXPECT_ERR(err);

		reftable_addition_destroy(add);

		/*
		 * The stack length should grow continuously for all runs where
		 * auto compaction is disabled. When enabled, we should merge
		 * all tables in the stack.
		 */
		if (i != n)
			EXPECT(st->merged->readers_len == i + 1);
		else
			EXPECT(st->merged->readers_len == 1);
	}

	reftable_stack_destroy(st);
	clear_dir(dir);
}
351

352
/*
 * A failing auto-compaction (here: blocked by a stale lock file) must
 * not fail the write that triggered it; it is only recorded in the
 * compaction stats.
 */
static void test_reftable_stack_auto_compaction_fails_gracefully(void)
{
	struct reftable_ref_record ref = {
		.refname = (char *) "refs/heads/master",
		.update_index = 1,
		.value_type = REFTABLE_REF_VAL1,
		.value.val1 = {0x01},
	};
	struct reftable_write_options opts = {0};
	struct reftable_stack *st;
	struct strbuf table_path = STRBUF_INIT;
	char *dir = get_tmp_dir(__LINE__);
	int err;

	err = reftable_new_stack(&st, dir, &opts);
	EXPECT_ERR(err);

	err = reftable_stack_add(st, write_test_ref, &ref);
	EXPECT_ERR(err);
	EXPECT(st->merged->readers_len == 1);
	EXPECT(st->stats.attempts == 0);
	EXPECT(st->stats.failures == 0);

	/*
	 * Lock the newly written table such that it cannot be compacted.
	 * Adding a new table to the stack should not be impacted by this, even
	 * though auto-compaction will now fail.
	 */
	strbuf_addf(&table_path, "%s/%s.lock", dir, st->readers[0]->name);
	write_file_buf(table_path.buf, "", 0);

	ref.update_index = 2;
	err = reftable_stack_add(st, write_test_ref, &ref);
	EXPECT_ERR(err);
	/* The add succeeded, but compaction was attempted and failed once. */
	EXPECT(st->merged->readers_len == 2);
	EXPECT(st->stats.attempts == 1);
	EXPECT(st->stats.failures == 1);

	reftable_stack_destroy(st);
	strbuf_release(&table_path);
	clear_dir(dir);
}
394

395
/* Callback that fails with the error code pointed to by `arg`. */
static int write_error(struct reftable_writer *wr UNUSED, void *arg)
{
	int *code = arg;

	return *code;
}
399

400
/*
 * Writing a record whose update_index does not advance past the stack's
 * current index must be rejected with REFTABLE_API_ERROR.
 */
static void test_reftable_stack_update_index_check(void)
{
	char *dir = get_tmp_dir(__LINE__);
	struct reftable_write_options opts = { 0 };
	struct reftable_stack *st = NULL;
	int err;
	struct reftable_ref_record ref1 = {
		.refname = (char *) "name1",
		.update_index = 1,
		.value_type = REFTABLE_REF_SYMREF,
		.value.symref = (char *) "master",
	};
	/* Same update_index as ref1 on purpose: the second add must fail. */
	struct reftable_ref_record ref2 = {
		.refname = (char *) "name2",
		.update_index = 1,
		.value_type = REFTABLE_REF_SYMREF,
		.value.symref = (char *) "master",
	};

	err = reftable_new_stack(&st, dir, &opts);
	EXPECT_ERR(err);

	err = reftable_stack_add(st, &write_test_ref, &ref1);
	EXPECT_ERR(err);

	err = reftable_stack_add(st, &write_test_ref, &ref2);
	EXPECT(err == REFTABLE_API_ERROR);
	reftable_stack_destroy(st);
	clear_dir(dir);
}
430

431
/*
 * Errors returned by the write callback must be propagated verbatim by
 * reftable_stack_add(), for every reftable error code.
 */
static void test_reftable_stack_lock_failure(void)
{
	char *dir = get_tmp_dir(__LINE__);
	struct reftable_write_options opts = { 0 };
	struct reftable_stack *st = NULL;
	int err, i;

	err = reftable_new_stack(&st, dir, &opts);
	EXPECT_ERR(err);
	/* Walk all negative error codes down to REFTABLE_EMPTY_TABLE_ERROR. */
	for (i = -1; i != REFTABLE_EMPTY_TABLE_ERROR; i--) {
		err = reftable_stack_add(st, &write_error, &i);
		EXPECT(err == i);
	}

	reftable_stack_destroy(st);
	clear_dir(dir);
}
448

449
/*
 * End-to-end test: write several refs and logs, compact everything into
 * one table, read all records back, and verify file permissions honor
 * opts.default_permissions.
 */
static void test_reftable_stack_add(void)
{
	int i = 0;
	int err = 0;
	struct reftable_write_options opts = {
		.exact_log_message = 1,
		.default_permissions = 0660,
		.disable_auto_compact = 1,
	};
	struct reftable_stack *st = NULL;
	char *dir = get_tmp_dir(__LINE__);
	struct reftable_ref_record refs[2] = { { NULL } };
	struct reftable_log_record logs[2] = { { NULL } };
	struct strbuf path = STRBUF_INIT;
	struct stat stat_result;
	int N = ARRAY_SIZE(refs);

	err = reftable_new_stack(&st, dir, &opts);
	EXPECT_ERR(err);

	/* Prepare N ref records plus matching log records. */
	for (i = 0; i < N; i++) {
		char buf[256];
		snprintf(buf, sizeof(buf), "branch%02d", i);
		refs[i].refname = xstrdup(buf);
		refs[i].update_index = i + 1;
		refs[i].value_type = REFTABLE_REF_VAL1;
		set_test_hash(refs[i].value.val1, i);

		logs[i].refname = xstrdup(buf);
		/* Log update indices start after the ref updates. */
		logs[i].update_index = N + i + 1;
		logs[i].value_type = REFTABLE_LOG_UPDATE;
		logs[i].value.update.email = xstrdup("identity@invalid");
		set_test_hash(logs[i].value.update.new_hash, i);
	}

	for (i = 0; i < N; i++) {
		int err = reftable_stack_add(st, &write_test_ref, &refs[i]);
		EXPECT_ERR(err);
	}

	for (i = 0; i < N; i++) {
		struct write_log_arg arg = {
			.log = &logs[i],
			.update_index = reftable_stack_next_update_index(st),
		};
		int err = reftable_stack_add(st, &write_test_log, &arg);
		EXPECT_ERR(err);
	}

	err = reftable_stack_compact_all(st, NULL);
	EXPECT_ERR(err);

	/* All refs must survive compaction unchanged. */
	for (i = 0; i < N; i++) {
		struct reftable_ref_record dest = { NULL };

		int err = reftable_stack_read_ref(st, refs[i].refname, &dest);
		EXPECT_ERR(err);
		EXPECT(reftable_ref_record_equal(&dest, refs + i,
						 GIT_SHA1_RAWSZ));
		reftable_ref_record_release(&dest);
	}

	/* Same for the logs. */
	for (i = 0; i < N; i++) {
		struct reftable_log_record dest = { NULL };
		int err = reftable_stack_read_log(st, refs[i].refname, &dest);
		EXPECT_ERR(err);
		EXPECT(reftable_log_record_equal(&dest, logs + i,
						 GIT_SHA1_RAWSZ));
		reftable_log_record_release(&dest);
	}

#ifndef GIT_WINDOWS_NATIVE
	/* "tables.list" and the table file must carry the requested mode. */
	strbuf_addstr(&path, dir);
	strbuf_addstr(&path, "/tables.list");
	err = stat(path.buf, &stat_result);
	EXPECT(!err);
	EXPECT((stat_result.st_mode & 0777) == opts.default_permissions);

	strbuf_reset(&path);
	strbuf_addstr(&path, dir);
	strbuf_addstr(&path, "/");
	/* do not try at home; not an external API for reftable. */
	strbuf_addstr(&path, st->readers[0]->name);
	err = stat(path.buf, &stat_result);
	EXPECT(!err);
	EXPECT((stat_result.st_mode & 0777) == opts.default_permissions);
#else
	(void) stat_result;
#endif

	/* cleanup */
	reftable_stack_destroy(st);
	for (i = 0; i < N; i++) {
		reftable_ref_record_release(&refs[i]);
		reftable_log_record_release(&logs[i]);
	}
	strbuf_release(&path);
	clear_dir(dir);
}
548

549
/*
 * Log messages must be normalized to exactly one trailing newline:
 * embedded newlines are rejected with REFTABLE_API_ERROR, and a missing
 * trailing newline is added on write.
 */
static void test_reftable_stack_log_normalize(void)
{
	int err = 0;
	struct reftable_write_options opts = {
		0,
	};
	struct reftable_stack *st = NULL;
	char *dir = get_tmp_dir(__LINE__);
	struct reftable_log_record input = {
		.refname = (char *) "branch",
		.update_index = 1,
		.value_type = REFTABLE_LOG_UPDATE,
		.value = {
			.update = {
				.new_hash = { 1 },
				.old_hash = { 2 },
			},
		},
	};
	struct reftable_log_record dest = {
		.update_index = 0,
	};
	struct write_log_arg arg = {
		.log = &input,
		.update_index = 1,
	};

	err = reftable_new_stack(&st, dir, &opts);
	EXPECT_ERR(err);

	/* A message with an embedded newline is invalid. */
	input.value.update.message = (char *) "one\ntwo";
	err = reftable_stack_add(st, &write_test_log, &arg);
	EXPECT(err == REFTABLE_API_ERROR);

	/* A message without a trailing newline gets one appended. */
	input.value.update.message = (char *) "one";
	err = reftable_stack_add(st, &write_test_log, &arg);
	EXPECT_ERR(err);

	err = reftable_stack_read_log(st, input.refname, &dest);
	EXPECT_ERR(err);
	EXPECT(0 == strcmp(dest.value.update.message, "one\n"));

	/* A message that already ends in a newline is stored as-is. */
	input.value.update.message = (char *) "two\n";
	arg.update_index = 2;
	err = reftable_stack_add(st, &write_test_log, &arg);
	EXPECT_ERR(err);
	err = reftable_stack_read_log(st, input.refname, &dest);
	EXPECT_ERR(err);
	EXPECT(0 == strcmp(dest.value.update.message, "two\n"));

	/* cleanup */
	reftable_stack_destroy(st);
	reftable_log_record_release(&dest);
	clear_dir(dir);
}
604

605
/*
 * Deletion records (tombstones) must hide earlier records both before
 * and after compaction: lookups return 1 (not found) instead of the
 * deleted record.
 */
static void test_reftable_stack_tombstone(void)
{
	int i = 0;
	char *dir = get_tmp_dir(__LINE__);
	struct reftable_write_options opts = { 0 };
	struct reftable_stack *st = NULL;
	int err;
	struct reftable_ref_record refs[2] = { { NULL } };
	struct reftable_log_record logs[2] = { { NULL } };
	int N = ARRAY_SIZE(refs);
	struct reftable_ref_record dest = { NULL };
	struct reftable_log_record log_dest = { NULL };

	err = reftable_new_stack(&st, dir, &opts);
	EXPECT_ERR(err);

	/* even entries add the refs, odd entries delete them. */
	for (i = 0; i < N; i++) {
		const char *buf = "branch";
		refs[i].refname = xstrdup(buf);
		refs[i].update_index = i + 1;
		/* Odd entries keep value_type 0, i.e. a deletion record. */
		if (i % 2 == 0) {
			refs[i].value_type = REFTABLE_REF_VAL1;
			set_test_hash(refs[i].value.val1, i);
		}

		logs[i].refname = xstrdup(buf);
		/* update_index is part of the key. */
		logs[i].update_index = 42;
		if (i % 2 == 0) {
			logs[i].value_type = REFTABLE_LOG_UPDATE;
			set_test_hash(logs[i].value.update.new_hash, i);
			logs[i].value.update.email =
				xstrdup("identity@invalid");
		}
	}
	for (i = 0; i < N; i++) {
		int err = reftable_stack_add(st, &write_test_ref, &refs[i]);
		EXPECT_ERR(err);
	}

	for (i = 0; i < N; i++) {
		struct write_log_arg arg = {
			.log = &logs[i],
			.update_index = reftable_stack_next_update_index(st),
		};
		int err = reftable_stack_add(st, &write_test_log, &arg);
		EXPECT_ERR(err);
	}

	/* The tombstone shadows the add: lookups report "not found". */
	err = reftable_stack_read_ref(st, "branch", &dest);
	EXPECT(err == 1);
	reftable_ref_record_release(&dest);

	err = reftable_stack_read_log(st, "branch", &log_dest);
	EXPECT(err == 1);
	reftable_log_record_release(&log_dest);

	err = reftable_stack_compact_all(st, NULL);
	EXPECT_ERR(err);

	/* Still hidden after compaction. */
	err = reftable_stack_read_ref(st, "branch", &dest);
	EXPECT(err == 1);

	err = reftable_stack_read_log(st, "branch", &log_dest);
	EXPECT(err == 1);
	reftable_ref_record_release(&dest);
	reftable_log_record_release(&log_dest);

	/* cleanup */
	reftable_stack_destroy(st);
	for (i = 0; i < N; i++) {
		reftable_ref_record_release(&refs[i]);
		reftable_log_record_release(&logs[i]);
	}
	clear_dir(dir);
}
682

683
/*
 * A stack written with one hash function must refuse to open under a
 * different hash_id, but still open with default (unspecified) options.
 */
static void test_reftable_stack_hash_id(void)
{
	char *dir = get_tmp_dir(__LINE__);
	struct reftable_write_options opts = { 0 };
	struct reftable_stack *st = NULL;
	int err;

	struct reftable_ref_record ref = {
		.refname = (char *) "master",
		.value_type = REFTABLE_REF_SYMREF,
		.value.symref = (char *) "target",
		.update_index = 1,
	};
	struct reftable_write_options opts32 = { .hash_id = GIT_SHA256_FORMAT_ID };
	struct reftable_stack *st32 = NULL;
	struct reftable_write_options opts_default = { 0 };
	struct reftable_stack *st_default = NULL;
	struct reftable_ref_record dest = { NULL };

	err = reftable_new_stack(&st, dir, &opts);
	EXPECT_ERR(err);

	err = reftable_stack_add(st, &write_test_ref, &ref);
	EXPECT_ERR(err);

	/* can't read it with the wrong hash ID. */
	err = reftable_new_stack(&st32, dir, &opts32);
	EXPECT(err == REFTABLE_FORMAT_ERROR);

	/* check that we can read it back with default opts too. */
	err = reftable_new_stack(&st_default, dir, &opts_default);
	EXPECT_ERR(err);

	err = reftable_stack_read_ref(st_default, "master", &dest);
	EXPECT_ERR(err);

	EXPECT(reftable_ref_record_equal(&ref, &dest, GIT_SHA1_RAWSZ));
	reftable_ref_record_release(&dest);
	reftable_stack_destroy(st);
	reftable_stack_destroy(st_default);
	clear_dir(dir);
}
725

726
static void test_suggest_compaction_segment(void)
727
{
728
	uint64_t sizes[] = { 512, 64, 17, 16, 9, 9, 9, 16, 2, 16 };
729
	struct segment min =
730
		suggest_compaction_segment(sizes, ARRAY_SIZE(sizes), 2);
731
	EXPECT(min.start == 1);
732
	EXPECT(min.end == 10);
733
}
734

735
static void test_suggest_compaction_segment_nothing(void)
736
{
737
	uint64_t sizes[] = { 64, 32, 16, 8, 4, 2 };
738
	struct segment result =
739
		suggest_compaction_segment(sizes, ARRAY_SIZE(sizes), 2);
740
	EXPECT(result.start == result.end);
741
}
742

743
/*
 * Compacting with an expiry policy must drop log records older than the
 * configured time / min_update_index thresholds while keeping newer
 * ones.
 */
static void test_reflog_expire(void)
{
	char *dir = get_tmp_dir(__LINE__);
	struct reftable_write_options opts = { 0 };
	struct reftable_stack *st = NULL;
	struct reftable_log_record logs[20] = { { NULL } };
	/* Slot 0 stays unused; records are indexed 1..N. */
	int N = ARRAY_SIZE(logs) - 1;
	int i = 0;
	int err;
	struct reftable_log_expiry_config expiry = {
		.time = 10,
	};
	struct reftable_log_record log = { NULL };

	err = reftable_new_stack(&st, dir, &opts);
	EXPECT_ERR(err);

	/* Each record i carries timestamp i so expiry.time = 10 splits them. */
	for (i = 1; i <= N; i++) {
		char buf[256];
		snprintf(buf, sizeof(buf), "branch%02d", i);

		logs[i].refname = xstrdup(buf);
		logs[i].update_index = i;
		logs[i].value_type = REFTABLE_LOG_UPDATE;
		logs[i].value.update.time = i;
		logs[i].value.update.email = xstrdup("identity@invalid");
		set_test_hash(logs[i].value.update.new_hash, i);
	}

	for (i = 1; i <= N; i++) {
		struct write_log_arg arg = {
			.log = &logs[i],
			.update_index = reftable_stack_next_update_index(st),
		};
		int err = reftable_stack_add(st, &write_test_log, &arg);
		EXPECT_ERR(err);
	}

	err = reftable_stack_compact_all(st, NULL);
	EXPECT_ERR(err);

	err = reftable_stack_compact_all(st, &expiry);
	EXPECT_ERR(err);

	/* Records with time <= 10 are expired (lookup returns 1, not found). */
	err = reftable_stack_read_log(st, logs[9].refname, &log);
	EXPECT(err == 1);

	err = reftable_stack_read_log(st, logs[11].refname, &log);
	EXPECT_ERR(err);

	/* Raising min_update_index expires records 11..14 as well. */
	expiry.min_update_index = 15;
	err = reftable_stack_compact_all(st, &expiry);
	EXPECT_ERR(err);

	err = reftable_stack_read_log(st, logs[14].refname, &log);
	EXPECT(err == 1);

	err = reftable_stack_read_log(st, logs[16].refname, &log);
	EXPECT_ERR(err);

	/* cleanup */
	reftable_stack_destroy(st);
	for (i = 0; i <= N; i++) {
		reftable_log_record_release(&logs[i]);
	}
	clear_dir(dir);
	reftable_log_record_release(&log);
}
811

812
/* Callback that sets writer limits but adds no records at all. */
static int write_nothing(struct reftable_writer *wr, void *arg UNUSED)
{
	reftable_writer_set_limits(wr, 1, 1);
	return 0;
}
817

818
/*
 * Adding a table with zero records must succeed and leave the stack in
 * a state that can still be opened by a second reader.
 */
static void test_empty_add(void)
{
	struct reftable_write_options opts = { 0 };
	struct reftable_stack *st = NULL;
	int err;
	char *dir = get_tmp_dir(__LINE__);
	struct reftable_stack *st2 = NULL;

	err = reftable_new_stack(&st, dir, &opts);
	EXPECT_ERR(err);

	err = reftable_stack_add(st, &write_nothing, NULL);
	EXPECT_ERR(err);

	/* A fresh stack on the same dir must still open cleanly. */
	err = reftable_new_stack(&st2, dir, &opts);
	EXPECT_ERR(err);
	/* NOTE(review): dir is cleared before the stacks are destroyed;
	 * presumably intentional, but worth confirming. */
	clear_dir(dir);
	reftable_stack_destroy(st);
	reftable_stack_destroy(st2);
}
838

839
/* Integer floor(log2(sz)); returns 0 for sz == 0. */
static int fastlog2(uint64_t sz)
{
	int log = -1;

	while (sz) {
		log++;
		sz >>= 1;
	}
	return log < 0 ? 0 : log;
}
848

849
/*
 * Explicit reftable_stack_auto_compact() calls must keep the stack
 * depth logarithmic in the number of additions and keep total rewrite
 * work bounded.
 */
static void test_reftable_stack_auto_compaction(void)
{
	struct reftable_write_options opts = {
		.disable_auto_compact = 1,
	};
	struct reftable_stack *st = NULL;
	char *dir = get_tmp_dir(__LINE__);
	int err, i;
	int N = 100;

	err = reftable_new_stack(&st, dir, &opts);
	EXPECT_ERR(err);

	for (i = 0; i < N; i++) {
		char name[100];
		struct reftable_ref_record ref = {
			.refname = name,
			.update_index = reftable_stack_next_update_index(st),
			.value_type = REFTABLE_REF_SYMREF,
			.value.symref = (char *) "master",
		};
		snprintf(name, sizeof(name), "branch%04d", i);

		err = reftable_stack_add(st, &write_test_ref, &ref);
		EXPECT_ERR(err);

		err = reftable_stack_auto_compact(st);
		EXPECT_ERR(err);
		/* Stack depth should stay O(log i) once past the first few. */
		EXPECT(i < 3 || st->merged->readers_len < 2 * fastlog2(i));
	}

	/* Total compaction work should be bounded by O(N log N) entries. */
	EXPECT(reftable_stack_compaction_stats(st)->entries_written <
	       (uint64_t)(N * fastlog2(N)));

	reftable_stack_destroy(st);
	clear_dir(dir);
}
886

887
/*
 * Auto-compaction must degrade to a best-effort compaction of the
 * unlocked newest tables when one of the tables is locked.
 */
static void test_reftable_stack_auto_compaction_with_locked_tables(void)
{
	struct reftable_write_options opts = {
		.disable_auto_compact = 1,
	};
	struct reftable_stack *st = NULL;
	struct strbuf buf = STRBUF_INIT;
	char *dir = get_tmp_dir(__LINE__);
	int err;

	err = reftable_new_stack(&st, dir, &opts);
	EXPECT_ERR(err);

	write_n_ref_tables(st, 5);
	EXPECT(st->merged->readers_len == 5);

	/*
	 * Given that all tables we have written should be roughly the same
	 * size, we expect that auto-compaction will want to compact all of the
	 * tables. Locking any of the tables will keep it from doing so.
	 */
	strbuf_reset(&buf);
	strbuf_addf(&buf, "%s/%s.lock", dir, st->readers[2]->name);
	write_file_buf(buf.buf, "", 0);

	/*
	 * When parts of the stack are locked, then auto-compaction does a best
	 * effort compaction of those tables which aren't locked. So while this
	 * would in theory compact all tables, due to the preexisting lock we
	 * only compact the newest two tables.
	 */
	err = reftable_stack_auto_compact(st);
	EXPECT_ERR(err);
	EXPECT(st->stats.failures == 0);
	EXPECT(st->merged->readers_len == 4);

	reftable_stack_destroy(st);
	strbuf_release(&buf);
	clear_dir(dir);
}
927

928
/*
 * reftable_stack_add() must run auto-compaction when enabled: the stack
 * grows by one table per add while compaction is disabled, and
 * collapses to one table on the final add with compaction enabled.
 */
static void test_reftable_stack_add_performs_auto_compaction(void)
{
	struct reftable_write_options opts = { 0 };
	struct reftable_stack *st = NULL;
	struct strbuf refname = STRBUF_INIT;
	char *dir = get_tmp_dir(__LINE__);
	int err, i, n = 20;

	err = reftable_new_stack(&st, dir, &opts);
	EXPECT_ERR(err);

	for (i = 0; i <= n; i++) {
		struct reftable_ref_record ref = {
			.update_index = reftable_stack_next_update_index(st),
			.value_type = REFTABLE_REF_SYMREF,
			.value.symref = (char *) "master",
		};

		/*
		 * Disable auto-compaction for all but the last runs. Like this
		 * we can ensure that we indeed honor this setting and have
		 * better control over when exactly auto compaction runs.
		 */
		st->opts.disable_auto_compact = i != n;

		strbuf_reset(&refname);
		strbuf_addf(&refname, "branch-%04d", i);
		ref.refname = refname.buf;

		err = reftable_stack_add(st, &write_test_ref, &ref);
		EXPECT_ERR(err);

		/*
		 * The stack length should grow continuously for all runs where
		 * auto compaction is disabled. When enabled, we should merge
		 * all tables in the stack.
		 */
		if (i != n)
			EXPECT(st->merged->readers_len == i + 1);
		else
			EXPECT(st->merged->readers_len == 1);
	}

	reftable_stack_destroy(st);
	strbuf_release(&refname);
	clear_dir(dir);
}
975

976
/*
 * Unlike auto-compaction, a full reftable_stack_compact_all() must fail
 * with REFTABLE_LOCK_ERROR when any table in the range is locked.
 */
static void test_reftable_stack_compaction_with_locked_tables(void)
{
	struct reftable_write_options opts = {
		.disable_auto_compact = 1,
	};
	struct reftable_stack *st = NULL;
	struct strbuf buf = STRBUF_INIT;
	char *dir = get_tmp_dir(__LINE__);
	int err;

	err = reftable_new_stack(&st, dir, &opts);
	EXPECT_ERR(err);

	write_n_ref_tables(st, 3);
	EXPECT(st->merged->readers_len == 3);

	/* Lock one of the tables that we're about to compact. */
	strbuf_reset(&buf);
	strbuf_addf(&buf, "%s/%s.lock", dir, st->readers[1]->name);
	write_file_buf(buf.buf, "", 0);

	/*
	 * Compaction is expected to fail given that we were not able to
	 * compact all tables.
	 */
	err = reftable_stack_compact_all(st, NULL);
	EXPECT(err == REFTABLE_LOCK_ERROR);
	EXPECT(st->stats.failures == 1);
	EXPECT(st->merged->readers_len == 3);

	reftable_stack_destroy(st);
	strbuf_release(&buf);
	clear_dir(dir);
}
1010

1011
/*
 * Compacting while a second stack holds the old tables open must leave
 * only the compacted table plus "tables.list" on disk once both stacks
 * are cleanly destroyed.
 */
static void test_reftable_stack_compaction_concurrent(void)
{
	struct reftable_write_options opts = { 0 };
	struct reftable_stack *st1 = NULL, *st2 = NULL;
	char *dir = get_tmp_dir(__LINE__);
	int err;

	err = reftable_new_stack(&st1, dir, &opts);
	EXPECT_ERR(err);
	write_n_ref_tables(st1, 3);

	/* st2 opens the pre-compaction tables, like a concurrent reader. */
	err = reftable_new_stack(&st2, dir, &opts);
	EXPECT_ERR(err);

	err = reftable_stack_compact_all(st1, NULL);
	EXPECT_ERR(err);

	reftable_stack_destroy(st1);
	reftable_stack_destroy(st2);

	/* Only the compacted table and tables.list should remain. */
	EXPECT(count_dir_entries(dir) == 2);
	clear_dir(dir);
}
1034

1035
static void unclean_stack_close(struct reftable_stack *st)
1036
{
1037
	/* break abstraction boundary to simulate unclean shutdown. */
1038
	int i = 0;
1039
	for (; i < st->readers_len; i++) {
1040
		reftable_reader_free(st->readers[i]);
1041
	}
1042
	st->readers_len = 0;
1043
	FREE_AND_NULL(st->readers);
1044
}
1045

1046
/*
 * After two stacks shut down uncleanly, reftable_stack_clean() run by a
 * third stack must remove the leftover table files.
 */
static void test_reftable_stack_compaction_concurrent_clean(void)
{
	struct reftable_write_options opts = { 0 };
	struct reftable_stack *st1 = NULL, *st2 = NULL, *st3 = NULL;
	char *dir = get_tmp_dir(__LINE__);
	int err;

	err = reftable_new_stack(&st1, dir, &opts);
	EXPECT_ERR(err);
	write_n_ref_tables(st1, 3);

	err = reftable_new_stack(&st2, dir, &opts);
	EXPECT_ERR(err);

	err = reftable_stack_compact_all(st1, NULL);
	EXPECT_ERR(err);

	/* Simulate both processes dying without cleanup. */
	unclean_stack_close(st1);
	unclean_stack_close(st2);

	err = reftable_new_stack(&st3, dir, &opts);
	EXPECT_ERR(err);

	err = reftable_stack_clean(st3);
	EXPECT_ERR(err);
	/* Only the live table and tables.list should survive cleaning. */
	EXPECT(count_dir_entries(dir) == 2);

	reftable_stack_destroy(st1);
	reftable_stack_destroy(st2);
	reftable_stack_destroy(st3);

	clear_dir(dir);
}
1079

1080
/* Entry point: run all reftable stack tests. */
int stack_test_main(int argc UNUSED, const char *argv[] UNUSED)
{
	RUN_TEST(test_empty_add);
	RUN_TEST(test_read_file);
	RUN_TEST(test_reflog_expire);
	RUN_TEST(test_reftable_stack_add);
	RUN_TEST(test_reftable_stack_add_one);
	RUN_TEST(test_reftable_stack_auto_compaction);
	RUN_TEST(test_reftable_stack_auto_compaction_with_locked_tables);
	RUN_TEST(test_reftable_stack_add_performs_auto_compaction);
	RUN_TEST(test_reftable_stack_compaction_concurrent);
	RUN_TEST(test_reftable_stack_compaction_concurrent_clean);
	RUN_TEST(test_reftable_stack_compaction_with_locked_tables);
	RUN_TEST(test_reftable_stack_hash_id);
	RUN_TEST(test_reftable_stack_lock_failure);
	RUN_TEST(test_reftable_stack_log_normalize);
	RUN_TEST(test_reftable_stack_tombstone);
	RUN_TEST(test_reftable_stack_transaction_api);
	RUN_TEST(test_reftable_stack_transaction_api_performs_auto_compaction);
	RUN_TEST(test_reftable_stack_auto_compaction_fails_gracefully);
	RUN_TEST(test_reftable_stack_update_index_check);
	RUN_TEST(test_reftable_stack_uptodate);
	RUN_TEST(test_suggest_compaction_segment);
	RUN_TEST(test_suggest_compaction_segment_nothing);
	return 0;
}
1106

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.