2
Copyright 2020 Google LLC
4
Use of this source code is governed by a BSD-style
5
license that can be found in the LICENSE file or at
6
https://developers.google.com/open-source/licenses/bsd
13
#include "reftable-reader.h"
17
#include "test_framework.h"
18
#include "reftable-tests.h"
24
/*
 * NOTE(review): this file is a corrupted extraction — stray source line
 * numbers are interleaved with the code and many statements (braces,
 * declarations, EXPECT checks) are missing. Comments document intent only;
 * restore the code from the original source before compiling.
 */
/* Recursively delete the test's temporary directory. */
static void clear_dir(const char *dirname)
26
struct strbuf path = STRBUF_INIT;
27
strbuf_addstr(&path, dirname);
28
remove_dir_recursively(&path, 0);
29
strbuf_release(&path);
32
/*
 * Count directory entries, ignoring any name with a leading dot.
 * NOTE(review): readdir loop body and return statement are missing in this
 * extraction — confirm against the original source.
 */
static int count_dir_entries(const char *dirname)
34
DIR *dir = opendir(dirname);
40
while ((d = readdir(dir))) {
42
* Besides skipping over "." and "..", we also need to
43
* skip over other files that have a leading ".". This
44
* is due to behaviour of NFS, which will rename files
45
* to ".nfs*" to emulate delete-on-last-close.
47
* In any case this should be fine as the reftable
48
* library will never write files with leading dots
51
if (starts_with(d->d_name, "."))
60
/* NOTE(review): the enclosing comment block is truncated in this copy. */
* Work linenumber into the tempdir, so we can see which tests forget to
63
/*
 * Build a mkdtemp()-style template under $TMPDIR (or /tmp), embedding the
 * caller's line number. Returns a pointer to static storage — not
 * reentrant, and each call overwrites the previous template.
 */
static char *get_tmp_template(int linenumber)
65
const char *tmp = getenv("TMPDIR");
66
static char template[1024];
67
snprintf(template, sizeof(template) - 1, "%s/stack_test-%d.XXXXXX",
68
tmp ? tmp : "/tmp", linenumber);
72
/*
 * Create and return a fresh temporary directory for one test.
 * NOTE(review): the mkdtemp() call and return are missing in this copy.
 */
static char *get_tmp_dir(int linenumber)
74
char *dir = get_tmp_template(linenumber);
79
/*
 * Write a small file with blank lines and verify read_lines() returns the
 * non-empty lines in order.
 */
static void test_read_file(void)
81
char *fn = get_tmp_template(__LINE__);
83
char out[1024] = "line1\n\nline2\nline3";
86
const char *want[] = { "line1", "line2", "line3" };
90
n = write_in_full(fd, out, strlen(out));
91
EXPECT(n == strlen(out));
95
err = read_lines(fn, &names);
98
for (i = 0; names[i]; i++) {
99
EXPECT(0 == strcmp(want[i], names[i]));
105
/*
 * reftable_stack_add() callback: write a single ref record, pinning the
 * writer's update-index limits to the record's own update_index.
 */
static int write_test_ref(struct reftable_writer *wr, void *arg)
107
struct reftable_ref_record *ref = arg;
108
reftable_writer_set_limits(wr, ref->update_index, ref->update_index);
109
return reftable_writer_add_ref(wr, ref);
112
/*
 * Append n single-ref tables to the stack with auto-compaction temporarily
 * disabled, restoring the caller's disable_auto_compact setting afterwards.
 * NOTE(review): the `size_t n` parameter line and EXPECT on `err` are
 * missing in this extraction.
 */
static void write_n_ref_tables(struct reftable_stack *st,
115
struct strbuf buf = STRBUF_INIT;
116
int disable_auto_compact;
119
disable_auto_compact = st->opts.disable_auto_compact;
120
st->opts.disable_auto_compact = 1;
122
for (size_t i = 0; i < n; i++) {
123
struct reftable_ref_record ref = {
124
.update_index = reftable_stack_next_update_index(st),
125
.value_type = REFTABLE_REF_VAL1,
128
strbuf_addf(&buf, "refs/heads/branch-%04u", (unsigned) i);
129
ref.refname = buf.buf;
130
set_test_hash(ref.value.val1, i);
132
err = reftable_stack_add(st, &write_test_ref, &ref);
136
st->opts.disable_auto_compact = disable_auto_compact;
137
strbuf_release(&buf);
140
/* Argument bundle for write_test_log(): which log record to write and at
 * which update index. */
struct write_log_arg {
141
struct reftable_log_record *log;
142
uint64_t update_index;
145
/*
 * reftable_stack_add() callback: write a single log record at the
 * update index given in the write_log_arg.
 */
static int write_test_log(struct reftable_writer *wr, void *arg)
147
struct write_log_arg *wla = arg;
149
reftable_writer_set_limits(wr, wla->update_index, wla->update_index);
150
return reftable_writer_add_log(wr, wla->log);
153
/*
 * Add a single symref and read it back; on POSIX also verify that
 * tables.list and the written table honor opts.default_permissions
 * despite the restrictive umask set at the top of the test.
 * NOTE(review): `int err`, EXPECT_ERR calls, umask restore and cleanup
 * lines are missing in this extraction.
 */
static void test_reftable_stack_add_one(void)
155
char *dir = get_tmp_dir(__LINE__);
156
struct strbuf scratch = STRBUF_INIT;
157
int mask = umask(002);
158
struct reftable_write_options opts = {
159
.default_permissions = 0660,
161
struct reftable_stack *st = NULL;
163
struct reftable_ref_record ref = {
164
.refname = (char *) "HEAD",
166
.value_type = REFTABLE_REF_SYMREF,
167
.value.symref = (char *) "master",
169
struct reftable_ref_record dest = { NULL };
170
struct stat stat_result = { 0 };
171
err = reftable_new_stack(&st, dir, &opts);
174
err = reftable_stack_add(st, &write_test_ref, &ref);
177
err = reftable_stack_read_ref(st, ref.refname, &dest);
179
EXPECT(0 == strcmp("master", dest.value.symref));
180
EXPECT(st->readers_len > 0);
182
#ifndef GIT_WINDOWS_NATIVE
183
strbuf_addstr(&scratch, dir);
184
strbuf_addstr(&scratch, "/tables.list");
185
err = stat(scratch.buf, &stat_result);
187
EXPECT((stat_result.st_mode & 0777) == opts.default_permissions);
189
strbuf_reset(&scratch);
190
strbuf_addstr(&scratch, dir);
191
strbuf_addstr(&scratch, "/");
192
/* do not try at home; not an external API for reftable. */
193
strbuf_addstr(&scratch, st->readers[0]->name);
194
err = stat(scratch.buf, &stat_result);
196
EXPECT((stat_result.st_mode & 0777) == opts.default_permissions);
201
reftable_ref_record_release(&dest);
202
reftable_stack_destroy(st);
203
strbuf_release(&scratch);
208
/*
 * Simulate two processes sharing one stack directory: a write through the
 * first stack makes the second stale, so its add fails with
 * REFTABLE_OUTDATED_ERROR until it reloads.
 */
static void test_reftable_stack_uptodate(void)
210
struct reftable_write_options opts = { 0 };
211
struct reftable_stack *st1 = NULL;
212
struct reftable_stack *st2 = NULL;
213
char *dir = get_tmp_dir(__LINE__);
216
struct reftable_ref_record ref1 = {
217
.refname = (char *) "HEAD",
219
.value_type = REFTABLE_REF_SYMREF,
220
.value.symref = (char *) "master",
222
struct reftable_ref_record ref2 = {
223
.refname = (char *) "branch2",
225
.value_type = REFTABLE_REF_SYMREF,
226
.value.symref = (char *) "master",
230
/* simulate multi-process access to the same stack
231
by creating two stacks for the same directory.
233
err = reftable_new_stack(&st1, dir, &opts);
236
err = reftable_new_stack(&st2, dir, &opts);
239
err = reftable_stack_add(st1, &write_test_ref, &ref1);
242
err = reftable_stack_add(st2, &write_test_ref, &ref2);
243
EXPECT(err == REFTABLE_OUTDATED_ERROR);
245
err = reftable_stack_reload(st2);
248
err = reftable_stack_add(st2, &write_test_ref, &ref2);
250
reftable_stack_destroy(st1);
251
reftable_stack_destroy(st2);
255
/*
 * Exercise the addition (transaction) API: begin an addition, queue a ref,
 * commit, then read the ref back. Destroying a NULL addition up front
 * checks that reftable_addition_destroy(NULL) is a safe no-op.
 */
static void test_reftable_stack_transaction_api(void)
257
char *dir = get_tmp_dir(__LINE__);
258
struct reftable_write_options opts = { 0 };
259
struct reftable_stack *st = NULL;
261
struct reftable_addition *add = NULL;
263
struct reftable_ref_record ref = {
264
.refname = (char *) "HEAD",
266
.value_type = REFTABLE_REF_SYMREF,
267
.value.symref = (char *) "master",
269
struct reftable_ref_record dest = { NULL };
271
err = reftable_new_stack(&st, dir, &opts);
274
reftable_addition_destroy(add);
276
err = reftable_stack_new_addition(&add, st);
279
err = reftable_addition_add(add, &write_test_ref, &ref);
282
err = reftable_addition_commit(add);
285
reftable_addition_destroy(add);
287
err = reftable_stack_read_ref(st, ref.refname, &dest);
289
EXPECT(REFTABLE_REF_SYMREF == dest.value_type);
290
EXPECT(0 == strcmp("master", dest.value.symref));
292
reftable_ref_record_release(&dest);
293
reftable_stack_destroy(st);
297
/*
 * Commit many additions with auto-compaction disabled (stack grows by one
 * table per commit), then enable it on the final commit and verify the
 * stack collapses to a single table.
 * NOTE(review): declarations of `err`, `i`, `n`, `name` and the EXPECT_ERR
 * calls are missing in this extraction.
 */
static void test_reftable_stack_transaction_api_performs_auto_compaction(void)
299
char *dir = get_tmp_dir(__LINE__);
300
struct reftable_write_options opts = {0};
301
struct reftable_addition *add = NULL;
302
struct reftable_stack *st = NULL;
305
err = reftable_new_stack(&st, dir, &opts);
308
for (i = 0; i <= n; i++) {
309
struct reftable_ref_record ref = {
310
.update_index = reftable_stack_next_update_index(st),
311
.value_type = REFTABLE_REF_SYMREF,
312
.value.symref = (char *) "master",
316
snprintf(name, sizeof(name), "branch%04d", i);
320
* Disable auto-compaction for all but the last runs. Like this
321
* we can ensure that we indeed honor this setting and have
322
* better control over when exactly auto compaction runs.
324
st->opts.disable_auto_compact = i != n;
326
err = reftable_stack_new_addition(&add, st);
329
err = reftable_addition_add(add, &write_test_ref, &ref);
332
err = reftable_addition_commit(add);
335
reftable_addition_destroy(add);
338
* The stack length should grow continuously for all runs where
339
* auto compaction is disabled. When enabled, we should merge
340
* all tables in the stack.
343
EXPECT(st->merged->readers_len == i + 1);
345
EXPECT(st->merged->readers_len == 1);
348
reftable_stack_destroy(st);
352
/*
 * Lock the only table so auto-compaction cannot run, then verify a
 * subsequent add still succeeds while the compaction attempt is recorded
 * as a failure in the stack's stats.
 */
static void test_reftable_stack_auto_compaction_fails_gracefully(void)
354
struct reftable_ref_record ref = {
355
.refname = (char *) "refs/heads/master",
357
.value_type = REFTABLE_REF_VAL1,
358
.value.val1 = {0x01},
360
struct reftable_write_options opts = {0};
361
struct reftable_stack *st;
362
struct strbuf table_path = STRBUF_INIT;
363
char *dir = get_tmp_dir(__LINE__);
366
err = reftable_new_stack(&st, dir, &opts);
369
err = reftable_stack_add(st, write_test_ref, &ref);
371
EXPECT(st->merged->readers_len == 1);
372
EXPECT(st->stats.attempts == 0);
373
EXPECT(st->stats.failures == 0);
376
* Lock the newly written table such that it cannot be compacted.
377
* Adding a new table to the stack should not be impacted by this, even
378
* though auto-compaction will now fail.
380
strbuf_addf(&table_path, "%s/%s.lock", dir, st->readers[0]->name);
381
write_file_buf(table_path.buf, "", 0);
383
ref.update_index = 2;
384
err = reftable_stack_add(st, write_test_ref, &ref);
386
EXPECT(st->merged->readers_len == 2);
387
EXPECT(st->stats.attempts == 1);
388
EXPECT(st->stats.failures == 1);
390
reftable_stack_destroy(st);
391
strbuf_release(&table_path);
395
/* Callback that fails with the error code pointed to by arg; used to
 * drive error paths in reftable_stack_add(). */
static int write_error(struct reftable_writer *wr UNUSED, void *arg)
397
return *((int *)arg);
400
/*
 * Adding a second record without advancing the update index must fail
 * with REFTABLE_API_ERROR.
 * NOTE(review): the update_index initializers on ref1/ref2 are missing in
 * this extraction.
 */
static void test_reftable_stack_update_index_check(void)
402
char *dir = get_tmp_dir(__LINE__);
403
struct reftable_write_options opts = { 0 };
404
struct reftable_stack *st = NULL;
406
struct reftable_ref_record ref1 = {
407
.refname = (char *) "name1",
409
.value_type = REFTABLE_REF_SYMREF,
410
.value.symref = (char *) "master",
412
struct reftable_ref_record ref2 = {
413
.refname = (char *) "name2",
415
.value_type = REFTABLE_REF_SYMREF,
416
.value.symref = (char *) "master",
419
err = reftable_new_stack(&st, dir, &opts);
422
err = reftable_stack_add(st, &write_test_ref, &ref1);
425
err = reftable_stack_add(st, &write_test_ref, &ref2);
426
EXPECT(err == REFTABLE_API_ERROR);
427
reftable_stack_destroy(st);
431
/*
 * Feed every reftable error code (down to REFTABLE_EMPTY_TABLE_ERROR)
 * through a failing writer callback and confirm reftable_stack_add()
 * propagates each one.
 */
static void test_reftable_stack_lock_failure(void)
433
char *dir = get_tmp_dir(__LINE__);
434
struct reftable_write_options opts = { 0 };
435
struct reftable_stack *st = NULL;
438
err = reftable_new_stack(&st, dir, &opts);
440
for (i = -1; i != REFTABLE_EMPTY_TABLE_ERROR; i--) {
441
err = reftable_stack_add(st, &write_error, &i);
445
reftable_stack_destroy(st);
449
/*
 * Write N refs and N logs, compact the stack, and read everything back;
 * on POSIX also check that tables.list and the compacted table carry
 * opts.default_permissions.
 * NOTE(review): declarations of `err`, `i`, `buf`, the GIT_SHA1_RAWSZ
 * arguments to the *_equal() calls, and several EXPECT_ERR lines are
 * missing in this extraction.
 */
static void test_reftable_stack_add(void)
453
struct reftable_write_options opts = {
454
.exact_log_message = 1,
455
.default_permissions = 0660,
456
.disable_auto_compact = 1,
458
struct reftable_stack *st = NULL;
459
char *dir = get_tmp_dir(__LINE__);
460
struct reftable_ref_record refs[2] = { { NULL } };
461
struct reftable_log_record logs[2] = { { NULL } };
462
struct strbuf path = STRBUF_INIT;
463
struct stat stat_result;
464
int N = ARRAY_SIZE(refs);
466
err = reftable_new_stack(&st, dir, &opts);
469
for (i = 0; i < N; i++) {
471
snprintf(buf, sizeof(buf), "branch%02d", i);
472
refs[i].refname = xstrdup(buf);
473
refs[i].update_index = i + 1;
474
refs[i].value_type = REFTABLE_REF_VAL1;
475
set_test_hash(refs[i].value.val1, i);
477
logs[i].refname = xstrdup(buf);
478
logs[i].update_index = N + i + 1;
479
logs[i].value_type = REFTABLE_LOG_UPDATE;
480
logs[i].value.update.email = xstrdup("identity@invalid");
481
set_test_hash(logs[i].value.update.new_hash, i);
484
for (i = 0; i < N; i++) {
485
int err = reftable_stack_add(st, &write_test_ref, &refs[i]);
489
for (i = 0; i < N; i++) {
490
struct write_log_arg arg = {
492
.update_index = reftable_stack_next_update_index(st),
494
int err = reftable_stack_add(st, &write_test_log, &arg);
498
err = reftable_stack_compact_all(st, NULL);
501
for (i = 0; i < N; i++) {
502
struct reftable_ref_record dest = { NULL };
504
int err = reftable_stack_read_ref(st, refs[i].refname, &dest);
506
EXPECT(reftable_ref_record_equal(&dest, refs + i,
508
reftable_ref_record_release(&dest);
511
for (i = 0; i < N; i++) {
512
struct reftable_log_record dest = { NULL };
513
int err = reftable_stack_read_log(st, refs[i].refname, &dest);
515
EXPECT(reftable_log_record_equal(&dest, logs + i,
517
reftable_log_record_release(&dest);
520
#ifndef GIT_WINDOWS_NATIVE
521
strbuf_addstr(&path, dir);
522
strbuf_addstr(&path, "/tables.list");
523
err = stat(path.buf, &stat_result);
525
EXPECT((stat_result.st_mode & 0777) == opts.default_permissions);
528
strbuf_addstr(&path, dir);
529
strbuf_addstr(&path, "/");
530
/* do not try at home; not an external API for reftable. */
531
strbuf_addstr(&path, st->readers[0]->name);
532
err = stat(path.buf, &stat_result);
534
EXPECT((stat_result.st_mode & 0777) == opts.default_permissions);
540
reftable_stack_destroy(st);
541
for (i = 0; i < N; i++) {
542
reftable_ref_record_release(&refs[i]);
543
reftable_log_record_release(&logs[i]);
545
strbuf_release(&path);
549
/*
 * Log messages must be single-line: embedded newlines are rejected with
 * REFTABLE_API_ERROR, and accepted messages are normalized to end in
 * exactly one '\n' when read back.
 * NOTE(review): several initializer and EXPECT_ERR lines are missing in
 * this extraction.
 */
static void test_reftable_stack_log_normalize(void)
552
struct reftable_write_options opts = {
555
struct reftable_stack *st = NULL;
556
char *dir = get_tmp_dir(__LINE__);
557
struct reftable_log_record input = {
558
.refname = (char *) "branch",
560
.value_type = REFTABLE_LOG_UPDATE,
568
struct reftable_log_record dest = {
571
struct write_log_arg arg = {
576
err = reftable_new_stack(&st, dir, &opts);
579
input.value.update.message = (char *) "one\ntwo";
580
err = reftable_stack_add(st, &write_test_log, &arg);
581
EXPECT(err == REFTABLE_API_ERROR);
583
input.value.update.message = (char *) "one";
584
err = reftable_stack_add(st, &write_test_log, &arg);
587
err = reftable_stack_read_log(st, input.refname, &dest);
589
EXPECT(0 == strcmp(dest.value.update.message, "one\n"));
591
input.value.update.message = (char *) "two\n";
592
arg.update_index = 2;
593
err = reftable_stack_add(st, &write_test_log, &arg);
595
err = reftable_stack_read_log(st, input.refname, &dest);
597
EXPECT(0 == strcmp(dest.value.update.message, "two\n"));
600
reftable_stack_destroy(st);
601
reftable_log_record_release(&dest);
605
/*
 * Alternate ref additions with deletions (tombstones) on the same name,
 * then verify the ref/log are reported as not-found both before and after
 * compaction — i.e. tombstones survive compaction semantics.
 * NOTE(review): the odd-iteration deletion setup and the NOT_EXIST
 * EXPECT checks are missing in this extraction.
 */
static void test_reftable_stack_tombstone(void)
608
char *dir = get_tmp_dir(__LINE__);
609
struct reftable_write_options opts = { 0 };
610
struct reftable_stack *st = NULL;
612
struct reftable_ref_record refs[2] = { { NULL } };
613
struct reftable_log_record logs[2] = { { NULL } };
614
int N = ARRAY_SIZE(refs);
615
struct reftable_ref_record dest = { NULL };
616
struct reftable_log_record log_dest = { NULL };
618
err = reftable_new_stack(&st, dir, &opts);
621
/* even entries add the refs, odd entries delete them. */
622
for (i = 0; i < N; i++) {
623
const char *buf = "branch";
624
refs[i].refname = xstrdup(buf);
625
refs[i].update_index = i + 1;
627
refs[i].value_type = REFTABLE_REF_VAL1;
628
set_test_hash(refs[i].value.val1, i);
631
logs[i].refname = xstrdup(buf);
632
/* update_index is part of the key. */
633
logs[i].update_index = 42;
635
logs[i].value_type = REFTABLE_LOG_UPDATE;
636
set_test_hash(logs[i].value.update.new_hash, i);
637
logs[i].value.update.email =
638
xstrdup("identity@invalid");
641
for (i = 0; i < N; i++) {
642
int err = reftable_stack_add(st, &write_test_ref, &refs[i]);
646
for (i = 0; i < N; i++) {
647
struct write_log_arg arg = {
649
.update_index = reftable_stack_next_update_index(st),
651
int err = reftable_stack_add(st, &write_test_log, &arg);
655
err = reftable_stack_read_ref(st, "branch", &dest);
657
reftable_ref_record_release(&dest);
659
err = reftable_stack_read_log(st, "branch", &log_dest);
661
reftable_log_record_release(&log_dest);
663
err = reftable_stack_compact_all(st, NULL);
666
err = reftable_stack_read_ref(st, "branch", &dest);
669
err = reftable_stack_read_log(st, "branch", &log_dest);
671
reftable_ref_record_release(&dest);
672
reftable_log_record_release(&log_dest);
675
reftable_stack_destroy(st);
676
for (i = 0; i < N; i++) {
677
reftable_ref_record_release(&refs[i]);
678
reftable_log_record_release(&logs[i]);
683
/*
 * Write a stack with one hash function, then confirm opening it with a
 * mismatched hash_id fails with REFTABLE_FORMAT_ERROR while default
 * options can still read it.
 */
static void test_reftable_stack_hash_id(void)
685
char *dir = get_tmp_dir(__LINE__);
686
struct reftable_write_options opts = { 0 };
687
struct reftable_stack *st = NULL;
690
struct reftable_ref_record ref = {
691
.refname = (char *) "master",
692
.value_type = REFTABLE_REF_SYMREF,
693
.value.symref = (char *) "target",
696
struct reftable_write_options opts32 = { .hash_id = GIT_SHA256_FORMAT_ID };
697
struct reftable_stack *st32 = NULL;
698
struct reftable_write_options opts_default = { 0 };
699
struct reftable_stack *st_default = NULL;
700
struct reftable_ref_record dest = { NULL };
702
err = reftable_new_stack(&st, dir, &opts);
705
err = reftable_stack_add(st, &write_test_ref, &ref);
708
/* can't read it with the wrong hash ID. */
709
err = reftable_new_stack(&st32, dir, &opts32);
710
EXPECT(err == REFTABLE_FORMAT_ERROR);
712
/* check that we can read it back with default opts too. */
713
err = reftable_new_stack(&st_default, dir, &opts_default);
716
err = reftable_stack_read_ref(st_default, "master", &dest);
719
EXPECT(reftable_ref_record_equal(&ref, &dest, GIT_SHA1_RAWSZ));
720
reftable_ref_record_release(&dest);
721
reftable_stack_destroy(st);
722
reftable_stack_destroy(st_default);
726
/*
 * With a mix of table sizes, the suggested compaction segment should span
 * everything after the first (largest) table.
 * NOTE(review): the `struct segment min =` assignment line is missing in
 * this extraction.
 */
static void test_suggest_compaction_segment(void)
728
uint64_t sizes[] = { 512, 64, 17, 16, 9, 9, 9, 16, 2, 16 };
730
suggest_compaction_segment(sizes, ARRAY_SIZE(sizes), 2);
731
EXPECT(min.start == 1);
732
EXPECT(min.end == 10);
735
/* A perfectly geometric stack needs no compaction: the suggested segment
 * is empty (start == end). */
static void test_suggest_compaction_segment_nothing(void)
737
uint64_t sizes[] = { 64, 32, 16, 8, 4, 2 };
738
struct segment result =
739
suggest_compaction_segment(sizes, ARRAY_SIZE(sizes), 2);
740
EXPECT(result.start == result.end);
743
/*
 * Write N reflog entries with increasing timestamps, then compact with
 * expiry settings and verify entries below the time/update-index
 * thresholds disappear while newer ones survive.
 * NOTE(review): the expiry.time initializer and the EXPECT checks on the
 * read_log results are missing in this extraction.
 */
static void test_reflog_expire(void)
745
char *dir = get_tmp_dir(__LINE__);
746
struct reftable_write_options opts = { 0 };
747
struct reftable_stack *st = NULL;
748
struct reftable_log_record logs[20] = { { NULL } };
749
int N = ARRAY_SIZE(logs) - 1;
752
struct reftable_log_expiry_config expiry = {
755
struct reftable_log_record log = { NULL };
757
err = reftable_new_stack(&st, dir, &opts);
760
for (i = 1; i <= N; i++) {
762
snprintf(buf, sizeof(buf), "branch%02d", i);
764
logs[i].refname = xstrdup(buf);
765
logs[i].update_index = i;
766
logs[i].value_type = REFTABLE_LOG_UPDATE;
767
logs[i].value.update.time = i;
768
logs[i].value.update.email = xstrdup("identity@invalid");
769
set_test_hash(logs[i].value.update.new_hash, i);
772
for (i = 1; i <= N; i++) {
773
struct write_log_arg arg = {
775
.update_index = reftable_stack_next_update_index(st),
777
int err = reftable_stack_add(st, &write_test_log, &arg);
781
err = reftable_stack_compact_all(st, NULL);
784
err = reftable_stack_compact_all(st, &expiry);
787
err = reftable_stack_read_log(st, logs[9].refname, &log);
790
err = reftable_stack_read_log(st, logs[11].refname, &log);
793
expiry.min_update_index = 15;
794
err = reftable_stack_compact_all(st, &expiry);
797
err = reftable_stack_read_log(st, logs[14].refname, &log);
800
err = reftable_stack_read_log(st, logs[16].refname, &log);
804
reftable_stack_destroy(st);
805
for (i = 0; i <= N; i++) {
806
reftable_log_record_release(&logs[i]);
809
reftable_log_record_release(&log);
812
/* Callback that sets writer limits but writes no records, producing an
 * empty table. NOTE(review): the return statement is missing here. */
static int write_nothing(struct reftable_writer *wr, void *arg UNUSED)
814
reftable_writer_set_limits(wr, 1, 1);
818
/*
 * Adding an empty table should succeed (or be a no-op) and leave the
 * stack in a state another reader can still open.
 */
static void test_empty_add(void)
820
struct reftable_write_options opts = { 0 };
821
struct reftable_stack *st = NULL;
823
char *dir = get_tmp_dir(__LINE__);
824
struct reftable_stack *st2 = NULL;
826
err = reftable_new_stack(&st, dir, &opts);
829
err = reftable_stack_add(st, &write_nothing, NULL);
832
err = reftable_new_stack(&st2, dir, &opts);
835
reftable_stack_destroy(st);
836
reftable_stack_destroy(st2);
839
/* Integer base-2 logarithm helper. NOTE(review): the function body is
 * entirely missing from this extraction — restore from the original. */
static int fastlog2(uint64_t sz)
849
/*
 * Add N refs with explicit auto-compaction after each add and check the
 * stack height stays logarithmic in N, and total compaction work stays
 * within O(N log N) entries written.
 * NOTE(review): declarations of `err`, `i`, `N`, `name` are missing in
 * this extraction.
 */
static void test_reftable_stack_auto_compaction(void)
851
struct reftable_write_options opts = {
852
.disable_auto_compact = 1,
854
struct reftable_stack *st = NULL;
855
char *dir = get_tmp_dir(__LINE__);
859
err = reftable_new_stack(&st, dir, &opts);
862
for (i = 0; i < N; i++) {
864
struct reftable_ref_record ref = {
866
.update_index = reftable_stack_next_update_index(st),
867
.value_type = REFTABLE_REF_SYMREF,
868
.value.symref = (char *) "master",
870
snprintf(name, sizeof(name), "branch%04d", i);
872
err = reftable_stack_add(st, &write_test_ref, &ref);
875
err = reftable_stack_auto_compact(st);
877
EXPECT(i < 3 || st->merged->readers_len < 2 * fastlog2(i));
880
EXPECT(reftable_stack_compaction_stats(st)->entries_written <
881
(uint64_t)(N * fastlog2(N)));
883
reftable_stack_destroy(st);
887
/*
 * With one mid-stack table locked, auto-compaction should do a best-effort
 * pass: no recorded failure, and only the tables newer than the lock get
 * merged (5 tables -> 4).
 */
static void test_reftable_stack_auto_compaction_with_locked_tables(void)
889
struct reftable_write_options opts = {
890
.disable_auto_compact = 1,
892
struct reftable_stack *st = NULL;
893
struct strbuf buf = STRBUF_INIT;
894
char *dir = get_tmp_dir(__LINE__);
897
err = reftable_new_stack(&st, dir, &opts);
900
write_n_ref_tables(st, 5);
901
EXPECT(st->merged->readers_len == 5);
904
* Given that all tables we have written should be roughly the same
905
* size, we expect that auto-compaction will want to compact all of the
906
* tables. Locking any of the tables will keep it from doing so.
909
strbuf_addf(&buf, "%s/%s.lock", dir, st->readers[2]->name);
910
write_file_buf(buf.buf, "", 0);
913
* When parts of the stack are locked, then auto-compaction does a best
914
* effort compaction of those tables which aren't locked. So while this
915
* would in theory compact all tables, due to the preexisting lock we
916
* only compact the newest two tables.
918
err = reftable_stack_auto_compact(st);
920
EXPECT(st->stats.failures == 0);
921
EXPECT(st->merged->readers_len == 4);
923
reftable_stack_destroy(st);
924
strbuf_release(&buf);
928
/*
 * Same pattern as the transaction-API variant, but via plain
 * reftable_stack_add(): the stack grows while auto-compaction is
 * disabled and collapses to one table on the final, enabled run.
 * NOTE(review): declarations of `err`, `i`, `n` and the EXPECT_ERR calls
 * are missing in this extraction.
 */
static void test_reftable_stack_add_performs_auto_compaction(void)
930
struct reftable_write_options opts = { 0 };
931
struct reftable_stack *st = NULL;
932
struct strbuf refname = STRBUF_INIT;
933
char *dir = get_tmp_dir(__LINE__);
936
err = reftable_new_stack(&st, dir, &opts);
939
for (i = 0; i <= n; i++) {
940
struct reftable_ref_record ref = {
941
.update_index = reftable_stack_next_update_index(st),
942
.value_type = REFTABLE_REF_SYMREF,
943
.value.symref = (char *) "master",
947
* Disable auto-compaction for all but the last runs. Like this
948
* we can ensure that we indeed honor this setting and have
949
* better control over when exactly auto compaction runs.
951
st->opts.disable_auto_compact = i != n;
953
strbuf_reset(&refname);
954
strbuf_addf(&refname, "branch-%04d", i);
955
ref.refname = refname.buf;
957
err = reftable_stack_add(st, &write_test_ref, &ref);
961
* The stack length should grow continuously for all runs where
962
* auto compaction is disabled. When enabled, we should merge
963
* all tables in the stack.
966
EXPECT(st->merged->readers_len == i + 1);
968
EXPECT(st->merged->readers_len == 1);
971
reftable_stack_destroy(st);
972
strbuf_release(&refname);
976
/*
 * Unlike auto-compaction, a full compact_all must compact everything, so
 * a locked table makes it fail with REFTABLE_LOCK_ERROR and leaves the
 * stack unchanged.
 */
static void test_reftable_stack_compaction_with_locked_tables(void)
978
struct reftable_write_options opts = {
979
.disable_auto_compact = 1,
981
struct reftable_stack *st = NULL;
982
struct strbuf buf = STRBUF_INIT;
983
char *dir = get_tmp_dir(__LINE__);
986
err = reftable_new_stack(&st, dir, &opts);
989
write_n_ref_tables(st, 3);
990
EXPECT(st->merged->readers_len == 3);
992
/* Lock one of the tables that we're about to compact. */
994
strbuf_addf(&buf, "%s/%s.lock", dir, st->readers[1]->name);
995
write_file_buf(buf.buf, "", 0);
998
* Compaction is expected to fail given that we were not able to
999
* compact all tables.
1001
err = reftable_stack_compact_all(st, NULL);
1002
EXPECT(err == REFTABLE_LOCK_ERROR);
1003
EXPECT(st->stats.failures == 1);
1004
EXPECT(st->merged->readers_len == 3);
1006
reftable_stack_destroy(st);
1007
strbuf_release(&buf);
1011
/*
 * Compact while a second stack holds the old tables open; after both
 * stacks are destroyed only the compacted table and tables.list should
 * remain on disk.
 */
static void test_reftable_stack_compaction_concurrent(void)
1013
struct reftable_write_options opts = { 0 };
1014
struct reftable_stack *st1 = NULL, *st2 = NULL;
1015
char *dir = get_tmp_dir(__LINE__);
1018
err = reftable_new_stack(&st1, dir, &opts);
1020
write_n_ref_tables(st1, 3);
1022
err = reftable_new_stack(&st2, dir, &opts);
1025
err = reftable_stack_compact_all(st1, NULL);
1028
reftable_stack_destroy(st1);
1029
reftable_stack_destroy(st2);
1031
EXPECT(count_dir_entries(dir) == 2);
1035
/*
 * Free a stack's readers without deleting their files, simulating a
 * process that died before cleaning up.
 * NOTE(review): the declaration/initialization of `i` is missing in this
 * extraction.
 */
static void unclean_stack_close(struct reftable_stack *st)
1037
/* break abstraction boundary to simulate unclean shutdown. */
1039
for (; i < st->readers_len; i++) {
1040
reftable_reader_free(st->readers[i]);
1042
st->readers_len = 0;
1043
FREE_AND_NULL(st->readers);
1046
/*
 * After two stacks shut down uncleanly around a compaction, a third stack
 * running reftable_stack_clean() should remove the leftover tables,
 * leaving only the live table and tables.list.
 */
static void test_reftable_stack_compaction_concurrent_clean(void)
1048
struct reftable_write_options opts = { 0 };
1049
struct reftable_stack *st1 = NULL, *st2 = NULL, *st3 = NULL;
1050
char *dir = get_tmp_dir(__LINE__);
1053
err = reftable_new_stack(&st1, dir, &opts);
1055
write_n_ref_tables(st1, 3);
1057
err = reftable_new_stack(&st2, dir, &opts);
1060
err = reftable_stack_compact_all(st1, NULL);
1063
unclean_stack_close(st1);
1064
unclean_stack_close(st2);
1066
err = reftable_new_stack(&st3, dir, &opts);
1069
err = reftable_stack_clean(st3);
1071
EXPECT(count_dir_entries(dir) == 2);
1073
reftable_stack_destroy(st1);
1074
reftable_stack_destroy(st2);
1075
reftable_stack_destroy(st3);
1080
/*
 * Test-suite entry point: run every stack test in alphabetical-ish order.
 * NOTE(review): the closing `return 0;` / brace are missing in this
 * extraction.
 */
int stack_test_main(int argc UNUSED, const char *argv[] UNUSED)
1082
RUN_TEST(test_empty_add);
1083
RUN_TEST(test_read_file);
1084
RUN_TEST(test_reflog_expire);
1085
RUN_TEST(test_reftable_stack_add);
1086
RUN_TEST(test_reftable_stack_add_one);
1087
RUN_TEST(test_reftable_stack_auto_compaction);
1088
RUN_TEST(test_reftable_stack_auto_compaction_with_locked_tables);
1089
RUN_TEST(test_reftable_stack_add_performs_auto_compaction);
1090
RUN_TEST(test_reftable_stack_compaction_concurrent);
1091
RUN_TEST(test_reftable_stack_compaction_concurrent_clean);
1092
RUN_TEST(test_reftable_stack_compaction_with_locked_tables);
1093
RUN_TEST(test_reftable_stack_hash_id);
1094
RUN_TEST(test_reftable_stack_lock_failure);
1095
RUN_TEST(test_reftable_stack_log_normalize);
1096
RUN_TEST(test_reftable_stack_tombstone);
1097
RUN_TEST(test_reftable_stack_transaction_api);
1098
RUN_TEST(test_reftable_stack_transaction_api_performs_auto_compaction);
1099
RUN_TEST(test_reftable_stack_auto_compaction_fails_gracefully);
1100
RUN_TEST(test_reftable_stack_update_index_check);
1101
RUN_TEST(test_reftable_stack_uptodate);
1102
RUN_TEST(test_suggest_compaction_segment);
1103
RUN_TEST(test_suggest_compaction_segment_nothing);