#define USE_THE_REPOSITORY_VARIABLE

#include "../git-compat-util.h"
#include "../abspath.h"
#include "../chdir-notify.h"
#include "../config.h"
#include "../dir.h"
#include "../environment.h"
#include "../gettext.h"
#include "../hash.h"
#include "../hex.h"
#include "../iterator.h"
#include "../ident.h"
#include "../lockfile.h"
#include "../object.h"
#include "../path.h"
#include "../refs.h"
#include "../reftable/reftable-stack.h"
#include "../reftable/reftable-record.h"
#include "../reftable/reftable-error.h"
#include "../reftable/reftable-iterator.h"
#include "../setup.h"
#include "../strmap.h"
#include "parse.h"
#include "refs-internal.h"

/*
 * Used as a flag in ref_update::flags when the ref_update was via an
 * update to HEAD.
 */
#define REF_UPDATE_VIA_HEAD (1 << 8)

struct reftable_ref_store {
	struct ref_store base;

	/*
	 * The main stack refers to the common dir and thus contains common
	 * refs as well as refs of the main repository.
	 */
	struct reftable_stack *main_stack;
	/*
	 * The worktree stack refers to the gitdir in case the refdb is opened
	 * via a worktree. It thus contains the per-worktree refs.
	 */
	struct reftable_stack *worktree_stack;
	/*
	 * Map of worktree stacks by their respective worktree names. The map
	 * is populated lazily when we try to resolve `worktrees/$worktree` refs.
	 */
	struct strmap worktree_stacks;
	struct reftable_write_options write_options;

	unsigned int store_flags;
	int err;
};

/*
 * Downcast ref_store to reftable_ref_store. Die if ref_store is not a
 * reftable_ref_store. required_flags is compared with ref_store's store_flags
 * to ensure the ref_store has all required capabilities. "caller" is used in
 * any necessary error messages.
 */
static struct reftable_ref_store *reftable_be_downcast(struct ref_store *ref_store,
						       unsigned int required_flags,
						       const char *caller)
{
	struct reftable_ref_store *refs;

	if (ref_store->be != &refs_be_reftable)
		BUG("ref_store is type \"%s\" not \"reftable\" in %s",
		    ref_store->be->name, caller);

	refs = (struct reftable_ref_store *)ref_store;

	if ((refs->store_flags & required_flags) != required_flags)
		BUG("operation %s requires abilities 0x%x, but only have 0x%x",
		    caller, required_flags, refs->store_flags);

	return refs;
}

/*
 * Some refs are global to the repository (refs/heads/{*}), while others are
 * local to the worktree (eg. HEAD, refs/bisect/{*}). We solve this by having
 * multiple separate databases (ie. multiple reftable/ directories), one for
 * the shared refs, one for the current worktree refs, and one for each
 * additional worktree. For reading, we merge the view of both the shared and
 * the current worktree's refs, when necessary.
 *
 * This function also optionally assigns the rewritten reference name that is
 * local to the stack. This translation is required when using worktree refs
 * like `worktrees/$worktree/refs/heads/foo` as worktree stacks will store
 * those references in their normalized form.
 */
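/*
 * An illustrative mapping, assuming the refdb was opened via a linked
 * worktree (the worktree name "wt" is made up for this example):
 *
 *   "refs/heads/main"              -> main stack, name unchanged
 *   "HEAD"                         -> worktree stack, name unchanged
 *   "worktrees/wt/refs/bisect/bad" -> stack of worktree "wt", name
 *                                     rewritten to "refs/bisect/bad"
 */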
static struct reftable_stack *stack_for(struct reftable_ref_store *store,
					const char *refname,
					const char **rewritten_ref)
{
	const char *wtname;
	int wtname_len;

	if (!refname)
		return store->main_stack;

	switch (parse_worktree_ref(refname, &wtname, &wtname_len, rewritten_ref)) {
	case REF_WORKTREE_OTHER: {
		static struct strbuf wtname_buf = STRBUF_INIT;
		struct strbuf wt_dir = STRBUF_INIT;
		struct reftable_stack *stack;

		/*
		 * We're using a static buffer here so that we don't need to
		 * allocate the worktree name whenever we look up a reference.
		 * This could be avoided if the strmap interface knew how to
		 * handle keys with a length.
		 */
		strbuf_reset(&wtname_buf);
		strbuf_add(&wtname_buf, wtname, wtname_len);

		/*
		 * There is an edge case here: when the worktree references the
		 * current worktree, then we set up the stack once via
		 * `worktree_stacks` and once via `worktree_stack`. This is
		 * wasteful, but in the reading case it shouldn't matter. And
		 * in the writing case we would notice that the stack is locked
		 * already and error out when trying to write a reference via
		 * both stacks.
		 */
		stack = strmap_get(&store->worktree_stacks, wtname_buf.buf);
		if (!stack) {
			strbuf_addf(&wt_dir, "%s/worktrees/%s/reftable",
				    store->base.repo->commondir, wtname_buf.buf);

			store->err = reftable_new_stack(&stack, wt_dir.buf,
							&store->write_options);
			assert(store->err != REFTABLE_API_ERROR);
			strmap_put(&store->worktree_stacks, wtname_buf.buf, stack);
		}

		strbuf_release(&wt_dir);
		return stack;
	}
	case REF_WORKTREE_CURRENT:
		/*
		 * If there is no worktree stack then we're currently in the
		 * main worktree. We thus return the main stack in that case.
		 */
		if (!store->worktree_stack)
			return store->main_stack;
		return store->worktree_stack;
	case REF_WORKTREE_MAIN:
	case REF_WORKTREE_SHARED:
		return store->main_stack;
	default:
		BUG("unhandled worktree reference type");
	}
}

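/*
 * Decide whether a reflog entry shall be written for the given reference,
 * mirroring the files backend: "core.logAllRefUpdates=always" logs
 * unconditionally, the default mode in non-bare repositories logs for
 * automatically-created reflogs (as decided by should_autocreate_reflog())
 * or when a reflog already exists, and "false" logs only when a reflog
 * already exists.
 */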
static int should_write_log(struct ref_store *refs, const char *refname)
{
	if (log_all_ref_updates == LOG_REFS_UNSET)
		log_all_ref_updates = is_bare_repository() ? LOG_REFS_NONE : LOG_REFS_NORMAL;

	switch (log_all_ref_updates) {
	case LOG_REFS_NONE:
		return refs_reflog_exists(refs, refname);
	case LOG_REFS_ALWAYS:
		return 1;
	case LOG_REFS_NORMAL:
		if (should_autocreate_reflog(refname))
			return 1;
		return refs_reflog_exists(refs, refname);
	default:
		BUG("unhandled core.logAllRefUpdates value %d", log_all_ref_updates);
	}
}

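/*
 * Fill the committer identity of a reftable log record from a parsed ident
 * line. Name and email are duplicated, so the record owns them and must
 * eventually be released via reftable_log_record_release().
 */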
static void fill_reftable_log_record(struct reftable_log_record *log, const struct ident_split *split)
{
	const char *tz_begin;
	int sign = 1;

	reftable_log_record_release(log);
	log->value_type = REFTABLE_LOG_UPDATE;
	log->value.update.name =
		xstrndup(split->name_begin, split->name_end - split->name_begin);
	log->value.update.email =
		xstrndup(split->mail_begin, split->mail_end - split->mail_begin);
	log->value.update.time = atol(split->date_begin);

	tz_begin = split->tz_begin;
	if (*tz_begin == '-') {
		sign = -1;
		tz_begin++;
	}
	if (*tz_begin == '+') {
		sign = 1;
		tz_begin++;
	}

	log->value.update.tz_offset = sign * atoi(tz_begin);
}

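/*
 * Read a single reference from the given stack without reloading it first.
 * Returns 0 on success, a positive value in case the reference does not
 * exist and a negative error code otherwise. Callers that need an up-to-date
 * view are expected to reload the stack themselves.
 */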
static int read_ref_without_reload(struct reftable_ref_store *refs,
				   struct reftable_stack *stack,
				   const char *refname,
				   struct object_id *oid,
				   struct strbuf *referent,
				   unsigned int *type)
{
	struct reftable_ref_record ref = {0};
	int ret;

	ret = reftable_stack_read_ref(stack, refname, &ref);
	if (ret)
		goto done;

	if (ref.value_type == REFTABLE_REF_SYMREF) {
		strbuf_reset(referent);
		strbuf_addstr(referent, ref.value.symref);
		*type |= REF_ISSYMREF;
	} else if (reftable_ref_record_val1(&ref)) {
		oidread(oid, reftable_ref_record_val1(&ref),
			refs->base.repo->hash_algo);
	} else {
		/* We got a tombstone, which should not happen. */
		BUG("unhandled reference value type %d", ref.value_type);
	}

done:
	assert(ret != REFTABLE_API_ERROR);
	reftable_ref_record_release(&ref);
	return ret;
}

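/*
 * Parse the "reftable.*" configuration keys into the options used when
 * writing new tables. An illustrative configuration (the values are made up
 * for this example):
 *
 *   [reftable]
 *       blockSize = 8192
 *       restartInterval = 16
 *       indexObjects = true
 *       geometricFactor = 2
 */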
static int reftable_be_config(const char *var, const char *value,
			      const struct config_context *ctx,
			      void *_opts)
{
	struct reftable_write_options *opts = _opts;

	if (!strcmp(var, "reftable.blocksize")) {
		unsigned long block_size = git_config_ulong(var, value, ctx->kvi);
		if (block_size > 16777215)
			die("reftable block size cannot exceed 16MB");
		opts->block_size = block_size;
	} else if (!strcmp(var, "reftable.restartinterval")) {
		unsigned long restart_interval = git_config_ulong(var, value, ctx->kvi);
		if (restart_interval > UINT16_MAX)
			die("reftable restart interval cannot exceed %u", (unsigned)UINT16_MAX);
		opts->restart_interval = restart_interval;
	} else if (!strcmp(var, "reftable.indexobjects")) {
		opts->skip_index_objects = !git_config_bool(var, value);
	} else if (!strcmp(var, "reftable.geometricfactor")) {
		unsigned long factor = git_config_ulong(var, value, ctx->kvi);
		if (factor > UINT8_MAX)
			die("reftable geometric factor cannot exceed %u", (unsigned)UINT8_MAX);
		opts->auto_compaction_factor = factor;
	}

	return 0;
}

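/*
 * Initialize the reftable ref store: read the "reftable.*" configuration,
 * derive default permissions from the umask, and set up the main stack in
 * the common git directory as well as, when applicable, the stack of the
 * current worktree.
 */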
static struct ref_store *reftable_be_init(struct repository *repo,
					  const char *gitdir,
					  unsigned int store_flags)
{
	struct reftable_ref_store *refs = xcalloc(1, sizeof(*refs));
	struct strbuf path = STRBUF_INIT;
	int is_worktree;
	mode_t mask;

	mask = umask(0);
	umask(mask);

	base_ref_store_init(&refs->base, repo, gitdir, &refs_be_reftable);
	strmap_init(&refs->worktree_stacks);
	refs->store_flags = store_flags;

	refs->write_options.hash_id = repo->hash_algo->format_id;
	refs->write_options.default_permissions = calc_shared_perm(0666 & ~mask);
	refs->write_options.disable_auto_compact =
		!git_env_bool("GIT_TEST_REFTABLE_AUTOCOMPACTION", 1);

	git_config(reftable_be_config, &refs->write_options);

	/*
	 * It is somewhat unfortunate that we have to mirror the default block
	 * size of the reftable library here. But the library does not update
	 * the write options itself, and we need to know the block size so
	 * that we can trim reflog messages to fit into a block. We thus have
	 * to set up a proper value ourselves.
	 */
	if (!refs->write_options.block_size)
		refs->write_options.block_size = 4096;

	/*
	 * Set up the main reftable stack that is hosted in GIT_COMMON_DIR.
	 * This stack contains both the shared and the main worktree refs.
	 *
	 * Note that we don't try to resolve the path in case we have a
	 * worktree because `get_common_dir_noenv()` already does it for us.
	 */
	is_worktree = get_common_dir_noenv(&path, gitdir);
	if (!is_worktree) {
		strbuf_reset(&path);
		strbuf_realpath(&path, gitdir, 0);
	}
	strbuf_addstr(&path, "/reftable");
	refs->err = reftable_new_stack(&refs->main_stack, path.buf,
				       &refs->write_options);
	if (refs->err)
		goto done;

	/*
	 * If we're in a worktree we also need to set up the worktree reftable
	 * stack that is contained in the per-worktree GIT_DIR.
	 *
	 * Ideally, we would also add the stack to our worktree stack map. But
	 * we have no way to figure out the worktree name here and thus can't
	 * do it efficiently.
	 */
	if (is_worktree) {
		strbuf_reset(&path);
		strbuf_addf(&path, "%s/reftable", gitdir);

		refs->err = reftable_new_stack(&refs->worktree_stack, path.buf,
					       &refs->write_options);
		if (refs->err)
			goto done;
	}

	chdir_notify_reparent("reftables-backend $GIT_DIR", &refs->base.gitdir);

done:
	assert(refs->err != REFTABLE_API_ERROR);
	strbuf_release(&path);
	return &refs->base;
}

static void reftable_be_release(struct ref_store *ref_store)
{
	struct reftable_ref_store *refs = reftable_be_downcast(ref_store, 0, "release");
	struct strmap_entry *entry;
	struct hashmap_iter iter;

	if (refs->main_stack) {
		reftable_stack_destroy(refs->main_stack);
		refs->main_stack = NULL;
	}

	if (refs->worktree_stack) {
		reftable_stack_destroy(refs->worktree_stack);
		refs->worktree_stack = NULL;
	}

	strmap_for_each_entry(&refs->worktree_stacks, &iter, entry)
		reftable_stack_destroy(entry->value);
	strmap_clear(&refs->worktree_stacks, 0);
}

static int reftable_be_create_on_disk(struct ref_store *ref_store,
				      int flags UNUSED,
				      struct strbuf *err UNUSED)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_WRITE, "create");
	struct strbuf sb = STRBUF_INIT;

	strbuf_addf(&sb, "%s/reftable", refs->base.gitdir);
	safe_create_dir(sb.buf, 1);
	strbuf_reset(&sb);

	strbuf_addf(&sb, "%s/HEAD", refs->base.gitdir);
	write_file(sb.buf, "ref: refs/heads/.invalid");
	adjust_shared_perm(sb.buf);
	strbuf_reset(&sb);

	strbuf_addf(&sb, "%s/refs", refs->base.gitdir);
	safe_create_dir(sb.buf, 1);
	strbuf_reset(&sb);

	strbuf_addf(&sb, "%s/refs/heads", refs->base.gitdir);
	write_file(sb.buf, "this repository uses the reftable format");
	adjust_shared_perm(sb.buf);

	strbuf_release(&sb);
	return 0;
}

static int reftable_be_remove_on_disk(struct ref_store *ref_store,
				      struct strbuf *err)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_WRITE, "remove");
	struct strbuf sb = STRBUF_INIT;
	int ret = 0;

	/*
	 * Release the ref store such that all stacks are closed. This is
	 * required so that the "tables.list" file is not open anymore, which
	 * would otherwise make it impossible to remove the file on Windows.
	 */
	reftable_be_release(ref_store);

	strbuf_addf(&sb, "%s/reftable", refs->base.gitdir);
	if (remove_dir_recursively(&sb, 0) < 0) {
		strbuf_addf(err, "could not delete reftables: %s",
			    strerror(errno));
		ret = -1;
	}
	strbuf_reset(&sb);

	strbuf_addf(&sb, "%s/HEAD", refs->base.gitdir);
	if (unlink(sb.buf) < 0) {
		strbuf_addf(err, "could not delete stub HEAD: %s",
			    strerror(errno));
		ret = -1;
	}
	strbuf_reset(&sb);

	strbuf_addf(&sb, "%s/refs/heads", refs->base.gitdir);
	if (unlink(sb.buf) < 0) {
		strbuf_addf(err, "could not delete stub heads: %s",
			    strerror(errno));
		ret = -1;
	}
	strbuf_reset(&sb);

	strbuf_addf(&sb, "%s/refs", refs->base.gitdir);
	if (rmdir(sb.buf) < 0) {
		strbuf_addf(err, "could not delete refs directory: %s",
			    strerror(errno));
		ret = -1;
	}

	strbuf_release(&sb);
	return ret;
}

struct reftable_ref_iterator {
	struct ref_iterator base;
	struct reftable_ref_store *refs;
	struct reftable_iterator iter;
	struct reftable_ref_record ref;
	struct object_id oid;

	const char *prefix;
	size_t prefix_len;
	unsigned int flags;
	int err;
};

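/*
 * Advance the iterator to the next reference, skipping records that shall
 * not be yielded: refs outside of "refs/" (unless root refs are to be
 * included), refs not matching the prefix, and refs excluded by the
 * per-worktree and broken-ref policies encoded in the iterator flags.
 */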
static int reftable_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct reftable_ref_iterator *iter =
		(struct reftable_ref_iterator *)ref_iterator;
	struct reftable_ref_store *refs = iter->refs;
	const char *referent = NULL;

	while (!iter->err) {
		int flags = 0;

		iter->err = reftable_iterator_next_ref(&iter->iter, &iter->ref);
		if (iter->err)
			break;

		/*
		 * The files backend only lists references contained in "refs/" unless
		 * the root refs are to be included. We emulate the same behaviour here.
		 */
		if (!starts_with(iter->ref.refname, "refs/") &&
		    !(iter->flags & DO_FOR_EACH_INCLUDE_ROOT_REFS &&
		      is_root_ref(iter->ref.refname))) {
			continue;
		}

		if (iter->prefix_len &&
		    strncmp(iter->prefix, iter->ref.refname, iter->prefix_len)) {
			iter->err = 1;
			break;
		}

		if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
		    parse_worktree_ref(iter->ref.refname, NULL, NULL, NULL) !=
			    REF_WORKTREE_CURRENT)
			continue;

		switch (iter->ref.value_type) {
		case REFTABLE_REF_VAL1:
			oidread(&iter->oid, iter->ref.value.val1,
				refs->base.repo->hash_algo);
			break;
		case REFTABLE_REF_VAL2:
			oidread(&iter->oid, iter->ref.value.val2.value,
				refs->base.repo->hash_algo);
			break;
		case REFTABLE_REF_SYMREF:
			referent = refs_resolve_ref_unsafe(&iter->refs->base,
							   iter->ref.refname,
							   RESOLVE_REF_READING,
							   &iter->oid, &flags);
			if (!referent)
				oidclr(&iter->oid, refs->base.repo->hash_algo);
			break;
		default:
			BUG("unhandled reference value type %d", iter->ref.value_type);
		}

		if (is_null_oid(&iter->oid))
			flags |= REF_ISBROKEN;

		if (check_refname_format(iter->ref.refname, REFNAME_ALLOW_ONELEVEL)) {
			if (!refname_is_safe(iter->ref.refname))
				die(_("refname is dangerous: %s"), iter->ref.refname);
			oidclr(&iter->oid, refs->base.repo->hash_algo);
			flags |= REF_BAD_NAME | REF_ISBROKEN;
		}

		if (iter->flags & DO_FOR_EACH_OMIT_DANGLING_SYMREFS &&
		    flags & REF_ISSYMREF &&
		    flags & REF_ISBROKEN)
			continue;

		if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
		    !ref_resolves_to_object(iter->ref.refname, refs->base.repo,
					    &iter->oid, flags))
			continue;

		iter->base.refname = iter->ref.refname;
		iter->base.referent = referent;
		iter->base.oid = &iter->oid;
		iter->base.flags = flags;

		break;
	}

	if (iter->err > 0) {
		if (ref_iterator_abort(ref_iterator) != ITER_DONE)
			return ITER_ERROR;
		return ITER_DONE;
	}

	if (iter->err < 0) {
		ref_iterator_abort(ref_iterator);
		return ITER_ERROR;
	}

	return ITER_OK;
}

static int reftable_ref_iterator_peel(struct ref_iterator *ref_iterator,
				      struct object_id *peeled)
{
	struct reftable_ref_iterator *iter =
		(struct reftable_ref_iterator *)ref_iterator;

	if (iter->ref.value_type == REFTABLE_REF_VAL2) {
		oidread(peeled, iter->ref.value.val2.target_value,
			iter->refs->base.repo->hash_algo);
		return 0;
	}

	return -1;
}

static int reftable_ref_iterator_abort(struct ref_iterator *ref_iterator)
{
	struct reftable_ref_iterator *iter =
		(struct reftable_ref_iterator *)ref_iterator;
	reftable_ref_record_release(&iter->ref);
	reftable_iterator_destroy(&iter->iter);
	free(iter);
	return ITER_DONE;
}

static struct ref_iterator_vtable reftable_ref_iterator_vtable = {
	.advance = reftable_ref_iterator_advance,
	.peel = reftable_ref_iterator_peel,
	.abort = reftable_ref_iterator_abort
};

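/*
 * Set up a reference iterator for the given stack, seeded with the given
 * prefix. The stack is reloaded first so that the iterator sees its most
 * recent state; any error is stored in the iterator and surfaced on the
 * first advance.
 */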
static struct reftable_ref_iterator *ref_iterator_for_stack(struct reftable_ref_store *refs,
							    struct reftable_stack *stack,
							    const char *prefix,
							    int flags)
{
	struct reftable_ref_iterator *iter;
	int ret;

	iter = xcalloc(1, sizeof(*iter));
	base_ref_iterator_init(&iter->base, &reftable_ref_iterator_vtable);
	iter->prefix = prefix;
	iter->prefix_len = prefix ? strlen(prefix) : 0;
	iter->base.oid = &iter->oid;
	iter->flags = flags;
	iter->refs = refs;

	ret = refs->err;
	if (ret)
		goto done;

	ret = reftable_stack_reload(stack);
	if (ret)
		goto done;

	reftable_stack_init_ref_iterator(stack, &iter->iter);
	ret = reftable_iterator_seek_ref(&iter->iter, prefix);
	if (ret)
		goto done;

done:
	iter->err = ret;
	return iter;
}

static struct ref_iterator *reftable_be_iterator_begin(struct ref_store *ref_store,
						       const char *prefix,
						       const char **exclude_patterns UNUSED,
						       unsigned int flags)
{
	struct reftable_ref_iterator *main_iter, *worktree_iter;
	struct reftable_ref_store *refs;
	unsigned int required_flags = REF_STORE_READ;

	if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
		required_flags |= REF_STORE_ODB;
	refs = reftable_be_downcast(ref_store, required_flags, "ref_iterator_begin");

	main_iter = ref_iterator_for_stack(refs, refs->main_stack, prefix, flags);

	/*
	 * The worktree stack is only set when we're in an actual worktree
	 * right now. If we aren't, then we only return the common reftable
	 * iterator.
	 */
	if (!refs->worktree_stack)
		return &main_iter->base;

	/*
	 * Otherwise we merge both the common and the per-worktree refs into a
	 * single iterator.
	 */
	worktree_iter = ref_iterator_for_stack(refs, refs->worktree_stack, prefix, flags);
	return merge_ref_iterator_begin(&worktree_iter->base, &main_iter->base,
					ref_iterator_select, NULL);
}

static int reftable_be_read_raw_ref(struct ref_store *ref_store,
				    const char *refname,
				    struct object_id *oid,
				    struct strbuf *referent,
				    unsigned int *type,
				    int *failure_errno)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
	struct reftable_stack *stack = stack_for(refs, refname, &refname);
	int ret;

	if (refs->err < 0)
		return refs->err;

	ret = reftable_stack_reload(stack);
	if (ret)
		return ret;

	ret = read_ref_without_reload(refs, stack, refname, oid, referent, type);
	if (ret < 0)
		return ret;
	if (ret > 0) {
		*failure_errno = ENOENT;
		return -1;
	}

	return 0;
}

static int reftable_be_read_symbolic_ref(struct ref_store *ref_store,
					 const char *refname,
					 struct strbuf *referent)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_READ, "read_symbolic_ref");
	struct reftable_stack *stack = stack_for(refs, refname, &refname);
	struct reftable_ref_record ref = {0};
	int ret;

	ret = reftable_stack_reload(stack);
	if (ret)
		return ret;

	ret = reftable_stack_read_ref(stack, refname, &ref);
	if (ret == 0 && ref.value_type == REFTABLE_REF_SYMREF)
		strbuf_addstr(referent, ref.value.symref);
	else
		ret = -1;

	reftable_ref_record_release(&ref);
	return ret;
}

struct reftable_transaction_update {
	struct ref_update *update;
	struct object_id current_oid;
};

struct write_transaction_table_arg {
	struct reftable_ref_store *refs;
	struct reftable_stack *stack;
	struct reftable_addition *addition;
	struct reftable_transaction_update *updates;
	size_t updates_nr;
	size_t updates_alloc;
	size_t updates_expected;
};

struct reftable_transaction_data {
	struct write_transaction_table_arg *args;
	size_t args_nr, args_alloc;
};

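/*
 * Release all resources associated with the transaction data, including any
 * pending additions that have not been committed to their stacks yet.
 */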
static void free_transaction_data(struct reftable_transaction_data *tx_data)
{
	if (!tx_data)
		return;
	for (size_t i = 0; i < tx_data->args_nr; i++) {
		reftable_addition_destroy(tx_data->args[i].addition);
		free(tx_data->args[i].updates);
	}
	free(tx_data->args);
	free(tx_data);
}

/*
 * Prepare transaction update for the given reference update. This will cause
 * us to lock the corresponding reftable stack against concurrent
 * modification.
 */
static int prepare_transaction_update(struct write_transaction_table_arg **out,
				      struct reftable_ref_store *refs,
				      struct reftable_transaction_data *tx_data,
				      struct ref_update *update,
				      struct strbuf *err)
{
	struct reftable_stack *stack = stack_for(refs, update->refname, NULL);
	struct write_transaction_table_arg *arg = NULL;
	size_t i;
	int ret;

	/*
	 * Search for a preexisting stack update. If there is one then we add
	 * the update to it, otherwise we set up a new stack update.
	 */
	for (i = 0; !arg && i < tx_data->args_nr; i++)
		if (tx_data->args[i].stack == stack)
			arg = &tx_data->args[i];

	if (!arg) {
		struct reftable_addition *addition;

		ret = reftable_stack_reload(stack);
		if (ret)
			return ret;

		ret = reftable_stack_new_addition(&addition, stack);
		if (ret) {
			if (ret == REFTABLE_LOCK_ERROR)
				strbuf_addstr(err, "cannot lock references");
			return ret;
		}

		ALLOC_GROW(tx_data->args, tx_data->args_nr + 1,
			   tx_data->args_alloc);
		arg = &tx_data->args[tx_data->args_nr++];
		arg->refs = refs;
		arg->stack = stack;
		arg->addition = addition;
		arg->updates = NULL;
		arg->updates_nr = 0;
		arg->updates_alloc = 0;
		arg->updates_expected = 0;
	}

	arg->updates_expected++;

	if (out)
		*out = arg;

	return 0;
}

/*
 * Queue a reference update for the correct stack. We potentially need to
 * handle multiple stack updates in a single transaction when it spans
 * multiple worktrees.
 */
static int queue_transaction_update(struct reftable_ref_store *refs,
				    struct reftable_transaction_data *tx_data,
				    struct ref_update *update,
				    struct object_id *current_oid,
				    struct strbuf *err)
{
	struct write_transaction_table_arg *arg = NULL;
	int ret;

	if (update->backend_data)
		BUG("reference update queued more than once");

	ret = prepare_transaction_update(&arg, refs, tx_data, update, err);
	if (ret < 0)
		return ret;

	ALLOC_GROW(arg->updates, arg->updates_nr + 1,
		   arg->updates_alloc);
	arg->updates[arg->updates_nr].update = update;
	oidcpy(&arg->updates[arg->updates_nr].current_oid, current_oid);
	update->backend_data = &arg->updates[arg->updates_nr++];

	return 0;
}

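/*
 * Prepare the transaction: lock all stacks touched by the queued updates,
 * reject duplicate refnames, verify expected old values, split updates of
 * symbolic refs into updates of their targets, and queue the per-stack
 * writes that get committed when the transaction is finished.
 */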
static int reftable_be_transaction_prepare(struct ref_store *ref_store,
					   struct ref_transaction *transaction,
					   struct strbuf *err)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_WRITE|REF_STORE_MAIN, "ref_transaction_prepare");
	struct strbuf referent = STRBUF_INIT, head_referent = STRBUF_INIT;
	struct string_list affected_refnames = STRING_LIST_INIT_NODUP;
	struct reftable_transaction_data *tx_data = NULL;
	struct object_id head_oid;
	unsigned int head_type = 0;
	size_t i;
	int ret;

	ret = refs->err;
	if (ret < 0)
		goto done;

	tx_data = xcalloc(1, sizeof(*tx_data));

	/*
	 * Preprocess all updates. For one we check that there are no duplicate
	 * reference updates in this transaction. Second, we lock all stacks
	 * that will be modified during the transaction.
	 */
	for (i = 0; i < transaction->nr; i++) {
		ret = prepare_transaction_update(NULL, refs, tx_data,
						 transaction->updates[i], err);
		if (ret)
			goto done;

		string_list_append(&affected_refnames,
				   transaction->updates[i]->refname);
	}

	/*
	 * Now that we have counted updates per stack we can preallocate their
	 * arrays. This avoids having to reallocate many times.
	 */
	for (i = 0; i < tx_data->args_nr; i++) {
		CALLOC_ARRAY(tx_data->args[i].updates, tx_data->args[i].updates_expected);
		tx_data->args[i].updates_alloc = tx_data->args[i].updates_expected;
	}

	/*
	 * Fail if a refname appears more than once in the transaction.
	 * This code is taken from the files backend and is a good candidate to
	 * be moved into the generic layer.
	 */
	string_list_sort(&affected_refnames);
	if (ref_update_reject_duplicates(&affected_refnames, err)) {
		ret = TRANSACTION_GENERIC_ERROR;
		goto done;
	}

	ret = read_ref_without_reload(refs, stack_for(refs, "HEAD", NULL), "HEAD",
				      &head_oid, &head_referent, &head_type);
	if (ret < 0)
		goto done;
	ret = 0;

	for (i = 0; i < transaction->nr; i++) {
		struct ref_update *u = transaction->updates[i];
		struct object_id current_oid = {0};
		struct reftable_stack *stack;
		const char *rewritten_ref;

		stack = stack_for(refs, u->refname, &rewritten_ref);

		/* Verify that the new object ID is valid. */
		if ((u->flags & REF_HAVE_NEW) && !is_null_oid(&u->new_oid) &&
		    !(u->flags & REF_SKIP_OID_VERIFICATION) &&
		    !(u->flags & REF_LOG_ONLY)) {
			struct object *o = parse_object(refs->base.repo, &u->new_oid);
			if (!o) {
				strbuf_addf(err,
					    _("trying to write ref '%s' with nonexistent object %s"),
					    u->refname, oid_to_hex(&u->new_oid));
				ret = -1;
				goto done;
			}

			if (o->type != OBJ_COMMIT && is_branch(u->refname)) {
				strbuf_addf(err, _("trying to write non-commit object %s to branch '%s'"),
					    oid_to_hex(&u->new_oid), u->refname);
				ret = -1;
				goto done;
			}
		}

		/*
		 * When we update the reference that HEAD points to we enqueue
		 * a second log-only update for HEAD so that its reflog is
		 * updated accordingly.
		 */
		if (head_type == REF_ISSYMREF &&
		    !(u->flags & REF_LOG_ONLY) &&
		    !(u->flags & REF_UPDATE_VIA_HEAD) &&
		    !strcmp(rewritten_ref, head_referent.buf)) {
			struct ref_update *new_update;

			/*
			 * First make sure that HEAD is not already in the
			 * transaction. This check is O(lg N) in the transaction
			 * size, but it happens at most once per transaction.
			 */
			if (string_list_has_string(&affected_refnames, "HEAD")) {
				/* An entry already existed */
				strbuf_addf(err,
					    _("multiple updates for 'HEAD' (including one "
					    "via its referent '%s') are not allowed"),
					    u->refname);
				ret = TRANSACTION_NAME_CONFLICT;
				goto done;
			}

			new_update = ref_transaction_add_update(
					transaction, "HEAD",
					u->flags | REF_LOG_ONLY | REF_NO_DEREF,
					&u->new_oid, &u->old_oid, NULL, NULL, u->msg);
			string_list_insert(&affected_refnames, new_update->refname);
		}

		ret = read_ref_without_reload(refs, stack, rewritten_ref,
					      &current_oid, &referent, &u->type);
		if (ret < 0)
			goto done;
		if (ret > 0 && !ref_update_expects_existing_old_ref(u)) {
			/*
			 * The reference does not exist, and we either have no
			 * old object ID or expect the reference to not exist.
			 * We can thus skip below safety checks as well as the
			 * symref splitting. But we do want to verify that
			 * there is no conflicting reference here so that we
			 * can output a proper error message instead of failing
			 * at a later point.
			 */
			ret = refs_verify_refname_available(ref_store, u->refname,
							    &affected_refnames, NULL, err);
			if (ret < 0)
				goto done;

			/*
			 * There is no need to write the reference deletion
			 * when the reference in question doesn't exist.
			 */
			if ((u->flags & REF_HAVE_NEW) && !ref_update_has_null_new_value(u)) {
				ret = queue_transaction_update(refs, tx_data, u,
							       &current_oid, err);
				if (ret)
					goto done;
			}

			continue;
		}
		if (ret > 0) {
			/* The reference does not exist, but we expected it to. */
			strbuf_addf(err, _("cannot lock ref '%s': "
				    "unable to resolve reference '%s'"),
				    ref_update_original_update_refname(u), u->refname);
			ret = -1;
			goto done;
		}

		if (u->type & REF_ISSYMREF) {
			/*
			 * The reftable stack is locked at this point already,
			 * so it is safe to call `refs_resolve_ref_unsafe()`
			 * here without causing races.
			 */
			const char *resolved = refs_resolve_ref_unsafe(&refs->base, u->refname, 0,
								       &current_oid, NULL);

			if (u->flags & REF_NO_DEREF) {
				if (u->flags & REF_HAVE_OLD && !resolved) {
					strbuf_addf(err, _("cannot lock ref '%s': "
						    "error reading reference"), u->refname);
					ret = -1;
					goto done;
				}
			} else {
				struct ref_update *new_update;
				int new_flags;

				new_flags = u->flags;
				if (!strcmp(rewritten_ref, "HEAD"))
					new_flags |= REF_UPDATE_VIA_HEAD;

				/*
				 * If we are updating a symref (eg. HEAD), we should also
				 * update the branch that the symref points to.
				 *
				 * This is generic functionality, and would be better
				 * done in refs.c, but the current implementation is
				 * intertwined with the locking in files-backend.c.
				 */
				new_update = ref_transaction_add_update(
					transaction, referent.buf, new_flags,
					u->new_target ? NULL : &u->new_oid,
					u->old_target ? NULL : &u->old_oid,
					u->new_target, u->old_target, u->msg);

				new_update->parent_update = u;

				/*
				 * Change the symbolic ref update to log only. Also, it
				 * doesn't need to check its old OID value, as that will be
				 * done when new_update is processed.
				 */
				u->flags |= REF_LOG_ONLY | REF_NO_DEREF;
				u->flags &= ~REF_HAVE_OLD;

				if (string_list_has_string(&affected_refnames, new_update->refname)) {
					strbuf_addf(err,
						    _("multiple updates for '%s' (including one "
						    "via symref '%s') are not allowed"),
						    referent.buf, u->refname);
					ret = TRANSACTION_NAME_CONFLICT;
					goto done;
				}
				string_list_insert(&affected_refnames, new_update->refname);
			}
		}

		/*
		 * Verify that the old object matches our expectations. Note
		 * that the error messages here do not make a lot of sense in
		 * the context of the reftable backend as we never lock
		 * individual refs. But the error messages match what the files
		 * backend returns, which keeps our tests happy.
		 */
		if (u->old_target) {
			if (!(u->type & REF_ISSYMREF)) {
				strbuf_addf(err, _("cannot lock ref '%s': "
					   "expected symref with target '%s': "
					   "but is a regular ref"),
					    ref_update_original_update_refname(u),
					    u->old_target);
				ret = -1;
				goto done;
			}

			if (ref_update_check_old_target(referent.buf, u, err)) {
				ret = -1;
				goto done;
			}
		} else if ((u->flags & REF_HAVE_OLD) && !oideq(&current_oid, &u->old_oid)) {
			if (is_null_oid(&u->old_oid))
				strbuf_addf(err, _("cannot lock ref '%s': "
						   "reference already exists"),
					    ref_update_original_update_refname(u));
			else if (is_null_oid(&current_oid))
				strbuf_addf(err, _("cannot lock ref '%s': "
						   "reference is missing but expected %s"),
					    ref_update_original_update_refname(u),
					    oid_to_hex(&u->old_oid));
			else
				strbuf_addf(err, _("cannot lock ref '%s': "
						   "is at %s but expected %s"),
					    ref_update_original_update_refname(u),
					    oid_to_hex(&current_oid),
					    oid_to_hex(&u->old_oid));
			ret = -1;
			goto done;
		}

		/*
		 * If all of the following conditions are true:
		 *
		 *   - We're not about to write a symref.
		 *   - We're not about to write a log-only entry.
		 *   - Old and new object ID are the same.
		 *
		 * Then we're essentially doing a no-op update that can be
		 * skipped. This is not only for the sake of efficiency, but
		 * also skips writing unneeded reflog entries.
		 */
		if ((u->type & REF_ISSYMREF) ||
		    (u->flags & REF_LOG_ONLY) ||
		    (u->flags & REF_HAVE_NEW && !oideq(&current_oid, &u->new_oid))) {
			ret = queue_transaction_update(refs, tx_data, u,
						       &current_oid, err);
			if (ret)
				goto done;
		}
	}

	transaction->backend_data = tx_data;
	transaction->state = REF_TRANSACTION_PREPARED;

done:
	assert(ret != REFTABLE_API_ERROR);
	if (ret < 0) {
		free_transaction_data(tx_data);
		transaction->state = REF_TRANSACTION_CLOSED;
		if (!err->len)
			strbuf_addf(err, _("reftable: transaction prepare: %s"),
				    reftable_error_str(ret));
	}
	string_list_clear(&affected_refnames, 0);
	strbuf_release(&referent);
	strbuf_release(&head_referent);

	return ret;
}

static int reftable_be_transaction_abort(struct ref_store *ref_store UNUSED,
					 struct ref_transaction *transaction,
					 struct strbuf *err UNUSED)
{
	struct reftable_transaction_data *tx_data = transaction->backend_data;
	free_transaction_data(tx_data);
	transaction->state = REF_TRANSACTION_CLOSED;
	return 0;
}

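/* qsort(3) comparator that orders queued transaction updates by refname. */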
static int transaction_update_cmp(const void *a, const void *b)
{
	return strcmp(((struct reftable_transaction_update *)a)->update->refname,
		      ((struct reftable_transaction_update *)b)->update->refname);
}

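/*
 * Write all updates queued for a single stack into one new table. Deletions
 * of refs also queue tombstones for their existing reflog entries, while
 * other updates append a reflog entry when policy asks for one. Log records
 * are written after all ref records so that ref and log blocks do not get
 * intermixed.
 */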
static int write_transaction_table(struct reftable_writer *writer, void *cb_data)
{
	struct write_transaction_table_arg *arg = cb_data;
	uint64_t ts = reftable_stack_next_update_index(arg->stack);
	struct reftable_log_record *logs = NULL;
	struct ident_split committer_ident = {0};
	size_t logs_nr = 0, logs_alloc = 0, i;
	const char *committer_info;
	int ret = 0;

	committer_info = git_committer_info(0);
	if (split_ident_line(&committer_ident, committer_info, strlen(committer_info)))
		BUG("failed splitting committer info");

	QSORT(arg->updates, arg->updates_nr, transaction_update_cmp);

	reftable_writer_set_limits(writer, ts, ts);

	for (i = 0; i < arg->updates_nr; i++) {
		struct reftable_transaction_update *tx_update = &arg->updates[i];
		struct ref_update *u = tx_update->update;

		/*
		 * Write a reflog entry when updating a ref to point to
		 * something new in any of the following cases:
		 *
		 * - The reference is about to be deleted. We always want to
		 *   delete the reflog in that case.
		 * - REF_FORCE_CREATE_REFLOG is set, asking us to always create
		 *   the reflog entry.
		 * - `core.logAllRefUpdates` tells us to create the reflog for
		 *   the given ref.
		 */
		if ((u->flags & REF_HAVE_NEW) &&
		    !(u->type & REF_ISSYMREF) &&
		    ref_update_has_null_new_value(u)) {
			struct reftable_log_record log = {0};
			struct reftable_iterator it = {0};

			reftable_stack_init_log_iterator(arg->stack, &it);

			/*
			 * When deleting refs we also delete all reflog entries
			 * with them. While it is not strictly required to
			 * delete reflogs together with their refs, this
			 * matches the behaviour of the files backend.
			 *
			 * Unfortunately, we have no better way than to delete
			 * all reflog entries one by one.
			 */
			ret = reftable_iterator_seek_log(&it, u->refname);
			while (ret == 0) {
				struct reftable_log_record *tombstone;

				ret = reftable_iterator_next_log(&it, &log);
				if (ret < 0)
					break;
				if (ret > 0 || strcmp(log.refname, u->refname)) {
					ret = 0;
					break;
				}

				ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
				tombstone = &logs[logs_nr++];
				tombstone->refname = xstrdup(u->refname);
				tombstone->value_type = REFTABLE_LOG_DELETION;
				tombstone->update_index = log.update_index;
			}

			reftable_log_record_release(&log);
			reftable_iterator_destroy(&it);

			if (ret)
				goto done;
		} else if (!(u->flags & REF_SKIP_CREATE_REFLOG) &&
			   (u->flags & REF_HAVE_NEW) &&
			   (u->flags & REF_FORCE_CREATE_REFLOG ||
			    should_write_log(&arg->refs->base, u->refname))) {
			struct reftable_log_record *log;
			int create_reflog = 1;

			if (u->new_target) {
				if (!refs_resolve_ref_unsafe(&arg->refs->base, u->new_target,
							     RESOLVE_REF_READING, &u->new_oid, NULL)) {
					/*
					 * TODO: currently we skip creating reflogs for dangling
					 * symref updates. It would be nice to capture this as
					 * zero oid updates however.
					 */
					create_reflog = 0;
				}
			}

			if (create_reflog) {
				ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
				log = &logs[logs_nr++];
				memset(log, 0, sizeof(*log));

				fill_reftable_log_record(log, &committer_ident);
				log->update_index = ts;
				log->refname = xstrdup(u->refname);
				memcpy(log->value.update.new_hash,
				       u->new_oid.hash, GIT_MAX_RAWSZ);
				memcpy(log->value.update.old_hash,
				       tx_update->current_oid.hash, GIT_MAX_RAWSZ);
				log->value.update.message =
					xstrndup(u->msg, arg->refs->write_options.block_size / 2);
			}
		}

		if (u->flags & REF_LOG_ONLY)
			continue;

		if (u->new_target) {
			struct reftable_ref_record ref = {
				.refname = (char *)u->refname,
				.value_type = REFTABLE_REF_SYMREF,
				.value.symref = (char *)u->new_target,
				.update_index = ts,
			};

			ret = reftable_writer_add_ref(writer, &ref);
			if (ret < 0)
				goto done;
		} else if ((u->flags & REF_HAVE_NEW) && ref_update_has_null_new_value(u)) {
			struct reftable_ref_record ref = {
				.refname = (char *)u->refname,
				.update_index = ts,
				.value_type = REFTABLE_REF_DELETION,
			};

			ret = reftable_writer_add_ref(writer, &ref);
			if (ret < 0)
				goto done;
		} else if (u->flags & REF_HAVE_NEW) {
			struct reftable_ref_record ref = {0};
			struct object_id peeled;
			int peel_error;

			ref.refname = (char *)u->refname;
			ref.update_index = ts;

			peel_error = peel_object(arg->refs->base.repo, &u->new_oid, &peeled);
			if (!peel_error) {
				ref.value_type = REFTABLE_REF_VAL2;
				memcpy(ref.value.val2.target_value, peeled.hash, GIT_MAX_RAWSZ);
				memcpy(ref.value.val2.value, u->new_oid.hash, GIT_MAX_RAWSZ);
			} else if (!is_null_oid(&u->new_oid)) {
				ref.value_type = REFTABLE_REF_VAL1;
				memcpy(ref.value.val1, u->new_oid.hash, GIT_MAX_RAWSZ);
			}

			ret = reftable_writer_add_ref(writer, &ref);
			if (ret < 0)
				goto done;
		}
	}

	/*
	 * Logs are written at the end so that we do not have intermixed ref
	 * and log blocks.
	 */
	if (logs) {
		ret = reftable_writer_add_logs(writer, logs, logs_nr);
		if (ret < 0)
			goto done;
	}

done:
	assert(ret != REFTABLE_API_ERROR);
	for (i = 0; i < logs_nr; i++)
		reftable_log_record_release(&logs[i]);
	free(logs);
	return ret;
}

static int reftable_be_transaction_finish(struct ref_store *ref_store UNUSED,
					  struct ref_transaction *transaction,
					  struct strbuf *err)
{
	struct reftable_transaction_data *tx_data = transaction->backend_data;
	int ret = 0;

	for (size_t i = 0; i < tx_data->args_nr; i++) {
		ret = reftable_addition_add(tx_data->args[i].addition,
					    write_transaction_table, &tx_data->args[i]);
		if (ret < 0)
			goto done;

		ret = reftable_addition_commit(tx_data->args[i].addition);
		if (ret < 0)
			goto done;
	}

done:
	assert(ret != REFTABLE_API_ERROR);
	free_transaction_data(tx_data);
	transaction->state = REF_TRANSACTION_CLOSED;

	if (ret) {
		strbuf_addf(err, _("reftable: transaction failure: %s"),
			    reftable_error_str(ret));
		return -1;
	}
	return ret;
}

static int reftable_be_initial_transaction_commit(struct ref_store *ref_store UNUSED,
						  struct ref_transaction *transaction,
						  struct strbuf *err)
{
	return ref_transaction_commit(transaction, err);
}

static int reftable_be_pack_refs(struct ref_store *ref_store,
				 struct pack_refs_opts *opts)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_WRITE | REF_STORE_ODB, "pack_refs");
	struct reftable_stack *stack;
	int ret;

	if (refs->err)
		return refs->err;

	stack = refs->worktree_stack;
	if (!stack)
		stack = refs->main_stack;

	if (opts->flags & PACK_REFS_AUTO)
		ret = reftable_stack_auto_compact(stack);
	else
		ret = reftable_stack_compact_all(stack, NULL);
	if (ret < 0) {
		ret = error(_("unable to compact stack: %s"),
			    reftable_error_str(ret));
		goto out;
	}

	ret = reftable_stack_clean(stack);
	if (ret)
		goto out;

out:
	return ret;
}

struct write_create_symref_arg {
	struct reftable_ref_store *refs;
	struct reftable_stack *stack;
	struct strbuf *err;
	const char *refname;
	const char *target;
	const char *logmsg;
};

struct write_copy_arg {
	struct reftable_ref_store *refs;
	struct reftable_stack *stack;
	const char *oldname;
	const char *newname;
	const char *logmsg;
	int delete_old;
};

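/*
 * Write the table for copying or renaming a reference. For a rename (that
 * is, when delete_old is set) the old ref and its reflog entries are
 * deleted at one update index and the new ref is created at the next one,
 * so that the new reflog can record both the deletion and the re-creation.
 */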
static int write_copy_table(struct reftable_writer *writer, void *cb_data)
{
	struct write_copy_arg *arg = cb_data;
	uint64_t deletion_ts, creation_ts;
	struct reftable_ref_record old_ref = {0}, refs[2] = {0};
	struct reftable_log_record old_log = {0}, *logs = NULL;
	struct reftable_iterator it = {0};
	struct string_list skip = STRING_LIST_INIT_NODUP;
	struct ident_split committer_ident = {0};
	struct strbuf errbuf = STRBUF_INIT;
	size_t logs_nr = 0, logs_alloc = 0, i;
	const char *committer_info;
	int ret;

	committer_info = git_committer_info(0);
	if (split_ident_line(&committer_ident, committer_info, strlen(committer_info)))
		BUG("failed splitting committer info");

	if (reftable_stack_read_ref(arg->stack, arg->oldname, &old_ref)) {
		ret = error(_("refname %s not found"), arg->oldname);
		goto done;
	}
	if (old_ref.value_type == REFTABLE_REF_SYMREF) {
		ret = error(_("refname %s is a symbolic ref, copying it is not supported"),
			    arg->oldname);
		goto done;
	}

	/*
	 * There's nothing to do in case the old and new name are the same, so
	 * we exit early in that case.
	 */
	if (!strcmp(arg->oldname, arg->newname)) {
		ret = 0;
		goto done;
	}

	/*
	 * Verify that the new refname is available.
	 */
	if (arg->delete_old)
		string_list_insert(&skip, arg->oldname);
	ret = refs_verify_refname_available(&arg->refs->base, arg->newname,
					    NULL, &skip, &errbuf);
	if (ret < 0) {
		error("%s", errbuf.buf);
		goto done;
	}

	/*
	 * When deleting the old reference we have to use two update indices:
	 * once to delete the old ref and its reflog, and once to create the
	 * new ref and its reflog. They need to be staged with two separate
	 * indices because the new reflog needs to encode both the deletion of
	 * the old branch and the creation of the new branch, and we cannot do
	 * two changes to a reflog in a single update.
	 */
	deletion_ts = creation_ts = reftable_stack_next_update_index(arg->stack);
	if (arg->delete_old)
		creation_ts++;
	reftable_writer_set_limits(writer, deletion_ts, creation_ts);

	/*
	 * Add the new reference. If this is a rename then we also delete the
	 * old reference.
	 */
	refs[0] = old_ref;
	refs[0].refname = xstrdup(arg->newname);
	refs[0].update_index = creation_ts;
	if (arg->delete_old) {
		refs[1].refname = xstrdup(arg->oldname);
		refs[1].value_type = REFTABLE_REF_DELETION;
		refs[1].update_index = deletion_ts;
	}
	ret = reftable_writer_add_refs(writer, refs, arg->delete_old ? 2 : 1);
	if (ret < 0)
		goto done;

	/*
	 * When deleting the old branch we need to create a reflog entry on the
	 * new branch name that indicates that the old branch has been deleted
	 * and then recreated. This is a tad weird, but matches what the files
	 * backend does.
	 */
	if (arg->delete_old) {
		struct strbuf head_referent = STRBUF_INIT;
		struct object_id head_oid;
		int append_head_reflog;
		unsigned head_type = 0;

		ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
		memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
		fill_reftable_log_record(&logs[logs_nr], &committer_ident);
		logs[logs_nr].refname = xstrdup(arg->newname);
		logs[logs_nr].update_index = deletion_ts;
		logs[logs_nr].value.update.message =
			xstrndup(arg->logmsg, arg->refs->write_options.block_size / 2);
		memcpy(logs[logs_nr].value.update.old_hash, old_ref.value.val1, GIT_MAX_RAWSZ);
		logs_nr++;

		ret = read_ref_without_reload(arg->refs, arg->stack, "HEAD", &head_oid,
					      &head_referent, &head_type);
		if (ret < 0)
			goto done;
		append_head_reflog = (head_type & REF_ISSYMREF) && !strcmp(head_referent.buf, arg->oldname);
		strbuf_release(&head_referent);

		/*
		 * The files backend uses `refs_delete_ref()` to delete the old
		 * branch name, which will append a reflog entry for HEAD in
		 * case it points to the old branch.
		 */
		if (append_head_reflog) {
			ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
			logs[logs_nr] = logs[logs_nr - 1];
			logs[logs_nr].refname = xstrdup("HEAD");
			logs[logs_nr].value.update.name =
				xstrdup(logs[logs_nr].value.update.name);
			logs[logs_nr].value.update.email =
				xstrdup(logs[logs_nr].value.update.email);
			logs[logs_nr].value.update.message =
				xstrdup(logs[logs_nr].value.update.message);
			logs_nr++;
		}
	}

	/*
	 * Create the reflog entry for the newly created branch.
	 */
	ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
	memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
	fill_reftable_log_record(&logs[logs_nr], &committer_ident);
	logs[logs_nr].refname = xstrdup(arg->newname);
	logs[logs_nr].update_index = creation_ts;
	logs[logs_nr].value.update.message =
		xstrndup(arg->logmsg, arg->refs->write_options.block_size / 2);
	memcpy(logs[logs_nr].value.update.new_hash, old_ref.value.val1, GIT_MAX_RAWSZ);
	logs_nr++;

	/*
	 * In addition to writing the reflog entry for the new branch, we also
	 * copy over all log entries from the old reflog. Last but not least,
	 * when renaming we also have to delete all the old reflog entries.
	 */
	reftable_stack_init_log_iterator(arg->stack, &it);
	ret = reftable_iterator_seek_log(&it, arg->oldname);
	if (ret < 0)
		goto done;

	while (1) {
		ret = reftable_iterator_next_log(&it, &old_log);
		if (ret < 0)
			goto done;
		if (ret > 0 || strcmp(old_log.refname, arg->oldname)) {
			ret = 0;
			break;
		}

		free(old_log.refname);

		/*
		 * Copy over the old reflog entry with the new refname.
		 */
		ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
		logs[logs_nr] = old_log;
		logs[logs_nr].refname = xstrdup(arg->newname);
		logs_nr++;

		/*
		 * Delete the old reflog entry in case we are renaming.
		 */
		if (arg->delete_old) {
			ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
			memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
			logs[logs_nr].refname = xstrdup(arg->oldname);
			logs[logs_nr].value_type = REFTABLE_LOG_DELETION;
			logs[logs_nr].update_index = old_log.update_index;
			logs_nr++;
		}

		/*
		 * Transfer ownership of the log record we're iterating over to
		 * the array of log records. Otherwise, the pointers would get
		 * free'd or reallocated by the iterator.
		 */
		memset(&old_log, 0, sizeof(old_log));
	}

	ret = reftable_writer_add_logs(writer, logs, logs_nr);
	if (ret < 0)
		goto done;

done:
	assert(ret != REFTABLE_API_ERROR);
	reftable_iterator_destroy(&it);
	string_list_clear(&skip, 0);
	strbuf_release(&errbuf);
	for (i = 0; i < logs_nr; i++)
		reftable_log_record_release(&logs[i]);
	free(logs);
	for (i = 0; i < ARRAY_SIZE(refs); i++)
		reftable_ref_record_release(&refs[i]);
	reftable_ref_record_release(&old_ref);
	reftable_log_record_release(&old_log);
	return ret;
}

static int reftable_be_rename_ref(struct ref_store *ref_store,
1617
				  const char *oldrefname,
1618
				  const char *newrefname,
1619
				  const char *logmsg)
1620
{
1621
	struct reftable_ref_store *refs =
1622
		reftable_be_downcast(ref_store, REF_STORE_WRITE, "rename_ref");
1623
	struct reftable_stack *stack = stack_for(refs, newrefname, &newrefname);
1624
	struct write_copy_arg arg = {
1625
		.refs = refs,
1626
		.stack = stack,
1627
		.oldname = oldrefname,
1628
		.newname = newrefname,
1629
		.logmsg = logmsg,
1630
		.delete_old = 1,
1631
	};
1632
	int ret;
1633

1634
	ret = refs->err;
1635
	if (ret < 0)
1636
		goto done;
1637

1638
	ret = reftable_stack_reload(stack);
1639
	if (ret)
1640
		goto done;
1641
	ret = reftable_stack_add(stack, &write_copy_table, &arg);
1642

1643
done:
1644
	assert(ret != REFTABLE_API_ERROR);
1645
	return ret;
1646
}
1647

1648
static int reftable_be_copy_ref(struct ref_store *ref_store,
1649
				const char *oldrefname,
1650
				const char *newrefname,
1651
				const char *logmsg)
1652
{
1653
	struct reftable_ref_store *refs =
1654
		reftable_be_downcast(ref_store, REF_STORE_WRITE, "copy_ref");
1655
	struct reftable_stack *stack = stack_for(refs, newrefname, &newrefname);
1656
	struct write_copy_arg arg = {
1657
		.refs = refs,
1658
		.stack = stack,
1659
		.oldname = oldrefname,
1660
		.newname = newrefname,
1661
		.logmsg = logmsg,
1662
	};
1663
	int ret;
1664

1665
	ret = refs->err;
1666
	if (ret < 0)
1667
		goto done;
1668

1669
	ret = reftable_stack_reload(stack);
1670
	if (ret)
1671
		goto done;
1672
	ret = reftable_stack_add(stack, &write_copy_table, &arg);
1673

1674
done:
1675
	assert(ret != REFTABLE_API_ERROR);
1676
	return ret;
1677
}
struct reftable_reflog_iterator {
	struct ref_iterator base;
	struct reftable_ref_store *refs;
	struct reftable_iterator iter;
	struct reftable_log_record log;
	struct strbuf last_name;
	int err;
};

static int reftable_reflog_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct reftable_reflog_iterator *iter =
		(struct reftable_reflog_iterator *)ref_iterator;

	while (!iter->err) {
		iter->err = reftable_iterator_next_log(&iter->iter, &iter->log);
		if (iter->err)
			break;

		/*
		 * We want the refnames that we have reflogs for, so we skip if
		 * we've already produced this name. This could be faster by
		 * seeking directly to reflog@update_index==0.
		 */
		if (!strcmp(iter->log.refname, iter->last_name.buf))
			continue;

		if (check_refname_format(iter->log.refname,
					 REFNAME_ALLOW_ONELEVEL))
			continue;

		strbuf_reset(&iter->last_name);
		strbuf_addstr(&iter->last_name, iter->log.refname);
		iter->base.refname = iter->log.refname;

		break;
	}

	if (iter->err > 0) {
		if (ref_iterator_abort(ref_iterator) != ITER_DONE)
			return ITER_ERROR;
		return ITER_DONE;
	}

	if (iter->err < 0) {
		ref_iterator_abort(ref_iterator);
		return ITER_ERROR;
	}

	return ITER_OK;
}

static int reftable_reflog_iterator_peel(struct ref_iterator *ref_iterator UNUSED,
					 struct object_id *peeled UNUSED)
{
	BUG("reftable reflog iterator cannot be peeled");
	return -1;
}

static int reftable_reflog_iterator_abort(struct ref_iterator *ref_iterator)
{
	struct reftable_reflog_iterator *iter =
		(struct reftable_reflog_iterator *)ref_iterator;
	reftable_log_record_release(&iter->log);
	reftable_iterator_destroy(&iter->iter);
	strbuf_release(&iter->last_name);
	free(iter);
	return ITER_DONE;
}

static struct ref_iterator_vtable reftable_reflog_iterator_vtable = {
	.advance = reftable_reflog_iterator_advance,
	.peel = reftable_reflog_iterator_peel,
	.abort = reftable_reflog_iterator_abort
};
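
/*
 * Set up a reflog iterator for a single stack, seeded at the very first
 * log record via the empty refname. Errors are not reported right away
 * but stored in the iterator, to be surfaced on the first advance.
 */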
static struct reftable_reflog_iterator *reflog_iterator_for_stack(struct reftable_ref_store *refs,
								  struct reftable_stack *stack)
{
	struct reftable_reflog_iterator *iter;
	int ret;

	iter = xcalloc(1, sizeof(*iter));
	base_ref_iterator_init(&iter->base, &reftable_reflog_iterator_vtable);
	strbuf_init(&iter->last_name, 0);
	iter->refs = refs;

	ret = refs->err;
	if (ret)
		goto done;

	ret = reftable_stack_reload(stack);
	if (ret < 0)
		goto done;

	reftable_stack_init_log_iterator(stack, &iter->iter);
	ret = reftable_iterator_seek_log(&iter->iter, "");
	if (ret < 0)
		goto done;

done:
	iter->err = ret;
	return iter;
}
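
/*
 * Begin iteration over all reflogs. When the store was opened via a
 * worktree, the per-worktree iterator is merged with the one for the
 * shared stack so that callers see a single combined stream. A caller
 * would look roughly like this (hypothetical sketch, not taken from
 * this file; handle_reflog() is a stand-in for the caller's logic):
 *
 *	struct ref_iterator *it = reftable_be_reflog_iterator_begin(store);
 *	while (ref_iterator_advance(it) == ITER_OK)
 *		handle_reflog(it->refname);
 */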
static struct ref_iterator *reftable_be_reflog_iterator_begin(struct ref_store *ref_store)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_iterator_begin");
	struct reftable_reflog_iterator *main_iter, *worktree_iter;

	main_iter = reflog_iterator_for_stack(refs, refs->main_stack);
	if (!refs->worktree_stack)
		return &main_iter->base;

	worktree_iter = reflog_iterator_for_stack(refs, refs->worktree_stack);

	return merge_ref_iterator_begin(&worktree_iter->base, &main_iter->base,
					ref_iterator_select, NULL);
}
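
/*
 * Translate a single reftable log record into the arguments expected by
 * an each_reflog_ent_fn callback and invoke it. Reflog existence
 * markers are filtered out here so that callers never see them.
 */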
static int yield_log_record(struct reftable_ref_store *refs,
			    struct reftable_log_record *log,
			    each_reflog_ent_fn fn,
			    void *cb_data)
{
	struct object_id old_oid, new_oid;
	const char *full_committer;

	oidread(&old_oid, log->value.update.old_hash, refs->base.repo->hash_algo);
	oidread(&new_oid, log->value.update.new_hash, refs->base.repo->hash_algo);

	/*
	 * When both the old and the new object ID are null, then this is
	 * the reflog existence marker, which callers must never see.
	 */
	if (is_null_oid(&old_oid) && is_null_oid(&new_oid))
		return 0;

	full_committer = fmt_ident(log->value.update.name, log->value.update.email,
				   WANT_COMMITTER_IDENT, NULL, IDENT_NO_DATE);
	return fn(&old_oid, &new_oid, full_committer,
		  log->value.update.time, log->value.update.tz_offset,
		  log->value.update.message, cb_data);
}
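
/*
 * Iterate over the reflog entries of a single ref, newest first. This
 * matches the order in which reftable hands out log records, so each
 * entry can be yielded as soon as it is read.
 */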
static int reftable_be_for_each_reflog_ent_reverse(struct ref_store *ref_store,
						   const char *refname,
						   each_reflog_ent_fn fn,
						   void *cb_data)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent_reverse");
	struct reftable_stack *stack = stack_for(refs, refname, &refname);
	struct reftable_log_record log = {0};
	struct reftable_iterator it = {0};
	int ret;

	if (refs->err < 0)
		return refs->err;

	reftable_stack_init_log_iterator(stack, &it);
	ret = reftable_iterator_seek_log(&it, refname);
	while (!ret) {
		ret = reftable_iterator_next_log(&it, &log);
		if (ret < 0)
			break;
		if (ret > 0 || strcmp(log.refname, refname)) {
			ret = 0;
			break;
		}

		ret = yield_log_record(refs, &log, fn, cb_data);
		if (ret)
			break;
	}

	reftable_log_record_release(&log);
	reftable_iterator_destroy(&it);
	return ret;
}
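
/*
 * Iterate over the reflog entries of a single ref in chronological
 * order. Because reftable hands out log records newest first, all
 * entries are buffered in memory and then replayed back-to-front.
 */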
static int reftable_be_for_each_reflog_ent(struct ref_store *ref_store,
					   const char *refname,
					   each_reflog_ent_fn fn,
					   void *cb_data)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent");
	struct reftable_stack *stack = stack_for(refs, refname, &refname);
	struct reftable_log_record *logs = NULL;
	struct reftable_iterator it = {0};
	size_t logs_alloc = 0, logs_nr = 0, i;
	int ret;

	if (refs->err < 0)
		return refs->err;

	reftable_stack_init_log_iterator(stack, &it);
	ret = reftable_iterator_seek_log(&it, refname);
	while (!ret) {
		struct reftable_log_record log = {0};

		ret = reftable_iterator_next_log(&it, &log);
		if (ret < 0)
			goto done;
		if (ret > 0 || strcmp(log.refname, refname)) {
			reftable_log_record_release(&log);
			ret = 0;
			break;
		}

		ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
		logs[logs_nr++] = log;
	}

	for (i = logs_nr; i--;) {
		ret = yield_log_record(refs, &logs[i], fn, cb_data);
		if (ret)
			goto done;
	}

done:
	reftable_iterator_destroy(&it);
	for (i = 0; i < logs_nr; i++)
		reftable_log_record_release(&logs[i]);
	free(logs);
	return ret;
}

static int reftable_be_reflog_exists(struct ref_store *ref_store,
				     const char *refname)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_exists");
	struct reftable_stack *stack = stack_for(refs, refname, &refname);
	struct reftable_log_record log = {0};
	struct reftable_iterator it = {0};
	int ret;

	ret = refs->err;
	if (ret < 0)
		goto done;

	ret = reftable_stack_reload(stack);
	if (ret < 0)
		goto done;

	reftable_stack_init_log_iterator(stack, &it);
	ret = reftable_iterator_seek_log(&it, refname);
	if (ret < 0)
		goto done;

	/*
	 * Check whether we get at least one log record for the given ref name.
	 * If so, the reflog exists, otherwise it doesn't.
	 */
	ret = reftable_iterator_next_log(&it, &log);
	if (ret < 0)
		goto done;
	if (ret > 0) {
		ret = 0;
		goto done;
	}

	ret = strcmp(log.refname, refname) == 0;

done:
	reftable_iterator_destroy(&it);
	reftable_log_record_release(&log);
	if (ret < 0)
		ret = 0;
	return ret;
}

struct write_reflog_existence_arg {
	struct reftable_ref_store *refs;
	const char *refname;
	struct reftable_stack *stack;
};

static int write_reflog_existence_table(struct reftable_writer *writer,
					void *cb_data)
{
	struct write_reflog_existence_arg *arg = cb_data;
	uint64_t ts = reftable_stack_next_update_index(arg->stack);
	struct reftable_log_record log = {0};
	int ret;

	ret = reftable_stack_read_log(arg->stack, arg->refname, &log);
	if (ret <= 0)
		goto done;

	reftable_writer_set_limits(writer, ts, ts);

	/*
	 * The existence entry has both the old and the new object ID set to
	 * the null object ID. Our iterators are aware of this and will not
	 * present it to their callers.
	 */
	log.refname = xstrdup(arg->refname);
	log.update_index = ts;
	log.value_type = REFTABLE_LOG_UPDATE;
	ret = reftable_writer_add_log(writer, &log);

done:
	assert(ret != REFTABLE_API_ERROR);
	reftable_log_record_release(&log);
	return ret;
}
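
/*
 * "Create" a reflog by writing the existence marker for the given ref.
 * write_reflog_existence_table() above only writes the marker when the
 * ref has no log records yet, so this is effectively idempotent.
 */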
static int reftable_be_create_reflog(struct ref_store *ref_store,
				     const char *refname,
				     struct strbuf *errmsg UNUSED)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_WRITE, "create_reflog");
	struct reftable_stack *stack = stack_for(refs, refname, &refname);
	struct write_reflog_existence_arg arg = {
		.refs = refs,
		.stack = stack,
		.refname = refname,
	};
	int ret;

	ret = refs->err;
	if (ret < 0)
		goto done;

	ret = reftable_stack_reload(stack);
	if (ret)
		goto done;

	ret = reftable_stack_add(stack, &write_reflog_existence_table, &arg);

done:
	return ret;
}

struct write_reflog_delete_arg {
	struct reftable_stack *stack;
	const char *refname;
};

static int write_reflog_delete_table(struct reftable_writer *writer, void *cb_data)
{
	struct write_reflog_delete_arg *arg = cb_data;
	struct reftable_log_record log = {0}, tombstone = {0};
	struct reftable_iterator it = {0};
	uint64_t ts = reftable_stack_next_update_index(arg->stack);
	int ret;

	reftable_writer_set_limits(writer, ts, ts);

	reftable_stack_init_log_iterator(arg->stack, &it);

	/*
	 * In order to delete a reflog we need to write tombstones for all of
	 * its entries one by one. This is inefficient, but the reftable
	 * format does not have a better deletion marker right now.
	 */
	ret = reftable_iterator_seek_log(&it, arg->refname);
	while (ret == 0) {
		ret = reftable_iterator_next_log(&it, &log);
		if (ret < 0)
			break;
		if (ret > 0 || strcmp(log.refname, arg->refname)) {
			ret = 0;
			break;
		}

		tombstone.refname = (char *)arg->refname;
		tombstone.value_type = REFTABLE_LOG_DELETION;
		tombstone.update_index = log.update_index;

		ret = reftable_writer_add_log(writer, &tombstone);
	}

	reftable_log_record_release(&log);
	reftable_iterator_destroy(&it);
	return ret;
}
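
/*
 * Delete the reflog of a ref by adding a new table that tombstones each
 * of its log records (see write_reflog_delete_table() above).
 */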
static int reftable_be_delete_reflog(struct ref_store *ref_store,
				     const char *refname)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_WRITE, "delete_reflog");
	struct reftable_stack *stack = stack_for(refs, refname, &refname);
	struct write_reflog_delete_arg arg = {
		.stack = stack,
		.refname = refname,
	};
	int ret;

	ret = reftable_stack_reload(stack);
	if (ret)
		return ret;
	ret = reftable_stack_add(stack, &write_reflog_delete_table, &arg);

	assert(ret != REFTABLE_API_ERROR);
	return ret;
}

struct reflog_expiry_arg {
	struct reftable_ref_store *refs;
	struct reftable_stack *stack;
	struct reftable_log_record *records;
	struct object_id update_oid;
	const char *refname;
	size_t len;
};
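
/*
 * Write the table produced by a reflog expiry: the surviving (possibly
 * rewritten) records, tombstones for pruned ones, an updated ref value
 * when requested, and the existence marker in case no live records
 * remain.
 */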
static int write_reflog_expiry_table(struct reftable_writer *writer, void *cb_data)
{
	struct reflog_expiry_arg *arg = cb_data;
	uint64_t ts = reftable_stack_next_update_index(arg->stack);
	uint64_t live_records = 0;
	size_t i;
	int ret;

	for (i = 0; i < arg->len; i++)
		if (arg->records[i].value_type == REFTABLE_LOG_UPDATE)
			live_records++;

	reftable_writer_set_limits(writer, ts, ts);

	if (!is_null_oid(&arg->update_oid)) {
		struct reftable_ref_record ref = {0};
		struct object_id peeled;

		ref.refname = (char *)arg->refname;
		ref.update_index = ts;

		if (!peel_object(arg->refs->base.repo, &arg->update_oid, &peeled)) {
			ref.value_type = REFTABLE_REF_VAL2;
			memcpy(ref.value.val2.target_value, peeled.hash, GIT_MAX_RAWSZ);
			memcpy(ref.value.val2.value, arg->update_oid.hash, GIT_MAX_RAWSZ);
		} else {
			ref.value_type = REFTABLE_REF_VAL1;
			memcpy(ref.value.val1, arg->update_oid.hash, GIT_MAX_RAWSZ);
		}

		ret = reftable_writer_add_ref(writer, &ref);
		if (ret < 0)
			return ret;
	}

	/*
	 * When there are no more entries left in the reflog we empty it
	 * completely, but write a placeholder reflog entry that indicates that
	 * the reflog still exists.
	 */
	if (!live_records) {
		struct reftable_log_record log = {
			.refname = (char *)arg->refname,
			.value_type = REFTABLE_LOG_UPDATE,
			.update_index = ts,
		};

		ret = reftable_writer_add_log(writer, &log);
		if (ret)
			return ret;
	}

	for (i = 0; i < arg->len; i++) {
		ret = reftable_writer_add_log(writer, &arg->records[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int reftable_be_reflog_expire(struct ref_store *ref_store,
				     const char *refname,
				     unsigned int flags,
				     reflog_expiry_prepare_fn prepare_fn,
				     reflog_expiry_should_prune_fn should_prune_fn,
				     reflog_expiry_cleanup_fn cleanup_fn,
				     void *policy_cb_data)
{
	/*
	 * For log expiry, we write tombstones for every single reflog entry
	 * that is to be expired. This means that the entries are still
	 * retrievable by delving into the stack, and expiring entries
	 * paradoxically takes extra memory. This memory is only reclaimed when
	 * compacting the reftable stack.
	 *
	 * It would be better if the refs backend supported an API that sets a
	 * criterion for all refs, passing the criterion to pack_refs().
	 *
	 * On the plus side, because we do the expiration per ref, we can easily
	 * insert the reflog existence dummies.
	 */
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_WRITE, "reflog_expire");
	struct reftable_stack *stack = stack_for(refs, refname, &refname);
	struct reftable_log_record *logs = NULL;
	struct reftable_log_record *rewritten = NULL;
	struct reftable_ref_record ref_record = {0};
	struct reftable_iterator it = {0};
	struct reftable_addition *add = NULL;
	struct reflog_expiry_arg arg = {0};
	struct object_id oid = {0};
	uint8_t *last_hash = NULL;
	size_t logs_nr = 0, logs_alloc = 0, i;
	int ret;

	if (refs->err < 0)
		return refs->err;

	ret = reftable_stack_reload(stack);
	if (ret < 0)
		goto done;

	reftable_stack_init_log_iterator(stack, &it);

	ret = reftable_iterator_seek_log(&it, refname);
	if (ret < 0)
		goto done;

	ret = reftable_stack_new_addition(&add, stack);
	if (ret < 0)
		goto done;

	ret = reftable_stack_read_ref(stack, refname, &ref_record);
	if (ret < 0)
		goto done;
	if (reftable_ref_record_val1(&ref_record))
		oidread(&oid, reftable_ref_record_val1(&ref_record),
			ref_store->repo->hash_algo);
	prepare_fn(refname, &oid, policy_cb_data);

	while (1) {
		struct reftable_log_record log = {0};
		struct object_id old_oid, new_oid;

		ret = reftable_iterator_next_log(&it, &log);
		if (ret < 0)
			goto done;
		if (ret > 0 || strcmp(log.refname, refname)) {
			reftable_log_record_release(&log);
			break;
		}

		oidread(&old_oid, log.value.update.old_hash,
			ref_store->repo->hash_algo);
		oidread(&new_oid, log.value.update.new_hash,
			ref_store->repo->hash_algo);

		/*
		 * Skip over the reflog existence marker. We will add it back
		 * in when there are no live reflog records.
		 */
		if (is_null_oid(&old_oid) && is_null_oid(&new_oid)) {
			reftable_log_record_release(&log);
			continue;
		}

		ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
		logs[logs_nr++] = log;
	}

	/*
	 * We need to rewrite all reflog entries according to the pruning
	 * callback function:
	 *
	 *   - If a reflog entry shall be pruned we mark the record for
	 *     deletion.
	 *
	 *   - Otherwise we may have to rewrite the chain of reflog entries so
	 *     that gaps created by just-deleted records get backfilled.
	 */
	CALLOC_ARRAY(rewritten, logs_nr);
	for (i = logs_nr; i--;) {
		struct reftable_log_record *dest = &rewritten[i];
		struct object_id old_oid, new_oid;

		*dest = logs[i];
		oidread(&old_oid, logs[i].value.update.old_hash,
			ref_store->repo->hash_algo);
		oidread(&new_oid, logs[i].value.update.new_hash,
			ref_store->repo->hash_algo);

		if (should_prune_fn(&old_oid, &new_oid, logs[i].value.update.email,
				    (timestamp_t)logs[i].value.update.time,
				    logs[i].value.update.tz_offset,
				    logs[i].value.update.message,
				    policy_cb_data)) {
			dest->value_type = REFTABLE_LOG_DELETION;
		} else {
			if ((flags & EXPIRE_REFLOGS_REWRITE) && last_hash)
				memcpy(dest->value.update.old_hash, last_hash, GIT_MAX_RAWSZ);
			last_hash = logs[i].value.update.new_hash;
		}
	}

	if (flags & EXPIRE_REFLOGS_UPDATE_REF && last_hash &&
	    reftable_ref_record_val1(&ref_record))
		oidread(&arg.update_oid, last_hash, ref_store->repo->hash_algo);

	arg.refs = refs;
	arg.records = rewritten;
	arg.len = logs_nr;
	arg.stack = stack;
	arg.refname = refname;

	ret = reftable_addition_add(add, &write_reflog_expiry_table, &arg);
	if (ret < 0)
		goto done;

	/*
	 * Future improvement: we could skip writing records that were
	 * not changed.
	 */
	if (!(flags & EXPIRE_REFLOGS_DRY_RUN))
		ret = reftable_addition_commit(add);

done:
	if (add)
		cleanup_fn(policy_cb_data);
	assert(ret != REFTABLE_API_ERROR);

	reftable_ref_record_release(&ref_record);
	reftable_iterator_destroy(&it);
	reftable_addition_destroy(add);
	for (i = 0; i < logs_nr; i++)
		reftable_log_record_release(&logs[i]);
	free(logs);
	free(rewritten);
	return ret;
}

static int reftable_be_fsck(struct ref_store *ref_store UNUSED,
			    struct fsck_options *o UNUSED)
{
	return 0;
}

struct ref_storage_be refs_be_reftable = {
	.name = "reftable",
	.init = reftable_be_init,
	.release = reftable_be_release,
	.create_on_disk = reftable_be_create_on_disk,
	.remove_on_disk = reftable_be_remove_on_disk,

	.transaction_prepare = reftable_be_transaction_prepare,
	.transaction_finish = reftable_be_transaction_finish,
	.transaction_abort = reftable_be_transaction_abort,
	.initial_transaction_commit = reftable_be_initial_transaction_commit,

	.pack_refs = reftable_be_pack_refs,
	.rename_ref = reftable_be_rename_ref,
	.copy_ref = reftable_be_copy_ref,

	.iterator_begin = reftable_be_iterator_begin,
	.read_raw_ref = reftable_be_read_raw_ref,
	.read_symbolic_ref = reftable_be_read_symbolic_ref,

	.reflog_iterator_begin = reftable_be_reflog_iterator_begin,
	.for_each_reflog_ent = reftable_be_for_each_reflog_ent,
	.for_each_reflog_ent_reverse = reftable_be_for_each_reflog_ent_reverse,
	.reflog_exists = reftable_be_reflog_exists,
	.create_reflog = reftable_be_create_reflog,
	.delete_reflog = reftable_be_delete_reflog,
	.reflog_expire = reftable_be_reflog_expire,

	.fsck = reftable_be_fsck,
};