git
1#define USE_THE_REPOSITORY_VARIABLE2
3#include "../git-compat-util.h"4#include "../config.h"5#include "../dir.h"6#include "../gettext.h"7#include "../hash.h"8#include "../hex.h"9#include "../refs.h"10#include "refs-internal.h"11#include "packed-backend.h"12#include "../iterator.h"13#include "../lockfile.h"14#include "../chdir-notify.h"15#include "../statinfo.h"16#include "../wrapper.h"17#include "../write-or-die.h"18#include "../trace2.h"19
/*
 * Strategies for accessing the contents of the `packed-refs` file.
 */
enum mmap_strategy {
	/*
	 * Don't use mmap() at all for reading `packed-refs`.
	 */
	MMAP_NONE,

	/*
	 * Can use mmap() for reading `packed-refs`, but the file must
	 * not remain mmapped. This is the usual option on Windows,
	 * where you cannot rename a new version of a file onto a file
	 * that is currently mmapped.
	 */
	MMAP_TEMPORARY,

	/*
	 * It is OK to leave the `packed-refs` file mmapped while
	 * arbitrary other code is running.
	 */
	MMAP_OK
};
/* Select the mmap strategy based on platform capabilities: */
#if defined(NO_MMAP)
static enum mmap_strategy mmap_strategy = MMAP_NONE;
#elif defined(MMAP_PREVENTS_DELETE)
static enum mmap_strategy mmap_strategy = MMAP_TEMPORARY;
#else
static enum mmap_strategy mmap_strategy = MMAP_OK;
#endif
struct packed_ref_store;

/*
 * A `snapshot` represents one snapshot of a `packed-refs` file.
 *
 * Normally, this will be a mmapped view of the contents of the
 * `packed-refs` file at the time the snapshot was created. However,
 * if the `packed-refs` file was not sorted, this might point at heap
 * memory holding the contents of the `packed-refs` file with its
 * records sorted by refname.
 *
 * `snapshot` instances are reference counted (via
 * `acquire_snapshot()` and `release_snapshot()`). This is to prevent
 * an instance from disappearing while an iterator is still iterating
 * over it. Instances are garbage collected when their `referrers`
 * count goes to zero.
 *
 * The most recent `snapshot`, if available, is referenced by the
 * `packed_ref_store`. Its freshness is checked whenever
 * `get_snapshot()` is called; if the existing snapshot is obsolete, a
 * new snapshot is taken.
 */
struct snapshot {
	/*
	 * A back-pointer to the packed_ref_store with which this
	 * snapshot is associated:
	 */
	struct packed_ref_store *refs;

	/* Is the `packed-refs` file currently mmapped? */
	int mmapped;

	/*
	 * The contents of the `packed-refs` file:
	 *
	 * - buf -- a pointer to the start of the memory
	 * - start -- a pointer to the first byte of actual references
	 *   (i.e., after the header line, if one is present)
	 * - eof -- a pointer just past the end of the reference
	 *   contents
	 *
	 * If the `packed-refs` file was already sorted, `buf` points
	 * at the mmapped contents of the file. If not, it points at
	 * heap-allocated memory containing the contents, sorted. If
	 * there were no contents (e.g., because the file didn't
	 * exist), `buf`, `start`, and `eof` are all NULL.
	 */
	char *buf, *start, *eof;

	/*
	 * What is the peeled state of the `packed-refs` file that
	 * this snapshot represents? (This is usually determined from
	 * the file's header.)
	 */
	enum { PEELED_NONE, PEELED_TAGS, PEELED_FULLY } peeled;

	/*
	 * Count of references to this instance, including the pointer
	 * from `packed_ref_store::snapshot`, if any. The instance
	 * will not be freed as long as the reference count is
	 * nonzero.
	 */
	unsigned int referrers;

	/*
	 * The metadata of the `packed-refs` file from which this
	 * snapshot was created, used to tell if the file has been
	 * replaced since we read it.
	 */
	struct stat_validity validity;
};
/*
 * A `ref_store` representing references stored in a `packed-refs`
 * file. It implements the `ref_store` interface, though it has some
 * limitations:
 *
 * - It cannot store symbolic references.
 *
 * - It cannot store reflogs.
 *
 * - It does not support reference renaming (though it could).
 *
 * On the other hand, it can be locked outside of a reference
 * transaction. In that case, it remains locked even after the
 * transaction is done and the new `packed-refs` file is activated.
 */
struct packed_ref_store {
	struct ref_store base;

	/* REF_STORE_* flags describing what this store supports: */
	unsigned int store_flags;

	/* The path of the "packed-refs" file: */
	char *path;

	/*
	 * A snapshot of the values read from the `packed-refs` file,
	 * if it might still be current; otherwise, NULL.
	 */
	struct snapshot *snapshot;

	/*
	 * Lock used for the "packed-refs" file. Note that this (and
	 * thus the enclosing `packed_ref_store`) must not be freed.
	 */
	struct lock_file lock;

	/*
	 * Temporary file used when rewriting new contents to the
	 * "packed-refs" file. Note that this (and thus the enclosing
	 * `packed_ref_store`) must not be freed.
	 */
	struct tempfile *tempfile;
};
164/*
165* Increment the reference count of `*snapshot`.
166*/
167static void acquire_snapshot(struct snapshot *snapshot)168{
169snapshot->referrers++;170}
171
172/*
173* If the buffer in `snapshot` is active, then either munmap the
174* memory and close the file, or free the memory. Then set the buffer
175* pointers to NULL.
176*/
177static void clear_snapshot_buffer(struct snapshot *snapshot)178{
179if (snapshot->mmapped) {180if (munmap(snapshot->buf, snapshot->eof - snapshot->buf))181die_errno("error ummapping packed-refs file %s",182snapshot->refs->path);183snapshot->mmapped = 0;184} else {185free(snapshot->buf);186}187snapshot->buf = snapshot->start = snapshot->eof = NULL;188}
189
190/*
191* Decrease the reference count of `*snapshot`. If it goes to zero,
192* free `*snapshot` and return true; otherwise return false.
193*/
194static int release_snapshot(struct snapshot *snapshot)195{
196if (!--snapshot->referrers) {197stat_validity_clear(&snapshot->validity);198clear_snapshot_buffer(snapshot);199free(snapshot);200return 1;201} else {202return 0;203}204}
205
/*
 * Return the number of hexadecimal digits in an object name under the
 * hash algorithm of the repository owning this snapshot's ref store.
 */
static size_t snapshot_hexsz(const struct snapshot *snapshot)
{
	return snapshot->refs->base.repo->hash_algo->hexsz;
}
210
/*
 * Allocate and initialize a `packed_ref_store` for the "packed-refs"
 * file directly under `gitdir`, returning it as a generic
 * `ref_store`. `store_flags` records which operations this store is
 * allowed to perform (checked later by `packed_downcast()`).
 */
struct ref_store *packed_ref_store_init(struct repository *repo,
					const char *gitdir,
					unsigned int store_flags)
{
	struct packed_ref_store *refs = xcalloc(1, sizeof(*refs));
	struct ref_store *ref_store = (struct ref_store *)refs;
	struct strbuf sb = STRBUF_INIT;

	base_ref_store_init(ref_store, repo, gitdir, &refs_be_packed);
	refs->store_flags = store_flags;

	strbuf_addf(&sb, "%s/packed-refs", gitdir);
	refs->path = strbuf_detach(&sb, NULL);
	/* Keep `refs->path` usable if the process changes directory: */
	chdir_notify_reparent("packed-refs", &refs->path);
	return ref_store;
}
227
228/*
229* Downcast `ref_store` to `packed_ref_store`. Die if `ref_store` is
230* not a `packed_ref_store`. Also die if `packed_ref_store` doesn't
231* support at least the flags specified in `required_flags`. `caller`
232* is used in any necessary error messages.
233*/
234static struct packed_ref_store *packed_downcast(struct ref_store *ref_store,235unsigned int required_flags,236const char *caller)237{
238struct packed_ref_store *refs;239
240if (ref_store->be != &refs_be_packed)241BUG("ref_store is type \"%s\" not \"packed\" in %s",242ref_store->be->name, caller);243
244refs = (struct packed_ref_store *)ref_store;245
246if ((refs->store_flags & required_flags) != required_flags)247BUG("unallowed operation (%s), requires %x, has %x\n",248caller, required_flags, refs->store_flags);249
250return refs;251}
252
253static void clear_snapshot(struct packed_ref_store *refs)254{
255if (refs->snapshot) {256struct snapshot *snapshot = refs->snapshot;257
258refs->snapshot = NULL;259release_snapshot(snapshot);260}261}
262
/*
 * Release the resources held by the store: the cached snapshot, any
 * pending lock or tempfile, and the path string. (The struct itself
 * is not freed here.)
 */
static void packed_ref_store_release(struct ref_store *ref_store)
{
	struct packed_ref_store *refs = packed_downcast(ref_store, 0, "release");
	clear_snapshot(refs);
	rollback_lock_file(&refs->lock);
	delete_tempfile(&refs->tempfile);
	free(refs->path);
}
271
272static NORETURN void die_unterminated_line(const char *path,273const char *p, size_t len)274{
275if (len < 80)276die("unterminated line in %s: %.*s", path, (int)len, p);277else278die("unterminated line in %s: %.75s...", path, p);279}
280
281static NORETURN void die_invalid_line(const char *path,282const char *p, size_t len)283{
284const char *eol = memchr(p, '\n', len);285
286if (!eol)287die_unterminated_line(path, p, len);288else if (eol - p < 80)289die("unexpected line in %s: %.*s", path, (int)(eol - p), p);290else291die("unexpected line in %s: %.75s...", path, p);292
293}
294
/*
 * One record within a snapshot buffer: `len` bytes starting at
 * `start` (a reference line plus, if present, its peeled "^" line).
 */
struct snapshot_record {
	const char *start;
	size_t len;
};
/*
 * QSORT_S-style comparison of two `snapshot_record`s by refname.
 * `cb_data` is the snapshot, needed for the hash length. The refname
 * begins just past the "<oid> " prefix of each record; a refname that
 * ends first (at its LF) sorts earlier.
 */
static int cmp_packed_ref_records(const void *v1, const void *v2,
				  void *cb_data)
{
	const struct snapshot *snapshot = cb_data;
	const struct snapshot_record *e1 = v1, *e2 = v2;
	/* Skip the hex object name and the following space: */
	const char *r1 = e1->start + snapshot_hexsz(snapshot) + 1;
	const char *r2 = e2->start + snapshot_hexsz(snapshot) + 1;

	while (1) {
		if (*r1 == '\n')
			return *r2 == '\n' ? 0 : -1;
		if (*r1 != *r2) {
			if (*r2 == '\n')
				return 1;
			else
				return (unsigned char)*r1 < (unsigned char)*r2 ? -1 : +1;
		}
		r1++;
		r2++;
	}
}
321
/*
 * Compare a snapshot record at `rec` to the specified NUL-terminated
 * refname.
 *
 * `start` chooses the tie-break when `refname` is a proper prefix of
 * the record's refname: nonzero makes such a record compare greater
 * (so a binary search lands at the *first* record with that prefix),
 * zero makes it compare less (so the search lands just *past* the
 * last such record).
 */
static int cmp_record_to_refname(const char *rec, const char *refname,
				 int start, const struct snapshot *snapshot)
{
	/* Skip the hex object name and the following space: */
	const char *r1 = rec + snapshot_hexsz(snapshot) + 1;
	const char *r2 = refname;

	while (1) {
		if (*r1 == '\n')
			return *r2 ? -1 : 0;
		if (!*r2)
			return start ? 1 : -1;
		if (*r1 != *r2)
			return (unsigned char)*r1 < (unsigned char)*r2 ? -1 : +1;
		r1++;
		r2++;
	}
}
343
/*
 * `snapshot->buf` is not known to be sorted. Check whether it is, and
 * if not, sort it into new memory and munmap/free the old storage.
 */
static void sort_snapshot(struct snapshot *snapshot)
{
	struct snapshot_record *records = NULL;
	size_t alloc = 0, nr = 0;
	int sorted = 1;
	const char *pos, *eof, *eol;
	size_t len, i;
	char *new_buffer, *dst;

	pos = snapshot->start;
	eof = snapshot->eof;

	/* Nothing to do for an empty buffer: */
	if (pos == eof)
		return;

	len = eof - pos;

	/*
	 * Initialize records based on a crude estimate of the number
	 * of references in the file (we'll grow it below if needed):
	 */
	ALLOC_GROW(records, len / 80 + 20, alloc);

	/* Split the buffer into records, checking sortedness as we go: */
	while (pos < eof) {
		eol = memchr(pos, '\n', eof - pos);
		if (!eol)
			/* The safety check should prevent this. */
			BUG("unterminated line found in packed-refs");
		if (eol - pos < snapshot_hexsz(snapshot) + 2)
			die_invalid_line(snapshot->refs->path,
					 pos, eof - pos);
		eol++;
		if (eol < eof && *eol == '^') {
			/*
			 * Keep any peeled line together with its
			 * reference:
			 */
			const char *peeled_start = eol;

			eol = memchr(peeled_start, '\n', eof - peeled_start);
			if (!eol)
				/* The safety check should prevent this. */
				BUG("unterminated peeled line found in packed-refs");
			eol++;
		}

		ALLOC_GROW(records, nr + 1, alloc);
		records[nr].start = pos;
		records[nr].len = eol - pos;
		nr++;

		/* A single out-of-order pair means we must sort: */
		if (sorted &&
		    nr > 1 &&
		    cmp_packed_ref_records(&records[nr - 2],
					   &records[nr - 1], snapshot) >= 0)
			sorted = 0;

		pos = eol;
	}

	if (sorted)
		goto cleanup;

	/* We need to sort the memory. First we sort the records array: */
	QSORT_S(records, nr, cmp_packed_ref_records, snapshot);

	/*
	 * Allocate a new chunk of memory, and copy the old memory to
	 * the new in the order indicated by `records` (not bothering
	 * with the header line):
	 */
	new_buffer = xmalloc(len);
	for (dst = new_buffer, i = 0; i < nr; i++) {
		memcpy(dst, records[i].start, records[i].len);
		dst += records[i].len;
	}

	/*
	 * Now munmap the old buffer and use the sorted buffer in its
	 * place:
	 */
	clear_snapshot_buffer(snapshot);
	snapshot->buf = snapshot->start = new_buffer;
	snapshot->eof = new_buffer + len;

cleanup:
	free(records);
}
436
/*
 * Return a pointer to the start of the record that contains the
 * character `*p` (which must be within the buffer). If no other
 * record start is found, return `buf`. A line beginning with '^'
 * (a peeled line) belongs to the preceding reference's record.
 */
static const char *find_start_of_record(const char *buf, const char *p)
{
	for (; p > buf; p--) {
		if (p[-1] == '\n' && p[0] != '^')
			break;
	}
	return p;
}
448
/*
 * Return a pointer to the start of the record following the record
 * that contains `*p`. If none is found before `end`, return `end`.
 * A '^' (peeled) line is treated as part of the current record.
 */
static const char *find_end_of_record(const char *p, const char *end)
{
	for (p++; p < end; p++) {
		if (p[-1] == '\n' && p[0] != '^')
			break;
	}
	return p;
}
459
/*
 * We want to be able to compare mmapped reference records quickly,
 * without totally parsing them. We can do so because the records are
 * LF-terminated, and the refname should start exactly (GIT_SHA1_HEXSZ
 * + 1) bytes past the beginning of the record.
 *
 * But what if the `packed-refs` file contains garbage? We're willing
 * to tolerate not detecting the problem, as long as we don't produce
 * totally garbled output (we can't afford to check the integrity of
 * the whole file during every Git invocation). But we do want to be
 * sure that we never read past the end of the buffer in memory and
 * perform an illegal memory access.
 *
 * Guarantee that minimum level of safety by verifying that the last
 * record in the file is LF-terminated, and that it has at least
 * (GIT_SHA1_HEXSZ + 1) characters before the LF. Die if either of
 * these checks fails.
 */
static void verify_buffer_safe(struct snapshot *snapshot)
{
	const char *start = snapshot->start;
	const char *eof = snapshot->eof;
	const char *last_line;

	/* An empty buffer is trivially safe: */
	if (start == eof)
		return;

	last_line = find_start_of_record(start, eof - 1);
	if (*(eof - 1) != '\n' ||
	    eof - last_line < snapshot_hexsz(snapshot) + 2)
		die_invalid_line(snapshot->refs->path,
				 last_line, eof - last_line);
}
493
/* Files at or below this size are read outright rather than mmapped: */
#define SMALL_FILE_SIZE (32*1024)

/*
 * Depending on `mmap_strategy`, either mmap or read the contents of
 * the `packed-refs` file into the snapshot. Return 1 if the file
 * existed and was read, or 0 if the file was absent or empty. Die on
 * errors.
 */
static int load_contents(struct snapshot *snapshot)
{
	int fd;
	struct stat st;
	size_t size;
	ssize_t bytes_read;

	fd = open(snapshot->refs->path, O_RDONLY);
	if (fd < 0) {
		if (errno == ENOENT) {
			/*
			 * This is OK; it just means that no
			 * "packed-refs" file has been written yet,
			 * which is equivalent to it being empty,
			 * which is its state when initialized with
			 * zeros.
			 */
			return 0;
		} else {
			die_errno("couldn't read %s", snapshot->refs->path);
		}
	}

	/* Record the file's metadata so staleness can be detected later: */
	stat_validity_update(&snapshot->validity, fd);

	if (fstat(fd, &st) < 0)
		die_errno("couldn't stat %s", snapshot->refs->path);
	size = xsize_t(st.st_size);

	if (!size) {
		close(fd);
		return 0;
	} else if (mmap_strategy == MMAP_NONE || size <= SMALL_FILE_SIZE) {
		snapshot->buf = xmalloc(size);
		bytes_read = read_in_full(fd, snapshot->buf, size);
		if (bytes_read < 0 || bytes_read != size)
			die_errno("couldn't read %s", snapshot->refs->path);
		snapshot->mmapped = 0;
	} else {
		snapshot->buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
		snapshot->mmapped = 1;
	}
	close(fd);

	snapshot->start = snapshot->buf;
	snapshot->eof = snapshot->buf + size;

	return 1;
}
551
/*
 * Core binary search for `refname` within the (sorted) snapshot
 * buffer. See `find_reference_location()` and
 * `find_reference_location_end()` for the meaning of `mustexist`;
 * `start` is passed through to `cmp_record_to_refname()` to choose
 * which end of a run of prefix matches is found.
 */
static const char *find_reference_location_1(struct snapshot *snapshot,
					     const char *refname, int mustexist,
					     int start)
{
	/*
	 * This is not *quite* a garden-variety binary search, because
	 * the data we're searching is made up of records, and we
	 * always need to find the beginning of a record to do a
	 * comparison. A "record" here is one line for the reference
	 * itself and zero or one peel lines that start with '^'. Our
	 * loop invariant is described in the next two comments.
	 */

	/*
	 * A pointer to the character at the start of a record whose
	 * preceding records all have reference names that come
	 * *before* `refname`.
	 */
	const char *lo = snapshot->start;

	/*
	 * A pointer to the first character of a record whose
	 * reference name comes *after* `refname`.
	 */
	const char *hi = snapshot->eof;

	while (lo != hi) {
		const char *mid, *rec;
		int cmp;

		mid = lo + (hi - lo) / 2;
		rec = find_start_of_record(lo, mid);
		cmp = cmp_record_to_refname(rec, refname, start, snapshot);
		if (cmp < 0) {
			lo = find_end_of_record(mid, hi);
		} else if (cmp > 0) {
			hi = rec;
		} else {
			return rec;
		}
	}

	if (mustexist)
		return NULL;
	else
		return lo;
}
599
/*
 * Find the place in `snapshot->buf` where the start of the record for
 * `refname` starts. If `mustexist` is true and the reference doesn't
 * exist, then return NULL. If `mustexist` is false and the reference
 * doesn't exist, then return the point where that reference would be
 * inserted, or `snapshot->eof` (which might be NULL) if it would be
 * inserted at the end of the file. In the latter mode, `refname`
 * doesn't have to be a proper reference name; for example, one could
 * search for "refs/replace/" to find the start of any replace
 * references.
 *
 * The record is sought using a binary search, so `snapshot->buf` must
 * be sorted.
 */
static const char *find_reference_location(struct snapshot *snapshot,
					   const char *refname, int mustexist)
{
	/* start=1: land on the first record matching `refname`. */
	return find_reference_location_1(snapshot, refname, mustexist, 1);
}
619
/*
 * Find the place in `snapshot->buf` after the end of the record for
 * `refname`. In other words, find the location of the first thing
 * *after* `refname`.
 *
 * Other semantics are identical to the ones in
 * `find_reference_location()`.
 */
static const char *find_reference_location_end(struct snapshot *snapshot,
					       const char *refname,
					       int mustexist)
{
	/* start=0: land just past the last record matching `refname`. */
	return find_reference_location_1(snapshot, refname, mustexist, 0);
}
634
/*
 * Create a newly-allocated `snapshot` of the `packed-refs` file in
 * its current state and return it. The return value will already have
 * its reference count incremented.
 *
 * A comment line of the form "# pack-refs with: " may contain zero or
 * more traits. We interpret the traits as follows:
 *
 * Neither `peeled` nor `fully-peeled`:
 *
 * Probably no references are peeled. But if the file contains a
 * peeled value for a reference, we will use it.
 *
 * `peeled`:
 *
 * References under "refs/tags/", if they *can* be peeled, *are*
 * peeled in this file. References outside of "refs/tags/" are
 * probably not peeled even if they could have been, but if we find
 * a peeled value for such a reference we will use it.
 *
 * `fully-peeled`:
 *
 * All references in the file that can be peeled are peeled.
 * Inversely (and this is more important), any references in the
 * file for which no peeled value is recorded is not peelable. This
 * trait should typically be written alongside "peeled" for
 * compatibility with older clients, but we do not require it
 * (i.e., "peeled" is a no-op if "fully-peeled" is set).
 *
 * `sorted`:
 *
 * The references in this file are known to be sorted by refname.
 */
static struct snapshot *create_snapshot(struct packed_ref_store *refs)
{
	struct snapshot *snapshot = xcalloc(1, sizeof(*snapshot));
	int sorted = 0;

	snapshot->refs = refs;
	acquire_snapshot(snapshot);
	snapshot->peeled = PEELED_NONE;

	/* An absent or empty file needs no further processing: */
	if (!load_contents(snapshot))
		return snapshot;

	/* If the file has a header line, process it: */
	if (snapshot->buf < snapshot->eof && *snapshot->buf == '#') {
		char *tmp, *p, *eol;
		struct string_list traits = STRING_LIST_INIT_NODUP;

		eol = memchr(snapshot->buf, '\n',
			     snapshot->eof - snapshot->buf);
		if (!eol)
			die_unterminated_line(refs->path,
					      snapshot->buf,
					      snapshot->eof - snapshot->buf);

		/* Copy the header so we can split it in place: */
		tmp = xmemdupz(snapshot->buf, eol - snapshot->buf);

		if (!skip_prefix(tmp, "# pack-refs with:", (const char **)&p))
			die_invalid_line(refs->path,
					 snapshot->buf,
					 snapshot->eof - snapshot->buf);

		string_list_split_in_place(&traits, p, " ", -1);

		if (unsorted_string_list_has_string(&traits, "fully-peeled"))
			snapshot->peeled = PEELED_FULLY;
		else if (unsorted_string_list_has_string(&traits, "peeled"))
			snapshot->peeled = PEELED_TAGS;

		sorted = unsorted_string_list_has_string(&traits, "sorted");

		/* perhaps other traits later as well */

		/* The "+ 1" is for the LF character. */
		snapshot->start = eol + 1;

		string_list_clear(&traits, 0);
		free(tmp);
	}

	verify_buffer_safe(snapshot);

	if (!sorted) {
		sort_snapshot(snapshot);

		/*
		 * Reordering the records might have moved a short one
		 * to the end of the buffer, so verify the buffer's
		 * safety again:
		 */
		verify_buffer_safe(snapshot);
	}

	if (mmap_strategy != MMAP_OK && snapshot->mmapped) {
		/*
		 * We don't want to leave the file mmapped, so we are
		 * forced to make a copy now:
		 */
		size_t size = snapshot->eof - snapshot->start;
		char *buf_copy = xmalloc(size);

		memcpy(buf_copy, snapshot->start, size);
		clear_snapshot_buffer(snapshot);
		snapshot->buf = snapshot->start = buf_copy;
		snapshot->eof = buf_copy + size;
	}

	return snapshot;
}
746
747/*
748* Check that `refs->snapshot` (if present) still reflects the
749* contents of the `packed-refs` file. If not, clear the snapshot.
750*/
751static void validate_snapshot(struct packed_ref_store *refs)752{
753if (refs->snapshot &&754!stat_validity_check(&refs->snapshot->validity, refs->path))755clear_snapshot(refs);756}
757
758/*
759* Get the `snapshot` for the specified packed_ref_store, creating and
760* populating it if it hasn't been read before or if the file has been
761* changed (according to its `validity` field) since it was last read.
762* On the other hand, if we hold the lock, then assume that the file
763* hasn't been changed out from under us, so skip the extra `stat()`
764* call in `stat_validity_check()`. This function does *not* increase
765* the snapshot's reference count on behalf of the caller.
766*/
767static struct snapshot *get_snapshot(struct packed_ref_store *refs)768{
769if (!is_lock_file_locked(&refs->lock))770validate_snapshot(refs);771
772if (!refs->snapshot)773refs->snapshot = create_snapshot(refs);774
775return refs->snapshot;776}
777
/*
 * Look up `refname` in the current packed-refs snapshot. On success,
 * store its value in `oid`, set `*type` to REF_ISPACKED, and return
 * 0. If the reference is not present, set `*failure_errno` to ENOENT
 * and return -1. Dies if the record's object name is malformed.
 */
static int packed_read_raw_ref(struct ref_store *ref_store, const char *refname,
			       struct object_id *oid, struct strbuf *referent UNUSED,
			       unsigned int *type, int *failure_errno)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
	struct snapshot *snapshot = get_snapshot(refs);
	const char *rec;

	*type = 0;

	rec = find_reference_location(snapshot, refname, 1);

	if (!rec) {
		/* refname is not a packed reference. */
		*failure_errno = ENOENT;
		return -1;
	}

	if (get_oid_hex_algop(rec, oid, ref_store->repo->hash_algo))
		die_invalid_line(refs->path, rec, snapshot->eof - rec);

	*type = REF_ISPACKED;
	return 0;
}
803
/*
 * This value is set in `base.flags` if the peeled value of the
 * current reference is known. In that case, `peeled` contains the
 * correct peeled value for the reference, which might be `null_oid`
 * if the reference is not a tag or if it is broken.
 */
#define REF_KNOWS_PEELED 0x40

/*
 * An iterator over a snapshot of a `packed-refs` file.
 */
struct packed_ref_iterator {
	struct ref_iterator base;

	/* The snapshot being iterated over; released in abort(): */
	struct snapshot *snapshot;

	/* The current position in the snapshot's buffer: */
	const char *pos;

	/* The end of the part of the buffer that will be iterated over: */
	const char *eof;

	/*
	 * Sorted, coalesced buffer regions to skip (built from the
	 * exclude patterns by `populate_excluded_jump_list()`):
	 */
	struct jump_list_entry {
		const char *start;
		const char *end;
	} *jump;
	size_t jump_nr, jump_alloc;
	/* Index of the next jump region to consider: */
	size_t jump_cur;

	/* Scratch space for current values: */
	struct object_id oid, peeled;
	struct strbuf refname_buf;

	struct repository *repo;
	/* DO_FOR_EACH_* flags controlling this iteration: */
	unsigned int flags;
};
/*
 * Move the iterator to the next record in the snapshot, without
 * respect for whether the record is actually required by the current
 * iteration. Adjust the fields in `iter` and return `ITER_OK` or
 * `ITER_DONE`. This function does not free the iterator in the case
 * of `ITER_DONE`.
 */
static int next_record(struct packed_ref_iterator *iter)
{
	const char *p, *eol;

	strbuf_reset(&iter->refname_buf);

	/*
	 * If iter->pos is contained within a skipped region, jump past
	 * it.
	 *
	 * Note that each skipped region is considered at most once,
	 * since they are ordered based on their starting position.
	 */
	while (iter->jump_cur < iter->jump_nr) {
		struct jump_list_entry *curr = &iter->jump[iter->jump_cur];
		if (iter->pos < curr->start)
			break; /* not to the next jump yet */

		iter->jump_cur++;
		if (iter->pos < curr->end) {
			iter->pos = curr->end;
			trace2_counter_add(TRACE2_COUNTER_ID_PACKED_REFS_JUMPS, 1);
			/* jumps are coalesced, so only one jump is necessary */
			break;
		}
	}

	if (iter->pos == iter->eof)
		return ITER_DONE;

	iter->base.flags = REF_ISPACKED;
	p = iter->pos;

	/* Parse the "<oid> <refname>\n" line: */
	if (iter->eof - p < snapshot_hexsz(iter->snapshot) + 2 ||
	    parse_oid_hex_algop(p, &iter->oid, &p, iter->repo->hash_algo) ||
	    !isspace(*p++))
		die_invalid_line(iter->snapshot->refs->path,
				 iter->pos, iter->eof - iter->pos);

	eol = memchr(p, '\n', iter->eof - p);
	if (!eol)
		die_unterminated_line(iter->snapshot->refs->path,
				      iter->pos, iter->eof - iter->pos);

	strbuf_add(&iter->refname_buf, p, eol - p);
	iter->base.refname = iter->refname_buf.buf;

	/* Tolerate (but flag) malformed refnames; die only on unsafe ones: */
	if (check_refname_format(iter->base.refname, REFNAME_ALLOW_ONELEVEL)) {
		if (!refname_is_safe(iter->base.refname))
			die("packed refname is dangerous: %s",
			    iter->base.refname);
		oidclr(&iter->oid, iter->repo->hash_algo);
		iter->base.flags |= REF_BAD_NAME | REF_ISBROKEN;
	}
	if (iter->snapshot->peeled == PEELED_FULLY ||
	    (iter->snapshot->peeled == PEELED_TAGS &&
	     starts_with(iter->base.refname, "refs/tags/")))
		iter->base.flags |= REF_KNOWS_PEELED;

	iter->pos = eol + 1;

	/* An optional "^<oid>\n" line carries this ref's peeled value: */
	if (iter->pos < iter->eof && *iter->pos == '^') {
		p = iter->pos + 1;
		if (iter->eof - p < snapshot_hexsz(iter->snapshot) + 1 ||
		    parse_oid_hex_algop(p, &iter->peeled, &p, iter->repo->hash_algo) ||
		    *p++ != '\n')
			die_invalid_line(iter->snapshot->refs->path,
					 iter->pos, iter->eof - iter->pos);
		iter->pos = p;

		/*
		 * Regardless of what the file header said, we
		 * definitely know the value of *this* reference. But
		 * we suppress it if the reference is broken:
		 */
		if ((iter->base.flags & REF_ISBROKEN)) {
			oidclr(&iter->peeled, iter->repo->hash_algo);
			iter->base.flags &= ~REF_KNOWS_PEELED;
		} else {
			iter->base.flags |= REF_KNOWS_PEELED;
		}
	} else {
		oidclr(&iter->peeled, iter->repo->hash_algo);
	}

	return ITER_OK;
}
935
/*
 * Advance to the next reference wanted by this iteration, applying
 * the DO_FOR_EACH_PER_WORKTREE_ONLY and DO_FOR_EACH_INCLUDE_BROKEN
 * filters. Return ITER_OK, ITER_DONE, or ITER_ERROR.
 */
static int packed_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;
	int ok;

	while ((ok = next_record(iter)) == ITER_OK) {
		/* Skip non-worktree refs if only per-worktree refs are wanted: */
		if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
		    !is_per_worktree_ref(iter->base.refname))
			continue;

		/* Skip unresolvable refs unless broken refs were requested: */
		if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
		    !ref_resolves_to_object(iter->base.refname, iter->repo,
					    &iter->oid, iter->flags))
			continue;

		return ITER_OK;
	}

	if (ref_iterator_abort(ref_iterator) != ITER_DONE)
		ok = ITER_ERROR;

	return ok;
}
960
961static int packed_ref_iterator_peel(struct ref_iterator *ref_iterator,962struct object_id *peeled)963{
964struct packed_ref_iterator *iter =965(struct packed_ref_iterator *)ref_iterator;966
967if ((iter->base.flags & REF_KNOWS_PEELED)) {968oidcpy(peeled, &iter->peeled);969return is_null_oid(&iter->peeled) ? -1 : 0;970} else if ((iter->base.flags & (REF_ISBROKEN | REF_ISSYMREF))) {971return -1;972} else {973return peel_object(iter->repo, &iter->oid, peeled) ? -1 : 0;974}975}
976
977static int packed_ref_iterator_abort(struct ref_iterator *ref_iterator)978{
979struct packed_ref_iterator *iter =980(struct packed_ref_iterator *)ref_iterator;981int ok = ITER_DONE;982
983strbuf_release(&iter->refname_buf);984free(iter->jump);985release_snapshot(iter->snapshot);986base_ref_iterator_free(ref_iterator);987return ok;988}
989
/* Method table wiring the packed-refs iterator into the generic API: */
static struct ref_iterator_vtable packed_ref_iterator_vtable = {
	.advance = packed_ref_iterator_advance,
	.peel = packed_ref_iterator_peel,
	.abort = packed_ref_iterator_abort
};
996static int jump_list_entry_cmp(const void *va, const void *vb)997{
998const struct jump_list_entry *a = va;999const struct jump_list_entry *b = vb;1000
1001if (a->start < b->start)1002return -1;1003if (a->start > b->start)1004return 1;1005return 0;1006}
1007
/*
 * Return 1 if `str` contains any glob metacharacter, 0 otherwise.
 */
static int has_glob_special(const char *str)
{
	while (*str) {
		if (is_glob_special(*str))
			return 1;
		str++;
	}
	return 0;
}
1017
/*
 * Build `iter->jump`: a sorted, coalesced list of snapshot-buffer
 * regions matched by the glob-free entries of `excluded_patterns`, so
 * that iteration can skip those regions wholesale.
 */
static void populate_excluded_jump_list(struct packed_ref_iterator *iter,
					struct snapshot *snapshot,
					const char **excluded_patterns)
{
	size_t i, j;
	const char **pattern;
	struct jump_list_entry *last_disjoint;

	if (!excluded_patterns)
		return;

	for (pattern = excluded_patterns; *pattern; pattern++) {
		struct jump_list_entry *e;
		const char *start, *end;

		/*
		 * We can't feed any excludes with globs in them to the
		 * refs machinery. It only understands prefix matching.
		 * We likewise can't even feed the string leading up to
		 * the first meta-character, as something like "foo[a]"
		 * should not exclude "foobar" (but the prefix "foo"
		 * would match that and mark it for exclusion).
		 */
		if (has_glob_special(*pattern))
			continue;

		/* Region of the buffer covered by this prefix: */
		start = find_reference_location(snapshot, *pattern, 0);
		end = find_reference_location_end(snapshot, *pattern, 0);

		if (start == end)
			continue; /* nothing to jump over */

		ALLOC_GROW(iter->jump, iter->jump_nr + 1, iter->jump_alloc);

		e = &iter->jump[iter->jump_nr++];
		e->start = start;
		e->end = end;
	}

	if (!iter->jump_nr) {
		/*
		 * Every entry in exclude_patterns has a meta-character,
		 * nothing to do here.
		 */
		return;
	}

	QSORT(iter->jump, iter->jump_nr, jump_list_entry_cmp);

	/*
	 * As an optimization, merge adjacent entries in the jump list
	 * to jump forwards as far as possible when entering a skipped
	 * region.
	 *
	 * For example, if we have two skipped regions:
	 *
	 * [[A, B], [B, C]]
	 *
	 * we want to combine that into a single entry jumping from A to
	 * C.
	 */
	last_disjoint = iter->jump;

	for (i = 1, j = 1; i < iter->jump_nr; i++) {
		struct jump_list_entry *ours = &iter->jump[i];
		if (ours->start <= last_disjoint->end) {
			/* overlapping regions extend the previous one */
			last_disjoint->end = last_disjoint->end > ours->end
				? last_disjoint->end : ours->end;
		} else {
			/* otherwise, insert a new region */
			iter->jump[j++] = *ours;
			last_disjoint = ours;
		}
	}

	iter->jump_nr = j;
	iter->jump_cur = 0;
}
1097
/*
 * Begin iterating over the packed refs whose names start with
 * `prefix` (all refs if `prefix` is NULL or empty), skipping any refs
 * matched by the glob-free entries of `exclude_patterns`. `flags` are
 * DO_FOR_EACH_* iteration flags.
 */
static struct ref_iterator *packed_ref_iterator_begin(
		struct ref_store *ref_store,
		const char *prefix, const char **exclude_patterns,
		unsigned int flags)
{
	struct packed_ref_store *refs;
	struct snapshot *snapshot;
	const char *start;
	struct packed_ref_iterator *iter;
	struct ref_iterator *ref_iterator;
	unsigned int required_flags = REF_STORE_READ;

	/* Resolving refs to objects requires access to the object database: */
	if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
		required_flags |= REF_STORE_ODB;
	refs = packed_downcast(ref_store, required_flags, "ref_iterator_begin");

	/*
	 * Note that `get_snapshot()` internally checks whether the
	 * snapshot is up to date with what is on disk, and re-reads
	 * it if not.
	 */
	snapshot = get_snapshot(refs);

	if (prefix && *prefix)
		start = find_reference_location(snapshot, prefix, 0);
	else
		start = snapshot->start;

	if (start == snapshot->eof)
		return empty_ref_iterator_begin();

	CALLOC_ARRAY(iter, 1);
	ref_iterator = &iter->base;
	base_ref_iterator_init(ref_iterator, &packed_ref_iterator_vtable);

	if (exclude_patterns)
		populate_excluded_jump_list(iter, snapshot, exclude_patterns);

	iter->snapshot = snapshot;
	acquire_snapshot(snapshot);

	iter->pos = start;
	iter->eof = snapshot->eof;
	strbuf_init(&iter->refname_buf, 0);

	iter->base.oid = &iter->oid;

	iter->repo = ref_store->repo;
	iter->flags = flags;

	if (prefix && *prefix)
		/* Stop iteration after we've gone *past* prefix: */
		ref_iterator = prefix_ref_iterator_begin(ref_iterator, prefix, 0);

	return ref_iterator;
}
1154
1155/*
1156* Write an entry to the packed-refs file for the specified refname.
1157* If peeled is non-NULL, write it as the entry's peeled value. On
1158* error, return a nonzero value and leave errno set at the value left
1159* by the failing call to `fprintf()`.
1160*/
1161static int write_packed_entry(FILE *fh, const char *refname,1162const struct object_id *oid,1163const struct object_id *peeled)1164{
1165if (fprintf(fh, "%s %s\n", oid_to_hex(oid), refname) < 0 ||1166(peeled && fprintf(fh, "^%s\n", oid_to_hex(peeled)) < 0))1167return -1;1168
1169return 0;1170}
1171
/*
 * Take the lock on the `packed-refs` file (honoring the
 * `core.packedrefstimeout` config, default 1000 ms), then make sure
 * the snapshot reflects the file as it exists in the locked state.
 * Return 0 on success; on failure, append a message to `err` and
 * return -1.
 */
int packed_refs_lock(struct ref_store *ref_store, int flags, struct strbuf *err)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_WRITE | REF_STORE_MAIN,
				"packed_refs_lock");
	static int timeout_configured = 0;
	static int timeout_value = 1000;

	/* Read the lock timeout from the config only once per process. */
	if (!timeout_configured) {
		git_config_get_int("core.packedrefstimeout", &timeout_value);
		timeout_configured = 1;
	}

	/*
	 * Note that we close the lockfile immediately because we
	 * don't write new content to it, but rather to a separate
	 * tempfile.
	 */
	if (hold_lock_file_for_update_timeout(
			    &refs->lock,
			    refs->path,
			    flags, timeout_value) < 0) {
		unable_to_lock_message(refs->path, errno, err);
		return -1;
	}

	if (close_lock_file_gently(&refs->lock)) {
		strbuf_addf(err, "unable to close %s: %s", refs->path, strerror(errno));
		rollback_lock_file(&refs->lock);
		return -1;
	}

	/*
	 * There is a stat-validity problem that could cause
	 * `update-ref -d` to lose a recent update to a ref: a newly
	 * written `packed-refs` file might have the same on-disk file
	 * attributes (timestamp, file size, and inode value) as the
	 * version we have already read, even though its contents
	 * differ.
	 *
	 * This can happen, with a very small probability, when
	 * `update-ref -d` is called while another `pack-refs --all`
	 * process is running at the same time.
	 *
	 * Now that we hold the `packed-refs` lock, it is important
	 * that we read the latest version of the `packed-refs` file,
	 * regardless of whether we have just mmapped it or not. So
	 * discard the snapshot if we already hold one.
	 */
	clear_snapshot(refs);

	/*
	 * Now make sure that the packed-refs file as it exists in the
	 * locked state is loaded into the snapshot:
	 */
	get_snapshot(refs);
	return 0;
}
1230
1231void packed_refs_unlock(struct ref_store *ref_store)1232{
1233struct packed_ref_store *refs = packed_downcast(1234ref_store,1235REF_STORE_READ | REF_STORE_WRITE,1236"packed_refs_unlock");1237
1238if (!is_lock_file_locked(&refs->lock))1239BUG("packed_refs_unlock() called when not locked");1240rollback_lock_file(&refs->lock);1241}
1242
1243int packed_refs_is_locked(struct ref_store *ref_store)1244{
1245struct packed_ref_store *refs = packed_downcast(1246ref_store,1247REF_STORE_READ | REF_STORE_WRITE,1248"packed_refs_is_locked");1249
1250return is_lock_file_locked(&refs->lock);1251}
1252
1253/*
1254* The packed-refs header line that we write out. Perhaps other traits
1255* will be added later.
1256*
1257* Note that earlier versions of Git used to parse these traits by
1258* looking for " trait " in the line. For this reason, the space after
1259* the colon and the trailing space are required.
1260*/
1261static const char PACKED_REFS_HEADER[] =1262"# pack-refs with: peeled fully-peeled sorted \n";1263
/*
 * On-disk creation is a no-op for this backend: a `packed-refs` file
 * is only materialized when refs are actually written via the
 * transaction code below.
 */
static int packed_ref_store_create_on_disk(struct ref_store *ref_store UNUSED,
					   int flags UNUSED,
					   struct strbuf *err UNUSED)
{
	/* Nothing to do. */
	return 0;
}
1271
1272static int packed_ref_store_remove_on_disk(struct ref_store *ref_store,1273struct strbuf *err)1274{
1275struct packed_ref_store *refs = packed_downcast(ref_store, 0, "remove");1276
1277if (remove_path(refs->path) < 0) {1278strbuf_addstr(err, "could not delete packed-refs");1279return -1;1280}1281
1282return 0;1283}
1284
1285/*
1286* Write the packed refs from the current snapshot to the packed-refs
1287* tempfile, incorporating any changes from `updates`. `updates` must
1288* be a sorted string list whose keys are the refnames and whose util
1289* values are `struct ref_update *`. On error, rollback the tempfile,
1290* write an error message to `err`, and return a nonzero value.
1291*
1292* The packfile must be locked before calling this function and will
1293* remain locked when it is done.
1294*/
1295static int write_with_updates(struct packed_ref_store *refs,1296struct string_list *updates,1297struct strbuf *err)1298{
1299struct ref_iterator *iter = NULL;1300size_t i;1301int ok;1302FILE *out;1303struct strbuf sb = STRBUF_INIT;1304char *packed_refs_path;1305
1306if (!is_lock_file_locked(&refs->lock))1307BUG("write_with_updates() called while unlocked");1308
1309/*1310* If packed-refs is a symlink, we want to overwrite the
1311* symlinked-to file, not the symlink itself. Also, put the
1312* staging file next to it:
1313*/
1314packed_refs_path = get_locked_file_path(&refs->lock);1315strbuf_addf(&sb, "%s.new", packed_refs_path);1316free(packed_refs_path);1317refs->tempfile = create_tempfile(sb.buf);1318if (!refs->tempfile) {1319strbuf_addf(err, "unable to create file %s: %s",1320sb.buf, strerror(errno));1321strbuf_release(&sb);1322return -1;1323}1324strbuf_release(&sb);1325
1326out = fdopen_tempfile(refs->tempfile, "w");1327if (!out) {1328strbuf_addf(err, "unable to fdopen packed-refs tempfile: %s",1329strerror(errno));1330goto error;1331}1332
1333if (fprintf(out, "%s", PACKED_REFS_HEADER) < 0)1334goto write_error;1335
1336/*1337* We iterate in parallel through the current list of refs and
1338* the list of updates, processing an entry from at least one
1339* of the lists each time through the loop. When the current
1340* list of refs is exhausted, set iter to NULL. When the list
1341* of updates is exhausted, leave i set to updates->nr.
1342*/
1343iter = packed_ref_iterator_begin(&refs->base, "", NULL,1344DO_FOR_EACH_INCLUDE_BROKEN);1345if ((ok = ref_iterator_advance(iter)) != ITER_OK)1346iter = NULL;1347
1348i = 0;1349
1350while (iter || i < updates->nr) {1351struct ref_update *update = NULL;1352int cmp;1353
1354if (i >= updates->nr) {1355cmp = -1;1356} else {1357update = updates->items[i].util;1358
1359if (!iter)1360cmp = +1;1361else1362cmp = strcmp(iter->refname, update->refname);1363}1364
1365if (!cmp) {1366/*1367* There is both an old value and an update
1368* for this reference. Check the old value if
1369* necessary:
1370*/
1371if ((update->flags & REF_HAVE_OLD)) {1372if (is_null_oid(&update->old_oid)) {1373strbuf_addf(err, "cannot update ref '%s': "1374"reference already exists",1375update->refname);1376goto error;1377} else if (!oideq(&update->old_oid, iter->oid)) {1378strbuf_addf(err, "cannot update ref '%s': "1379"is at %s but expected %s",1380update->refname,1381oid_to_hex(iter->oid),1382oid_to_hex(&update->old_oid));1383goto error;1384}1385}1386
1387/* Now figure out what to use for the new value: */1388if ((update->flags & REF_HAVE_NEW)) {1389/*1390* The update takes precedence. Skip
1391* the iterator over the unneeded
1392* value.
1393*/
1394if ((ok = ref_iterator_advance(iter)) != ITER_OK)1395iter = NULL;1396cmp = +1;1397} else {1398/*1399* The update doesn't actually want to
1400* change anything. We're done with it.
1401*/
1402i++;1403cmp = -1;1404}1405} else if (cmp > 0) {1406/*1407* There is no old value but there is an
1408* update for this reference. Make sure that
1409* the update didn't expect an existing value:
1410*/
1411if ((update->flags & REF_HAVE_OLD) &&1412!is_null_oid(&update->old_oid)) {1413strbuf_addf(err, "cannot update ref '%s': "1414"reference is missing but expected %s",1415update->refname,1416oid_to_hex(&update->old_oid));1417goto error;1418}1419}1420
1421if (cmp < 0) {1422/* Pass the old reference through. */1423
1424struct object_id peeled;1425int peel_error = ref_iterator_peel(iter, &peeled);1426
1427if (write_packed_entry(out, iter->refname,1428iter->oid,1429peel_error ? NULL : &peeled))1430goto write_error;1431
1432if ((ok = ref_iterator_advance(iter)) != ITER_OK)1433iter = NULL;1434} else if (is_null_oid(&update->new_oid)) {1435/*1436* The update wants to delete the reference,
1437* and the reference either didn't exist or we
1438* have already skipped it. So we're done with
1439* the update (and don't have to write
1440* anything).
1441*/
1442i++;1443} else {1444struct object_id peeled;1445int peel_error = peel_object(refs->base.repo,1446&update->new_oid,1447&peeled);1448
1449if (write_packed_entry(out, update->refname,1450&update->new_oid,1451peel_error ? NULL : &peeled))1452goto write_error;1453
1454i++;1455}1456}1457
1458if (ok != ITER_DONE) {1459strbuf_addstr(err, "unable to write packed-refs file: "1460"error iterating over old contents");1461goto error;1462}1463
1464if (fflush(out) ||1465fsync_component(FSYNC_COMPONENT_REFERENCE, get_tempfile_fd(refs->tempfile)) ||1466close_tempfile_gently(refs->tempfile)) {1467strbuf_addf(err, "error closing file %s: %s",1468get_tempfile_path(refs->tempfile),1469strerror(errno));1470strbuf_release(&sb);1471delete_tempfile(&refs->tempfile);1472return -1;1473}1474
1475return 0;1476
1477write_error:1478strbuf_addf(err, "error writing to %s: %s",1479get_tempfile_path(refs->tempfile), strerror(errno));1480
1481error:1482if (iter)1483ref_iterator_abort(iter);1484
1485delete_tempfile(&refs->tempfile);1486return -1;1487}
1488
/*
 * Return nonzero iff `transaction` could actually change the contents
 * of the `packed-refs` file. The `packed-refs` lock must already be
 * held (checked via BUG below). See the long comment in the body for
 * exactly which transactions are recognized as no-ops.
 */
int is_packed_transaction_needed(struct ref_store *ref_store,
				 struct ref_transaction *transaction)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ,
			"is_packed_transaction_needed");
	struct strbuf referent = STRBUF_INIT;
	size_t i;
	int ret;

	if (!is_lock_file_locked(&refs->lock))
		BUG("is_packed_transaction_needed() called while unlocked");

	/*
	 * We're only going to bother returning false for the common,
	 * trivial case that references are only being deleted, their
	 * old values are not being checked, and the old `packed-refs`
	 * file doesn't contain any of those reference(s). This gives
	 * false positives for some other cases that could
	 * theoretically be optimized away:
	 *
	 * 1. It could be that the old value is being verified without
	 *    setting a new value. In this case, we could verify the
	 *    old value here and skip the update if it agrees. If it
	 *    disagrees, we could either let the update go through
	 *    (the actual commit would re-detect and report the
	 *    problem), or come up with a way of reporting such an
	 *    error to *our* caller.
	 *
	 * 2. It could be that a new value is being set, but that it
	 *    is identical to the current packed value of the
	 *    reference.
	 *
	 * Neither of these cases will come up in the current code,
	 * because the only caller of this function passes to it a
	 * transaction that only includes `delete` updates with no
	 * `old_id`. Even if that ever changes, false positives only
	 * cause an optimization to be missed; they do not affect
	 * correctness.
	 */

	/*
	 * Start with the cheap checks that don't require old
	 * reference values to be read:
	 */
	for (i = 0; i < transaction->nr; i++) {
		struct ref_update *update = transaction->updates[i];

		if (update->flags & REF_HAVE_OLD)
			/* Have to check the old value -> needed. */
			return 1;

		if ((update->flags & REF_HAVE_NEW) && !is_null_oid(&update->new_oid))
			/* Have to set a new value -> needed. */
			return 1;
	}

	/*
	 * The transaction isn't checking any old values nor is it
	 * setting any nonzero new values, so it still might be able
	 * to be skipped. Now do the more expensive check: the update
	 * is needed if any of the updates is a delete, and the old
	 * `packed-refs` file contains a value for that reference.
	 */
	ret = 0;
	for (i = 0; i < transaction->nr; i++) {
		struct ref_update *update = transaction->updates[i];
		int failure_errno;
		unsigned int type;
		struct object_id oid;

		/*
		 * After the first loop, any update with REF_HAVE_NEW
		 * here has a null new_oid, i.e. it is a deletion.
		 */
		if (!(update->flags & REF_HAVE_NEW))
			/*
			 * This reference isn't being deleted -> not
			 * needed.
			 */
			continue;

		if (!refs_read_raw_ref(ref_store, update->refname, &oid,
				       &referent, &type, &failure_errno) ||
		    failure_errno != ENOENT) {
			/*
			 * We have to actually delete that reference
			 * -> this transaction is needed.
			 */
			ret = 1;
			break;
		}
	}

	strbuf_release(&referent);
	return ret;
}
1583
/*
 * Backend-private state for a packed-refs transaction, stored in
 * `ref_transaction::backend_data` by packed_transaction_prepare().
 */
struct packed_transaction_backend_data {
	/* True iff the transaction owns the packed-refs lock. */
	int own_lock;

	/*
	 * The transaction's updates, sorted by refname; each item's
	 * util points at the corresponding `struct ref_update`.
	 */
	struct string_list updates;
};
1591static void packed_transaction_cleanup(struct packed_ref_store *refs,1592struct ref_transaction *transaction)1593{
1594struct packed_transaction_backend_data *data = transaction->backend_data;1595
1596if (data) {1597string_list_clear(&data->updates, 0);1598
1599if (is_tempfile_active(refs->tempfile))1600delete_tempfile(&refs->tempfile);1601
1602if (data->own_lock && is_lock_file_locked(&refs->lock)) {1603packed_refs_unlock(&refs->base);1604data->own_lock = 0;1605}1606
1607free(data);1608transaction->backend_data = NULL;1609}1610
1611transaction->state = REF_TRANSACTION_CLOSED;1612}
1613
/*
 * The "prepare" phase for the packed backend: sort the transaction's
 * updates by refname, reject duplicates, take the `packed-refs` lock
 * if we don't already hold it, and stage the new file contents in a
 * tempfile via write_with_updates(). On failure, clean up and return
 * TRANSACTION_GENERIC_ERROR with a message in `err`.
 */
static int packed_transaction_prepare(struct ref_store *ref_store,
				      struct ref_transaction *transaction,
				      struct strbuf *err)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
			"ref_transaction_prepare");
	struct packed_transaction_backend_data *data;
	size_t i;
	int ret = TRANSACTION_GENERIC_ERROR;

	/*
	 * Note that we *don't* skip transactions with zero updates,
	 * because such a transaction might be executed for the side
	 * effect of ensuring that all of the references are peeled or
	 * ensuring that the `packed-refs` file is sorted. If the
	 * caller wants to optimize away empty transactions, it should
	 * do so itself.
	 */

	CALLOC_ARRAY(data, 1);
	string_list_init_nodup(&data->updates);

	transaction->backend_data = data;

	/*
	 * Stick the updates in a string list by refname so that we
	 * can sort them:
	 */
	for (i = 0; i < transaction->nr; i++) {
		struct ref_update *update = transaction->updates[i];
		struct string_list_item *item =
			string_list_append(&data->updates, update->refname);

		/* Store a pointer to update in item->util: */
		item->util = update;
	}
	string_list_sort(&data->updates);

	if (ref_update_reject_duplicates(&data->updates, err))
		goto failure;

	/* Take the lock ourselves if the caller has not done so. */
	if (!is_lock_file_locked(&refs->lock)) {
		if (packed_refs_lock(ref_store, 0, err))
			goto failure;
		data->own_lock = 1;
	}

	if (write_with_updates(refs, &data->updates, err))
		goto failure;

	transaction->state = REF_TRANSACTION_PREPARED;
	return 0;

failure:
	packed_transaction_cleanup(refs, transaction);
	return ret;
}
1673
1674static int packed_transaction_abort(struct ref_store *ref_store,1675struct ref_transaction *transaction,1676struct strbuf *err UNUSED)1677{
1678struct packed_ref_store *refs = packed_downcast(1679ref_store,1680REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,1681"ref_transaction_abort");1682
1683packed_transaction_cleanup(refs, transaction);1684return 0;1685}
1686
1687static int packed_transaction_finish(struct ref_store *ref_store,1688struct ref_transaction *transaction,1689struct strbuf *err)1690{
1691struct packed_ref_store *refs = packed_downcast(1692ref_store,1693REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,1694"ref_transaction_finish");1695int ret = TRANSACTION_GENERIC_ERROR;1696char *packed_refs_path;1697
1698clear_snapshot(refs);1699
1700packed_refs_path = get_locked_file_path(&refs->lock);1701if (rename_tempfile(&refs->tempfile, packed_refs_path)) {1702strbuf_addf(err, "error replacing %s: %s",1703refs->path, strerror(errno));1704goto cleanup;1705}1706
1707ret = 0;1708
1709cleanup:1710free(packed_refs_path);1711packed_transaction_cleanup(refs, transaction);1712return ret;1713}
1714
/*
 * An "initial" commit (into an empty ref store) has no special fast
 * path in this backend; it is simply run as a normal transaction
 * commit.
 */
static int packed_initial_transaction_commit(struct ref_store *ref_store UNUSED,
					     struct ref_transaction *transaction,
					     struct strbuf *err)
{
	return ref_transaction_commit(transaction, err);
}
1721
/*
 * The `pack_refs` method is a no-op for this backend; see the comment
 * in the body.
 */
static int packed_pack_refs(struct ref_store *ref_store UNUSED,
			    struct pack_refs_opts *pack_opts UNUSED)
{
	/*
	 * Packed refs are already packed. It might be that loose refs
	 * are packed *into* a packed refs store, but that is done by
	 * updating the packed references via a transaction.
	 */
	return 0;
}
1732
/*
 * The packed backend stores no reflogs (see the NULL reflog methods
 * in `refs_be_packed` below), so reflog iteration always yields an
 * empty sequence.
 */
static struct ref_iterator *packed_reflog_iterator_begin(struct ref_store *ref_store UNUSED)
{
	return empty_ref_iterator_begin();
}
1737
/*
 * Consistency checking for the packed backend; currently a no-op
 * that always reports success.
 */
static int packed_fsck(struct ref_store *ref_store UNUSED,
		       struct fsck_options *o UNUSED)
{
	return 0;
}
1743
/*
 * Method table for the "packed" ref storage backend. Methods left
 * NULL (ref renames/copies, symbolic-ref reads, and all reflog
 * operations) are not supported by this backend.
 */
struct ref_storage_be refs_be_packed = {
	.name = "packed",
	.init = packed_ref_store_init,
	.release = packed_ref_store_release,
	.create_on_disk = packed_ref_store_create_on_disk,
	.remove_on_disk = packed_ref_store_remove_on_disk,

	.transaction_prepare = packed_transaction_prepare,
	.transaction_finish = packed_transaction_finish,
	.transaction_abort = packed_transaction_abort,
	.initial_transaction_commit = packed_initial_transaction_commit,

	.pack_refs = packed_pack_refs,
	.rename_ref = NULL,
	.copy_ref = NULL,

	.iterator_begin = packed_ref_iterator_begin,
	.read_raw_ref = packed_read_raw_ref,
	.read_symbolic_ref = NULL,

	/* Reflogs are not stored by this backend. */
	.reflog_iterator_begin = packed_reflog_iterator_begin,
	.for_each_reflog_ent = NULL,
	.for_each_reflog_ent_reverse = NULL,
	.reflog_exists = NULL,
	.create_reflog = NULL,
	.delete_reflog = NULL,
	.reflog_expire = NULL,

	.fsck = packed_fsck,
};