git / cache-tree.c (982 lines, 24.7 KB)
1#define USE_THE_REPOSITORY_VARIABLE
2
3#include "git-compat-util.h"
4#include "environment.h"
5#include "hex.h"
6#include "lockfile.h"
7#include "tree.h"
8#include "tree-walk.h"
9#include "cache-tree.h"
10#include "bulk-checkin.h"
11#include "object-file.h"
12#include "object-store-ll.h"
13#include "read-cache-ll.h"
14#include "replace-object.h"
15#include "promisor-remote.h"
16#include "trace.h"
17#include "trace2.h"
18
19#ifndef DEBUG_CACHE_TREE
20#define DEBUG_CACHE_TREE 0
21#endif
22
23struct cache_tree *cache_tree(void)
24{
25struct cache_tree *it = xcalloc(1, sizeof(struct cache_tree));
26it->entry_count = -1;
27return it;
28}
29
30void cache_tree_free(struct cache_tree **it_p)
31{
32int i;
33struct cache_tree *it = *it_p;
34
35if (!it)
36return;
37for (i = 0; i < it->subtree_nr; i++)
38if (it->down[i]) {
39cache_tree_free(&it->down[i]->cache_tree);
40free(it->down[i]);
41}
42free(it->down);
43free(it);
44*it_p = NULL;
45}
46
/*
 * Order two subtree names: a shorter name sorts before a longer one,
 * and names of equal length are ordered by memcmp(). Returns a value
 * <0, ==0 or >0 accordingly.
 */
static int subtree_name_cmp(const char *one, int onelen,
			    const char *two, int twolen)
{
	if (onelen != twolen)
		return onelen < twolen ? -1 : 1;
	return memcmp(one, two, onelen);
}
56
57int cache_tree_subtree_pos(struct cache_tree *it, const char *path, int pathlen)
58{
59struct cache_tree_sub **down = it->down;
60int lo, hi;
61lo = 0;
62hi = it->subtree_nr;
63while (lo < hi) {
64int mi = lo + (hi - lo) / 2;
65struct cache_tree_sub *mdl = down[mi];
66int cmp = subtree_name_cmp(path, pathlen,
67mdl->name, mdl->namelen);
68if (!cmp)
69return mi;
70if (cmp < 0)
71hi = mi;
72else
73lo = mi + 1;
74}
75return -lo-1;
76}
77
/*
 * Look up the immediate subtree of "it" named path[0..pathlen).
 * Returns NULL when it is absent and "create" is zero; otherwise a
 * new entry is inserted at its sorted position and returned.
 */
static struct cache_tree_sub *find_subtree(struct cache_tree *it,
					   const char *path,
					   int pathlen,
					   int create)
{
	struct cache_tree_sub *down;
	int pos = cache_tree_subtree_pos(it, path, pathlen);
	if (0 <= pos)
		return it->down[pos];
	if (!create)
		return NULL;

	/* cache_tree_subtree_pos() encodes the insertion point as -pos-1 */
	pos = -pos-1;
	ALLOC_GROW(it->down, it->subtree_nr + 1, it->subtree_alloc);
	it->subtree_nr++;

	/* "name" is a flexible array member; copy "path" into it */
	FLEX_ALLOC_MEM(down, name, path, pathlen);
	down->cache_tree = NULL;
	down->namelen = pathlen;

	/* shift later entries one slot up to keep it->down[] sorted */
	if (pos < it->subtree_nr)
		MOVE_ARRAY(it->down + pos + 1, it->down + pos,
			   it->subtree_nr - pos - 1);
	it->down[pos] = down;
	return down;
}
104
/*
 * Find or create the immediate subtree of "it" for the NUL-terminated
 * "path". Never returns NULL.
 */
struct cache_tree_sub *cache_tree_sub(struct cache_tree *it, const char *path)
{
	int len = strlen(path);

	return find_subtree(it, path, len, 1);
}
110
/*
 * Invalidate the cache-tree nodes along "path" under "it". Returns 1
 * when "it" was non-NULL (i.e. something may have been invalidated),
 * 0 otherwise.
 */
static int do_invalidate_path(struct cache_tree *it, const char *path)
{
	/* a/b/c
	 * ==> invalidate self
	 * ==> find "a", have it invalidate "b/c"
	 * a
	 * ==> invalidate self
	 * ==> if "a" exists as a subtree, remove it.
	 */
	const char *slash;
	int namelen;
	struct cache_tree_sub *down;

#if DEBUG_CACHE_TREE
	fprintf(stderr, "cache-tree invalidate <%s>\n", path);
#endif

	if (!it)
		return 0;
	slash = strchrnul(path, '/');
	namelen = slash - path;
	it->entry_count = -1;	/* mark this level as invalid */
	if (!*slash) {
		/* last path component: remove the matching subtree entirely */
		int pos;
		pos = cache_tree_subtree_pos(it, path, namelen);
		if (0 <= pos) {
			cache_tree_free(&it->down[pos]->cache_tree);
			free(it->down[pos]);
			/* 0 1 2 3 4 5
			 *       ^     ^subtree_nr = 6
			 *       pos
			 * move 4 and 5 up one place (2 entries)
			 * 2 = 6 - 3 - 1 = subtree_nr - pos - 1
			 */
			MOVE_ARRAY(it->down + pos, it->down + pos + 1,
				   it->subtree_nr - pos - 1);
			it->subtree_nr--;
		}
		return 1;
	}
	/* recurse into the first component with the rest of the path */
	down = find_subtree(it, path, namelen, 0);
	if (down)
		do_invalidate_path(down->cache_tree, slash + 1);
	return 1;
}
156
157void cache_tree_invalidate_path(struct index_state *istate, const char *path)
158{
159if (do_invalidate_path(istate->cache_tree, path))
160istate->cache_changed |= CACHE_TREE_CHANGED;
161}
162
/*
 * Sanity-check the index before writing trees out of it. Returns 0
 * when the index is usable, -1 otherwise. With WRITE_TREE_SILENT the
 * first problem returns immediately without printing diagnostics.
 */
static int verify_cache(struct index_state *istate, int flags)
{
	unsigned i, funny;
	int silent = flags & WRITE_TREE_SILENT;

	/* Verify that the tree is merged */
	funny = 0;
	for (i = 0; i < istate->cache_nr; i++) {
		const struct cache_entry *ce = istate->cache[i];
		if (ce_stage(ce)) {
			if (silent)
				return -1;
			/* report at most 10 offenders, then elide the rest */
			if (10 < ++funny) {
				fprintf(stderr, "...\n");
				break;
			}
			fprintf(stderr, "%s: unmerged (%s)\n",
				ce->name, oid_to_hex(&ce->oid));
		}
	}
	if (funny)
		return -1;

	/* Also verify that the cache does not have path and path/file
	 * at the same time. At this point we know the cache has only
	 * stage 0 entries.
	 */
	funny = 0;
	for (i = 0; i + 1 < istate->cache_nr; i++) {
		/* path/file always comes after path because of the way
		 * the cache is sorted. Also path can appear only once,
		 * which means conflicting one would immediately follow.
		 */
		const struct cache_entry *this_ce = istate->cache[i];
		const struct cache_entry *next_ce = istate->cache[i + 1];
		const char *this_name = this_ce->name;
		const char *next_name = next_ce->name;
		int this_len = ce_namelen(this_ce);
		if (this_len < ce_namelen(next_ce) &&
		    next_name[this_len] == '/' &&
		    strncmp(this_name, next_name, this_len) == 0) {
			if (10 < ++funny) {
				fprintf(stderr, "...\n");
				break;
			}
			fprintf(stderr, "You have both %s and %s\n",
				this_name, next_name);
		}
	}
	if (funny)
		return -1;
	return 0;
}
216
/*
 * Compact it->down[] in place, keeping only the subtrees the caller
 * marked "used" and freeing the rest. it->subtree_nr is reduced to
 * match the surviving entries.
 */
static void discard_unused_subtrees(struct cache_tree *it)
{
	struct cache_tree_sub **down = it->down;
	int nr = it->subtree_nr;	/* original count; subtree_nr shrinks below */
	int dst, src;
	for (dst = src = 0; src < nr; src++) {
		struct cache_tree_sub *s = down[src];
		if (s->used)
			down[dst++] = s;
		else {
			cache_tree_free(&s->cache_tree);
			free(s);
			it->subtree_nr--;
		}
	}
}
233
234int cache_tree_fully_valid(struct cache_tree *it)
235{
236int i;
237if (!it)
238return 0;
239if (it->entry_count < 0 || !repo_has_object_file(the_repository, &it->oid))
240return 0;
241for (i = 0; i < it->subtree_nr; i++) {
242if (!cache_tree_fully_valid(it->down[i]->cache_tree))
243return 0;
244}
245return 1;
246}
247
248static int must_check_existence(const struct cache_entry *ce)
249{
250return !(repo_has_promisor_remote(the_repository) && ce_skip_worktree(ce));
251}
252
/*
 * Recursively recompute and (unless dry-run) write the tree object
 * for the "entries" index entries under "base"/"baselen", updating
 * "it" in place. Returns the number of index entries consumed at this
 * level, or a negative value on error. *skip_count receives how many
 * consumed entries were excluded from the written tree (CE_REMOVE).
 */
static int update_one(struct cache_tree *it,
		      struct cache_entry **cache,
		      int entries,
		      const char *base,
		      int baselen,
		      int *skip_count,
		      int flags)
{
	struct strbuf buffer;
	int missing_ok = flags & WRITE_TREE_MISSING_OK;
	int dryrun = flags & WRITE_TREE_DRY_RUN;
	int repair = flags & WRITE_TREE_REPAIR;
	int to_invalidate = 0;
	int i;

	assert(!(dryrun && repair));

	*skip_count = 0;

	/*
	 * If the first entry of this region is a sparse directory
	 * entry corresponding exactly to 'base', then this cache_tree
	 * struct is a "leaf" in the data structure, pointing to the
	 * tree OID specified in the entry.
	 */
	if (entries > 0) {
		const struct cache_entry *ce = cache[0];

		if (S_ISSPARSEDIR(ce->ce_mode) &&
		    ce->ce_namelen == baselen &&
		    !strncmp(ce->name, base, baselen)) {
			it->entry_count = 1;
			oidcpy(&it->oid, &ce->oid);
			return 1;
		}
	}

	/* still valid and the tree object exists: nothing to recompute */
	if (0 <= it->entry_count && repo_has_object_file(the_repository, &it->oid))
		return it->entry_count;

	/*
	 * We first scan for subtrees and update them; we start by
	 * marking existing subtrees -- the ones that are unmarked
	 * should not be in the result.
	 */
	for (i = 0; i < it->subtree_nr; i++)
		it->down[i]->used = 0;

	/*
	 * Find the subtrees and update them.
	 */
	i = 0;
	while (i < entries) {
		const struct cache_entry *ce = cache[i];
		struct cache_tree_sub *sub;
		const char *path, *slash;
		int pathlen, sublen, subcnt, subskip;

		path = ce->name;
		pathlen = ce_namelen(ce);
		if (pathlen <= baselen || memcmp(base, path, baselen))
			break; /* at the end of this level */

		slash = strchr(path + baselen, '/');
		if (!slash) {
			/* plain file at this level; handled in the second pass */
			i++;
			continue;
		}
		/*
		 * a/bbb/c (base = a/, slash = /c)
		 * ==>
		 * path+baselen = bbb/c, sublen = 3
		 */
		sublen = slash - (path + baselen);
		sub = find_subtree(it, path + baselen, sublen, 1);
		if (!sub->cache_tree)
			sub->cache_tree = cache_tree();
		subcnt = update_one(sub->cache_tree,
				    cache + i, entries - i,
				    path,
				    baselen + sublen + 1,
				    &subskip,
				    flags);
		if (subcnt < 0)
			return subcnt;
		if (!subcnt)
			die("index cache-tree records empty sub-tree");
		i += subcnt;
		sub->count = subcnt; /* to be used in the next loop */
		*skip_count += subskip;
		sub->used = 1;
	}

	discard_unused_subtrees(it);

	/*
	 * Then write out the tree object for this level.
	 */
	strbuf_init(&buffer, 8192);

	i = 0;
	while (i < entries) {
		const struct cache_entry *ce = cache[i];
		struct cache_tree_sub *sub = NULL;
		const char *path, *slash;
		int pathlen, entlen;
		const struct object_id *oid;
		unsigned mode;
		int expected_missing = 0;
		int contains_ita = 0;
		int ce_missing_ok;

		path = ce->name;
		pathlen = ce_namelen(ce);
		if (pathlen <= baselen || memcmp(base, path, baselen))
			break; /* at the end of this level */

		slash = strchr(path + baselen, '/');
		if (slash) {
			/* a subdirectory: emit one tree entry covering its run */
			entlen = slash - (path + baselen);
			sub = find_subtree(it, path + baselen, entlen, 0);
			if (!sub)
				die("cache-tree.c: '%.*s' in '%s' not found",
				    entlen, path + baselen, path);
			i += sub->count;
			oid = &sub->cache_tree->oid;
			mode = S_IFDIR;
			contains_ita = sub->cache_tree->entry_count < 0;
			if (contains_ita) {
				to_invalidate = 1;
				expected_missing = 1;
			}
		}
		else {
			oid = &ce->oid;
			mode = ce->ce_mode;
			entlen = pathlen - baselen;
			i++;
		}

		/* gitlinks are never expected to exist in our object store */
		ce_missing_ok = mode == S_IFGITLINK || missing_ok ||
			!must_check_existence(ce);
		if (is_null_oid(oid) ||
		    (!ce_missing_ok && !repo_has_object_file(the_repository, oid))) {
			strbuf_release(&buffer);
			if (expected_missing)
				return -1;
			return error("invalid object %06o %s for '%.*s'",
				mode, oid_to_hex(oid), entlen+baselen, path);
		}

		/*
		 * CE_REMOVE entries are removed before the index is
		 * written to disk. Skip them to remain consistent
		 * with the future on-disk index.
		 */
		if (ce->ce_flags & CE_REMOVE) {
			*skip_count = *skip_count + 1;
			continue;
		}

		/*
		 * CE_INTENT_TO_ADD entries exist in on-disk index but
		 * they are not part of generated trees. Invalidate up
		 * to root to force cache-tree users to read elsewhere.
		 */
		if (!sub && ce_intent_to_add(ce)) {
			to_invalidate = 1;
			continue;
		}

		/*
		 * "sub" can be an empty tree if all subentries are i-t-a.
		 */
		if (contains_ita && is_empty_tree_oid(oid, the_repository->hash_algo))
			continue;

		/* tree entry format: "<mode> <name>\0<raw oid>" */
		strbuf_grow(&buffer, entlen + 100);
		strbuf_addf(&buffer, "%o %.*s%c", mode, entlen, path + baselen, '\0');
		strbuf_add(&buffer, oid->hash, the_hash_algo->rawsz);

#if DEBUG_CACHE_TREE
		fprintf(stderr, "cache-tree update-one %o %.*s\n",
			mode, entlen, path + baselen);
#endif
	}

	if (repair) {
		/* recompute the hash; adopt it only if the object already exists */
		struct object_id oid;
		hash_object_file(the_hash_algo, buffer.buf, buffer.len,
				 OBJ_TREE, &oid);
		if (repo_has_object_file_with_flags(the_repository, &oid, OBJECT_INFO_SKIP_FETCH_OBJECT))
			oidcpy(&it->oid, &oid);
		else
			to_invalidate = 1;
	} else if (dryrun) {
		hash_object_file(the_hash_algo, buffer.buf, buffer.len,
				 OBJ_TREE, &it->oid);
	} else if (write_object_file_flags(buffer.buf, buffer.len, OBJ_TREE,
					   &it->oid, NULL, flags & WRITE_TREE_SILENT
					   ? HASH_SILENT : 0)) {
		strbuf_release(&buffer);
		return -1;
	}

	strbuf_release(&buffer);
	/* skipped entries are not represented in the written tree */
	it->entry_count = to_invalidate ? -1 : i - *skip_count;
#if DEBUG_CACHE_TREE
	fprintf(stderr, "cache-tree update-one (%d ent, %d subtree) %s\n",
		it->entry_count, it->subtree_nr,
		oid_to_hex(&it->oid));
#endif
	return i;
}
467
/*
 * Recompute any invalid parts of the cache-tree for "istate" and
 * write the resulting tree objects. Returns 0 on success, a negative
 * value on error (unmerged entries, missing or unwritable objects).
 */
int cache_tree_update(struct index_state *istate, int flags)
{
	int skip, i;

	i = verify_cache(istate, flags);

	if (i)
		return i;

	if (!istate->cache_tree)
		istate->cache_tree = cache_tree();

	/* in a partial clone, batch-fetch missing blobs up front */
	if (!(flags & WRITE_TREE_MISSING_OK) && repo_has_promisor_remote(the_repository))
		prefetch_cache_entries(istate, must_check_existence);

	trace_performance_enter();
	trace2_region_enter("cache_tree", "update", the_repository);
	/* group all tree-object writes into one ODB transaction */
	begin_odb_transaction();
	i = update_one(istate->cache_tree, istate->cache, istate->cache_nr,
		       "", 0, &skip, flags);
	end_odb_transaction();
	trace2_region_leave("cache_tree", "update", the_repository);
	trace_performance_leave("cache_tree_update");
	if (i < 0)
		return i;
	istate->cache_changed |= CACHE_TREE_CHANGED;
	return 0;
}
496
/*
 * Serialize one cache_tree node (and, recursively, its subtrees) into
 * "buffer" in the on-disk cache-tree format.
 */
static void write_one(struct strbuf *buffer, struct cache_tree *it,
		      const char *path, int pathlen)
{
	int i;

	/* One "cache-tree" entry consists of the following:
	 * path (NUL terminated)
	 * entry_count, subtree_nr ("%d %d\n")
	 * tree-sha1 (missing if invalid)
	 * subtree_nr "cache-tree" entries for subtrees.
	 */
	strbuf_grow(buffer, pathlen + 100);
	strbuf_add(buffer, path, pathlen);
	strbuf_addf(buffer, "%c%d %d\n", 0, it->entry_count, it->subtree_nr);

#if DEBUG_CACHE_TREE
	if (0 <= it->entry_count)
		fprintf(stderr, "cache-tree <%.*s> (%d ent, %d subtree) %s\n",
			pathlen, path, it->entry_count, it->subtree_nr,
			oid_to_hex(&it->oid));
	else
		fprintf(stderr, "cache-tree <%.*s> (%d subtree) invalid\n",
			pathlen, path, it->subtree_nr);
#endif

	/* an invalid node (entry_count < 0) has no tree OID stored */
	if (0 <= it->entry_count) {
		strbuf_add(buffer, it->oid.hash, the_hash_algo->rawsz);
	}
	for (i = 0; i < it->subtree_nr; i++) {
		struct cache_tree_sub *down = it->down[i];
		if (i) {
			/* subtrees must appear in strictly ascending name order */
			struct cache_tree_sub *prev = it->down[i-1];
			if (subtree_name_cmp(down->name, down->namelen,
					     prev->name, prev->namelen) <= 0)
				die("fatal - unsorted cache subtree");
		}
		write_one(buffer, down->cache_tree, down->name, down->namelen);
	}
}
536
/*
 * Serialize the whole cache-tree rooted at "root" into "sb", wrapped
 * in a trace2 region.
 */
void cache_tree_write(struct strbuf *sb, struct cache_tree *root)
{
	trace2_region_enter("cache_tree", "write", the_repository);
	write_one(sb, root, "", 0);
	trace2_region_leave("cache_tree", "write", the_repository);
}
543
/*
 * Parse one serialized cache_tree node (and, recursively, its
 * subtrees) from *buffer/*size_p, advancing both past the consumed
 * bytes. Returns the new node, or NULL on malformed input.
 */
static struct cache_tree *read_one(const char **buffer, unsigned long *size_p)
{
	const char *buf = *buffer;
	unsigned long size = *size_p;
	const char *cp;
	char *ep;
	struct cache_tree *it;
	int i, subtree_nr;
	const unsigned rawsz = the_hash_algo->rawsz;

	it = NULL;
	/* skip name, but make sure name exists */
	while (size && *buf) {
		size--;
		buf++;
	}
	if (!size)
		goto free_return;
	buf++; size--;	/* consume the NUL terminating the name */
	it = cache_tree();

	cp = buf;
	it->entry_count = strtol(cp, &ep, 10);
	if (cp == ep)
		goto free_return;
	cp = ep;
	subtree_nr = strtol(cp, &ep, 10);
	if (cp == ep)
		goto free_return;
	/* skip to the end of the "%d %d\n" header line */
	while (size && *buf && *buf != '\n') {
		size--;
		buf++;
	}
	if (!size)
		goto free_return;
	buf++; size--;
	/* a valid node (entry_count >= 0) is followed by its raw OID */
	if (0 <= it->entry_count) {
		if (size < rawsz)
			goto free_return;
		oidread(&it->oid, (const unsigned char *)buf,
			the_repository->hash_algo);
		buf += rawsz;
		size -= rawsz;
	}

#if DEBUG_CACHE_TREE
	if (0 <= it->entry_count)
		fprintf(stderr, "cache-tree <%s> (%d ent, %d subtree) %s\n",
			*buffer, it->entry_count, subtree_nr,
			oid_to_hex(&it->oid));
	else
		fprintf(stderr, "cache-tree <%s> (%d subtrees) invalid\n",
			*buffer, subtree_nr);
#endif

	/*
	 * Just a heuristic -- we do not add directories that often but
	 * we do not want to have to extend it immediately when we do,
	 * hence +2.
	 */
	it->subtree_alloc = subtree_nr + 2;
	CALLOC_ARRAY(it->down, it->subtree_alloc);
	for (i = 0; i < subtree_nr; i++) {
		/* read each subtree */
		struct cache_tree *sub;
		struct cache_tree_sub *subtree;
		const char *name = buf;

		sub = read_one(&buf, &size);
		if (!sub)
			goto free_return;
		subtree = cache_tree_sub(it, name);
		subtree->cache_tree = sub;
	}
	/* cache_tree_sub() dedups; a mismatch means duplicate subtree names */
	if (subtree_nr != it->subtree_nr)
		die("cache-tree: internal error");
	*buffer = buf;
	*size_p = size;
	return it;

 free_return:
	cache_tree_free(&it);
	return NULL;
}
628
629struct cache_tree *cache_tree_read(const char *buffer, unsigned long size)
630{
631struct cache_tree *result;
632
633if (buffer[0])
634return NULL; /* not the whole tree */
635
636trace2_region_enter("cache_tree", "read", the_repository);
637result = read_one(&buffer, &size);
638trace2_region_leave("cache_tree", "read", the_repository);
639
640return result;
641}
642
643static struct cache_tree *cache_tree_find(struct cache_tree *it, const char *path)
644{
645if (!it)
646return NULL;
647while (*path) {
648const char *slash;
649struct cache_tree_sub *sub;
650
651slash = strchrnul(path, '/');
652/*
653* Between path and slash is the name of the subtree
654* to look for.
655*/
656sub = find_subtree(it, path, slash - path, 0);
657if (!sub)
658return NULL;
659it = sub->cache_tree;
660
661path = slash;
662while (*path == '/')
663path++;
664}
665return it;
666}
667
668static int write_index_as_tree_internal(struct object_id *oid,
669struct index_state *index_state,
670int cache_tree_valid,
671int flags,
672const char *prefix)
673{
674if (flags & WRITE_TREE_IGNORE_CACHE_TREE) {
675cache_tree_free(&index_state->cache_tree);
676cache_tree_valid = 0;
677}
678
679if (!cache_tree_valid && cache_tree_update(index_state, flags) < 0)
680return WRITE_TREE_UNMERGED_INDEX;
681
682if (prefix) {
683struct cache_tree *subtree;
684subtree = cache_tree_find(index_state->cache_tree, prefix);
685if (!subtree)
686return WRITE_TREE_PREFIX_ERROR;
687oidcpy(oid, &subtree->oid);
688}
689else
690oidcpy(oid, &index_state->cache_tree->oid);
691
692return 0;
693}
694
/*
 * Write the in-core index of "repo" out as a tree and return the
 * looked-up tree object. Dies via BUG() when the index has unmerged
 * entries, after reporting each offending entry.
 */
struct tree* write_in_core_index_as_tree(struct repository *repo) {
	struct object_id o;
	int was_valid, ret;

	struct index_state *index_state = repo->index;
	was_valid = index_state->cache_tree &&
		    cache_tree_fully_valid(index_state->cache_tree);

	ret = write_index_as_tree_internal(&o, index_state, was_valid, 0, NULL);
	if (ret == WRITE_TREE_UNMERGED_INDEX) {
		int i;
		/* list every unmerged entry before the final BUG() */
		bug("there are unmerged index entries:");
		for (i = 0; i < index_state->cache_nr; i++) {
			const struct cache_entry *ce = index_state->cache[i];
			if (ce_stage(ce))
				bug("%d %.*s", ce_stage(ce),
				    (int)ce_namelen(ce), ce->name);
		}
		BUG("unmerged index entries when writing in-core index");
	}

	return lookup_tree(repo, &index_state->cache_tree->oid);
}
718
719
/*
 * Read the index from "index_path", write it out as a tree and store
 * the resulting OID in "oid" (with "prefix", the OID of that subtree
 * instead). The freshened cache-tree is written back to the index
 * under a lock when it changed. Returns 0 on success or one of the
 * WRITE_TREE_* error codes.
 */
int write_index_as_tree(struct object_id *oid, struct index_state *index_state, const char *index_path, int flags, const char *prefix)
{
	int entries, was_valid;
	struct lock_file lock_file = LOCK_INIT;
	int ret;

	/* lock the index so the updated cache-tree can be committed back */
	hold_lock_file_for_update(&lock_file, index_path, LOCK_DIE_ON_ERROR);

	entries = read_index_from(index_state, index_path, get_git_dir());
	if (entries < 0) {
		ret = WRITE_TREE_UNREADABLE_INDEX;
		goto out;
	}

	was_valid = !(flags & WRITE_TREE_IGNORE_CACHE_TREE) &&
		    index_state->cache_tree &&
		    cache_tree_fully_valid(index_state->cache_tree);

	ret = write_index_as_tree_internal(oid, index_state, was_valid, flags,
					   prefix);
	if (!ret && !was_valid) {
		write_locked_index(index_state, &lock_file, COMMIT_LOCK);
		/* Not being able to write is fine -- we are only interested
		 * in updating the cache-tree part, and if the next caller
		 * ends up using the old index with unupdated cache-tree part
		 * it misses the work we did here, but that is just a
		 * performance penalty and not a big deal.
		 */
	}

 out:
	rollback_lock_file(&lock_file);
	return ret;
}
754
755static void prime_cache_tree_sparse_dir(struct cache_tree *it,
756struct tree *tree)
757{
758
759oidcpy(&it->oid, &tree->object.oid);
760it->entry_count = 1;
761}
762
/*
 * Rebuild "it" so that it mirrors the already-parsed "tree",
 * recursing into subtrees. "tree_path" accumulates the path from the
 * root; it is only maintained when a sparse index is in use.
 */
static void prime_cache_tree_rec(struct repository *r,
				 struct cache_tree *it,
				 struct tree *tree,
				 struct strbuf *tree_path)
{
	struct tree_desc desc;
	struct name_entry entry;
	int cnt;
	size_t base_path_len = tree_path->len;

	oidcpy(&it->oid, &tree->object.oid);

	init_tree_desc(&desc, &tree->object.oid, tree->buffer, tree->size);
	cnt = 0;
	while (tree_entry(&desc, &entry)) {
		if (!S_ISDIR(entry.mode))
			cnt++;	/* blobs/gitlinks count as one entry each */
		else {
			struct cache_tree_sub *sub;
			struct tree *subtree = lookup_tree(r, &entry.oid);

			if (parse_tree(subtree) < 0)
				exit(128);
			sub = cache_tree_sub(it, entry.path);
			sub->cache_tree = cache_tree();

			/*
			 * Recursively-constructed subtree path is only needed when working
			 * in a sparse index (where it's used to determine whether the
			 * subtree is a sparse directory in the index).
			 */
			if (r->index->sparse_index) {
				strbuf_setlen(tree_path, base_path_len);
				strbuf_add(tree_path, entry.path, entry.pathlen);
				strbuf_addch(tree_path, '/');
			}

			/*
			 * If a sparse index is in use, the directory being processed may be
			 * sparse. To confirm that, we can check whether an entry with that
			 * exact name exists in the index. If it does, the created subtree
			 * should be sparse. Otherwise, cache tree expansion should continue
			 * as normal.
			 */
			if (r->index->sparse_index &&
			    index_entry_exists(r->index, tree_path->buf, tree_path->len))
				prime_cache_tree_sparse_dir(sub->cache_tree, subtree);
			else
				prime_cache_tree_rec(r, sub->cache_tree, subtree, tree_path);
			cnt += sub->cache_tree->entry_count;
		}
	}

	it->entry_count = cnt;
}
818
819void prime_cache_tree(struct repository *r,
820struct index_state *istate,
821struct tree *tree)
822{
823struct strbuf tree_path = STRBUF_INIT;
824
825trace2_region_enter("cache-tree", "prime_cache_tree", r);
826cache_tree_free(&istate->cache_tree);
827istate->cache_tree = cache_tree();
828
829prime_cache_tree_rec(r, istate->cache_tree, tree, &tree_path);
830strbuf_release(&tree_path);
831istate->cache_changed |= CACHE_TREE_CHANGED;
832trace2_region_leave("cache-tree", "prime_cache_tree", r);
833}
834
835/*
836* find the cache_tree that corresponds to the current level without
837* exploding the full path into textual form. The root of the
838* cache tree is given as "root", and our current level is "info".
839* (1) When at root level, info->prev is NULL, so it is "root" itself.
840* (2) Otherwise, find the cache_tree that corresponds to one level
841* above us, and find ourselves in there.
842*/
843static struct cache_tree *find_cache_tree_from_traversal(struct cache_tree *root,
844struct traverse_info *info)
845{
846struct cache_tree *our_parent;
847
848if (!info->prev)
849return root;
850our_parent = find_cache_tree_from_traversal(root, info->prev);
851return cache_tree_find(our_parent, info->name);
852}
853
854int cache_tree_matches_traversal(struct cache_tree *root,
855struct name_entry *ent,
856struct traverse_info *info)
857{
858struct cache_tree *it;
859
860it = find_cache_tree_from_traversal(root, info);
861it = cache_tree_find(it, ent->path);
862if (it && it->entry_count > 0 && oideq(&ent->oid, &it->oid))
863return it->entry_count;
864return 0;
865}
866
867static void verify_one_sparse(struct index_state *istate,
868struct strbuf *path,
869int pos)
870{
871struct cache_entry *ce = istate->cache[pos];
872
873if (!S_ISSPARSEDIR(ce->ce_mode))
874BUG("directory '%s' is present in index, but not sparse",
875path->buf);
876}
877
878/*
879* Returns:
880* 0 - Verification completed.
881* 1 - Restart verification - a call to ensure_full_index() freed the cache
882* tree that is being verified and verification needs to be restarted from
883* the new toplevel cache tree.
884*/
/*
 * Verify one cache_tree node (and, depth-first, its subtrees) by
 * re-hashing the tree content implied by the index entries and
 * comparing against the recorded OID. See the comment above for the
 * meaning of the 0/1 return values.
 */
static int verify_one(struct repository *r,
		      struct index_state *istate,
		      struct cache_tree *it,
		      struct strbuf *path)
{
	int i, pos, len = path->len;
	struct strbuf tree_buf = STRBUF_INIT;
	struct object_id new_oid;

	/* verify all subtrees first */
	for (i = 0; i < it->subtree_nr; i++) {
		strbuf_addf(path, "%s/", it->down[i]->name);
		if (verify_one(r, istate, it->down[i]->cache_tree, path))
			return 1;
		strbuf_setlen(path, len);
	}

	if (it->entry_count < 0 ||
	    /* no verification on tests (t7003) that replace trees */
	    lookup_replace_object(r, &it->oid) != &it->oid)
		return 0;

	if (path->len) {
		/*
		 * If the index is sparse and the cache tree is not
		 * index_name_pos() may trigger ensure_full_index() which will
		 * free the tree that is being verified.
		 */
		int is_sparse = istate->sparse_index;
		pos = index_name_pos(istate, path->buf, path->len);
		if (is_sparse && !istate->sparse_index)
			return 1;	/* caller must restart from the new root */

		if (pos >= 0) {
			/* found as an index entry: must be a sparse directory */
			verify_one_sparse(istate, path, pos);
			return 0;
		}

		pos = -pos - 1;	/* first index entry under this directory */
	} else {
		pos = 0;
	}

	/* rebuild the tree payload from the covered index entries */
	i = 0;
	while (i < it->entry_count) {
		struct cache_entry *ce = istate->cache[pos + i];
		const char *slash;
		struct cache_tree_sub *sub = NULL;
		const struct object_id *oid;
		const char *name;
		unsigned mode;
		int entlen;

		if (ce->ce_flags & (CE_STAGEMASK | CE_INTENT_TO_ADD | CE_REMOVE))
			BUG("%s with flags 0x%x should not be in cache-tree",
			    ce->name, ce->ce_flags);
		name = ce->name + path->len;
		slash = strchr(name, '/');
		if (slash) {
			/* a subdirectory: one entry for its whole run */
			entlen = slash - name;
			sub = find_subtree(it, ce->name + path->len, entlen, 0);
			if (!sub || sub->cache_tree->entry_count < 0)
				BUG("bad subtree '%.*s'", entlen, name);
			oid = &sub->cache_tree->oid;
			mode = S_IFDIR;
			i += sub->cache_tree->entry_count;
		} else {
			oid = &ce->oid;
			mode = ce->ce_mode;
			entlen = ce_namelen(ce) - path->len;
			i++;
		}
		strbuf_addf(&tree_buf, "%o %.*s%c", mode, entlen, name, '\0');
		strbuf_add(&tree_buf, oid->hash, r->hash_algo->rawsz);
	}
	hash_object_file(r->hash_algo, tree_buf.buf, tree_buf.len, OBJ_TREE,
			 &new_oid);
	if (!oideq(&new_oid, &it->oid))
		BUG("cache-tree for path %.*s does not match. "
		    "Expected %s got %s", len, path->buf,
		    oid_to_hex(&new_oid), oid_to_hex(&it->oid));
	strbuf_setlen(path, len);
	strbuf_release(&tree_buf);
	return 0;
}
969
970void cache_tree_verify(struct repository *r, struct index_state *istate)
971{
972struct strbuf path = STRBUF_INIT;
973
974if (!istate->cache_tree)
975return;
976if (verify_one(r, istate, istate->cache_tree, &path)) {
977strbuf_reset(&path);
978if (verify_one(r, istate, istate->cache_tree, &path))
979BUG("ensure_full_index() called twice while verifying cache tree");
980}
981strbuf_release(&path);
982}
983