/*
 * GIT - The information manager from hell
 *
 * Copyright (C) Linus Torvalds, 2005
 */

#define USE_THE_REPOSITORY_VARIABLE

#include "git-compat-util.h"
#include "bulk-checkin.h"
#include "config.h"
#include "date.h"
#include "diff.h"
#include "diffcore.h"
#include "hex.h"
#include "tempfile.h"
#include "lockfile.h"
#include "cache-tree.h"
#include "refs.h"
#include "dir.h"
#include "object-file.h"
#include "object-store-ll.h"
#include "oid-array.h"
#include "tree.h"
#include "commit.h"
#include "environment.h"
#include "gettext.h"
#include "mem-pool.h"
#include "name-hash.h"
#include "object-name.h"
#include "path.h"
#include "preload-index.h"
#include "read-cache.h"
#include "resolve-undo.h"
#include "revision.h"
#include "strbuf.h"
#include "trace2.h"
#include "varint.h"
#include "split-index.h"
#include "symlinks.h"
#include "utf8.h"
#include "fsmonitor.h"
#include "thread-utils.h"
#include "progress.h"
#include "sparse-index.h"
#include "csum-file.h"
#include "promisor-remote.h"
#include "hook.h"

/* Mask for the name length in ce_flags in the on-disk index */

#define CE_NAMEMASK (0x0fff)

/* Index extensions.
 *
 * The first letter should be 'A'..'Z' for extensions that are not
 * necessary for a correct operation (i.e. optimization data).
 * When new extensions are added that _need_ to be understood in
 * order to correctly interpret the index file, pick a character that
 * is outside the range, to cause the reader to abort.
 */

#define CACHE_EXT(s) ( (s[0]<<24)|(s[1]<<16)|(s[2]<<8)|(s[3]) )
#define CACHE_EXT_TREE 0x54524545	/* "TREE" */
#define CACHE_EXT_RESOLVE_UNDO 0x52455543 /* "REUC" */
#define CACHE_EXT_LINK 0x6c696e6b	/* "link" */
#define CACHE_EXT_UNTRACKED 0x554E5452	/* "UNTR" */
#define CACHE_EXT_FSMONITOR 0x46534D4E	/* "FSMN" */
#define CACHE_EXT_ENDOFINDEXENTRIES 0x454F4945	/* "EOIE" */
#define CACHE_EXT_INDEXENTRYOFFSETTABLE 0x49454F54 /* "IEOT" */
#define CACHE_EXT_SPARSE_DIRECTORIES 0x73646972 /* "sdir" */

/* changes that can be kept in $GIT_DIR/index (basically all extensions) */
#define EXTMASK (RESOLVE_UNDO_CHANGED | CACHE_TREE_CHANGED | \
		 CE_ENTRY_ADDED | CE_ENTRY_REMOVED | CE_ENTRY_CHANGED | \
		 SPLIT_INDEX_ORDERED | UNTRACKED_CHANGED | FSMONITOR_CHANGED)


/*
 * This is an estimate of the pathname length in the index.  We use
 * this for V4 index files to guess the un-deltafied size of the index
 * in memory because of pathname deltafication.  This is not required
 * for V2/V3 index formats because their pathnames are not compressed.
 * If the initial amount of memory set aside is not sufficient, the
 * mem pool will allocate extra memory.
 */
#define CACHE_ENTRY_PATH_LENGTH 80

enum index_search_mode {
	NO_EXPAND_SPARSE = 0,
	EXPAND_SPARSE = 1
};

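/*
 * Note: the two helpers below carve cache entries out of a mem_pool and
 * flag them as pool-allocated, so that discard_cache_entry() can tell
 * them apart from individually malloc'd entries.
 */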
static inline struct cache_entry *mem_pool__ce_alloc(struct mem_pool *mem_pool, size_t len)
{
	struct cache_entry *ce;
	ce = mem_pool_alloc(mem_pool, cache_entry_size(len));
	ce->mem_pool_allocated = 1;
	return ce;
}

static inline struct cache_entry *mem_pool__ce_calloc(struct mem_pool *mem_pool, size_t len)
{
	struct cache_entry *ce;
	ce = mem_pool_calloc(mem_pool, 1, cache_entry_size(len));
	ce->mem_pool_allocated = 1;
	return ce;
}

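/*
 * Note: pick the mem pool that cache entries for this index should come
 * from; with a split index that is the shared base index's pool.  The
 * pool is created lazily on first use.
 */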
static struct mem_pool *find_mem_pool(struct index_state *istate)
{
	struct mem_pool **pool_ptr;

	if (istate->split_index && istate->split_index->base)
		pool_ptr = &istate->split_index->base->ce_mem_pool;
	else
		pool_ptr = &istate->ce_mem_pool;

	if (!*pool_ptr) {
		*pool_ptr = xmalloc(sizeof(**pool_ptr));
		mem_pool_init(*pool_ptr, 0);
	}

	return *pool_ptr;
}

static const char *alternate_index_output;

static void set_index_entry(struct index_state *istate, int nr, struct cache_entry *ce)
{
	if (S_ISSPARSEDIR(ce->ce_mode))
		istate->sparse_index = INDEX_COLLAPSED;

	istate->cache[nr] = ce;
	add_name_hash(istate, ce);
}

static void replace_index_entry(struct index_state *istate, int nr, struct cache_entry *ce)
{
	struct cache_entry *old = istate->cache[nr];

	replace_index_entry_in_base(istate, old, ce);
	remove_name_hash(istate, old);
	discard_cache_entry(old);
	ce->ce_flags &= ~CE_HASHED;
	set_index_entry(istate, nr, ce);
	ce->ce_flags |= CE_UPDATE_IN_BASE;
	mark_fsmonitor_invalid(istate, ce);
	istate->cache_changed |= CE_ENTRY_CHANGED;
}

void rename_index_entry_at(struct index_state *istate, int nr, const char *new_name)
{
	struct cache_entry *old_entry = istate->cache[nr], *new_entry, *refreshed;
	int namelen = strlen(new_name);

	new_entry = make_empty_cache_entry(istate, namelen);
	copy_cache_entry(new_entry, old_entry);
	new_entry->ce_flags &= ~CE_HASHED;
	new_entry->ce_namelen = namelen;
	new_entry->index = 0;
	memcpy(new_entry->name, new_name, namelen + 1);

	cache_tree_invalidate_path(istate, old_entry->name);
	untracked_cache_remove_from_index(istate, old_entry->name);
	remove_index_entry_at(istate, nr);

	/*
	 * Refresh the new index entry. Using 'refresh_cache_entry' ensures
	 * we only update stat info if the entry is otherwise up-to-date (i.e.,
	 * the contents/mode haven't changed). This ensures that we reflect the
	 * 'ctime' of the rename in the index without (incorrectly) updating
	 * the cached stat info to reflect unstaged changes on disk.
	 */
	refreshed = refresh_cache_entry(istate, new_entry, CE_MATCH_REFRESH);
	if (refreshed && refreshed != new_entry) {
		add_index_entry(istate, refreshed, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE);
		discard_cache_entry(new_entry);
	} else
		add_index_entry(istate, new_entry, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE);
}

/*
 * This only updates the "non-critical" parts of the directory
 * cache, i.e. the parts that aren't tracked by GIT, and are only
 * used to validate the cache.
 */
void fill_stat_cache_info(struct index_state *istate, struct cache_entry *ce, struct stat *st)
{
	fill_stat_data(&ce->ce_stat_data, st);

	if (assume_unchanged)
		ce->ce_flags |= CE_VALID;

	if (S_ISREG(st->st_mode)) {
		ce_mark_uptodate(ce);
		mark_fsmonitor_valid(istate, ce);
	}
}

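/*
 * Note: synthesize a plausible st_mode from the cached ce_mode, honoring
 * core.fileMode (trust_executable_bit) and core.symlinks (has_symlinks),
 * for use by fake_lstat() below.
 */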
static unsigned int st_mode_from_ce(const struct cache_entry *ce)
{
	extern int trust_executable_bit, has_symlinks;

	switch (ce->ce_mode & S_IFMT) {
	case S_IFLNK:
		return has_symlinks ? S_IFLNK : (S_IFREG | 0644);
	case S_IFREG:
		return (ce->ce_mode & (trust_executable_bit ? 0755 : 0644)) | S_IFREG;
	case S_IFGITLINK:
		return S_IFDIR | 0755;
	case S_IFDIR:
		return ce->ce_mode;
	default:
		BUG("unsupported ce_mode: %o", ce->ce_mode);
	}
}

int fake_lstat(const struct cache_entry *ce, struct stat *st)
{
	fake_lstat_data(&ce->ce_stat_data, st);
	st->st_mode = st_mode_from_ce(ce);

	/* always succeed as lstat() replacement */
	return 0;
}

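/*
 * Note: the ce_compare_*() helpers below go back to the worktree (and the
 * object database) and return non-zero when the content no longer matches
 * the object recorded in the cache entry; they are the slow path used by
 * ce_modified_check_fs().
 */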
static int ce_compare_data(struct index_state *istate,
			   const struct cache_entry *ce,
			   struct stat *st)
{
	int match = -1;
	int fd = git_open_cloexec(ce->name, O_RDONLY);

	if (fd >= 0) {
		struct object_id oid;
		if (!index_fd(istate, &oid, fd, st, OBJ_BLOB, ce->name, 0))
			match = !oideq(&oid, &ce->oid);
		/* index_fd() closed the file descriptor already */
	}
	return match;
}

static int ce_compare_link(const struct cache_entry *ce, size_t expected_size)
{
	int match = -1;
	void *buffer;
	unsigned long size;
	enum object_type type;
	struct strbuf sb = STRBUF_INIT;

	if (strbuf_readlink(&sb, ce->name, expected_size))
		return -1;

	buffer = repo_read_object_file(the_repository, &ce->oid, &type, &size);
	if (buffer) {
		if (size == sb.len)
			match = memcmp(buffer, sb.buf, size);
		free(buffer);
	}
	strbuf_release(&sb);
	return match;
}

static int ce_compare_gitlink(const struct cache_entry *ce)
{
	struct object_id oid;

	/*
	 * We don't actually require that the .git directory
	 * under GITLINK directory be a valid git directory. It
	 * might even be missing (in case nobody populated that
	 * sub-project).
	 *
	 * If so, we consider it always to match.
	 */
	if (repo_resolve_gitlink_ref(the_repository, ce->name,
				     "HEAD", &oid) < 0)
		return 0;
	return !oideq(&oid, &ce->oid);
}

static int ce_modified_check_fs(struct index_state *istate,
				const struct cache_entry *ce,
				struct stat *st)
{
	switch (st->st_mode & S_IFMT) {
	case S_IFREG:
		if (ce_compare_data(istate, ce, st))
			return DATA_CHANGED;
		break;
	case S_IFLNK:
		if (ce_compare_link(ce, xsize_t(st->st_size)))
			return DATA_CHANGED;
		break;
	case S_IFDIR:
		if (S_ISGITLINK(ce->ce_mode))
			return ce_compare_gitlink(ce) ? DATA_CHANGED : 0;
		/* else fallthrough */
	default:
		return TYPE_CHANGED;
	}
	return 0;
}

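/*
 * Note: compare only the cached stat data against 'st' and report which
 * aspects differ (TYPE_CHANGED, MODE_CHANGED, DATA_CHANGED, ...); no file
 * content is read here.
 */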
static int ce_match_stat_basic(const struct cache_entry *ce, struct stat *st)
{
	unsigned int changed = 0;

	if (ce->ce_flags & CE_REMOVE)
		return MODE_CHANGED | DATA_CHANGED | TYPE_CHANGED;

	switch (ce->ce_mode & S_IFMT) {
	case S_IFREG:
		changed |= !S_ISREG(st->st_mode) ? TYPE_CHANGED : 0;
		/* We consider only the owner x bit to be relevant for
		 * "mode changes"
		 */
		if (trust_executable_bit &&
		    (0100 & (ce->ce_mode ^ st->st_mode)))
			changed |= MODE_CHANGED;
		break;
	case S_IFLNK:
		if (!S_ISLNK(st->st_mode) &&
		    (has_symlinks || !S_ISREG(st->st_mode)))
			changed |= TYPE_CHANGED;
		break;
	case S_IFGITLINK:
		/* We ignore most of the st_xxx fields for gitlinks */
		if (!S_ISDIR(st->st_mode))
			changed |= TYPE_CHANGED;
		else if (ce_compare_gitlink(ce))
			changed |= DATA_CHANGED;
		return changed;
	default:
		BUG("unsupported ce_mode: %o", ce->ce_mode);
	}

	changed |= match_stat_data(&ce->ce_stat_data, st);

	/* Racily smudged entry? */
	if (!ce->ce_stat_data.sd_size) {
		if (!is_empty_blob_oid(&ce->oid, the_repository->hash_algo))
			changed |= DATA_CHANGED;
	}

	return changed;
}

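/*
 * Note: a stat entry is "racy" when its mtime is not older than the index
 * file's own timestamp; the file may have been modified again within the
 * same timestamp granularity, so apparently-clean stat data cannot be
 * trusted and the content has to be re-checked.
 */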
static int is_racy_stat(const struct index_state *istate,
			const struct stat_data *sd)
{
	return (istate->timestamp.sec &&
#ifdef USE_NSEC
		 /* nanosecond timestamped files can also be racy! */
		(istate->timestamp.sec < sd->sd_mtime.sec ||
		 (istate->timestamp.sec == sd->sd_mtime.sec &&
		  istate->timestamp.nsec <= sd->sd_mtime.nsec))
#else
		istate->timestamp.sec <= sd->sd_mtime.sec
#endif
		);
}

int is_racy_timestamp(const struct index_state *istate,
		      const struct cache_entry *ce)
{
	return (!S_ISGITLINK(ce->ce_mode) &&
		is_racy_stat(istate, &ce->ce_stat_data));
}

int match_stat_data_racy(const struct index_state *istate,
			 const struct stat_data *sd, struct stat *st)
{
	if (is_racy_stat(istate, sd))
		return MTIME_CHANGED;
	return match_stat_data(sd, st);
}

int ie_match_stat(struct index_state *istate,
		  const struct cache_entry *ce, struct stat *st,
		  unsigned int options)
{
	unsigned int changed;
	int ignore_valid = options & CE_MATCH_IGNORE_VALID;
	int ignore_skip_worktree = options & CE_MATCH_IGNORE_SKIP_WORKTREE;
	int assume_racy_is_modified = options & CE_MATCH_RACY_IS_DIRTY;
	int ignore_fsmonitor = options & CE_MATCH_IGNORE_FSMONITOR;

	if (!ignore_fsmonitor)
		refresh_fsmonitor(istate);
	/*
	 * If it's marked as always valid in the index, it's
	 * valid whatever the checked-out copy says.
	 *
	 * skip-worktree has the same effect with higher precedence
	 */
	if (!ignore_skip_worktree && ce_skip_worktree(ce))
		return 0;
	if (!ignore_valid && (ce->ce_flags & CE_VALID))
		return 0;
	if (!ignore_fsmonitor && (ce->ce_flags & CE_FSMONITOR_VALID))
		return 0;

	/*
	 * Intent-to-add entries have not been added, so the index entry
	 * by definition never matches what is in the work tree until it
	 * actually gets added.
	 */
	if (ce_intent_to_add(ce))
		return DATA_CHANGED | TYPE_CHANGED | MODE_CHANGED;

	changed = ce_match_stat_basic(ce, st);

	/*
	 * Within 1 second of this sequence:
	 * 	echo xyzzy >file && git-update-index --add file
	 * running this command:
	 * 	echo frotz >file
	 * would give a falsely clean cache entry.  The mtime and
	 * length match the cache, and other stat fields do not change.
	 *
	 * We could detect this at update-index time (the cache entry
	 * being registered/updated records the same time as "now")
	 * and delay the return from git-update-index, but that would
	 * effectively mean we can make at most one commit per second,
	 * which is not acceptable.  Instead, we check cache entries
	 * whose mtime are the same as the index file timestamp more
	 * carefully than others.
	 */
	if (!changed && is_racy_timestamp(istate, ce)) {
		if (assume_racy_is_modified)
			changed |= DATA_CHANGED;
		else
			changed |= ce_modified_check_fs(istate, ce, st);
	}

	return changed;
}

int ie_modified(struct index_state *istate,
		const struct cache_entry *ce,
		struct stat *st, unsigned int options)
{
	int changed, changed_fs;

	changed = ie_match_stat(istate, ce, st, options);
	if (!changed)
		return 0;
	/*
	 * If the mode or type has changed, there's no point in trying
	 * to refresh the entry - it's not going to match
	 */
	if (changed & (MODE_CHANGED | TYPE_CHANGED))
		return changed;

	/*
	 * Immediately after read-tree or update-index --cacheinfo,
	 * the length field is zero, as we have never even read the
	 * lstat(2) information once, and we cannot trust DATA_CHANGED
	 * returned by ie_match_stat() which in turn was returned by
	 * ce_match_stat_basic() to signal that the filesize of the
	 * blob changed.  We have to actually go to the filesystem to
	 * see if the contents match, and if so, should answer "unchanged".
	 *
	 * The logic does not apply to gitlinks, as ce_match_stat_basic()
	 * already has checked the actual HEAD from the filesystem in the
	 * subproject.  If ie_match_stat() already said it is different,
	 * then we know it is.
	 */
	if ((changed & DATA_CHANGED) &&
	    (S_ISGITLINK(ce->ce_mode) || ce->ce_stat_data.sd_size != 0))
		return changed;

	changed_fs = ce_modified_check_fs(istate, ce, st);
	if (changed_fs)
		return changed | changed_fs;
	return 0;
}

static int cache_name_stage_compare(const char *name1, int len1, int stage1,
				    const char *name2, int len2, int stage2)
{
	int cmp;

	cmp = name_compare(name1, len1, name2, len2);
	if (cmp)
		return cmp;

	if (stage1 < stage2)
		return -1;
	if (stage1 > stage2)
		return 1;
	return 0;
}

int cmp_cache_name_compare(const void *a_, const void *b_)
{
	const struct cache_entry *ce1, *ce2;

	ce1 = *((const struct cache_entry **)a_);
	ce2 = *((const struct cache_entry **)b_);
	return cache_name_stage_compare(ce1->name, ce1->ce_namelen, ce_stage(ce1),
					ce2->name, ce2->ce_namelen, ce_stage(ce2));
}

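/*
 * Note: binary search for (name, stage) in the sorted cache.  A hit
 * returns the entry's position; a miss returns -insertion_pos - 1 so the
 * caller can recover where the entry would be inserted.  In EXPAND_SPARSE
 * mode, hitting a sparse-directory ancestor expands the full index and
 * retries the search.
 */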
static int index_name_stage_pos(struct index_state *istate,
				const char *name, int namelen,
				int stage,
				enum index_search_mode search_mode)
{
	int first, last;

	first = 0;
	last = istate->cache_nr;
	while (last > first) {
		int next = first + ((last - first) >> 1);
		struct cache_entry *ce = istate->cache[next];
		int cmp = cache_name_stage_compare(name, namelen, stage, ce->name, ce_namelen(ce), ce_stage(ce));
		if (!cmp)
			return next;
		if (cmp < 0) {
			last = next;
			continue;
		}
		first = next+1;
	}

	if (search_mode == EXPAND_SPARSE && istate->sparse_index &&
	    first > 0) {
		/* Note: first <= istate->cache_nr */
		struct cache_entry *ce = istate->cache[first - 1];

		/*
		 * If we are in a sparse-index _and_ the entry before the
		 * insertion position is a sparse-directory entry that is
		 * an ancestor of 'name', then we need to expand the index
		 * and search again. This will only trigger once, because
		 * thereafter the index is fully expanded.
		 */
		if (S_ISSPARSEDIR(ce->ce_mode) &&
		    ce_namelen(ce) < namelen &&
		    !strncmp(name, ce->name, ce_namelen(ce))) {
			ensure_full_index(istate);
			return index_name_stage_pos(istate, name, namelen, stage, search_mode);
		}
	}

	return -first-1;
}

int index_name_pos(struct index_state *istate, const char *name, int namelen)
{
	return index_name_stage_pos(istate, name, namelen, 0, EXPAND_SPARSE);
}

int index_name_pos_sparse(struct index_state *istate, const char *name, int namelen)
{
	return index_name_stage_pos(istate, name, namelen, 0, NO_EXPAND_SPARSE);
}

int index_entry_exists(struct index_state *istate, const char *name, int namelen)
{
	return index_name_stage_pos(istate, name, namelen, 0, NO_EXPAND_SPARSE) >= 0;
}

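/*
 * Note: returns 1 when later entries were shifted down into 'pos' and 0
 * when the removed entry was the last one, so callers iterating over the
 * array know whether 'pos' now holds a different entry.
 */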
int remove_index_entry_at(struct index_state *istate, int pos)
{
	struct cache_entry *ce = istate->cache[pos];

	record_resolve_undo(istate, ce);
	remove_name_hash(istate, ce);
	save_or_free_index_entry(istate, ce);
	istate->cache_changed |= CE_ENTRY_REMOVED;
	istate->cache_nr--;
	if (pos >= istate->cache_nr)
		return 0;
	MOVE_ARRAY(istate->cache + pos, istate->cache + pos + 1,
		   istate->cache_nr - pos);
	return 1;
}

/*
 * Remove all cache entries marked for removal, that is where
 * CE_REMOVE is set in ce_flags.  This is much more efficient than
 * calling remove_index_entry_at() for each entry to be removed.
 */
void remove_marked_cache_entries(struct index_state *istate, int invalidate)
{
	struct cache_entry **ce_array = istate->cache;
	unsigned int i, j;

	for (i = j = 0; i < istate->cache_nr; i++) {
		if (ce_array[i]->ce_flags & CE_REMOVE) {
			if (invalidate) {
				cache_tree_invalidate_path(istate,
							   ce_array[i]->name);
				untracked_cache_remove_from_index(istate,
								  ce_array[i]->name);
			}
			remove_name_hash(istate, ce_array[i]);
			save_or_free_index_entry(istate, ce_array[i]);
		}
		else
			ce_array[j++] = ce_array[i];
	}
	if (j == istate->cache_nr)
		return;
	istate->cache_changed |= CE_ENTRY_REMOVED;
	istate->cache_nr = j;
}

int remove_file_from_index(struct index_state *istate, const char *path)
{
	int pos = index_name_pos(istate, path, strlen(path));
	if (pos < 0)
		pos = -pos-1;
	cache_tree_invalidate_path(istate, path);
	untracked_cache_remove_from_index(istate, path);
	while (pos < istate->cache_nr && !strcmp(istate->cache[pos]->name, path))
		remove_index_entry_at(istate, pos);
	return 0;
}

static int compare_name(struct cache_entry *ce, const char *path, int namelen)
{
	return namelen != ce_namelen(ce) || memcmp(path, ce->name, namelen);
}

static int index_name_pos_also_unmerged(struct index_state *istate,
	const char *path, int namelen)
{
	int pos = index_name_pos(istate, path, namelen);
	struct cache_entry *ce;

	if (pos >= 0)
		return pos;

	/* maybe unmerged? */
	pos = -1 - pos;
	if (pos >= istate->cache_nr ||
	    compare_name((ce = istate->cache[pos]), path, namelen))
		return -1;

	/* order of preference: stage 2, 1, 3 */
	if (ce_stage(ce) == 1 && pos + 1 < istate->cache_nr &&
	    ce_stage((ce = istate->cache[pos + 1])) == 2 &&
	    !compare_name(ce, path, namelen))
		pos++;
	return pos;
}

static int different_name(struct cache_entry *ce, struct cache_entry *alias)
{
	int len = ce_namelen(ce);
	return ce_namelen(alias) != len || memcmp(ce->name, alias->name, len);
}

/*
 * If we add a filename that aliases in the cache, we will use the
 * name that we already have - but we don't want to update the same
 * alias twice, because that implies that there were actually two
 * different files with aliasing names!
 *
 * So we use the CE_ADDED flag to verify that the alias was an old
 * one before we accept it as a replacement.
 */
static struct cache_entry *create_alias_ce(struct index_state *istate,
					   struct cache_entry *ce,
					   struct cache_entry *alias)
{
	int len;
	struct cache_entry *new_entry;

	if (alias->ce_flags & CE_ADDED)
		die(_("will not add file alias '%s' ('%s' already exists in index)"),
		    ce->name, alias->name);

	/* Ok, create the new entry using the name of the existing alias */
	len = ce_namelen(alias);
	new_entry = make_empty_cache_entry(istate, len);
	memcpy(new_entry->name, alias->name, len);
	copy_cache_entry(new_entry, ce);
	save_or_free_index_entry(istate, ce);
	return new_entry;
}

void set_object_name_for_intent_to_add_entry(struct cache_entry *ce)
{
	struct object_id oid;
	if (write_object_file("", 0, OBJ_BLOB, &oid))
		die(_("cannot create an empty blob in the object database"));
	oidcpy(&ce->oid, &oid);
}

int add_to_index(struct index_state *istate, const char *path, struct stat *st, int flags)
{
	int namelen, was_same;
	mode_t st_mode = st->st_mode;
	struct cache_entry *ce, *alias = NULL;
	unsigned ce_option = CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE|CE_MATCH_RACY_IS_DIRTY;
	int verbose = flags & (ADD_CACHE_VERBOSE | ADD_CACHE_PRETEND);
	int pretend = flags & ADD_CACHE_PRETEND;
	int intent_only = flags & ADD_CACHE_INTENT;
	int add_option = (ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE|
			  (intent_only ? ADD_CACHE_NEW_ONLY : 0));
	unsigned hash_flags = pretend ? 0 : HASH_WRITE_OBJECT;
	struct object_id oid;

	if (flags & ADD_CACHE_RENORMALIZE)
		hash_flags |= HASH_RENORMALIZE;

	if (!S_ISREG(st_mode) && !S_ISLNK(st_mode) && !S_ISDIR(st_mode))
		return error(_("%s: can only add regular files, symbolic links or git-directories"), path);

	namelen = strlen(path);
	if (S_ISDIR(st_mode)) {
		if (repo_resolve_gitlink_ref(the_repository, path, "HEAD", &oid) < 0)
			return error(_("'%s' does not have a commit checked out"), path);
		while (namelen && path[namelen-1] == '/')
			namelen--;
	}
	ce = make_empty_cache_entry(istate, namelen);
	memcpy(ce->name, path, namelen);
	ce->ce_namelen = namelen;
	if (!intent_only)
		fill_stat_cache_info(istate, ce, st);
	else
		ce->ce_flags |= CE_INTENT_TO_ADD;


	if (trust_executable_bit && has_symlinks) {
		ce->ce_mode = create_ce_mode(st_mode);
	} else {
		/* If there is an existing entry, pick the mode bits and type
		 * from it, otherwise assume unexecutable regular file.
		 */
		struct cache_entry *ent;
		int pos = index_name_pos_also_unmerged(istate, path, namelen);

		ent = (0 <= pos) ? istate->cache[pos] : NULL;
		ce->ce_mode = ce_mode_from_stat(ent, st_mode);
	}

	/* When core.ignorecase=true, determine if a directory of the same name but differing
	 * case already exists within the Git repository.  If it does, ensure the directory
	 * case of the file being added to the repository matches (is folded into) the existing
	 * entry's directory case.
	 */
	if (ignore_case) {
		adjust_dirname_case(istate, ce->name);
	}
	if (!(flags & ADD_CACHE_RENORMALIZE)) {
		alias = index_file_exists(istate, ce->name,
					  ce_namelen(ce), ignore_case);
		if (alias &&
		    !ce_stage(alias) &&
		    !ie_match_stat(istate, alias, st, ce_option)) {
			/* Nothing changed, really */
			if (!S_ISGITLINK(alias->ce_mode))
				ce_mark_uptodate(alias);
			alias->ce_flags |= CE_ADDED;

			discard_cache_entry(ce);
			return 0;
		}
	}
	if (!intent_only) {
		if (index_path(istate, &ce->oid, path, st, hash_flags)) {
			discard_cache_entry(ce);
			return error(_("unable to index file '%s'"), path);
		}
	} else
		set_object_name_for_intent_to_add_entry(ce);

	if (ignore_case && alias && different_name(ce, alias))
		ce = create_alias_ce(istate, ce, alias);
	ce->ce_flags |= CE_ADDED;

	/* It was suspected to be racily clean, but it turns out to be Ok */
	was_same = (alias &&
		    !ce_stage(alias) &&
		    oideq(&alias->oid, &ce->oid) &&
		    ce->ce_mode == alias->ce_mode);

	if (pretend)
		discard_cache_entry(ce);
	else if (add_index_entry(istate, ce, add_option)) {
		discard_cache_entry(ce);
		return error(_("unable to add '%s' to index"), path);
	}
	if (verbose && !was_same)
		printf("add '%s'\n", path);
	return 0;
}

int add_file_to_index(struct index_state *istate, const char *path, int flags)
{
	struct stat st;
	if (lstat(path, &st))
		die_errno(_("unable to stat '%s'"), path);
	return add_to_index(istate, path, &st, flags);
}

struct cache_entry *make_empty_cache_entry(struct index_state *istate, size_t len)
{
	return mem_pool__ce_calloc(find_mem_pool(istate), len);
}

struct cache_entry *make_empty_transient_cache_entry(size_t len,
						     struct mem_pool *ce_mem_pool)
{
	if (ce_mem_pool)
		return mem_pool__ce_calloc(ce_mem_pool, len);
	return xcalloc(1, cache_entry_size(len));
}

enum verify_path_result {
	PATH_OK,
	PATH_INVALID,
	PATH_DIR_WITH_SEP,
};

static enum verify_path_result verify_path_internal(const char *, unsigned);

int verify_path(const char *path, unsigned mode)
{
	return verify_path_internal(path, mode) == PATH_OK;
}

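/*
 * Note: build a fully populated cache entry for (mode, oid, path, stage)
 * and refresh its stat information; reports an error and returns NULL
 * when the path is invalid.
 */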
struct cache_entry *make_cache_entry(struct index_state *istate,
				     unsigned int mode,
				     const struct object_id *oid,
				     const char *path,
				     int stage,
				     unsigned int refresh_options)
{
	struct cache_entry *ce, *ret;
	int len;

	if (verify_path_internal(path, mode) == PATH_INVALID) {
		error(_("invalid path '%s'"), path);
		return NULL;
	}

	len = strlen(path);
	ce = make_empty_cache_entry(istate, len);

	oidcpy(&ce->oid, oid);
	memcpy(ce->name, path, len);
	ce->ce_flags = create_ce_flags(stage);
	ce->ce_namelen = len;
	ce->ce_mode = create_ce_mode(mode);

	ret = refresh_cache_entry(istate, ce, refresh_options);
	if (ret != ce)
		discard_cache_entry(ce);
	return ret;
}

struct cache_entry *make_transient_cache_entry(unsigned int mode,
					       const struct object_id *oid,
					       const char *path,
					       int stage,
					       struct mem_pool *ce_mem_pool)
{
	struct cache_entry *ce;
	int len;

	if (!verify_path(path, mode)) {
		error(_("invalid path '%s'"), path);
		return NULL;
	}

	len = strlen(path);
	ce = make_empty_transient_cache_entry(len, ce_mem_pool);

	oidcpy(&ce->oid, oid);
	memcpy(ce->name, path, len);
	ce->ce_flags = create_ce_flags(stage);
	ce->ce_namelen = len;
	ce->ce_mode = create_ce_mode(mode);

	return ce;
}

/*
 * Chmod an index entry with either +x or -x.
 *
 * Returns -1 if the chmod for the particular cache entry failed (if it's
 * not a regular file), -2 if an invalid flip argument is passed in, 0
 * otherwise.
 */
int chmod_index_entry(struct index_state *istate, struct cache_entry *ce,
		      char flip)
{
	if (!S_ISREG(ce->ce_mode))
		return -1;
	switch (flip) {
	case '+':
		ce->ce_mode |= 0111;
		break;
	case '-':
		ce->ce_mode &= ~0111;
		break;
	default:
		return -2;
	}
	cache_tree_invalidate_path(istate, ce->name);
	ce->ce_flags |= CE_UPDATE_IN_BASE;
	mark_fsmonitor_invalid(istate, ce);
	istate->cache_changed |= CE_ENTRY_CHANGED;

	return 0;
}

int ce_same_name(const struct cache_entry *a, const struct cache_entry *b)
{
	int len = ce_namelen(a);
	return ce_namelen(b) == len && !memcmp(a->name, b->name, len);
}

/*
 * We fundamentally don't like some paths: we don't want
 * dot or dot-dot anywhere, and for obvious reasons don't
 * want to recurse into ".git" either.
 *
 * Also, we don't want double slashes or slashes at the
 * end that can make pathnames ambiguous.
 */
static int verify_dotfile(const char *rest, unsigned mode)
{
	/*
	 * The first character was '.', but that
	 * has already been discarded, we now test
	 * the rest.
	 */

	/* "." is not allowed */
	if (*rest == '\0' || is_dir_sep(*rest))
		return 0;

	switch (*rest) {
	/*
	 * ".git" followed by NUL or slash is bad. Note that we match
	 * case-insensitively here, even if ignore_case is not set.
	 * This outlaws ".GIT" everywhere out of an abundance of caution,
	 * since there's really no good reason to allow it.
	 *
	 * Once we've seen ".git", we can also find ".gitmodules", etc (also
	 * case-insensitively).
	 */
	case 'g':
	case 'G':
		if (rest[1] != 'i' && rest[1] != 'I')
			break;
		if (rest[2] != 't' && rest[2] != 'T')
			break;
		if (rest[3] == '\0' || is_dir_sep(rest[3]))
			return 0;
		if (S_ISLNK(mode)) {
			rest += 3;
			if (skip_iprefix(rest, "modules", &rest) &&
			    (*rest == '\0' || is_dir_sep(*rest)))
				return 0;
		}
		break;
	case '.':
		if (rest[1] == '\0' || is_dir_sep(rest[1]))
			return 0;
	}
	return 1;
}

static enum verify_path_result verify_path_internal(const char *path,
						    unsigned mode)
{
	char c = 0;

	if (has_dos_drive_prefix(path))
		return PATH_INVALID;

	if (!is_valid_path(path))
		return PATH_INVALID;

	goto inside;
	for (;;) {
		if (!c)
			return PATH_OK;
		if (is_dir_sep(c)) {
inside:
			if (protect_hfs) {

				if (is_hfs_dotgit(path))
					return PATH_INVALID;
				if (S_ISLNK(mode)) {
					if (is_hfs_dotgitmodules(path))
						return PATH_INVALID;
				}
			}
			if (protect_ntfs) {
#if defined GIT_WINDOWS_NATIVE || defined __CYGWIN__
				if (c == '\\')
					return PATH_INVALID;
#endif
				if (is_ntfs_dotgit(path))
					return PATH_INVALID;
				if (S_ISLNK(mode)) {
					if (is_ntfs_dotgitmodules(path))
						return PATH_INVALID;
				}
			}

			c = *path++;
			if ((c == '.' && !verify_dotfile(path, mode)) ||
			    is_dir_sep(c))
				return PATH_INVALID;
			/*
			 * allow terminating directory separators for
			 * sparse directory entries.
			 */
			if (c == '\0')
				return S_ISDIR(mode) ? PATH_DIR_WITH_SEP :
						       PATH_INVALID;
		} else if (c == '\\' && protect_ntfs) {
			if (is_ntfs_dotgit(path))
				return PATH_INVALID;
			if (S_ISLNK(mode)) {
				if (is_ntfs_dotgitmodules(path))
					return PATH_INVALID;
			}
		}

		c = *path++;
	}
}

/*
 * Do we have another file that has the beginning components being a
 * proper superset of the name we're trying to add?
 */
static int has_file_name(struct index_state *istate,
			 const struct cache_entry *ce, int pos, int ok_to_replace)
{
	int retval = 0;
	int len = ce_namelen(ce);
	int stage = ce_stage(ce);
	const char *name = ce->name;

	while (pos < istate->cache_nr) {
		struct cache_entry *p = istate->cache[pos++];

		if (len >= ce_namelen(p))
			break;
		if (memcmp(name, p->name, len))
			break;
		if (ce_stage(p) != stage)
			continue;
		if (p->name[len] != '/')
			continue;
		if (p->ce_flags & CE_REMOVE)
			continue;
		retval = -1;
		if (!ok_to_replace)
			break;
		remove_index_entry_at(istate, --pos);
	}
	return retval;
}


/*
 * Like strcmp(), but also return the offset of the first change.
 * If strings are equal, return the length.
 */
int strcmp_offset(const char *s1, const char *s2, size_t *first_change)
{
	size_t k;

	if (!first_change)
		return strcmp(s1, s2);

	for (k = 0; s1[k] == s2[k]; k++)
		if (s1[k] == '\0')
			break;

	*first_change = k;
	return (unsigned char)s1[k] - (unsigned char)s2[k];
}

/*
 * Do we have another file with a pathname that is a proper
 * subset of the name we're trying to add?
 *
 * That is, is there another file in the index with a path
 * that matches a sub-directory in the given entry?
 */
static int has_dir_name(struct index_state *istate,
			const struct cache_entry *ce, int pos, int ok_to_replace)
{
	int retval = 0;
	int stage = ce_stage(ce);
	const char *name = ce->name;
	const char *slash = name + ce_namelen(ce);
	size_t len_eq_last;
	int cmp_last = 0;

	/*
	 * We are frequently called during an iteration on a sorted
	 * list of pathnames and while building a new index.  Therefore,
	 * there is a high probability that this entry will eventually
	 * be appended to the index, rather than inserted in the middle.
	 * If we can confirm that, we can avoid binary searches on the
	 * components of the pathname.
	 *
	 * Compare the entry's full path with the last path in the index.
	 */
	if (istate->cache_nr > 0) {
		cmp_last = strcmp_offset(name,
			istate->cache[istate->cache_nr - 1]->name,
			&len_eq_last);
		if (cmp_last > 0) {
			if (name[len_eq_last] != '/') {
				/*
				 * The entry sorts AFTER the last one in the
				 * index.
				 *
				 * If there were a conflict with "file", then our
				 * name would start with "file/" and the last index
				 * entry would start with "file" but not "file/".
				 *
				 * The next character after common prefix is
				 * not '/', so there can be no conflict.
				 */
				return retval;
			} else {
				/*
				 * The entry sorts AFTER the last one in the
				 * index, and the next character after common
				 * prefix is '/'.
				 *
				 * Either the last index entry is a file in
				 * conflict with this entry, or it has a name
				 * which sorts between this entry and the
				 * potential conflicting file.
				 *
				 * In both cases, we fall through to the loop
				 * below and let the regular search code handle it.
				 */
			}
		} else if (cmp_last == 0) {
			/*
			 * The entry exactly matches the last one in the
			 * index, but because of multiple stage and CE_REMOVE
			 * items, we fall through and let the regular search
			 * code handle it.
			 */
		}
	}

	for (;;) {
		size_t len;

		for (;;) {
			if (*--slash == '/')
				break;
			if (slash <= ce->name)
				return retval;
		}
		len = slash - name;

		pos = index_name_stage_pos(istate, name, len, stage, EXPAND_SPARSE);
		if (pos >= 0) {
			/*
			 * Found one, but not so fast.  This could
			 * be a marker that says "I was here, but
			 * I am being removed".  Such an entry is
			 * not a part of the resulting tree, and
			 * it is Ok to have a directory at the same
			 * path.
			 */
			if (!(istate->cache[pos]->ce_flags & CE_REMOVE)) {
				retval = -1;
				if (!ok_to_replace)
					break;
				remove_index_entry_at(istate, pos);
				continue;
			}
		}
		else
			pos = -pos-1;

		/*
		 * Trivial optimization: if we find an entry that
		 * already matches the sub-directory, then we know
		 * we're ok, and we can exit.
		 */
		while (pos < istate->cache_nr) {
			struct cache_entry *p = istate->cache[pos];
			if ((ce_namelen(p) <= len) ||
			    (p->name[len] != '/') ||
			    memcmp(p->name, name, len))
				break; /* not our subdirectory */
			if (ce_stage(p) == stage && !(p->ce_flags & CE_REMOVE))
				/*
				 * p is at the same stage as our entry, and
				 * is a subdirectory of what we are looking
				 * at, so we cannot have conflicts at our
				 * level or anything shorter.
				 */
				return retval;
			pos++;
		}
	}
	return retval;
}

/* We may be in a situation where we already have path/file and path
 * is being added, or we already have path and path/file is being
 * added.  Either one would result in a nonsense tree that has path
 * twice when git-write-tree tries to write it out.  Prevent it.
 *
 * If ok-to-replace is specified, we remove the conflicting entries
 * from the cache so the caller should recompute the insert position.
 * When this happens, we return non-zero.
 */
static int check_file_directory_conflict(struct index_state *istate,
					 const struct cache_entry *ce,
					 int pos, int ok_to_replace)
{
	int retval;

	/*
	 * When ce is an "I am going away" entry, we allow it to be added
	 */
	if (ce->ce_flags & CE_REMOVE)
		return 0;

	/*
	 * We check if the path is a sub-path of a subsequent pathname
	 * first, since removing those will not change the position
	 * in the array.
	 */
	retval = has_file_name(istate, ce, pos, ok_to_replace);

	/*
	 * Then check if the path might have a clashing sub-directory
	 * before it.
	 */
	return retval + has_dir_name(istate, ce, pos, ok_to_replace);
}

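/*
 * Note: returns the intended insertion position + 1 when the caller still
 * has to insert the entry, 0 when an existing entry was replaced in
 * place, and a negative value on error.
 */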
static int add_index_entry_with_check(struct index_state *istate, struct cache_entry *ce, int option)
{
	int pos;
	int ok_to_add = option & ADD_CACHE_OK_TO_ADD;
	int ok_to_replace = option & ADD_CACHE_OK_TO_REPLACE;
	int skip_df_check = option & ADD_CACHE_SKIP_DFCHECK;
	int new_only = option & ADD_CACHE_NEW_ONLY;

	/*
	 * If this entry's path sorts after the last entry in the index,
	 * we can avoid searching for it.
	 */
	if (istate->cache_nr > 0 &&
	    strcmp(ce->name, istate->cache[istate->cache_nr - 1]->name) > 0)
		pos = index_pos_to_insert_pos(istate->cache_nr);
	else
		pos = index_name_stage_pos(istate, ce->name, ce_namelen(ce), ce_stage(ce), EXPAND_SPARSE);

	/*
	 * Cache tree path should be invalidated only after index_name_stage_pos,
	 * in case it expands a sparse index.
	 */
	if (!(option & ADD_CACHE_KEEP_CACHE_TREE))
		cache_tree_invalidate_path(istate, ce->name);

	/* existing match? Just replace it. */
	if (pos >= 0) {
		if (!new_only)
			replace_index_entry(istate, pos, ce);
		return 0;
	}
	pos = -pos-1;

	if (!(option & ADD_CACHE_KEEP_CACHE_TREE))
		untracked_cache_add_to_index(istate, ce->name);

	/*
	 * Inserting a merged entry ("stage 0") into the index
	 * will always replace all non-merged entries..
	 */
	if (pos < istate->cache_nr && ce_stage(ce) == 0) {
		while (ce_same_name(istate->cache[pos], ce)) {
			ok_to_add = 1;
			if (!remove_index_entry_at(istate, pos))
				break;
		}
	}

	if (!ok_to_add)
		return -1;
	if (verify_path_internal(ce->name, ce->ce_mode) == PATH_INVALID)
		return error(_("invalid path '%s'"), ce->name);

	if (!skip_df_check &&
	    check_file_directory_conflict(istate, ce, pos, ok_to_replace)) {
		if (!ok_to_replace)
			return error(_("'%s' appears as both a file and as a directory"),
				     ce->name);
		pos = index_name_stage_pos(istate, ce->name, ce_namelen(ce), ce_stage(ce), EXPAND_SPARSE);
		pos = -pos-1;
	}
	return pos + 1;
}

int add_index_entry(struct index_state *istate, struct cache_entry *ce, int option)
{
	int pos;

	if (option & ADD_CACHE_JUST_APPEND)
		pos = istate->cache_nr;
	else {
		int ret;
		ret = add_index_entry_with_check(istate, ce, option);
		if (ret <= 0)
			return ret;
		pos = ret - 1;
	}

	/* Make sure the array is big enough .. */
	ALLOC_GROW(istate->cache, istate->cache_nr + 1, istate->cache_alloc);

	/* Add it in.. */
	istate->cache_nr++;
	if (istate->cache_nr > pos + 1)
		MOVE_ARRAY(istate->cache + pos + 1, istate->cache + pos,
			   istate->cache_nr - pos - 1);
	set_index_entry(istate, pos, ce);
	istate->cache_changed |= CE_ENTRY_ADDED;
	return 0;
}

/*
 * "refresh" does not calculate a new sha1 file or bring the
 * cache up-to-date for mode/content changes. But what it
 * _does_ do is to "re-match" the stat information of a file
 * with the cache, so that you can refresh the cache for a
 * file that hasn't been changed but where the stat entry is
 * out of date.
 *
 * For example, you'd want to do this after doing a "git-read-tree",
 * to link up the stat cache details with the proper files.
 */
static struct cache_entry *refresh_cache_ent(struct index_state *istate,
					     struct cache_entry *ce,
					     unsigned int options, int *err,
					     int *changed_ret,
					     int *t2_did_lstat,
					     int *t2_did_scan)
{
	struct stat st;
	struct cache_entry *updated;
	int changed;
	int refresh = options & CE_MATCH_REFRESH;
	int ignore_valid = options & CE_MATCH_IGNORE_VALID;
	int ignore_skip_worktree = options & CE_MATCH_IGNORE_SKIP_WORKTREE;
	int ignore_missing = options & CE_MATCH_IGNORE_MISSING;
	int ignore_fsmonitor = options & CE_MATCH_IGNORE_FSMONITOR;

	if (!refresh || ce_uptodate(ce))
		return ce;

	if (!ignore_fsmonitor)
		refresh_fsmonitor(istate);
	/*
	 * CE_VALID or CE_SKIP_WORKTREE means the user promised us
	 * that the change to the work tree does not matter and told
	 * us not to worry.
	 */
	if (!ignore_skip_worktree && ce_skip_worktree(ce)) {
		ce_mark_uptodate(ce);
		return ce;
	}
	if (!ignore_valid && (ce->ce_flags & CE_VALID)) {
		ce_mark_uptodate(ce);
		return ce;
	}
	if (!ignore_fsmonitor && (ce->ce_flags & CE_FSMONITOR_VALID)) {
		ce_mark_uptodate(ce);
		return ce;
	}

	if (has_symlink_leading_path(ce->name, ce_namelen(ce))) {
		if (ignore_missing)
			return ce;
		if (err)
			*err = ENOENT;
		return NULL;
	}

	if (t2_did_lstat)
		*t2_did_lstat = 1;
	if (lstat(ce->name, &st) < 0) {
		if (ignore_missing && errno == ENOENT)
			return ce;
		if (err)
			*err = errno;
		return NULL;
	}

	changed = ie_match_stat(istate, ce, &st, options);
	if (changed_ret)
		*changed_ret = changed;
	if (!changed) {
		/*
		 * The path is unchanged.  If we were told to ignore
		 * valid bit, then we did the actual stat check and
		 * found that the entry is unmodified.  If the entry
		 * is not marked VALID, this is the place to mark it
		 * valid again, under "assume unchanged" mode.
		 */
		if (ignore_valid && assume_unchanged &&
		    !(ce->ce_flags & CE_VALID))
			; /* mark this one VALID again */
		else {
			/*
			 * We do not mark the index itself "modified"
			 * because CE_UPTODATE flag is in-core only;
			 * we are not going to write this change out.
			 */
			if (!S_ISGITLINK(ce->ce_mode)) {
				ce_mark_uptodate(ce);
				mark_fsmonitor_valid(istate, ce);
			}
			return ce;
		}
	}

	if (t2_did_scan)
		*t2_did_scan = 1;
	if (ie_modified(istate, ce, &st, options)) {
		if (err)
			*err = EINVAL;
		return NULL;
	}

	updated = make_empty_cache_entry(istate, ce_namelen(ce));
	copy_cache_entry(updated, ce);
	memcpy(updated->name, ce->name, ce->ce_namelen + 1);
	fill_stat_cache_info(istate, updated, &st);
	/*
	 * If ignore_valid is not set, we should leave CE_VALID bit
	 * alone.  Otherwise, paths marked with --no-assume-unchanged
	 * (i.e. things to be edited) will reacquire CE_VALID bit
	 * automatically, which is not really what we want.
	 */
	if (!ignore_valid && assume_unchanged &&
	    !(ce->ce_flags & CE_VALID))
		updated->ce_flags &= ~CE_VALID;

	/* istate->cache_changed is updated in the caller */
	return updated;
}

static void show_file(const char *fmt, const char *name, int in_porcelain,
		      int *first, const char *header_msg)
{
	if (in_porcelain && *first && header_msg) {
		printf("%s\n", header_msg);
		*first = 0;
	}
	printf(fmt, name);
}

int repo_refresh_and_write_index(struct repository *repo,
				 unsigned int refresh_flags,
				 unsigned int write_flags,
				 int gentle,
				 const struct pathspec *pathspec,
				 char *seen, const char *header_msg)
{
	struct lock_file lock_file = LOCK_INIT;
	int fd, ret = 0;

	fd = repo_hold_locked_index(repo, &lock_file, 0);
	if (!gentle && fd < 0)
		return -1;
	if (refresh_index(repo->index, refresh_flags, pathspec, seen, header_msg))
		ret = 1;
	if (0 <= fd && write_locked_index(repo->index, &lock_file, COMMIT_LOCK | write_flags))
		ret = -1;
	return ret;
}


int refresh_index(struct index_state *istate, unsigned int flags,
		  const struct pathspec *pathspec,
		  char *seen, const char *header_msg)
{
	int i;
	int has_errors = 0;
	int really = (flags & REFRESH_REALLY) != 0;
	int allow_unmerged = (flags & REFRESH_UNMERGED) != 0;
	int quiet = (flags & REFRESH_QUIET) != 0;
	int not_new = (flags & REFRESH_IGNORE_MISSING) != 0;
	int ignore_submodules = (flags & REFRESH_IGNORE_SUBMODULES) != 0;
	int ignore_skip_worktree = (flags & REFRESH_IGNORE_SKIP_WORKTREE) != 0;
	int first = 1;
	int in_porcelain = (flags & REFRESH_IN_PORCELAIN);
	unsigned int options = (CE_MATCH_REFRESH |
				(really ? CE_MATCH_IGNORE_VALID : 0) |
				(not_new ? CE_MATCH_IGNORE_MISSING : 0));
	const char *modified_fmt;
	const char *deleted_fmt;
	const char *typechange_fmt;
	const char *added_fmt;
	const char *unmerged_fmt;
	struct progress *progress = NULL;
	int t2_sum_lstat = 0;
	int t2_sum_scan = 0;

	if (flags & REFRESH_PROGRESS && isatty(2))
		progress = start_delayed_progress(_("Refresh index"),
						  istate->cache_nr);

	trace_performance_enter();
	modified_fmt = in_porcelain ? "M\t%s\n" : "%s: needs update\n";
	deleted_fmt = in_porcelain ? "D\t%s\n" : "%s: needs update\n";
	typechange_fmt = in_porcelain ? "T\t%s\n" : "%s: needs update\n";
	added_fmt = in_porcelain ? "A\t%s\n" : "%s: needs update\n";
	unmerged_fmt = in_porcelain ? "U\t%s\n" : "%s: needs merge\n";
	/*
	 * Use the multi-threaded preload_index() to refresh most of the
	 * cache entries quickly then in the single threaded loop below,
	 * we only have to do the special cases that are left.
	 */
	preload_index(istate, pathspec, 0);
	trace2_region_enter("index", "refresh", NULL);

	for (i = 0; i < istate->cache_nr; i++) {
		struct cache_entry *ce, *new_entry;
		int cache_errno = 0;
		int changed = 0;
		int filtered = 0;
		int t2_did_lstat = 0;
		int t2_did_scan = 0;

		ce = istate->cache[i];
		if (ignore_submodules && S_ISGITLINK(ce->ce_mode))
			continue;
		if (ignore_skip_worktree && ce_skip_worktree(ce))
			continue;

		/*
		 * If this entry is a sparse directory, then there isn't
		 * any stat() information to update. Ignore the entry.
		 */
		if (S_ISSPARSEDIR(ce->ce_mode))
			continue;

		if (pathspec && !ce_path_match(istate, ce, pathspec, seen))
			filtered = 1;

		if (ce_stage(ce)) {
			while ((i < istate->cache_nr) &&
			       !strcmp(istate->cache[i]->name, ce->name))
				i++;
			i--;
			if (allow_unmerged)
				continue;
			if (!filtered)
				show_file(unmerged_fmt, ce->name, in_porcelain,
					  &first, header_msg);
			has_errors = 1;
			continue;
		}

		if (filtered)
			continue;

		new_entry = refresh_cache_ent(istate, ce, options,
					      &cache_errno, &changed,
					      &t2_did_lstat, &t2_did_scan);
		t2_sum_lstat += t2_did_lstat;
		t2_sum_scan += t2_did_scan;
		if (new_entry == ce)
			continue;
		display_progress(progress, i);
		if (!new_entry) {
			const char *fmt;

			if (really && cache_errno == EINVAL) {
				/* If we are doing --really-refresh that
				 * means the index is not valid anymore.
				 */
				ce->ce_flags &= ~CE_VALID;
				ce->ce_flags |= CE_UPDATE_IN_BASE;
				mark_fsmonitor_invalid(istate, ce);
				istate->cache_changed |= CE_ENTRY_CHANGED;
			}
			if (quiet)
				continue;

			if (cache_errno == ENOENT)
				fmt = deleted_fmt;
			else if (ce_intent_to_add(ce))
				fmt = added_fmt; /* must be before other checks */
			else if (changed & TYPE_CHANGED)
				fmt = typechange_fmt;
			else
				fmt = modified_fmt;
			show_file(fmt,
				  ce->name, in_porcelain, &first, header_msg);
			has_errors = 1;
			continue;
		}

		replace_index_entry(istate, i, new_entry);
	}
	trace2_data_intmax("index", NULL, "refresh/sum_lstat", t2_sum_lstat);
	trace2_data_intmax("index", NULL, "refresh/sum_scan", t2_sum_scan);
	trace2_region_leave("index", "refresh", NULL);
	display_progress(progress, istate->cache_nr);
	stop_progress(&progress);
	trace_performance_leave("refresh index");
	return has_errors;
}

struct cache_entry *refresh_cache_entry(struct index_state *istate,
					struct cache_entry *ce,
					unsigned int options)
{
	return refresh_cache_ent(istate, ce, options, NULL, NULL, NULL, NULL);
}


/*****************************************************************
 * Index File I/O
 *****************************************************************/

#define INDEX_FORMAT_DEFAULT 3

static unsigned int get_index_format_default(struct repository *r)
{
	char *envversion = getenv("GIT_INDEX_VERSION");
	char *endp;
	unsigned int version = INDEX_FORMAT_DEFAULT;

	if (!envversion) {
		prepare_repo_settings(r);

		if (r->settings.index_version >= 0)
			version = r->settings.index_version;
		if (version < INDEX_FORMAT_LB || INDEX_FORMAT_UB < version) {
			warning(_("index.version set, but the value is invalid.\n"
				  "Using version %i"), INDEX_FORMAT_DEFAULT);
			return INDEX_FORMAT_DEFAULT;
		}
		return version;
	}

	version = strtoul(envversion, &endp, 10);
	if (*endp ||
	    version < INDEX_FORMAT_LB || INDEX_FORMAT_UB < version) {
		warning(_("GIT_INDEX_VERSION set, but the value is invalid.\n"
			  "Using version %i"), INDEX_FORMAT_DEFAULT);
		version = INDEX_FORMAT_DEFAULT;
	}
	return version;
}

/*
 * dev/ino/uid/gid/size are also just tracked to the low 32 bits
 * Again - this is just a (very strong in practice) heuristic that
 * the inode hasn't changed.
 *
 * We save the fields in big-endian order to allow using the
 * index file over NFS transparently.
 */
struct ondisk_cache_entry {
	struct cache_time ctime;
	struct cache_time mtime;
	uint32_t dev;
	uint32_t ino;
	uint32_t mode;
	uint32_t uid;
	uint32_t gid;
	uint32_t size;
	/*
	 * unsigned char hash[hashsz];
	 * uint16_t flags;
	 * if (flags & CE_EXTENDED)
	 *	uint16_t flags2;
	 */
	unsigned char data[GIT_MAX_RAWSZ + 2 * sizeof(uint16_t)];
	char name[FLEX_ARRAY];
};

/* These are only used for v3 or lower */
#define align_padding_size(size, len) ((size + (len) + 8) & ~7) - (size + len)
#define align_flex_name(STRUCT,len) ((offsetof(struct STRUCT,data) + (len) + 8) & ~7)
#define ondisk_cache_entry_size(len) align_flex_name(ondisk_cache_entry,len)
#define ondisk_data_size(flags, len) (the_hash_algo->rawsz + \
				      ((flags & CE_EXTENDED) ? 2 : 1) * sizeof(uint16_t) + len)
#define ondisk_data_size_max(len) (ondisk_data_size(CE_EXTENDED, len))
#define ondisk_ce_size(ce) (ondisk_cache_entry_size(ondisk_data_size((ce)->ce_flags, ce_namelen(ce))))

/* Allow fsck to force verification of the index checksum. */
int verify_index_checksum;

/* Allow fsck to force verification of the cache entry order. */
int verify_ce_order;

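/*
 * Note: validate the index header's signature and version and, when
 * checksum verification is enabled, recompute the trailing hash over the
 * file contents and compare it against the stored checksum.
 */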
static int verify_hdr(const struct cache_header *hdr, unsigned long size)
{
	git_hash_ctx c;
	unsigned char hash[GIT_MAX_RAWSZ];
	int hdr_version;
	unsigned char *start, *end;
	struct object_id oid;

	if (hdr->hdr_signature != htonl(CACHE_SIGNATURE))
		return error(_("bad signature 0x%08x"), hdr->hdr_signature);
	hdr_version = ntohl(hdr->hdr_version);
	if (hdr_version < INDEX_FORMAT_LB || INDEX_FORMAT_UB < hdr_version)
		return error(_("bad index version %d"), hdr_version);

	if (!verify_index_checksum)
		return 0;

	end = (unsigned char *)hdr + size;
	start = end - the_hash_algo->rawsz;
	oidread(&oid, start, the_repository->hash_algo);
	if (oideq(&oid, null_oid()))
		return 0;

	the_hash_algo->init_fn(&c);
	the_hash_algo->update_fn(&c, hdr, size - the_hash_algo->rawsz);
	the_hash_algo->final_fn(hash, &c);
	if (!hasheq(hash, start, the_repository->hash_algo))
		return error(_("bad index file sha1 signature"));
	return 0;
}

static int read_index_extension(struct index_state *istate,
				const char *ext, const char *data, unsigned long sz)
{
	switch (CACHE_EXT(ext)) {
	case CACHE_EXT_TREE:
		istate->cache_tree = cache_tree_read(data, sz);
		break;
	case CACHE_EXT_RESOLVE_UNDO:
		istate->resolve_undo = resolve_undo_read(data, sz);
		break;
	case CACHE_EXT_LINK:
		if (read_link_extension(istate, data, sz))
			return -1;
		break;
	case CACHE_EXT_UNTRACKED:
		istate->untracked = read_untracked_extension(data, sz);
		break;
	case CACHE_EXT_FSMONITOR:
		read_fsmonitor_extension(istate, data, sz);
		break;
	case CACHE_EXT_ENDOFINDEXENTRIES:
	case CACHE_EXT_INDEXENTRYOFFSETTABLE:
		/* already handled in do_read_index() */
		break;
	case CACHE_EXT_SPARSE_DIRECTORIES:
		/* no content, only an indicator */
		istate->sparse_index = INDEX_COLLAPSED;
		break;
	default:
		if (*ext < 'A' || 'Z' < *ext)
			return error(_("index uses %.4s extension, which we do not understand"),
				     ext);
		fprintf_ln(stderr, _("ignoring %.4s extension"), ext);
		break;
	}
	return 0;
}

/*
 * Parses the contents of the cache entry contained within the 'ondisk' buffer
 * into a new incore 'cache_entry'.
 *
 * Note that 'char *ondisk' may not be aligned to a 4-byte address interval in
 * index v4, so we cannot cast it to 'struct ondisk_cache_entry *' and access
 * its members. Instead, we use the byte offsets of members within the struct to
 * identify where 'get_be16()', 'get_be32()', and 'oidread()' (which can all
 * read from an unaligned memory buffer) should read from the 'ondisk' buffer
 * into the corresponding incore 'cache_entry' members.
 */
static struct cache_entry *create_from_disk(struct mem_pool *ce_mem_pool,
					    unsigned int version,
					    const char *ondisk,
					    unsigned long *ent_size,
					    const struct cache_entry *previous_ce)
{
	struct cache_entry *ce;
	size_t len;
	const char *name;
	const unsigned hashsz = the_hash_algo->rawsz;
	const char *flagsp = ondisk + offsetof(struct ondisk_cache_entry, data) + hashsz;
	unsigned int flags;
	size_t copy_len = 0;
	/*
	 * Adjacent cache entries tend to share the leading paths, so it makes
	 * sense to only store the differences in later entries.  In the v4
	 * on-disk format of the index, each on-disk cache entry stores the
	 * number of bytes to be stripped from the end of the previous name,
	 * and the bytes to append to the result, to come up with its name.
	 */
	int expand_name_field = version == 4;

	/* On-disk flags are just 16 bits */
	flags = get_be16(flagsp);
	len = flags & CE_NAMEMASK;

	if (flags & CE_EXTENDED) {
		int extended_flags;
		extended_flags = get_be16(flagsp + sizeof(uint16_t)) << 16;
		/* We do not yet understand any bit out of CE_EXTENDED_FLAGS */
		if (extended_flags & ~CE_EXTENDED_FLAGS)
			die(_("unknown index entry format 0x%08x"), extended_flags);
		flags |= extended_flags;
		name = (const char *)(flagsp + 2 * sizeof(uint16_t));
	}
	else
		name = (const char *)(flagsp + sizeof(uint16_t));

	if (expand_name_field) {
		const unsigned char *cp = (const unsigned char *)name;
		size_t strip_len, previous_len;

		/* If we're at the beginning of a block, ignore the previous name */
		strip_len = decode_varint(&cp);
		if (previous_ce) {
			previous_len = previous_ce->ce_namelen;
			if (previous_len < strip_len)
				die(_("malformed name field in the index, near path '%s'"),
				    previous_ce->name);
			copy_len = previous_len - strip_len;
		}
		name = (const char *)cp;
	}

	if (len == CE_NAMEMASK) {
		len = strlen(name);
		if (expand_name_field)
			len += copy_len;
	}

	ce = mem_pool__ce_alloc(ce_mem_pool, len);

	/*
	 * NEEDSWORK: using 'offsetof()' is cumbersome and should be replaced
	 * with something more akin to 'load_bitmap_entries_v1()'s use of
	 * 'read_be16'/'read_be32'. For consistency with the corresponding
	 * ondisk entry write function ('copy_cache_entry_to_ondisk()'), this
	 * should be done at the same time as removing references to
	 * 'ondisk_cache_entry' there.
	 */
	ce->ce_stat_data.sd_ctime.sec = get_be32(ondisk + offsetof(struct ondisk_cache_entry, ctime)
						 + offsetof(struct cache_time, sec));
	ce->ce_stat_data.sd_mtime.sec = get_be32(ondisk + offsetof(struct ondisk_cache_entry, mtime)
						 + offsetof(struct cache_time, sec));
	ce->ce_stat_data.sd_ctime.nsec = get_be32(ondisk + offsetof(struct ondisk_cache_entry, ctime)
						  + offsetof(struct cache_time, nsec));
	ce->ce_stat_data.sd_mtime.nsec = get_be32(ondisk + offsetof(struct ondisk_cache_entry, mtime)
						  + offsetof(struct cache_time, nsec));
	ce->ce_stat_data.sd_dev = get_be32(ondisk + offsetof(struct ondisk_cache_entry, dev));
	ce->ce_stat_data.sd_ino = get_be32(ondisk + offsetof(struct ondisk_cache_entry, ino));
	ce->ce_mode = get_be32(ondisk + offsetof(struct ondisk_cache_entry, mode));
	ce->ce_stat_data.sd_uid = get_be32(ondisk + offsetof(struct ondisk_cache_entry, uid));
	ce->ce_stat_data.sd_gid = get_be32(ondisk + offsetof(struct ondisk_cache_entry, gid));
	ce->ce_stat_data.sd_size = get_be32(ondisk + offsetof(struct ondisk_cache_entry, size));
	ce->ce_flags = flags & ~CE_NAMEMASK;
	ce->ce_namelen = len;
	ce->index = 0;
	oidread(&ce->oid, (const unsigned char *)ondisk + offsetof(struct ondisk_cache_entry, data),
		the_repository->hash_algo);

	if (expand_name_field) {
		if (copy_len)
			memcpy(ce->name, previous_ce->name, copy_len);
		memcpy(ce->name + copy_len, name, len + 1 - copy_len);
		*ent_size = (name - ((char *)ondisk)) + len + 1 - copy_len;
	} else {
		memcpy(ce->name, name, len + 1);
		*ent_size = ondisk_ce_size(ce);
	}
	return ce;
}

static void check_ce_order(struct index_state *istate)
{
	unsigned int i;

	if (!verify_ce_order)
		return;

	for (i = 1; i < istate->cache_nr; i++) {
		struct cache_entry *ce = istate->cache[i - 1];
		struct cache_entry *next_ce = istate->cache[i];
		int name_compare = strcmp(ce->name, next_ce->name);

		if (0 < name_compare)
			die(_("unordered stage entries in index"));
		if (!name_compare) {
			if (!ce_stage(ce))
				die(_("multiple stage entries for merged file '%s'"),
				    ce->name);
			if (ce_stage(ce) > ce_stage(next_ce))
				die(_("unordered stage entries for '%s'"),
				    ce->name);
		}
	}
}

static void tweak_untracked_cache(struct index_state *istate)
{
	struct repository *r = the_repository;

	prepare_repo_settings(r);

	switch (r->settings.core_untracked_cache) {
	case UNTRACKED_CACHE_REMOVE:
		remove_untracked_cache(istate);
		break;
	case UNTRACKED_CACHE_WRITE:
		add_untracked_cache(istate);
		break;
	case UNTRACKED_CACHE_KEEP:
		/*
		 * Either an explicit "core.untrackedCache=keep", the
		 * default if "core.untrackedCache" isn't configured,
		 * or a fallback on an unknown "core.untrackedCache"
		 * value.
		 */
		break;
	}
}

static void tweak_split_index(struct index_state *istate)
{
	switch (repo_config_get_split_index(the_repository)) {
	case -1: /* unset: do nothing */
		break;
	case 0: /* false */
		remove_split_index(istate);
		break;
	case 1: /* true */
		add_split_index(istate);
		break;
	default: /* unknown value: do nothing */
		break;
	}
}

static void post_read_index_from(struct index_state *istate)
{
	check_ce_order(istate);
	tweak_untracked_cache(istate);
	tweak_split_index(istate);
	tweak_fsmonitor(istate);
}

static size_t estimate_cache_size_from_compressed(unsigned int entries)
{
	return entries * (sizeof(struct cache_entry) + CACHE_ENTRY_PATH_LENGTH);
}

static size_t estimate_cache_size(size_t ondisk_size, unsigned int entries)
{
	long per_entry = sizeof(struct cache_entry) - sizeof(struct ondisk_cache_entry);

	/*
	 * Account for potential alignment differences.
	 */
	per_entry += align_padding_size(per_entry, 0);
	return ondisk_size + entries * per_entry;
}

struct index_entry_offset
{
	/* starting byte offset into index file, count of index entries in this block */
	int offset, nr;
};

struct index_entry_offset_table
{
	int nr;
	struct index_entry_offset entries[FLEX_ARRAY];
};

static struct index_entry_offset_table *read_ieot_extension(const char *mmap, size_t mmap_size, size_t offset);
static void write_ieot_extension(struct strbuf *sb, struct index_entry_offset_table *ieot);

static size_t read_eoie_extension(const char *mmap, size_t mmap_size);
static void write_eoie_extension(struct strbuf *sb, git_hash_ctx *eoie_context, size_t offset);

2004struct load_index_extensions2005{
2006pthread_t pthread;2007struct index_state *istate;2008const char *mmap;2009size_t mmap_size;2010unsigned long src_offset;2011};2012
2013static void *load_index_extensions(void *_data)2014{
2015struct load_index_extensions *p = _data;2016unsigned long src_offset = p->src_offset;2017
2018while (src_offset <= p->mmap_size - the_hash_algo->rawsz - 8) {2019/* After an array of active_nr index entries,2020* there can be an arbitrary number of extended
2021* sections, each of which is prefixed with an
2022* extension name (4 bytes) and a section length
2023* in 4-byte network byte order.
2024*/
2025uint32_t extsize = get_be32(p->mmap + src_offset + 4);2026if (read_index_extension(p->istate,2027p->mmap + src_offset,2028p->mmap + src_offset + 8,2029extsize) < 0) {2030munmap((void *)p->mmap, p->mmap_size);2031die(_("index file corrupt"));2032}2033src_offset += 8;2034src_offset += extsize;2035}2036
2037return NULL;2038}
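/*
 * Layout of one extension record as scanned above (illustrative):
 *
 *   bytes 0-3:  extension signature, e.g. "TREE" or "EOIE"
 *   bytes 4-7:  payload size, network byte order
 *   bytes 8-..: payload ("extsize" bytes)
 *
 * which is why each iteration advances src_offset by 8 + extsize.
 */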
2039
2040/*
2041* A helper function that will load the specified range of cache entries
2042* from the memory mapped file and add them to the given index.
2043*/
2044static unsigned long load_cache_entry_block(struct index_state *istate,2045struct mem_pool *ce_mem_pool, int offset, int nr, const char *mmap,2046unsigned long start_offset, const struct cache_entry *previous_ce)2047{
2048int i;2049unsigned long src_offset = start_offset;2050
2051for (i = offset; i < offset + nr; i++) {2052struct cache_entry *ce;2053unsigned long consumed;2054
2055ce = create_from_disk(ce_mem_pool, istate->version,2056mmap + src_offset,2057&consumed, previous_ce);2058set_index_entry(istate, i, ce);2059
2060src_offset += consumed;2061previous_ce = ce;2062}2063return src_offset - start_offset;2064}
2065
2066static unsigned long load_all_cache_entries(struct index_state *istate,2067const char *mmap, size_t mmap_size, unsigned long src_offset)2068{
2069unsigned long consumed;2070
2071istate->ce_mem_pool = xmalloc(sizeof(*istate->ce_mem_pool));2072if (istate->version == 4) {2073mem_pool_init(istate->ce_mem_pool,2074estimate_cache_size_from_compressed(istate->cache_nr));2075} else {2076mem_pool_init(istate->ce_mem_pool,2077estimate_cache_size(mmap_size, istate->cache_nr));2078}2079
2080consumed = load_cache_entry_block(istate, istate->ce_mem_pool,20810, istate->cache_nr, mmap, src_offset, NULL);2082return consumed;2083}
2084
2085/*
2086* Mostly randomly chosen maximum thread counts: we
2087* cap the parallelism to online_cpus() threads, and we want
2088* to have at least 10000 cache entries per thread for it to
2089* be worth starting a thread.
2090*/
2091
2092#define THREAD_COST (10000)2093
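/*
 * For example (illustrative numbers): with 35,000 cache entries
 * do_read_index() would ask for 35000 / THREAD_COST = 3 threads, and
 * with 250,000 entries on an 8-core machine it would be capped at 8
 * by online_cpus().
 */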
2094struct load_cache_entries_thread_data2095{
2096pthread_t pthread;2097struct index_state *istate;2098struct mem_pool *ce_mem_pool;2099int offset;2100const char *mmap;2101struct index_entry_offset_table *ieot;2102int ieot_start; /* starting index into the ieot array */2103int ieot_blocks; /* count of ieot entries to process */2104unsigned long consumed; /* return # of bytes in index file processed */2105};2106
2107/*
2108* A thread proc to run the load_cache_entries() computation
2109* across multiple background threads.
2110*/
2111static void *load_cache_entries_thread(void *_data)2112{
2113struct load_cache_entries_thread_data *p = _data;2114int i;2115
2116/* iterate across all ieot blocks assigned to this thread */2117for (i = p->ieot_start; i < p->ieot_start + p->ieot_blocks; i++) {2118p->consumed += load_cache_entry_block(p->istate, p->ce_mem_pool,2119p->offset, p->ieot->entries[i].nr, p->mmap, p->ieot->entries[i].offset, NULL);2120p->offset += p->ieot->entries[i].nr;2121}2122return NULL;2123}
2124
2125static unsigned long load_cache_entries_threaded(struct index_state *istate, const char *mmap, size_t mmap_size,2126int nr_threads, struct index_entry_offset_table *ieot)2127{
2128int i, offset, ieot_blocks, ieot_start, err;2129struct load_cache_entries_thread_data *data;2130unsigned long consumed = 0;2131
2132/* a little sanity checking */2133if (istate->name_hash_initialized)2134BUG("the name hash isn't thread safe");2135
2136istate->ce_mem_pool = xmalloc(sizeof(*istate->ce_mem_pool));2137mem_pool_init(istate->ce_mem_pool, 0);2138
2139/* ensure we have no more threads than we have blocks to process */2140if (nr_threads > ieot->nr)2141nr_threads = ieot->nr;2142CALLOC_ARRAY(data, nr_threads);2143
2144offset = ieot_start = 0;2145ieot_blocks = DIV_ROUND_UP(ieot->nr, nr_threads);2146for (i = 0; i < nr_threads; i++) {2147struct load_cache_entries_thread_data *p = &data[i];2148int nr, j;2149
2150if (ieot_start + ieot_blocks > ieot->nr)2151ieot_blocks = ieot->nr - ieot_start;2152
2153p->istate = istate;2154p->offset = offset;2155p->mmap = mmap;2156p->ieot = ieot;2157p->ieot_start = ieot_start;2158p->ieot_blocks = ieot_blocks;2159
2160/* create a mem_pool for each thread */2161nr = 0;2162for (j = p->ieot_start; j < p->ieot_start + p->ieot_blocks; j++)2163nr += p->ieot->entries[j].nr;2164p->ce_mem_pool = xmalloc(sizeof(*istate->ce_mem_pool));2165if (istate->version == 4) {2166mem_pool_init(p->ce_mem_pool,2167estimate_cache_size_from_compressed(nr));2168} else {2169mem_pool_init(p->ce_mem_pool,2170estimate_cache_size(mmap_size, nr));2171}2172
2173err = pthread_create(&p->pthread, NULL, load_cache_entries_thread, p);2174if (err)2175die(_("unable to create load_cache_entries thread: %s"), strerror(err));2176
2177/* increment by the number of cache entries in the ieot block being processed */2178for (j = 0; j < ieot_blocks; j++)2179offset += ieot->entries[ieot_start + j].nr;2180ieot_start += ieot_blocks;2181}2182
2183for (i = 0; i < nr_threads; i++) {2184struct load_cache_entries_thread_data *p = &data[i];2185
2186err = pthread_join(p->pthread, NULL);2187if (err)2188die(_("unable to join load_cache_entries thread: %s"), strerror(err));2189mem_pool_combine(istate->ce_mem_pool, p->ce_mem_pool);2190consumed += p->consumed;2191}2192
2193free(data);2194
2195return consumed;2196}
2197
2198static void set_new_index_sparsity(struct index_state *istate)2199{
2200/*2201* If the index's repo exists, mark it sparse according to
2202* repo settings.
2203*/
2204prepare_repo_settings(istate->repo);2205if (!istate->repo->settings.command_requires_full_index &&2206is_sparse_index_allowed(istate, 0))2207istate->sparse_index = 1;2208}
2209
2210/* remember to discard_cache() before reading a different cache! */
2211int do_read_index(struct index_state *istate, const char *path, int must_exist)2212{
2213int fd;2214struct stat st;2215unsigned long src_offset;2216const struct cache_header *hdr;2217const char *mmap;2218size_t mmap_size;2219struct load_index_extensions p;2220size_t extension_offset = 0;2221int nr_threads, cpus;2222struct index_entry_offset_table *ieot = NULL;2223
2224if (istate->initialized)2225return istate->cache_nr;2226
2227istate->timestamp.sec = 0;2228istate->timestamp.nsec = 0;2229fd = open(path, O_RDONLY);2230if (fd < 0) {2231if (!must_exist && errno == ENOENT) {2232set_new_index_sparsity(istate);2233istate->initialized = 1;2234return 0;2235}2236die_errno(_("%s: index file open failed"), path);2237}2238
2239if (fstat(fd, &st))2240die_errno(_("%s: cannot stat the open index"), path);2241
2242mmap_size = xsize_t(st.st_size);2243if (mmap_size < sizeof(struct cache_header) + the_hash_algo->rawsz)2244die(_("%s: index file smaller than expected"), path);2245
2246mmap = xmmap_gently(NULL, mmap_size, PROT_READ, MAP_PRIVATE, fd, 0);2247if (mmap == MAP_FAILED)2248die_errno(_("%s: unable to map index file%s"), path,2249mmap_os_err());2250close(fd);2251
2252hdr = (const struct cache_header *)mmap;2253if (verify_hdr(hdr, mmap_size) < 0)2254goto unmap;2255
2256oidread(&istate->oid, (const unsigned char *)hdr + mmap_size - the_hash_algo->rawsz,2257the_repository->hash_algo);2258istate->version = ntohl(hdr->hdr_version);2259istate->cache_nr = ntohl(hdr->hdr_entries);2260istate->cache_alloc = alloc_nr(istate->cache_nr);2261CALLOC_ARRAY(istate->cache, istate->cache_alloc);2262istate->initialized = 1;2263
2264p.istate = istate;2265p.mmap = mmap;2266p.mmap_size = mmap_size;2267
2268src_offset = sizeof(*hdr);2269
2270if (repo_config_get_index_threads(the_repository, &nr_threads))2271nr_threads = 1;2272
2273/* TODO: does creating more threads than cores help? */2274if (!nr_threads) {2275nr_threads = istate->cache_nr / THREAD_COST;2276cpus = online_cpus();2277if (nr_threads > cpus)2278nr_threads = cpus;2279}2280
2281if (!HAVE_THREADS)2282nr_threads = 1;2283
2284if (nr_threads > 1) {2285extension_offset = read_eoie_extension(mmap, mmap_size);2286if (extension_offset) {2287int err;2288
2289p.src_offset = extension_offset;2290err = pthread_create(&p.pthread, NULL, load_index_extensions, &p);2291if (err)2292die(_("unable to create load_index_extensions thread: %s"), strerror(err));2293
2294nr_threads--;2295}2296}2297
2298/*2299* Locate and read the index entry offset table so that we can use it
2300* to multi-thread the reading of the cache entries.
2301*/
2302if (extension_offset && nr_threads > 1)2303ieot = read_ieot_extension(mmap, mmap_size, extension_offset);2304
2305if (ieot) {2306src_offset += load_cache_entries_threaded(istate, mmap, mmap_size, nr_threads, ieot);2307free(ieot);2308} else {2309src_offset += load_all_cache_entries(istate, mmap, mmap_size, src_offset);2310}2311
2312istate->timestamp.sec = st.st_mtime;2313istate->timestamp.nsec = ST_MTIME_NSEC(st);2314
2315/* if we created a thread, join it otherwise load the extensions on the primary thread */2316if (extension_offset) {2317int ret = pthread_join(p.pthread, NULL);2318if (ret)2319die(_("unable to join load_index_extensions thread: %s"), strerror(ret));2320} else {2321p.src_offset = src_offset;2322load_index_extensions(&p);2323}2324munmap((void *)mmap, mmap_size);2325
2326/*2327* TODO trace2: replace "the_repository" with the actual repo instance
2328* that is associated with the given "istate".
2329*/
2330trace2_data_intmax("index", the_repository, "read/version",2331istate->version);2332trace2_data_intmax("index", the_repository, "read/cache_nr",2333istate->cache_nr);2334
2335/*2336* If the command explicitly requires a full index, force it
2337* to be full. Otherwise, correct the sparsity based on repository
2338* settings and other properties of the index (if necessary).
2339*/
2340prepare_repo_settings(istate->repo);2341if (istate->repo->settings.command_requires_full_index)2342ensure_full_index(istate);2343else2344ensure_correct_sparsity(istate);2345
2346return istate->cache_nr;2347
2348unmap:2349munmap((void *)mmap, mmap_size);2350die(_("index file corrupt"));2351}
2352
2353/*
2354* Signal that the shared index is used by updating its mtime.
2355*
2356* This way, shared indexes can be removed if they have not been used
2357* for some time.
2358*/
2359static void freshen_shared_index(const char *shared_index, int warn)2360{
2361if (!check_and_freshen_file(shared_index, 1) && warn)2362warning(_("could not freshen shared index '%s'"), shared_index);2363}
2364
2365int read_index_from(struct index_state *istate, const char *path,2366const char *gitdir)2367{
2368struct split_index *split_index;2369int ret;2370char *base_oid_hex;2371char *base_path;2372
2373/* istate->initialized covers both .git/index and .git/sharedindex.xxx */2374if (istate->initialized)2375return istate->cache_nr;2376
2377/*2378* TODO trace2: replace "the_repository" with the actual repo instance
2379* that is associated with the given "istate".
2380*/
2381trace2_region_enter_printf("index", "do_read_index", the_repository,2382"%s", path);2383trace_performance_enter();2384ret = do_read_index(istate, path, 0);2385trace_performance_leave("read cache %s", path);2386trace2_region_leave_printf("index", "do_read_index", the_repository,2387"%s", path);2388
2389split_index = istate->split_index;2390if (!split_index || is_null_oid(&split_index->base_oid)) {2391post_read_index_from(istate);2392return ret;2393}2394
2395trace_performance_enter();2396if (split_index->base)2397release_index(split_index->base);2398else2399ALLOC_ARRAY(split_index->base, 1);2400index_state_init(split_index->base, istate->repo);2401
2402base_oid_hex = oid_to_hex(&split_index->base_oid);2403base_path = xstrfmt("%s/sharedindex.%s", gitdir, base_oid_hex);2404if (file_exists(base_path)) {2405trace2_region_enter_printf("index", "shared/do_read_index",2406the_repository, "%s", base_path);2407
2408ret = do_read_index(split_index->base, base_path, 0);2409trace2_region_leave_printf("index", "shared/do_read_index",2410the_repository, "%s", base_path);2411} else {2412char *path_copy = xstrdup(path);2413char *base_path2 = xstrfmt("%s/sharedindex.%s",2414dirname(path_copy), base_oid_hex);2415free(path_copy);2416trace2_region_enter_printf("index", "shared/do_read_index",2417the_repository, "%s", base_path2);2418ret = do_read_index(split_index->base, base_path2, 1);2419trace2_region_leave_printf("index", "shared/do_read_index",2420the_repository, "%s", base_path2);2421free(base_path2);2422}2423if (!oideq(&split_index->base_oid, &split_index->base->oid))2424die(_("broken index, expect %s in %s, got %s"),2425base_oid_hex, base_path,2426oid_to_hex(&split_index->base->oid));2427
2428freshen_shared_index(base_path, 0);2429merge_base_index(istate);2430post_read_index_from(istate);2431trace_performance_leave("read cache %s", base_path);2432free(base_path);2433return ret;2434}
2435
2436int is_index_unborn(struct index_state *istate)2437{
2438return (!istate->cache_nr && !istate->timestamp.sec);2439}
2440
2441void index_state_init(struct index_state *istate, struct repository *r)2442{
2443struct index_state blank = INDEX_STATE_INIT(r);2444memcpy(istate, &blank, sizeof(*istate));2445}
2446
2447void release_index(struct index_state *istate)2448{
2449/*2450* Cache entries in istate->cache[] should have been allocated
2451* from the memory pool associated with this index, or from an
2452* associated split_index. There is no need to free individual
2453* cache entries. validate_cache_entries can detect when this
2454* assertion does not hold.
2455*/
2456validate_cache_entries(istate);2457
2458resolve_undo_clear_index(istate);2459free_name_hash(istate);2460cache_tree_free(&(istate->cache_tree));2461free(istate->fsmonitor_last_update);2462free(istate->cache);2463discard_split_index(istate);2464free_untracked_cache(istate->untracked);2465
2466if (istate->sparse_checkout_patterns) {2467clear_pattern_list(istate->sparse_checkout_patterns);2468FREE_AND_NULL(istate->sparse_checkout_patterns);2469}2470
2471if (istate->ce_mem_pool) {2472mem_pool_discard(istate->ce_mem_pool, should_validate_cache_entries());2473FREE_AND_NULL(istate->ce_mem_pool);2474}2475}
2476
2477void discard_index(struct index_state *istate)2478{
2479release_index(istate);2480index_state_init(istate, istate->repo);2481}
2482
2483/*
2484* Validate the cache entries of this index.
2485* All cache entries associated with this index
2486* should have been allocated by the memory pool
2487* associated with this index, or by a referenced
2488* split index.
2489*/
2490void validate_cache_entries(const struct index_state *istate)2491{
2492int i;2493
2494if (!should_validate_cache_entries() || !istate || !istate->initialized)2495return;2496
2497for (i = 0; i < istate->cache_nr; i++) {2498if (!istate) {2499BUG("cache entry is not allocated from expected memory pool");2500} else if (!istate->ce_mem_pool ||2501!mem_pool_contains(istate->ce_mem_pool, istate->cache[i])) {2502if (!istate->split_index ||2503!istate->split_index->base ||2504!istate->split_index->base->ce_mem_pool ||2505!mem_pool_contains(istate->split_index->base->ce_mem_pool, istate->cache[i])) {2506BUG("cache entry is not allocated from expected memory pool");2507}2508}2509}2510
2511if (istate->split_index)2512validate_cache_entries(istate->split_index->base);2513}
2514
2515int unmerged_index(const struct index_state *istate)2516{
2517int i;2518for (i = 0; i < istate->cache_nr; i++) {2519if (ce_stage(istate->cache[i]))2520return 1;2521}2522return 0;2523}
2524
2525int repo_index_has_changes(struct repository *repo,2526struct tree *tree,2527struct strbuf *sb)2528{
2529struct index_state *istate = repo->index;2530struct object_id cmp;2531int i;2532
2533if (tree)2534cmp = tree->object.oid;2535if (tree || !repo_get_oid_tree(repo, "HEAD", &cmp)) {2536struct diff_options opt;2537
2538repo_diff_setup(repo, &opt);2539opt.flags.exit_with_status = 1;2540if (!sb)2541opt.flags.quick = 1;2542diff_setup_done(&opt);2543do_diff_cache(&cmp, &opt);2544diffcore_std(&opt);2545for (i = 0; sb && i < diff_queued_diff.nr; i++) {2546if (i)2547strbuf_addch(sb, ' ');2548strbuf_addstr(sb, diff_queued_diff.queue[i]->two->path);2549}2550diff_flush(&opt);2551return opt.flags.has_changes != 0;2552} else {2553/* TODO: audit for interaction with sparse-index. */2554ensure_full_index(istate);2555for (i = 0; sb && i < istate->cache_nr; i++) {2556if (i)2557strbuf_addch(sb, ' ');2558strbuf_addstr(sb, istate->cache[i]->name);2559}2560return !!istate->cache_nr;2561}2562}
2563
2564static int write_index_ext_header(struct hashfile *f,2565git_hash_ctx *eoie_f,2566unsigned int ext,2567unsigned int sz)2568{
2569hashwrite_be32(f, ext);2570hashwrite_be32(f, sz);2571
2572if (eoie_f) {2573ext = htonl(ext);2574sz = htonl(sz);2575the_hash_algo->update_fn(eoie_f, &ext, sizeof(ext));2576the_hash_algo->update_fn(eoie_f, &sz, sizeof(sz));2577}2578return 0;2579}
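/*
 * Note: when 'eoie_f' is non-NULL, the same 8-byte header (signature
 * and size in network byte order) is also folded into the EOIE hash
 * context; read_eoie_extension() recomputes that hash over the
 * extension headers to validate the end-of-index-entries extension.
 */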
2580
2581static void ce_smudge_racily_clean_entry(struct index_state *istate,2582struct cache_entry *ce)2583{
2584/*2585* The only thing we care about in this function is to smudge the
2586* falsely clean entry due to touch-update-touch race, so we leave
2587* everything else as it is. We are called for entries whose
2588* ce_stat_data.sd_mtime match the index file mtime.
2589*
2590* Note that this actually does not do much for gitlinks, for
2591* which ce_match_stat_basic() always goes to the actual
2592* contents. The caller checks with is_racy_timestamp() which
2593* always says "no" for gitlinks, so we are not called for them ;-)
2594*/
2595struct stat st;2596
2597if (lstat(ce->name, &st) < 0)2598return;2599if (ce_match_stat_basic(ce, &st))2600return;2601if (ce_modified_check_fs(istate, ce, &st)) {2602/* This is "racily clean"; smudge it. Note that this2603* is tricky code. At first glance, it may appear
2604* that it can break with this sequence:
2605*
2606* $ echo xyzzy >frotz
2607* $ git-update-index --add frotz
2608* $ : >frotz
2609* $ sleep 3
2610* $ echo filfre >nitfol
2611* $ git-update-index --add nitfol
2612*
2613* but it does not. When the second update-index runs,
2614* it notices that the entry "frotz" has the same timestamp
2615* as index, and if we were to smudge it by resetting its
2616* size to zero here, then the object name recorded
2617* in the index is for the 6-byte file, but the cached stat information
2618* becomes zero --- which would then match what we would
2619* obtain from the filesystem next time we stat("frotz").
2620*
2621* However, the second update-index, before calling
2622* this function, notices that the cached size is 6
2623* bytes and what is on the filesystem is an empty
2624* file, and never calls us, so the cached size information
2625* for "frotz" stays 6 which does not match the filesystem.
2626*/
2627ce->ce_stat_data.sd_size = 0;2628}2629}
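/*
 * (Background: an entry is considered "racily clean" when its cached
 * mtime equals the mtime of the index file itself, so an edit made
 * within the same timestamp tick could otherwise look unchanged.
 * Recording a zero size makes the cached stat data disagree with any
 * non-empty working tree file, forcing the contents to be re-checked
 * on the next refresh.)
 */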
2630
2631/* Copy miscellaneous fields but not the name */
2632static void copy_cache_entry_to_ondisk(struct ondisk_cache_entry *ondisk,2633struct cache_entry *ce)2634{
2635short flags;2636const unsigned hashsz = the_hash_algo->rawsz;2637uint16_t *flagsp = (uint16_t *)(ondisk->data + hashsz);2638
2639ondisk->ctime.sec = htonl(ce->ce_stat_data.sd_ctime.sec);2640ondisk->mtime.sec = htonl(ce->ce_stat_data.sd_mtime.sec);2641ondisk->ctime.nsec = htonl(ce->ce_stat_data.sd_ctime.nsec);2642ondisk->mtime.nsec = htonl(ce->ce_stat_data.sd_mtime.nsec);2643ondisk->dev = htonl(ce->ce_stat_data.sd_dev);2644ondisk->ino = htonl(ce->ce_stat_data.sd_ino);2645ondisk->mode = htonl(ce->ce_mode);2646ondisk->uid = htonl(ce->ce_stat_data.sd_uid);2647ondisk->gid = htonl(ce->ce_stat_data.sd_gid);2648ondisk->size = htonl(ce->ce_stat_data.sd_size);2649hashcpy(ondisk->data, ce->oid.hash, the_repository->hash_algo);2650
2651flags = ce->ce_flags & ~CE_NAMEMASK;2652flags |= (ce_namelen(ce) >= CE_NAMEMASK ? CE_NAMEMASK : ce_namelen(ce));2653flagsp[0] = htons(flags);2654if (ce->ce_flags & CE_EXTENDED) {2655flagsp[1] = htons((ce->ce_flags & CE_EXTENDED_FLAGS) >> 16);2656}2657}
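/*
 * On-disk entry layout produced above (illustrative summary):
 * ctime(sec,nsec), mtime(sec,nsec), dev, ino, mode, uid, gid, size
 * (ten 32-bit fields in network byte order), the object hash, a
 * 16-bit flags word, an optional 16-bit extended-flags word when
 * CE_EXTENDED is set, and finally the pathname, which ce_write_entry()
 * writes separately along with its padding.
 */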
2658
2659static int ce_write_entry(struct hashfile *f, struct cache_entry *ce,2660struct strbuf *previous_name, struct ondisk_cache_entry *ondisk)2661{
2662int size;2663unsigned int saved_namelen;2664int stripped_name = 0;2665static unsigned char padding[8] = { 0x00 };2666
2667if (ce->ce_flags & CE_STRIP_NAME) {2668saved_namelen = ce_namelen(ce);2669ce->ce_namelen = 0;2670stripped_name = 1;2671}2672
2673size = offsetof(struct ondisk_cache_entry,data) + ondisk_data_size(ce->ce_flags, 0);2674
2675if (!previous_name) {2676int len = ce_namelen(ce);2677copy_cache_entry_to_ondisk(ondisk, ce);2678hashwrite(f, ondisk, size);2679hashwrite(f, ce->name, len);2680hashwrite(f, padding, align_padding_size(size, len));2681} else {2682int common, to_remove, prefix_size;2683unsigned char to_remove_vi[16];2684for (common = 0;2685(ce->name[common] &&2686common < previous_name->len &&2687ce->name[common] == previous_name->buf[common]);2688common++)2689; /* still matching */2690to_remove = previous_name->len - common;2691prefix_size = encode_varint(to_remove, to_remove_vi);2692
2693copy_cache_entry_to_ondisk(ondisk, ce);2694hashwrite(f, ondisk, size);2695hashwrite(f, to_remove_vi, prefix_size);2696hashwrite(f, ce->name + common, ce_namelen(ce) - common);2697hashwrite(f, padding, 1);2698
2699strbuf_splice(previous_name, common, to_remove,2700ce->name + common, ce_namelen(ce) - common);2701}2702if (stripped_name) {2703ce->ce_namelen = saved_namelen;2704ce->ce_flags &= ~CE_STRIP_NAME;2705}2706
2707return 0;2708}
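/*
 * Note the difference in trailing padding above: V2/V3 entries are
 * padded with NULs to the 8-byte boundary computed by
 * align_padding_size(), while V4 entries (prefix-compressed names)
 * end with exactly one NUL terminator.
 */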
2709
2710/*
2711* This function verifies that the index_state records the correct hash of the
2712* index file. Don't die on any failure; just return 0.
2713*/
2714static int verify_index_from(const struct index_state *istate, const char *path)2715{
2716int fd;2717ssize_t n;2718struct stat st;2719unsigned char hash[GIT_MAX_RAWSZ];2720
2721if (!istate->initialized)2722return 0;2723
2724fd = open(path, O_RDONLY);2725if (fd < 0)2726return 0;2727
2728if (fstat(fd, &st))2729goto out;2730
2731if (st.st_size < sizeof(struct cache_header) + the_hash_algo->rawsz)2732goto out;2733
2734n = pread_in_full(fd, hash, the_hash_algo->rawsz, st.st_size - the_hash_algo->rawsz);2735if (n != the_hash_algo->rawsz)2736goto out;2737
2738if (!hasheq(istate->oid.hash, hash, the_repository->hash_algo))2739goto out;2740
2741close(fd);2742return 1;2743
2744out:2745close(fd);2746return 0;2747}
2748
2749static int repo_verify_index(struct repository *repo)2750{
2751return verify_index_from(repo->index, repo->index_file);2752}
2753
2754int has_racy_timestamp(struct index_state *istate)2755{
2756int entries = istate->cache_nr;2757int i;2758
2759for (i = 0; i < entries; i++) {2760struct cache_entry *ce = istate->cache[i];2761if (is_racy_timestamp(istate, ce))2762return 1;2763}2764return 0;2765}
2766
2767void repo_update_index_if_able(struct repository *repo,2768struct lock_file *lockfile)2769{
2770if ((repo->index->cache_changed ||2771has_racy_timestamp(repo->index)) &&2772repo_verify_index(repo))2773write_locked_index(repo->index, lockfile, COMMIT_LOCK);2774else2775rollback_lock_file(lockfile);2776}
2777
2778static int record_eoie(void)2779{
2780int val;2781
2782if (!git_config_get_bool("index.recordendofindexentries", &val))2783return val;2784
2785/*2786* As a convenience, the end of index entries extension
2787* used for threading is written by default if the user
2788* explicitly requested threaded index reads.
2789*/
2790return !repo_config_get_index_threads(the_repository, &val) && val != 1;2791}
2792
2793static int record_ieot(void)2794{
2795int val;2796
2797if (!git_config_get_bool("index.recordoffsettable", &val))2798return val;2799
2800/*2801* As a convenience, the offset table used for threading is
2802* written by default if the user explicitly requested
2803* threaded index reads.
2804*/
2805return !repo_config_get_index_threads(the_repository, &val) && val != 1;2806}
2807
2808enum write_extensions {2809WRITE_NO_EXTENSION = 0,2810WRITE_SPLIT_INDEX_EXTENSION = 1<<0,2811WRITE_CACHE_TREE_EXTENSION = 1<<1,2812WRITE_RESOLVE_UNDO_EXTENSION = 1<<2,2813WRITE_UNTRACKED_CACHE_EXTENSION = 1<<3,2814WRITE_FSMONITOR_EXTENSION = 1<<4,2815};2816#define WRITE_ALL_EXTENSIONS ((enum write_extensions)-1)2817
2818/*
2819* On success, `tempfile` is closed. If it is the temporary file
2820* of a `struct lock_file`, we will therefore effectively perform
2821* a 'close_lock_file_gently()`. Since that is an implementation
2822* detail of lockfiles, callers of `do_write_index()` should not
2823* rely on it.
2824*/
2825static int do_write_index(struct index_state *istate, struct tempfile *tempfile,2826enum write_extensions write_extensions, unsigned flags)2827{
2828uint64_t start = getnanotime();2829struct hashfile *f;2830git_hash_ctx *eoie_c = NULL;2831struct cache_header hdr;2832int i, err = 0, removed, extended, hdr_version;2833struct cache_entry **cache = istate->cache;2834int entries = istate->cache_nr;2835struct stat st;2836struct ondisk_cache_entry ondisk;2837struct strbuf previous_name_buf = STRBUF_INIT, *previous_name;2838int drop_cache_tree = istate->drop_cache_tree;2839off_t offset;2840int csum_fsync_flag;2841int ieot_entries = 1;2842struct index_entry_offset_table *ieot = NULL;2843struct repository *r = istate->repo;2844struct strbuf sb = STRBUF_INIT;2845int nr, nr_threads, ret;2846
2847f = hashfd(tempfile->fd, tempfile->filename.buf);2848
2849prepare_repo_settings(r);2850f->skip_hash = r->settings.index_skip_hash;2851
2852for (i = removed = extended = 0; i < entries; i++) {2853if (cache[i]->ce_flags & CE_REMOVE)2854removed++;2855
2856/* reduce extended entries if possible */2857cache[i]->ce_flags &= ~CE_EXTENDED;2858if (cache[i]->ce_flags & CE_EXTENDED_FLAGS) {2859extended++;2860cache[i]->ce_flags |= CE_EXTENDED;2861}2862}2863
2864if (!istate->version)2865istate->version = get_index_format_default(r);2866
2867/* demote version 3 to version 2 when the latter suffices */2868if (istate->version == 3 || istate->version == 2)2869istate->version = extended ? 3 : 2;2870
2871hdr_version = istate->version;2872
2873hdr.hdr_signature = htonl(CACHE_SIGNATURE);2874hdr.hdr_version = htonl(hdr_version);2875hdr.hdr_entries = htonl(entries - removed);2876
2877hashwrite(f, &hdr, sizeof(hdr));2878
2879if (!HAVE_THREADS || repo_config_get_index_threads(the_repository, &nr_threads))2880nr_threads = 1;2881
2882if (nr_threads != 1 && record_ieot()) {2883int ieot_blocks, cpus;2884
2885/*2886* ensure default number of ieot blocks maps evenly to the
2887* default number of threads that will process them, leaving
2888* room for the thread to load the index extensions.
2889*/
2890if (!nr_threads) {2891ieot_blocks = istate->cache_nr / THREAD_COST;2892cpus = online_cpus();2893if (ieot_blocks > cpus - 1)2894ieot_blocks = cpus - 1;2895} else {2896ieot_blocks = nr_threads;2897if (ieot_blocks > istate->cache_nr)2898ieot_blocks = istate->cache_nr;2899}2900
2901/*2902* no reason to write out the IEOT extension if we don't
2903* have enough blocks to utilize multi-threading
2904*/
2905if (ieot_blocks > 1) {2906ieot = xcalloc(1, sizeof(struct index_entry_offset_table)2907+ (ieot_blocks * sizeof(struct index_entry_offset)));2908ieot_entries = DIV_ROUND_UP(entries, ieot_blocks);2909}2910}2911
2912offset = hashfile_total(f);2913
2914nr = 0;2915previous_name = (hdr_version == 4) ? &previous_name_buf : NULL;2916
2917for (i = 0; i < entries; i++) {2918struct cache_entry *ce = cache[i];2919if (ce->ce_flags & CE_REMOVE)2920continue;2921if (!ce_uptodate(ce) && is_racy_timestamp(istate, ce))2922ce_smudge_racily_clean_entry(istate, ce);2923if (is_null_oid(&ce->oid)) {2924static const char msg[] = "cache entry has null sha1: %s";2925static int allow = -1;2926
2927if (allow < 0)2928allow = git_env_bool("GIT_ALLOW_NULL_SHA1", 0);2929if (allow)2930warning(msg, ce->name);2931else2932err = error(msg, ce->name);2933
2934drop_cache_tree = 1;2935}2936if (ieot && i && (i % ieot_entries == 0)) {2937ieot->entries[ieot->nr].nr = nr;2938ieot->entries[ieot->nr].offset = offset;2939ieot->nr++;2940/*2941* If we have a V4 index, set the first byte to an invalid
2942* character to ensure there is nothing common with the previous
2943* entry
2944*/
2945if (previous_name)2946previous_name->buf[0] = 0;2947nr = 0;2948
2949offset = hashfile_total(f);2950}2951if (ce_write_entry(f, ce, previous_name, (struct ondisk_cache_entry *)&ondisk) < 0)2952err = -1;2953
2954if (err)2955break;2956nr++;2957}2958if (ieot && nr) {2959ieot->entries[ieot->nr].nr = nr;2960ieot->entries[ieot->nr].offset = offset;2961ieot->nr++;2962}2963strbuf_release(&previous_name_buf);2964
2965if (err) {2966ret = err;2967goto out;2968}2969
2970offset = hashfile_total(f);2971
2972/*2973* The extension headers must be hashed on their own for the
2974* EOIE extension. Create a hash context here to compute that hash.
2975*/
2976if (offset && record_eoie()) {2977CALLOC_ARRAY(eoie_c, 1);2978the_hash_algo->init_fn(eoie_c);2979}2980
2981/*2982* Let's write out CACHE_EXT_INDEXENTRYOFFSETTABLE first so that we
2983* can minimize the number of extensions we have to scan through to
2984* find it during load. Write it out regardless of the
2985* strip_extensions parameter as we need it when loading the shared
2986* index.
2987*/
2988if (ieot) {2989strbuf_reset(&sb);2990
2991write_ieot_extension(&sb, ieot);2992err = write_index_ext_header(f, eoie_c, CACHE_EXT_INDEXENTRYOFFSETTABLE, sb.len) < 0;2993hashwrite(f, sb.buf, sb.len);2994if (err) {2995ret = -1;2996goto out;2997}2998}2999
3000if (write_extensions & WRITE_SPLIT_INDEX_EXTENSION &&3001istate->split_index) {3002strbuf_reset(&sb);3003
3004if (istate->sparse_index)3005die(_("cannot write split index for a sparse index"));3006
3007err = write_link_extension(&sb, istate) < 0 ||3008write_index_ext_header(f, eoie_c, CACHE_EXT_LINK,3009sb.len) < 0;3010hashwrite(f, sb.buf, sb.len);3011if (err) {3012ret = -1;3013goto out;3014}3015}3016if (write_extensions & WRITE_CACHE_TREE_EXTENSION &&3017!drop_cache_tree && istate->cache_tree) {3018strbuf_reset(&sb);3019
3020cache_tree_write(&sb, istate->cache_tree);3021err = write_index_ext_header(f, eoie_c, CACHE_EXT_TREE, sb.len) < 0;3022hashwrite(f, sb.buf, sb.len);3023if (err) {3024ret = -1;3025goto out;3026}3027}3028if (write_extensions & WRITE_RESOLVE_UNDO_EXTENSION &&3029istate->resolve_undo) {3030strbuf_reset(&sb);3031
3032resolve_undo_write(&sb, istate->resolve_undo);3033err = write_index_ext_header(f, eoie_c, CACHE_EXT_RESOLVE_UNDO,3034sb.len) < 0;3035hashwrite(f, sb.buf, sb.len);3036if (err) {3037ret = -1;3038goto out;3039}3040}3041if (write_extensions & WRITE_UNTRACKED_CACHE_EXTENSION &&3042istate->untracked) {3043strbuf_reset(&sb);3044
3045write_untracked_extension(&sb, istate->untracked);3046err = write_index_ext_header(f, eoie_c, CACHE_EXT_UNTRACKED,3047sb.len) < 0;3048hashwrite(f, sb.buf, sb.len);3049if (err) {3050ret = -1;3051goto out;3052}3053}3054if (write_extensions & WRITE_FSMONITOR_EXTENSION &&3055istate->fsmonitor_last_update) {3056strbuf_reset(&sb);3057
3058write_fsmonitor_extension(&sb, istate);3059err = write_index_ext_header(f, eoie_c, CACHE_EXT_FSMONITOR, sb.len) < 0;3060hashwrite(f, sb.buf, sb.len);3061if (err) {3062ret = -1;3063goto out;3064}3065}3066if (istate->sparse_index) {3067if (write_index_ext_header(f, eoie_c, CACHE_EXT_SPARSE_DIRECTORIES, 0) < 0) {3068ret = -1;3069goto out;3070}3071}3072
3073/*3074* CACHE_EXT_ENDOFINDEXENTRIES must be written as the last entry before the SHA1
3075* so that it can be found and processed before all the index entries are
3076* read. Write it out regardless of the strip_extensions parameter as we need it
3077* when loading the shared index.
3078*/
3079if (eoie_c) {3080strbuf_reset(&sb);3081
3082write_eoie_extension(&sb, eoie_c, offset);3083err = write_index_ext_header(f, NULL, CACHE_EXT_ENDOFINDEXENTRIES, sb.len) < 0;3084hashwrite(f, sb.buf, sb.len);3085if (err) {3086ret = -1;3087goto out;3088}3089}3090
3091csum_fsync_flag = 0;3092if (!alternate_index_output && (flags & COMMIT_LOCK))3093csum_fsync_flag = CSUM_FSYNC;3094
3095finalize_hashfile(f, istate->oid.hash, FSYNC_COMPONENT_INDEX,3096CSUM_HASH_IN_STREAM | csum_fsync_flag);3097f = NULL;3098
3099if (close_tempfile_gently(tempfile)) {3100ret = error(_("could not close '%s'"), get_tempfile_path(tempfile));3101goto out;3102}3103if (stat(get_tempfile_path(tempfile), &st)) {3104ret = -1;3105goto out;3106}3107istate->timestamp.sec = (unsigned int)st.st_mtime;3108istate->timestamp.nsec = ST_MTIME_NSEC(st);3109trace_performance_since(start, "write index, changed mask = %x", istate->cache_changed);3110
3111/*3112* TODO trace2: replace "the_repository" with the actual repo instance
3113* that is associated with the given "istate".
3114*/
3115trace2_data_intmax("index", the_repository, "write/version",3116istate->version);3117trace2_data_intmax("index", the_repository, "write/cache_nr",3118istate->cache_nr);3119
3120ret = 0;3121
3122out:3123if (f)3124free_hashfile(f);3125strbuf_release(&sb);3126free(ieot);3127return ret;3128}
3129
3130void set_alternate_index_output(const char *name)3131{
3132alternate_index_output = name;3133}
3134
3135static int commit_locked_index(struct lock_file *lk)3136{
3137if (alternate_index_output)3138return commit_lock_file_to(lk, alternate_index_output);3139else3140return commit_lock_file(lk);3141}
3142
3143static int do_write_locked_index(struct index_state *istate,3144struct lock_file *lock,3145unsigned flags,3146enum write_extensions write_extensions)3147{
3148int ret;3149int was_full = istate->sparse_index == INDEX_EXPANDED;3150
3151ret = convert_to_sparse(istate, 0);3152
3153if (ret) {3154warning(_("failed to convert to a sparse-index"));3155return ret;3156}3157
3158/*3159* TODO trace2: replace "the_repository" with the actual repo instance
3160* that is associated with the given "istate".
3161*/
3162trace2_region_enter_printf("index", "do_write_index", the_repository,3163"%s", get_lock_file_path(lock));3164ret = do_write_index(istate, lock->tempfile, write_extensions, flags);3165trace2_region_leave_printf("index", "do_write_index", the_repository,3166"%s", get_lock_file_path(lock));3167
3168if (was_full)3169ensure_full_index(istate);3170
3171if (ret)3172return ret;3173if (flags & COMMIT_LOCK)3174ret = commit_locked_index(lock);3175else3176ret = close_lock_file_gently(lock);3177
3178run_hooks_l(the_repository, "post-index-change",3179istate->updated_workdir ? "1" : "0",3180istate->updated_skipworktree ? "1" : "0", NULL);3181istate->updated_workdir = 0;3182istate->updated_skipworktree = 0;3183
3184return ret;3185}
3186
3187static int write_split_index(struct index_state *istate,3188struct lock_file *lock,3189unsigned flags)3190{
3191int ret;3192prepare_to_write_split_index(istate);3193ret = do_write_locked_index(istate, lock, flags, WRITE_ALL_EXTENSIONS);3194finish_writing_split_index(istate);3195return ret;3196}
3197
3198static unsigned long get_shared_index_expire_date(void)3199{
3200static unsigned long shared_index_expire_date;3201static int shared_index_expire_date_prepared;3202
3203if (!shared_index_expire_date_prepared) {3204const char *shared_index_expire = "2.weeks.ago";3205char *value = NULL;3206
3207repo_config_get_expiry(the_repository, "splitindex.sharedindexexpire",3208&value);3209if (value)3210shared_index_expire = value;3211
3212shared_index_expire_date = approxidate(shared_index_expire);3213shared_index_expire_date_prepared = 1;3214
3215free(value);3216}3217
3218return shared_index_expire_date;3219}
3220
3221static int should_delete_shared_index(const char *shared_index_path)3222{
3223struct stat st;3224unsigned long expiration;3225
3226/* Check timestamp */3227expiration = get_shared_index_expire_date();3228if (!expiration)3229return 0;3230if (stat(shared_index_path, &st))3231return error_errno(_("could not stat '%s'"), shared_index_path);3232if (st.st_mtime > expiration)3233return 0;3234
3235return 1;3236}
3237
3238static int clean_shared_index_files(const char *current_hex)3239{
3240struct dirent *de;3241DIR *dir = opendir(get_git_dir());3242
3243if (!dir)3244return error_errno(_("unable to open git dir: %s"), get_git_dir());3245
3246while ((de = readdir(dir)) != NULL) {3247const char *sha1_hex;3248const char *shared_index_path;3249if (!skip_prefix(de->d_name, "sharedindex.", &sha1_hex))3250continue;3251if (!strcmp(sha1_hex, current_hex))3252continue;3253shared_index_path = git_path("%s", de->d_name);3254if (should_delete_shared_index(shared_index_path) > 0 &&3255unlink(shared_index_path))3256warning_errno(_("unable to unlink: %s"), shared_index_path);3257}3258closedir(dir);3259
3260return 0;3261}
3262
3263static int write_shared_index(struct index_state *istate,3264struct tempfile **temp, unsigned flags)3265{
3266struct split_index *si = istate->split_index;3267int ret, was_full = !istate->sparse_index;3268
3269move_cache_to_base_index(istate);3270convert_to_sparse(istate, 0);3271
3272trace2_region_enter_printf("index", "shared/do_write_index",3273the_repository, "%s", get_tempfile_path(*temp));3274ret = do_write_index(si->base, *temp, WRITE_NO_EXTENSION, flags);3275trace2_region_leave_printf("index", "shared/do_write_index",3276the_repository, "%s", get_tempfile_path(*temp));3277
3278if (was_full)3279ensure_full_index(istate);3280
3281if (ret)3282return ret;3283ret = adjust_shared_perm(get_tempfile_path(*temp));3284if (ret) {3285error(_("cannot fix permission bits on '%s'"), get_tempfile_path(*temp));3286return ret;3287}3288ret = rename_tempfile(temp,3289git_path("sharedindex.%s", oid_to_hex(&si->base->oid)));3290if (!ret) {3291oidcpy(&si->base_oid, &si->base->oid);3292clean_shared_index_files(oid_to_hex(&si->base->oid));3293}3294
3295return ret;3296}
3297
3298static const int default_max_percent_split_change = 20;3299
3300static int too_many_not_shared_entries(struct index_state *istate)3301{
3302int i, not_shared = 0;3303int max_split = repo_config_get_max_percent_split_change(the_repository);3304
3305switch (max_split) {3306case -1:3307/* not or badly configured: use the default value */3308max_split = default_max_percent_split_change;3309break;3310case 0:3311return 1; /* 0% means always write a new shared index */3312case 100:3313return 0; /* 100% means never write a new shared index */3314default:3315break; /* just use the configured value */3316}3317
3318/* Count not shared entries */3319for (i = 0; i < istate->cache_nr; i++) {3320struct cache_entry *ce = istate->cache[i];3321if (!ce->index)3322not_shared++;3323}3324
3325return (int64_t)istate->cache_nr * max_split < (int64_t)not_shared * 100;3326}
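/*
 * Example (illustrative): with splitIndex.maxPercentChange at its
 * default of 20 and 1000 cache entries, a new shared index is written
 * once more than 200 entries live only in the split index, because
 * 1000 * 20 < not_shared * 100 first holds at not_shared = 201.
 */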
3327
3328int write_locked_index(struct index_state *istate, struct lock_file *lock,3329unsigned flags)3330{
3331int new_shared_index, ret, test_split_index_env;3332struct split_index *si = istate->split_index;3333
3334if (git_env_bool("GIT_TEST_CHECK_CACHE_TREE", 0))3335cache_tree_verify(the_repository, istate);3336
3337if ((flags & SKIP_IF_UNCHANGED) && !istate->cache_changed) {3338if (flags & COMMIT_LOCK)3339rollback_lock_file(lock);3340return 0;3341}3342
3343if (istate->fsmonitor_last_update)3344fill_fsmonitor_bitmap(istate);3345
3346test_split_index_env = git_env_bool("GIT_TEST_SPLIT_INDEX", 0);3347
3348if ((!si && !test_split_index_env) ||3349alternate_index_output ||3350(istate->cache_changed & ~EXTMASK)) {3351ret = do_write_locked_index(istate, lock, flags,3352~WRITE_SPLIT_INDEX_EXTENSION);3353goto out;3354}3355
3356if (test_split_index_env) {3357if (!si) {3358si = init_split_index(istate);3359istate->cache_changed |= SPLIT_INDEX_ORDERED;3360} else {3361int v = si->base_oid.hash[0];3362if ((v & 15) < 6)3363istate->cache_changed |= SPLIT_INDEX_ORDERED;3364}3365}3366if (too_many_not_shared_entries(istate))3367istate->cache_changed |= SPLIT_INDEX_ORDERED;3368
3369new_shared_index = istate->cache_changed & SPLIT_INDEX_ORDERED;3370
3371if (new_shared_index) {3372struct tempfile *temp;3373int saved_errno;3374
3375/* Same initial permissions as the main .git/index file */3376temp = mks_tempfile_sm(git_path("sharedindex_XXXXXX"), 0, 0666);3377if (!temp) {3378ret = do_write_locked_index(istate, lock, flags,3379~WRITE_SPLIT_INDEX_EXTENSION);3380goto out;3381}3382ret = write_shared_index(istate, &temp, flags);3383
3384saved_errno = errno;3385if (is_tempfile_active(temp))3386delete_tempfile(&temp);3387errno = saved_errno;3388
3389if (ret)3390goto out;3391}3392
3393ret = write_split_index(istate, lock, flags);3394
3395/* Freshen the shared index only if the split-index was written */3396if (!ret && !new_shared_index && !is_null_oid(&si->base_oid)) {3397const char *shared_index = git_path("sharedindex.%s",3398oid_to_hex(&si->base_oid));3399freshen_shared_index(shared_index, 1);3400}3401
3402out:3403if (flags & COMMIT_LOCK)3404rollback_lock_file(lock);3405return ret;3406}
3407
3408/*
3409* Read the index file that is potentially unmerged into the given
3410* index_state, dropping any unmerged entries to stage #0 (potentially
3411* resulting in a path appearing as both a file and a directory in the
3412* index; the caller is responsible for clearing out the extra entries
3413* before writing the index to a tree). Returns true if the index is
3414* unmerged. Callers who want to refuse to work from an unmerged
3415* state can call this and check its return value, instead of calling
3416* read_cache().
3417*/
3418int repo_read_index_unmerged(struct repository *repo)3419{
3420struct index_state *istate;3421int i;3422int unmerged = 0;3423
3424repo_read_index(repo);3425istate = repo->index;3426for (i = 0; i < istate->cache_nr; i++) {3427struct cache_entry *ce = istate->cache[i];3428struct cache_entry *new_ce;3429int len;3430
3431if (!ce_stage(ce))3432continue;3433unmerged = 1;3434len = ce_namelen(ce);3435new_ce = make_empty_cache_entry(istate, len);3436memcpy(new_ce->name, ce->name, len);3437new_ce->ce_flags = create_ce_flags(0) | CE_CONFLICTED;3438new_ce->ce_namelen = len;3439new_ce->ce_mode = ce->ce_mode;3440if (add_index_entry(istate, new_ce, ADD_CACHE_SKIP_DFCHECK))3441return error(_("%s: cannot drop to stage #0"),3442new_ce->name);3443}3444return unmerged;3445}
3446
3447/*
3448* Returns 1 if the path is an "other" path with respect to
3449* the index; that is, the path is not mentioned in the index at all,
3450* either as a file, a directory with some files in the index,
3451* or as an unmerged entry.
3452*
3453* We helpfully remove a trailing "/" from directories so that
3454* the output of read_directory can be used as-is.
3455*/
3456int index_name_is_other(struct index_state *istate, const char *name,3457int namelen)3458{
3459int pos;3460if (namelen && name[namelen - 1] == '/')3461namelen--;3462pos = index_name_pos(istate, name, namelen);3463if (0 <= pos)3464return 0; /* exact match */3465pos = -pos - 1;3466if (pos < istate->cache_nr) {3467struct cache_entry *ce = istate->cache[pos];3468if (ce_namelen(ce) == namelen &&3469!memcmp(ce->name, name, namelen))3470return 0; /* Yup, this one exists unmerged */3471}3472return 1;3473}
3474
3475void *read_blob_data_from_index(struct index_state *istate,3476const char *path, unsigned long *size)3477{
3478int pos, len;3479unsigned long sz;3480enum object_type type;3481void *data;3482
3483len = strlen(path);3484pos = index_name_pos(istate, path, len);3485if (pos < 0) {3486/*3487* We might be in the middle of a merge, in which
3488* case we would read stage #2 (ours).
3489*/
3490int i;3491for (i = -pos - 1;3492(pos < 0 && i < istate->cache_nr &&3493!strcmp(istate->cache[i]->name, path));3494i++)3495if (ce_stage(istate->cache[i]) == 2)3496pos = i;3497}3498if (pos < 0)3499return NULL;3500data = repo_read_object_file(the_repository, &istate->cache[pos]->oid,3501&type, &sz);3502if (!data || type != OBJ_BLOB) {3503free(data);3504return NULL;3505}3506if (size)3507*size = sz;3508return data;3509}
3510
3511void move_index_extensions(struct index_state *dst, struct index_state *src)3512{
3513dst->untracked = src->untracked;3514src->untracked = NULL;3515dst->cache_tree = src->cache_tree;3516src->cache_tree = NULL;3517}
3518
3519struct cache_entry *dup_cache_entry(const struct cache_entry *ce,3520struct index_state *istate)3521{
3522unsigned int size = ce_size(ce);3523int mem_pool_allocated;3524struct cache_entry *new_entry = make_empty_cache_entry(istate, ce_namelen(ce));3525mem_pool_allocated = new_entry->mem_pool_allocated;3526
3527memcpy(new_entry, ce, size);3528new_entry->mem_pool_allocated = mem_pool_allocated;3529return new_entry;3530}
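/*
 * The mem_pool_allocated flag is saved and restored around the memcpy
 * above so that the copy keeps its own allocation origin (pool vs.
 * heap) rather than inheriting it from 'ce'; otherwise
 * discard_cache_entry() could free() a pool-allocated entry or skip
 * freeing a heap-allocated one.
 */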
3531
3532void discard_cache_entry(struct cache_entry *ce)3533{
3534if (ce && should_validate_cache_entries())3535memset(ce, 0xCD, cache_entry_size(ce->ce_namelen));3536
3537if (ce && ce->mem_pool_allocated)3538return;3539
3540free(ce);3541}
3542
3543int should_validate_cache_entries(void)3544{
3545static int validate_index_cache_entries = -1;3546
3547if (validate_index_cache_entries < 0) {3548if (getenv("GIT_TEST_VALIDATE_INDEX_CACHE_ENTRIES"))3549validate_index_cache_entries = 1;3550else3551validate_index_cache_entries = 0;3552}3553
3554return validate_index_cache_entries;3555}
3556
3557#define EOIE_SIZE (4 + GIT_SHA1_RAWSZ) /* <4-byte offset> + <20-byte hash> */3558#define EOIE_SIZE_WITH_HEADER (4 + 4 + EOIE_SIZE) /* <4-byte signature> + <4-byte length> + EOIE_SIZE */3559
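/*
 * For SHA-1 these work out to EOIE_SIZE = 24 and
 * EOIE_SIZE_WITH_HEADER = 32, i.e. the extension always occupies the
 * last 32 bytes of the file before the trailing index checksum.
 */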
3560static size_t read_eoie_extension(const char *mmap, size_t mmap_size)3561{
3562/*3563* The end of index entries (EOIE) extension is guaranteed to be last
3564* so that it can be found by scanning backwards from the EOF.
3565*
3566* "EOIE"
3567* <4-byte length>
3568* <4-byte offset>
3569* <20-byte hash>
3570*/
3571const char *index, *eoie;3572uint32_t extsize;3573size_t offset, src_offset;3574unsigned char hash[GIT_MAX_RAWSZ];3575git_hash_ctx c;3576
3577/* ensure we have an index big enough to contain an EOIE extension */3578if (mmap_size < sizeof(struct cache_header) + EOIE_SIZE_WITH_HEADER + the_hash_algo->rawsz)3579return 0;3580
3581/* validate the extension signature */3582index = eoie = mmap + mmap_size - EOIE_SIZE_WITH_HEADER - the_hash_algo->rawsz;3583if (CACHE_EXT(index) != CACHE_EXT_ENDOFINDEXENTRIES)3584return 0;3585index += sizeof(uint32_t);3586
3587/* validate the extension size */3588extsize = get_be32(index);3589if (extsize != EOIE_SIZE)3590return 0;3591index += sizeof(uint32_t);3592
3593/*3594* Validate the offset we're going to look for the first extension
3595* signature is after the index header and before the eoie extension.
3596*/
3597offset = get_be32(index);3598if (mmap + offset < mmap + sizeof(struct cache_header))3599return 0;3600if (mmap + offset >= eoie)3601return 0;3602index += sizeof(uint32_t);3603
3604/*3605* The hash is computed over extension types and their sizes (but not
3606* their contents). E.g. if we have "TREE" extension that is N-bytes
3607* long, "REUC" extension that is M-bytes long, followed by "EOIE",
3608* then the hash would be:
3609*
3610* SHA-1("TREE" + <binary representation of N> +
3611* "REUC" + <binary representation of M>)
3612*/
3613src_offset = offset;3614the_hash_algo->init_fn(&c);3615while (src_offset < mmap_size - the_hash_algo->rawsz - EOIE_SIZE_WITH_HEADER) {3616/* After an array of active_nr index entries,3617* there can be an arbitrary number of extended
3618* sections, each of which is prefixed with an
3619* extension name (4 bytes) and a section length
3620* in 4-byte network byte order.
3621*/
3622uint32_t extsize;3623memcpy(&extsize, mmap + src_offset + 4, 4);3624extsize = ntohl(extsize);3625
3626/* verify the extension size isn't so large it will wrap around */3627if (src_offset + 8 + extsize < src_offset)3628return 0;3629
3630the_hash_algo->update_fn(&c, mmap + src_offset, 8);3631
3632src_offset += 8;3633src_offset += extsize;3634}3635the_hash_algo->final_fn(hash, &c);3636if (!hasheq(hash, (const unsigned char *)index, the_repository->hash_algo))3637return 0;3638
3639/* Validate that the extension offsets returned us back to the eoie extension. */3640if (src_offset != mmap_size - the_hash_algo->rawsz - EOIE_SIZE_WITH_HEADER)3641return 0;3642
3643return offset;3644}
3645
3646static void write_eoie_extension(struct strbuf *sb, git_hash_ctx *eoie_context, size_t offset)3647{
3648uint32_t buffer;3649unsigned char hash[GIT_MAX_RAWSZ];3650
3651/* offset */3652put_be32(&buffer, offset);3653strbuf_add(sb, &buffer, sizeof(uint32_t));3654
3655/* hash */3656the_hash_algo->final_fn(hash, eoie_context);3657strbuf_add(sb, hash, the_hash_algo->rawsz);3658}
3659
3660#define IEOT_VERSION (1)3661
3662static struct index_entry_offset_table *read_ieot_extension(const char *mmap, size_t mmap_size, size_t offset)3663{
3664const char *index = NULL;3665uint32_t extsize, ext_version;3666struct index_entry_offset_table *ieot;3667int i, nr;3668
3669/* find the IEOT extension */3670if (!offset)3671return NULL;3672while (offset <= mmap_size - the_hash_algo->rawsz - 8) {3673extsize = get_be32(mmap + offset + 4);3674if (CACHE_EXT((mmap + offset)) == CACHE_EXT_INDEXENTRYOFFSETTABLE) {3675index = mmap + offset + 4 + 4;3676break;3677}3678offset += 8;3679offset += extsize;3680}3681if (!index)3682return NULL;3683
3684/* validate the version is IEOT_VERSION */3685ext_version = get_be32(index);3686if (ext_version != IEOT_VERSION) {3687error("invalid IEOT version %d", ext_version);3688return NULL;3689}3690index += sizeof(uint32_t);3691
3692/* extension size - version bytes / bytes per entry */3693nr = (extsize - sizeof(uint32_t)) / (sizeof(uint32_t) + sizeof(uint32_t));3694if (!nr) {3695error("invalid number of IEOT entries %d", nr);3696return NULL;3697}3698ieot = xmalloc(sizeof(struct index_entry_offset_table)3699+ (nr * sizeof(struct index_entry_offset)));3700ieot->nr = nr;3701for (i = 0; i < nr; i++) {3702ieot->entries[i].offset = get_be32(index);3703index += sizeof(uint32_t);3704ieot->entries[i].nr = get_be32(index);3705index += sizeof(uint32_t);3706}3707
3708return ieot;3709}
3710
3711static void write_ieot_extension(struct strbuf *sb, struct index_entry_offset_table *ieot)3712{
3713uint32_t buffer;3714int i;3715
3716/* version */3717put_be32(&buffer, IEOT_VERSION);3718strbuf_add(sb, &buffer, sizeof(uint32_t));3719
3720/* ieot */3721for (i = 0; i < ieot->nr; i++) {3722
3723/* offset */3724put_be32(&buffer, ieot->entries[i].offset);3725strbuf_add(sb, &buffer, sizeof(uint32_t));3726
3727/* count */3728put_be32(&buffer, ieot->entries[i].nr);3729strbuf_add(sb, &buffer, sizeof(uint32_t));3730}3731}
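/*
 * IEOT payload layout written above and parsed by
 * read_ieot_extension() (illustrative):
 *
 *   4-byte version (currently 1)
 *   then, per block: 4-byte offset into the index file
 *                    4-byte count of cache entries in that block
 *
 * all in network byte order.
 */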
3732
3733void prefetch_cache_entries(const struct index_state *istate,3734must_prefetch_predicate must_prefetch)3735{
3736int i;3737struct oid_array to_fetch = OID_ARRAY_INIT;3738
3739for (i = 0; i < istate->cache_nr; i++) {3740struct cache_entry *ce = istate->cache[i];3741
3742if (S_ISGITLINK(ce->ce_mode) || !must_prefetch(ce))3743continue;3744if (!oid_object_info_extended(the_repository, &ce->oid,3745NULL,3746OBJECT_INFO_FOR_PREFETCH))3747continue;3748oid_array_append(&to_fetch, &ce->oid);3749}3750promisor_remote_get_direct(the_repository,3751to_fetch.oid, to_fetch.nr);3752oid_array_clear(&to_fetch);3753}
3754
3755static int read_one_entry_opt(struct index_state *istate,3756const struct object_id *oid,3757struct strbuf *base,3758const char *pathname,3759unsigned mode, int opt)3760{
3761int len;3762struct cache_entry *ce;3763
3764if (S_ISDIR(mode))3765return READ_TREE_RECURSIVE;3766
3767len = strlen(pathname);3768ce = make_empty_cache_entry(istate, base->len + len);3769
3770ce->ce_mode = create_ce_mode(mode);3771ce->ce_flags = create_ce_flags(1);3772ce->ce_namelen = base->len + len;3773memcpy(ce->name, base->buf, base->len);3774memcpy(ce->name + base->len, pathname, len+1);3775oidcpy(&ce->oid, oid);3776return add_index_entry(istate, ce, opt);3777}
3778
3779static int read_one_entry(const struct object_id *oid, struct strbuf *base,3780const char *pathname, unsigned mode,3781void *context)3782{
3783struct index_state *istate = context;3784return read_one_entry_opt(istate, oid, base, pathname,3785mode,3786ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK);3787}
3788
3789/*
3790* This is used when the caller knows there is no existing entries at
3791* the stage that will conflict with the entry being added.
3792*/
3793static int read_one_entry_quick(const struct object_id *oid, struct strbuf *base,3794const char *pathname, unsigned mode,3795void *context)3796{
3797struct index_state *istate = context;3798return read_one_entry_opt(istate, oid, base, pathname,3799mode, ADD_CACHE_JUST_APPEND);3800}
3801
3802/*
3803* Read the tree specified with the --with-tree option
3804* (typically, HEAD) into stage #1 and then
3805* squash those entries down to stage #0. This is used for
3806* --error-unmatch to list and check the path patterns
3807* that were given on the command line. We are not
3808* going to write this index out.
3809*/
3810void overlay_tree_on_index(struct index_state *istate,3811const char *tree_name, const char *prefix)3812{
3813struct tree *tree;3814struct object_id oid;3815struct pathspec pathspec;3816struct cache_entry *last_stage0 = NULL;3817int i;3818read_tree_fn_t fn = NULL;3819int err;3820
3821if (repo_get_oid(the_repository, tree_name, &oid))3822die("tree-ish %s not found.", tree_name);3823tree = parse_tree_indirect(&oid);3824if (!tree)3825die("bad tree-ish %s", tree_name);3826
3827/* Hoist the unmerged entries up to stage #3 to make room */3828/* TODO: audit for interaction with sparse-index. */3829ensure_full_index(istate);3830for (i = 0; i < istate->cache_nr; i++) {3831struct cache_entry *ce = istate->cache[i];3832if (!ce_stage(ce))3833continue;3834ce->ce_flags |= CE_STAGEMASK;3835}3836
3837if (prefix) {3838static const char *(matchbuf[1]);3839matchbuf[0] = NULL;3840parse_pathspec(&pathspec, PATHSPEC_ALL_MAGIC,3841PATHSPEC_PREFER_CWD, prefix, matchbuf);3842} else3843memset(&pathspec, 0, sizeof(pathspec));3844
3845/*3846* See if we have a cache entry at the stage. If so,
3847* do it the original slow way; otherwise, append and then
3848* sort at the end.
3849*/
3850for (i = 0; !fn && i < istate->cache_nr; i++) {3851const struct cache_entry *ce = istate->cache[i];3852if (ce_stage(ce) == 1)3853fn = read_one_entry;3854}3855
3856if (!fn)3857fn = read_one_entry_quick;3858err = read_tree(the_repository, tree, &pathspec, fn, istate);3859clear_pathspec(&pathspec);3860if (err)3861die("unable to read tree entries %s", tree_name);3862
3863/*3864* Sort the cache entry -- we need to nuke the cache tree, though.
3865*/
3866if (fn == read_one_entry_quick) {3867cache_tree_free(&istate->cache_tree);3868QSORT(istate->cache, istate->cache_nr, cmp_cache_name_compare);3869}3870
3871for (i = 0; i < istate->cache_nr; i++) {3872struct cache_entry *ce = istate->cache[i];3873switch (ce_stage(ce)) {3874case 0:3875last_stage0 = ce;3876/* fallthru */3877default:3878continue;3879case 1:3880/*3881* If there is stage #0 entry for this, we do not
3882* need to show it. We use CE_UPDATE bit to mark
3883* such an entry.
3884*/
3885if (last_stage0 &&3886!strcmp(last_stage0->name, ce->name))3887ce->ce_flags |= CE_UPDATE;3888}3889}3890}
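
/*
 * Illustrative sketch (not part of the original sources): a caller in
 * the style of "git ls-files --error-unmatch --with-tree=<tree-ish>"
 * could drive the helper above roughly like this; the "with_tree" and
 * "prefix" variables are assumed to come from the usual option parsing.
 *
 *	repo_read_index(the_repository);
 *	if (with_tree)
 *		overlay_tree_on_index(the_repository->index, with_tree, prefix);
 */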

struct update_callback_data {
	struct index_state *index;
	int include_sparse;
	int flags;
	int add_errors;
};

static int fix_unmerged_status(struct diff_filepair *p,
			       struct update_callback_data *data)
{
	if (p->status != DIFF_STATUS_UNMERGED)
		return p->status;
	if (!(data->flags & ADD_CACHE_IGNORE_REMOVAL) && !p->two->mode)
		/*
		 * This is not an explicit add request, and the
		 * path is missing from the working tree (deleted)
		 */
		return DIFF_STATUS_DELETED;
	else
		/*
		 * Either an explicit add request, or path exists
		 * in the working tree.  An attempt to explicitly
		 * add a path that does not exist in the working tree
		 * will be caught as an error by the caller immediately.
		 */
		return DIFF_STATUS_MODIFIED;
}
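
/*
 * A concrete, illustrative reading of the helper above (not part of the
 * original sources): for an unmerged path that is gone from the working
 * tree, a caller that honours removals (ADD_CACHE_IGNORE_REMOVAL unset,
 * as with "git add -u") gets DIFF_STATUS_DELETED and the entry is
 * dropped in update_callback() below, while a caller with
 * ADD_CACHE_IGNORE_REMOVAL set gets DIFF_STATUS_MODIFIED and lets
 * add_file_to_index() report the missing file as an error.
 */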

static void update_callback(struct diff_queue_struct *q,
			    struct diff_options *opt UNUSED, void *cbdata)
{
	int i;
	struct update_callback_data *data = cbdata;

	for (i = 0; i < q->nr; i++) {
		struct diff_filepair *p = q->queue[i];
		const char *path = p->one->path;

		if (!data->include_sparse &&
		    !path_in_sparse_checkout(path, data->index))
			continue;

		switch (fix_unmerged_status(p, data)) {
		default:
			die(_("unexpected diff status %c"), p->status);
		case DIFF_STATUS_MODIFIED:
		case DIFF_STATUS_TYPE_CHANGED:
			if (add_file_to_index(data->index, path, data->flags)) {
				if (!(data->flags & ADD_CACHE_IGNORE_ERRORS))
					die(_("updating files failed"));
				data->add_errors++;
			}
			break;
		case DIFF_STATUS_DELETED:
			if (data->flags & ADD_CACHE_IGNORE_REMOVAL)
				break;
			if (!(data->flags & ADD_CACHE_PRETEND))
				remove_file_from_index(data->index, path);
			if (data->flags & (ADD_CACHE_PRETEND|ADD_CACHE_VERBOSE))
				printf(_("remove '%s'\n"), path);
			break;
		}
	}
}

int add_files_to_cache(struct repository *repo, const char *prefix,
		       const struct pathspec *pathspec, char *ps_matched,
		       int include_sparse, int flags)
{
	struct update_callback_data data;
	struct rev_info rev;

	memset(&data, 0, sizeof(data));
	data.index = repo->index;
	data.include_sparse = include_sparse;
	data.flags = flags;

	repo_init_revisions(repo, &rev, prefix);
	setup_revisions(0, NULL, &rev, NULL);
	if (pathspec) {
		copy_pathspec(&rev.prune_data, pathspec);
		rev.ps_matched = ps_matched;
	}
	rev.diffopt.output_format = DIFF_FORMAT_CALLBACK;
	rev.diffopt.format_callback = update_callback;
	rev.diffopt.format_callback_data = &data;
	rev.diffopt.flags.override_submodule_config = 1;
	rev.max_count = 0; /* do not compare unmerged paths with stage #2 */

	/*
	 * Use an ODB transaction to optimize adding multiple objects.
	 * This function is invoked from commands other than 'add', which
	 * may not have their own transaction active.
	 */
	begin_odb_transaction();
	run_diff_files(&rev, DIFF_RACY_IS_MODIFIED);
	end_odb_transaction();

	release_revisions(&rev);
	return !!data.add_errors;
}
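
/*
 * Illustrative sketch (not part of the original sources): an "add -u"
 * style caller could drive the function above roughly as below.  The
 * lock-file handling and the "prefix"/"argv" variables are assumed
 * builtin-command boilerplate, not taken from this file.
 *
 *	struct lock_file lock = LOCK_INIT;
 *	struct pathspec pathspec;
 *
 *	repo_hold_locked_index(repo, &lock, LOCK_DIE_ON_ERROR);
 *	if (repo_read_index(repo) < 0)
 *		die(_("index file corrupt"));
 *	parse_pathspec(&pathspec, 0, PATHSPEC_PREFER_FULL, prefix, argv);
 *	if (add_files_to_cache(repo, prefix, &pathspec, NULL, 0, 0))
 *		die(_("adding files failed"));
 *	if (write_locked_index(repo->index, &lock, COMMIT_LOCK))
 *		die(_("unable to write new index file"));
 *	clear_pathspec(&pathspec);
 */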