/*
 * GIT - The information manager from hell
 *
 * Copyright (C) Linus Torvalds, 2005
 */

#define USE_THE_REPOSITORY_VARIABLE

#include "git-compat-util.h"
#include "bulk-checkin.h"
#include "config.h"
#include "date.h"
#include "diff.h"
#include "diffcore.h"
#include "hex.h"
#include "tempfile.h"
#include "lockfile.h"
#include "cache-tree.h"
#include "refs.h"
#include "dir.h"
#include "object-file.h"
#include "object-store-ll.h"
#include "oid-array.h"
#include "tree.h"
#include "commit.h"
#include "environment.h"
#include "gettext.h"
#include "mem-pool.h"
#include "name-hash.h"
#include "object-name.h"
#include "path.h"
#include "preload-index.h"
#include "read-cache.h"
#include "resolve-undo.h"
#include "revision.h"
#include "strbuf.h"
#include "trace2.h"
#include "varint.h"
#include "split-index.h"
#include "symlinks.h"
#include "utf8.h"
#include "fsmonitor.h"
#include "thread-utils.h"
#include "progress.h"
#include "sparse-index.h"
#include "csum-file.h"
#include "promisor-remote.h"
#include "hook.h"

/* Mask for the name length in ce_flags in the on-disk index */

#define CE_NAMEMASK  (0x0fff)

/* Index extensions.
 *
 * The first letter should be 'A'..'Z' for extensions that are not
 * necessary for a correct operation (i.e. optimization data).
 * When new extensions are added that _need_ to be understood in
 * order to correctly interpret the index file, pick a character that
 * is outside the range, to cause the reader to abort.
 */

#define CACHE_EXT(s) ( (s[0]<<24)|(s[1]<<16)|(s[2]<<8)|(s[3]) )
#define CACHE_EXT_TREE 0x54524545	/* "TREE" */
#define CACHE_EXT_RESOLVE_UNDO 0x52455543 /* "REUC" */
#define CACHE_EXT_LINK 0x6c696e6b	  /* "link" */
#define CACHE_EXT_UNTRACKED 0x554E5452	  /* "UNTR" */
#define CACHE_EXT_FSMONITOR 0x46534D4E	  /* "FSMN" */
#define CACHE_EXT_ENDOFINDEXENTRIES 0x454F4945	/* "EOIE" */
#define CACHE_EXT_INDEXENTRYOFFSETTABLE 0x49454F54 /* "IEOT" */
#define CACHE_EXT_SPARSE_DIRECTORIES 0x73646972 /* "sdir" */
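/*
 * Illustrative note (added; not in the original file): CACHE_EXT packs a
 * four-character tag into a big-endian 32-bit value, so for example
 * CACHE_EXT("TREE") evaluates to ('T'<<24)|('R'<<16)|('E'<<8)|'E',
 * i.e. 0x54524545, which is why it matches CACHE_EXT_TREE above and can
 * be used in a switch such as the one in read_index_extension() below.
 */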

/* changes that can be kept in $GIT_DIR/index (basically all extensions) */
#define EXTMASK (RESOLVE_UNDO_CHANGED | CACHE_TREE_CHANGED | \
		 CE_ENTRY_ADDED | CE_ENTRY_REMOVED | CE_ENTRY_CHANGED | \
		 SPLIT_INDEX_ORDERED | UNTRACKED_CHANGED | FSMONITOR_CHANGED)


/*
 * This is an estimate of the pathname length in the index.  We use
 * this for V4 index files to guess the un-deltafied size of the index
 * in memory because of pathname deltafication.  This is not required
 * for V2/V3 index formats because their pathnames are not compressed.
 * If the initial amount of memory set aside is not sufficient, the
 * mem pool will allocate extra memory.
 */
#define CACHE_ENTRY_PATH_LENGTH 80

enum index_search_mode {
	NO_EXPAND_SPARSE = 0,
	EXPAND_SPARSE = 1
};

static inline struct cache_entry *mem_pool__ce_alloc(struct mem_pool *mem_pool, size_t len)
{
	struct cache_entry *ce;
	ce = mem_pool_alloc(mem_pool, cache_entry_size(len));
	ce->mem_pool_allocated = 1;
	return ce;
}

static inline struct cache_entry *mem_pool__ce_calloc(struct mem_pool *mem_pool, size_t len)
{
	struct cache_entry * ce;
	ce = mem_pool_calloc(mem_pool, 1, cache_entry_size(len));
	ce->mem_pool_allocated = 1;
	return ce;
}

static struct mem_pool *find_mem_pool(struct index_state *istate)
{
	struct mem_pool **pool_ptr;

	if (istate->split_index && istate->split_index->base)
		pool_ptr = &istate->split_index->base->ce_mem_pool;
	else
		pool_ptr = &istate->ce_mem_pool;

	if (!*pool_ptr) {
		*pool_ptr = xmalloc(sizeof(**pool_ptr));
		mem_pool_init(*pool_ptr, 0);
	}

	return *pool_ptr;
}
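/*
 * Explanatory note (added; not in the original file): with a split
 * index, cache entries can end up shared with the base index, so they
 * are allocated from the base index's pool above; this presumably ties
 * their lifetime to the base rather than to the transient front-end
 * index.
 */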

static const char *alternate_index_output;

static void set_index_entry(struct index_state *istate, int nr, struct cache_entry *ce)
{
	if (S_ISSPARSEDIR(ce->ce_mode))
		istate->sparse_index = INDEX_COLLAPSED;

	istate->cache[nr] = ce;
	add_name_hash(istate, ce);
}

static void replace_index_entry(struct index_state *istate, int nr, struct cache_entry *ce)
{
	struct cache_entry *old = istate->cache[nr];

	replace_index_entry_in_base(istate, old, ce);
	remove_name_hash(istate, old);
	discard_cache_entry(old);
	ce->ce_flags &= ~CE_HASHED;
	set_index_entry(istate, nr, ce);
	ce->ce_flags |= CE_UPDATE_IN_BASE;
	mark_fsmonitor_invalid(istate, ce);
	istate->cache_changed |= CE_ENTRY_CHANGED;
}

void rename_index_entry_at(struct index_state *istate, int nr, const char *new_name)
{
	struct cache_entry *old_entry = istate->cache[nr], *new_entry, *refreshed;
	int namelen = strlen(new_name);

	new_entry = make_empty_cache_entry(istate, namelen);
	copy_cache_entry(new_entry, old_entry);
	new_entry->ce_flags &= ~CE_HASHED;
	new_entry->ce_namelen = namelen;
	new_entry->index = 0;
	memcpy(new_entry->name, new_name, namelen + 1);

	cache_tree_invalidate_path(istate, old_entry->name);
	untracked_cache_remove_from_index(istate, old_entry->name);
	remove_index_entry_at(istate, nr);

	/*
	 * Refresh the new index entry. Using 'refresh_cache_entry' ensures
	 * we only update stat info if the entry is otherwise up-to-date (i.e.,
	 * the contents/mode haven't changed). This ensures that we reflect the
	 * 'ctime' of the rename in the index without (incorrectly) updating
	 * the cached stat info to reflect unstaged changes on disk.
	 */
	refreshed = refresh_cache_entry(istate, new_entry, CE_MATCH_REFRESH);
	if (refreshed && refreshed != new_entry) {
		add_index_entry(istate, refreshed, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE);
		discard_cache_entry(new_entry);
	} else
		add_index_entry(istate, new_entry, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE);
}

/*
 * This only updates the "non-critical" parts of the directory
 * cache, i.e. the parts that aren't tracked by GIT and are only
 * used to validate the cache.
 */
void fill_stat_cache_info(struct index_state *istate, struct cache_entry *ce, struct stat *st)
{
	fill_stat_data(&ce->ce_stat_data, st);

	if (assume_unchanged)
		ce->ce_flags |= CE_VALID;

	if (S_ISREG(st->st_mode)) {
		ce_mark_uptodate(ce);
		mark_fsmonitor_valid(istate, ce);
	}
}

static unsigned int st_mode_from_ce(const struct cache_entry *ce)
{
	extern int trust_executable_bit, has_symlinks;

	switch (ce->ce_mode & S_IFMT) {
	case S_IFLNK:
		return has_symlinks ? S_IFLNK : (S_IFREG | 0644);
	case S_IFREG:
		return (ce->ce_mode & (trust_executable_bit ? 0755 : 0644)) | S_IFREG;
	case S_IFGITLINK:
		return S_IFDIR | 0755;
	case S_IFDIR:
		return ce->ce_mode;
	default:
		BUG("unsupported ce_mode: %o", ce->ce_mode);
	}
}

int fake_lstat(const struct cache_entry *ce, struct stat *st)
{
	fake_lstat_data(&ce->ce_stat_data, st);
	st->st_mode = st_mode_from_ce(ce);

	/* always succeed as lstat() replacement */
	return 0;
}

static int ce_compare_data(struct index_state *istate,
			   const struct cache_entry *ce,
			   struct stat *st)
{
	int match = -1;
	int fd = git_open_cloexec(ce->name, O_RDONLY);

	if (fd >= 0) {
		struct object_id oid;
		if (!index_fd(istate, &oid, fd, st, OBJ_BLOB, ce->name, 0))
			match = !oideq(&oid, &ce->oid);
		/* index_fd() closed the file descriptor already */
	}
	return match;
}

static int ce_compare_link(const struct cache_entry *ce, size_t expected_size)
{
	int match = -1;
	void *buffer;
	unsigned long size;
	enum object_type type;
	struct strbuf sb = STRBUF_INIT;

	if (strbuf_readlink(&sb, ce->name, expected_size))
		return -1;

	buffer = repo_read_object_file(the_repository, &ce->oid, &type, &size);
	if (buffer) {
		if (size == sb.len)
			match = memcmp(buffer, sb.buf, size);
		free(buffer);
	}
	strbuf_release(&sb);
	return match;
}

static int ce_compare_gitlink(const struct cache_entry *ce)
{
	struct object_id oid;

	/*
	 * We don't actually require that the .git directory
	 * under GITLINK directory be a valid git directory. It
	 * might even be missing (in case nobody populated that
	 * sub-project).
	 *
	 * If so, we consider it always to match.
	 */
	if (repo_resolve_gitlink_ref(the_repository, ce->name,
				     "HEAD", &oid) < 0)
		return 0;
	return !oideq(&oid, &ce->oid);
}

static int ce_modified_check_fs(struct index_state *istate,
				const struct cache_entry *ce,
				struct stat *st)
{
	switch (st->st_mode & S_IFMT) {
	case S_IFREG:
		if (ce_compare_data(istate, ce, st))
			return DATA_CHANGED;
		break;
	case S_IFLNK:
		if (ce_compare_link(ce, xsize_t(st->st_size)))
			return DATA_CHANGED;
		break;
	case S_IFDIR:
		if (S_ISGITLINK(ce->ce_mode))
			return ce_compare_gitlink(ce) ? DATA_CHANGED : 0;
		/* else fallthrough */
	default:
		return TYPE_CHANGED;
	}
	return 0;
}

static int ce_match_stat_basic(const struct cache_entry *ce, struct stat *st)
{
	unsigned int changed = 0;

	if (ce->ce_flags & CE_REMOVE)
		return MODE_CHANGED | DATA_CHANGED | TYPE_CHANGED;

	switch (ce->ce_mode & S_IFMT) {
	case S_IFREG:
		changed |= !S_ISREG(st->st_mode) ? TYPE_CHANGED : 0;
		/* We consider only the owner x bit to be relevant for
		 * "mode changes"
		 */
		if (trust_executable_bit &&
		    (0100 & (ce->ce_mode ^ st->st_mode)))
			changed |= MODE_CHANGED;
		break;
	case S_IFLNK:
		if (!S_ISLNK(st->st_mode) &&
		    (has_symlinks || !S_ISREG(st->st_mode)))
			changed |= TYPE_CHANGED;
		break;
	case S_IFGITLINK:
		/* We ignore most of the st_xxx fields for gitlinks */
		if (!S_ISDIR(st->st_mode))
			changed |= TYPE_CHANGED;
		else if (ce_compare_gitlink(ce))
			changed |= DATA_CHANGED;
		return changed;
	default:
		BUG("unsupported ce_mode: %o", ce->ce_mode);
	}

	changed |= match_stat_data(&ce->ce_stat_data, st);

	/* Racily smudged entry? */
	if (!ce->ce_stat_data.sd_size) {
		if (!is_empty_blob_oid(&ce->oid, the_repository->hash_algo))
			changed |= DATA_CHANGED;
	}

	return changed;
}

static int is_racy_stat(const struct index_state *istate,
			const struct stat_data *sd)
{
	return (istate->timestamp.sec &&
#ifdef USE_NSEC
		 /* nanosecond timestamped files can also be racy! */
		(istate->timestamp.sec < sd->sd_mtime.sec ||
		 (istate->timestamp.sec == sd->sd_mtime.sec &&
		  istate->timestamp.nsec <= sd->sd_mtime.nsec))
#else
		istate->timestamp.sec <= sd->sd_mtime.sec
#endif
		);
}
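/*
 * Worked example (added note, not in the original file): if the index
 * was written at t=100s and a file's mtime is also 100s, the file may
 * have been modified after the index recorded it within the same
 * second, so is_racy_stat() reports it as racy ("<=", not "<") and
 * callers fall back to comparing actual contents.
 */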

int is_racy_timestamp(const struct index_state *istate,
			     const struct cache_entry *ce)
{
	return (!S_ISGITLINK(ce->ce_mode) &&
		is_racy_stat(istate, &ce->ce_stat_data));
}

int match_stat_data_racy(const struct index_state *istate,
			 const struct stat_data *sd, struct stat *st)
{
	if (is_racy_stat(istate, sd))
		return MTIME_CHANGED;
	return match_stat_data(sd, st);
}

int ie_match_stat(struct index_state *istate,
		  const struct cache_entry *ce, struct stat *st,
		  unsigned int options)
{
	unsigned int changed;
	int ignore_valid = options & CE_MATCH_IGNORE_VALID;
	int ignore_skip_worktree = options & CE_MATCH_IGNORE_SKIP_WORKTREE;
	int assume_racy_is_modified = options & CE_MATCH_RACY_IS_DIRTY;
	int ignore_fsmonitor = options & CE_MATCH_IGNORE_FSMONITOR;

	if (!ignore_fsmonitor)
		refresh_fsmonitor(istate);
	/*
	 * If it's marked as always valid in the index, it's
	 * valid whatever the checked-out copy says.
	 *
	 * skip-worktree has the same effect with higher precedence
	 */
	if (!ignore_skip_worktree && ce_skip_worktree(ce))
		return 0;
	if (!ignore_valid && (ce->ce_flags & CE_VALID))
		return 0;
	if (!ignore_fsmonitor && (ce->ce_flags & CE_FSMONITOR_VALID))
		return 0;

	/*
	 * Intent-to-add entries have not been added, so the index entry
	 * by definition never matches what is in the work tree until it
	 * actually gets added.
	 */
	if (ce_intent_to_add(ce))
		return DATA_CHANGED | TYPE_CHANGED | MODE_CHANGED;

	changed = ce_match_stat_basic(ce, st);

	/*
	 * Within 1 second of this sequence:
	 * 	echo xyzzy >file && git-update-index --add file
	 * running this command:
	 * 	echo frotz >file
	 * would give a falsely clean cache entry.  The mtime and
	 * length match the cache, and other stat fields do not change.
	 *
	 * We could detect this at update-index time (the cache entry
	 * being registered/updated records the same time as "now")
	 * and delay the return from git-update-index, but that would
	 * effectively mean we can make at most one commit per second,
	 * which is not acceptable.  Instead, we check cache entries
	 * whose mtime are the same as the index file timestamp more
	 * carefully than others.
	 */
	if (!changed && is_racy_timestamp(istate, ce)) {
		if (assume_racy_is_modified)
			changed |= DATA_CHANGED;
		else
			changed |= ce_modified_check_fs(istate, ce, st);
	}

	return changed;
}

int ie_modified(struct index_state *istate,
		const struct cache_entry *ce,
		struct stat *st, unsigned int options)
{
	int changed, changed_fs;

	changed = ie_match_stat(istate, ce, st, options);
	if (!changed)
		return 0;
	/*
	 * If the mode or type has changed, there's no point in trying
	 * to refresh the entry - it's not going to match
	 */
	if (changed & (MODE_CHANGED | TYPE_CHANGED))
		return changed;

	/*
	 * Immediately after read-tree or update-index --cacheinfo,
	 * the length field is zero, as we have never even read the
	 * lstat(2) information once, and we cannot trust DATA_CHANGED
	 * returned by ie_match_stat() which in turn was returned by
	 * ce_match_stat_basic() to signal that the filesize of the
	 * blob changed.  We have to actually go to the filesystem to
	 * see if the contents match, and if so, should answer "unchanged".
	 *
	 * The logic does not apply to gitlinks, as ce_match_stat_basic()
	 * already has checked the actual HEAD from the filesystem in the
	 * subproject.  If ie_match_stat() already said it is different,
	 * then we know it is.
	 */
	if ((changed & DATA_CHANGED) &&
	    (S_ISGITLINK(ce->ce_mode) || ce->ce_stat_data.sd_size != 0))
		return changed;

	changed_fs = ce_modified_check_fs(istate, ce, st);
	if (changed_fs)
		return changed | changed_fs;
	return 0;
}

static int cache_name_stage_compare(const char *name1, int len1, int stage1,
				    const char *name2, int len2, int stage2)
{
	int cmp;

	cmp = name_compare(name1, len1, name2, len2);
	if (cmp)
		return cmp;

	if (stage1 < stage2)
		return -1;
	if (stage1 > stage2)
		return 1;
	return 0;
}
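/*
 * Example of the resulting index ordering (added note, not in the
 * original file): entries sort by path bytes first and by stage only
 * as a tie-break, so "foo" (stage 1) < "foo" (stage 3) < "foo.c" <
 * "foo/bar", because '.' (0x2e) sorts before '/' (0x2f).
 */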

int cmp_cache_name_compare(const void *a_, const void *b_)
{
	const struct cache_entry *ce1, *ce2;

	ce1 = *((const struct cache_entry **)a_);
	ce2 = *((const struct cache_entry **)b_);
	return cache_name_stage_compare(ce1->name, ce1->ce_namelen, ce_stage(ce1),
				  ce2->name, ce2->ce_namelen, ce_stage(ce2));
}

static int index_name_stage_pos(struct index_state *istate,
				const char *name, int namelen,
				int stage,
				enum index_search_mode search_mode)
{
	int first, last;

	first = 0;
	last = istate->cache_nr;
	while (last > first) {
		int next = first + ((last - first) >> 1);
		struct cache_entry *ce = istate->cache[next];
		int cmp = cache_name_stage_compare(name, namelen, stage, ce->name, ce_namelen(ce), ce_stage(ce));
		if (!cmp)
			return next;
		if (cmp < 0) {
			last = next;
			continue;
		}
		first = next+1;
	}

	if (search_mode == EXPAND_SPARSE && istate->sparse_index &&
	    first > 0) {
		/* Note: first <= istate->cache_nr */
		struct cache_entry *ce = istate->cache[first - 1];

		/*
		 * If we are in a sparse-index _and_ the entry before the
		 * insertion position is a sparse-directory entry that is
		 * an ancestor of 'name', then we need to expand the index
		 * and search again. This will only trigger once, because
		 * thereafter the index is fully expanded.
		 */
		if (S_ISSPARSEDIR(ce->ce_mode) &&
		    ce_namelen(ce) < namelen &&
		    !strncmp(name, ce->name, ce_namelen(ce))) {
			ensure_full_index(istate);
			return index_name_stage_pos(istate, name, namelen, stage, search_mode);
		}
	}

	return -first-1;
}
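/*
 * Return-value convention (added note, not in the original file): a
 * non-negative result is the position of an exact match; a negative
 * result encodes the insertion point, recoverable as -pos-1.  For
 * example, -1 means "insert at position 0".  Callers below such as
 * remove_file_from_index() rely on exactly this decoding.
 */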

int index_name_pos(struct index_state *istate, const char *name, int namelen)
{
	return index_name_stage_pos(istate, name, namelen, 0, EXPAND_SPARSE);
}

int index_name_pos_sparse(struct index_state *istate, const char *name, int namelen)
{
	return index_name_stage_pos(istate, name, namelen, 0, NO_EXPAND_SPARSE);
}

int index_entry_exists(struct index_state *istate, const char *name, int namelen)
{
	return index_name_stage_pos(istate, name, namelen, 0, NO_EXPAND_SPARSE) >= 0;
}

int remove_index_entry_at(struct index_state *istate, int pos)
{
	struct cache_entry *ce = istate->cache[pos];

	record_resolve_undo(istate, ce);
	remove_name_hash(istate, ce);
	save_or_free_index_entry(istate, ce);
	istate->cache_changed |= CE_ENTRY_REMOVED;
	istate->cache_nr--;
	if (pos >= istate->cache_nr)
		return 0;
	MOVE_ARRAY(istate->cache + pos, istate->cache + pos + 1,
		   istate->cache_nr - pos);
	return 1;
}
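/*
 * Added note (not in the original file): the return value tells the
 * caller whether another entry was shifted into 'pos' (1) or the
 * removed entry was the last one (0); loops such as the one in
 * add_index_entry_with_check() use the 0 case to stop iterating.
 */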

/*
 * Remove all cache entries marked for removal, that is where
 * CE_REMOVE is set in ce_flags.  This is much more effective than
 * calling remove_index_entry_at() for each entry to be removed.
 */
void remove_marked_cache_entries(struct index_state *istate, int invalidate)
{
	struct cache_entry **ce_array = istate->cache;
	unsigned int i, j;

	for (i = j = 0; i < istate->cache_nr; i++) {
		if (ce_array[i]->ce_flags & CE_REMOVE) {
			if (invalidate) {
				cache_tree_invalidate_path(istate,
							   ce_array[i]->name);
				untracked_cache_remove_from_index(istate,
								  ce_array[i]->name);
			}
			remove_name_hash(istate, ce_array[i]);
			save_or_free_index_entry(istate, ce_array[i]);
		}
		else
			ce_array[j++] = ce_array[i];
	}
	if (j == istate->cache_nr)
		return;
	istate->cache_changed |= CE_ENTRY_REMOVED;
	istate->cache_nr = j;
}

int remove_file_from_index(struct index_state *istate, const char *path)
{
	int pos = index_name_pos(istate, path, strlen(path));
	if (pos < 0)
		pos = -pos-1;
	cache_tree_invalidate_path(istate, path);
	untracked_cache_remove_from_index(istate, path);
	while (pos < istate->cache_nr && !strcmp(istate->cache[pos]->name, path))
		remove_index_entry_at(istate, pos);
	return 0;
}

static int compare_name(struct cache_entry *ce, const char *path, int namelen)
{
	return namelen != ce_namelen(ce) || memcmp(path, ce->name, namelen);
}

static int index_name_pos_also_unmerged(struct index_state *istate,
	const char *path, int namelen)
{
	int pos = index_name_pos(istate, path, namelen);
	struct cache_entry *ce;

	if (pos >= 0)
		return pos;

	/* maybe unmerged? */
	pos = -1 - pos;
	if (pos >= istate->cache_nr ||
			compare_name((ce = istate->cache[pos]), path, namelen))
		return -1;

	/* order of preference: stage 2, 1, 3 */
	if (ce_stage(ce) == 1 && pos + 1 < istate->cache_nr &&
			ce_stage((ce = istate->cache[pos + 1])) == 2 &&
			!compare_name(ce, path, namelen))
		pos++;
	return pos;
}

static int different_name(struct cache_entry *ce, struct cache_entry *alias)
{
	int len = ce_namelen(ce);
	return ce_namelen(alias) != len || memcmp(ce->name, alias->name, len);
}

/*
 * If we add a filename that aliases in the cache, we will use the
 * name that we already have - but we don't want to update the same
 * alias twice, because that implies that there were actually two
 * different files with aliasing names!
 *
 * So we use the CE_ADDED flag to verify that the alias was an old
 * one before we accept it as a replacement.
 */
static struct cache_entry *create_alias_ce(struct index_state *istate,
					   struct cache_entry *ce,
					   struct cache_entry *alias)
{
	int len;
	struct cache_entry *new_entry;

	if (alias->ce_flags & CE_ADDED)
		die(_("will not add file alias '%s' ('%s' already exists in index)"),
		    ce->name, alias->name);

	/* Ok, create the new entry using the name of the existing alias */
	len = ce_namelen(alias);
	new_entry = make_empty_cache_entry(istate, len);
	memcpy(new_entry->name, alias->name, len);
	copy_cache_entry(new_entry, ce);
	save_or_free_index_entry(istate, ce);
	return new_entry;
}

void set_object_name_for_intent_to_add_entry(struct cache_entry *ce)
{
	struct object_id oid;
	if (write_object_file("", 0, OBJ_BLOB, &oid))
		die(_("cannot create an empty blob in the object database"));
	oidcpy(&ce->oid, &oid);
}

int add_to_index(struct index_state *istate, const char *path, struct stat *st, int flags)
{
	int namelen, was_same;
	mode_t st_mode = st->st_mode;
	struct cache_entry *ce, *alias = NULL;
	unsigned ce_option = CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE|CE_MATCH_RACY_IS_DIRTY;
	int verbose = flags & (ADD_CACHE_VERBOSE | ADD_CACHE_PRETEND);
	int pretend = flags & ADD_CACHE_PRETEND;
	int intent_only = flags & ADD_CACHE_INTENT;
	int add_option = (ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE|
			  (intent_only ? ADD_CACHE_NEW_ONLY : 0));
	unsigned hash_flags = pretend ? 0 : HASH_WRITE_OBJECT;
	struct object_id oid;

	if (flags & ADD_CACHE_RENORMALIZE)
		hash_flags |= HASH_RENORMALIZE;

	if (!S_ISREG(st_mode) && !S_ISLNK(st_mode) && !S_ISDIR(st_mode))
		return error(_("%s: can only add regular files, symbolic links or git-directories"), path);

	namelen = strlen(path);
	if (S_ISDIR(st_mode)) {
		if (repo_resolve_gitlink_ref(the_repository, path, "HEAD", &oid) < 0)
			return error(_("'%s' does not have a commit checked out"), path);
		while (namelen && path[namelen-1] == '/')
			namelen--;
	}
	ce = make_empty_cache_entry(istate, namelen);
	memcpy(ce->name, path, namelen);
	ce->ce_namelen = namelen;
	if (!intent_only)
		fill_stat_cache_info(istate, ce, st);
	else
		ce->ce_flags |= CE_INTENT_TO_ADD;


	if (trust_executable_bit && has_symlinks) {
		ce->ce_mode = create_ce_mode(st_mode);
	} else {
		/* If there is an existing entry, pick the mode bits and type
		 * from it, otherwise assume unexecutable regular file.
		 */
		struct cache_entry *ent;
		int pos = index_name_pos_also_unmerged(istate, path, namelen);

		ent = (0 <= pos) ? istate->cache[pos] : NULL;
		ce->ce_mode = ce_mode_from_stat(ent, st_mode);
	}

	/* When core.ignorecase=true, determine if a directory of the same name but differing
	 * case already exists within the Git repository.  If it does, ensure the directory
	 * case of the file being added to the repository matches (is folded into) the existing
	 * entry's directory case.
	 */
	if (ignore_case) {
		adjust_dirname_case(istate, ce->name);
	}
	if (!(flags & ADD_CACHE_RENORMALIZE)) {
		alias = index_file_exists(istate, ce->name,
					  ce_namelen(ce), ignore_case);
		if (alias &&
		    !ce_stage(alias) &&
		    !ie_match_stat(istate, alias, st, ce_option)) {
			/* Nothing changed, really */
			if (!S_ISGITLINK(alias->ce_mode))
				ce_mark_uptodate(alias);
			alias->ce_flags |= CE_ADDED;

			discard_cache_entry(ce);
			return 0;
		}
	}
	if (!intent_only) {
		if (index_path(istate, &ce->oid, path, st, hash_flags)) {
			discard_cache_entry(ce);
			return error(_("unable to index file '%s'"), path);
		}
	} else
		set_object_name_for_intent_to_add_entry(ce);

	if (ignore_case && alias && different_name(ce, alias))
		ce = create_alias_ce(istate, ce, alias);
	ce->ce_flags |= CE_ADDED;

	/* It was suspected to be racily clean, but it turns out to be Ok */
	was_same = (alias &&
		    !ce_stage(alias) &&
		    oideq(&alias->oid, &ce->oid) &&
		    ce->ce_mode == alias->ce_mode);

	if (pretend)
		discard_cache_entry(ce);
	else if (add_index_entry(istate, ce, add_option)) {
		discard_cache_entry(ce);
		return error(_("unable to add '%s' to index"), path);
	}
	if (verbose && !was_same)
		printf("add '%s'\n", path);
	return 0;
}

int add_file_to_index(struct index_state *istate, const char *path, int flags)
{
	struct stat st;
	if (lstat(path, &st))
		die_errno(_("unable to stat '%s'"), path);
	return add_to_index(istate, path, &st, flags);
}

struct cache_entry *make_empty_cache_entry(struct index_state *istate, size_t len)
{
	return mem_pool__ce_calloc(find_mem_pool(istate), len);
}

struct cache_entry *make_empty_transient_cache_entry(size_t len,
						     struct mem_pool *ce_mem_pool)
{
	if (ce_mem_pool)
		return mem_pool__ce_calloc(ce_mem_pool, len);
	return xcalloc(1, cache_entry_size(len));
}

enum verify_path_result {
	PATH_OK,
	PATH_INVALID,
	PATH_DIR_WITH_SEP,
};

static enum verify_path_result verify_path_internal(const char *, unsigned);

int verify_path(const char *path, unsigned mode)
{
	return verify_path_internal(path, mode) == PATH_OK;
}

struct cache_entry *make_cache_entry(struct index_state *istate,
				     unsigned int mode,
				     const struct object_id *oid,
				     const char *path,
				     int stage,
				     unsigned int refresh_options)
{
	struct cache_entry *ce, *ret;
	int len;

	if (verify_path_internal(path, mode) == PATH_INVALID) {
		error(_("invalid path '%s'"), path);
		return NULL;
	}

	len = strlen(path);
	ce = make_empty_cache_entry(istate, len);

	oidcpy(&ce->oid, oid);
	memcpy(ce->name, path, len);
	ce->ce_flags = create_ce_flags(stage);
	ce->ce_namelen = len;
	ce->ce_mode = create_ce_mode(mode);

	ret = refresh_cache_entry(istate, ce, refresh_options);
	if (ret != ce)
		discard_cache_entry(ce);
	return ret;
}

struct cache_entry *make_transient_cache_entry(unsigned int mode,
					       const struct object_id *oid,
					       const char *path,
					       int stage,
					       struct mem_pool *ce_mem_pool)
{
	struct cache_entry *ce;
	int len;

	if (!verify_path(path, mode)) {
		error(_("invalid path '%s'"), path);
		return NULL;
	}

	len = strlen(path);
	ce = make_empty_transient_cache_entry(len, ce_mem_pool);

	oidcpy(&ce->oid, oid);
	memcpy(ce->name, path, len);
	ce->ce_flags = create_ce_flags(stage);
	ce->ce_namelen = len;
	ce->ce_mode = create_ce_mode(mode);

	return ce;
}

/*
 * Chmod an index entry with either +x or -x.
 *
 * Returns -1 if the chmod for the particular cache entry failed (if it's
 * not a regular file), -2 if an invalid flip argument is passed in, 0
 * otherwise.
 */
int chmod_index_entry(struct index_state *istate, struct cache_entry *ce,
		      char flip)
{
	if (!S_ISREG(ce->ce_mode))
		return -1;
	switch (flip) {
	case '+':
		ce->ce_mode |= 0111;
		break;
	case '-':
		ce->ce_mode &= ~0111;
		break;
	default:
		return -2;
	}
	cache_tree_invalidate_path(istate, ce->name);
	ce->ce_flags |= CE_UPDATE_IN_BASE;
	mark_fsmonitor_invalid(istate, ce);
	istate->cache_changed |= CE_ENTRY_CHANGED;

	return 0;
}

int ce_same_name(const struct cache_entry *a, const struct cache_entry *b)
{
	int len = ce_namelen(a);
	return ce_namelen(b) == len && !memcmp(a->name, b->name, len);
}

/*
 * We fundamentally don't like some paths: we don't want
 * dot or dot-dot anywhere, and for obvious reasons don't
 * want to recurse into ".git" either.
 *
 * Also, we don't want double slashes or slashes at the
 * end that can make pathnames ambiguous.
 */
static int verify_dotfile(const char *rest, unsigned mode)
{
	/*
	 * The first character was '.', but that has already been
	 * discarded; we now test the rest.
	 */

	/* "." is not allowed */
	if (*rest == '\0' || is_dir_sep(*rest))
		return 0;

	switch (*rest) {
	/*
	 * ".git" followed by NUL or slash is bad. Note that we match
	 * case-insensitively here, even if ignore_case is not set.
	 * This outlaws ".GIT" everywhere out of an abundance of caution,
	 * since there's really no good reason to allow it.
	 *
	 * Once we've seen ".git", we can also find ".gitmodules", etc (also
	 * case-insensitively).
	 */
	case 'g':
	case 'G':
		if (rest[1] != 'i' && rest[1] != 'I')
			break;
		if (rest[2] != 't' && rest[2] != 'T')
			break;
		if (rest[3] == '\0' || is_dir_sep(rest[3]))
			return 0;
		if (S_ISLNK(mode)) {
			rest += 3;
			if (skip_iprefix(rest, "modules", &rest) &&
			    (*rest == '\0' || is_dir_sep(*rest)))
				return 0;
		}
		break;
	case '.':
		if (rest[1] == '\0' || is_dir_sep(rest[1]))
			return 0;
	}
	return 1;
}

static enum verify_path_result verify_path_internal(const char *path,
						    unsigned mode)
{
	char c = 0;

	if (has_dos_drive_prefix(path))
		return PATH_INVALID;

	if (!is_valid_path(path))
		return PATH_INVALID;

	goto inside;
	for (;;) {
		if (!c)
			return PATH_OK;
		if (is_dir_sep(c)) {
inside:
			if (protect_hfs) {

				if (is_hfs_dotgit(path))
					return PATH_INVALID;
				if (S_ISLNK(mode)) {
					if (is_hfs_dotgitmodules(path))
						return PATH_INVALID;
				}
			}
			if (protect_ntfs) {
#if defined GIT_WINDOWS_NATIVE || defined __CYGWIN__
				if (c == '\\')
					return PATH_INVALID;
#endif
				if (is_ntfs_dotgit(path))
					return PATH_INVALID;
				if (S_ISLNK(mode)) {
					if (is_ntfs_dotgitmodules(path))
						return PATH_INVALID;
				}
			}

			c = *path++;
			if ((c == '.' && !verify_dotfile(path, mode)) ||
			    is_dir_sep(c))
				return PATH_INVALID;
			/*
			 * allow terminating directory separators for
			 * sparse directory entries.
			 */
			if (c == '\0')
				return S_ISDIR(mode) ? PATH_DIR_WITH_SEP :
						       PATH_INVALID;
		} else if (c == '\\' && protect_ntfs) {
			if (is_ntfs_dotgit(path))
				return PATH_INVALID;
			if (S_ISLNK(mode)) {
				if (is_ntfs_dotgitmodules(path))
					return PATH_INVALID;
			}
		}

		c = *path++;
	}
}
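/*
 * Examples (added note, not in the original file): verify_path()
 * rejects "." and ".." components, any ".git" component (compared
 * case-insensitively, so "a/.GIT/b" is invalid too), double slashes
 * as in "a//b", and trailing slashes, except that a sparse directory
 * entry may end in '/' (PATH_DIR_WITH_SEP above).
 */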

/*
 * Do we have another file that has the beginning components being a
 * proper superset of the name we're trying to add?
 */
static int has_file_name(struct index_state *istate,
			 const struct cache_entry *ce, int pos, int ok_to_replace)
{
	int retval = 0;
	int len = ce_namelen(ce);
	int stage = ce_stage(ce);
	const char *name = ce->name;

	while (pos < istate->cache_nr) {
		struct cache_entry *p = istate->cache[pos++];

		if (len >= ce_namelen(p))
			break;
		if (memcmp(name, p->name, len))
			break;
		if (ce_stage(p) != stage)
			continue;
		if (p->name[len] != '/')
			continue;
		if (p->ce_flags & CE_REMOVE)
			continue;
		retval = -1;
		if (!ok_to_replace)
			break;
		remove_index_entry_at(istate, --pos);
	}
	return retval;
}


/*
 * Like strcmp(), but also return the offset of the first change.
 * If strings are equal, return the length.
 */
int strcmp_offset(const char *s1, const char *s2, size_t *first_change)
{
	size_t k;

	if (!first_change)
		return strcmp(s1, s2);

	for (k = 0; s1[k] == s2[k]; k++)
		if (s1[k] == '\0')
			break;

	*first_change = k;
	return (unsigned char)s1[k] - (unsigned char)s2[k];
}
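/*
 * Worked example (added note, not in the original file):
 * strcmp_offset("dir/a", "dir/b", &off) returns a negative value with
 * off == 4 (the index of the first differing byte), while
 * strcmp_offset("dir", "dir", &off) returns 0 with off == 3, i.e. the
 * common length.  has_dir_name() below uses the offset to decide
 * quickly whether the new entry can conflict with the last one.
 */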

/*
 * Do we have another file with a pathname that is a proper
 * subset of the name we're trying to add?
 *
 * That is, is there another file in the index with a path
 * that matches a sub-directory in the given entry?
 */
static int has_dir_name(struct index_state *istate,
			const struct cache_entry *ce, int pos, int ok_to_replace)
{
	int retval = 0;
	int stage = ce_stage(ce);
	const char *name = ce->name;
	const char *slash = name + ce_namelen(ce);
	size_t len_eq_last;
	int cmp_last = 0;

	/*
	 * We are frequently called during an iteration on a sorted
	 * list of pathnames and while building a new index.  Therefore,
	 * there is a high probability that this entry will eventually
	 * be appended to the index, rather than inserted in the middle.
	 * If we can confirm that, we can avoid binary searches on the
	 * components of the pathname.
	 *
	 * Compare the entry's full path with the last path in the index.
	 */
	if (istate->cache_nr > 0) {
		cmp_last = strcmp_offset(name,
			istate->cache[istate->cache_nr - 1]->name,
			&len_eq_last);
		if (cmp_last > 0) {
			if (name[len_eq_last] != '/') {
				/*
				 * The entry sorts AFTER the last one in the
				 * index.
				 *
				 * If there were a conflict with "file", then our
				 * name would start with "file/" and the last index
				 * entry would start with "file" but not "file/".
				 *
				 * The next character after common prefix is
				 * not '/', so there can be no conflict.
				 */
				return retval;
			} else {
				/*
				 * The entry sorts AFTER the last one in the
				 * index, and the next character after common
				 * prefix is '/'.
				 *
				 * Either the last index entry is a file in
				 * conflict with this entry, or it has a name
				 * which sorts between this entry and the
				 * potential conflicting file.
				 *
				 * In both cases, we fall through to the loop
				 * below and let the regular search code handle it.
				 */
			}
		} else if (cmp_last == 0) {
			/*
			 * The entry exactly matches the last one in the
			 * index, but because of multiple stage and CE_REMOVE
			 * items, we fall through and let the regular search
			 * code handle it.
			 */
		}
	}

	for (;;) {
		size_t len;

		for (;;) {
			if (*--slash == '/')
				break;
			if (slash <= ce->name)
				return retval;
		}
		len = slash - name;

		pos = index_name_stage_pos(istate, name, len, stage, EXPAND_SPARSE);
		if (pos >= 0) {
			/*
			 * Found one, but not so fast.  This could
			 * be a marker that says "I was here, but
			 * I am being removed".  Such an entry is
			 * not a part of the resulting tree, and
			 * it is Ok to have a directory at the same
			 * path.
			 */
			if (!(istate->cache[pos]->ce_flags & CE_REMOVE)) {
				retval = -1;
				if (!ok_to_replace)
					break;
				remove_index_entry_at(istate, pos);
				continue;
			}
		}
		else
			pos = -pos-1;

		/*
		 * Trivial optimization: if we find an entry that
		 * already matches the sub-directory, then we know
		 * we're ok, and we can exit.
		 */
		while (pos < istate->cache_nr) {
			struct cache_entry *p = istate->cache[pos];
			if ((ce_namelen(p) <= len) ||
			    (p->name[len] != '/') ||
			    memcmp(p->name, name, len))
				break; /* not our subdirectory */
			if (ce_stage(p) == stage && !(p->ce_flags & CE_REMOVE))
				/*
				 * p is at the same stage as our entry, and
				 * is a subdirectory of what we are looking
				 * at, so we cannot have conflicts at our
				 * level or anything shorter.
				 */
				return retval;
			pos++;
		}
	}
	return retval;
}

/* We may be in a situation where we already have path/file and path
 * is being added, or we already have path and path/file is being
 * added.  Either one would result in a nonsense tree that has path
 * twice when git-write-tree tries to write it out.  Prevent it.
 *
 * If ok-to-replace is specified, we remove the conflicting entries
 * from the cache so the caller should recompute the insert position.
 * When this happens, we return non-zero.
 */
static int check_file_directory_conflict(struct index_state *istate,
					 const struct cache_entry *ce,
					 int pos, int ok_to_replace)
{
	int retval;

	/*
	 * When ce is an "I am going away" entry, we allow it to be added
	 */
	if (ce->ce_flags & CE_REMOVE)
		return 0;

	/*
	 * We check if the path is a sub-path of a subsequent pathname
	 * first, since removing those will not change the position
	 * in the array.
	 */
	retval = has_file_name(istate, ce, pos, ok_to_replace);

	/*
	 * Then check if the path might have a clashing sub-directory
	 * before it.
	 */
	return retval + has_dir_name(istate, ce, pos, ok_to_replace);
}

static int add_index_entry_with_check(struct index_state *istate, struct cache_entry *ce, int option)
{
	int pos;
	int ok_to_add = option & ADD_CACHE_OK_TO_ADD;
	int ok_to_replace = option & ADD_CACHE_OK_TO_REPLACE;
	int skip_df_check = option & ADD_CACHE_SKIP_DFCHECK;
	int new_only = option & ADD_CACHE_NEW_ONLY;

	/*
	 * If this entry's path sorts after the last entry in the index,
	 * we can avoid searching for it.
	 */
	if (istate->cache_nr > 0 &&
		strcmp(ce->name, istate->cache[istate->cache_nr - 1]->name) > 0)
		pos = index_pos_to_insert_pos(istate->cache_nr);
	else
		pos = index_name_stage_pos(istate, ce->name, ce_namelen(ce), ce_stage(ce), EXPAND_SPARSE);

	/*
	 * Cache tree path should be invalidated only after index_name_stage_pos,
	 * in case it expands a sparse index.
	 */
	if (!(option & ADD_CACHE_KEEP_CACHE_TREE))
		cache_tree_invalidate_path(istate, ce->name);

	/* existing match? Just replace it. */
	if (pos >= 0) {
		if (!new_only)
			replace_index_entry(istate, pos, ce);
		return 0;
	}
	pos = -pos-1;

	if (!(option & ADD_CACHE_KEEP_CACHE_TREE))
		untracked_cache_add_to_index(istate, ce->name);

	/*
	 * Inserting a merged entry ("stage 0") into the index
	 * will always replace all non-merged entries.
	 */
	if (pos < istate->cache_nr && ce_stage(ce) == 0) {
		while (ce_same_name(istate->cache[pos], ce)) {
			ok_to_add = 1;
			if (!remove_index_entry_at(istate, pos))
				break;
		}
	}

	if (!ok_to_add)
		return -1;
	if (verify_path_internal(ce->name, ce->ce_mode) == PATH_INVALID)
		return error(_("invalid path '%s'"), ce->name);

	if (!skip_df_check &&
	    check_file_directory_conflict(istate, ce, pos, ok_to_replace)) {
		if (!ok_to_replace)
			return error(_("'%s' appears as both a file and as a directory"),
				     ce->name);
		pos = index_name_stage_pos(istate, ce->name, ce_namelen(ce), ce_stage(ce), EXPAND_SPARSE);
		pos = -pos-1;
	}
	return pos + 1;
}
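/*
 * Return-value convention (added note, not in the original file):
 * 0 means the entry replaced an existing one in place, a negative
 * value is an error, and a positive value is the insertion position
 * plus one; add_index_entry() below decodes the latter as 'ret - 1'.
 */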

int add_index_entry(struct index_state *istate, struct cache_entry *ce, int option)
{
	int pos;

	if (option & ADD_CACHE_JUST_APPEND)
		pos = istate->cache_nr;
	else {
		int ret;
		ret = add_index_entry_with_check(istate, ce, option);
		if (ret <= 0)
			return ret;
		pos = ret - 1;
	}

	/* Make sure the array is big enough .. */
	ALLOC_GROW(istate->cache, istate->cache_nr + 1, istate->cache_alloc);

	/* Add it in.. */
	istate->cache_nr++;
	if (istate->cache_nr > pos + 1)
		MOVE_ARRAY(istate->cache + pos + 1, istate->cache + pos,
			   istate->cache_nr - pos - 1);
	set_index_entry(istate, pos, ce);
	istate->cache_changed |= CE_ENTRY_ADDED;
	return 0;
}

/*
 * "refresh" does not calculate a new sha1 file or bring the
 * cache up-to-date for mode/content changes. But what it
 * _does_ do is to "re-match" the stat information of a file
 * with the cache, so that you can refresh the cache for a
 * file that hasn't been changed but where the stat entry is
 * out of date.
 *
 * For example, you'd want to do this after doing a "git-read-tree",
 * to link up the stat cache details with the proper files.
 */
static struct cache_entry *refresh_cache_ent(struct index_state *istate,
					     struct cache_entry *ce,
					     unsigned int options, int *err,
					     int *changed_ret,
					     int *t2_did_lstat,
					     int *t2_did_scan)
{
	struct stat st;
	struct cache_entry *updated;
	int changed;
	int refresh = options & CE_MATCH_REFRESH;
	int ignore_valid = options & CE_MATCH_IGNORE_VALID;
	int ignore_skip_worktree = options & CE_MATCH_IGNORE_SKIP_WORKTREE;
	int ignore_missing = options & CE_MATCH_IGNORE_MISSING;
	int ignore_fsmonitor = options & CE_MATCH_IGNORE_FSMONITOR;

	if (!refresh || ce_uptodate(ce))
		return ce;

	if (!ignore_fsmonitor)
		refresh_fsmonitor(istate);
	/*
	 * CE_VALID or CE_SKIP_WORKTREE means the user promised us
	 * that the change to the work tree does not matter and told
	 * us not to worry.
	 */
	if (!ignore_skip_worktree && ce_skip_worktree(ce)) {
		ce_mark_uptodate(ce);
		return ce;
	}
	if (!ignore_valid && (ce->ce_flags & CE_VALID)) {
		ce_mark_uptodate(ce);
		return ce;
	}
	if (!ignore_fsmonitor && (ce->ce_flags & CE_FSMONITOR_VALID)) {
		ce_mark_uptodate(ce);
		return ce;
	}

	if (has_symlink_leading_path(ce->name, ce_namelen(ce))) {
		if (ignore_missing)
			return ce;
		if (err)
			*err = ENOENT;
		return NULL;
	}

	if (t2_did_lstat)
		*t2_did_lstat = 1;
	if (lstat(ce->name, &st) < 0) {
		if (ignore_missing && errno == ENOENT)
			return ce;
		if (err)
			*err = errno;
		return NULL;
	}

	changed = ie_match_stat(istate, ce, &st, options);
	if (changed_ret)
		*changed_ret = changed;
	if (!changed) {
		/*
		 * The path is unchanged.  If we were told to ignore
		 * valid bit, then we did the actual stat check and
		 * found that the entry is unmodified.  If the entry
		 * is not marked VALID, this is the place to mark it
		 * valid again, under "assume unchanged" mode.
		 */
		if (ignore_valid && assume_unchanged &&
		    !(ce->ce_flags & CE_VALID))
			; /* mark this one VALID again */
		else {
			/*
			 * We do not mark the index itself "modified"
			 * because CE_UPTODATE flag is in-core only;
			 * we are not going to write this change out.
			 */
			if (!S_ISGITLINK(ce->ce_mode)) {
				ce_mark_uptodate(ce);
				mark_fsmonitor_valid(istate, ce);
			}
			return ce;
		}
	}

	if (t2_did_scan)
		*t2_did_scan = 1;
	if (ie_modified(istate, ce, &st, options)) {
		if (err)
			*err = EINVAL;
		return NULL;
	}

	updated = make_empty_cache_entry(istate, ce_namelen(ce));
	copy_cache_entry(updated, ce);
	memcpy(updated->name, ce->name, ce->ce_namelen + 1);
	fill_stat_cache_info(istate, updated, &st);
	/*
	 * If ignore_valid is not set, we should leave CE_VALID bit
	 * alone.  Otherwise, paths marked with --no-assume-unchanged
	 * (i.e. things to be edited) will reacquire CE_VALID bit
	 * automatically, which is not really what we want.
	 */
	if (!ignore_valid && assume_unchanged &&
	    !(ce->ce_flags & CE_VALID))
		updated->ce_flags &= ~CE_VALID;

	/* istate->cache_changed is updated in the caller */
	return updated;
}

static void show_file(const char * fmt, const char * name, int in_porcelain,
		      int * first, const char *header_msg)
{
	if (in_porcelain && *first && header_msg) {
		printf("%s\n", header_msg);
		*first = 0;
	}
	printf(fmt, name);
}

int repo_refresh_and_write_index(struct repository *repo,
				 unsigned int refresh_flags,
				 unsigned int write_flags,
				 int gentle,
				 const struct pathspec *pathspec,
				 char *seen, const char *header_msg)
{
	struct lock_file lock_file = LOCK_INIT;
	int fd, ret = 0;

	fd = repo_hold_locked_index(repo, &lock_file, 0);
	if (!gentle && fd < 0)
		return -1;
	if (refresh_index(repo->index, refresh_flags, pathspec, seen, header_msg))
		ret = 1;
	if (0 <= fd && write_locked_index(repo->index, &lock_file, COMMIT_LOCK | write_flags))
		ret = -1;
	return ret;
}


int refresh_index(struct index_state *istate, unsigned int flags,
		  const struct pathspec *pathspec,
		  char *seen, const char *header_msg)
{
	int i;
	int has_errors = 0;
	int really = (flags & REFRESH_REALLY) != 0;
	int allow_unmerged = (flags & REFRESH_UNMERGED) != 0;
	int quiet = (flags & REFRESH_QUIET) != 0;
	int not_new = (flags & REFRESH_IGNORE_MISSING) != 0;
	int ignore_submodules = (flags & REFRESH_IGNORE_SUBMODULES) != 0;
	int ignore_skip_worktree = (flags & REFRESH_IGNORE_SKIP_WORKTREE) != 0;
	int first = 1;
	int in_porcelain = (flags & REFRESH_IN_PORCELAIN);
	unsigned int options = (CE_MATCH_REFRESH |
				(really ? CE_MATCH_IGNORE_VALID : 0) |
				(not_new ? CE_MATCH_IGNORE_MISSING : 0));
	const char *modified_fmt;
	const char *deleted_fmt;
	const char *typechange_fmt;
	const char *added_fmt;
	const char *unmerged_fmt;
	struct progress *progress = NULL;
	int t2_sum_lstat = 0;
	int t2_sum_scan = 0;

	if (flags & REFRESH_PROGRESS && isatty(2))
		progress = start_delayed_progress(_("Refresh index"),
						  istate->cache_nr);

	trace_performance_enter();
	modified_fmt   = in_porcelain ? "M\t%s\n" : "%s: needs update\n";
	deleted_fmt    = in_porcelain ? "D\t%s\n" : "%s: needs update\n";
	typechange_fmt = in_porcelain ? "T\t%s\n" : "%s: needs update\n";
	added_fmt      = in_porcelain ? "A\t%s\n" : "%s: needs update\n";
	unmerged_fmt   = in_porcelain ? "U\t%s\n" : "%s: needs merge\n";
	/*
	 * Use the multi-threaded preload_index() to refresh most of the
	 * cache entries quickly then in the single threaded loop below,
	 * we only have to do the special cases that are left.
	 */
	preload_index(istate, pathspec, 0);
	trace2_region_enter("index", "refresh", NULL);

	for (i = 0; i < istate->cache_nr; i++) {
		struct cache_entry *ce, *new_entry;
		int cache_errno = 0;
		int changed = 0;
		int filtered = 0;
		int t2_did_lstat = 0;
		int t2_did_scan = 0;

		ce = istate->cache[i];
		if (ignore_submodules && S_ISGITLINK(ce->ce_mode))
			continue;
		if (ignore_skip_worktree && ce_skip_worktree(ce))
			continue;

		/*
		 * If this entry is a sparse directory, then there isn't
		 * any stat() information to update. Ignore the entry.
		 */
		if (S_ISSPARSEDIR(ce->ce_mode))
			continue;

		if (pathspec && !ce_path_match(istate, ce, pathspec, seen))
			filtered = 1;

		if (ce_stage(ce)) {
			while ((i < istate->cache_nr) &&
			       ! strcmp(istate->cache[i]->name, ce->name))
				i++;
			i--;
			if (allow_unmerged)
				continue;
			if (!filtered)
				show_file(unmerged_fmt, ce->name, in_porcelain,
					  &first, header_msg);
			has_errors = 1;
			continue;
		}

		if (filtered)
			continue;

		new_entry = refresh_cache_ent(istate, ce, options,
					      &cache_errno, &changed,
					      &t2_did_lstat, &t2_did_scan);
		t2_sum_lstat += t2_did_lstat;
		t2_sum_scan += t2_did_scan;
		if (new_entry == ce)
			continue;
		display_progress(progress, i);
		if (!new_entry) {
			const char *fmt;

			if (really && cache_errno == EINVAL) {
				/* If we are doing --really-refresh that
				 * means the index is not valid anymore.
				 */
				ce->ce_flags &= ~CE_VALID;
				ce->ce_flags |= CE_UPDATE_IN_BASE;
				mark_fsmonitor_invalid(istate, ce);
				istate->cache_changed |= CE_ENTRY_CHANGED;
			}
			if (quiet)
				continue;

			if (cache_errno == ENOENT)
				fmt = deleted_fmt;
			else if (ce_intent_to_add(ce))
				fmt = added_fmt; /* must be before other checks */
			else if (changed & TYPE_CHANGED)
				fmt = typechange_fmt;
			else
				fmt = modified_fmt;
			show_file(fmt,
				  ce->name, in_porcelain, &first, header_msg);
			has_errors = 1;
			continue;
		}

		replace_index_entry(istate, i, new_entry);
	}
	trace2_data_intmax("index", NULL, "refresh/sum_lstat", t2_sum_lstat);
	trace2_data_intmax("index", NULL, "refresh/sum_scan", t2_sum_scan);
	trace2_region_leave("index", "refresh", NULL);
	display_progress(progress, istate->cache_nr);
	stop_progress(&progress);
	trace_performance_leave("refresh index");
	return has_errors;
}

struct cache_entry *refresh_cache_entry(struct index_state *istate,
					struct cache_entry *ce,
					unsigned int options)
{
	return refresh_cache_ent(istate, ce, options, NULL, NULL, NULL, NULL);
}


/*****************************************************************
 * Index File I/O
 *****************************************************************/

#define INDEX_FORMAT_DEFAULT 3

static unsigned int get_index_format_default(struct repository *r)
{
	char *envversion = getenv("GIT_INDEX_VERSION");
	char *endp;
	unsigned int version = INDEX_FORMAT_DEFAULT;

	if (!envversion) {
		prepare_repo_settings(r);

		if (r->settings.index_version >= 0)
			version = r->settings.index_version;
		if (version < INDEX_FORMAT_LB || INDEX_FORMAT_UB < version) {
			warning(_("index.version set, but the value is invalid.\n"
				  "Using version %i"), INDEX_FORMAT_DEFAULT);
			return INDEX_FORMAT_DEFAULT;
		}
		return version;
	}

	version = strtoul(envversion, &endp, 10);
	if (*endp ||
	    version < INDEX_FORMAT_LB || INDEX_FORMAT_UB < version) {
		warning(_("GIT_INDEX_VERSION set, but the value is invalid.\n"
			  "Using version %i"), INDEX_FORMAT_DEFAULT);
		version = INDEX_FORMAT_DEFAULT;
	}
	return version;
}
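/*
 * Example (added note, not in the original file): with
 * GIT_INDEX_VERSION=4 in the environment this returns 4; with an
 * out-of-range value such as GIT_INDEX_VERSION=9 it warns and falls
 * back to INDEX_FORMAT_DEFAULT (3).  Without the environment
 * variable, the index.version repository setting is consulted instead.
 */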

/*
 * dev/ino/uid/gid/size are also just tracked to the low 32 bits
 * Again - this is just a (very strong in practice) heuristic that
 * the inode hasn't changed.
 *
 * We save the fields in big-endian order to allow using the
 * index file over NFS transparently.
 */
struct ondisk_cache_entry {
	struct cache_time ctime;
	struct cache_time mtime;
	uint32_t dev;
	uint32_t ino;
	uint32_t mode;
	uint32_t uid;
	uint32_t gid;
	uint32_t size;
	/*
	 * unsigned char hash[hashsz];
	 * uint16_t flags;
	 * if (flags & CE_EXTENDED)
	 *	uint16_t flags2;
	 */
	unsigned char data[GIT_MAX_RAWSZ + 2 * sizeof(uint16_t)];
	char name[FLEX_ARRAY];
};

/* These are only used for v3 or lower */
#define align_padding_size(size, len) ((size + (len) + 8) & ~7) - (size + len)
#define align_flex_name(STRUCT,len) ((offsetof(struct STRUCT,data) + (len) + 8) & ~7)
#define ondisk_cache_entry_size(len) align_flex_name(ondisk_cache_entry,len)
#define ondisk_data_size(flags, len) (the_hash_algo->rawsz + \
				     ((flags & CE_EXTENDED) ? 2 : 1) * sizeof(uint16_t) + len)
#define ondisk_data_size_max(len) (ondisk_data_size(CE_EXTENDED, len))
#define ondisk_ce_size(ce) (ondisk_cache_entry_size(ondisk_data_size((ce)->ce_flags, ce_namelen(ce))))
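/*
 * Worked example (added note, not in the original file; assumes SHA-1,
 * rawsz 20, and 4-byte cache_time fields): for a non-extended entry
 * named "file", ondisk_data_size() is 20 + 2 + 4 = 26 bytes, the fixed
 * stat fields above occupy offsetof(..., data) = 40 bytes, and
 * align_flex_name() computes (40 + 26 + 8) & ~7 = 72, so
 * ondisk_ce_size() yields 72 bytes including the NUL padding that
 * rounds each v2/v3 entry up to a multiple of 8.
 */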

/* Allow fsck to force verification of the index checksum. */
int verify_index_checksum;

/* Allow fsck to force verification of the cache entry order. */
int verify_ce_order;

static int verify_hdr(const struct cache_header *hdr, unsigned long size)
{
	git_hash_ctx c;
	unsigned char hash[GIT_MAX_RAWSZ];
	int hdr_version;
	unsigned char *start, *end;
	struct object_id oid;

	if (hdr->hdr_signature != htonl(CACHE_SIGNATURE))
		return error(_("bad signature 0x%08x"), hdr->hdr_signature);
	hdr_version = ntohl(hdr->hdr_version);
	if (hdr_version < INDEX_FORMAT_LB || INDEX_FORMAT_UB < hdr_version)
		return error(_("bad index version %d"), hdr_version);

	if (!verify_index_checksum)
		return 0;

	end = (unsigned char *)hdr + size;
	start = end - the_hash_algo->rawsz;
	oidread(&oid, start, the_repository->hash_algo);
	if (oideq(&oid, null_oid()))
		return 0;

	the_hash_algo->init_fn(&c);
	the_hash_algo->update_fn(&c, hdr, size - the_hash_algo->rawsz);
	the_hash_algo->final_fn(hash, &c);
	if (!hasheq(hash, start, the_repository->hash_algo))
		return error(_("bad index file sha1 signature"));
	return 0;
}

static int read_index_extension(struct index_state *istate,
				const char *ext, const char *data, unsigned long sz)
{
	switch (CACHE_EXT(ext)) {
	case CACHE_EXT_TREE:
		istate->cache_tree = cache_tree_read(data, sz);
		break;
	case CACHE_EXT_RESOLVE_UNDO:
		istate->resolve_undo = resolve_undo_read(data, sz);
		break;
	case CACHE_EXT_LINK:
		if (read_link_extension(istate, data, sz))
			return -1;
		break;
	case CACHE_EXT_UNTRACKED:
		istate->untracked = read_untracked_extension(data, sz);
		break;
	case CACHE_EXT_FSMONITOR:
		read_fsmonitor_extension(istate, data, sz);
		break;
	case CACHE_EXT_ENDOFINDEXENTRIES:
	case CACHE_EXT_INDEXENTRYOFFSETTABLE:
		/* already handled in do_read_index() */
		break;
	case CACHE_EXT_SPARSE_DIRECTORIES:
		/* no content, only an indicator */
		istate->sparse_index = INDEX_COLLAPSED;
		break;
	default:
		if (*ext < 'A' || 'Z' < *ext)
			return error(_("index uses %.4s extension, which we do not understand"),
				     ext);
		fprintf_ln(stderr, _("ignoring %.4s extension"), ext);
		break;
	}
	return 0;
}

/*
 * Parses the contents of the cache entry contained within the 'ondisk' buffer
 * into a new incore 'cache_entry'.
 *
 * Note that 'char *ondisk' may not be aligned to a 4-byte address interval in
 * index v4, so we cannot cast it to 'struct ondisk_cache_entry *' and access
 * its members. Instead, we use the byte offsets of members within the struct to
 * identify where 'get_be16()', 'get_be32()', and 'oidread()' (which can all
 * read from an unaligned memory buffer) should read from the 'ondisk' buffer
 * into the corresponding incore 'cache_entry' members.
 */
static struct cache_entry *create_from_disk(struct mem_pool *ce_mem_pool,
					    unsigned int version,
					    const char *ondisk,
					    unsigned long *ent_size,
					    const struct cache_entry *previous_ce)
{
	struct cache_entry *ce;
	size_t len;
	const char *name;
	const unsigned hashsz = the_hash_algo->rawsz;
	const char *flagsp = ondisk + offsetof(struct ondisk_cache_entry, data) + hashsz;
	unsigned int flags;
	size_t copy_len = 0;
	/*
	 * Adjacent cache entries tend to share the leading paths, so it makes
	 * sense to only store the differences in later entries.  In the v4
	 * on-disk format of the index, each on-disk cache entry stores the
	 * number of bytes to be stripped from the end of the previous name,
	 * and the bytes to append to the result, to come up with its name.
	 */
	int expand_name_field = version == 4;
1816

1817
	/* On-disk flags are just 16 bits */
1818
	flags = get_be16(flagsp);
1819
	len = flags & CE_NAMEMASK;
1820

1821
	if (flags & CE_EXTENDED) {
1822
		int extended_flags;
1823
		extended_flags = get_be16(flagsp + sizeof(uint16_t)) << 16;
1824
		/* We do not yet understand any bit out of CE_EXTENDED_FLAGS */
1825
		if (extended_flags & ~CE_EXTENDED_FLAGS)
1826
			die(_("unknown index entry format 0x%08x"), extended_flags);
1827
		flags |= extended_flags;
1828
		name = (const char *)(flagsp + 2 * sizeof(uint16_t));
1829
	}
1830
	else
1831
		name = (const char *)(flagsp + sizeof(uint16_t));
1832

1833
	if (expand_name_field) {
1834
		const unsigned char *cp = (const unsigned char *)name;
1835
		size_t strip_len, previous_len;
1836

1837
		/* If we're at the beginning of a block, ignore the previous name */
1838
		strip_len = decode_varint(&cp);
1839
		if (previous_ce) {
1840
			previous_len = previous_ce->ce_namelen;
1841
			if (previous_len < strip_len)
1842
				die(_("malformed name field in the index, near path '%s'"),
1843
					previous_ce->name);
1844
			copy_len = previous_len - strip_len;
1845
		}
1846
		name = (const char *)cp;
1847
	}
1848

1849
	if (len == CE_NAMEMASK) {
1850
		len = strlen(name);
1851
		if (expand_name_field)
1852
			len += copy_len;
1853
	}
1854

1855
	ce = mem_pool__ce_alloc(ce_mem_pool, len);
1856

1857
	/*
1858
	 * NEEDSWORK: using 'offsetof()' is cumbersome and should be replaced
1859
	 * with something more akin to 'load_bitmap_entries_v1()'s use of
1860
	 * 'read_be16'/'read_be32'. For consistency with the corresponding
1861
	 * ondisk entry write function ('copy_cache_entry_to_ondisk()'), this
1862
	 * should be done at the same time as removing references to
1863
	 * 'ondisk_cache_entry' there.
1864
	 */
1865
	ce->ce_stat_data.sd_ctime.sec = get_be32(ondisk + offsetof(struct ondisk_cache_entry, ctime)
1866
							+ offsetof(struct cache_time, sec));
1867
	ce->ce_stat_data.sd_mtime.sec = get_be32(ondisk + offsetof(struct ondisk_cache_entry, mtime)
1868
							+ offsetof(struct cache_time, sec));
1869
	ce->ce_stat_data.sd_ctime.nsec = get_be32(ondisk + offsetof(struct ondisk_cache_entry, ctime)
1870
							 + offsetof(struct cache_time, nsec));
1871
	ce->ce_stat_data.sd_mtime.nsec = get_be32(ondisk + offsetof(struct ondisk_cache_entry, mtime)
1872
							 + offsetof(struct cache_time, nsec));
1873
	ce->ce_stat_data.sd_dev   = get_be32(ondisk + offsetof(struct ondisk_cache_entry, dev));
1874
	ce->ce_stat_data.sd_ino   = get_be32(ondisk + offsetof(struct ondisk_cache_entry, ino));
1875
	ce->ce_mode  = get_be32(ondisk + offsetof(struct ondisk_cache_entry, mode));
1876
	ce->ce_stat_data.sd_uid   = get_be32(ondisk + offsetof(struct ondisk_cache_entry, uid));
1877
	ce->ce_stat_data.sd_gid   = get_be32(ondisk + offsetof(struct ondisk_cache_entry, gid));
1878
	ce->ce_stat_data.sd_size  = get_be32(ondisk + offsetof(struct ondisk_cache_entry, size));
1879
	ce->ce_flags = flags & ~CE_NAMEMASK;
1880
	ce->ce_namelen = len;
1881
	ce->index = 0;
1882
	oidread(&ce->oid, (const unsigned char *)ondisk + offsetof(struct ondisk_cache_entry, data),
1883
		the_repository->hash_algo);
1884

1885
	if (expand_name_field) {
1886
		if (copy_len)
1887
			memcpy(ce->name, previous_ce->name, copy_len);
1888
		memcpy(ce->name + copy_len, name, len + 1 - copy_len);
1889
		*ent_size = (name - ((char *)ondisk)) + len + 1 - copy_len;
1890
	} else {
1891
		memcpy(ce->name, name, len + 1);
1892
		*ent_size = ondisk_ce_size(ce);
1893
	}
1894
	return ce;
1895
}
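
/*
 * Worked example of the v4 name compression (illustrative only): if the
 * previous entry's name is "foo/bar" and this entry's name is "foo/baz",
 * the on-disk entry stores the varint 1 (strip one byte from the end of
 * "foo/bar") followed by the NUL-terminated suffix "z"; create_from_disk()
 * then copies copy_len = 7 - 1 = 6 bytes ("foo/ba") from previous_ce and
 * appends "z" to reconstruct "foo/baz".
 */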

static void check_ce_order(struct index_state *istate)
{
	unsigned int i;

	if (!verify_ce_order)
		return;

	for (i = 1; i < istate->cache_nr; i++) {
		struct cache_entry *ce = istate->cache[i - 1];
		struct cache_entry *next_ce = istate->cache[i];
		int name_compare = strcmp(ce->name, next_ce->name);

		if (0 < name_compare)
			die(_("unordered stage entries in index"));
		if (!name_compare) {
			if (!ce_stage(ce))
				die(_("multiple stage entries for merged file '%s'"),
				    ce->name);
			if (ce_stage(ce) > ce_stage(next_ce))
				die(_("unordered stage entries for '%s'"),
				    ce->name);
		}
	}
}
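
/*
 * E.g. (illustrative): the sequence ("Makefile", stage 1),
 * ("Makefile", stage 2), ("Makefile", stage 3), ("README", stage 0)
 * passes the checks above; a stage-0 entry followed by a duplicate of
 * the same name, or stage numbers that decrease for the same name,
 * triggers die().
 */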

static void tweak_untracked_cache(struct index_state *istate)
{
	struct repository *r = the_repository;

	prepare_repo_settings(r);

	switch (r->settings.core_untracked_cache) {
	case UNTRACKED_CACHE_REMOVE:
		remove_untracked_cache(istate);
		break;
	case UNTRACKED_CACHE_WRITE:
		add_untracked_cache(istate);
		break;
	case UNTRACKED_CACHE_KEEP:
		/*
		 * Either an explicit "core.untrackedCache=keep", the
		 * default if "core.untrackedCache" isn't configured,
		 * or a fallback on an unknown "core.untrackedCache"
		 * value.
		 */
		break;
	}
}

static void tweak_split_index(struct index_state *istate)
{
	switch (repo_config_get_split_index(the_repository)) {
	case -1: /* unset: do nothing */
		break;
	case 0: /* false */
		remove_split_index(istate);
		break;
	case 1: /* true */
		add_split_index(istate);
		break;
	default: /* unknown value: do nothing */
		break;
	}
}

static void post_read_index_from(struct index_state *istate)
{
	check_ce_order(istate);
	tweak_untracked_cache(istate);
	tweak_split_index(istate);
	tweak_fsmonitor(istate);
}

static size_t estimate_cache_size_from_compressed(unsigned int entries)
{
	return entries * (sizeof(struct cache_entry) + CACHE_ENTRY_PATH_LENGTH);
}

static size_t estimate_cache_size(size_t ondisk_size, unsigned int entries)
{
	long per_entry = sizeof(struct cache_entry) - sizeof(struct ondisk_cache_entry);

	/*
	 * Account for potential alignment differences.
	 */
	per_entry += align_padding_size(per_entry, 0);
	return ondisk_size + entries * per_entry;
}

struct index_entry_offset
{
	/* starting byte offset into index file, count of index entries in this block */
	int offset, nr;
};

struct index_entry_offset_table
{
	int nr;
	struct index_entry_offset entries[FLEX_ARRAY];
};

static struct index_entry_offset_table *read_ieot_extension(const char *mmap, size_t mmap_size, size_t offset);
static void write_ieot_extension(struct strbuf *sb, struct index_entry_offset_table *ieot);

static size_t read_eoie_extension(const char *mmap, size_t mmap_size);
static void write_eoie_extension(struct strbuf *sb, git_hash_ctx *eoie_context, size_t offset);

struct load_index_extensions
{
	pthread_t pthread;
	struct index_state *istate;
	const char *mmap;
	size_t mmap_size;
	unsigned long src_offset;
};

static void *load_index_extensions(void *_data)
{
	struct load_index_extensions *p = _data;
	unsigned long src_offset = p->src_offset;

	while (src_offset <= p->mmap_size - the_hash_algo->rawsz - 8) {
		/* After an array of active_nr index entries,
		 * there can be an arbitrary number of extended
		 * sections, each prefixed with an extension name
		 * (4 bytes) and a section length in 4-byte network
		 * byte order.
		 */
		uint32_t extsize = get_be32(p->mmap + src_offset + 4);
		if (read_index_extension(p->istate,
					 p->mmap + src_offset,
					 p->mmap + src_offset + 8,
					 extsize) < 0) {
			munmap((void *)p->mmap, p->mmap_size);
			die(_("index file corrupt"));
		}
		src_offset += 8;
		src_offset += extsize;
	}

	return NULL;
}
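
/*
 * Illustrative layout: a cache-tree extension with a 100-byte payload
 * appears in the file as the 4 bytes "TREE", a big-endian uint32_t 100,
 * and then the 100 payload bytes, so the loop above advances src_offset
 * by 8 + 100.
 */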

/*
 * A helper function that will load the specified range of cache entries
 * from the memory mapped file and add them to the given index.
 */
static unsigned long load_cache_entry_block(struct index_state *istate,
			struct mem_pool *ce_mem_pool, int offset, int nr, const char *mmap,
			unsigned long start_offset, const struct cache_entry *previous_ce)
{
	int i;
	unsigned long src_offset = start_offset;

	for (i = offset; i < offset + nr; i++) {
		struct cache_entry *ce;
		unsigned long consumed;

		ce = create_from_disk(ce_mem_pool, istate->version,
				      mmap + src_offset,
				      &consumed, previous_ce);
		set_index_entry(istate, i, ce);

		src_offset += consumed;
		previous_ce = ce;
	}
	return src_offset - start_offset;
}

static unsigned long load_all_cache_entries(struct index_state *istate,
			const char *mmap, size_t mmap_size, unsigned long src_offset)
{
	unsigned long consumed;

	istate->ce_mem_pool = xmalloc(sizeof(*istate->ce_mem_pool));
	if (istate->version == 4) {
		mem_pool_init(istate->ce_mem_pool,
				estimate_cache_size_from_compressed(istate->cache_nr));
	} else {
		mem_pool_init(istate->ce_mem_pool,
				estimate_cache_size(mmap_size, istate->cache_nr));
	}

	consumed = load_cache_entry_block(istate, istate->ce_mem_pool,
					0, istate->cache_nr, mmap, src_offset, NULL);
	return consumed;
}

/*
 * Mostly randomly chosen maximum thread counts: we
 * cap the parallelism to online_cpus() threads, and we want
 * to have at least 10000 cache entries per thread for it to
 * be worth starting a thread.
 */

#define THREAD_COST		(10000)
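
/*
 * E.g. (illustrative): an index with 250,000 entries on an 8-core
 * machine would want 250000 / THREAD_COST = 25 threads, capped to
 * online_cpus() = 8; an index with 15,000 entries gets only
 * 15000 / 10000 = 1 thread, i.e. no extra parallelism.
 */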

struct load_cache_entries_thread_data
{
	pthread_t pthread;
	struct index_state *istate;
	struct mem_pool *ce_mem_pool;
	int offset;
	const char *mmap;
	struct index_entry_offset_table *ieot;
	int ieot_start;		/* starting index into the ieot array */
	int ieot_blocks;	/* count of ieot entries to process */
	unsigned long consumed;	/* return # of bytes in index file processed */
};

/*
 * A thread proc to run the load_cache_entries() computation
 * across multiple background threads.
 */
static void *load_cache_entries_thread(void *_data)
{
	struct load_cache_entries_thread_data *p = _data;
	int i;

	/* iterate across all ieot blocks assigned to this thread */
	for (i = p->ieot_start; i < p->ieot_start + p->ieot_blocks; i++) {
		p->consumed += load_cache_entry_block(p->istate, p->ce_mem_pool,
			p->offset, p->ieot->entries[i].nr, p->mmap, p->ieot->entries[i].offset, NULL);
		p->offset += p->ieot->entries[i].nr;
	}
	return NULL;
}

static unsigned long load_cache_entries_threaded(struct index_state *istate, const char *mmap, size_t mmap_size,
						 int nr_threads, struct index_entry_offset_table *ieot)
{
	int i, offset, ieot_blocks, ieot_start, err;
	struct load_cache_entries_thread_data *data;
	unsigned long consumed = 0;

	/* a little sanity checking */
	if (istate->name_hash_initialized)
		BUG("the name hash isn't thread safe");

	istate->ce_mem_pool = xmalloc(sizeof(*istate->ce_mem_pool));
	mem_pool_init(istate->ce_mem_pool, 0);

	/* ensure we have no more threads than we have blocks to process */
	if (nr_threads > ieot->nr)
		nr_threads = ieot->nr;
	CALLOC_ARRAY(data, nr_threads);

	offset = ieot_start = 0;
	ieot_blocks = DIV_ROUND_UP(ieot->nr, nr_threads);
	for (i = 0; i < nr_threads; i++) {
		struct load_cache_entries_thread_data *p = &data[i];
		int nr, j;

		if (ieot_start + ieot_blocks > ieot->nr)
			ieot_blocks = ieot->nr - ieot_start;

		p->istate = istate;
		p->offset = offset;
		p->mmap = mmap;
		p->ieot = ieot;
		p->ieot_start = ieot_start;
		p->ieot_blocks = ieot_blocks;

		/* create a mem_pool for each thread */
		nr = 0;
		for (j = p->ieot_start; j < p->ieot_start + p->ieot_blocks; j++)
			nr += p->ieot->entries[j].nr;
		p->ce_mem_pool = xmalloc(sizeof(*istate->ce_mem_pool));
		if (istate->version == 4) {
			mem_pool_init(p->ce_mem_pool,
				estimate_cache_size_from_compressed(nr));
		} else {
			mem_pool_init(p->ce_mem_pool,
				estimate_cache_size(mmap_size, nr));
		}

		err = pthread_create(&p->pthread, NULL, load_cache_entries_thread, p);
		if (err)
			die(_("unable to create load_cache_entries thread: %s"), strerror(err));

		/* increment by the number of cache entries in the ieot block being processed */
		for (j = 0; j < ieot_blocks; j++)
			offset += ieot->entries[ieot_start + j].nr;
		ieot_start += ieot_blocks;
	}

	for (i = 0; i < nr_threads; i++) {
		struct load_cache_entries_thread_data *p = &data[i];

		err = pthread_join(p->pthread, NULL);
		if (err)
			die(_("unable to join load_cache_entries thread: %s"), strerror(err));
		mem_pool_combine(istate->ce_mem_pool, p->ce_mem_pool);
		consumed += p->consumed;
	}

	free(data);

	return consumed;
}

static void set_new_index_sparsity(struct index_state *istate)
{
	/*
	 * If the index's repo exists, mark it sparse according to
	 * repo settings.
	 */
	prepare_repo_settings(istate->repo);
	if (!istate->repo->settings.command_requires_full_index &&
	    is_sparse_index_allowed(istate, 0))
		istate->sparse_index = 1;
}

/* remember to discard_cache() before reading a different cache! */
int do_read_index(struct index_state *istate, const char *path, int must_exist)
{
	int fd;
	struct stat st;
	unsigned long src_offset;
	const struct cache_header *hdr;
	const char *mmap;
	size_t mmap_size;
	struct load_index_extensions p;
	size_t extension_offset = 0;
	int nr_threads, cpus;
	struct index_entry_offset_table *ieot = NULL;

	if (istate->initialized)
		return istate->cache_nr;

	istate->timestamp.sec = 0;
	istate->timestamp.nsec = 0;
	fd = open(path, O_RDONLY);
	if (fd < 0) {
		if (!must_exist && errno == ENOENT) {
			set_new_index_sparsity(istate);
			istate->initialized = 1;
			return 0;
		}
		die_errno(_("%s: index file open failed"), path);
	}

	if (fstat(fd, &st))
		die_errno(_("%s: cannot stat the open index"), path);

	mmap_size = xsize_t(st.st_size);
	if (mmap_size < sizeof(struct cache_header) + the_hash_algo->rawsz)
		die(_("%s: index file smaller than expected"), path);

	mmap = xmmap_gently(NULL, mmap_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (mmap == MAP_FAILED)
		die_errno(_("%s: unable to map index file%s"), path,
			mmap_os_err());
	close(fd);

	hdr = (const struct cache_header *)mmap;
	if (verify_hdr(hdr, mmap_size) < 0)
		goto unmap;

	oidread(&istate->oid, (const unsigned char *)hdr + mmap_size - the_hash_algo->rawsz,
		the_repository->hash_algo);
	istate->version = ntohl(hdr->hdr_version);
	istate->cache_nr = ntohl(hdr->hdr_entries);
	istate->cache_alloc = alloc_nr(istate->cache_nr);
	CALLOC_ARRAY(istate->cache, istate->cache_alloc);
	istate->initialized = 1;

	p.istate = istate;
	p.mmap = mmap;
	p.mmap_size = mmap_size;

	src_offset = sizeof(*hdr);

	if (repo_config_get_index_threads(the_repository, &nr_threads))
		nr_threads = 1;

	/* TODO: does creating more threads than cores help? */
	if (!nr_threads) {
		nr_threads = istate->cache_nr / THREAD_COST;
		cpus = online_cpus();
		if (nr_threads > cpus)
			nr_threads = cpus;
	}

	if (!HAVE_THREADS)
		nr_threads = 1;

	if (nr_threads > 1) {
		extension_offset = read_eoie_extension(mmap, mmap_size);
		if (extension_offset) {
			int err;

			p.src_offset = extension_offset;
			err = pthread_create(&p.pthread, NULL, load_index_extensions, &p);
			if (err)
				die(_("unable to create load_index_extensions thread: %s"), strerror(err));

			nr_threads--;
		}
	}

	/*
	 * Locate and read the index entry offset table so that we can use it
	 * to multi-thread the reading of the cache entries.
	 */
	if (extension_offset && nr_threads > 1)
		ieot = read_ieot_extension(mmap, mmap_size, extension_offset);

	if (ieot) {
		src_offset += load_cache_entries_threaded(istate, mmap, mmap_size, nr_threads, ieot);
		free(ieot);
	} else {
		src_offset += load_all_cache_entries(istate, mmap, mmap_size, src_offset);
	}

	istate->timestamp.sec = st.st_mtime;
	istate->timestamp.nsec = ST_MTIME_NSEC(st);

	/* if we created a thread, join it; otherwise load the extensions on the primary thread */
	if (extension_offset) {
		int ret = pthread_join(p.pthread, NULL);
		if (ret)
			die(_("unable to join load_index_extensions thread: %s"), strerror(ret));
	} else {
		p.src_offset = src_offset;
		load_index_extensions(&p);
	}
	munmap((void *)mmap, mmap_size);

	/*
	 * TODO trace2: replace "the_repository" with the actual repo instance
	 * that is associated with the given "istate".
	 */
	trace2_data_intmax("index", the_repository, "read/version",
			   istate->version);
	trace2_data_intmax("index", the_repository, "read/cache_nr",
			   istate->cache_nr);

	/*
	 * If the command explicitly requires a full index, force it
	 * to be full. Otherwise, correct the sparsity based on repository
	 * settings and other properties of the index (if necessary).
	 */
	prepare_repo_settings(istate->repo);
	if (istate->repo->settings.command_requires_full_index)
		ensure_full_index(istate);
	else
		ensure_correct_sparsity(istate);

	return istate->cache_nr;

unmap:
	munmap((void *)mmap, mmap_size);
	die(_("index file corrupt"));
}
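
/*
 * Sketch of a typical caller (illustrative only; `repo` here stands in
 * for a hypothetical `struct repository *`):
 *
 *	struct index_state istate = INDEX_STATE_INIT(repo);
 *	int nr = do_read_index(&istate, repo->index_file, 0);
 *	... work with istate.cache[0 .. nr-1] ...
 *	release_index(&istate);
 *
 * Callers typically go through read_index_from() below, which
 * additionally resolves a split index.
 */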

/*
 * Signal that the shared index is used by updating its mtime.
 *
 * This way, shared index files can be removed if they have not been
 * used for some time.
 */
static void freshen_shared_index(const char *shared_index, int warn)
{
	if (!check_and_freshen_file(shared_index, 1) && warn)
		warning(_("could not freshen shared index '%s'"), shared_index);
}

int read_index_from(struct index_state *istate, const char *path,
		    const char *gitdir)
{
	struct split_index *split_index;
	int ret;
	char *base_oid_hex;
	char *base_path;

	/* istate->initialized covers both .git/index and .git/sharedindex.xxx */
	if (istate->initialized)
		return istate->cache_nr;

	/*
	 * TODO trace2: replace "the_repository" with the actual repo instance
	 * that is associated with the given "istate".
	 */
	trace2_region_enter_printf("index", "do_read_index", the_repository,
				   "%s", path);
	trace_performance_enter();
	ret = do_read_index(istate, path, 0);
	trace_performance_leave("read cache %s", path);
	trace2_region_leave_printf("index", "do_read_index", the_repository,
				   "%s", path);

	split_index = istate->split_index;
	if (!split_index || is_null_oid(&split_index->base_oid)) {
		post_read_index_from(istate);
		return ret;
	}

	trace_performance_enter();
	if (split_index->base)
		release_index(split_index->base);
	else
		ALLOC_ARRAY(split_index->base, 1);
	index_state_init(split_index->base, istate->repo);

	base_oid_hex = oid_to_hex(&split_index->base_oid);
	base_path = xstrfmt("%s/sharedindex.%s", gitdir, base_oid_hex);
	if (file_exists(base_path)) {
		trace2_region_enter_printf("index", "shared/do_read_index",
					the_repository, "%s", base_path);

		ret = do_read_index(split_index->base, base_path, 0);
		trace2_region_leave_printf("index", "shared/do_read_index",
					the_repository, "%s", base_path);
	} else {
		char *path_copy = xstrdup(path);
		char *base_path2 = xstrfmt("%s/sharedindex.%s",
					   dirname(path_copy), base_oid_hex);
		free(path_copy);
		trace2_region_enter_printf("index", "shared/do_read_index",
					   the_repository, "%s", base_path2);
		ret = do_read_index(split_index->base, base_path2, 1);
		trace2_region_leave_printf("index", "shared/do_read_index",
					   the_repository, "%s", base_path2);
		free(base_path2);
	}
	if (!oideq(&split_index->base_oid, &split_index->base->oid))
		die(_("broken index, expect %s in %s, got %s"),
		    base_oid_hex, base_path,
		    oid_to_hex(&split_index->base->oid));

	freshen_shared_index(base_path, 0);
	merge_base_index(istate);
	post_read_index_from(istate);
	trace_performance_leave("read cache %s", base_path);
	free(base_path);
	return ret;
}

int is_index_unborn(struct index_state *istate)
{
	return (!istate->cache_nr && !istate->timestamp.sec);
}

void index_state_init(struct index_state *istate, struct repository *r)
{
	struct index_state blank = INDEX_STATE_INIT(r);
	memcpy(istate, &blank, sizeof(*istate));
}

void release_index(struct index_state *istate)
{
	/*
	 * Cache entries in istate->cache[] should have been allocated
	 * from the memory pool associated with this index, or from an
	 * associated split_index. There is no need to free individual
	 * cache entries. validate_cache_entries can detect when this
	 * assertion does not hold.
	 */
	validate_cache_entries(istate);

	resolve_undo_clear_index(istate);
	free_name_hash(istate);
	cache_tree_free(&(istate->cache_tree));
	free(istate->fsmonitor_last_update);
	free(istate->cache);
	discard_split_index(istate);
	free_untracked_cache(istate->untracked);

	if (istate->sparse_checkout_patterns) {
		clear_pattern_list(istate->sparse_checkout_patterns);
		FREE_AND_NULL(istate->sparse_checkout_patterns);
	}

	if (istate->ce_mem_pool) {
		mem_pool_discard(istate->ce_mem_pool, should_validate_cache_entries());
		FREE_AND_NULL(istate->ce_mem_pool);
	}
}

void discard_index(struct index_state *istate)
{
	release_index(istate);
	index_state_init(istate, istate->repo);
}

/*
 * Validate the cache entries of this index.
 * All cache entries associated with this index
 * should have been allocated by the memory pool
 * associated with this index, or by a referenced
 * split index.
 */
void validate_cache_entries(const struct index_state *istate)
{
	int i;

	if (!should_validate_cache_entries() || !istate || !istate->initialized)
		return;

	for (i = 0; i < istate->cache_nr; i++) {
		if (!istate) {
			BUG("cache entry is not allocated from expected memory pool");
		} else if (!istate->ce_mem_pool ||
			!mem_pool_contains(istate->ce_mem_pool, istate->cache[i])) {
			if (!istate->split_index ||
				!istate->split_index->base ||
				!istate->split_index->base->ce_mem_pool ||
				!mem_pool_contains(istate->split_index->base->ce_mem_pool, istate->cache[i])) {
				BUG("cache entry is not allocated from expected memory pool");
			}
		}
	}

	if (istate->split_index)
		validate_cache_entries(istate->split_index->base);
}

int unmerged_index(const struct index_state *istate)
{
	int i;
	for (i = 0; i < istate->cache_nr; i++) {
		if (ce_stage(istate->cache[i]))
			return 1;
	}
	return 0;
}

int repo_index_has_changes(struct repository *repo,
			   struct tree *tree,
			   struct strbuf *sb)
{
	struct index_state *istate = repo->index;
	struct object_id cmp;
	int i;

	if (tree)
		cmp = tree->object.oid;
	if (tree || !repo_get_oid_tree(repo, "HEAD", &cmp)) {
		struct diff_options opt;

		repo_diff_setup(repo, &opt);
		opt.flags.exit_with_status = 1;
		if (!sb)
			opt.flags.quick = 1;
		diff_setup_done(&opt);
		do_diff_cache(&cmp, &opt);
		diffcore_std(&opt);
		for (i = 0; sb && i < diff_queued_diff.nr; i++) {
			if (i)
				strbuf_addch(sb, ' ');
			strbuf_addstr(sb, diff_queued_diff.queue[i]->two->path);
		}
		diff_flush(&opt);
		return opt.flags.has_changes != 0;
	} else {
		/* TODO: audit for interaction with sparse-index. */
		ensure_full_index(istate);
		for (i = 0; sb && i < istate->cache_nr; i++) {
			if (i)
				strbuf_addch(sb, ' ');
			strbuf_addstr(sb, istate->cache[i]->name);
		}
		return !!istate->cache_nr;
	}
}

static int write_index_ext_header(struct hashfile *f,
				  git_hash_ctx *eoie_f,
				  unsigned int ext,
				  unsigned int sz)
{
	hashwrite_be32(f, ext);
	hashwrite_be32(f, sz);

	if (eoie_f) {
		ext = htonl(ext);
		sz = htonl(sz);
		the_hash_algo->update_fn(eoie_f, &ext, sizeof(ext));
		the_hash_algo->update_fn(eoie_f, &sz, sizeof(sz));
	}
	return 0;
}
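
/*
 * E.g. (illustrative): write_index_ext_header(f, eoie_c, CACHE_EXT_TREE, 42)
 * emits the bytes 'T' 'R' 'E' 'E' followed by the big-endian uint32_t 42,
 * and, when eoie_f is given, mixes the same eight bytes into the EOIE
 * extension hash.
 */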

static void ce_smudge_racily_clean_entry(struct index_state *istate,
					 struct cache_entry *ce)
{
	/*
	 * The only thing we care about in this function is to smudge the
	 * falsely clean entry due to a touch-update-touch race, so we leave
	 * everything else as it is.  We are called for entries whose
	 * ce_stat_data.sd_mtime matches the index file mtime.
	 *
	 * Note that this actually does not do much for gitlinks, for
	 * which ce_match_stat_basic() always goes to the actual
	 * contents.  The caller checks with is_racy_timestamp() which
	 * always says "no" for gitlinks, so we are not called for them ;-)
	 */
	struct stat st;

	if (lstat(ce->name, &st) < 0)
		return;
	if (ce_match_stat_basic(ce, &st))
		return;
	if (ce_modified_check_fs(istate, ce, &st)) {
		/* This is "racily clean"; smudge it.  Note that this
		 * is tricky code.  At first glance, it may appear
		 * that it can break with this sequence:
		 *
		 * $ echo xyzzy >frotz
		 * $ git-update-index --add frotz
		 * $ : >frotz
		 * $ sleep 3
		 * $ echo filfre >nitfol
		 * $ git-update-index --add nitfol
		 *
		 * but it does not.  When the second update-index runs,
		 * it notices that the entry "frotz" has the same timestamp
		 * as the index, and if we were to smudge it by resetting its
		 * size to zero here, then the object name recorded
		 * in the index is the 6-byte file but the cached stat information
		 * becomes zero --- which would then match what we would
		 * obtain from the filesystem next time we stat("frotz").
		 *
		 * However, the second update-index, before calling
		 * this function, notices that the cached size is 6
		 * bytes and what is on the filesystem is an empty
		 * file, and never calls us, so the cached size information
		 * for "frotz" stays 6 which does not match the filesystem.
		 */
		ce->ce_stat_data.sd_size = 0;
	}
}

/* Copy miscellaneous fields but not the name */
static void copy_cache_entry_to_ondisk(struct ondisk_cache_entry *ondisk,
				       struct cache_entry *ce)
{
	short flags;
	const unsigned hashsz = the_hash_algo->rawsz;
	uint16_t *flagsp = (uint16_t *)(ondisk->data + hashsz);

	ondisk->ctime.sec = htonl(ce->ce_stat_data.sd_ctime.sec);
	ondisk->mtime.sec = htonl(ce->ce_stat_data.sd_mtime.sec);
	ondisk->ctime.nsec = htonl(ce->ce_stat_data.sd_ctime.nsec);
	ondisk->mtime.nsec = htonl(ce->ce_stat_data.sd_mtime.nsec);
	ondisk->dev  = htonl(ce->ce_stat_data.sd_dev);
	ondisk->ino  = htonl(ce->ce_stat_data.sd_ino);
	ondisk->mode = htonl(ce->ce_mode);
	ondisk->uid  = htonl(ce->ce_stat_data.sd_uid);
	ondisk->gid  = htonl(ce->ce_stat_data.sd_gid);
	ondisk->size = htonl(ce->ce_stat_data.sd_size);
	hashcpy(ondisk->data, ce->oid.hash, the_repository->hash_algo);

	flags = ce->ce_flags & ~CE_NAMEMASK;
	flags |= (ce_namelen(ce) >= CE_NAMEMASK ? CE_NAMEMASK : ce_namelen(ce));
	flagsp[0] = htons(flags);
	if (ce->ce_flags & CE_EXTENDED) {
		flagsp[1] = htons((ce->ce_flags & CE_EXTENDED_FLAGS) >> 16);
	}
}

static int ce_write_entry(struct hashfile *f, struct cache_entry *ce,
			  struct strbuf *previous_name, struct ondisk_cache_entry *ondisk)
{
	int size;
	unsigned int saved_namelen;
	int stripped_name = 0;
	static unsigned char padding[8] = { 0x00 };

	if (ce->ce_flags & CE_STRIP_NAME) {
		saved_namelen = ce_namelen(ce);
		ce->ce_namelen = 0;
		stripped_name = 1;
	}

	size = offsetof(struct ondisk_cache_entry,data) + ondisk_data_size(ce->ce_flags, 0);

	if (!previous_name) {
		int len = ce_namelen(ce);
		copy_cache_entry_to_ondisk(ondisk, ce);
		hashwrite(f, ondisk, size);
		hashwrite(f, ce->name, len);
		hashwrite(f, padding, align_padding_size(size, len));
	} else {
		int common, to_remove, prefix_size;
		unsigned char to_remove_vi[16];
		for (common = 0;
		     (ce->name[common] &&
		      common < previous_name->len &&
		      ce->name[common] == previous_name->buf[common]);
		     common++)
			; /* still matching */
		to_remove = previous_name->len - common;
		prefix_size = encode_varint(to_remove, to_remove_vi);

		copy_cache_entry_to_ondisk(ondisk, ce);
		hashwrite(f, ondisk, size);
		hashwrite(f, to_remove_vi, prefix_size);
		hashwrite(f, ce->name + common, ce_namelen(ce) - common);
		hashwrite(f, padding, 1);

		strbuf_splice(previous_name, common, to_remove,
			      ce->name + common, ce_namelen(ce) - common);
	}
	if (stripped_name) {
		ce->ce_namelen = saved_namelen;
		ce->ce_flags &= ~CE_STRIP_NAME;
	}

	return 0;
}
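
/*
 * Worked example of the v4 write path (illustrative only): with
 * previous_name "foo/bar" and ce->name "foo/baz", the common prefix is
 * "foo/ba" (6 bytes), so to_remove = 7 - 6 = 1 is written as a varint,
 * followed by the remaining byte "z" and a single NUL terminator; this
 * is exactly what create_from_disk() undoes on the read side.
 */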

/*
 * This function verifies that index_state has the correct sha1 of the
 * index file.  Don't die on any other failure; just return 0.
 */
static int verify_index_from(const struct index_state *istate, const char *path)
{
	int fd;
	ssize_t n;
	struct stat st;
	unsigned char hash[GIT_MAX_RAWSZ];

	if (!istate->initialized)
		return 0;

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return 0;

	if (fstat(fd, &st))
		goto out;

	if (st.st_size < sizeof(struct cache_header) + the_hash_algo->rawsz)
		goto out;

	n = pread_in_full(fd, hash, the_hash_algo->rawsz, st.st_size - the_hash_algo->rawsz);
	if (n != the_hash_algo->rawsz)
		goto out;

	if (!hasheq(istate->oid.hash, hash, the_repository->hash_algo))
		goto out;

	close(fd);
	return 1;

out:
	close(fd);
	return 0;
}

static int repo_verify_index(struct repository *repo)
{
	return verify_index_from(repo->index, repo->index_file);
}

int has_racy_timestamp(struct index_state *istate)
{
	int entries = istate->cache_nr;
	int i;

	for (i = 0; i < entries; i++) {
		struct cache_entry *ce = istate->cache[i];
		if (is_racy_timestamp(istate, ce))
			return 1;
	}
	return 0;
}

void repo_update_index_if_able(struct repository *repo,
			       struct lock_file *lockfile)
{
	if ((repo->index->cache_changed ||
	     has_racy_timestamp(repo->index)) &&
	    repo_verify_index(repo))
		write_locked_index(repo->index, lockfile, COMMIT_LOCK);
	else
		rollback_lock_file(lockfile);
}

static int record_eoie(void)
{
	int val;

	if (!git_config_get_bool("index.recordendofindexentries", &val))
		return val;

	/*
	 * As a convenience, the end of index entries extension
	 * used for threading is written by default if the user
	 * explicitly requested threaded index reads.
	 */
	return !repo_config_get_index_threads(the_repository, &val) && val != 1;
}

static int record_ieot(void)
{
	int val;

	if (!git_config_get_bool("index.recordoffsettable", &val))
		return val;

	/*
	 * As a convenience, the offset table used for threading is
	 * written by default if the user explicitly requested
	 * threaded index reads.
	 */
	return !repo_config_get_index_threads(the_repository, &val) && val != 1;
}

enum write_extensions {
	WRITE_NO_EXTENSION =              0,
	WRITE_SPLIT_INDEX_EXTENSION =     1<<0,
	WRITE_CACHE_TREE_EXTENSION =      1<<1,
	WRITE_RESOLVE_UNDO_EXTENSION =    1<<2,
	WRITE_UNTRACKED_CACHE_EXTENSION = 1<<3,
	WRITE_FSMONITOR_EXTENSION =       1<<4,
};
#define WRITE_ALL_EXTENSIONS ((enum write_extensions)-1)

/*
 * On success, `tempfile` is closed. If it is the temporary file
 * of a `struct lock_file`, we will therefore effectively perform
 * a `close_lock_file_gently()`. Since that is an implementation
 * detail of lockfiles, callers of `do_write_index()` should not
 * rely on it.
 */
static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
			  enum write_extensions write_extensions, unsigned flags)
{
	uint64_t start = getnanotime();
	struct hashfile *f;
	git_hash_ctx *eoie_c = NULL;
	struct cache_header hdr;
	int i, err = 0, removed, extended, hdr_version;
	struct cache_entry **cache = istate->cache;
	int entries = istate->cache_nr;
	struct stat st;
	struct ondisk_cache_entry ondisk;
	struct strbuf previous_name_buf = STRBUF_INIT, *previous_name;
	int drop_cache_tree = istate->drop_cache_tree;
	off_t offset;
	int csum_fsync_flag;
	int ieot_entries = 1;
	struct index_entry_offset_table *ieot = NULL;
	struct repository *r = istate->repo;
	struct strbuf sb = STRBUF_INIT;
	int nr, nr_threads, ret;

	f = hashfd(tempfile->fd, tempfile->filename.buf);

	prepare_repo_settings(r);
	f->skip_hash = r->settings.index_skip_hash;

	for (i = removed = extended = 0; i < entries; i++) {
		if (cache[i]->ce_flags & CE_REMOVE)
			removed++;

		/* reduce extended entries if possible */
		cache[i]->ce_flags &= ~CE_EXTENDED;
		if (cache[i]->ce_flags & CE_EXTENDED_FLAGS) {
			extended++;
			cache[i]->ce_flags |= CE_EXTENDED;
		}
	}

	if (!istate->version)
		istate->version = get_index_format_default(r);

	/* demote version 3 to version 2 when the latter suffices */
	if (istate->version == 3 || istate->version == 2)
		istate->version = extended ? 3 : 2;

	hdr_version = istate->version;

	hdr.hdr_signature = htonl(CACHE_SIGNATURE);
	hdr.hdr_version = htonl(hdr_version);
	hdr.hdr_entries = htonl(entries - removed);

	hashwrite(f, &hdr, sizeof(hdr));

	if (!HAVE_THREADS || repo_config_get_index_threads(the_repository, &nr_threads))
		nr_threads = 1;

	if (nr_threads != 1 && record_ieot()) {
		int ieot_blocks, cpus;

		/*
		 * ensure the default number of ieot blocks maps evenly to the
		 * default number of threads that will process them, leaving
		 * room for the thread to load the index extensions.
		 */
		if (!nr_threads) {
			ieot_blocks = istate->cache_nr / THREAD_COST;
			cpus = online_cpus();
			if (ieot_blocks > cpus - 1)
				ieot_blocks = cpus - 1;
		} else {
			ieot_blocks = nr_threads;
			if (ieot_blocks > istate->cache_nr)
				ieot_blocks = istate->cache_nr;
		}

		/*
		 * no reason to write out the IEOT extension if we don't
		 * have enough blocks to utilize multi-threading
		 */
		if (ieot_blocks > 1) {
			ieot = xcalloc(1, sizeof(struct index_entry_offset_table)
				+ (ieot_blocks * sizeof(struct index_entry_offset)));
			ieot_entries = DIV_ROUND_UP(entries, ieot_blocks);
		}
	}

	offset = hashfile_total(f);

	nr = 0;
	previous_name = (hdr_version == 4) ? &previous_name_buf : NULL;

	for (i = 0; i < entries; i++) {
		struct cache_entry *ce = cache[i];
		if (ce->ce_flags & CE_REMOVE)
			continue;
		if (!ce_uptodate(ce) && is_racy_timestamp(istate, ce))
			ce_smudge_racily_clean_entry(istate, ce);
		if (is_null_oid(&ce->oid)) {
			static const char msg[] = "cache entry has null sha1: %s";
			static int allow = -1;

			if (allow < 0)
				allow = git_env_bool("GIT_ALLOW_NULL_SHA1", 0);
			if (allow)
				warning(msg, ce->name);
			else
				err = error(msg, ce->name);

			drop_cache_tree = 1;
		}
		if (ieot && i && (i % ieot_entries == 0)) {
			ieot->entries[ieot->nr].nr = nr;
			ieot->entries[ieot->nr].offset = offset;
			ieot->nr++;
			/*
			 * If we have a V4 index, set the first byte to an invalid
			 * character to ensure there is nothing common with the previous
			 * entry
			 */
			if (previous_name)
				previous_name->buf[0] = 0;
			nr = 0;

			offset = hashfile_total(f);
		}
		if (ce_write_entry(f, ce, previous_name, (struct ondisk_cache_entry *)&ondisk) < 0)
			err = -1;

		if (err)
			break;
		nr++;
	}
	if (ieot && nr) {
		ieot->entries[ieot->nr].nr = nr;
		ieot->entries[ieot->nr].offset = offset;
		ieot->nr++;
	}
	strbuf_release(&previous_name_buf);

	if (err) {
		ret = err;
		goto out;
	}

	offset = hashfile_total(f);

	/*
	 * The extension headers must be hashed on their own for the
	 * EOIE extension. Create a hashfile here to compute that hash.
	 */
	if (offset && record_eoie()) {
		CALLOC_ARRAY(eoie_c, 1);
		the_hash_algo->init_fn(eoie_c);
	}

	/*
	 * Let's write out CACHE_EXT_INDEXENTRYOFFSETTABLE first so that we
	 * can minimize the number of extensions we have to scan through to
	 * find it during load.  Write it out regardless of the
	 * strip_extensions parameter as we need it when loading the shared
	 * index.
	 */
	if (ieot) {
		strbuf_reset(&sb);

		write_ieot_extension(&sb, ieot);
		err = write_index_ext_header(f, eoie_c, CACHE_EXT_INDEXENTRYOFFSETTABLE, sb.len) < 0;
		hashwrite(f, sb.buf, sb.len);
		if (err) {
			ret = -1;
			goto out;
		}
	}

	if (write_extensions & WRITE_SPLIT_INDEX_EXTENSION &&
	    istate->split_index) {
		strbuf_reset(&sb);

		if (istate->sparse_index)
			die(_("cannot write split index for a sparse index"));

		err = write_link_extension(&sb, istate) < 0 ||
			write_index_ext_header(f, eoie_c, CACHE_EXT_LINK,
					       sb.len) < 0;
		hashwrite(f, sb.buf, sb.len);
		if (err) {
			ret = -1;
			goto out;
		}
	}
	if (write_extensions & WRITE_CACHE_TREE_EXTENSION &&
	    !drop_cache_tree && istate->cache_tree) {
		strbuf_reset(&sb);

		cache_tree_write(&sb, istate->cache_tree);
		err = write_index_ext_header(f, eoie_c, CACHE_EXT_TREE, sb.len) < 0;
		hashwrite(f, sb.buf, sb.len);
		if (err) {
			ret = -1;
			goto out;
		}
	}
	if (write_extensions & WRITE_RESOLVE_UNDO_EXTENSION &&
	    istate->resolve_undo) {
		strbuf_reset(&sb);

		resolve_undo_write(&sb, istate->resolve_undo);
		err = write_index_ext_header(f, eoie_c, CACHE_EXT_RESOLVE_UNDO,
					     sb.len) < 0;
		hashwrite(f, sb.buf, sb.len);
		if (err) {
			ret = -1;
			goto out;
		}
	}
	if (write_extensions & WRITE_UNTRACKED_CACHE_EXTENSION &&
	    istate->untracked) {
		strbuf_reset(&sb);

		write_untracked_extension(&sb, istate->untracked);
		err = write_index_ext_header(f, eoie_c, CACHE_EXT_UNTRACKED,
					     sb.len) < 0;
		hashwrite(f, sb.buf, sb.len);
		if (err) {
			ret = -1;
			goto out;
		}
	}
	if (write_extensions & WRITE_FSMONITOR_EXTENSION &&
	    istate->fsmonitor_last_update) {
		strbuf_reset(&sb);

		write_fsmonitor_extension(&sb, istate);
		err = write_index_ext_header(f, eoie_c, CACHE_EXT_FSMONITOR, sb.len) < 0;
		hashwrite(f, sb.buf, sb.len);
		if (err) {
			ret = -1;
			goto out;
		}
	}
	if (istate->sparse_index) {
		if (write_index_ext_header(f, eoie_c, CACHE_EXT_SPARSE_DIRECTORIES, 0) < 0) {
			ret = -1;
			goto out;
		}
	}

	/*
	 * CACHE_EXT_ENDOFINDEXENTRIES must be written as the last entry before the SHA1
	 * so that it can be found and processed before all the index entries are
	 * read.  Write it out regardless of the strip_extensions parameter as we need it
	 * when loading the shared index.
	 */
	if (eoie_c) {
		strbuf_reset(&sb);

		write_eoie_extension(&sb, eoie_c, offset);
		err = write_index_ext_header(f, NULL, CACHE_EXT_ENDOFINDEXENTRIES, sb.len) < 0;
		hashwrite(f, sb.buf, sb.len);
		if (err) {
			ret = -1;
			goto out;
		}
	}

	csum_fsync_flag = 0;
	if (!alternate_index_output && (flags & COMMIT_LOCK))
		csum_fsync_flag = CSUM_FSYNC;

	finalize_hashfile(f, istate->oid.hash, FSYNC_COMPONENT_INDEX,
			  CSUM_HASH_IN_STREAM | csum_fsync_flag);
	f = NULL;

	if (close_tempfile_gently(tempfile)) {
		ret = error(_("could not close '%s'"), get_tempfile_path(tempfile));
		goto out;
	}
	if (stat(get_tempfile_path(tempfile), &st)) {
		ret = -1;
		goto out;
	}
	istate->timestamp.sec = (unsigned int)st.st_mtime;
	istate->timestamp.nsec = ST_MTIME_NSEC(st);
	trace_performance_since(start, "write index, changed mask = %x", istate->cache_changed);

	/*
	 * TODO trace2: replace "the_repository" with the actual repo instance
	 * that is associated with the given "istate".
	 */
	trace2_data_intmax("index", the_repository, "write/version",
			   istate->version);
	trace2_data_intmax("index", the_repository, "write/cache_nr",
			   istate->cache_nr);

	ret = 0;

out:
	if (f)
		free_hashfile(f);
	strbuf_release(&sb);
	free(ieot);
	return ret;
}

void set_alternate_index_output(const char *name)
{
	alternate_index_output = name;
}

static int commit_locked_index(struct lock_file *lk)
{
	if (alternate_index_output)
		return commit_lock_file_to(lk, alternate_index_output);
	else
		return commit_lock_file(lk);
}

static int do_write_locked_index(struct index_state *istate,
				 struct lock_file *lock,
				 unsigned flags,
				 enum write_extensions write_extensions)
{
	int ret;
	int was_full = istate->sparse_index == INDEX_EXPANDED;

	ret = convert_to_sparse(istate, 0);

	if (ret) {
		warning(_("failed to convert to a sparse-index"));
		return ret;
	}

	/*
	 * TODO trace2: replace "the_repository" with the actual repo instance
	 * that is associated with the given "istate".
	 */
	trace2_region_enter_printf("index", "do_write_index", the_repository,
				   "%s", get_lock_file_path(lock));
	ret = do_write_index(istate, lock->tempfile, write_extensions, flags);
	trace2_region_leave_printf("index", "do_write_index", the_repository,
				   "%s", get_lock_file_path(lock));

	if (was_full)
		ensure_full_index(istate);

	if (ret)
		return ret;
	if (flags & COMMIT_LOCK)
		ret = commit_locked_index(lock);
	else
		ret = close_lock_file_gently(lock);

	run_hooks_l(the_repository, "post-index-change",
		    istate->updated_workdir ? "1" : "0",
		    istate->updated_skipworktree ? "1" : "0", NULL);
	istate->updated_workdir = 0;
	istate->updated_skipworktree = 0;

	return ret;
}

static int write_split_index(struct index_state *istate,
			     struct lock_file *lock,
			     unsigned flags)
{
	int ret;
	prepare_to_write_split_index(istate);
	ret = do_write_locked_index(istate, lock, flags, WRITE_ALL_EXTENSIONS);
	finish_writing_split_index(istate);
	return ret;
}

static unsigned long get_shared_index_expire_date(void)
{
	static unsigned long shared_index_expire_date;
	static int shared_index_expire_date_prepared;

	if (!shared_index_expire_date_prepared) {
		const char *shared_index_expire = "2.weeks.ago";
		char *value = NULL;

		repo_config_get_expiry(the_repository, "splitindex.sharedindexexpire",
				       &value);
		if (value)
			shared_index_expire = value;

		shared_index_expire_date = approxidate(shared_index_expire);
		shared_index_expire_date_prepared = 1;

		free(value);
	}

	return shared_index_expire_date;
}

static int should_delete_shared_index(const char *shared_index_path)
{
	struct stat st;
	unsigned long expiration;

	/* Check timestamp */
	expiration = get_shared_index_expire_date();
	if (!expiration)
		return 0;
	if (stat(shared_index_path, &st))
		return error_errno(_("could not stat '%s'"), shared_index_path);
	if (st.st_mtime > expiration)
		return 0;

	return 1;
}

static int clean_shared_index_files(const char *current_hex)
{
	struct dirent *de;
	DIR *dir = opendir(get_git_dir());

	if (!dir)
		return error_errno(_("unable to open git dir: %s"), get_git_dir());

	while ((de = readdir(dir)) != NULL) {
		const char *sha1_hex;
		const char *shared_index_path;
		if (!skip_prefix(de->d_name, "sharedindex.", &sha1_hex))
			continue;
		if (!strcmp(sha1_hex, current_hex))
			continue;
		shared_index_path = git_path("%s", de->d_name);
		if (should_delete_shared_index(shared_index_path) > 0 &&
		    unlink(shared_index_path))
			warning_errno(_("unable to unlink: %s"), shared_index_path);
	}
	closedir(dir);

	return 0;
}

static int write_shared_index(struct index_state *istate,
			      struct tempfile **temp, unsigned flags)
{
	struct split_index *si = istate->split_index;
	int ret, was_full = !istate->sparse_index;

	move_cache_to_base_index(istate);
	convert_to_sparse(istate, 0);

	trace2_region_enter_printf("index", "shared/do_write_index",
				   the_repository, "%s", get_tempfile_path(*temp));
	ret = do_write_index(si->base, *temp, WRITE_NO_EXTENSION, flags);
	trace2_region_leave_printf("index", "shared/do_write_index",
				   the_repository, "%s", get_tempfile_path(*temp));

	if (was_full)
		ensure_full_index(istate);

	if (ret)
		return ret;
	ret = adjust_shared_perm(get_tempfile_path(*temp));
	if (ret) {
		error(_("cannot fix permission bits on '%s'"), get_tempfile_path(*temp));
		return ret;
	}
	ret = rename_tempfile(temp,
			      git_path("sharedindex.%s", oid_to_hex(&si->base->oid)));
	if (!ret) {
		oidcpy(&si->base_oid, &si->base->oid);
		clean_shared_index_files(oid_to_hex(&si->base->oid));
	}

	return ret;
}

static const int default_max_percent_split_change = 20;

static int too_many_not_shared_entries(struct index_state *istate)
{
	int i, not_shared = 0;
	int max_split = repo_config_get_max_percent_split_change(the_repository);

	switch (max_split) {
	case -1:
		/* not or badly configured: use the default value */
		max_split = default_max_percent_split_change;
		break;
	case 0:
		return 1; /* 0% means always write a new shared index */
	case 100:
		return 0; /* 100% means never write a new shared index */
	default:
		break; /* just use the configured value */
	}

	/* Count not shared entries */
	for (i = 0; i < istate->cache_nr; i++) {
		struct cache_entry *ce = istate->cache[i];
		if (!ce->index)
			not_shared++;
	}

	return (int64_t)istate->cache_nr * max_split < (int64_t)not_shared * 100;
}
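
/*
 * E.g. (illustrative): with 1000 entries, 250 of which are not in the
 * shared index, and the default max_split of 20%, the comparison is
 * 1000 * 20 = 20000 < 250 * 100 = 25000, so a new shared index gets
 * written.
 */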

int write_locked_index(struct index_state *istate, struct lock_file *lock,
		       unsigned flags)
{
	int new_shared_index, ret, test_split_index_env;
	struct split_index *si = istate->split_index;

	if (git_env_bool("GIT_TEST_CHECK_CACHE_TREE", 0))
		cache_tree_verify(the_repository, istate);

	if ((flags & SKIP_IF_UNCHANGED) && !istate->cache_changed) {
		if (flags & COMMIT_LOCK)
			rollback_lock_file(lock);
		return 0;
	}

	if (istate->fsmonitor_last_update)
		fill_fsmonitor_bitmap(istate);

	test_split_index_env = git_env_bool("GIT_TEST_SPLIT_INDEX", 0);

	if ((!si && !test_split_index_env) ||
	    alternate_index_output ||
	    (istate->cache_changed & ~EXTMASK)) {
		ret = do_write_locked_index(istate, lock, flags,
					    ~WRITE_SPLIT_INDEX_EXTENSION);
		goto out;
	}

	if (test_split_index_env) {
		if (!si) {
			si = init_split_index(istate);
			istate->cache_changed |= SPLIT_INDEX_ORDERED;
		} else {
			int v = si->base_oid.hash[0];
			if ((v & 15) < 6)
				istate->cache_changed |= SPLIT_INDEX_ORDERED;
		}
	}
	if (too_many_not_shared_entries(istate))
		istate->cache_changed |= SPLIT_INDEX_ORDERED;

	new_shared_index = istate->cache_changed & SPLIT_INDEX_ORDERED;

	if (new_shared_index) {
		struct tempfile *temp;
		int saved_errno;

		/* Same initial permissions as the main .git/index file */
		temp = mks_tempfile_sm(git_path("sharedindex_XXXXXX"), 0, 0666);
		if (!temp) {
			ret = do_write_locked_index(istate, lock, flags,
						    ~WRITE_SPLIT_INDEX_EXTENSION);
			goto out;
		}
		ret = write_shared_index(istate, &temp, flags);

		saved_errno = errno;
		if (is_tempfile_active(temp))
			delete_tempfile(&temp);
		errno = saved_errno;

		if (ret)
			goto out;
	}

	ret = write_split_index(istate, lock, flags);

	/* Freshen the shared index only if the split-index was written */
	if (!ret && !new_shared_index && !is_null_oid(&si->base_oid)) {
		const char *shared_index = git_path("sharedindex.%s",
						    oid_to_hex(&si->base_oid));
		freshen_shared_index(shared_index, 1);
	}

out:
	if (flags & COMMIT_LOCK)
		rollback_lock_file(lock);
	return ret;
}

/*
 * Read the index file that is potentially unmerged into the given
 * index_state, dropping any unmerged entries to stage #0 (potentially
 * resulting in a path appearing as both a file and a directory in the
 * index; the caller is responsible for clearing out the extra entries
 * before writing the index to a tree).  Returns true if the index is
 * unmerged.  Callers who want to refuse to work from an unmerged
 * state can call this and check its return value, instead of calling
 * read_cache().
 */
int repo_read_index_unmerged(struct repository *repo)
3419
{
3420
	struct index_state *istate;
3421
	int i;
3422
	int unmerged = 0;
3423

3424
	repo_read_index(repo);
3425
	istate = repo->index;
3426
	for (i = 0; i < istate->cache_nr; i++) {
3427
		struct cache_entry *ce = istate->cache[i];
3428
		struct cache_entry *new_ce;
3429
		int len;
3430

3431
		if (!ce_stage(ce))
3432
			continue;
3433
		unmerged = 1;
3434
		len = ce_namelen(ce);
3435
		new_ce = make_empty_cache_entry(istate, len);
3436
		memcpy(new_ce->name, ce->name, len);
3437
		new_ce->ce_flags = create_ce_flags(0) | CE_CONFLICTED;
3438
		new_ce->ce_namelen = len;
3439
		new_ce->ce_mode = ce->ce_mode;
3440
		if (add_index_entry(istate, new_ce, ADD_CACHE_SKIP_DFCHECK))
3441
			return error(_("%s: cannot drop to stage #0"),
3442
				     new_ce->name);
3443
	}
3444
	return unmerged;
3445
}

/*
 * Returns 1 if the path is an "other" path with respect to
 * the index; that is, the path is not mentioned in the index at all,
 * either as a file, a directory with some files in the index,
 * or as an unmerged entry.
 *
 * We helpfully remove a trailing "/" from directories so that
 * the output of read_directory can be used as-is.
 */
int index_name_is_other(struct index_state *istate, const char *name,
			int namelen)
{
	int pos;
	if (namelen && name[namelen - 1] == '/')
		namelen--;
	pos = index_name_pos(istate, name, namelen);
	if (0 <= pos)
		return 0;	/* exact match */
	pos = -pos - 1;
	if (pos < istate->cache_nr) {
		struct cache_entry *ce = istate->cache[pos];
		if (ce_namelen(ce) == namelen &&
		    !memcmp(ce->name, name, namelen))
			return 0; /* Yup, this one exists unmerged */
	}
	return 1;
}
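
/*
 * Editor's sketch (hedged): this predicate is what "others"-style
 * listings build on; a path the index knows nothing about is
 * untracked.  The helper below is hypothetical.
 */
#if 0
static int sketch_is_untracked(struct index_state *istate, const char *path)
{
	return index_name_is_other(istate, path, strlen(path));
}
#endif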

void *read_blob_data_from_index(struct index_state *istate,
				const char *path, unsigned long *size)
{
	int pos, len;
	unsigned long sz;
	enum object_type type;
	void *data;

	len = strlen(path);
	pos = index_name_pos(istate, path, len);
	if (pos < 0) {
		/*
		 * We might be in the middle of a merge, in which
		 * case we would read stage #2 (ours).
		 */
		int i;
		for (i = -pos - 1;
		     (pos < 0 && i < istate->cache_nr &&
		      !strcmp(istate->cache[i]->name, path));
		     i++)
			if (ce_stage(istate->cache[i]) == 2)
				pos = i;
	}
	if (pos < 0)
		return NULL;
	data = repo_read_object_file(the_repository, &istate->cache[pos]->oid,
				     &type, &sz);
	if (!data || type != OBJ_BLOB) {
		free(data);
		return NULL;
	}
	if (size)
		*size = sz;
	return data;
}
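
/*
 * Editor's sketch (hedged): callers use this to read a file's staged
 * contents, e.g. an in-index ".gitmodules".  The helper and its output
 * are purely illustrative.
 */
#if 0
static void sketch_print_staged_size(struct index_state *istate,
				     const char *path)
{
	unsigned long size;
	void *data = read_blob_data_from_index(istate, path, &size);

	if (data) {
		fprintf(stderr, "%s: %lu bytes staged\n", path, size);
		free(data);
	}
}
#endif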

void move_index_extensions(struct index_state *dst, struct index_state *src)
{
	dst->untracked = src->untracked;
	src->untracked = NULL;
	dst->cache_tree = src->cache_tree;
	src->cache_tree = NULL;
}

struct cache_entry *dup_cache_entry(const struct cache_entry *ce,
				    struct index_state *istate)
{
	unsigned int size = ce_size(ce);
	int mem_pool_allocated;
	struct cache_entry *new_entry = make_empty_cache_entry(istate, ce_namelen(ce));
	mem_pool_allocated = new_entry->mem_pool_allocated;

	memcpy(new_entry, ce, size);
	new_entry->mem_pool_allocated = mem_pool_allocated;
	return new_entry;
}

void discard_cache_entry(struct cache_entry *ce)
{
	if (ce && should_validate_cache_entries())
		memset(ce, 0xCD, cache_entry_size(ce->ce_namelen));

	if (ce && ce->mem_pool_allocated)
		return;

	free(ce);
}

int should_validate_cache_entries(void)
{
	static int validate_index_cache_entries = -1;

	if (validate_index_cache_entries < 0) {
		if (getenv("GIT_TEST_VALIDATE_INDEX_CACHE_ENTRIES"))
			validate_index_cache_entries = 1;
		else
			validate_index_cache_entries = 0;
	}

	return validate_index_cache_entries;
}
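
/*
 * Editor's note: the 0xCD poisoning in discard_cache_entry() above is
 * enabled by setting GIT_TEST_VALIDATE_INDEX_CACHE_ENTRIES=1 in the
 * environment (typically by the test suite), so use-after-discard bugs
 * surface as obviously-poisoned entries rather than silent corruption.
 */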

#define EOIE_SIZE (4 + GIT_SHA1_RAWSZ) /* <4-byte offset> + <20-byte hash> */
#define EOIE_SIZE_WITH_HEADER (4 + 4 + EOIE_SIZE) /* <4-byte signature> + <4-byte length> + EOIE_SIZE */

static size_t read_eoie_extension(const char *mmap, size_t mmap_size)
{
	/*
	 * The end of index entries (EOIE) extension is guaranteed to be last
	 * so that it can be found by scanning backwards from the EOF.
	 *
	 * "EOIE"
	 * <4-byte length>
	 * <4-byte offset>
	 * <20-byte hash>
	 */
	const char *index, *eoie;
	uint32_t extsize;
	size_t offset, src_offset;
	unsigned char hash[GIT_MAX_RAWSZ];
	git_hash_ctx c;

	/* ensure we have an index big enough to contain an EOIE extension */
	if (mmap_size < sizeof(struct cache_header) + EOIE_SIZE_WITH_HEADER + the_hash_algo->rawsz)
		return 0;

	/* validate the extension signature */
	index = eoie = mmap + mmap_size - EOIE_SIZE_WITH_HEADER - the_hash_algo->rawsz;
	if (CACHE_EXT(index) != CACHE_EXT_ENDOFINDEXENTRIES)
		return 0;
	index += sizeof(uint32_t);

	/* validate the extension size */
	extsize = get_be32(index);
	if (extsize != EOIE_SIZE)
		return 0;
	index += sizeof(uint32_t);

	/*
	 * Validate that the offset at which we will start looking for the
	 * first extension signature is after the index header and before
	 * the EOIE extension.
	 */
	offset = get_be32(index);
	if (mmap + offset < mmap + sizeof(struct cache_header))
		return 0;
	if (mmap + offset >= eoie)
		return 0;
	index += sizeof(uint32_t);

	/*
	 * The hash is computed over extension types and their sizes (but not
	 * their contents).  E.g. if we have a "TREE" extension that is N bytes
	 * long, a "REUC" extension that is M bytes long, followed by "EOIE",
	 * then the hash would be:
	 *
	 * SHA-1("TREE" + <binary representation of N> +
	 *	 "REUC" + <binary representation of M>)
	 */
	src_offset = offset;
	the_hash_algo->init_fn(&c);
	while (src_offset < mmap_size - the_hash_algo->rawsz - EOIE_SIZE_WITH_HEADER) {
		/* After an array of active_nr index entries,
		 * there can be an arbitrary number of extended
		 * sections, each of which is prefixed with
		 * an extension name (4 bytes) and section length
		 * in 4-byte network byte order.
		 */
		uint32_t extsize;
		memcpy(&extsize, mmap + src_offset + 4, 4);
		extsize = ntohl(extsize);

		/* verify the extension size isn't so large it will wrap around */
		if (src_offset + 8 + extsize < src_offset)
			return 0;

		the_hash_algo->update_fn(&c, mmap + src_offset, 8);

		src_offset += 8;
		src_offset += extsize;
	}
	the_hash_algo->final_fn(hash, &c);
	if (!hasheq(hash, (const unsigned char *)index, the_repository->hash_algo))
		return 0;

	/* Validate that the extension offsets returned us back to the eoie extension. */
	if (src_offset != mmap_size - the_hash_algo->rawsz - EOIE_SIZE_WITH_HEADER)
		return 0;

	return offset;
}

static void write_eoie_extension(struct strbuf *sb, git_hash_ctx *eoie_context, size_t offset)
{
	uint32_t buffer;
	unsigned char hash[GIT_MAX_RAWSZ];

	/* offset */
	put_be32(&buffer, offset);
	strbuf_add(sb, &buffer, sizeof(uint32_t));

	/* hash */
	the_hash_algo->final_fn(hash, eoie_context);
	strbuf_add(sb, hash, the_hash_algo->rawsz);
}

#define IEOT_VERSION	(1)

static struct index_entry_offset_table *read_ieot_extension(const char *mmap, size_t mmap_size, size_t offset)
{
	const char *index = NULL;
	uint32_t extsize, ext_version;
	struct index_entry_offset_table *ieot;
	int i, nr;

	/* find the IEOT extension */
	if (!offset)
		return NULL;
	while (offset <= mmap_size - the_hash_algo->rawsz - 8) {
		extsize = get_be32(mmap + offset + 4);
		if (CACHE_EXT((mmap + offset)) == CACHE_EXT_INDEXENTRYOFFSETTABLE) {
			index = mmap + offset + 4 + 4;
			break;
		}
		offset += 8;
		offset += extsize;
	}
	if (!index)
		return NULL;

	/* validate that the version is IEOT_VERSION */
	ext_version = get_be32(index);
	if (ext_version != IEOT_VERSION) {
		error("invalid IEOT version %d", ext_version);
		return NULL;
	}
	index += sizeof(uint32_t);

	/* (extension size - version field) / bytes per entry */
	nr = (extsize - sizeof(uint32_t)) / (sizeof(uint32_t) + sizeof(uint32_t));
	if (!nr) {
		error("invalid number of IEOT entries %d", nr);
		return NULL;
	}
	ieot = xmalloc(sizeof(struct index_entry_offset_table)
		       + (nr * sizeof(struct index_entry_offset)));
	ieot->nr = nr;
	for (i = 0; i < nr; i++) {
		ieot->entries[i].offset = get_be32(index);
		index += sizeof(uint32_t);
		ieot->entries[i].nr = get_be32(index);
		index += sizeof(uint32_t);
	}

	return ieot;
}
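
/*
 * Editor's sketch (hedged): each IEOT entry is an (offset, count) pair
 * describing a contiguous block of on-disk cache entries; the threaded
 * index loader hands one block to each worker.  Dumping the table as
 * below is purely illustrative.
 */
#if 0
static void sketch_dump_ieot(const struct index_entry_offset_table *ieot)
{
	int i;

	for (i = 0; i < ieot->nr; i++)
		fprintf(stderr, "block %d: %d entries at offset %d\n",
			i, ieot->entries[i].nr, ieot->entries[i].offset);
}
#endif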

static void write_ieot_extension(struct strbuf *sb, struct index_entry_offset_table *ieot)
{
	uint32_t buffer;
	int i;

	/* version */
	put_be32(&buffer, IEOT_VERSION);
	strbuf_add(sb, &buffer, sizeof(uint32_t));

	/* ieot */
	for (i = 0; i < ieot->nr; i++) {

		/* offset */
		put_be32(&buffer, ieot->entries[i].offset);
		strbuf_add(sb, &buffer, sizeof(uint32_t));

		/* count */
		put_be32(&buffer, ieot->entries[i].nr);
		strbuf_add(sb, &buffer, sizeof(uint32_t));
	}
}

void prefetch_cache_entries(const struct index_state *istate,
			    must_prefetch_predicate must_prefetch)
{
	int i;
	struct oid_array to_fetch = OID_ARRAY_INIT;

	for (i = 0; i < istate->cache_nr; i++) {
		struct cache_entry *ce = istate->cache[i];

		if (S_ISGITLINK(ce->ce_mode) || !must_prefetch(ce))
			continue;
		if (!oid_object_info_extended(the_repository, &ce->oid,
					      NULL,
					      OBJECT_INFO_FOR_PREFETCH))
			continue;
		oid_array_append(&to_fetch, &ce->oid);
	}
	promisor_remote_get_direct(the_repository,
				   to_fetch.oid, to_fetch.nr);
	oid_array_clear(&to_fetch);
}
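
/*
 * Editor's sketch (hedged): in a partial clone, a caller can batch-fetch
 * all blobs the next operation will touch by supplying a predicate.  The
 * "fetch everything" predicate below is purely illustrative.
 */
#if 0
static int sketch_want_all(const struct cache_entry *ce UNUSED)
{
	return 1;
}

static void sketch_prefetch_everything(struct index_state *istate)
{
	if (repo_has_promisor_remote(the_repository))
		prefetch_cache_entries(istate, sketch_want_all);
}
#endif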

static int read_one_entry_opt(struct index_state *istate,
			      const struct object_id *oid,
			      struct strbuf *base,
			      const char *pathname,
			      unsigned mode, int opt)
{
	int len;
	struct cache_entry *ce;

	if (S_ISDIR(mode))
		return READ_TREE_RECURSIVE;

	len = strlen(pathname);
	ce = make_empty_cache_entry(istate, base->len + len);

	ce->ce_mode = create_ce_mode(mode);
	ce->ce_flags = create_ce_flags(1);
	ce->ce_namelen = base->len + len;
	memcpy(ce->name, base->buf, base->len);
	memcpy(ce->name + base->len, pathname, len+1);
	oidcpy(&ce->oid, oid);
	return add_index_entry(istate, ce, opt);
}

static int read_one_entry(const struct object_id *oid, struct strbuf *base,
			  const char *pathname, unsigned mode,
			  void *context)
{
	struct index_state *istate = context;
	return read_one_entry_opt(istate, oid, base, pathname,
				  mode,
				  ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK);
}

/*
 * This is used when the caller knows there are no existing entries at
 * the stage that will conflict with the entry being added.
 */
static int read_one_entry_quick(const struct object_id *oid, struct strbuf *base,
				const char *pathname, unsigned mode,
				void *context)
{
	struct index_state *istate = context;
	return read_one_entry_opt(istate, oid, base, pathname,
				  mode, ADD_CACHE_JUST_APPEND);
}

/*
 * Read the tree specified with the --with-tree option
 * (typically, HEAD) into stage #1 and then
 * squash those entries down to stage #0.  This is used for
 * --error-unmatch to list and check the path patterns
 * that were given from the command line.  We are not
 * going to write this index out.
 */
void overlay_tree_on_index(struct index_state *istate,
			   const char *tree_name, const char *prefix)
{
	struct tree *tree;
	struct object_id oid;
	struct pathspec pathspec;
	struct cache_entry *last_stage0 = NULL;
	int i;
	read_tree_fn_t fn = NULL;
	int err;

	if (repo_get_oid(the_repository, tree_name, &oid))
		die("tree-ish %s not found.", tree_name);
	tree = parse_tree_indirect(&oid);
	if (!tree)
		die("bad tree-ish %s", tree_name);

	/* Hoist the unmerged entries up to stage #3 to make room */
	/* TODO: audit for interaction with sparse-index. */
	ensure_full_index(istate);
	for (i = 0; i < istate->cache_nr; i++) {
		struct cache_entry *ce = istate->cache[i];
		if (!ce_stage(ce))
			continue;
		ce->ce_flags |= CE_STAGEMASK;
	}

	if (prefix) {
		static const char *(matchbuf[1]);
		matchbuf[0] = NULL;
		parse_pathspec(&pathspec, PATHSPEC_ALL_MAGIC,
			       PATHSPEC_PREFER_CWD, prefix, matchbuf);
	} else
		memset(&pathspec, 0, sizeof(pathspec));

	/*
	 * See if we already have a cache entry at stage #1.  If so,
	 * do it the original slow way; otherwise, append and then
	 * sort at the end.
	 */
	for (i = 0; !fn && i < istate->cache_nr; i++) {
		const struct cache_entry *ce = istate->cache[i];
		if (ce_stage(ce) == 1)
			fn = read_one_entry;
	}

	if (!fn)
		fn = read_one_entry_quick;
	err = read_tree(the_repository, tree, &pathspec, fn, istate);
	clear_pathspec(&pathspec);
	if (err)
		die("unable to read tree entries %s", tree_name);

	/*
	 * Sort the cache entries -- we need to nuke the cache tree, though.
	 */
	if (fn == read_one_entry_quick) {
		cache_tree_free(&istate->cache_tree);
		QSORT(istate->cache, istate->cache_nr, cmp_cache_name_compare);
	}

	for (i = 0; i < istate->cache_nr; i++) {
		struct cache_entry *ce = istate->cache[i];
		switch (ce_stage(ce)) {
		case 0:
			last_stage0 = ce;
			/* fallthru */
		default:
			continue;
		case 1:
			/*
			 * If there is a stage #0 entry for this path, we do
			 * not need to show it.  We use the CE_UPDATE bit to
			 * mark such an entry.
			 */
			if (last_stage0 &&
			    !strcmp(last_stage0->name, ce->name))
				ce->ce_flags |= CE_UPDATE;
		}
	}
}
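
/*
 * Editor's note (hedged): "git ls-files --error-unmatch --with-tree=HEAD"
 * is the classic caller; it overlays HEAD on the index before matching
 * pathspecs, roughly as sketched below.
 */
#if 0
static void sketch_with_tree(struct index_state *istate, const char *prefix)
{
	overlay_tree_on_index(istate, "HEAD", prefix);
}
#endif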

struct update_callback_data {
	struct index_state *index;
	int include_sparse;
	int flags;
	int add_errors;
};

static int fix_unmerged_status(struct diff_filepair *p,
			       struct update_callback_data *data)
{
	if (p->status != DIFF_STATUS_UNMERGED)
		return p->status;
	if (!(data->flags & ADD_CACHE_IGNORE_REMOVAL) && !p->two->mode)
		/*
		 * This is not an explicit add request, and the
		 * path is missing from the working tree (deleted)
		 */
		return DIFF_STATUS_DELETED;
	else
		/*
		 * Either an explicit add request, or the path exists
		 * in the working tree.  An attempt to explicitly
		 * add a path that does not exist in the working tree
		 * will be caught as an error by the caller immediately.
		 */
		return DIFF_STATUS_MODIFIED;
}

static void update_callback(struct diff_queue_struct *q,
			    struct diff_options *opt UNUSED, void *cbdata)
{
	int i;
	struct update_callback_data *data = cbdata;

	for (i = 0; i < q->nr; i++) {
		struct diff_filepair *p = q->queue[i];
		const char *path = p->one->path;

		if (!data->include_sparse &&
		    !path_in_sparse_checkout(path, data->index))
			continue;

		switch (fix_unmerged_status(p, data)) {
		default:
			die(_("unexpected diff status %c"), p->status);
		case DIFF_STATUS_MODIFIED:
		case DIFF_STATUS_TYPE_CHANGED:
			if (add_file_to_index(data->index, path, data->flags)) {
				if (!(data->flags & ADD_CACHE_IGNORE_ERRORS))
					die(_("updating files failed"));
				data->add_errors++;
			}
			break;
		case DIFF_STATUS_DELETED:
			if (data->flags & ADD_CACHE_IGNORE_REMOVAL)
				break;
			if (!(data->flags & ADD_CACHE_PRETEND))
				remove_file_from_index(data->index, path);
			if (data->flags & (ADD_CACHE_PRETEND|ADD_CACHE_VERBOSE))
				printf(_("remove '%s'\n"), path);
			break;
		}
	}
}

int add_files_to_cache(struct repository *repo, const char *prefix,
		       const struct pathspec *pathspec, char *ps_matched,
		       int include_sparse, int flags)
{
	struct update_callback_data data;
	struct rev_info rev;

	memset(&data, 0, sizeof(data));
	data.index = repo->index;
	data.include_sparse = include_sparse;
	data.flags = flags;

	repo_init_revisions(repo, &rev, prefix);
	setup_revisions(0, NULL, &rev, NULL);
	if (pathspec) {
		copy_pathspec(&rev.prune_data, pathspec);
		rev.ps_matched = ps_matched;
	}
	rev.diffopt.output_format = DIFF_FORMAT_CALLBACK;
	rev.diffopt.format_callback = update_callback;
	rev.diffopt.format_callback_data = &data;
	rev.diffopt.flags.override_submodule_config = 1;
	rev.max_count = 0; /* do not compare unmerged paths with stage #2 */

	/*
	 * Use an ODB transaction to optimize adding multiple objects.
	 * This function is invoked from commands other than 'add', which
	 * may not have their own transaction active.
	 */
	begin_odb_transaction();
	run_diff_files(&rev, DIFF_RACY_IS_MODIFIED);
	end_odb_transaction();

	release_revisions(&rev);
	return !!data.add_errors;
}
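
/*
 * Editor's sketch (hedged): "git add -u"-style callers update all
 * tracked files matching a pathspec roughly like this; flags and error
 * handling are simplified, and the wrapper name is hypothetical.
 */
#if 0
static int sketch_update_tracked(struct repository *repo,
				 const struct pathspec *pathspec)
{
	if (repo_read_index(repo) < 0)
		die(_("index file corrupt"));
	return add_files_to_cache(repo, NULL, pathspec, NULL, 0, 0);
}
#endif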