/*
 * name-hash.c
 *
 * Hashing names in the index state
 *
 * Copyright (C) 2008 Linus Torvalds
 */
#include "git-compat-util.h"
#include "environment.h"
#include "gettext.h"
#include "name-hash.h"
#include "object.h"
#include "read-cache-ll.h"
#include "thread-utils.h"
#include "trace.h"
#include "trace2.h"
#include "sparse-index.h"

struct dir_entry {
	struct hashmap_entry ent;
	struct dir_entry *parent;
	int nr;
	unsigned int namelen;
	char name[FLEX_ARRAY];
};
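
/*
 * Illustration (not part of the original source): with "ignore_case"
 * set, adding the cache entry "Foo/Bar/baz.c" leaves dir_hash holding
 * the entries "Foo" and "Foo/Bar" (stored without a trailing slash),
 * chained via "parent", with "nr" counting how many references keep
 * each entry alive.
 */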

static int dir_entry_cmp(const void *cmp_data UNUSED,
			 const struct hashmap_entry *eptr,
			 const struct hashmap_entry *entry_or_key,
			 const void *keydata)
{
	const struct dir_entry *e1, *e2;
	const char *name = keydata;

	e1 = container_of(eptr, const struct dir_entry, ent);
	e2 = container_of(entry_or_key, const struct dir_entry, ent);

	return e1->namelen != e2->namelen || strncasecmp(e1->name,
			name ? name : e2->name, e1->namelen);
}

static struct dir_entry *find_dir_entry__hash(struct index_state *istate,
		const char *name, unsigned int namelen, unsigned int hash)
{
	struct dir_entry key;
	hashmap_entry_init(&key.ent, hash);
	key.namelen = namelen;
	return hashmap_get_entry(&istate->dir_hash, &key, ent, name);
}

static struct dir_entry *find_dir_entry(struct index_state *istate,
		const char *name, unsigned int namelen)
{
	return find_dir_entry__hash(istate, name, namelen, memihash(name, namelen));
}

static struct dir_entry *hash_dir_entry(struct index_state *istate,
		struct cache_entry *ce, int namelen)
{
	/*
	 * Throw each directory component in the hash for quick lookup
	 * during a git status. Directory components are stored without their
	 * closing slash.  Despite submodules being a directory, they never
	 * reach this point, because they are stored
	 * in index_state.name_hash (as ordinary cache_entries).
	 */
	struct dir_entry *dir;

	/* get length of parent directory */
	while (namelen > 0 && !is_dir_sep(ce->name[namelen - 1]))
		namelen--;
	if (namelen <= 0)
		return NULL;
	namelen--;

	/* lookup existing entry for that directory */
	dir = find_dir_entry(istate, ce->name, namelen);
	if (!dir) {
		/* not found, create it and add to hash table */
		FLEX_ALLOC_MEM(dir, name, ce->name, namelen);
		hashmap_entry_init(&dir->ent, memihash(ce->name, namelen));
		dir->namelen = namelen;
		hashmap_add(&istate->dir_hash, &dir->ent);

		/* recursively add missing parent directories */
		dir->parent = hash_dir_entry(istate, ce, namelen);
	}
	return dir;
}
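
/*
 * Example (added for illustration): hash_dir_entry() strips the final
 * path component, so for a cache entry named "foo/bar/baz.c" it first
 * finds (or creates) the dir_entry "foo/bar", then recurses to create
 * "foo", wiring up the "parent" pointers along the way.
 */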

static void add_dir_entry(struct index_state *istate, struct cache_entry *ce)
{
	/* Add reference to the directory entry (and parents if 0). */
	struct dir_entry *dir = hash_dir_entry(istate, ce, ce_namelen(ce));
	while (dir && !(dir->nr++))
		dir = dir->parent;
}

static void remove_dir_entry(struct index_state *istate, struct cache_entry *ce)
{
	/*
	 * Release reference to the directory entry. If 0, remove and continue
	 * with parent directory.
	 */
	struct dir_entry *dir = hash_dir_entry(istate, ce, ce_namelen(ce));
	while (dir && !(--dir->nr)) {
		struct dir_entry *parent = dir->parent;
		hashmap_remove(&istate->dir_hash, &dir->ent, NULL);
		free(dir);
		dir = parent;
	}
}

static void hash_index_entry(struct index_state *istate, struct cache_entry *ce)
{
	if (ce->ce_flags & CE_HASHED)
		return;
	ce->ce_flags |= CE_HASHED;

	if (!S_ISSPARSEDIR(ce->ce_mode)) {
		hashmap_entry_init(&ce->ent, memihash(ce->name, ce_namelen(ce)));
		hashmap_add(&istate->name_hash, &ce->ent);
	}

	if (ignore_case)
		add_dir_entry(istate, ce);
}

static int cache_entry_cmp(const void *cmp_data UNUSED,
			   const struct hashmap_entry *eptr,
			   const struct hashmap_entry *entry_or_key,
			   const void *remove)
{
	const struct cache_entry *ce1, *ce2;

	ce1 = container_of(eptr, const struct cache_entry, ent);
	ce2 = container_of(entry_or_key, const struct cache_entry, ent);

	/*
	 * For remove_name_hash, find the exact entry (pointer equality); for
	 * index_file_exists, find all entries with matching hash code and
	 * decide whether the entry matches in same_name.
	 */
	return remove ? !(ce1 == ce2) : 0;
}

static int lazy_try_threaded = 1;
static int lazy_nr_dir_threads;

/*
 * Set a minimum number of cache_entries that we will handle per
 * thread and use that to decide how many threads to run (up to
 * the number on the system).
 *
 * For guidance setting the lower per-thread bound, see:
 *     t/helper/test-lazy-init-name-hash --analyze
 */
#define LAZY_THREAD_COST (2000)

/*
 * We use n mutexes to guard n partitions of the "istate->dir_hash"
 * hashtable.  Since "find" and "insert" operations will hash to a
 * particular bucket and modify/search a single chain, we can say
 * that "all chains mod n" are guarded by the same mutex -- rather
 * than having a single mutex to guard the entire table.  (This does
 * require that we disable "rehashing" on the hashtable.)
 *
 * So, a larger value here decreases the probability of a collision
 * and the time that each thread must wait for the mutex.
 */
#define LAZY_MAX_MUTEX   (32)

static pthread_mutex_t *lazy_dir_mutex_array;

/*
 * An array of lazy_entry items is used by the n threads in
 * the directory parse (first) phase to (lock-free) store the
 * intermediate results.  These values are then referenced by
 * the 2 threads in the second phase.
 */
struct lazy_entry {
	struct dir_entry *dir;
	unsigned int hash_dir;
	unsigned int hash_name;
};

/*
 * Decide if we want to use threads (if available) to load
 * the hash tables.  We set "lazy_nr_dir_threads" to zero when
 * it is not worth it.
 */
static int lookup_lazy_params(struct index_state *istate)
{
	int nr_cpus;

	lazy_nr_dir_threads = 0;

	if (!lazy_try_threaded)
		return 0;

	/*
	 * If we are respecting case, just use the original
	 * code to build the "istate->name_hash".  We don't
	 * need the complexity here.
	 */
	if (!ignore_case)
		return 0;

	nr_cpus = online_cpus();
	if (nr_cpus < 2)
		return 0;

	if (istate->cache_nr < 2 * LAZY_THREAD_COST)
		return 0;

	if (istate->cache_nr < nr_cpus * LAZY_THREAD_COST)
		nr_cpus = istate->cache_nr / LAZY_THREAD_COST;
	lazy_nr_dir_threads = nr_cpus;
	return lazy_nr_dir_threads;
}
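
/*
 * Worked example (added for illustration): on a 16-core machine with
 * 10000 index entries and LAZY_THREAD_COST of 2000, we have
 * 10000 >= 2 * 2000 (so threading is worth it), but 10000 < 16 * 2000,
 * so the thread count is reduced to 10000 / 2000 = 5 "dir" threads.
 */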

/*
 * Initialize n mutexes for use when searching and inserting
 * into "istate->dir_hash".  All "dir" threads are trying
 * to insert partial pathnames into the hash as they iterate
 * over their portions of the index, so lock contention is
 * high.
 *
 * However, the hashmap is going to put items into bucket
 * chains based on their hash values.  Use that to create n
 * mutexes and lock on mutex[bucket(hash) % n].  This will
 * decrease the collision rate by (hopefully) a factor of n.
 */
static void init_dir_mutex(void)
{
	int j;

	CALLOC_ARRAY(lazy_dir_mutex_array, LAZY_MAX_MUTEX);

	for (j = 0; j < LAZY_MAX_MUTEX; j++)
		init_recursive_mutex(&lazy_dir_mutex_array[j]);
}

static void cleanup_dir_mutex(void)
{
	int j;

	for (j = 0; j < LAZY_MAX_MUTEX; j++)
		pthread_mutex_destroy(&lazy_dir_mutex_array[j]);

	free(lazy_dir_mutex_array);
}

static void lock_dir_mutex(int j)
{
	pthread_mutex_lock(&lazy_dir_mutex_array[j]);
}

static void unlock_dir_mutex(int j)
{
	pthread_mutex_unlock(&lazy_dir_mutex_array[j]);
}

static inline int compute_dir_lock_nr(
	const struct hashmap *map,
	unsigned int hash)
{
	return hashmap_bucket(map, hash) % LAZY_MAX_MUTEX;
}
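
/*
 * Usage sketch (added for illustration): a thread that wants to touch
 * the chain for a given hash serializes on its partition, exactly as
 * hash_dir_entry_with_parent_and_prefix() below does:
 *
 *     int lock_nr = compute_dir_lock_nr(&istate->dir_hash, hash);
 *     lock_dir_mutex(lock_nr);
 *     ... find or insert within that bucket chain ...
 *     unlock_dir_mutex(lock_nr);
 *
 * Two hashes that land in different buckets mod LAZY_MAX_MUTEX proceed
 * in parallel; only same-partition accesses contend.
 */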

static struct dir_entry *hash_dir_entry_with_parent_and_prefix(
	struct index_state *istate,
	struct dir_entry *parent,
	struct strbuf *prefix)
{
	struct dir_entry *dir;
	unsigned int hash;
	int lock_nr;

	/*
	 * Either we have a parent directory and path with slash(es)
	 * or the directory is an immediate child of the root directory.
	 */
	assert((parent != NULL) ^ (strchr(prefix->buf, '/') == NULL));

	if (parent)
		hash = memihash_cont(parent->ent.hash,
			prefix->buf + parent->namelen,
			prefix->len - parent->namelen);
	else
		hash = memihash(prefix->buf, prefix->len);

	lock_nr = compute_dir_lock_nr(&istate->dir_hash, hash);
	lock_dir_mutex(lock_nr);

	dir = find_dir_entry__hash(istate, prefix->buf, prefix->len, hash);
	if (!dir) {
		FLEX_ALLOC_MEM(dir, name, prefix->buf, prefix->len);
		hashmap_entry_init(&dir->ent, hash);
		dir->namelen = prefix->len;
		dir->parent = parent;
		hashmap_add(&istate->dir_hash, &dir->ent);

		if (parent) {
			unlock_dir_mutex(lock_nr);

			/* All I really need here is an InterlockedIncrement(&(parent->nr)) */
			lock_nr = compute_dir_lock_nr(&istate->dir_hash, parent->ent.hash);
			lock_dir_mutex(lock_nr);
			parent->nr++;
		}
	}

	unlock_dir_mutex(lock_nr);

	return dir;
}

/*
 * handle_range_1() and handle_range_dir() are derived from
 * clear_ce_flags_1() and clear_ce_flags_dir() in unpack-trees.c
 * and handle the iteration over the entire array of index entries.
 * They use recursion for adjacent entries in the same parent
 * directory.
 */
static int handle_range_1(
	struct index_state *istate,
	int k_start,
	int k_end,
	struct dir_entry *parent,
	struct strbuf *prefix,
	struct lazy_entry *lazy_entries);

static int handle_range_dir(
	struct index_state *istate,
	int k_start,
	int k_end,
	struct dir_entry *parent,
	struct strbuf *prefix,
	struct lazy_entry *lazy_entries,
	struct dir_entry **dir_new_out)
{
	int rc, k;
	int input_prefix_len = prefix->len;
	struct dir_entry *dir_new;

	dir_new = hash_dir_entry_with_parent_and_prefix(istate, parent, prefix);

	strbuf_addch(prefix, '/');

	/*
	 * Scan forward in the index array for index entries having the same
	 * path prefix (that are also in this directory).
	 */
	if (k_start + 1 >= k_end)
		k = k_end;
	else if (strncmp(istate->cache[k_start + 1]->name, prefix->buf, prefix->len) > 0)
		k = k_start + 1;
	else if (strncmp(istate->cache[k_end - 1]->name, prefix->buf, prefix->len) == 0)
		k = k_end;
	else {
		int begin = k_start;
		int end = k_end;
		assert(begin >= 0);
		while (begin < end) {
			int mid = begin + ((end - begin) >> 1);
			int cmp = strncmp(istate->cache[mid]->name, prefix->buf, prefix->len);
			if (cmp == 0) /* mid has same prefix; look in second part */
				begin = mid + 1;
			else if (cmp > 0) /* mid is past group; look in first part */
				end = mid;
			else
				die("cache entry out of order");
		}
		k = begin;
	}

	/*
	 * Recurse and process what we can of this subset [k_start, k).
	 */
	rc = handle_range_1(istate, k_start, k, dir_new, prefix, lazy_entries);

	strbuf_setlen(prefix, input_prefix_len);

	*dir_new_out = dir_new;
	return rc;
}
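
/*
 * Example (added for illustration): because the index is sorted by
 * pathname, entries sharing a directory prefix form one contiguous run.
 * With cache[] = { "a/x", "a/y", "b" } and prefix "a/", the binary
 * search above finds k = 2, so the recursion handles [k_start, 2) under
 * the dir_entry for "a" and the caller resumes at "b".
 */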

static int handle_range_1(
	struct index_state *istate,
	int k_start,
	int k_end,
	struct dir_entry *parent,
	struct strbuf *prefix,
	struct lazy_entry *lazy_entries)
{
	int input_prefix_len = prefix->len;
	int k = k_start;

	while (k < k_end) {
		struct cache_entry *ce_k = istate->cache[k];
		const char *name, *slash;

		if (prefix->len && strncmp(ce_k->name, prefix->buf, prefix->len))
			break;

		name = ce_k->name + prefix->len;
		slash = strchr(name, '/');

		if (slash) {
			int len = slash - name;
			int processed;
			struct dir_entry *dir_new;

			strbuf_add(prefix, name, len);
			processed = handle_range_dir(istate, k, k_end, parent, prefix, lazy_entries, &dir_new);
			if (processed) {
				k += processed;
				strbuf_setlen(prefix, input_prefix_len);
				continue;
			}

			strbuf_addch(prefix, '/');
			processed = handle_range_1(istate, k, k_end, dir_new, prefix, lazy_entries);
			k += processed;
			strbuf_setlen(prefix, input_prefix_len);
			continue;
		}

		/*
		 * It is too expensive to take a lock to insert "ce_k"
		 * into "istate->name_hash" and increment the ref-count
		 * on the "parent" dir.  So we defer actually updating
		 * permanent data structures until phase 2 (where we
		 * can change the locking requirements) and simply
		 * accumulate our current results into the lazy_entries
		 * data array.
		 *
		 * We do not need to lock the lazy_entries array because
		 * we have exclusive access to the cells in the range
		 * [k_start,k_end) that this thread was given.
		 */
		lazy_entries[k].dir = parent;
		if (parent) {
			lazy_entries[k].hash_name = memihash_cont(
				parent->ent.hash,
				ce_k->name + parent->namelen,
				ce_namelen(ce_k) - parent->namelen);
			lazy_entries[k].hash_dir = parent->ent.hash;
		} else {
			lazy_entries[k].hash_name = memihash(ce_k->name, ce_namelen(ce_k));
		}

		k++;
	}

	return k - k_start;
}

struct lazy_dir_thread_data {
	pthread_t pthread;
	struct index_state *istate;
	struct lazy_entry *lazy_entries;
	int k_start;
	int k_end;
};

static void *lazy_dir_thread_proc(void *_data)
{
	struct lazy_dir_thread_data *d = _data;
	struct strbuf prefix = STRBUF_INIT;
	handle_range_1(d->istate, d->k_start, d->k_end, NULL, &prefix, d->lazy_entries);
	strbuf_release(&prefix);
	return NULL;
}

struct lazy_name_thread_data {
	pthread_t pthread;
	struct index_state *istate;
	struct lazy_entry *lazy_entries;
};

static void *lazy_name_thread_proc(void *_data)
{
	struct lazy_name_thread_data *d = _data;
	int k;

	for (k = 0; k < d->istate->cache_nr; k++) {
		struct cache_entry *ce_k = d->istate->cache[k];
		ce_k->ce_flags |= CE_HASHED;
		hashmap_entry_init(&ce_k->ent, d->lazy_entries[k].hash_name);
		hashmap_add(&d->istate->name_hash, &ce_k->ent);
	}

	return NULL;
}

static inline void lazy_update_dir_ref_counts(
	struct index_state *istate,
	struct lazy_entry *lazy_entries)
{
	int k;

	for (k = 0; k < istate->cache_nr; k++) {
		if (lazy_entries[k].dir)
			lazy_entries[k].dir->nr++;
	}
}

static void threaded_lazy_init_name_hash(
	struct index_state *istate)
{
	int err;
	int nr_each;
	int k_start;
	int t;
	struct lazy_entry *lazy_entries;
	struct lazy_dir_thread_data *td_dir;
	struct lazy_name_thread_data *td_name;

	if (!HAVE_THREADS)
		return;

	k_start = 0;
	nr_each = DIV_ROUND_UP(istate->cache_nr, lazy_nr_dir_threads);

	CALLOC_ARRAY(lazy_entries, istate->cache_nr);
	CALLOC_ARRAY(td_dir, lazy_nr_dir_threads);
	CALLOC_ARRAY(td_name, 1);

	init_dir_mutex();

	/*
	 * Phase 1:
	 * Build "istate->dir_hash" using n "dir" threads (and a read-only index).
	 */
	for (t = 0; t < lazy_nr_dir_threads; t++) {
		struct lazy_dir_thread_data *td_dir_t = td_dir + t;
		td_dir_t->istate = istate;
		td_dir_t->lazy_entries = lazy_entries;
		td_dir_t->k_start = k_start;
		k_start += nr_each;
		if (k_start > istate->cache_nr)
			k_start = istate->cache_nr;
		td_dir_t->k_end = k_start;
		err = pthread_create(&td_dir_t->pthread, NULL, lazy_dir_thread_proc, td_dir_t);
		if (err)
			die(_("unable to create lazy_dir thread: %s"), strerror(err));
	}
	for (t = 0; t < lazy_nr_dir_threads; t++) {
		struct lazy_dir_thread_data *td_dir_t = td_dir + t;
		if (pthread_join(td_dir_t->pthread, NULL))
			die("unable to join lazy_dir_thread");
	}

	/*
	 * Phase 2:
	 * Iterate over all index entries and add them to the "istate->name_hash"
	 * using a single "name" background thread.
	 * (Testing showed it wasn't worth running more than 1 thread for this.)
	 *
	 * Meanwhile, finish updating the parent directory ref-counts for each
	 * index entry using the current thread.  (This step is very fast and
	 * doesn't need threading.)
	 */
	td_name->istate = istate;
	td_name->lazy_entries = lazy_entries;
	err = pthread_create(&td_name->pthread, NULL, lazy_name_thread_proc, td_name);
	if (err)
		die(_("unable to create lazy_name thread: %s"), strerror(err));

	lazy_update_dir_ref_counts(istate, lazy_entries);

	err = pthread_join(td_name->pthread, NULL);
	if (err)
		die(_("unable to join lazy_name thread: %s"), strerror(err));

	cleanup_dir_mutex();

	free(td_name);
	free(td_dir);
	free(lazy_entries);
}
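
/*
 * Worked example (added for illustration): continuing the 10000-entry,
 * 5-thread case above, DIV_ROUND_UP(10000, 5) = 2000, so the "dir"
 * threads get the ranges [0,2000), [2000,4000), ..., [8000,10000) of
 * istate->cache[] in phase 1; phase 2 then runs one "name" thread over
 * the whole array while the calling thread fixes up dir ref-counts.
 */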

static void lazy_init_name_hash(struct index_state *istate)
{
	if (istate->name_hash_initialized)
		return;
	trace_performance_enter();
	trace2_region_enter("index", "name-hash-init", istate->repo);
	hashmap_init(&istate->name_hash, cache_entry_cmp, NULL, istate->cache_nr);
	hashmap_init(&istate->dir_hash, dir_entry_cmp, NULL, istate->cache_nr);

	if (lookup_lazy_params(istate)) {
		/*
		 * Disable item counting and automatic rehashing because
		 * we do per-chain (mod n) locking rather than whole hashmap
		 * locking and we need to prevent the table-size from changing
		 * and bucket items from being redistributed.
		 */
		hashmap_disable_item_counting(&istate->dir_hash);
		threaded_lazy_init_name_hash(istate);
		hashmap_enable_item_counting(&istate->dir_hash);
	} else {
		int nr;
		for (nr = 0; nr < istate->cache_nr; nr++)
			hash_index_entry(istate, istate->cache[nr]);
	}

	istate->name_hash_initialized = 1;
	trace2_region_leave("index", "name-hash-init", istate->repo);
	trace_performance_leave("initialize name hash");
}

/*
 * A test routine for t/helper/ sources.
 *
 * Returns the number of threads used or 0 when
 * the non-threaded code path was used.
 *
 * Requesting threading WILL NOT override guards
 * in lookup_lazy_params().
 */
int test_lazy_init_name_hash(struct index_state *istate, int try_threaded)
{
	lazy_nr_dir_threads = 0;
	lazy_try_threaded = try_threaded;

	lazy_init_name_hash(istate);

	return lazy_nr_dir_threads;
}

void add_name_hash(struct index_state *istate, struct cache_entry *ce)
{
	if (istate->name_hash_initialized)
		hash_index_entry(istate, ce);
}

void remove_name_hash(struct index_state *istate, struct cache_entry *ce)
{
	if (!istate->name_hash_initialized || !(ce->ce_flags & CE_HASHED))
		return;
	ce->ce_flags &= ~CE_HASHED;
	hashmap_remove(&istate->name_hash, &ce->ent, ce);

	if (ignore_case)
		remove_dir_entry(istate, ce);
}

static int slow_same_name(const char *name1, int len1, const char *name2, int len2)
{
	if (len1 != len2)
		return 0;

	while (len1) {
		unsigned char c1 = *name1++;
		unsigned char c2 = *name2++;
		len1--;
		if (c1 != c2) {
			c1 = toupper(c1);
			c2 = toupper(c2);
			if (c1 != c2)
				return 0;
		}
	}
	return 1;
}
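
/*
 * Example (added for illustration): slow_same_name("Makefile", 8,
 * "makefile", 8) returns 1, since each mismatching byte pair compares
 * equal after toupper(); "Makefile" vs "Makefile2" fails the length
 * check and returns 0 immediately.
 */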

static int same_name(const struct cache_entry *ce, const char *name, int namelen, int icase)
{
	int len = ce_namelen(ce);

	/*
	 * Always do exact compare, even if we want a case-ignoring comparison;
	 * we do the quick exact one first, because it will be the common case.
	 */
	if (len == namelen && !memcmp(name, ce->name, len))
		return 1;

	if (!icase)
		return 0;

	return slow_same_name(name, namelen, ce->name, len);
}

int index_dir_find(struct index_state *istate, const char *name, int namelen,
		   struct strbuf *canonical_path)
{
	struct dir_entry *dir;

	lazy_init_name_hash(istate);
	expand_to_path(istate, name, namelen, 0);
	dir = find_dir_entry(istate, name, namelen);

	if (canonical_path && dir && dir->nr) {
		strbuf_reset(canonical_path);
		strbuf_add(canonical_path, dir->name, dir->namelen);
	}

	return dir && dir->nr;
}

void adjust_dirname_case(struct index_state *istate, char *name)
{
	const char *startPtr = name;
	const char *ptr = startPtr;

	lazy_init_name_hash(istate);
	expand_to_path(istate, name, strlen(name), 0);
	while (*ptr) {
		while (*ptr && *ptr != '/')
			ptr++;

		if (*ptr == '/') {
			struct dir_entry *dir;

			dir = find_dir_entry(istate, name, ptr - name);
			if (dir) {
				memcpy((void *)startPtr, dir->name + (startPtr - name), ptr - startPtr);
				startPtr = ptr + 1;
			}
			ptr++;
		}
	}
}
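
/*
 * Example (added for illustration): if the index contains
 * "Foo/Bar/baz.c" and the caller passes the mutable string
 * "foo/bar/baz.c", each directory component is looked up
 * case-insensitively and overwritten in place with the canonical
 * spelling from dir_hash, yielding "Foo/Bar/baz.c"; the final filename
 * component is left untouched.
 */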
728

729
struct cache_entry *index_file_exists(struct index_state *istate, const char *name, int namelen, int icase)
730
{
731
	struct cache_entry *ce;
732
	unsigned int hash = memihash(name, namelen);
733

734
	lazy_init_name_hash(istate);
735
	expand_to_path(istate, name, namelen, icase);
736

737
	ce = hashmap_get_entry_from_hash(&istate->name_hash, hash, NULL,
738
					 struct cache_entry, ent);
739
	hashmap_for_each_entry_from(&istate->name_hash, ce, ent) {
740
		if (same_name(ce, name, namelen, icase))
741
			return ce;
742
	}
743
	return NULL;
744
}
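
/*
 * Usage example (added for illustration): with an index containing
 * "README.md", index_file_exists(istate, "readme.MD", 9, 1) hashes the
 * name case-insensitively via memihash() and walks the matching chain
 * until same_name() accepts the entry, returning the "README.md"
 * cache_entry; with icase == 0 the exact compare fails and it returns
 * NULL.
 */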

void free_name_hash(struct index_state *istate)
{
	if (!istate->name_hash_initialized)
		return;
	istate->name_hash_initialized = 0;

	hashmap_clear(&istate->name_hash);
	hashmap_clear_and_free(&istate->dir_hash, struct dir_entry, ent);
}