/* git: hashmap.c */
1/*
2* Generic implementation of hash-based key value mappings.
3*/
4#include "git-compat-util.h"
5#include "hashmap.h"
6
#define FNV32_BASE ((unsigned int) 0x811c9dc5)
#define FNV32_PRIME ((unsigned int) 0x01000193)

/*
 * FNV-1 hash of a NUL-terminated string. Bytes are read as
 * unsigned char so the result does not depend on the platform's
 * char signedness.
 */
unsigned int strhash(const char *str)
{
	const unsigned char *p = (const unsigned char *) str;
	unsigned int hash = FNV32_BASE;

	for (; *p; p++)
		hash = (hash * FNV32_PRIME) ^ *p;
	return hash;
}
17
18unsigned int strihash(const char *str)
19{
20unsigned int c, hash = FNV32_BASE;
21while ((c = (unsigned char) *str++)) {
22if (c >= 'a' && c <= 'z')
23c -= 'a' - 'A';
24hash = (hash * FNV32_PRIME) ^ c;
25}
26return hash;
27}
28
29unsigned int memhash(const void *buf, size_t len)
30{
31unsigned int hash = FNV32_BASE;
32unsigned char *ucbuf = (unsigned char *) buf;
33while (len--) {
34unsigned int c = *ucbuf++;
35hash = (hash * FNV32_PRIME) ^ c;
36}
37return hash;
38}
39
40unsigned int memihash(const void *buf, size_t len)
41{
42unsigned int hash = FNV32_BASE;
43unsigned char *ucbuf = (unsigned char *) buf;
44while (len--) {
45unsigned int c = *ucbuf++;
46if (c >= 'a' && c <= 'z')
47c -= 'a' - 'A';
48hash = (hash * FNV32_PRIME) ^ c;
49}
50return hash;
51}
52
53/*
54* Incorporate another chunk of data into a memihash
55* computation.
56*/
57unsigned int memihash_cont(unsigned int hash_seed, const void *buf, size_t len)
58{
59unsigned int hash = hash_seed;
60unsigned char *ucbuf = (unsigned char *) buf;
61while (len--) {
62unsigned int c = *ucbuf++;
63if (c >= 'a' && c <= 'z')
64c -= 'a' - 'A';
65hash = (hash * FNV32_PRIME) ^ c;
66}
67return hash;
68}
69
/* default bucket count; table sizes are always powers of two */
#define HASHMAP_INITIAL_SIZE 64
/* grow / shrink by 2^2 (i.e. a factor of four per resize) */
#define HASHMAP_RESIZE_BITS 2
/* load factor in percent: grow once entries exceed this share of buckets */
#define HASHMAP_LOAD_FACTOR 80
75
76static void alloc_table(struct hashmap *map, unsigned int size)
77{
78map->tablesize = size;
79CALLOC_ARRAY(map->table, size);
80
81/* calculate resize thresholds for new size */
82map->grow_at = (unsigned int) ((uint64_t) size * HASHMAP_LOAD_FACTOR / 100);
83if (size <= HASHMAP_INITIAL_SIZE)
84map->shrink_at = 0;
85else
86/*
87* The shrink-threshold must be slightly smaller than
88* (grow-threshold / resize-factor) to prevent erratic resizing,
89* thus we divide by (resize-factor + 1).
90*/
91map->shrink_at = map->grow_at / ((1 << HASHMAP_RESIZE_BITS) + 1);
92}
93
94static inline int entry_equals(const struct hashmap *map,
95const struct hashmap_entry *e1,
96const struct hashmap_entry *e2,
97const void *keydata)
98{
99return (e1 == e2) ||
100(e1->hash == e2->hash &&
101!map->cmpfn(map->cmpfn_data, e1, e2, keydata));
102}
103
104static inline unsigned int bucket(const struct hashmap *map,
105const struct hashmap_entry *key)
106{
107return key->hash & (map->tablesize - 1);
108}
109
110int hashmap_bucket(const struct hashmap *map, unsigned int hash)
111{
112return hash & (map->tablesize - 1);
113}
114
115static void rehash(struct hashmap *map, unsigned int newsize)
116{
117/* map->table MUST NOT be NULL when this function is called */
118unsigned int i, oldsize = map->tablesize;
119struct hashmap_entry **oldtable = map->table;
120
121alloc_table(map, newsize);
122for (i = 0; i < oldsize; i++) {
123struct hashmap_entry *e = oldtable[i];
124while (e) {
125struct hashmap_entry *next = e->next;
126unsigned int b = bucket(map, e);
127e->next = map->table[b];
128map->table[b] = e;
129e = next;
130}
131}
132free(oldtable);
133}
134
135static inline struct hashmap_entry **find_entry_ptr(const struct hashmap *map,
136const struct hashmap_entry *key, const void *keydata)
137{
138/* map->table MUST NOT be NULL when this function is called */
139struct hashmap_entry **e = &map->table[bucket(map, key)];
140while (*e && !entry_equals(map, *e, key, keydata))
141e = &(*e)->next;
142return e;
143}
144
145static int always_equal(const void *cmp_data UNUSED,
146const struct hashmap_entry *entry1 UNUSED,
147const struct hashmap_entry *entry2 UNUSED,
148const void *keydata UNUSED)
149{
150return 0;
151}
152
153void hashmap_init(struct hashmap *map, hashmap_cmp_fn equals_function,
154const void *cmpfn_data, size_t initial_size)
155{
156unsigned int size = HASHMAP_INITIAL_SIZE;
157
158memset(map, 0, sizeof(*map));
159
160map->cmpfn = equals_function ? equals_function : always_equal;
161map->cmpfn_data = cmpfn_data;
162
163/* calculate initial table size and allocate the table */
164initial_size = (unsigned int) ((uint64_t) initial_size * 100
165/ HASHMAP_LOAD_FACTOR);
166while (initial_size > size)
167size <<= HASHMAP_RESIZE_BITS;
168alloc_table(map, size);
169
170/*
171* Keep track of the number of items in the map and
172* allow the map to automatically grow as necessary.
173*/
174map->do_count_items = 1;
175}
176
177static void free_individual_entries(struct hashmap *map, ssize_t entry_offset)
178{
179struct hashmap_iter iter;
180struct hashmap_entry *e;
181
182hashmap_iter_init(map, &iter);
183while ((e = hashmap_iter_next(&iter)))
184/*
185* like container_of, but using caller-calculated
186* offset (caller being hashmap_clear_and_free)
187*/
188free((char *)e - entry_offset);
189}
190
191void hashmap_partial_clear_(struct hashmap *map, ssize_t entry_offset)
192{
193if (!map || !map->table)
194return;
195if (entry_offset >= 0) /* called by hashmap_clear_entries */
196free_individual_entries(map, entry_offset);
197memset(map->table, 0, map->tablesize * sizeof(struct hashmap_entry *));
198map->shrink_at = 0;
199map->private_size = 0;
200}
201
202void hashmap_clear_(struct hashmap *map, ssize_t entry_offset)
203{
204if (!map || !map->table)
205return;
206if (entry_offset >= 0) /* called by hashmap_clear_and_free */
207free_individual_entries(map, entry_offset);
208free(map->table);
209memset(map, 0, sizeof(*map));
210}
211
212struct hashmap_entry *hashmap_get(const struct hashmap *map,
213const struct hashmap_entry *key,
214const void *keydata)
215{
216if (!map->table)
217return NULL;
218return *find_entry_ptr(map, key, keydata);
219}
220
221struct hashmap_entry *hashmap_get_next(const struct hashmap *map,
222const struct hashmap_entry *entry)
223{
224struct hashmap_entry *e = entry->next;
225for (; e; e = e->next)
226if (entry_equals(map, entry, e, NULL))
227return e;
228return NULL;
229}
230
231void hashmap_add(struct hashmap *map, struct hashmap_entry *entry)
232{
233unsigned int b;
234
235if (!map->table)
236alloc_table(map, HASHMAP_INITIAL_SIZE);
237
238b = bucket(map, entry);
239/* add entry */
240entry->next = map->table[b];
241map->table[b] = entry;
242
243/* fix size and rehash if appropriate */
244if (map->do_count_items) {
245map->private_size++;
246if (map->private_size > map->grow_at)
247rehash(map, map->tablesize << HASHMAP_RESIZE_BITS);
248}
249}
250
251struct hashmap_entry *hashmap_remove(struct hashmap *map,
252const struct hashmap_entry *key,
253const void *keydata)
254{
255struct hashmap_entry *old;
256struct hashmap_entry **e;
257
258if (!map->table)
259return NULL;
260e = find_entry_ptr(map, key, keydata);
261if (!*e)
262return NULL;
263
264/* remove existing entry */
265old = *e;
266*e = old->next;
267old->next = NULL;
268
269/* fix size and rehash if appropriate */
270if (map->do_count_items) {
271map->private_size--;
272if (map->private_size < map->shrink_at)
273rehash(map, map->tablesize >> HASHMAP_RESIZE_BITS);
274}
275
276return old;
277}
278
/*
 * Insert 'entry', replacing any entry with the same key; returns the
 * displaced entry (caller owns it) or NULL if none existed.
 */
struct hashmap_entry *hashmap_put(struct hashmap *map,
				  struct hashmap_entry *entry)
{
	struct hashmap_entry *displaced;

	displaced = hashmap_remove(map, entry, NULL);
	hashmap_add(map, entry);
	return displaced;
}
286
287void hashmap_iter_init(struct hashmap *map, struct hashmap_iter *iter)
288{
289iter->map = map;
290iter->tablepos = 0;
291iter->next = NULL;
292}
293
294struct hashmap_entry *hashmap_iter_next(struct hashmap_iter *iter)
295{
296struct hashmap_entry *current = iter->next;
297for (;;) {
298if (current) {
299iter->next = current->next;
300return current;
301}
302
303if (iter->tablepos >= iter->map->tablesize)
304return NULL;
305
306current = iter->map->table[iter->tablepos++];
307}
308}
309
/*
 * An interned chunk in the memintern() pool: hashmap linkage, the
 * chunk's byte length, and the bytes themselves stored inline as a
 * flexible array member (allocated via FLEX_ALLOC_MEM).
 */
struct pool_entry {
	struct hashmap_entry ent;
	size_t len;
	unsigned char data[FLEX_ARRAY];
};
315
316static int pool_entry_cmp(const void *cmp_data UNUSED,
317const struct hashmap_entry *eptr,
318const struct hashmap_entry *entry_or_key,
319const void *keydata)
320{
321const struct pool_entry *e1, *e2;
322
323e1 = container_of(eptr, const struct pool_entry, ent);
324e2 = container_of(entry_or_key, const struct pool_entry, ent);
325
326return e1->data != keydata &&
327(e1->len != e2->len || memcmp(e1->data, keydata, e1->len));
328}
329
330const void *memintern(const void *data, size_t len)
331{
332static struct hashmap map;
333struct pool_entry key, *e;
334
335/* initialize string pool hashmap */
336if (!map.tablesize)
337hashmap_init(&map, pool_entry_cmp, NULL, 0);
338
339/* lookup interned string in pool */
340hashmap_entry_init(&key.ent, memhash(data, len));
341key.len = len;
342e = hashmap_get_entry(&map, &key, ent, data);
343if (!e) {
344/* not found: create it */
345FLEX_ALLOC_MEM(e, data, data, len);
346hashmap_entry_init(&e->ent, key.ent.hash);
347e->len = len;
348hashmap_add(&map, &e->ent);
349}
350return e->data;
351}
352