68
#include "qemu/osdep.h"
70
#include "qemu/atomic.h"
72
#include "qemu/memalign.h"
84
#define QHT_BUCKET_ALIGN 64
87
#if HOST_LONG_BITS == 32
88
#define QHT_BUCKET_ENTRIES 6
90
#define QHT_BUCKET_ENTRIES 4
100
qht_iter_func_t retvoid;
101
qht_iter_bool_func_t retbool;
103
enum qht_iter_type type;
110
static inline void qht_lock(struct qht *ht)
112
if (ht->mode & QHT_MODE_RAW_MUTEXES) {
113
qemu_mutex_lock__raw(&ht->lock);
115
qemu_mutex_lock(&ht->lock);
119
static inline int qht_trylock(struct qht *ht)
121
if (ht->mode & QHT_MODE_RAW_MUTEXES) {
122
return qemu_mutex_trylock__raw(&(ht)->lock);
124
return qemu_mutex_trylock(&(ht)->lock);
128
static inline void qht_unlock(struct qht *ht)
130
qemu_mutex_unlock(&ht->lock);
146
QemuSeqLock sequence;
147
uint32_t hashes[QHT_BUCKET_ENTRIES];
148
void *pointers[QHT_BUCKET_ENTRIES];
149
struct qht_bucket *next;
150
} QEMU_ALIGNED(QHT_BUCKET_ALIGN);
152
QEMU_BUILD_BUG_ON(sizeof(struct qht_bucket) > QHT_BUCKET_ALIGN);
163
#define QHT_TSAN_BUCKET_LOCKS_BITS 4
164
#define QHT_TSAN_BUCKET_LOCKS (1 << QHT_TSAN_BUCKET_LOCKS_BITS)
166
struct qht_tsan_lock {
168
} QEMU_ALIGNED(QHT_BUCKET_ALIGN);
185
struct qht_bucket *buckets;
187
size_t n_added_buckets;
188
size_t n_added_buckets_threshold;
190
struct qht_tsan_lock tsan_bucket_locks[QHT_TSAN_BUCKET_LOCKS];
195
/* Grow when overflow buckets exceed 1/8 of the number of head buckets. */
#define QHT_NR_ADDED_BUCKETS_THRESHOLD_DIV 8

static void qht_do_resize_reset(struct qht *ht, struct qht_map *new,
                                bool reset);
static void qht_grow_maybe(struct qht *ht);
203
/*
 * Debug build: assert() plus a bucket invariant checker — once an empty
 * slot is seen in a bucket chain, no occupied slot may follow it.
 * NOTE(review): the #ifdef QHT_DEBUG / #else / #endif framing was lost to
 * corruption and has been restored.
 */
#ifdef QHT_DEBUG

#define qht_debug_assert(X) do { assert(X); } while (0)

static void qht_bucket_debug__locked(struct qht_bucket *b)
{
    bool seen_empty = false;
    bool corrupt = false;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i] == NULL) {
                seen_empty = true;
                continue;
            }
            if (seen_empty) {
                /* occupied slot after an empty one: chain is corrupt */
                fprintf(stderr, "%s: b: %p, pos: %i, hash: 0x%x, p: %p\n",
                        __func__, b, i, b->hashes[i], b->pointers[i]);
                corrupt = true;
            }
        }
        b = b->next;
    } while (b);
    qht_debug_assert(!corrupt);
}

static void qht_map_debug__all_locked(struct qht_map *map)
{
    int i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_bucket_debug__locked(&map->buckets[i]);
    }
}

#else

/* Release build: evaluate X for side effects only; no checking. */
#define qht_debug_assert(X) do { (void)(X); } while (0)

static inline void qht_bucket_debug__locked(struct qht_bucket *b)
{ }

static inline void qht_map_debug__all_locked(struct qht_map *map)
{ }

#endif /* QHT_DEBUG */
247
static inline size_t qht_elems_to_buckets(size_t n_elems)
249
return pow2ceil(n_elems / QHT_BUCKET_ENTRIES);
257
static inline void qht_do_if_first_in_stripe(struct qht_map *map,
258
struct qht_bucket *b,
259
void (*func)(QemuSpin *spin))
262
unsigned long bucket_idx = b - map->buckets;
263
bool is_first_in_stripe = (bucket_idx >> QHT_TSAN_BUCKET_LOCKS_BITS) == 0;
264
if (is_first_in_stripe) {
265
unsigned long lock_idx = bucket_idx & (QHT_TSAN_BUCKET_LOCKS - 1);
266
func(&map->tsan_bucket_locks[lock_idx].lock);
273
static inline void qht_bucket_lock_do(struct qht_map *map,
274
struct qht_bucket *b,
275
void (*func)(QemuSpin *lock))
278
unsigned long bucket_idx = b - map->buckets;
279
unsigned long lock_idx = bucket_idx & (QHT_TSAN_BUCKET_LOCKS - 1);
280
func(&map->tsan_bucket_locks[lock_idx].lock);
286
static inline void qht_bucket_lock(struct qht_map *map,
287
struct qht_bucket *b)
289
qht_bucket_lock_do(map, b, qemu_spin_lock);
292
static inline void qht_bucket_unlock(struct qht_map *map,
293
struct qht_bucket *b)
295
qht_bucket_lock_do(map, b, qemu_spin_unlock);
298
static inline void qht_head_init(struct qht_map *map, struct qht_bucket *b)
300
memset(b, 0, sizeof(*b));
301
qht_do_if_first_in_stripe(map, b, qemu_spin_init);
302
seqlock_init(&b->sequence);
306
struct qht_bucket *qht_map_to_bucket(const struct qht_map *map, uint32_t hash)
308
return &map->buckets[hash & (map->n_buckets - 1)];
312
static void qht_map_lock_buckets(struct qht_map *map)
316
for (i = 0; i < map->n_buckets; i++) {
317
struct qht_bucket *b = &map->buckets[i];
319
qht_do_if_first_in_stripe(map, b, qemu_spin_lock);
323
static void qht_map_unlock_buckets(struct qht_map *map)
327
for (i = 0; i < map->n_buckets; i++) {
328
struct qht_bucket *b = &map->buckets[i];
330
qht_do_if_first_in_stripe(map, b, qemu_spin_unlock);
338
static inline bool qht_map_is_stale__locked(const struct qht *ht,
339
const struct qht_map *map)
341
return map != ht->map;
352
void qht_map_lock_buckets__no_stale(struct qht *ht, struct qht_map **pmap)
356
map = qatomic_rcu_read(&ht->map);
357
qht_map_lock_buckets(map);
358
if (likely(!qht_map_is_stale__locked(ht, map))) {
362
qht_map_unlock_buckets(map);
367
qht_map_lock_buckets(map);
382
struct qht_bucket *qht_bucket_lock__no_stale(struct qht *ht, uint32_t hash,
383
struct qht_map **pmap)
385
struct qht_bucket *b;
388
map = qatomic_rcu_read(&ht->map);
389
b = qht_map_to_bucket(map, hash);
391
qht_bucket_lock(map, b);
392
if (likely(!qht_map_is_stale__locked(ht, map))) {
396
qht_bucket_unlock(map, b);
401
b = qht_map_to_bucket(map, hash);
402
qht_bucket_lock(map, b);
408
static inline bool qht_map_needs_resize(const struct qht_map *map)
410
return qatomic_read(&map->n_added_buckets) >
411
map->n_added_buckets_threshold;
414
static inline void qht_chain_destroy(struct qht_map *map,
415
struct qht_bucket *head)
417
struct qht_bucket *curr = head->next;
418
struct qht_bucket *prev;
420
qht_do_if_first_in_stripe(map, head, qemu_spin_destroy);
429
static void qht_map_destroy(struct qht_map *map)
433
for (i = 0; i < map->n_buckets; i++) {
434
qht_chain_destroy(map, &map->buckets[i]);
436
qemu_vfree(map->buckets);
440
static struct qht_map *qht_map_create(size_t n_buckets)
445
map = g_malloc(sizeof(*map));
446
map->n_buckets = n_buckets;
448
map->n_added_buckets = 0;
449
map->n_added_buckets_threshold = n_buckets /
450
QHT_NR_ADDED_BUCKETS_THRESHOLD_DIV;
453
if (unlikely(map->n_added_buckets_threshold == 0)) {
454
map->n_added_buckets_threshold = 1;
457
map->buckets = qemu_memalign(QHT_BUCKET_ALIGN,
458
sizeof(*map->buckets) * n_buckets);
459
for (i = 0; i < n_buckets; i++) {
460
qht_head_init(map, &map->buckets[i]);
465
void qht_init(struct qht *ht, qht_cmp_func_t cmp, size_t n_elems,
469
size_t n_buckets = qht_elems_to_buckets(n_elems);
474
qemu_mutex_init(&ht->lock);
475
map = qht_map_create(n_buckets);
476
qatomic_rcu_set(&ht->map, map);
480
void qht_destroy(struct qht *ht)
482
qht_map_destroy(ht->map);
483
memset(ht, 0, sizeof(*ht));
486
static void qht_bucket_reset__locked(struct qht_bucket *head)
488
struct qht_bucket *b = head;
491
seqlock_write_begin(&head->sequence);
493
for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
494
if (b->pointers[i] == NULL) {
497
qatomic_set(&b->hashes[i], 0);
498
qatomic_set(&b->pointers[i], NULL);
503
seqlock_write_end(&head->sequence);
507
static void qht_map_reset__all_locked(struct qht_map *map)
511
for (i = 0; i < map->n_buckets; i++) {
512
qht_bucket_reset__locked(&map->buckets[i]);
514
qht_map_debug__all_locked(map);
517
void qht_reset(struct qht *ht)
521
qht_map_lock_buckets__no_stale(ht, &map);
522
qht_map_reset__all_locked(map);
523
qht_map_unlock_buckets(map);
526
static inline void qht_do_resize(struct qht *ht, struct qht_map *new)
528
qht_do_resize_reset(ht, new, false);
531
static inline void qht_do_resize_and_reset(struct qht *ht, struct qht_map *new)
533
qht_do_resize_reset(ht, new, true);
536
bool qht_reset_size(struct qht *ht, size_t n_elems)
538
struct qht_map *new = NULL;
542
n_buckets = qht_elems_to_buckets(n_elems);
546
if (n_buckets != map->n_buckets) {
547
new = qht_map_create(n_buckets);
549
qht_do_resize_and_reset(ht, new);
556
/*
 * Lockless scan of @head's chain for an entry with @hash accepted by
 * @func.  Runs inside a seqlock read section, so torn reads are detected
 * and retried by the caller.
 */
static inline
void *qht_do_lookup(const struct qht_bucket *head, qht_lookup_func_t func,
                    const void *userp, uint32_t hash)
{
    const struct qht_bucket *b = head;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (qatomic_read(&b->hashes[i]) == hash) {
                /*
                 * The pointer is dereferenced before seqlock_read_retry,
                 * so we need qatomic_rcu_read here for the dependency
                 * ordering.
                 */
                void *p = qatomic_rcu_read(&b->pointers[i]);

                if (likely(p) && likely(func(p, userp))) {
                    return p;
                }
            }
        }
        b = qatomic_rcu_read(&b->next);
    } while (b);

    return NULL;
}
582
static __attribute__((noinline))
583
void *qht_lookup__slowpath(const struct qht_bucket *b, qht_lookup_func_t func,
584
const void *userp, uint32_t hash)
586
unsigned int version;
590
version = seqlock_read_begin(&b->sequence);
591
ret = qht_do_lookup(b, func, userp, hash);
592
} while (seqlock_read_retry(&b->sequence, version));
596
void *qht_lookup_custom(const struct qht *ht, const void *userp, uint32_t hash,
597
qht_lookup_func_t func)
599
const struct qht_bucket *b;
600
const struct qht_map *map;
601
unsigned int version;
604
map = qatomic_rcu_read(&ht->map);
605
b = qht_map_to_bucket(map, hash);
607
version = seqlock_read_begin(&b->sequence);
608
ret = qht_do_lookup(b, func, userp, hash);
609
if (likely(!seqlock_read_retry(&b->sequence, version))) {
616
return qht_lookup__slowpath(b, func, userp, hash);
619
void *qht_lookup(const struct qht *ht, const void *userp, uint32_t hash)
621
return qht_lookup_custom(ht, userp, hash, ht->cmp);
628
static void *qht_insert__locked(const struct qht *ht, struct qht_map *map,
629
struct qht_bucket *head, void *p, uint32_t hash,
632
struct qht_bucket *b = head;
633
struct qht_bucket *prev = NULL;
634
struct qht_bucket *new = NULL;
638
for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
639
if (b->pointers[i]) {
640
if (unlikely(b->hashes[i] == hash &&
641
ht->cmp(b->pointers[i], p))) {
642
return b->pointers[i];
652
b = qemu_memalign(QHT_BUCKET_ALIGN, sizeof(*b));
653
memset(b, 0, sizeof(*b));
656
qatomic_inc(&map->n_added_buckets);
657
if (unlikely(qht_map_needs_resize(map)) && needs_resize) {
658
*needs_resize = true;
663
seqlock_write_begin(&head->sequence);
665
qatomic_rcu_set(&prev->next, b);
668
qatomic_set(&b->hashes[i], hash);
669
qatomic_set(&b->pointers[i], p);
670
seqlock_write_end(&head->sequence);
674
static __attribute__((noinline)) void qht_grow_maybe(struct qht *ht)
682
if (qht_trylock(ht)) {
687
if (qht_map_needs_resize(map)) {
688
struct qht_map *new = qht_map_create(map->n_buckets * 2);
690
qht_do_resize(ht, new);
695
bool qht_insert(struct qht *ht, void *p, uint32_t hash, void **existing)
697
struct qht_bucket *b;
699
bool needs_resize = false;
705
b = qht_bucket_lock__no_stale(ht, hash, &map);
706
prev = qht_insert__locked(ht, map, b, p, hash, &needs_resize);
707
qht_bucket_debug__locked(b);
708
qht_bucket_unlock(map, b);
710
if (unlikely(needs_resize) && ht->mode & QHT_MODE_AUTO_RESIZE) {
713
if (likely(prev == NULL)) {
722
static inline bool qht_entry_is_last(const struct qht_bucket *b, int pos)
724
if (pos == QHT_BUCKET_ENTRIES - 1) {
725
if (b->next == NULL) {
728
return b->next->pointers[0] == NULL;
730
return b->pointers[pos + 1] == NULL;
734
qht_entry_move(struct qht_bucket *to, int i, struct qht_bucket *from, int j)
736
qht_debug_assert(!(to == from && i == j));
737
qht_debug_assert(to->pointers[i]);
738
qht_debug_assert(from->pointers[j]);
740
qatomic_set(&to->hashes[i], from->hashes[j]);
741
qatomic_set(&to->pointers[i], from->pointers[j]);
743
qatomic_set(&from->hashes[j], 0);
744
qatomic_set(&from->pointers[j], NULL);
751
static inline void qht_bucket_remove_entry(struct qht_bucket *orig, int pos)
753
struct qht_bucket *b = orig;
754
struct qht_bucket *prev = NULL;
757
if (qht_entry_is_last(orig, pos)) {
758
qatomic_set(&orig->hashes[pos], 0);
759
qatomic_set(&orig->pointers[pos], NULL);
763
for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
764
if (b->pointers[i]) {
768
return qht_entry_move(orig, pos, b, i - 1);
770
qht_debug_assert(prev);
771
return qht_entry_move(orig, pos, prev, QHT_BUCKET_ENTRIES - 1);
777
qht_entry_move(orig, pos, prev, QHT_BUCKET_ENTRIES - 1);
782
bool qht_remove__locked(struct qht_bucket *head, const void *p, uint32_t hash)
784
struct qht_bucket *b = head;
788
for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
789
void *q = b->pointers[i];
791
if (unlikely(q == NULL)) {
795
qht_debug_assert(b->hashes[i] == hash);
796
seqlock_write_begin(&head->sequence);
797
qht_bucket_remove_entry(b, i);
798
seqlock_write_end(&head->sequence);
807
bool qht_remove(struct qht *ht, const void *p, uint32_t hash)
809
struct qht_bucket *b;
816
b = qht_bucket_lock__no_stale(ht, hash, &map);
817
ret = qht_remove__locked(b, p, hash);
818
qht_bucket_debug__locked(b);
819
qht_bucket_unlock(map, b);
823
static inline void qht_bucket_iter(struct qht_bucket *head,
824
const struct qht_iter *iter, void *userp)
826
struct qht_bucket *b = head;
830
for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
831
if (b->pointers[i] == NULL) {
834
switch (iter->type) {
836
iter->f.retvoid(b->pointers[i], b->hashes[i], userp);
839
if (iter->f.retbool(b->pointers[i], b->hashes[i], userp)) {
841
seqlock_write_begin(&head->sequence);
842
qht_bucket_remove_entry(b, i);
843
seqlock_write_end(&head->sequence);
844
qht_bucket_debug__locked(b);
851
g_assert_not_reached();
859
static inline void qht_map_iter__all_locked(struct qht_map *map,
860
const struct qht_iter *iter,
865
for (i = 0; i < map->n_buckets; i++) {
866
qht_bucket_iter(&map->buckets[i], iter, userp);
871
do_qht_iter(struct qht *ht, const struct qht_iter *iter, void *userp)
875
map = qatomic_rcu_read(&ht->map);
876
qht_map_lock_buckets(map);
877
qht_map_iter__all_locked(map, iter, userp);
878
qht_map_unlock_buckets(map);
881
void qht_iter(struct qht *ht, qht_iter_func_t func, void *userp)
883
const struct qht_iter iter = {
885
.type = QHT_ITER_VOID,
888
do_qht_iter(ht, &iter, userp);
891
void qht_iter_remove(struct qht *ht, qht_iter_bool_func_t func, void *userp)
893
const struct qht_iter iter = {
898
do_qht_iter(ht, &iter, userp);
901
struct qht_map_copy_data {
906
static void qht_map_copy(void *p, uint32_t hash, void *userp)
908
struct qht_map_copy_data *data = userp;
909
struct qht *ht = data->ht;
910
struct qht_map *new = data->new;
911
struct qht_bucket *b = qht_map_to_bucket(new, hash);
914
qht_insert__locked(ht, new, b, p, hash, NULL);
921
static void qht_do_resize_reset(struct qht *ht, struct qht_map *new, bool reset)
924
const struct qht_iter iter = {
925
.f.retvoid = qht_map_copy,
926
.type = QHT_ITER_VOID,
928
struct qht_map_copy_data data;
931
qht_map_lock_buckets(old);
934
qht_map_reset__all_locked(old);
938
qht_map_unlock_buckets(old);
942
g_assert(new->n_buckets != old->n_buckets);
945
qht_map_iter__all_locked(old, &iter, &data);
946
qht_map_debug__all_locked(new);
948
qatomic_rcu_set(&ht->map, new);
949
qht_map_unlock_buckets(old);
950
call_rcu(old, qht_map_destroy, rcu);
953
bool qht_resize(struct qht *ht, size_t n_elems)
955
size_t n_buckets = qht_elems_to_buckets(n_elems);
959
if (n_buckets != ht->map->n_buckets) {
962
new = qht_map_create(n_buckets);
963
qht_do_resize(ht, new);
972
void qht_statistics_init(const struct qht *ht, struct qht_stats *stats)
974
const struct qht_map *map;
977
map = qatomic_rcu_read(&ht->map);
979
stats->used_head_buckets = 0;
981
qdist_init(&stats->chain);
982
qdist_init(&stats->occupancy);
984
if (unlikely(map == NULL)) {
985
stats->head_buckets = 0;
988
stats->head_buckets = map->n_buckets;
990
for (i = 0; i < map->n_buckets; i++) {
991
const struct qht_bucket *head = &map->buckets[i];
992
const struct qht_bucket *b;
993
unsigned int version;
999
version = seqlock_read_begin(&head->sequence);
1004
for (j = 0; j < QHT_BUCKET_ENTRIES; j++) {
1005
if (qatomic_read(&b->pointers[j]) == NULL) {
1011
b = qatomic_rcu_read(&b->next);
1013
} while (seqlock_read_retry(&head->sequence, version));
1016
qdist_inc(&stats->chain, buckets);
1017
qdist_inc(&stats->occupancy,
1018
(double)entries / QHT_BUCKET_ENTRIES / buckets);
1019
stats->used_head_buckets++;
1020
stats->entries += entries;
1022
qdist_inc(&stats->occupancy, 0);
1027
void qht_statistics_destroy(struct qht_stats *stats)
1029
qdist_destroy(&stats->occupancy);
1030
qdist_destroy(&stats->chain);