/*
 * Hierarchical Bitmap Data Type
 *
 * Copyright Red Hat, Inc., 2012
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/hbitmap.h"
#include "qemu/host-utils.h"
#include "trace.h"
#include "crypto/hash.h"
/* HBitmap provides an array of bits.  The bits are stored as usual in an
 * array of unsigned longs, but HBitmap is also optimized to provide fast
 * iteration over set bits; going from one bit to the next is O(logB n)
 * worst case, with B = sizeof(long) * CHAR_BIT: the result is low enough
 * that the number of levels is in fact fixed.
 *
 * In order to do this, it stacks multiple bitmaps with progressively coarser
 * granularity; in all levels except the last, bit N is set iff the N-th
 * unsigned long is nonzero in the immediately next level.  When iteration
 * completes on the last level it can examine the 2nd-last level to quickly
 * skip entire words, and even do so recursively to skip blocks of 64 words or
 * powers thereof (32 on 32-bit machines).
 *
 * Given an index in the bitmap, it can be split in groups of bits like
 * this (for the 64-bit case):
 *
 *   bits 0-57 => word in the last bitmap     | bits 58-63 => bit in the word
 *   bits 0-51 => word in the 2nd-last bitmap | bits 52-57 => bit in the word
 *   bits 0-45 => word in the 3rd-last bitmap | bits 46-51 => bit in the word
 *
 * So it is easy to move up simply by shifting the index right by
 * log2(BITS_PER_LONG) bits.  To move down, you shift the index left
 * similarly, and add the word index within the group.  Iteration uses
 * ffs (find first set bit) to find the next word to examine; this
 * operation can be done in constant time in most current architectures.
 *
 * Setting or clearing a range of m bits on all levels, the work to perform
 * is O(m + m/W + m/W^2 + ...), which is O(m) like on a regular bitmap.
 *
 * When iterating on a bitmap, each bit (on any level) is only visited
 * once.  Hence, the total cost of visiting a bitmap with m bits in it is
 * the number of bits that are set in all bitmaps.  Unless the bitmap is
 * extremely sparse, this is also O(m + m/W + m/W^2 + ...), so the amortized
 * cost of advancing from one bit to the next is usually constant (worst case
 * O(logB n) as in the non-amortized complexity).
 */
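/* A worked example of the index arithmetic above (assuming
 * BITS_PER_LONG == 64, so BITS_PER_LEVEL == 6): the item at index
 * 1000000 lives at
 *
 *   last level:     word 1000000 >> 6 == 15625,  bit 1000000 & 63 ==  0
 *   2nd-last level: word   15625 >> 6 ==   244,  bit   15625 & 63 ==  9
 *   3rd-last level: word     244 >> 6 ==     3,  bit     244 & 63 == 52
 *
 * Moving up divides the index by 64; moving down multiplies it by 64 and
 * adds the offset of a set bit found with ctzl().
 */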
struct HBitmap {
    /*
     * Size of the bitmap, as requested in hbitmap_alloc or in
     * hbitmap_truncate.
     */
    uint64_t orig_size;

    /* Number of total bits in the bottom level.  */
    uint64_t size;

    /* Number of set bits in the bottom level.  */
    uint64_t count;

    /* A scaling factor.  Given a granularity of G, each bit in the bitmap
     * will actually represent a group of 2^G elements.  Each operation on a
     * range of bits first rounds the bits to determine which group they land
     * in, and then affects the entire group; iteration will only visit the
     * first bit of each group.  Here is an example of operations in a
     * size-16, granularity-1 HBitmap:
     *
     *    initial state            00000000
     *    set(start=0, count=9)    11111000 (iter: 0, 2, 4, 6, 8)
     *    reset(start=1, count=3)  00111000 (iter: 4, 6, 8)
     *    set(start=9, count=2)    00111100 (iter: 4, 6, 8, 10)
     *    reset(start=5, count=5)  00000000
     *
     * From an implementation point of view, when setting or resetting bits,
     * the bitmap will scale bit numbers right by this amount of bits.  When
     * iterating, the bitmap will scale bit numbers left by this amount of
     * bits.
     */
    int granularity;

    /* A meta dirty bitmap to track the dirtiness of bits in this HBitmap. */
    HBitmap *meta;

    /* A number of progressively less coarse bitmaps (i.e. level 0 is the
     * coarsest).  Each bit in level N represents a word in level N+1 that
     * has a set bit, except the last level where each bit represents the
     * actual bitmap.
     *
     * Note that all bitmaps have the same number of levels.  Even a 1-bit
     * bitmap will still allocate HBITMAP_LEVELS arrays.
     */
    unsigned long *levels[HBITMAP_LEVELS];

    /* The length of each levels[] array. */
    uint64_t sizes[HBITMAP_LEVELS];
};
/* Advance hbi to the next nonzero word and return it.  hbi->pos
 * is updated.  Returns zero if we reach the end of the bitmap.
 */
static unsigned long hbitmap_iter_skip_words(HBitmapIter *hbi)
{
    size_t pos = hbi->pos;
    const HBitmap *hb = hbi->hb;
    unsigned i = HBITMAP_LEVELS - 1;

    unsigned long cur;
    do {
        i--;
        pos >>= BITS_PER_LEVEL;
        cur = hbi->cur[i] & hb->levels[i][pos];
    } while (cur == 0);

    /* Check for end of iteration.  We always use fewer than BITS_PER_LONG
     * bits in the level 0 bitmap; thus we can repurpose the most significant
     * bit as a sentinel.  The sentinel is set in hbitmap_alloc and ensures
     * that the above loop ends even without an explicit check on i.
     */

    if (i == 0 && cur == (1UL << (BITS_PER_LONG - 1))) {
        return 0;
    }
    for (; i < HBITMAP_LEVELS - 1; i++) {
        /* Shift back pos to the left, matching the right shifts above.
         * The index of this word's least significant set bit provides
         * the low-order bits.
         */
        assert(cur);
        pos = (pos << BITS_PER_LEVEL) + ctzl(cur);
        hbi->cur[i] = cur & (cur - 1);

        /* Set up next level for iteration.  */
        cur = hb->levels[i + 1][pos];
    }

    hbi->pos = pos;
    trace_hbitmap_iter_skip_words(hbi->hb, hbi, pos, cur);

    assert(cur);
    return cur;
}

int64_t hbitmap_iter_next(HBitmapIter *hbi)
{
    unsigned long cur = hbi->cur[HBITMAP_LEVELS - 1] &
            hbi->hb->levels[HBITMAP_LEVELS - 1][hbi->pos];
    int64_t item;

    if (cur == 0) {
        cur = hbitmap_iter_skip_words(hbi);
        if (cur == 0) {
            return -1;
        }
    }

    /* The next call will resume work from the next bit.  */
    hbi->cur[HBITMAP_LEVELS - 1] = cur & (cur - 1);
    item = ((uint64_t)hbi->pos << BITS_PER_LEVEL) + ctzl(cur);

    return item << hbi->granularity;
}
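/* A minimal usage sketch (illustrative only; @hb stands for any valid
 * HBitmap): visit every dirty item from the start of the bitmap.
 *
 *     HBitmapIter hbi;
 *     int64_t item;
 *
 *     hbitmap_iter_init(&hbi, hb, 0);
 *     while ((item = hbitmap_iter_next(&hbi)) >= 0) {
 *         ... process dirty item at index @item ...
 *     }
 *
 * Because of the granularity scaling described above, only the first item
 * of each group is returned.
 */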
void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first)
{
    unsigned i, bit;
    uint64_t pos;

    hbi->hb = hb;
    pos = first >> hb->granularity;
    assert(pos < hb->size);
    hbi->pos = pos >> BITS_PER_LEVEL;
    hbi->granularity = hb->granularity;

    for (i = HBITMAP_LEVELS; i-- > 0; ) {
        bit = pos & (BITS_PER_LONG - 1);
        pos >>= BITS_PER_LEVEL;

        /* Drop bits representing items before first.  */
        hbi->cur[i] = hb->levels[i][pos] & ~((1UL << bit) - 1);

        /* We have already added level i+1, so the lowest set bit has
         * been processed.  Clear it.
         */
        if (i != HBITMAP_LEVELS - 1) {
            hbi->cur[i] &= ~(1UL << bit);
        }
    }
}

int64_t hbitmap_next_dirty(const HBitmap *hb, int64_t start, int64_t count)
{
    HBitmapIter hbi;
    int64_t first_dirty_off;
    uint64_t end;

    assert(start >= 0 && count >= 0);

    if (start >= hb->orig_size || count == 0) {
        return -1;
    }

    end = count > hb->orig_size - start ? hb->orig_size : start + count;

    hbitmap_iter_init(&hbi, hb, start);
    first_dirty_off = hbitmap_iter_next(&hbi);

    if (first_dirty_off < 0 || first_dirty_off >= end) {
        return -1;
    }

    return MAX(start, first_dirty_off);
}
int64_t hbitmap_next_zero(const HBitmap *hb, int64_t start, int64_t count)
{
    size_t pos = (start >> hb->granularity) >> BITS_PER_LEVEL;
    unsigned long *last_lev = hb->levels[HBITMAP_LEVELS - 1];
    unsigned long cur = last_lev[pos];
    unsigned start_bit_offset;
    uint64_t end_bit, sz;
    int64_t res;

    assert(start >= 0 && count >= 0);

    if (start >= hb->orig_size || count == 0) {
        return -1;
    }

    end_bit = count > hb->orig_size - start ?
                hb->size :
                ((start + count - 1) >> hb->granularity) + 1;
    sz = (end_bit + BITS_PER_LONG - 1) >> BITS_PER_LEVEL;

    /* There may be some zero bits in @cur before @start.  We are not
     * interested in them, let's set them.
     */
    start_bit_offset = (start >> hb->granularity) & (BITS_PER_LONG - 1);
    cur |= (1UL << start_bit_offset) - 1;
    assert((start >> hb->granularity) < hb->size);

    if (cur == (unsigned long)-1) {
        do {
            pos++;
        } while (pos < sz && last_lev[pos] == (unsigned long)-1);

        if (pos >= sz) {
            return -1;
        }

        cur = last_lev[pos];
    }

    res = (pos << BITS_PER_LEVEL) + ctol(cur);
    if (res >= end_bit) {
        return -1;
    }

    res = res << hb->granularity;
    if (res < start) {
        assert(((start - res) >> hb->granularity) == 0);
        return start;
    }

    return res;
}
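/* Worked example (assuming the size-16, granularity-1 bitmap from the
 * struct comment, in state 00111100, i.e. groups 2-5 dirty):
 * hbitmap_next_zero(hb, 0, 16) returns 0 immediately, while
 * hbitmap_next_zero(hb, 5, 11) skips the dirty groups and returns 12,
 * the first item of the clean group 6.
 */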
bool hbitmap_next_dirty_area(const HBitmap *hb, int64_t start, int64_t end,
                             int64_t max_dirty_count,
                             int64_t *dirty_start, int64_t *dirty_count)
{
    int64_t next_zero;

    assert(start >= 0 && end >= 0 && max_dirty_count > 0);

    end = MIN(end, hb->orig_size);
    if (start >= end) {
        return false;
    }

    start = hbitmap_next_dirty(hb, start, end - start);
    if (start < 0) {
        return false;
    }

    end = start + MIN(end - start, max_dirty_count);

    next_zero = hbitmap_next_zero(hb, start, end - start);
    if (next_zero >= 0) {
        end = next_zero;
    }

    *dirty_start = start;
    *dirty_count = end - start;

    return true;
}
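/* A usage sketch; this is essentially the loop that
 * hbitmap_sparse_merge() below runs over @src:
 *
 *     int64_t offset = 0, count;
 *
 *     while (hbitmap_next_dirty_area(hb, offset, end, INT64_MAX,
 *                                    &offset, &count)) {
 *         ... handle dirty area [offset, offset + count) ...
 *         offset += count;
 *     }
 */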
bool hbitmap_status(const HBitmap *hb, int64_t start, int64_t count,
                    int64_t *pnum)
{
    int64_t next_dirty, next_zero;

    assert(start >= 0);
    assert(count > 0);
    assert(start + count <= hb->orig_size);

    next_dirty = hbitmap_next_dirty(hb, start, count);
    if (next_dirty == -1) {
        *pnum = count;
        return false;
    }

    if (next_dirty > start) {
        *pnum = next_dirty - start;
        return false;
    }

    assert(next_dirty == start);

    next_zero = hbitmap_next_zero(hb, start, count);
    if (next_zero == -1) {
        *pnum = count;
        return true;
    }

    assert(next_zero > start);
    *pnum = next_zero - start;
    return true;
}
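/* A usage sketch, assuming the caller tracks the bitmap length in @len:
 * classify the whole bitmap into maximal dirty/clean runs.
 *
 *     int64_t offset = 0, pnum;
 *
 *     while (offset < len) {
 *         bool dirty = hbitmap_status(hb, offset, len - offset, &pnum);
 *         ... [offset, offset + pnum) is entirely dirty or entirely clean ...
 *         offset += pnum;
 *     }
 */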
bool hbitmap_empty(const HBitmap *hb)
{
    return hb->count == 0;
}

int hbitmap_granularity(const HBitmap *hb)
{
    return hb->granularity;
}

uint64_t hbitmap_count(const HBitmap *hb)
{
    return hb->count << hb->granularity;
}
/**
 * hbitmap_iter_next_word:
 * @hbi: HBitmapIter to operate on.
 * @p_cur: Location where to store the next non-zero word.
 *
 * Return the index of the next nonzero word that is set in @hbi's
 * associated HBitmap, and set *p_cur to the content of that word
 * (bits before the index that was passed to hbitmap_iter_init are
 * trimmed on the first call).  Return -1, and set *p_cur to zero,
 * if all remaining words are zero.
 */
static size_t hbitmap_iter_next_word(HBitmapIter *hbi, unsigned long *p_cur)
{
    unsigned long cur = hbi->cur[HBITMAP_LEVELS - 1];

    if (cur == 0) {
        cur = hbitmap_iter_skip_words(hbi);
        if (cur == 0) {
            *p_cur = 0;
            return -1;
        }
    }

    /* The next call will resume work from the next word.  */
    hbi->cur[HBITMAP_LEVELS - 1] = 0;
    *p_cur = cur;
    return hbi->pos;
}

/* Count the number of set bits between start and end, not accounting for
 * the granularity.  Also an example of how to use hbitmap_iter_next_word.
 */
static uint64_t hb_count_between(HBitmap *hb, uint64_t start, uint64_t last)
{
    HBitmapIter hbi;
    uint64_t count = 0;
    uint64_t end = last + 1;
    unsigned long cur;
    size_t pos;

    hbitmap_iter_init(&hbi, hb, start << hb->granularity);
    for (;;) {
        pos = hbitmap_iter_next_word(&hbi, &cur);
        if (pos >= (end >> BITS_PER_LEVEL)) {
            break;
        }
        count += ctpopl(cur);
    }

    if (pos == (end >> BITS_PER_LEVEL)) {
        /* Drop bits representing the END-th and subsequent items.  */
        int bit = end & (BITS_PER_LONG - 1);
        cur &= (1UL << bit) - 1;
        count += ctpopl(cur);
    }

    return count;
}
/* Setting starts at the last layer and propagates up if an element
 * changes.
 */
static inline bool hb_set_elem(unsigned long *elem, uint64_t start, uint64_t last)
{
    unsigned long mask;
    unsigned long old;

    assert((last >> BITS_PER_LEVEL) == (start >> BITS_PER_LEVEL));
    assert(start <= last);

    mask = 2UL << (last & (BITS_PER_LONG - 1));
    mask -= 1UL << (start & (BITS_PER_LONG - 1));
    old = *elem;
    *elem |= mask;
    return old != *elem;
}
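/* For example, setting bits 3..5 of a word (start == 3, last == 5):
 *
 *     mask = (2UL << 5) - (1UL << 3) == 0x40 - 0x08 == 0x38
 *
 * "2UL << last" is the bit just above the range; subtracting
 * "1UL << start" leaves exactly bits start..last set.
 */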
/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)...
 * Returns true if at least one bit is changed. */
static bool hb_set_between(HBitmap *hb, int level, uint64_t start,
                           uint64_t last)
{
    size_t pos = start >> BITS_PER_LEVEL;
    size_t lastpos = last >> BITS_PER_LEVEL;
    bool changed = false;
    size_t i;

    i = pos;
    if (i < lastpos) {
        uint64_t next = (start | (BITS_PER_LONG - 1)) + 1;
        changed |= hb_set_elem(&hb->levels[level][i], start, next - 1);
        for (;;) {
            start = next;
            next += BITS_PER_LONG;
            if (++i == lastpos) {
                break;
            }
            changed |= (hb->levels[level][i] == 0);
            hb->levels[level][i] = ~0UL;
        }
    }
    changed |= hb_set_elem(&hb->levels[level][i], start, last);

    /* If there was any change in this layer, we may have to update
     * the one above.
     */
    if (level > 0 && changed) {
        hb_set_between(hb, level - 1, pos, lastpos);
    }
    return changed;
}

void hbitmap_set(HBitmap *hb, uint64_t start, uint64_t count)
{
    /* Compute range in the last layer.  */
    uint64_t first, n;
    uint64_t last = start + count - 1;

    if (count == 0) {
        return;
    }

    trace_hbitmap_set(hb, start, count,
                      start >> hb->granularity, last >> hb->granularity);

    first = start >> hb->granularity;
    last >>= hb->granularity;
    assert(last < hb->size);
    n = last - first + 1;

    hb->count += n - hb_count_between(hb, first, last);
    if (hb_set_between(hb, HBITMAP_LEVELS - 1, first, last) &&
        hb->meta) {
        hbitmap_set(hb->meta, start, count);
    }
}
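/* An illustrative sketch of the granularity scaling, assuming
 * hb = hbitmap_alloc(16, 1) as in the example in the struct comment:
 *
 *     hbitmap_set(hb, 0, 9);            // marks groups 0-4
 *     assert(hbitmap_count(hb) == 10);  // 5 groups * 2 items per group
 *     assert(hbitmap_get(hb, 9));       // item 9 shares group 4 with item 8
 */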
/* Resetting works the other way round: propagate up if the new
 * value is zero.
 */
static inline bool hb_reset_elem(unsigned long *elem, uint64_t start, uint64_t last)
{
    unsigned long mask;
    bool blanked;

    assert((last >> BITS_PER_LEVEL) == (start >> BITS_PER_LEVEL));
    assert(start <= last);

    mask = 2UL << (last & (BITS_PER_LONG - 1));
    mask -= 1UL << (start & (BITS_PER_LONG - 1));
    blanked = *elem != 0 && ((*elem & ~mask) == 0);
    *elem &= ~mask;
    return blanked;
}

/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)...
 * Returns true if at least one bit is changed. */
static bool hb_reset_between(HBitmap *hb, int level, uint64_t start,
                             uint64_t last)
{
    size_t pos = start >> BITS_PER_LEVEL;
    size_t lastpos = last >> BITS_PER_LEVEL;
    bool changed = false;
    size_t i;

    i = pos;
    if (i < lastpos) {
        uint64_t next = (start | (BITS_PER_LONG - 1)) + 1;

        /* Here we need a more complex test than when setting bits.  Even if
         * something was changed, we must not blank bits in the upper level
         * unless the lower-level word became entirely zero.  So, remove pos
         * from the upper-level range if bits remain set.
         */
        if (hb_reset_elem(&hb->levels[level][i], start, next - 1)) {
            changed = true;
        } else {
            pos++;
        }

        for (;;) {
            start = next;
            next += BITS_PER_LONG;
            if (++i == lastpos) {
                break;
            }
            changed |= (hb->levels[level][i] != 0);
            hb->levels[level][i] = 0UL;
        }
    }

    /* Same as above, this time for lastpos.  */
    if (hb_reset_elem(&hb->levels[level][i], start, last)) {
        changed = true;
    } else {
        lastpos--;
    }

    if (level > 0 && changed) {
        hb_reset_between(hb, level - 1, pos, lastpos);
    }

    return changed;
}

void hbitmap_reset(HBitmap *hb, uint64_t start, uint64_t count)
{
    /* Compute range in the last layer.  */
    uint64_t first;
    uint64_t last = start + count - 1;
    uint64_t gran = 1ULL << hb->granularity;

    if (count == 0) {
        return;
    }

    assert(QEMU_IS_ALIGNED(start, gran));
    assert(QEMU_IS_ALIGNED(count, gran) || (start + count == hb->orig_size));

    trace_hbitmap_reset(hb, start, count,
                        start >> hb->granularity, last >> hb->granularity);

    first = start >> hb->granularity;
    last >>= hb->granularity;
    assert(last < hb->size);

    hb->count -= hb_count_between(hb, first, last);
    if (hb_reset_between(hb, HBITMAP_LEVELS - 1, first, last) &&
        hb->meta) {
        hbitmap_set(hb->meta, start, count);
    }
}
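/* Note that, unlike hbitmap_set(), resets must respect the granularity:
 * with granularity 1 (groups of two items), hbitmap_reset(hb, 1, 2) would
 * trip the alignment assertions above, because a reset has to cover whole
 * groups (only the final group may be cut short, by the end of the bitmap).
 */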
void hbitmap_reset_all(HBitmap *hb)
{
    unsigned int i;

    /* Same as hbitmap_alloc() except for memset() instead of malloc() */
    for (i = HBITMAP_LEVELS; --i >= 1; ) {
        memset(hb->levels[i], 0, hb->sizes[i] * sizeof(unsigned long));
    }

    hb->levels[0][0] = 1UL << (BITS_PER_LONG - 1);
    hb->count = 0;
}

bool hbitmap_is_serializable(const HBitmap *hb)
{
    /* Every serialized chunk must be aligned to 64 bits so that endianness
     * requirements can be fulfilled on both 64 bit and 32 bit hosts.
     * We have hbitmap_serialization_align() which converts this
     * alignment requirement from bitmap bits to items covered (e.g. sectors).
     * That value is:
     *    64 << hb->granularity
     * Since this value must not exceed UINT64_MAX, hb->granularity must be
     * less than 58 (== 64 - 6, where 6 is ld(64), i.e. 1 << 6 == 64).
     *
     * In order for hbitmap_serialization_align() to always return a
     * meaningful value, bitmaps that are to be serialized must have a
     * granularity of less than 58. */

    return hb->granularity < 58;
}
bool hbitmap_get(const HBitmap *hb, uint64_t item)
{
    /* Compute position and bit in the last layer.  */
    uint64_t pos = item >> hb->granularity;
    unsigned long bit = 1UL << (pos & (BITS_PER_LONG - 1));
    assert(pos < hb->size);

    return (hb->levels[HBITMAP_LEVELS - 1][pos >> BITS_PER_LEVEL] & bit) != 0;
}
uint64_t hbitmap_serialization_align(const HBitmap *hb)
{
    assert(hbitmap_is_serializable(hb));

    /* Require at least 64 bit granularity to be safe on both 64 bit and 32 bit
     * hosts.  */
    return UINT64_C(64) << hb->granularity;
}

/* Start should be aligned to serialization granularity, chunk size should be
 * aligned to serialization granularity too, except for last chunk.
 */
static void serialization_chunk(const HBitmap *hb,
                                uint64_t start, uint64_t count,
                                unsigned long **first_el, uint64_t *el_count)
{
    uint64_t last = start + count - 1;
    uint64_t gran = hbitmap_serialization_align(hb);

    assert((start & (gran - 1)) == 0);
    assert((last >> hb->granularity) < hb->size);
    if ((last >> hb->granularity) != hb->size - 1) {
        assert((count & (gran - 1)) == 0);
    }

    start = (start >> hb->granularity) >> BITS_PER_LEVEL;
    last = (last >> hb->granularity) >> BITS_PER_LEVEL;

    *first_el = &hb->levels[HBITMAP_LEVELS - 1][start];
    *el_count = last - start + 1;
}

uint64_t hbitmap_serialization_size(const HBitmap *hb,
                                    uint64_t start, uint64_t count)
{
    uint64_t el_count;
    unsigned long *cur;

    if (!count) {
        return 0;
    }
    serialization_chunk(hb, start, count, &cur, &el_count);

    return el_count * sizeof(unsigned long);
}

void hbitmap_serialize_part(const HBitmap *hb, uint8_t *buf,
                            uint64_t start, uint64_t count)
{
    uint64_t el_count;
    unsigned long *cur, *end;

    if (!count) {
        return;
    }
    serialization_chunk(hb, start, count, &cur, &el_count);
    end = cur + el_count;

    while (cur != end) {
        unsigned long el =
            (BITS_PER_LONG == 32 ? cpu_to_le32(*cur) : cpu_to_le64(*cur));

        memcpy(buf, &el, sizeof(el));
        buf += sizeof(el);
        cur++;
    }
}

void hbitmap_deserialize_part(HBitmap *hb, uint8_t *buf,
                              uint64_t start, uint64_t count,
                              bool finish)
{
    uint64_t el_count;
    unsigned long *cur, *end;

    if (!count) {
        return;
    }
    serialization_chunk(hb, start, count, &cur, &el_count);
    end = cur + el_count;

    while (cur != end) {
        memcpy(cur, buf, sizeof(*cur));

        if (BITS_PER_LONG == 32) {
            le32_to_cpus((uint32_t *)cur);
        } else {
            le64_to_cpus((uint64_t *)cur);
        }

        buf += sizeof(unsigned long);
        cur++;
    }
    if (finish) {
        hbitmap_deserialize_finish(hb);
    }
}
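/* A serialization round-trip sketch (buffer management is up to the
 * caller; @hb and @other_hb are assumed to have identical size and
 * granularity):
 *
 *     uint64_t len = hbitmap_serialization_size(hb, start, count);
 *     uint8_t *buf = g_malloc(len);
 *
 *     hbitmap_serialize_part(hb, buf, start, count);
 *     ... store or transmit buf ...
 *     hbitmap_deserialize_part(other_hb, buf, start, count, true);
 *     g_free(buf);
 */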
void hbitmap_deserialize_zeroes(HBitmap *hb, uint64_t start, uint64_t count,
                                bool finish)
{
    uint64_t el_count;
    unsigned long *first;

    if (!count) {
        return;
    }
    serialization_chunk(hb, start, count, &first, &el_count);

    memset(first, 0, el_count * sizeof(unsigned long));
    if (finish) {
        hbitmap_deserialize_finish(hb);
    }
}

void hbitmap_deserialize_ones(HBitmap *hb, uint64_t start, uint64_t count,
                              bool finish)
{
    uint64_t el_count;
    unsigned long *first;

    if (!count) {
        return;
    }
    serialization_chunk(hb, start, count, &first, &el_count);

    memset(first, 0xff, el_count * sizeof(unsigned long));
    if (finish) {
        hbitmap_deserialize_finish(hb);
    }
}

void hbitmap_deserialize_finish(HBitmap *bitmap)
{
    int64_t i, size, prev_size;
    int lev;

    /* Restore levels starting from penultimate to zero level, assuming
     * that the last level is ok.  */
    size = MAX((bitmap->size + BITS_PER_LONG - 1) >> BITS_PER_LEVEL, 1);
    for (lev = HBITMAP_LEVELS - 1; lev-- > 0; ) {
        prev_size = size;
        size = MAX((size + BITS_PER_LONG - 1) >> BITS_PER_LEVEL, 1);
        memset(bitmap->levels[lev], 0, size * sizeof(unsigned long));

        for (i = 0; i < prev_size; ++i) {
            if (bitmap->levels[lev + 1][i]) {
                bitmap->levels[lev][i >> BITS_PER_LEVEL] |=
                    1UL << (i & (BITS_PER_LONG - 1));
            }
        }
    }

    bitmap->levels[0][0] |= 1UL << (BITS_PER_LONG - 1);
    bitmap->count = hb_count_between(bitmap, 0, bitmap->size - 1);
}

void hbitmap_free(HBitmap *hb)
{
    unsigned i;
    assert(!hb->meta);
    for (i = HBITMAP_LEVELS; i-- > 0; ) {
        g_free(hb->levels[i]);
    }
    g_free(hb);
}

HBitmap *hbitmap_alloc(uint64_t size, int granularity)
{
    HBitmap *hb = g_new0(struct HBitmap, 1);
    unsigned i;

    assert(size <= INT64_MAX);
    hb->orig_size = size;

    assert(granularity >= 0 && granularity < 64);
    size = (size + (1ULL << granularity) - 1) >> granularity;
    assert(size <= ((uint64_t)1 << HBITMAP_LOG_MAX_SIZE));

    hb->size = size;
    hb->granularity = granularity;
    for (i = HBITMAP_LEVELS; i-- > 0; ) {
        size = MAX((size + BITS_PER_LONG - 1) >> BITS_PER_LEVEL, 1);
        hb->sizes[i] = size;
        hb->levels[i] = g_new0(unsigned long, size);
    }

    /* We necessarily have free bits in level 0 due to the definition
     * of HBITMAP_LEVELS, so use one for a sentinel.  This speeds up
     * hbitmap_iter_skip_words.
     */
    assert(size == 1);
    hb->levels[0][0] |= 1UL << (BITS_PER_LONG - 1);
    return hb;
}
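/* Sizing example (assuming a 64-bit host, so BITS_PER_LEVEL == 6): for
 * hbitmap_alloc(1 << 16, 0), the loop above sizes the levels from the
 * bottom up as 1024, 16, 1, 1, ... words.  Every level coarser than the
 * third collapses to a single word, which is why level 0 always has free
 * bits available for the sentinel.
 */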
void hbitmap_truncate(HBitmap *hb, uint64_t size)
{
    bool shrink;
    unsigned i;
    uint64_t num_elements = size;
    uint64_t old;

    assert(size <= INT64_MAX);
    hb->orig_size = size;

    /* Size comes in as logical elements, adjust for granularity.  */
    size = (size + (1ULL << hb->granularity) - 1) >> hb->granularity;
    assert(size <= ((uint64_t)1 << HBITMAP_LOG_MAX_SIZE));
    shrink = size < hb->size;

    /* Bit sizes are identical; nothing to do.  */
    if (size == hb->size) {
        return;
    }

    /* If we're losing bits, let's clear those bits before we invalidate all of
     * our invariants.  This helps keep the bitcount consistent, and will
     * prevent us from carrying around garbage bits beyond the end of the map.
     */
    if (shrink) {
        /* Don't clear partial granularity groups;
         * start at the first full one.  */
        uint64_t start = ROUND_UP(num_elements, UINT64_C(1) << hb->granularity);
        uint64_t fix_count = (hb->size << hb->granularity) - start;

        assert(fix_count);
        hbitmap_reset(hb, start, fix_count);
    }

    hb->size = size;
    for (i = HBITMAP_LEVELS; i-- > 0; ) {
        size = MAX(BITS_TO_LONGS(size), 1);
        if (hb->sizes[i] == size) {
            break;
        }
        old = hb->sizes[i];
        hb->sizes[i] = size;
        hb->levels[i] = g_renew(unsigned long, hb->levels[i], size);
        if (!shrink) {
            memset(&hb->levels[i][old], 0x00,
                   (size - old) * sizeof(*hb->levels[i]));
        }
    }
    if (hb->meta) {
        hbitmap_truncate(hb->meta, hb->size << hb->granularity);
    }
}

/**
 * hbitmap_sparse_merge: performs dst = dst | src
 * works with differing granularities.
 * best used when src is sparsely populated.
 */
static void hbitmap_sparse_merge(HBitmap *dst, const HBitmap *src)
{
    int64_t offset;
    int64_t count;

    for (offset = 0;
         hbitmap_next_dirty_area(src, offset, src->orig_size, INT64_MAX,
                                 &offset, &count);
         offset += count)
    {
        hbitmap_set(dst, offset, count);
    }
}

/**
 * Given HBitmaps A and B, let R := A (BITOR) B.
 * Bitmaps A and B will not be modified,
 * except when bitmap R is an alias of A or B.
 * Bitmaps must have same size.
 */
void hbitmap_merge(const HBitmap *a, const HBitmap *b, HBitmap *result)
{
    int i;
    uint64_t j;

    assert(a->orig_size == result->orig_size);
    assert(b->orig_size == result->orig_size);

    if ((!hbitmap_count(a) && result == b) ||
        (!hbitmap_count(b) && result == a)) {
        return;
    }

    if (!hbitmap_count(a) && !hbitmap_count(b)) {
        hbitmap_reset_all(result);
        return;
    }

    if (a->granularity != b->granularity) {
        if ((a != result) && (b != result)) {
            hbitmap_reset_all(result);
        }
        if (a != result) {
            hbitmap_sparse_merge(result, a);
        }
        if (b != result) {
            hbitmap_sparse_merge(result, b);
        }
        return;
    }

    /* This merge is O(size), as BITS_PER_LONG and HBITMAP_LEVELS are constant.
     * It may be possible to improve running times for sparsely populated maps
     * by using hbitmap_iter_next, but this is suboptimal for dense maps.
     */
    assert(a->size == b->size);
    for (i = HBITMAP_LEVELS - 1; i >= 0; i--) {
        for (j = 0; j < a->sizes[i]; j++) {
            result->levels[i][j] = a->levels[i][j] | b->levels[i][j];
        }
    }

    /* Recompute the dirty count */
    result->count = hb_count_between(result, 0, result->size - 1);
}

char *hbitmap_sha256(const HBitmap *bitmap, Error **errp)
{
    size_t size = bitmap->sizes[HBITMAP_LEVELS - 1] * sizeof(unsigned long);
    char *data = (char *)bitmap->levels[HBITMAP_LEVELS - 1];
    char *hash = NULL;
    qcrypto_hash_digest(QCRYPTO_HASH_ALG_SHA256, data, size, &hash, errp);

    return hash;
}