25
#include "qemu/osdep.h"
26
#include "block/block-io.h"
27
#include "qemu/memalign.h"
31
/*
 * Per-entry metadata for one cached on-disk table.
 * NOTE(review): interior fields were lost in extraction; layout below follows
 * the fields the visible code accesses (offset, lru_counter, ref, dirty) —
 * confirm against the original file.
 */
typedef struct Qcow2CachedTable {
    int64_t  offset;       /* host offset of the table, 0 = entry unused */
    uint64_t lru_counter;  /* snapshot of cache lru_counter at last release */
    int      ref;          /* number of outstanding qcow2_cache_get() refs */
    bool     dirty;        /* table modified since last flush to disk */
} Qcow2CachedTable;

struct Qcow2Cache {
    Qcow2CachedTable *entries;       /* one slot per cached table */
    struct Qcow2Cache *depends;      /* cache that must be flushed first */
    int size;                        /* number of slots */
    int table_size;                  /* bytes per table */
    bool depends_on_flush;           /* require bdrv_flush() before writeback */
    void *table_array;               /* contiguous buffer of size*table_size */
    uint64_t lru_counter;            /* monotonically increasing LRU clock */
    uint64_t cache_clean_lru_counter; /* LRU value at last clean_unused() pass */
};
/* Return the address of table slot @table inside the contiguous buffer.
 * The (size_t) cast prevents 32-bit overflow of table * table_size. */
static inline void *qcow2_cache_get_table_addr(Qcow2Cache *c, int table)
{
    return (uint8_t *) c->table_array + (size_t) table * c->table_size;
}
/* Map a table pointer (as returned by qcow2_cache_get_table_addr()) back to
 * its slot index; asserts the pointer lies on a slot boundary inside the
 * cache's buffer. */
static inline int qcow2_cache_get_table_idx(Qcow2Cache *c, void *table)
{
    ptrdiff_t table_offset = (uint8_t *) table - (uint8_t *) c->table_array;
    int idx = table_offset / c->table_size;
    assert(idx >= 0 && idx < c->size && table_offset % c->table_size == 0);
    return idx;
}
/* Human-readable name of @c for error messages.  Never fails: an
 * unrecognized cache is reported as "unknown" rather than aborting,
 * since this is only used for diagnostics. */
static inline const char *qcow2_cache_get_name(BDRVQcow2State *s, Qcow2Cache *c)
{
    if (c == s->refcount_block_cache) {
        return "refcount block";
    } else if (c == s->l2_table_cache) {
        return "L2 table";
    } else {
        /* Do not abort, because this is not critical */
        return "unknown";
    }
}
/* Give the memory of @num_tables consecutive slots starting at @i back to
 * the kernel.  Only the page-aligned interior of the range is released;
 * partial pages at either end are kept (offset/length computation below). */
static void qcow2_cache_table_release(Qcow2Cache *c, int i, int num_tables)
{
/* Using MADV_DONTNEED to discard memory is a Linux-specific feature */
#ifdef CONFIG_LINUX
    void *t = qcow2_cache_get_table_addr(c, i);
    int align = qemu_real_host_page_size();
    size_t mem_size = (size_t) c->table_size * num_tables;
    /* Round start up and length down so we never touch bytes outside the
     * released slots or shared with neighbouring live entries. */
    size_t offset = QEMU_ALIGN_UP((uintptr_t) t, align) - (uintptr_t) t;
    size_t length = QEMU_ALIGN_DOWN(mem_size - offset, align);
    if (mem_size > offset && length > 0) {
        madvise((uint8_t *) t + offset, length, MADV_DONTNEED);
    }
#endif
}
/* An entry can be cleaned if it is in use (offset != 0), unreferenced,
 * clean on disk, and has not been touched since the last clean pass
 * (lru_counter at or below the recorded cache_clean_lru_counter). */
static inline bool can_clean_entry(Qcow2Cache *c, int i)
{
    Qcow2CachedTable *t = &c->entries[i];
    return t->ref == 0 && !t->dirty && t->offset != 0 &&
        t->lru_counter <= c->cache_clean_lru_counter;
}
/* Evict every cleanable entry (see can_clean_entry()) and release its
 * memory.  Consecutive cleanable entries are batched into a single
 * qcow2_cache_table_release() call. */
void qcow2_cache_clean_unused(Qcow2Cache *c)
{
    int i = 0;
    while (i < c->size) {
        int to_clean = 0;

        /* Skip the entries that we don't need to clean */
        while (i < c->size && !can_clean_entry(c, i)) {
            i++;
        }

        /* And count how many we can clean in a row */
        while (i < c->size && can_clean_entry(c, i)) {
            c->entries[i].offset = 0;
            c->entries[i].lru_counter = 0;
            i++;
            to_clean++;
        }

        if (to_clean > 0) {
            qcow2_cache_table_release(c, i - to_clean, to_clean);
        }
    }

    /* Entries touched after this point are young again for the next pass */
    c->cache_clean_lru_counter = c->lru_counter;
}
/*
 * Allocate a cache of @num_tables slots of @table_size bytes each.
 * Returns NULL on allocation failure (both the entry array and the table
 * buffer use the _try_ allocators so huge cache sizes fail gracefully
 * instead of aborting).
 */
Qcow2Cache *qcow2_cache_create(BlockDriverState *bs, int num_tables,
                               unsigned table_size)
{
    BDRVQcow2State *s = bs->opaque;
    Qcow2Cache *c;

    assert(num_tables > 0);
    assert(is_power_of_2(table_size));
    assert(table_size >= (1 << MIN_CLUSTER_BITS));
    assert(table_size <= s->cluster_size);

    c = g_new0(Qcow2Cache, 1);
    c->size = num_tables;
    c->table_size = table_size;
    c->entries = g_try_new0(Qcow2CachedTable, num_tables);
    /* (size_t) cast: avoid 32-bit overflow of num_tables * table_size */
    c->table_array = qemu_try_blockalign(bs->file->bs,
                                         (size_t) num_tables * c->table_size);

    if (!c->entries || !c->table_array) {
        /* Either allocation may have succeeded; free both and fail */
        qemu_vfree(c->table_array);
        g_free(c->entries);
        g_free(c);
        c = NULL;
    }

    return c;
}
/* Free the cache.  All entries must be unreferenced; callers are expected
 * to have flushed beforehand.  Always returns 0. */
int qcow2_cache_destroy(Qcow2Cache *c)
{
    int i;

    for (i = 0; i < c->size; i++) {
        assert(c->entries[i].ref == 0);
    }

    qemu_vfree(c->table_array);
    g_free(c->entries);
    g_free(c);

    return 0;
}
static int GRAPH_RDLOCK
167
qcow2_cache_flush_dependency(BlockDriverState *bs, Qcow2Cache *c)
171
ret = qcow2_cache_flush(bs, c->depends);
177
c->depends_on_flush = false;
182
static int GRAPH_RDLOCK
183
qcow2_cache_entry_flush(BlockDriverState *bs, Qcow2Cache *c, int i)
185
BDRVQcow2State *s = bs->opaque;
188
if (!c->entries[i].dirty || !c->entries[i].offset) {
192
trace_qcow2_cache_entry_flush(qemu_coroutine_self(),
193
c == s->l2_table_cache, i);
196
ret = qcow2_cache_flush_dependency(bs, c);
197
} else if (c->depends_on_flush) {
198
ret = bdrv_flush(bs->file->bs);
200
c->depends_on_flush = false;
208
if (c == s->refcount_block_cache) {
209
ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_REFCOUNT_BLOCK,
210
c->entries[i].offset, c->table_size, false);
211
} else if (c == s->l2_table_cache) {
212
ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L2,
213
c->entries[i].offset, c->table_size, false);
215
ret = qcow2_pre_write_overlap_check(bs, 0,
216
c->entries[i].offset, c->table_size, false);
223
if (c == s->refcount_block_cache) {
224
BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_UPDATE_PART);
225
} else if (c == s->l2_table_cache) {
226
BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE);
229
ret = bdrv_pwrite(bs->file, c->entries[i].offset, c->table_size,
230
qcow2_cache_get_table_addr(c, i), 0);
235
c->entries[i].dirty = false;
240
/* Write back every dirty entry.  All entries are attempted even after an
 * error; the first error is returned, except that -ENOSPC is given
 * priority (sticky) because it is the most actionable for callers. */
int qcow2_cache_write(BlockDriverState *bs, Qcow2Cache *c)
{
    BDRVQcow2State *s = bs->opaque;
    int result = 0;
    int ret;
    int i;

    trace_qcow2_cache_flush(qemu_coroutine_self(), c == s->l2_table_cache);

    for (i = 0; i < c->size; i++) {
        ret = qcow2_cache_entry_flush(bs, c, i);
        if (ret < 0 && result != -ENOSPC) {
            result = ret;
        }
    }

    return result;
}
/* Write back all dirty entries and then flush the underlying file so the
 * data is stable on disk.  Returns 0 or the first -errno encountered. */
int qcow2_cache_flush(BlockDriverState *bs, Qcow2Cache *c)
{
    int result = qcow2_cache_write(bs, c);

    if (result == 0) {
        int ret = bdrv_flush(bs->file->bs);
        if (ret < 0) {
            result = ret;
        }
    }

    return result;
}
/*
 * Record that @c must not be flushed before @dependency has been flushed.
 * Dependency chains and conflicting dependencies are resolved eagerly by
 * flushing, so at most one level of dependency ever exists.
 * Returns 0 on success, -errno if a required flush fails.
 */
int qcow2_cache_set_dependency(BlockDriverState *bs, Qcow2Cache *c,
                               Qcow2Cache *dependency)
{
    int ret;

    /* No chains: flush the new dependency's own dependency first */
    if (dependency->depends) {
        ret = qcow2_cache_flush_dependency(bs, dependency);
        if (ret < 0) {
            return ret;
        }
    }

    /* Replacing a different existing dependency requires flushing it */
    if (c->depends && (c->depends != dependency)) {
        ret = qcow2_cache_flush_dependency(bs, c);
        if (ret < 0) {
            return ret;
        }
    }

    c->depends = dependency;
    return 0;
}
/* Require a bdrv_flush() of the image file before the next entry
 * writeback from this cache (consumed in qcow2_cache_entry_flush()). */
void qcow2_cache_depends_on_flush(Qcow2Cache *c)
{
    c->depends_on_flush = true;
}
/* Flush the cache and drop every entry, releasing the backing memory.
 * All entries must be unreferenced.  Returns 0 or -errno from the flush. */
int qcow2_cache_empty(BlockDriverState *bs, Qcow2Cache *c)
{
    int ret, i;

    ret = qcow2_cache_flush(bs, c);
    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < c->size; i++) {
        assert(c->entries[i].ref == 0);
        c->entries[i].offset = 0;
        c->entries[i].lru_counter = 0;
    }

    qcow2_cache_table_release(c, 0, c->size);

    /* Cache is empty, so the LRU clock can start over */
    c->lru_counter = 0;

    return 0;
}
static int GRAPH_RDLOCK
324
qcow2_cache_do_get(BlockDriverState *bs, Qcow2Cache *c, uint64_t offset,
325
void **table, bool read_from_disk)
327
BDRVQcow2State *s = bs->opaque;
331
uint64_t min_lru_counter = UINT64_MAX;
332
int min_lru_index = -1;
336
trace_qcow2_cache_get(qemu_coroutine_self(), c == s->l2_table_cache,
337
offset, read_from_disk);
339
if (!QEMU_IS_ALIGNED(offset, c->table_size)) {
340
qcow2_signal_corruption(bs, true, -1, -1, "Cannot get entry from %s "
341
"cache: Offset %#" PRIx64 " is unaligned",
342
qcow2_cache_get_name(s, c), offset);
347
i = lookup_index = (offset / c->table_size * 4) % c->size;
349
const Qcow2CachedTable *t = &c->entries[i];
350
if (t->offset == offset) {
353
if (t->ref == 0 && t->lru_counter < min_lru_counter) {
354
min_lru_counter = t->lru_counter;
357
if (++i == c->size) {
360
} while (i != lookup_index);
362
if (min_lru_index == -1) {
370
trace_qcow2_cache_get_replace_entry(qemu_coroutine_self(),
371
c == s->l2_table_cache, i);
373
ret = qcow2_cache_entry_flush(bs, c, i);
378
trace_qcow2_cache_get_read(qemu_coroutine_self(),
379
c == s->l2_table_cache, i);
380
c->entries[i].offset = 0;
381
if (read_from_disk) {
382
if (c == s->l2_table_cache) {
383
BLKDBG_EVENT(bs->file, BLKDBG_L2_LOAD);
386
ret = bdrv_pread(bs->file, offset, c->table_size,
387
qcow2_cache_get_table_addr(c, i), 0);
393
c->entries[i].offset = offset;
398
*table = qcow2_cache_get_table_addr(c, i);
400
trace_qcow2_cache_get_done(qemu_coroutine_self(),
401
c == s->l2_table_cache, i);
406
/* Get a referenced pointer to the table at @offset, reading it from disk
 * if it is not cached.  See qcow2_cache_do_get(). */
int qcow2_cache_get(BlockDriverState *bs, Qcow2Cache *c, uint64_t offset,
                    void **table)
{
    return qcow2_cache_do_get(bs, c, offset, table, true);
}
/* Like qcow2_cache_get(), but skip the disk read: the caller will fully
 * initialize the table contents itself. */
int qcow2_cache_get_empty(BlockDriverState *bs, Qcow2Cache *c, uint64_t offset,
                          void **table)
{
    return qcow2_cache_do_get(bs, c, offset, table, false);
}
/* Release a reference obtained with qcow2_cache_get()/get_empty() and
 * clear the caller's pointer.  When the last reference goes away the entry
 * gets a fresh LRU timestamp. */
void qcow2_cache_put(Qcow2Cache *c, void **table)
{
    int i = qcow2_cache_get_table_idx(c, *table);

    c->entries[i].ref--;
    *table = NULL;

    if (c->entries[i].ref == 0) {
        c->entries[i].lru_counter = ++c->lru_counter;
    }

    assert(c->entries[i].ref >= 0);
}
/* Mark the entry holding @table as modified so the next flush writes it
 * back.  The entry must be in use (offset != 0). */
void qcow2_cache_entry_mark_dirty(Qcow2Cache *c, void *table)
{
    int i = qcow2_cache_get_table_idx(c, table);
    assert(c->entries[i].offset != 0);
    c->entries[i].dirty = true;
}
/* If the table at image offset @offset is cached, return its in-memory
 * address (without taking a reference); otherwise return NULL. */
void *qcow2_cache_is_table_offset(Qcow2Cache *c, uint64_t offset)
{
    int i;

    for (i = 0; i < c->size; i++) {
        if (c->entries[i].offset == offset) {
            return qcow2_cache_get_table_addr(c, i);
        }
    }
    return NULL;
}
/* Drop @table from the cache without writing it back (the on-disk table is
 * being deallocated, so its contents are irrelevant) and release the slot's
 * memory.  The entry must be unreferenced. */
void qcow2_cache_discard(Qcow2Cache *c, void *table)
{
    int i = qcow2_cache_get_table_idx(c, table);

    assert(c->entries[i].ref == 0);

    c->entries[i].offset = 0;
    c->entries[i].lru_counter = 0;
    c->entries[i].dirty = false;

    qcow2_cache_table_release(c, i, 1);
}