/*
 * Data plane event loop
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2009-2017 QEMU contributors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "block/aio.h"
#include "block/thread-pool.h"
#include "block/graph-lock.h"
#include "qemu/main-loop.h"
#include "qemu/atomic.h"
#include "qemu/rcu_queue.h"
#include "block/raw-aio.h"
#include "qemu/coroutine_int.h"
#include "qemu/coroutine-tls.h"
#include "sysemu/cpu-timers.h"
#include "trace.h"
/***********************************************************/
/* bottom halves (can be seen as timers which expire ASAP) */
/* QEMUBH::flags values */
    /* Already enqueued and waiting for aio_bh_poll() */
    BH_PENDING   = (1 << 0),

    /* Invoke the callback */
    BH_SCHEDULED = (1 << 1),

    /* Delete without invoking callback */
    BH_DELETED   = (1 << 2),

    /* Delete after invoking callback */
    BH_ONESHOT   = (1 << 3),

    /* Schedule periodically when the event loop is idle */
    BH_IDLE      = (1 << 4),

    QSLIST_ENTRY(QEMUBH) next;
    MemReentrancyGuard *reentrancy_guard;
/* Called concurrently from any thread */
static void aio_bh_enqueue(QEMUBH *bh, unsigned new_flags)
    AioContext *ctx = bh->ctx;

    /*
     * Synchronizes with qatomic_fetch_and() in aio_bh_dequeue(), ensuring that
     * insertion starts after BH_PENDING is set.
     */
    old_flags = qatomic_fetch_or(&bh->flags, BH_PENDING | new_flags);

    if (!(old_flags & BH_PENDING)) {
        /*
         * At this point the bottom half becomes visible to aio_bh_poll().
         * This insertion thus synchronizes with QSLIST_MOVE_ATOMIC in
         * aio_bh_poll(), ensuring that:
         * 1. any writes needed by the callback are visible from the callback
         *    after aio_bh_dequeue() returns bh.
         * 2. ctx is loaded before the callback has a chance to execute and bh
         *    could be freed.
         */
        QSLIST_INSERT_HEAD_ATOMIC(&ctx->bh_list, bh, next);

    if (unlikely(icount_enabled())) {
        /*
         * Workaround for record/replay.
         * vCPU execution should be suspended when a new BH is scheduled.
         * This is needed to avoid guest timeouts caused by excessively
         * long execution cycles.
         */
        icount_notify_exit();
/* Only called from aio_bh_poll() and aio_ctx_finalize() */
static QEMUBH *aio_bh_dequeue(BHList *head, unsigned *flags)
    QEMUBH *bh = QSLIST_FIRST_RCU(head);

    QSLIST_REMOVE_HEAD(head, next);

    /*
     * Synchronizes with qatomic_fetch_or() in aio_bh_enqueue(), ensuring that
     * the removal finishes before BH_PENDING is reset.
     */
    *flags = qatomic_fetch_and(&bh->flags,
                               ~(BH_PENDING | BH_SCHEDULED | BH_IDLE));
void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb,
                                  void *opaque, const char *name)
    bh = g_new(QEMUBH, 1);

    aio_bh_enqueue(bh, BH_SCHEDULED | BH_ONESHOT);
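/*
 * Illustrative sketch (not part of the original file): aio_bh_schedule_oneshot()
 * is the usual way to run a function exactly once in a given AioContext
 * without managing a QEMUBH by hand.  The example_cb name below is
 * hypothetical:
 *
 *     static void example_cb(void *opaque)
 *     {
 *         // runs once in ctx's home thread, then the BH frees itself
 *     }
 *
 *     aio_bh_schedule_oneshot(ctx, example_cb, opaque);
 *
 * This is safe to call from any thread; aio_bh_enqueue() wakes the target
 * event loop via aio_notify().
 */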
QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
                        const char *name, MemReentrancyGuard *reentrancy_guard)
    bh = g_new(QEMUBH, 1);
        .reentrancy_guard = reentrancy_guard,
void aio_bh_call(QEMUBH *bh)
    bool last_engaged_in_io = false;

    /* Make a copy of the guard-pointer as cb may free the bh */
    MemReentrancyGuard *reentrancy_guard = bh->reentrancy_guard;
    if (reentrancy_guard) {
        last_engaged_in_io = reentrancy_guard->engaged_in_io;
        if (reentrancy_guard->engaged_in_io) {
            trace_reentrant_aio(bh->ctx, bh->name);
        }
        reentrancy_guard->engaged_in_io = true;
    }

    if (reentrancy_guard) {
        reentrancy_guard->engaged_in_io = last_engaged_in_io;
    }
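/*
 * Illustrative note (not part of the original file): the reentrancy guard
 * lets aio_bh_call() detect a bottom half being invoked while its owning
 * device is already engaged in I/O; such cases are reported through
 * trace_reentrant_aio() above.  Device code opts in by passing its
 * MemReentrancyGuard when creating the BH, for example (names are
 * hypothetical, and the guarded wrapper is assumed to be the
 * qemu_bh_new_guarded() helper):
 *
 *     dev->bh = qemu_bh_new_guarded(example_dev_bh, dev,
 *                                   &DEVICE(dev)->mem_reentrancy_guard);
 */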
/* Concurrent calls to aio_bh_poll() on the same AioContext are not allowed. */
int aio_bh_poll(AioContext *ctx)
    /* Synchronizes with QSLIST_INSERT_HEAD_ATOMIC in aio_bh_enqueue(). */
    QSLIST_MOVE_ATOMIC(&slice.bh_list, &ctx->bh_list);

    /*
     * GCC13 [-Werror=dangling-pointer=] complains that the local variable
     * 'slice' is being stored in the global 'ctx->bh_slice_list' but the
     * list is emptied before this function returns.
     */
#if !defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpragmas"
#pragma GCC diagnostic ignored "-Wdangling-pointer="
#endif
    QSIMPLEQ_INSERT_TAIL(&ctx->bh_slice_list, &slice, next);
#if !defined(__clang__)
#pragma GCC diagnostic pop
#endif

    while ((s = QSIMPLEQ_FIRST(&ctx->bh_slice_list))) {
        bh = aio_bh_dequeue(&s->bh_list, &flags);
        QSIMPLEQ_REMOVE_HEAD(&ctx->bh_slice_list, next);

        if ((flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            /* Idle BHs don't count as progress */
            if (!(flags & BH_IDLE)) {

        if (flags & (BH_DELETED | BH_ONESHOT)) {
void qemu_bh_schedule_idle(QEMUBH *bh)
    aio_bh_enqueue(bh, BH_SCHEDULED | BH_IDLE);

void qemu_bh_schedule(QEMUBH *bh)
    aio_bh_enqueue(bh, BH_SCHEDULED);

/* This function is asynchronous: it only clears the BH_SCHEDULED flag. */
void qemu_bh_cancel(QEMUBH *bh)
    qatomic_and(&bh->flags, ~BH_SCHEDULED);

/*
 * This function is asynchronous: the bottom half is only freed once
 * aio_bh_poll() dequeues it (see the BH_DELETED handling above).
 */
void qemu_bh_delete(QEMUBH *bh)
    aio_bh_enqueue(bh, BH_DELETED);
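/*
 * Illustrative sketch (not part of the original file): the typical bottom
 * half lifecycle as seen by a user of this API.  The example_bh_cb name is
 * hypothetical:
 *
 *     static void example_bh_cb(void *opaque)
 *     {
 *         // invoked from aio_bh_poll() in the AioContext's home thread
 *     }
 *
 *     QEMUBH *bh = aio_bh_new(ctx, example_bh_cb, NULL);
 *     qemu_bh_schedule(bh);   // may be called from any thread
 *     ...
 *     qemu_bh_delete(bh);     // asynchronous; freed by a later aio_bh_poll()
 */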
static int64_t aio_compute_bh_timeout(BHList *head, int timeout)
    QSLIST_FOREACH_RCU(bh, head, next) {
        if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            if (bh->flags & BH_IDLE) {
                /* idle bottom halves will be polled at least
                 * every 10ms */
            /* non-idle bottom halves will be executed
             * immediately */

aio_compute_timeout(AioContext *ctx)
    timeout = aio_compute_bh_timeout(&ctx->bh_list, timeout);

    QSIMPLEQ_FOREACH(s, &ctx->bh_slice_list, next) {
        timeout = aio_compute_bh_timeout(&s->bh_list, timeout);

    deadline = timerlistgroup_deadline_ns(&ctx->tlg);

    return qemu_soonest_timeout(timeout, deadline);
aio_ctx_prepare(GSource *source, gint *timeout)
    AioContext *ctx = (AioContext *) source;

    qatomic_set(&ctx->notify_me, qatomic_read(&ctx->notify_me) | 1);

    /*
     * Write ctx->notify_me before computing the timeout
     * (reading bottom half flags, etc.).  Pairs with
     * smp_mb in aio_notify().
     */

    /* We assume there is no timeout already supplied */
    *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));

    if (aio_prepare(ctx)) {

    return *timeout == 0;
aio_ctx_check(GSource *source)
    AioContext *ctx = (AioContext *) source;

    /* Finish computing the timeout before clearing the flag.  */
    qatomic_store_release(&ctx->notify_me, qatomic_read(&ctx->notify_me) & ~1);
    aio_notify_accept(ctx);

    QSLIST_FOREACH_RCU(bh, &ctx->bh_list, next) {
        if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {

    QSIMPLEQ_FOREACH(s, &ctx->bh_slice_list, next) {
        QSLIST_FOREACH_RCU(bh, &s->bh_list, next) {
            if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {

    return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0);
aio_ctx_dispatch(GSource *source,
                 GSourceFunc callback,
    AioContext *ctx = (AioContext *) source;

    assert(callback == NULL);
aio_ctx_finalize(GSource *source)
    AioContext *ctx = (AioContext *) source;

    thread_pool_free(ctx->thread_pool);

#ifdef CONFIG_LINUX_AIO
    if (ctx->linux_aio) {
        laio_detach_aio_context(ctx->linux_aio, ctx);
        laio_cleanup(ctx->linux_aio);
        ctx->linux_aio = NULL;

#ifdef CONFIG_LINUX_IO_URING
    if (ctx->linux_io_uring) {
        luring_detach_aio_context(ctx->linux_io_uring, ctx);
        luring_cleanup(ctx->linux_io_uring);
        ctx->linux_io_uring = NULL;

    assert(QSLIST_EMPTY(&ctx->scheduled_coroutines));
    qemu_bh_delete(ctx->co_schedule_bh);

    /* There must be no aio_bh_poll() calls going on */
    assert(QSIMPLEQ_EMPTY(&ctx->bh_slice_list));

    while ((bh = aio_bh_dequeue(&ctx->bh_list, &flags))) {
        /*
         * qemu_bh_delete() must have been called on BHs in this AioContext. In
         * many cases memory leaks, hangs, or inconsistent state occur when a
         * BH is leaked because something still expects it to run.
         *
         * If you hit this, fix the lifecycle of the BH so that
         * qemu_bh_delete() and any associated cleanup is called before the
         * AioContext is finalized.
         */
        if (unlikely(!(flags & BH_DELETED))) {
            fprintf(stderr, "%s: BH '%s' leaked, aborting...\n",
    aio_set_event_notifier(ctx, &ctx->notifier, NULL, NULL, NULL);
    event_notifier_cleanup(&ctx->notifier);
    qemu_rec_mutex_destroy(&ctx->lock);
    qemu_lockcnt_destroy(&ctx->list_lock);
    timerlistgroup_deinit(&ctx->tlg);
    unregister_aiocontext(ctx);
    aio_context_destroy(ctx);

static GSourceFuncs aio_source_funcs = {
GSource *aio_get_g_source(AioContext *ctx)
    aio_context_use_g_source(ctx);
    g_source_ref(&ctx->source);
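/*
 * Illustrative sketch (not part of the original file): embedding an
 * AioContext in a GLib main loop through its GSource:
 *
 *     GSource *src = aio_get_g_source(ctx);
 *     g_source_attach(src, g_main_context_default());
 *     g_source_unref(src);    // the main context now holds its own reference
 */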
ThreadPool *aio_get_thread_pool(AioContext *ctx)
    if (!ctx->thread_pool) {
        ctx->thread_pool = thread_pool_new(ctx);
    return ctx->thread_pool;

#ifdef CONFIG_LINUX_AIO
LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp)
    if (!ctx->linux_aio) {
        ctx->linux_aio = laio_init(errp);
        if (ctx->linux_aio) {
            laio_attach_aio_context(ctx->linux_aio, ctx);
    return ctx->linux_aio;

LinuxAioState *aio_get_linux_aio(AioContext *ctx)
    assert(ctx->linux_aio);
    return ctx->linux_aio;
#ifdef CONFIG_LINUX_IO_URING
LuringState *aio_setup_linux_io_uring(AioContext *ctx, Error **errp)
    if (ctx->linux_io_uring) {
        return ctx->linux_io_uring;

    ctx->linux_io_uring = luring_init(errp);
    if (!ctx->linux_io_uring) {

    luring_attach_aio_context(ctx->linux_io_uring, ctx);
    return ctx->linux_io_uring;

LuringState *aio_get_linux_io_uring(AioContext *ctx)
    assert(ctx->linux_io_uring);
    return ctx->linux_io_uring;
void aio_notify(AioContext *ctx)
    /*
     * Write e.g. ctx->bh_list before writing ctx->notified.  Pairs with
     * smp_mb() in aio_notify_accept().
     */
    qatomic_set(&ctx->notified, true);

    /*
     * Write ctx->notified (and also ctx->bh_list) before reading ctx->notify_me.
     * Pairs with smp_mb() in aio_ctx_prepare or aio_poll.
     */
    if (qatomic_read(&ctx->notify_me)) {
        event_notifier_set(&ctx->notifier);

void aio_notify_accept(AioContext *ctx)
    qatomic_set(&ctx->notified, false);

    /*
     * Order reads of ctx->notified (in aio_context_notifier_poll()) and the
     * above clearing of ctx->notified before reads of e.g. bh->flags.  Pairs
     * with smp_wmb() in aio_notify.
     */
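/*
 * Illustrative summary (not part of the original file) of how the notify
 * flags pair up between a producer (e.g. qemu_bh_schedule() from another
 * thread) and the consumer event loop:
 *
 *     producer: aio_notify()            consumer: prepare/poll/accept
 *     ------------------------------    ------------------------------------
 *     write bh->flags / bh_list         notify_me |= 1
 *     smp_wmb()                         smp_mb()
 *     notified = true                   compute timeout (reads BH flags)
 *     smp_mb()                          block in poll / g_main_loop
 *     if (notify_me) set eventfd        clear notify_me, aio_notify_accept()
 */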
static void aio_timerlist_notify(void *opaque, QEMUClockType type)

static void aio_context_notifier_cb(EventNotifier *e)
    AioContext *ctx = container_of(e, AioContext, notifier);

    event_notifier_test_and_clear(&ctx->notifier);

/* Returns true if aio_notify() was called (e.g. a BH was scheduled) */
static bool aio_context_notifier_poll(void *opaque)
    EventNotifier *e = opaque;
    AioContext *ctx = container_of(e, AioContext, notifier);

    /*
     * No need for load-acquire because we just want to kick the
     * event loop.  aio_notify_accept() takes care of synchronizing
     * the event loop with the producers.
     */
    return qatomic_read(&ctx->notified);

static void aio_context_notifier_poll_ready(EventNotifier *e)
    /* Do nothing, we just wanted to kick the event loop */
static void co_schedule_bh_cb(void *opaque)
    AioContext *ctx = opaque;
    QSLIST_HEAD(, Coroutine) straight, reversed;

    QSLIST_MOVE_ATOMIC(&reversed, &ctx->scheduled_coroutines);
    QSLIST_INIT(&straight);

    while (!QSLIST_EMPTY(&reversed)) {
        Coroutine *co = QSLIST_FIRST(&reversed);
        QSLIST_REMOVE_HEAD(&reversed, co_scheduled_next);
        QSLIST_INSERT_HEAD(&straight, co, co_scheduled_next);

    while (!QSLIST_EMPTY(&straight)) {
        Coroutine *co = QSLIST_FIRST(&straight);
        QSLIST_REMOVE_HEAD(&straight, co_scheduled_next);
        trace_aio_co_schedule_bh_cb(ctx, co);

        /* Protected by write barrier in qemu_aio_coroutine_enter */
        qatomic_set(&co->scheduled, NULL);
        qemu_aio_coroutine_enter(ctx, co);
AioContext *aio_context_new(Error **errp)
    ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
    QSLIST_INIT(&ctx->bh_list);
    QSIMPLEQ_INIT(&ctx->bh_slice_list);
    aio_context_setup(ctx);

    ret = event_notifier_init(&ctx->notifier, false);
        error_setg_errno(errp, -ret, "Failed to initialize event notifier");

    g_source_set_can_recurse(&ctx->source, true);
    qemu_lockcnt_init(&ctx->list_lock);

    ctx->co_schedule_bh = aio_bh_new(ctx, co_schedule_bh_cb, ctx);
    QSLIST_INIT(&ctx->scheduled_coroutines);

    aio_set_event_notifier(ctx, &ctx->notifier,
                           aio_context_notifier_cb,
                           aio_context_notifier_poll,
                           aio_context_notifier_poll_ready);
#ifdef CONFIG_LINUX_AIO
    ctx->linux_aio = NULL;

#ifdef CONFIG_LINUX_IO_URING
    ctx->linux_io_uring = NULL;

    ctx->thread_pool = NULL;
    qemu_rec_mutex_init(&ctx->lock);
    timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);

    ctx->poll_max_ns = 0;
    ctx->poll_shrink = 0;
    ctx->aio_max_batch = 0;
    ctx->thread_pool_min = 0;
    ctx->thread_pool_max = THREAD_POOL_MAX_THREADS_DEFAULT;

    register_aiocontext(ctx);

    g_source_destroy(&ctx->source);
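/*
 * Illustrative sketch (not part of the original file): creating a standalone
 * AioContext and driving it from a dedicated thread, roughly what an
 * IOThread does.  The done flag is hypothetical:
 *
 *     AioContext *ctx = aio_context_new(&error_abort);
 *     qemu_set_current_aio_context(ctx);      // once, in the new thread
 *     while (!done) {
 *         aio_poll(ctx, true);                // blocks until work arrives
 *     }
 *     aio_context_unref(ctx);
 */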
void aio_co_schedule(AioContext *ctx, Coroutine *co)
    trace_aio_co_schedule(ctx, co);
    const char *scheduled = qatomic_cmpxchg(&co->scheduled, NULL,
                "%s: Co-routine was already scheduled in '%s'\n",
                __func__, scheduled);

    /* The coroutine might run and release the last ctx reference before we
     * invoke qemu_bh_schedule().  Take a reference to keep ctx alive until
     * we are done.
     */
    aio_context_ref(ctx);

    QSLIST_INSERT_HEAD_ATOMIC(&ctx->scheduled_coroutines,
                              co, co_scheduled_next);
    qemu_bh_schedule(ctx->co_schedule_bh);

    aio_context_unref(ctx);
typedef struct AioCoRescheduleSelf {
    Coroutine *co;
    AioContext *new_ctx;
} AioCoRescheduleSelf;

static void aio_co_reschedule_self_bh(void *opaque)
    AioCoRescheduleSelf *data = opaque;
    aio_co_schedule(data->new_ctx, data->co);

void coroutine_fn aio_co_reschedule_self(AioContext *new_ctx)
    AioContext *old_ctx = qemu_get_current_aio_context();

    if (old_ctx != new_ctx) {
        AioCoRescheduleSelf data = {
            .co = qemu_coroutine_self(),
        /*
         * We can't directly schedule the coroutine in the target context
         * because this would be racy: The other thread could try to enter the
         * coroutine before it has yielded in this one.
         */
        aio_bh_schedule_oneshot(old_ctx, aio_co_reschedule_self_bh, &data);
        qemu_coroutine_yield();
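/*
 * Illustrative sketch (not part of the original file): a coroutine can use
 * aio_co_reschedule_self() to hop into another AioContext and back.  The
 * iothread_ctx name is hypothetical:
 *
 *     AioContext *old_ctx = qemu_get_current_aio_context();
 *     aio_co_reschedule_self(iothread_ctx);
 *     // ... work that must run in iothread_ctx ...
 *     aio_co_reschedule_self(old_ctx);
 */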
void aio_co_wake(Coroutine *co)
    /* Read coroutine before co->ctx.  Matches smp_wmb in
     * qemu_coroutine_enter.
     */
    smp_read_barrier_depends();
    ctx = qatomic_read(&co->ctx);

    aio_co_enter(ctx, co);

void aio_co_enter(AioContext *ctx, Coroutine *co)
    if (ctx != qemu_get_current_aio_context()) {
        aio_co_schedule(ctx, co);

    if (qemu_in_coroutine()) {
        Coroutine *self = qemu_coroutine_self();
        QSIMPLEQ_INSERT_TAIL(&self->co_queue_wakeup, co, co_queue_next);

        qemu_aio_coroutine_enter(ctx, co);
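/*
 * Illustrative sketch (not part of the original file): handing a coroutine
 * to a specific AioContext.  example_entry and target_ctx are hypothetical:
 *
 *     static void coroutine_fn example_entry(void *opaque)
 *     {
 *         ...
 *     }
 *
 *     Coroutine *co = qemu_coroutine_create(example_entry, NULL);
 *     aio_co_schedule(target_ctx, co);   // entered from ctx->co_schedule_bh
 *
 * Completion callbacks normally use aio_co_wake(co) instead, which reads
 * co->ctx and either queues the coroutine locally or defers to
 * aio_co_schedule(), as shown in aio_co_enter() above.
 */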
void aio_context_ref(AioContext *ctx)
    g_source_ref(&ctx->source);

void aio_context_unref(AioContext *ctx)
    g_source_unref(&ctx->source);

QEMU_DEFINE_STATIC_CO_TLS(AioContext *, my_aiocontext)

AioContext *qemu_get_current_aio_context(void)
    AioContext *ctx = get_my_aiocontext();

        /* Possibly in a vCPU thread.  */
        return qemu_get_aio_context();

void qemu_set_current_aio_context(AioContext *ctx)
    assert(!get_my_aiocontext());
    set_my_aiocontext(ctx);
void aio_context_set_thread_pool_params(AioContext *ctx, int64_t min,
                                        int64_t max, Error **errp)
    if (min > max || max <= 0 || min < 0 || min > INT_MAX || max > INT_MAX) {
        error_setg(errp, "bad thread-pool-min/thread-pool-max values");

    ctx->thread_pool_min = min;
    ctx->thread_pool_max = max;

    if (ctx->thread_pool) {
        thread_pool_update_params(ctx->thread_pool, ctx);
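/*
 * Illustrative sketch (not part of the original file): adjusting the
 * per-context thread pool bounds; the values are examples only:
 *
 *     Error *local_err = NULL;
 *     aio_context_set_thread_pool_params(ctx, 2, 16, &local_err);
 *     if (local_err) {
 *         error_report_err(local_err);
 *     }
 */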