async.c 
/*
 * Data plane event loop
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2009-2017 QEMU contributors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "block/aio.h"
#include "block/thread-pool.h"
#include "block/graph-lock.h"
#include "qemu/main-loop.h"
#include "qemu/atomic.h"
#include "qemu/rcu_queue.h"
#include "block/raw-aio.h"
#include "qemu/coroutine_int.h"
#include "qemu/coroutine-tls.h"
#include "sysemu/cpu-timers.h"
#include "trace.h"

/***********************************************************/
/* bottom halves (can be seen as timers which expire ASAP) */

/* QEMUBH::flags values */
enum {
    /* Already enqueued and waiting for aio_bh_poll() */
    BH_PENDING   = (1 << 0),

    /* Invoke the callback */
    BH_SCHEDULED = (1 << 1),

    /* Delete without invoking callback */
    BH_DELETED   = (1 << 2),

    /* Delete after invoking callback */
    BH_ONESHOT   = (1 << 3),

    /* Schedule periodically when the event loop is idle */
    BH_IDLE      = (1 << 4),
};

struct QEMUBH {
    AioContext *ctx;
    const char *name;
    QEMUBHFunc *cb;
    void *opaque;
    QSLIST_ENTRY(QEMUBH) next;
    unsigned flags;
    MemReentrancyGuard *reentrancy_guard;
};

/* Called concurrently from any thread */
static void aio_bh_enqueue(QEMUBH *bh, unsigned new_flags)
{
    AioContext *ctx = bh->ctx;
    unsigned old_flags;

    /*
     * Synchronizes with atomic_fetch_and() in aio_bh_dequeue(), ensuring that
     * insertion starts after BH_PENDING is set.
     */
    old_flags = qatomic_fetch_or(&bh->flags, BH_PENDING | new_flags);

    if (!(old_flags & BH_PENDING)) {
        /*
         * At this point the bottom half becomes visible to aio_bh_poll().
         * This insertion thus synchronizes with QSLIST_MOVE_ATOMIC in
         * aio_bh_poll(), ensuring that:
         * 1. any writes needed by the callback are visible from the callback
         *    after aio_bh_dequeue() returns bh.
         * 2. ctx is loaded before the callback has a chance to execute and bh
         *    could be freed.
         */
        QSLIST_INSERT_HEAD_ATOMIC(&ctx->bh_list, bh, next);
    }

    aio_notify(ctx);
    if (unlikely(icount_enabled())) {
        /*
         * Workaround for record/replay.
         * vCPU execution should be suspended when new BH is set.
         * This is needed to avoid guest timeouts caused
         * by the long cycles of the execution.
         */
        icount_notify_exit();
    }
}

/* Only called from aio_bh_poll() and aio_ctx_finalize() */
static QEMUBH *aio_bh_dequeue(BHList *head, unsigned *flags)
{
    QEMUBH *bh = QSLIST_FIRST_RCU(head);

    if (!bh) {
        return NULL;
    }

    QSLIST_REMOVE_HEAD(head, next);

    /*
     * Synchronizes with qatomic_fetch_or() in aio_bh_enqueue(), ensuring that
     * the removal finishes before BH_PENDING is reset.
     */
    *flags = qatomic_fetch_and(&bh->flags,
                              ~(BH_PENDING | BH_SCHEDULED | BH_IDLE));
    return bh;
}

void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb,
                                  void *opaque, const char *name)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
        .name = name,
    };
    aio_bh_enqueue(bh, BH_SCHEDULED | BH_ONESHOT);
}

QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
                        const char *name, MemReentrancyGuard *reentrancy_guard)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
        .name = name,
        .reentrancy_guard = reentrancy_guard,
    };
    return bh;
}
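
/*
 * Usage sketch (illustrative only; example_bh_cb() and example_bh_lifecycle()
 * are hypothetical, the aio_*()/qemu_bh_*() calls are the API declared in
 * block/aio.h): a typical bottom half lifecycle.
 */
#if 0
static void example_bh_cb(void *opaque)
{
    /* Runs in ctx's home thread, from aio_bh_poll(). */
}

static void example_bh_lifecycle(AioContext *ctx)
{
    QEMUBH *bh = aio_bh_new(ctx, example_bh_cb, NULL);

    qemu_bh_schedule(bh);    /* may be called from any thread */
    /* ... later, once the callback is no longer needed ... */
    qemu_bh_delete(bh);      /* the QEMUBH is freed by aio_bh_poll() */

    /* One-shot variant: allocated, invoked once and freed automatically. */
    aio_bh_schedule_oneshot(ctx, example_bh_cb, NULL);
}
#endif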

void aio_bh_call(QEMUBH *bh)
{
    bool last_engaged_in_io = false;

    /* Make a copy of the guard-pointer as cb may free the bh */
    MemReentrancyGuard *reentrancy_guard = bh->reentrancy_guard;
    if (reentrancy_guard) {
        last_engaged_in_io = reentrancy_guard->engaged_in_io;
        if (reentrancy_guard->engaged_in_io) {
            trace_reentrant_aio(bh->ctx, bh->name);
        }
        reentrancy_guard->engaged_in_io = true;
    }

    bh->cb(bh->opaque);

    if (reentrancy_guard) {
        reentrancy_guard->engaged_in_io = last_engaged_in_io;
    }
}

/*
 * Multiple invocations of aio_bh_poll() must not run concurrently.
 * Returns 1 if a non-idle bottom half was executed, 0 otherwise.
 */
int aio_bh_poll(AioContext *ctx)
{
    BHListSlice slice;
    BHListSlice *s;
    int ret = 0;

    /* Synchronizes with QSLIST_INSERT_HEAD_ATOMIC in aio_bh_enqueue().  */
    QSLIST_MOVE_ATOMIC(&slice.bh_list, &ctx->bh_list);

    /*
     * GCC13 [-Werror=dangling-pointer=] complains that the local variable
     * 'slice' is being stored in the global 'ctx->bh_slice_list' but the
     * list is emptied before this function returns.
     */
#if !defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpragmas"
#pragma GCC diagnostic ignored "-Wdangling-pointer="
#endif
    QSIMPLEQ_INSERT_TAIL(&ctx->bh_slice_list, &slice, next);
#if !defined(__clang__)
#pragma GCC diagnostic pop
#endif

    while ((s = QSIMPLEQ_FIRST(&ctx->bh_slice_list))) {
        QEMUBH *bh;
        unsigned flags;

        bh = aio_bh_dequeue(&s->bh_list, &flags);
        if (!bh) {
            QSIMPLEQ_REMOVE_HEAD(&ctx->bh_slice_list, next);
            continue;
        }

        if ((flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            /* Idle BHs don't count as progress */
            if (!(flags & BH_IDLE)) {
                ret = 1;
            }
            aio_bh_call(bh);
        }
        if (flags & (BH_DELETED | BH_ONESHOT)) {
            g_free(bh);
        }
    }

    return ret;
}

void qemu_bh_schedule_idle(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_SCHEDULED | BH_IDLE);
}

void qemu_bh_schedule(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_SCHEDULED);
}

/* This function is asynchronous.
 */
void qemu_bh_cancel(QEMUBH *bh)
{
    qatomic_and(&bh->flags, ~BH_SCHEDULED);
}

/* This function is asynchronous.  The bottom half is actually deleted (and
 * its memory freed) later, by aio_bh_poll().
 */
void qemu_bh_delete(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_DELETED);
}

static int64_t aio_compute_bh_timeout(BHList *head, int timeout)
{
    QEMUBH *bh;

    QSLIST_FOREACH_RCU(bh, head, next) {
        if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            if (bh->flags & BH_IDLE) {
                /* idle bottom halves will be polled at least
                 * every 10ms */
                timeout = 10000000;
            } else {
                /* non-idle bottom halves will be executed
                 * immediately */
                return 0;
            }
        }
    }

    return timeout;
}

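/*
 * Compute how long the event loop may block, in nanoseconds: 0 means "do not
 * block", a negative value means "block until an event arrives", and pending
 * idle bottom halves cap the timeout at 10 ms.  aio_ctx_prepare() converts
 * the result to milliseconds for glib.
 */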
int64_t
aio_compute_timeout(AioContext *ctx)
{
    BHListSlice *s;
    int64_t deadline;
    int timeout = -1;

    timeout = aio_compute_bh_timeout(&ctx->bh_list, timeout);
    if (timeout == 0) {
        return 0;
    }

    QSIMPLEQ_FOREACH(s, &ctx->bh_slice_list, next) {
        timeout = aio_compute_bh_timeout(&s->bh_list, timeout);
        if (timeout == 0) {
            return 0;
        }
    }

    deadline = timerlistgroup_deadline_ns(&ctx->tlg);
    if (deadline == 0) {
        return 0;
    } else {
        return qemu_soonest_timeout(timeout, deadline);
    }
}

static gboolean
aio_ctx_prepare(GSource *source, gint    *timeout)
{
    AioContext *ctx = (AioContext *) source;

    qatomic_set(&ctx->notify_me, qatomic_read(&ctx->notify_me) | 1);

    /*
     * Write ctx->notify_me before computing the timeout
     * (reading bottom half flags, etc.).  Pairs with
     * smp_mb in aio_notify().
     */
    smp_mb();

    /* We assume there is no timeout already supplied */
    *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));

    if (aio_prepare(ctx)) {
        *timeout = 0;
    }

    return *timeout == 0;
}

static gboolean
aio_ctx_check(GSource *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;
    BHListSlice *s;

    /* Finish computing the timeout before clearing the flag.  */
    qatomic_store_release(&ctx->notify_me, qatomic_read(&ctx->notify_me) & ~1);
    aio_notify_accept(ctx);

    QSLIST_FOREACH_RCU(bh, &ctx->bh_list, next) {
        if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            return true;
        }
    }

    QSIMPLEQ_FOREACH(s, &ctx->bh_slice_list, next) {
        QSLIST_FOREACH_RCU(bh, &s->bh_list, next) {
            if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
                return true;
            }
        }
    }
    return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0);
}

static gboolean
aio_ctx_dispatch(GSource     *source,
                 GSourceFunc  callback,
                 gpointer     user_data)
{
    AioContext *ctx = (AioContext *) source;

    assert(callback == NULL);
    aio_dispatch(ctx);
    return true;
}

static void
aio_ctx_finalize(GSource     *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;
    unsigned flags;

    thread_pool_free(ctx->thread_pool);

#ifdef CONFIG_LINUX_AIO
    if (ctx->linux_aio) {
        laio_detach_aio_context(ctx->linux_aio, ctx);
        laio_cleanup(ctx->linux_aio);
        ctx->linux_aio = NULL;
    }
#endif

#ifdef CONFIG_LINUX_IO_URING
    if (ctx->linux_io_uring) {
        luring_detach_aio_context(ctx->linux_io_uring, ctx);
        luring_cleanup(ctx->linux_io_uring);
        ctx->linux_io_uring = NULL;
    }
#endif

    assert(QSLIST_EMPTY(&ctx->scheduled_coroutines));
    qemu_bh_delete(ctx->co_schedule_bh);

    /* There must be no aio_bh_poll() calls going on */
    assert(QSIMPLEQ_EMPTY(&ctx->bh_slice_list));

    while ((bh = aio_bh_dequeue(&ctx->bh_list, &flags))) {
        /*
         * qemu_bh_delete() must have been called on BHs in this AioContext. In
         * many cases memory leaks, hangs, or inconsistent state occur when a
         * BH is leaked because something still expects it to run.
         *
         * If you hit this, fix the lifecycle of the BH so that
         * qemu_bh_delete() and any associated cleanup is called before the
         * AioContext is finalized.
         */
        if (unlikely(!(flags & BH_DELETED))) {
            fprintf(stderr, "%s: BH '%s' leaked, aborting...\n",
                    __func__, bh->name);
            abort();
        }

        g_free(bh);
    }

    aio_set_event_notifier(ctx, &ctx->notifier, NULL, NULL, NULL);
    event_notifier_cleanup(&ctx->notifier);
    qemu_rec_mutex_destroy(&ctx->lock);
    qemu_lockcnt_destroy(&ctx->list_lock);
    timerlistgroup_deinit(&ctx->tlg);
    unregister_aiocontext(ctx);
    aio_context_destroy(ctx);
}

static GSourceFuncs aio_source_funcs = {
    aio_ctx_prepare,
    aio_ctx_check,
    aio_ctx_dispatch,
    aio_ctx_finalize
};

GSource *aio_get_g_source(AioContext *ctx)
{
    aio_context_use_g_source(ctx);
    g_source_ref(&ctx->source);
    return &ctx->source;
}

ThreadPool *aio_get_thread_pool(AioContext *ctx)
{
    if (!ctx->thread_pool) {
        ctx->thread_pool = thread_pool_new(ctx);
    }
    return ctx->thread_pool;
}

#ifdef CONFIG_LINUX_AIO
LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp)
{
    if (!ctx->linux_aio) {
        ctx->linux_aio = laio_init(errp);
        if (ctx->linux_aio) {
            laio_attach_aio_context(ctx->linux_aio, ctx);
        }
    }
    return ctx->linux_aio;
}

LinuxAioState *aio_get_linux_aio(AioContext *ctx)
{
    assert(ctx->linux_aio);
    return ctx->linux_aio;
}
#endif

#ifdef CONFIG_LINUX_IO_URING
LuringState *aio_setup_linux_io_uring(AioContext *ctx, Error **errp)
{
    if (ctx->linux_io_uring) {
        return ctx->linux_io_uring;
    }

    ctx->linux_io_uring = luring_init(errp);
    if (!ctx->linux_io_uring) {
        return NULL;
    }

    luring_attach_aio_context(ctx->linux_io_uring, ctx);
    return ctx->linux_io_uring;
}

LuringState *aio_get_linux_io_uring(AioContext *ctx)
{
    assert(ctx->linux_io_uring);
    return ctx->linux_io_uring;
}
#endif

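/*
 * Kick the event loop: ctx->notified records that something happened (it is
 * consumed by aio_notify_accept() and polled by aio_context_notifier_poll()),
 * while the event notifier is only written when ctx->notify_me shows that a
 * thread is about to block in aio_ctx_prepare()/aio_poll(), so the common
 * case avoids an event_notifier_set() syscall.
 */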
void aio_notify(AioContext *ctx)
{
    /*
     * Write e.g. ctx->bh_list before writing ctx->notified.  Pairs with
     * smp_mb() in aio_notify_accept().
     */
    smp_wmb();
    qatomic_set(&ctx->notified, true);

    /*
     * Write ctx->notified (and also ctx->bh_list) before reading ctx->notify_me.
     * Pairs with smp_mb() in aio_ctx_prepare or aio_poll.
     */
    smp_mb();
    if (qatomic_read(&ctx->notify_me)) {
        event_notifier_set(&ctx->notifier);
    }
}

void aio_notify_accept(AioContext *ctx)
{
    qatomic_set(&ctx->notified, false);

    /*
     * Order reads of ctx->notified (in aio_context_notifier_poll()) and the
     * above clearing of ctx->notified before reads of e.g. bh->flags.  Pairs
     * with smp_wmb() in aio_notify.
     */
    smp_mb();
}

static void aio_timerlist_notify(void *opaque, QEMUClockType type)
{
    aio_notify(opaque);
}

static void aio_context_notifier_cb(EventNotifier *e)
{
    AioContext *ctx = container_of(e, AioContext, notifier);

    event_notifier_test_and_clear(&ctx->notifier);
}

/* Returns true if aio_notify() was called (e.g. a BH was scheduled) */
static bool aio_context_notifier_poll(void *opaque)
{
    EventNotifier *e = opaque;
    AioContext *ctx = container_of(e, AioContext, notifier);

    /*
     * No need for load-acquire because we just want to kick the
     * event loop.  aio_notify_accept() takes care of synchronizing
     * the event loop with the producers.
     */
    return qatomic_read(&ctx->notified);
}

static void aio_context_notifier_poll_ready(EventNotifier *e)
{
    /* Do nothing, we just wanted to kick the event loop */
}

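/*
 * Bottom half that enters the coroutines queued by aio_co_schedule().  The
 * scheduled_coroutines list is built with QSLIST_INSERT_HEAD_ATOMIC and is
 * therefore in LIFO order; reversing it first preserves the order in which
 * the coroutines were scheduled.
 */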
static void co_schedule_bh_cb(void *opaque)
{
    AioContext *ctx = opaque;
    QSLIST_HEAD(, Coroutine) straight, reversed;

    QSLIST_MOVE_ATOMIC(&reversed, &ctx->scheduled_coroutines);
    QSLIST_INIT(&straight);

    while (!QSLIST_EMPTY(&reversed)) {
        Coroutine *co = QSLIST_FIRST(&reversed);
        QSLIST_REMOVE_HEAD(&reversed, co_scheduled_next);
        QSLIST_INSERT_HEAD(&straight, co, co_scheduled_next);
    }

    while (!QSLIST_EMPTY(&straight)) {
        Coroutine *co = QSLIST_FIRST(&straight);
        QSLIST_REMOVE_HEAD(&straight, co_scheduled_next);
        trace_aio_co_schedule_bh_cb(ctx, co);

        /* Protected by write barrier in qemu_aio_coroutine_enter */
        qatomic_set(&co->scheduled, NULL);
        qemu_aio_coroutine_enter(ctx, co);
    }
}

AioContext *aio_context_new(Error **errp)
{
    int ret;
    AioContext *ctx;

    ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
    QSLIST_INIT(&ctx->bh_list);
    QSIMPLEQ_INIT(&ctx->bh_slice_list);
    aio_context_setup(ctx);

    ret = event_notifier_init(&ctx->notifier, false);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to initialize event notifier");
        goto fail;
    }
    g_source_set_can_recurse(&ctx->source, true);
    qemu_lockcnt_init(&ctx->list_lock);

    ctx->co_schedule_bh = aio_bh_new(ctx, co_schedule_bh_cb, ctx);
    QSLIST_INIT(&ctx->scheduled_coroutines);

    aio_set_event_notifier(ctx, &ctx->notifier,
                           aio_context_notifier_cb,
                           aio_context_notifier_poll,
                           aio_context_notifier_poll_ready);
#ifdef CONFIG_LINUX_AIO
    ctx->linux_aio = NULL;
#endif

#ifdef CONFIG_LINUX_IO_URING
    ctx->linux_io_uring = NULL;
#endif

    ctx->thread_pool = NULL;
    qemu_rec_mutex_init(&ctx->lock);
    timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);

    ctx->poll_ns = 0;
    ctx->poll_max_ns = 0;
    ctx->poll_grow = 0;
    ctx->poll_shrink = 0;

    ctx->aio_max_batch = 0;

    ctx->thread_pool_min = 0;
    ctx->thread_pool_max = THREAD_POOL_MAX_THREADS_DEFAULT;

    register_aiocontext(ctx);

    return ctx;
fail:
    g_source_destroy(&ctx->source);
    return NULL;
}
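
/*
 * Usage sketch (illustrative only; example_aio_thread() is hypothetical, the
 * aio_*() calls are the real API): a standalone AioContext is created with
 * aio_context_new() and then driven by a dedicated thread that repeatedly
 * calls aio_poll(), roughly what iothread.c does.
 */
#if 0
static void *example_aio_thread(void *opaque)
{
    AioContext *ctx = opaque;

    qemu_set_current_aio_context(ctx);
    for (;;) {
        aio_poll(ctx, true);    /* block until a BH, timer or fd fires */
    }
    return NULL;
}
#endif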

void aio_co_schedule(AioContext *ctx, Coroutine *co)
{
    trace_aio_co_schedule(ctx, co);
    const char *scheduled = qatomic_cmpxchg(&co->scheduled, NULL,
                                           __func__);

    if (scheduled) {
        fprintf(stderr,
                "%s: Co-routine was already scheduled in '%s'\n",
                __func__, scheduled);
        abort();
    }

    /* The coroutine might run and release the last ctx reference before we
     * invoke qemu_bh_schedule().  Take a reference to keep ctx alive until
     * we're done.
     */
    aio_context_ref(ctx);

    QSLIST_INSERT_HEAD_ATOMIC(&ctx->scheduled_coroutines,
                              co, co_scheduled_next);
    qemu_bh_schedule(ctx->co_schedule_bh);

    aio_context_unref(ctx);
}
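
/*
 * Usage sketch (illustrative only; example_co_entry() and example_schedule()
 * are hypothetical): create a coroutine and hand it to an AioContext, which
 * enters it from co_schedule_bh_cb() in its own thread.
 */
#if 0
static void coroutine_fn example_co_entry(void *opaque)
{
    /* Runs in the thread that owns the target AioContext. */
}

static void example_schedule(AioContext *ctx)
{
    Coroutine *co = qemu_coroutine_create(example_co_entry, NULL);

    aio_co_schedule(ctx, co);
}
#endif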

typedef struct AioCoRescheduleSelf {
    Coroutine *co;
    AioContext *new_ctx;
} AioCoRescheduleSelf;

static void aio_co_reschedule_self_bh(void *opaque)
{
    AioCoRescheduleSelf *data = opaque;
    aio_co_schedule(data->new_ctx, data->co);
}

void coroutine_fn aio_co_reschedule_self(AioContext *new_ctx)
{
    AioContext *old_ctx = qemu_get_current_aio_context();

    if (old_ctx != new_ctx) {
        AioCoRescheduleSelf data = {
            .co = qemu_coroutine_self(),
            .new_ctx = new_ctx,
        };
        /*
         * We can't directly schedule the coroutine in the target context
         * because this would be racy: The other thread could try to enter the
         * coroutine before it has yielded in this one.
         */
        aio_bh_schedule_oneshot(old_ctx, aio_co_reschedule_self_bh, &data);
        qemu_coroutine_yield();
    }
}

void aio_co_wake(Coroutine *co)
{
    AioContext *ctx;

    /* Read coroutine before co->ctx.  Matches smp_wmb in
     * qemu_coroutine_enter.
     */
    smp_read_barrier_depends();
    ctx = qatomic_read(&co->ctx);

    aio_co_enter(ctx, co);
}
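
/*
 * Usage sketch (illustrative only; ExampleRequest and its functions are
 * hypothetical): the usual pattern behind aio_co_wake() is a coroutine that
 * records itself, yields, and is resumed by a completion callback.
 */
#if 0
typedef struct ExampleRequest {
    Coroutine *co;
    bool done;
} ExampleRequest;

static void example_request_complete(void *opaque)
{
    ExampleRequest *req = opaque;

    req->done = true;
    aio_co_wake(req->co);          /* enter or reschedule the coroutine */
}

static void coroutine_fn example_wait(ExampleRequest *req)
{
    req->co = qemu_coroutine_self();
    while (!req->done) {
        qemu_coroutine_yield();    /* resumed by example_request_complete() */
    }
}
#endif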

void aio_co_enter(AioContext *ctx, Coroutine *co)
{
    if (ctx != qemu_get_current_aio_context()) {
        aio_co_schedule(ctx, co);
        return;
    }

    if (qemu_in_coroutine()) {
        Coroutine *self = qemu_coroutine_self();
        assert(self != co);
        QSIMPLEQ_INSERT_TAIL(&self->co_queue_wakeup, co, co_queue_next);
    } else {
        qemu_aio_coroutine_enter(ctx, co);
    }
}

void aio_context_ref(AioContext *ctx)
{
    g_source_ref(&ctx->source);
}

void aio_context_unref(AioContext *ctx)
{
    g_source_unref(&ctx->source);
}

QEMU_DEFINE_STATIC_CO_TLS(AioContext *, my_aiocontext)

AioContext *qemu_get_current_aio_context(void)
{
    AioContext *ctx = get_my_aiocontext();
    if (ctx) {
        return ctx;
    }
    if (bql_locked()) {
        /* Possibly in a vCPU thread.  */
        return qemu_get_aio_context();
    }
    return NULL;
}

void qemu_set_current_aio_context(AioContext *ctx)
{
    assert(!get_my_aiocontext());
    set_my_aiocontext(ctx);
}

void aio_context_set_thread_pool_params(AioContext *ctx, int64_t min,
                                        int64_t max, Error **errp)
{

    if (min > max || max <= 0 || min < 0 || min > INT_MAX || max > INT_MAX) {
        error_setg(errp, "bad thread-pool-min/thread-pool-max values");
        return;
    }

    ctx->thread_pool_min = min;
    ctx->thread_pool_max = max;

    if (ctx->thread_pool) {
        thread_pool_update_params(ctx->thread_pool, ctx);
    }
}
