@@ -359,9 +359,7 @@ bool aio_dispatch(AioContext *ctx)
qemu_lockcnt_dec(&ctx->list_lock);
/* Run our timers */
- aio_context_acquire(ctx);
progress |= timerlistgroup_run_timers(&ctx->tlg);
- aio_context_release(ctx);
return progress;
}
@@ -372,9 +372,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
progress |= aio_dispatch_handlers(ctx, event);
} while (count > 0);
- aio_context_acquire(ctx);
progress |= timerlistgroup_run_timers(&ctx->tlg);
- aio_context_release(ctx);
return progress;
}
@@ -376,9 +376,11 @@ static void curl_multi_timeout_do(void *arg)
return;
}
+ aio_context_acquire(s->aio_context);
curl_multi_socket_action(s->multi, CURL_SOCKET_TIMEOUT, 0, &running);
curl_multi_check_completion(s);
+ aio_context_release(s->aio_context);
#else
abort();
#endif
@@ -1214,6 +1214,7 @@ static void iscsi_nop_timed_event(void *opaque)
{
IscsiLun *iscsilun = opaque;
+ aio_context_acquire(iscsilun->aio_context);
if (iscsi_get_nops_in_flight(iscsilun->iscsi) >= MAX_NOP_FAILURES) {
error_report("iSCSI: NOP timeout. Reconnecting...");
iscsilun->request_timed_out = true;
@@ -1224,6 +1225,7 @@ static void iscsi_nop_timed_event(void *opaque)
timer_mod(iscsilun->nop_timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + NOP_INTERVAL);
iscsi_set_events(iscsilun);
+ aio_context_release(iscsilun->aio_context);
}
static void iscsi_readcapacity_sync(IscsiLun *iscsilun, Error **errp)
@@ -129,7 +129,11 @@ static void null_bh_cb(void *opaque)
static void null_timer_cb(void *opaque)
{
NullAIOCB *acb = opaque;
+ AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
+
+ aio_context_acquire(ctx);
acb->common.cb(acb->common.opaque, 0);
+ aio_context_release(ctx);
timer_deinit(&acb->timer);
qemu_aio_unref(acb);
}
@@ -342,10 +342,12 @@ static void qed_need_check_timer_cb(void *opaque)
trace_qed_need_check_timer_cb(s);
+ qed_acquire(s);
qed_plug_allocating_write_reqs(s);
/* Ensure writes are on disk before clearing flag */
bdrv_aio_flush(s->bs, qed_clear_need_check, s);
+ qed_release(s);
}
void qed_acquire(BDRVQEDState *s)
@@ -370,7 +370,9 @@ static void timer_cb(BlockDriverState *bs, bool is_write)
qemu_mutex_unlock(&tg->lock);
/* Run the request that was waiting for this timer */
+ aio_context_acquire(bdrv_get_aio_context(bs));
empty_queue = !qemu_co_enter_next(&bs->throttled_reqs[is_write]);
+ aio_context_release(bdrv_get_aio_context(bs));
/* If the request queue was empty then we have to take care of
* scheduling the next one */
@@ -18,13 +18,17 @@
typedef struct CoSleepCB {
QEMUTimer *ts;
Coroutine *co;
+ AioContext *ctx;
} CoSleepCB;
static void co_sleep_cb(void *opaque)
{
CoSleepCB *sleep_cb = opaque;
+ AioContext *ctx = sleep_cb->ctx;
+ aio_context_acquire(ctx);
qemu_coroutine_enter(sleep_cb->co, NULL);
+ aio_context_release(ctx);
}
void coroutine_fn co_aio_sleep_ns(AioContext *ctx, QEMUClockType type,
@@ -32,6 +36,7 @@ void coroutine_fn co_aio_sleep_ns(AioContext *ctx, QEMUClockType type,
{
CoSleepCB sleep_cb = {
.co = qemu_coroutine_self(),
+ .ctx = ctx,
};
sleep_cb.ts = aio_timer_new(ctx, type, SCALE_NS, co_sleep_cb, &sleep_cb);
timer_mod(sleep_cb.ts, qemu_clock_get_ns(type) + ns);
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 aio-posix.c                 | 2 --
 aio-win32.c                 | 2 --
 block/curl.c                | 2 ++
 block/iscsi.c               | 2 ++
 block/null.c                | 4 ++++
 block/qed.c                 | 2 ++
 block/throttle-groups.c     | 2 ++
 util/qemu-coroutine-sleep.c | 5 +++++
 8 files changed, 17 insertions(+), 4 deletions(-)