@@ -174,8 +174,8 @@ void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
/* Get this show started by hooking up our callbacks */
aio_context_acquire(s->ctx);
- virtio_queue_aio_set_host_notifier_handler(s->vq, s->ctx,
- virtio_blk_data_plane_handle_output);
+ virtio_queue_set_host_notifier_handler(s->vq, s->ctx, true,
+ virtio_blk_data_plane_handle_output);
aio_context_release(s->ctx);
return;
@@ -210,7 +210,7 @@ void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s)
aio_context_acquire(s->ctx);
/* Stop notifications for new requests from guest */
- virtio_queue_aio_set_host_notifier_handler(s->vq, s->ctx, NULL);
+ virtio_queue_set_host_notifier_handler(s->vq, s->ctx, false, NULL);
/* Drain and switch bs back to the QEMU main loop */
blk_set_aio_context(s->conf->conf.blk, qemu_get_aio_context());
@@ -80,7 +80,7 @@ static int virtio_scsi_vring_init(VirtIOSCSI *s, VirtQueue *vq, int n,
return rc;
}
- virtio_queue_aio_set_host_notifier_handler(vq, s->ctx, fn);
+ virtio_queue_set_host_notifier_handler(vq, s->ctx, true, fn);
return 0;
}
@@ -97,10 +97,11 @@ static void virtio_scsi_clear_aio(VirtIOSCSI *s)
VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
int i;
- virtio_queue_aio_set_host_notifier_handler(vs->ctrl_vq, s->ctx, NULL);
- virtio_queue_aio_set_host_notifier_handler(vs->event_vq, s->ctx, NULL);
+ virtio_queue_set_host_notifier_handler(vs->ctrl_vq, s->ctx, false, NULL);
+ virtio_queue_set_host_notifier_handler(vs->event_vq, s->ctx, false, NULL);
for (i = 0; i < vs->conf.num_queues; i++) {
- virtio_queue_aio_set_host_notifier_handler(vs->cmd_vqs[i], s->ctx, NULL);
+ virtio_queue_set_host_notifier_handler(vs->cmd_vqs[i], s->ctx, false,
+ NULL);
}
}
@@ -166,16 +166,20 @@ static int set_host_notifier_internal(DeviceState *proxy, VirtioBusState *bus,
error_report("%s: unable to init event notifier: %d", __func__, r);
return r;
}
- virtio_queue_set_host_notifier_fd_handler(vq, true, true);
+ virtio_queue_set_host_notifier_handler(vq, qemu_get_aio_context(),
+ true, NULL);
+
r = k->ioeventfd_assign(proxy, notifier, n, assign);
if (r < 0) {
error_report("%s: unable to assign ioeventfd: %d", __func__, r);
- virtio_queue_set_host_notifier_fd_handler(vq, false, false);
+ virtio_queue_set_host_notifier_handler(vq, qemu_get_aio_context(),
+ false, NULL);
event_notifier_cleanup(notifier);
return r;
}
} else {
- virtio_queue_set_host_notifier_fd_handler(vq, false, false);
+ virtio_queue_set_host_notifier_handler(vq, qemu_get_aio_context(),
+ false, NULL);
k->ioeventfd_assign(proxy, notifier, n, assign);
event_notifier_cleanup(notifier);
}
@@ -269,7 +273,8 @@ int virtio_bus_set_host_notifier(VirtioBusState *bus, int n, bool assign)
* ioeventfd and we may end up with a notification where
* we don't expect one.
*/
- virtio_queue_set_host_notifier_fd_handler(vq, assign, !assign);
+ virtio_queue_set_host_notifier_handler(vq, qemu_get_aio_context(),
+ false, NULL);
if (!assign) {
/* Use generic ioeventfd handler again. */
k->ioeventfd_set_disabled(proxy, false);
@@ -1793,10 +1793,16 @@ static void virtio_queue_host_notifier_aio_read(EventNotifier *n)
}
}
-void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
- VirtQueueHandleOutput handle_output)
+/* If assign == true, set the host notifier handler to @handle_output, or use
+ * the default vq handler if it is NULL, in the aio context @ctx.
+ * If assign == false, unregister the handler of host notifier in @ctx, and do
+ * a last host notify if there are notifications pending. */
+void virtio_queue_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
+ bool assign,
+ VirtQueueHandleOutput handle_output)
{
- if (handle_output) {
+ if (assign) {
+ handle_output = handle_output ?: vq->handle_output;
vq->handle_aio_output = handle_output;
aio_set_event_notifier(ctx, &vq->host_notifier, true,
virtio_queue_host_notifier_aio_read);
@@ -250,9 +250,9 @@ void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq);
void virtio_queue_set_host_notifier_fd_handler(VirtQueue *vq, bool assign,
bool set_handler);
-void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
- void (*fn)(VirtIODevice *,
- VirtQueue *));
+void virtio_queue_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
+ bool assign,
+ VirtQueueHandleOutput handle_output);
void virtio_irq(VirtQueue *vq);
VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector);
VirtQueue *virtio_vector_next_queue(VirtQueue *vq);
Apart from the interface difference, the aio version works the same as the non-aio one. The event notifier versus aio fd handler makes no difference, except the former led to an ugly patch in commit ab27c3b5e7, which won't be necessary any more. As the first step to unify them, all callers are switched to this renamed aio interface, and a function comment is added. Signed-off-by: Fam Zheng <famz@redhat.com> --- hw/block/dataplane/virtio-blk.c | 6 +++--- hw/scsi/virtio-scsi-dataplane.c | 9 +++++---- hw/virtio/virtio-bus.c | 13 +++++++++---- hw/virtio/virtio.c | 12 +++++++++--- include/hw/virtio/virtio.h | 6 +++--- 5 files changed, 29 insertions(+), 17 deletions(-)