| Field | Value |
|---|---|
| Message ID | 1371210243-6099-18-git-send-email-stefanha@redhat.com |
| State | New |
| Headers | show |
Il 14/06/2013 13:44, Stefan Hajnoczi ha scritto: > The .io_flush() handler no longer exists and has no users. Drop the > io_flush argument to aio_set_fd_handler() and related functions. > > The AioFlushEventNotifierHandler and AioFlushHandler typedefs are no > longer used and are dropped too. > > Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> > --- > aio-posix.c | 7 ++----- > aio-win32.c | 3 +-- > async.c | 4 ++-- > block/curl.c | 9 ++++----- > block/gluster.c | 7 +++---- > block/iscsi.c | 3 +-- > block/linux-aio.c | 3 +-- > block/nbd.c | 11 ++++------- > block/rbd.c | 4 ++-- > block/sheepdog.c | 18 ++++++++---------- > block/ssh.c | 4 ++-- > hw/block/dataplane/virtio-blk.c | 8 ++++---- > include/block/aio.h | 14 ++------------ > main-loop.c | 9 +++------ > tests/test-aio.c | 40 ++++++++++++++++++++-------------------- > thread-pool.c | 5 ++--- > 16 files changed, 61 insertions(+), 88 deletions(-) > > diff --git a/aio-posix.c b/aio-posix.c > index 7d66048..2440eb9 100644 > --- a/aio-posix.c > +++ b/aio-posix.c > @@ -46,7 +46,6 @@ void aio_set_fd_handler(AioContext *ctx, > int fd, > IOHandler *io_read, > IOHandler *io_write, > - AioFlushHandler *io_flush, > void *opaque) > { > AioHandler *node; > @@ -95,12 +94,10 @@ void aio_set_fd_handler(AioContext *ctx, > > void aio_set_event_notifier(AioContext *ctx, > EventNotifier *notifier, > - EventNotifierHandler *io_read, > - AioFlushEventNotifierHandler *io_flush) > + EventNotifierHandler *io_read) > { > aio_set_fd_handler(ctx, event_notifier_get_fd(notifier), > - (IOHandler *)io_read, NULL, > - (AioFlushHandler *)io_flush, notifier); > + (IOHandler *)io_read, NULL, notifier); > } > > bool aio_pending(AioContext *ctx) > diff --git a/aio-win32.c b/aio-win32.c > index 4309c16..78b2801 100644 > --- a/aio-win32.c > +++ b/aio-win32.c > @@ -30,8 +30,7 @@ struct AioHandler { > > void aio_set_event_notifier(AioContext *ctx, > EventNotifier *e, > - EventNotifierHandler *io_notify, > - AioFlushEventNotifierHandler *io_flush) 
> + EventNotifierHandler *io_notify) > { > AioHandler *node; > > diff --git a/async.c b/async.c > index 90fe906..fe2c8bf 100644 > --- a/async.c > +++ b/async.c > @@ -174,7 +174,7 @@ aio_ctx_finalize(GSource *source) > AioContext *ctx = (AioContext *) source; > > thread_pool_free(ctx->thread_pool); > - aio_set_event_notifier(ctx, &ctx->notifier, NULL, NULL); > + aio_set_event_notifier(ctx, &ctx->notifier, NULL); > event_notifier_cleanup(&ctx->notifier); > g_array_free(ctx->pollfds, TRUE); > } > @@ -214,7 +214,7 @@ AioContext *aio_context_new(void) > event_notifier_init(&ctx->notifier, false); > aio_set_event_notifier(ctx, &ctx->notifier, > (EventNotifierHandler *) > - event_notifier_test_and_clear, NULL); > + event_notifier_test_and_clear); > > return ctx; > } > diff --git a/block/curl.c b/block/curl.c > index 2147076..e88621a 100644 > --- a/block/curl.c > +++ b/block/curl.c > @@ -92,17 +92,16 @@ static int curl_sock_cb(CURL *curl, curl_socket_t fd, int action, > DPRINTF("CURL (AIO): Sock action %d on fd %d\n", action, fd); > switch (action) { > case CURL_POLL_IN: > - qemu_aio_set_fd_handler(fd, curl_multi_do, NULL, NULL, s); > + qemu_aio_set_fd_handler(fd, curl_multi_do, NULL, s); > break; > case CURL_POLL_OUT: > - qemu_aio_set_fd_handler(fd, NULL, curl_multi_do, NULL, s); > + qemu_aio_set_fd_handler(fd, NULL, curl_multi_do, s); > break; > case CURL_POLL_INOUT: > - qemu_aio_set_fd_handler(fd, curl_multi_do, curl_multi_do, > - NULL, s); > + qemu_aio_set_fd_handler(fd, curl_multi_do, curl_multi_do, s); > break; > case CURL_POLL_REMOVE: > - qemu_aio_set_fd_handler(fd, NULL, NULL, NULL, NULL); > + qemu_aio_set_fd_handler(fd, NULL, NULL, NULL); > break; > } > > diff --git a/block/gluster.c b/block/gluster.c > index 7a69a12..3cff308 100644 > --- a/block/gluster.c > +++ b/block/gluster.c > @@ -339,7 +339,7 @@ static int qemu_gluster_open(BlockDriverState *bs, QDict *options, > } > fcntl(s->fds[GLUSTER_FD_READ], F_SETFL, O_NONBLOCK); > 
qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ], > - qemu_gluster_aio_event_reader, NULL, NULL, s); > + qemu_gluster_aio_event_reader, NULL, s); > > out: > qemu_opts_del(opts); > @@ -438,8 +438,7 @@ static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg) > qemu_aio_release(acb); > close(s->fds[GLUSTER_FD_READ]); > close(s->fds[GLUSTER_FD_WRITE]); > - qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ], NULL, NULL, NULL, > - NULL); > + qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ], NULL, NULL, NULL); > bs->drv = NULL; /* Make the disk inaccessible */ > qemu_mutex_unlock_iothread(); > } > @@ -551,7 +550,7 @@ static void qemu_gluster_close(BlockDriverState *bs) > > close(s->fds[GLUSTER_FD_READ]); > close(s->fds[GLUSTER_FD_WRITE]); > - qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ], NULL, NULL, NULL, NULL); > + qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ], NULL, NULL, NULL); > > if (s->fd) { > glfs_close(s->fd); > diff --git a/block/iscsi.c b/block/iscsi.c > index e2041ca..721c7d9 100644 > --- a/block/iscsi.c > +++ b/block/iscsi.c > @@ -160,7 +160,6 @@ iscsi_set_events(IscsiLun *iscsilun) > qemu_aio_set_fd_handler(iscsi_get_fd(iscsi), > iscsi_process_read, > (ev & POLLOUT) ? 
iscsi_process_write : NULL, > - NULL, > iscsilun); > > } > @@ -1176,7 +1175,7 @@ static void iscsi_close(BlockDriverState *bs) > qemu_del_timer(iscsilun->nop_timer); > qemu_free_timer(iscsilun->nop_timer); > } > - qemu_aio_set_fd_handler(iscsi_get_fd(iscsi), NULL, NULL, NULL, NULL); > + qemu_aio_set_fd_handler(iscsi_get_fd(iscsi), NULL, NULL, NULL); > iscsi_destroy_context(iscsi); > memset(iscsilun, 0, sizeof(IscsiLun)); > } > diff --git a/block/linux-aio.c b/block/linux-aio.c > index d9128f3..53434e2 100644 > --- a/block/linux-aio.c > +++ b/block/linux-aio.c > @@ -190,8 +190,7 @@ void *laio_init(void) > goto out_close_efd; > } > > - qemu_aio_set_event_notifier(&s->e, qemu_laio_completion_cb, > - NULL); > + qemu_aio_set_event_notifier(&s->e, qemu_laio_completion_cb); > > return s; > > diff --git a/block/nbd.c b/block/nbd.c > index 80d2b31..f5350fb 100644 > --- a/block/nbd.c > +++ b/block/nbd.c > @@ -325,8 +325,7 @@ static int nbd_co_send_request(BDRVNBDState *s, struct nbd_request *request, > > qemu_co_mutex_lock(&s->send_mutex); > s->send_coroutine = qemu_coroutine_self(); > - qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, nbd_restart_write, > - NULL, s); > + qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, nbd_restart_write, s); > if (qiov) { > if (!s->is_unix) { > socket_set_cork(s->sock, 1); > @@ -345,8 +344,7 @@ static int nbd_co_send_request(BDRVNBDState *s, struct nbd_request *request, > } else { > rc = nbd_send_request(s->sock, request); > } > - qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, NULL, > - NULL, s); > + qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, NULL, s); > s->send_coroutine = NULL; > qemu_co_mutex_unlock(&s->send_mutex); > return rc; > @@ -422,8 +420,7 @@ static int nbd_establish_connection(BlockDriverState *bs) > /* Now that we're connected, set the socket to be non-blocking and > * kick the reply mechanism. 
*/ > qemu_set_nonblock(sock); > - qemu_aio_set_fd_handler(sock, nbd_reply_ready, NULL, > - NULL, s); > + qemu_aio_set_fd_handler(sock, nbd_reply_ready, NULL, s); > > s->sock = sock; > s->size = size; > @@ -443,7 +440,7 @@ static void nbd_teardown_connection(BlockDriverState *bs) > request.len = 0; > nbd_send_request(s->sock, &request); > > - qemu_aio_set_fd_handler(s->sock, NULL, NULL, NULL, NULL); > + qemu_aio_set_fd_handler(s->sock, NULL, NULL, NULL); > closesocket(s->sock); > } > > diff --git a/block/rbd.c b/block/rbd.c > index 40e5d55..78b8564 100644 > --- a/block/rbd.c > +++ b/block/rbd.c > @@ -545,7 +545,7 @@ static int qemu_rbd_open(BlockDriverState *bs, QDict *options, int flags) > fcntl(s->fds[0], F_SETFL, O_NONBLOCK); > fcntl(s->fds[1], F_SETFL, O_NONBLOCK); > qemu_aio_set_fd_handler(s->fds[RBD_FD_READ], qemu_rbd_aio_event_reader, > - NULL, NULL, s); > + NULL, s); > > > qemu_opts_del(opts); > @@ -569,7 +569,7 @@ static void qemu_rbd_close(BlockDriverState *bs) > > close(s->fds[0]); > close(s->fds[1]); > - qemu_aio_set_fd_handler(s->fds[RBD_FD_READ], NULL, NULL, NULL, NULL); > + qemu_aio_set_fd_handler(s->fds[RBD_FD_READ], NULL, NULL, NULL); > > rbd_close(s->image); > rados_ioctx_destroy(s->io_ctx); > diff --git a/block/sheepdog.c b/block/sheepdog.c > index 66918c6..be2a876 100644 > --- a/block/sheepdog.c > +++ b/block/sheepdog.c > @@ -531,14 +531,14 @@ static coroutine_fn void do_co_req(void *opaque) > unsigned int *rlen = srco->rlen; > > co = qemu_coroutine_self(); > - qemu_aio_set_fd_handler(sockfd, NULL, restart_co_req, NULL, co); > + qemu_aio_set_fd_handler(sockfd, NULL, restart_co_req, co); > > ret = send_co_req(sockfd, hdr, data, wlen); > if (ret < 0) { > goto out; > } > > - qemu_aio_set_fd_handler(sockfd, restart_co_req, NULL, NULL, co); > + qemu_aio_set_fd_handler(sockfd, restart_co_req, NULL, co); > > ret = qemu_co_recv(sockfd, hdr, sizeof(*hdr)); > if (ret < sizeof(*hdr)) { > @@ -563,7 +563,7 @@ static coroutine_fn void do_co_req(void *opaque) > 
out: > /* there is at most one request for this sockfd, so it is safe to > * set each handler to NULL. */ > - qemu_aio_set_fd_handler(sockfd, NULL, NULL, NULL, NULL); > + qemu_aio_set_fd_handler(sockfd, NULL, NULL, NULL); > > srco->ret = ret; > srco->finished = true; > @@ -804,7 +804,7 @@ static int get_sheep_fd(BDRVSheepdogState *s) > return fd; > } > > - qemu_aio_set_fd_handler(fd, co_read_response, NULL, NULL, s); > + qemu_aio_set_fd_handler(fd, co_read_response, NULL, s); > return fd; > } > > @@ -1054,8 +1054,7 @@ static int coroutine_fn add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req, > > qemu_co_mutex_lock(&s->lock); > s->co_send = qemu_coroutine_self(); > - qemu_aio_set_fd_handler(s->fd, co_read_response, co_write_request, > - NULL, s); > + qemu_aio_set_fd_handler(s->fd, co_read_response, co_write_request, s); > socket_set_cork(s->fd, 1); > > /* send a header */ > @@ -1076,8 +1075,7 @@ static int coroutine_fn add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req, > } > > socket_set_cork(s->fd, 0); > - qemu_aio_set_fd_handler(s->fd, co_read_response, NULL, > - NULL, s); > + qemu_aio_set_fd_handler(s->fd, co_read_response, NULL, s); > qemu_co_mutex_unlock(&s->lock); > > return 0; > @@ -1335,7 +1333,7 @@ static int sd_open(BlockDriverState *bs, QDict *options, int flags) > g_free(buf); > return 0; > out: > - qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL, NULL); > + qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL); > if (s->fd >= 0) { > closesocket(s->fd); > } > @@ -1563,7 +1561,7 @@ static void sd_close(BlockDriverState *bs) > error_report("%s, %s", sd_strerror(rsp->result), s->name); > } > > - qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL, NULL); > + qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL); > closesocket(s->fd); > g_free(s->host_spec); > } > diff --git a/block/ssh.c b/block/ssh.c > index ed525cc..b78253f 100644 > --- a/block/ssh.c > +++ b/block/ssh.c > @@ -743,13 +743,13 @@ static coroutine_fn void set_fd_handler(BDRVSSHState *s) > 
DPRINTF("s->sock=%d rd_handler=%p wr_handler=%p", s->sock, > rd_handler, wr_handler); > > - qemu_aio_set_fd_handler(s->sock, rd_handler, wr_handler, NULL, co); > + qemu_aio_set_fd_handler(s->sock, rd_handler, wr_handler, co); > } > > static coroutine_fn void clear_fd_handler(BDRVSSHState *s) > { > DPRINTF("s->sock=%d", s->sock); > - qemu_aio_set_fd_handler(s->sock, NULL, NULL, NULL, NULL); > + qemu_aio_set_fd_handler(s->sock, NULL, NULL, NULL); > } > > /* A non-blocking call returned EAGAIN, so yield, ensuring the > diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c > index 9e6d32b..bd1b51a 100644 > --- a/hw/block/dataplane/virtio-blk.c > +++ b/hw/block/dataplane/virtio-blk.c > @@ -473,7 +473,7 @@ void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s) > exit(1); > } > s->host_notifier = *virtio_queue_get_host_notifier(vq); > - aio_set_event_notifier(s->ctx, &s->host_notifier, handle_notify, NULL); > + aio_set_event_notifier(s->ctx, &s->host_notifier, handle_notify); > > /* Set up ioqueue */ > ioq_init(&s->ioqueue, s->fd, REQ_MAX); > @@ -481,7 +481,7 @@ void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s) > ioq_put_iocb(&s->ioqueue, &s->requests[i].iocb); > } > s->io_notifier = *ioq_get_notifier(&s->ioqueue); > - aio_set_event_notifier(s->ctx, &s->io_notifier, handle_io, NULL); > + aio_set_event_notifier(s->ctx, &s->io_notifier, handle_io); > > s->started = true; > trace_virtio_blk_data_plane_start(s); > @@ -513,10 +513,10 @@ void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s) > qemu_thread_join(&s->thread); > } > > - aio_set_event_notifier(s->ctx, &s->io_notifier, NULL, NULL); > + aio_set_event_notifier(s->ctx, &s->io_notifier, NULL); > ioq_cleanup(&s->ioqueue); > > - aio_set_event_notifier(s->ctx, &s->host_notifier, NULL, NULL); > + aio_set_event_notifier(s->ctx, &s->host_notifier, NULL); > k->set_host_notifier(qbus->parent, 0, false); > > aio_context_unref(s->ctx); > diff --git a/include/block/aio.h b/include/block/aio.h > 
index 1836793..e17066b 100644 > --- a/include/block/aio.h > +++ b/include/block/aio.h > @@ -71,9 +71,6 @@ typedef struct AioContext { > struct ThreadPool *thread_pool; > } AioContext; > > -/* Returns 1 if there are still outstanding AIO requests; 0 otherwise */ > -typedef int (AioFlushEventNotifierHandler)(EventNotifier *e); > - > /** > * aio_context_new: Allocate a new AioContext. > * > @@ -191,9 +188,6 @@ bool aio_pending(AioContext *ctx); > bool aio_poll(AioContext *ctx, bool blocking); > > #ifdef CONFIG_POSIX > -/* Returns 1 if there are still outstanding AIO requests; 0 otherwise */ > -typedef int (AioFlushHandler)(void *opaque); > - > /* Register a file descriptor and associated callbacks. Behaves very similarly > * to qemu_set_fd_handler2. Unlike qemu_set_fd_handler2, these callbacks will > * be invoked when using qemu_aio_wait(). > @@ -205,7 +199,6 @@ void aio_set_fd_handler(AioContext *ctx, > int fd, > IOHandler *io_read, > IOHandler *io_write, > - AioFlushHandler *io_flush, > void *opaque); > #endif > > @@ -218,8 +211,7 @@ void aio_set_fd_handler(AioContext *ctx, > */ > void aio_set_event_notifier(AioContext *ctx, > EventNotifier *notifier, > - EventNotifierHandler *io_read, > - AioFlushEventNotifierHandler *io_flush); > + EventNotifierHandler *io_read); > > /* Return a GSource that lets the main loop poll the file descriptors attached > * to this AioContext. 
> @@ -233,14 +225,12 @@ struct ThreadPool *aio_get_thread_pool(AioContext *ctx); > > bool qemu_aio_wait(void); > void qemu_aio_set_event_notifier(EventNotifier *notifier, > - EventNotifierHandler *io_read, > - AioFlushEventNotifierHandler *io_flush); > + EventNotifierHandler *io_read); > > #ifdef CONFIG_POSIX > void qemu_aio_set_fd_handler(int fd, > IOHandler *io_read, > IOHandler *io_write, > - AioFlushHandler *io_flush, > void *opaque); > #endif > > diff --git a/main-loop.c b/main-loop.c > index cf36645..2581939 100644 > --- a/main-loop.c > +++ b/main-loop.c > @@ -488,17 +488,14 @@ bool qemu_aio_wait(void) > void qemu_aio_set_fd_handler(int fd, > IOHandler *io_read, > IOHandler *io_write, > - AioFlushHandler *io_flush, > void *opaque) > { > - aio_set_fd_handler(qemu_aio_context, fd, io_read, io_write, io_flush, > - opaque); > + aio_set_fd_handler(qemu_aio_context, fd, io_read, io_write, opaque); > } > #endif > > void qemu_aio_set_event_notifier(EventNotifier *notifier, > - EventNotifierHandler *io_read, > - AioFlushEventNotifierHandler *io_flush) > + EventNotifierHandler *io_read) > { > - aio_set_event_notifier(qemu_aio_context, notifier, io_read, io_flush); > + aio_set_event_notifier(qemu_aio_context, notifier, io_read); > } > diff --git a/tests/test-aio.c b/tests/test-aio.c > index 7b2892a..1ab5637 100644 > --- a/tests/test-aio.c > +++ b/tests/test-aio.c > @@ -233,11 +233,11 @@ static void test_set_event_notifier(void) > { > EventNotifierTestData data = { .n = 0, .active = 0 }; > event_notifier_init(&data.e, false); > - aio_set_event_notifier(ctx, &data.e, event_ready_cb, NULL); > + aio_set_event_notifier(ctx, &data.e, event_ready_cb); > g_assert(!aio_poll(ctx, false)); > g_assert_cmpint(data.n, ==, 0); > > - aio_set_event_notifier(ctx, &data.e, NULL, NULL); > + aio_set_event_notifier(ctx, &data.e, NULL); > g_assert(!aio_poll(ctx, false)); > g_assert_cmpint(data.n, ==, 0); > event_notifier_cleanup(&data.e); > @@ -247,7 +247,7 @@ static void 
test_wait_event_notifier(void) > { > EventNotifierTestData data = { .n = 0, .active = 1 }; > event_notifier_init(&data.e, false); > - aio_set_event_notifier(ctx, &data.e, event_ready_cb, NULL); > + aio_set_event_notifier(ctx, &data.e, event_ready_cb); > g_assert(!aio_poll(ctx, false)); > g_assert_cmpint(data.n, ==, 0); > g_assert_cmpint(data.active, ==, 1); > @@ -261,7 +261,7 @@ static void test_wait_event_notifier(void) > g_assert_cmpint(data.n, ==, 1); > g_assert_cmpint(data.active, ==, 0); > > - aio_set_event_notifier(ctx, &data.e, NULL, NULL); > + aio_set_event_notifier(ctx, &data.e, NULL); > g_assert(!aio_poll(ctx, false)); > g_assert_cmpint(data.n, ==, 1); > > @@ -272,7 +272,7 @@ static void test_flush_event_notifier(void) > { > EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true }; > event_notifier_init(&data.e, false); > - aio_set_event_notifier(ctx, &data.e, event_ready_cb, NULL); > + aio_set_event_notifier(ctx, &data.e, event_ready_cb); > g_assert(!aio_poll(ctx, false)); > g_assert_cmpint(data.n, ==, 0); > g_assert_cmpint(data.active, ==, 10); > @@ -288,7 +288,7 @@ static void test_flush_event_notifier(void) > g_assert_cmpint(data.active, ==, 0); > g_assert(!aio_poll(ctx, false)); > > - aio_set_event_notifier(ctx, &data.e, NULL, NULL); > + aio_set_event_notifier(ctx, &data.e, NULL); > g_assert(!aio_poll(ctx, false)); > event_notifier_cleanup(&data.e); > } > @@ -299,7 +299,7 @@ static void test_wait_event_notifier_noflush(void) > EventNotifierTestData dummy = { .n = 0, .active = 1 }; > > event_notifier_init(&data.e, false); > - aio_set_event_notifier(ctx, &data.e, event_ready_cb, NULL); > + aio_set_event_notifier(ctx, &data.e, event_ready_cb); > > g_assert(!aio_poll(ctx, false)); > g_assert_cmpint(data.n, ==, 0); > @@ -312,7 +312,7 @@ static void test_wait_event_notifier_noflush(void) > > /* An active event notifier forces aio_poll to look at EventNotifiers. 
*/ > event_notifier_init(&dummy.e, false); > - aio_set_event_notifier(ctx, &dummy.e, event_ready_cb, NULL); > + aio_set_event_notifier(ctx, &dummy.e, event_ready_cb); > > event_notifier_set(&data.e); > g_assert(aio_poll(ctx, false)); > @@ -332,10 +332,10 @@ static void test_wait_event_notifier_noflush(void) > g_assert_cmpint(dummy.n, ==, 1); > g_assert_cmpint(dummy.active, ==, 0); > > - aio_set_event_notifier(ctx, &dummy.e, NULL, NULL); > + aio_set_event_notifier(ctx, &dummy.e, NULL); > event_notifier_cleanup(&dummy.e); > > - aio_set_event_notifier(ctx, &data.e, NULL, NULL); > + aio_set_event_notifier(ctx, &data.e, NULL); > g_assert(!aio_poll(ctx, false)); > g_assert_cmpint(data.n, ==, 2); > > @@ -515,11 +515,11 @@ static void test_source_set_event_notifier(void) > { > EventNotifierTestData data = { .n = 0, .active = 0 }; > event_notifier_init(&data.e, false); > - aio_set_event_notifier(ctx, &data.e, event_ready_cb, NULL); > + aio_set_event_notifier(ctx, &data.e, event_ready_cb); > while (g_main_context_iteration(NULL, false)); > g_assert_cmpint(data.n, ==, 0); > > - aio_set_event_notifier(ctx, &data.e, NULL, NULL); > + aio_set_event_notifier(ctx, &data.e, NULL); > while (g_main_context_iteration(NULL, false)); > g_assert_cmpint(data.n, ==, 0); > event_notifier_cleanup(&data.e); > @@ -529,7 +529,7 @@ static void test_source_wait_event_notifier(void) > { > EventNotifierTestData data = { .n = 0, .active = 1 }; > event_notifier_init(&data.e, false); > - aio_set_event_notifier(ctx, &data.e, event_ready_cb, NULL); > + aio_set_event_notifier(ctx, &data.e, event_ready_cb); > g_assert(g_main_context_iteration(NULL, false)); > g_assert_cmpint(data.n, ==, 0); > g_assert_cmpint(data.active, ==, 1); > @@ -543,7 +543,7 @@ static void test_source_wait_event_notifier(void) > g_assert_cmpint(data.n, ==, 1); > g_assert_cmpint(data.active, ==, 0); > > - aio_set_event_notifier(ctx, &data.e, NULL, NULL); > + aio_set_event_notifier(ctx, &data.e, NULL); > while 
(g_main_context_iteration(NULL, false)); > g_assert_cmpint(data.n, ==, 1); > > @@ -554,7 +554,7 @@ static void test_source_flush_event_notifier(void) > { > EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true }; > event_notifier_init(&data.e, false); > - aio_set_event_notifier(ctx, &data.e, event_ready_cb, NULL); > + aio_set_event_notifier(ctx, &data.e, event_ready_cb); > g_assert(g_main_context_iteration(NULL, false)); > g_assert_cmpint(data.n, ==, 0); > g_assert_cmpint(data.active, ==, 10); > @@ -570,7 +570,7 @@ static void test_source_flush_event_notifier(void) > g_assert_cmpint(data.active, ==, 0); > g_assert(!g_main_context_iteration(NULL, false)); > > - aio_set_event_notifier(ctx, &data.e, NULL, NULL); > + aio_set_event_notifier(ctx, &data.e, NULL); > while (g_main_context_iteration(NULL, false)); > event_notifier_cleanup(&data.e); > } > @@ -581,7 +581,7 @@ static void test_source_wait_event_notifier_noflush(void) > EventNotifierTestData dummy = { .n = 0, .active = 1 }; > > event_notifier_init(&data.e, false); > - aio_set_event_notifier(ctx, &data.e, event_ready_cb, NULL); > + aio_set_event_notifier(ctx, &data.e, event_ready_cb); > > while (g_main_context_iteration(NULL, false)); > g_assert_cmpint(data.n, ==, 0); > @@ -594,7 +594,7 @@ static void test_source_wait_event_notifier_noflush(void) > > /* An active event notifier forces aio_poll to look at EventNotifiers. 
*/ > event_notifier_init(&dummy.e, false); > - aio_set_event_notifier(ctx, &dummy.e, event_ready_cb, NULL); > + aio_set_event_notifier(ctx, &dummy.e, event_ready_cb); > > event_notifier_set(&data.e); > g_assert(g_main_context_iteration(NULL, false)); > @@ -614,10 +614,10 @@ static void test_source_wait_event_notifier_noflush(void) > g_assert_cmpint(dummy.n, ==, 1); > g_assert_cmpint(dummy.active, ==, 0); > > - aio_set_event_notifier(ctx, &dummy.e, NULL, NULL); > + aio_set_event_notifier(ctx, &dummy.e, NULL); > event_notifier_cleanup(&dummy.e); > > - aio_set_event_notifier(ctx, &data.e, NULL, NULL); > + aio_set_event_notifier(ctx, &data.e, NULL); > while (g_main_context_iteration(NULL, false)); > g_assert_cmpint(data.n, ==, 2); > > diff --git a/thread-pool.c b/thread-pool.c > index 096f007..5025567 100644 > --- a/thread-pool.c > +++ b/thread-pool.c > @@ -303,8 +303,7 @@ static void thread_pool_init_one(ThreadPool *pool, AioContext *ctx) > QLIST_INIT(&pool->head); > QTAILQ_INIT(&pool->request_list); > > - aio_set_event_notifier(ctx, &pool->notifier, event_notifier_ready, > - NULL); > + aio_set_event_notifier(ctx, &pool->notifier, event_notifier_ready); > } > > ThreadPool *thread_pool_new(AioContext *ctx) > @@ -338,7 +337,7 @@ void thread_pool_free(ThreadPool *pool) > > qemu_mutex_unlock(&pool->lock); > > - aio_set_event_notifier(pool->ctx, &pool->notifier, NULL, NULL); > + aio_set_event_notifier(pool->ctx, &pool->notifier, NULL); > qemu_sem_destroy(&pool->sem); > qemu_cond_destroy(&pool->check_cancel); > qemu_cond_destroy(&pool->worker_stopped); > Yay! Reviewed-by: Paolo Bonzini <pbonzini@redhat.com> Paolo
diff --git a/aio-posix.c b/aio-posix.c index 7d66048..2440eb9 100644 --- a/aio-posix.c +++ b/aio-posix.c @@ -46,7 +46,6 @@ void aio_set_fd_handler(AioContext *ctx, int fd, IOHandler *io_read, IOHandler *io_write, - AioFlushHandler *io_flush, void *opaque) { AioHandler *node; @@ -95,12 +94,10 @@ void aio_set_fd_handler(AioContext *ctx, void aio_set_event_notifier(AioContext *ctx, EventNotifier *notifier, - EventNotifierHandler *io_read, - AioFlushEventNotifierHandler *io_flush) + EventNotifierHandler *io_read) { aio_set_fd_handler(ctx, event_notifier_get_fd(notifier), - (IOHandler *)io_read, NULL, - (AioFlushHandler *)io_flush, notifier); + (IOHandler *)io_read, NULL, notifier); } bool aio_pending(AioContext *ctx) diff --git a/aio-win32.c b/aio-win32.c index 4309c16..78b2801 100644 --- a/aio-win32.c +++ b/aio-win32.c @@ -30,8 +30,7 @@ struct AioHandler { void aio_set_event_notifier(AioContext *ctx, EventNotifier *e, - EventNotifierHandler *io_notify, - AioFlushEventNotifierHandler *io_flush) + EventNotifierHandler *io_notify) { AioHandler *node; diff --git a/async.c b/async.c index 90fe906..fe2c8bf 100644 --- a/async.c +++ b/async.c @@ -174,7 +174,7 @@ aio_ctx_finalize(GSource *source) AioContext *ctx = (AioContext *) source; thread_pool_free(ctx->thread_pool); - aio_set_event_notifier(ctx, &ctx->notifier, NULL, NULL); + aio_set_event_notifier(ctx, &ctx->notifier, NULL); event_notifier_cleanup(&ctx->notifier); g_array_free(ctx->pollfds, TRUE); } @@ -214,7 +214,7 @@ AioContext *aio_context_new(void) event_notifier_init(&ctx->notifier, false); aio_set_event_notifier(ctx, &ctx->notifier, (EventNotifierHandler *) - event_notifier_test_and_clear, NULL); + event_notifier_test_and_clear); return ctx; } diff --git a/block/curl.c b/block/curl.c index 2147076..e88621a 100644 --- a/block/curl.c +++ b/block/curl.c @@ -92,17 +92,16 @@ static int curl_sock_cb(CURL *curl, curl_socket_t fd, int action, DPRINTF("CURL (AIO): Sock action %d on fd %d\n", action, fd); switch (action) { 
case CURL_POLL_IN: - qemu_aio_set_fd_handler(fd, curl_multi_do, NULL, NULL, s); + qemu_aio_set_fd_handler(fd, curl_multi_do, NULL, s); break; case CURL_POLL_OUT: - qemu_aio_set_fd_handler(fd, NULL, curl_multi_do, NULL, s); + qemu_aio_set_fd_handler(fd, NULL, curl_multi_do, s); break; case CURL_POLL_INOUT: - qemu_aio_set_fd_handler(fd, curl_multi_do, curl_multi_do, - NULL, s); + qemu_aio_set_fd_handler(fd, curl_multi_do, curl_multi_do, s); break; case CURL_POLL_REMOVE: - qemu_aio_set_fd_handler(fd, NULL, NULL, NULL, NULL); + qemu_aio_set_fd_handler(fd, NULL, NULL, NULL); break; } diff --git a/block/gluster.c b/block/gluster.c index 7a69a12..3cff308 100644 --- a/block/gluster.c +++ b/block/gluster.c @@ -339,7 +339,7 @@ static int qemu_gluster_open(BlockDriverState *bs, QDict *options, } fcntl(s->fds[GLUSTER_FD_READ], F_SETFL, O_NONBLOCK); qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ], - qemu_gluster_aio_event_reader, NULL, NULL, s); + qemu_gluster_aio_event_reader, NULL, s); out: qemu_opts_del(opts); @@ -438,8 +438,7 @@ static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg) qemu_aio_release(acb); close(s->fds[GLUSTER_FD_READ]); close(s->fds[GLUSTER_FD_WRITE]); - qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ], NULL, NULL, NULL, - NULL); + qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ], NULL, NULL, NULL); bs->drv = NULL; /* Make the disk inaccessible */ qemu_mutex_unlock_iothread(); } @@ -551,7 +550,7 @@ static void qemu_gluster_close(BlockDriverState *bs) close(s->fds[GLUSTER_FD_READ]); close(s->fds[GLUSTER_FD_WRITE]); - qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ], NULL, NULL, NULL, NULL); + qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ], NULL, NULL, NULL); if (s->fd) { glfs_close(s->fd); diff --git a/block/iscsi.c b/block/iscsi.c index e2041ca..721c7d9 100644 --- a/block/iscsi.c +++ b/block/iscsi.c @@ -160,7 +160,6 @@ iscsi_set_events(IscsiLun *iscsilun) qemu_aio_set_fd_handler(iscsi_get_fd(iscsi), iscsi_process_read, (ev & POLLOUT) ? 
iscsi_process_write : NULL, - NULL, iscsilun); } @@ -1176,7 +1175,7 @@ static void iscsi_close(BlockDriverState *bs) qemu_del_timer(iscsilun->nop_timer); qemu_free_timer(iscsilun->nop_timer); } - qemu_aio_set_fd_handler(iscsi_get_fd(iscsi), NULL, NULL, NULL, NULL); + qemu_aio_set_fd_handler(iscsi_get_fd(iscsi), NULL, NULL, NULL); iscsi_destroy_context(iscsi); memset(iscsilun, 0, sizeof(IscsiLun)); } diff --git a/block/linux-aio.c b/block/linux-aio.c index d9128f3..53434e2 100644 --- a/block/linux-aio.c +++ b/block/linux-aio.c @@ -190,8 +190,7 @@ void *laio_init(void) goto out_close_efd; } - qemu_aio_set_event_notifier(&s->e, qemu_laio_completion_cb, - NULL); + qemu_aio_set_event_notifier(&s->e, qemu_laio_completion_cb); return s; diff --git a/block/nbd.c b/block/nbd.c index 80d2b31..f5350fb 100644 --- a/block/nbd.c +++ b/block/nbd.c @@ -325,8 +325,7 @@ static int nbd_co_send_request(BDRVNBDState *s, struct nbd_request *request, qemu_co_mutex_lock(&s->send_mutex); s->send_coroutine = qemu_coroutine_self(); - qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, nbd_restart_write, - NULL, s); + qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, nbd_restart_write, s); if (qiov) { if (!s->is_unix) { socket_set_cork(s->sock, 1); @@ -345,8 +344,7 @@ static int nbd_co_send_request(BDRVNBDState *s, struct nbd_request *request, } else { rc = nbd_send_request(s->sock, request); } - qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, NULL, - NULL, s); + qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, NULL, s); s->send_coroutine = NULL; qemu_co_mutex_unlock(&s->send_mutex); return rc; @@ -422,8 +420,7 @@ static int nbd_establish_connection(BlockDriverState *bs) /* Now that we're connected, set the socket to be non-blocking and * kick the reply mechanism. 
*/ qemu_set_nonblock(sock); - qemu_aio_set_fd_handler(sock, nbd_reply_ready, NULL, - NULL, s); + qemu_aio_set_fd_handler(sock, nbd_reply_ready, NULL, s); s->sock = sock; s->size = size; @@ -443,7 +440,7 @@ static void nbd_teardown_connection(BlockDriverState *bs) request.len = 0; nbd_send_request(s->sock, &request); - qemu_aio_set_fd_handler(s->sock, NULL, NULL, NULL, NULL); + qemu_aio_set_fd_handler(s->sock, NULL, NULL, NULL); closesocket(s->sock); } diff --git a/block/rbd.c b/block/rbd.c index 40e5d55..78b8564 100644 --- a/block/rbd.c +++ b/block/rbd.c @@ -545,7 +545,7 @@ static int qemu_rbd_open(BlockDriverState *bs, QDict *options, int flags) fcntl(s->fds[0], F_SETFL, O_NONBLOCK); fcntl(s->fds[1], F_SETFL, O_NONBLOCK); qemu_aio_set_fd_handler(s->fds[RBD_FD_READ], qemu_rbd_aio_event_reader, - NULL, NULL, s); + NULL, s); qemu_opts_del(opts); @@ -569,7 +569,7 @@ static void qemu_rbd_close(BlockDriverState *bs) close(s->fds[0]); close(s->fds[1]); - qemu_aio_set_fd_handler(s->fds[RBD_FD_READ], NULL, NULL, NULL, NULL); + qemu_aio_set_fd_handler(s->fds[RBD_FD_READ], NULL, NULL, NULL); rbd_close(s->image); rados_ioctx_destroy(s->io_ctx); diff --git a/block/sheepdog.c b/block/sheepdog.c index 66918c6..be2a876 100644 --- a/block/sheepdog.c +++ b/block/sheepdog.c @@ -531,14 +531,14 @@ static coroutine_fn void do_co_req(void *opaque) unsigned int *rlen = srco->rlen; co = qemu_coroutine_self(); - qemu_aio_set_fd_handler(sockfd, NULL, restart_co_req, NULL, co); + qemu_aio_set_fd_handler(sockfd, NULL, restart_co_req, co); ret = send_co_req(sockfd, hdr, data, wlen); if (ret < 0) { goto out; } - qemu_aio_set_fd_handler(sockfd, restart_co_req, NULL, NULL, co); + qemu_aio_set_fd_handler(sockfd, restart_co_req, NULL, co); ret = qemu_co_recv(sockfd, hdr, sizeof(*hdr)); if (ret < sizeof(*hdr)) { @@ -563,7 +563,7 @@ static coroutine_fn void do_co_req(void *opaque) out: /* there is at most one request for this sockfd, so it is safe to * set each handler to NULL. 
*/ - qemu_aio_set_fd_handler(sockfd, NULL, NULL, NULL, NULL); + qemu_aio_set_fd_handler(sockfd, NULL, NULL, NULL); srco->ret = ret; srco->finished = true; @@ -804,7 +804,7 @@ static int get_sheep_fd(BDRVSheepdogState *s) return fd; } - qemu_aio_set_fd_handler(fd, co_read_response, NULL, NULL, s); + qemu_aio_set_fd_handler(fd, co_read_response, NULL, s); return fd; } @@ -1054,8 +1054,7 @@ static int coroutine_fn add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req, qemu_co_mutex_lock(&s->lock); s->co_send = qemu_coroutine_self(); - qemu_aio_set_fd_handler(s->fd, co_read_response, co_write_request, - NULL, s); + qemu_aio_set_fd_handler(s->fd, co_read_response, co_write_request, s); socket_set_cork(s->fd, 1); /* send a header */ @@ -1076,8 +1075,7 @@ static int coroutine_fn add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req, } socket_set_cork(s->fd, 0); - qemu_aio_set_fd_handler(s->fd, co_read_response, NULL, - NULL, s); + qemu_aio_set_fd_handler(s->fd, co_read_response, NULL, s); qemu_co_mutex_unlock(&s->lock); return 0; @@ -1335,7 +1333,7 @@ static int sd_open(BlockDriverState *bs, QDict *options, int flags) g_free(buf); return 0; out: - qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL, NULL); + qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL); if (s->fd >= 0) { closesocket(s->fd); } @@ -1563,7 +1561,7 @@ static void sd_close(BlockDriverState *bs) error_report("%s, %s", sd_strerror(rsp->result), s->name); } - qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL, NULL); + qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL); closesocket(s->fd); g_free(s->host_spec); } diff --git a/block/ssh.c b/block/ssh.c index ed525cc..b78253f 100644 --- a/block/ssh.c +++ b/block/ssh.c @@ -743,13 +743,13 @@ static coroutine_fn void set_fd_handler(BDRVSSHState *s) DPRINTF("s->sock=%d rd_handler=%p wr_handler=%p", s->sock, rd_handler, wr_handler); - qemu_aio_set_fd_handler(s->sock, rd_handler, wr_handler, NULL, co); + qemu_aio_set_fd_handler(s->sock, rd_handler, wr_handler, co); } static 
coroutine_fn void clear_fd_handler(BDRVSSHState *s) { DPRINTF("s->sock=%d", s->sock); - qemu_aio_set_fd_handler(s->sock, NULL, NULL, NULL, NULL); + qemu_aio_set_fd_handler(s->sock, NULL, NULL, NULL); } /* A non-blocking call returned EAGAIN, so yield, ensuring the diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c index 9e6d32b..bd1b51a 100644 --- a/hw/block/dataplane/virtio-blk.c +++ b/hw/block/dataplane/virtio-blk.c @@ -473,7 +473,7 @@ void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s) exit(1); } s->host_notifier = *virtio_queue_get_host_notifier(vq); - aio_set_event_notifier(s->ctx, &s->host_notifier, handle_notify, NULL); + aio_set_event_notifier(s->ctx, &s->host_notifier, handle_notify); /* Set up ioqueue */ ioq_init(&s->ioqueue, s->fd, REQ_MAX); @@ -481,7 +481,7 @@ void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s) ioq_put_iocb(&s->ioqueue, &s->requests[i].iocb); } s->io_notifier = *ioq_get_notifier(&s->ioqueue); - aio_set_event_notifier(s->ctx, &s->io_notifier, handle_io, NULL); + aio_set_event_notifier(s->ctx, &s->io_notifier, handle_io); s->started = true; trace_virtio_blk_data_plane_start(s); @@ -513,10 +513,10 @@ void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s) qemu_thread_join(&s->thread); } - aio_set_event_notifier(s->ctx, &s->io_notifier, NULL, NULL); + aio_set_event_notifier(s->ctx, &s->io_notifier, NULL); ioq_cleanup(&s->ioqueue); - aio_set_event_notifier(s->ctx, &s->host_notifier, NULL, NULL); + aio_set_event_notifier(s->ctx, &s->host_notifier, NULL); k->set_host_notifier(qbus->parent, 0, false); aio_context_unref(s->ctx); diff --git a/include/block/aio.h b/include/block/aio.h index 1836793..e17066b 100644 --- a/include/block/aio.h +++ b/include/block/aio.h @@ -71,9 +71,6 @@ typedef struct AioContext { struct ThreadPool *thread_pool; } AioContext; -/* Returns 1 if there are still outstanding AIO requests; 0 otherwise */ -typedef int (AioFlushEventNotifierHandler)(EventNotifier *e); - /** * 
aio_context_new: Allocate a new AioContext. * @@ -191,9 +188,6 @@ bool aio_pending(AioContext *ctx); bool aio_poll(AioContext *ctx, bool blocking); #ifdef CONFIG_POSIX -/* Returns 1 if there are still outstanding AIO requests; 0 otherwise */ -typedef int (AioFlushHandler)(void *opaque); - /* Register a file descriptor and associated callbacks. Behaves very similarly * to qemu_set_fd_handler2. Unlike qemu_set_fd_handler2, these callbacks will * be invoked when using qemu_aio_wait(). @@ -205,7 +199,6 @@ void aio_set_fd_handler(AioContext *ctx, int fd, IOHandler *io_read, IOHandler *io_write, - AioFlushHandler *io_flush, void *opaque); #endif @@ -218,8 +211,7 @@ void aio_set_fd_handler(AioContext *ctx, */ void aio_set_event_notifier(AioContext *ctx, EventNotifier *notifier, - EventNotifierHandler *io_read, - AioFlushEventNotifierHandler *io_flush); + EventNotifierHandler *io_read); /* Return a GSource that lets the main loop poll the file descriptors attached * to this AioContext. @@ -233,14 +225,12 @@ struct ThreadPool *aio_get_thread_pool(AioContext *ctx); bool qemu_aio_wait(void); void qemu_aio_set_event_notifier(EventNotifier *notifier, - EventNotifierHandler *io_read, - AioFlushEventNotifierHandler *io_flush); + EventNotifierHandler *io_read); #ifdef CONFIG_POSIX void qemu_aio_set_fd_handler(int fd, IOHandler *io_read, IOHandler *io_write, - AioFlushHandler *io_flush, void *opaque); #endif diff --git a/main-loop.c b/main-loop.c index cf36645..2581939 100644 --- a/main-loop.c +++ b/main-loop.c @@ -488,17 +488,14 @@ bool qemu_aio_wait(void) void qemu_aio_set_fd_handler(int fd, IOHandler *io_read, IOHandler *io_write, - AioFlushHandler *io_flush, void *opaque) { - aio_set_fd_handler(qemu_aio_context, fd, io_read, io_write, io_flush, - opaque); + aio_set_fd_handler(qemu_aio_context, fd, io_read, io_write, opaque); } #endif void qemu_aio_set_event_notifier(EventNotifier *notifier, - EventNotifierHandler *io_read, - AioFlushEventNotifierHandler *io_flush) + 
EventNotifierHandler *io_read) { - aio_set_event_notifier(qemu_aio_context, notifier, io_read, io_flush); + aio_set_event_notifier(qemu_aio_context, notifier, io_read); } diff --git a/tests/test-aio.c b/tests/test-aio.c index 7b2892a..1ab5637 100644 --- a/tests/test-aio.c +++ b/tests/test-aio.c @@ -233,11 +233,11 @@ static void test_set_event_notifier(void) { EventNotifierTestData data = { .n = 0, .active = 0 }; event_notifier_init(&data.e, false); - aio_set_event_notifier(ctx, &data.e, event_ready_cb, NULL); + aio_set_event_notifier(ctx, &data.e, event_ready_cb); g_assert(!aio_poll(ctx, false)); g_assert_cmpint(data.n, ==, 0); - aio_set_event_notifier(ctx, &data.e, NULL, NULL); + aio_set_event_notifier(ctx, &data.e, NULL); g_assert(!aio_poll(ctx, false)); g_assert_cmpint(data.n, ==, 0); event_notifier_cleanup(&data.e); @@ -247,7 +247,7 @@ static void test_wait_event_notifier(void) { EventNotifierTestData data = { .n = 0, .active = 1 }; event_notifier_init(&data.e, false); - aio_set_event_notifier(ctx, &data.e, event_ready_cb, NULL); + aio_set_event_notifier(ctx, &data.e, event_ready_cb); g_assert(!aio_poll(ctx, false)); g_assert_cmpint(data.n, ==, 0); g_assert_cmpint(data.active, ==, 1); @@ -261,7 +261,7 @@ static void test_wait_event_notifier(void) g_assert_cmpint(data.n, ==, 1); g_assert_cmpint(data.active, ==, 0); - aio_set_event_notifier(ctx, &data.e, NULL, NULL); + aio_set_event_notifier(ctx, &data.e, NULL); g_assert(!aio_poll(ctx, false)); g_assert_cmpint(data.n, ==, 1); @@ -272,7 +272,7 @@ static void test_flush_event_notifier(void) { EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true }; event_notifier_init(&data.e, false); - aio_set_event_notifier(ctx, &data.e, event_ready_cb, NULL); + aio_set_event_notifier(ctx, &data.e, event_ready_cb); g_assert(!aio_poll(ctx, false)); g_assert_cmpint(data.n, ==, 0); g_assert_cmpint(data.active, ==, 10); @@ -288,7 +288,7 @@ static void test_flush_event_notifier(void) g_assert_cmpint(data.active, ==, 
0); g_assert(!aio_poll(ctx, false)); - aio_set_event_notifier(ctx, &data.e, NULL, NULL); + aio_set_event_notifier(ctx, &data.e, NULL); g_assert(!aio_poll(ctx, false)); event_notifier_cleanup(&data.e); } @@ -299,7 +299,7 @@ static void test_wait_event_notifier_noflush(void) EventNotifierTestData dummy = { .n = 0, .active = 1 }; event_notifier_init(&data.e, false); - aio_set_event_notifier(ctx, &data.e, event_ready_cb, NULL); + aio_set_event_notifier(ctx, &data.e, event_ready_cb); g_assert(!aio_poll(ctx, false)); g_assert_cmpint(data.n, ==, 0); @@ -312,7 +312,7 @@ static void test_wait_event_notifier_noflush(void) /* An active event notifier forces aio_poll to look at EventNotifiers. */ event_notifier_init(&dummy.e, false); - aio_set_event_notifier(ctx, &dummy.e, event_ready_cb, NULL); + aio_set_event_notifier(ctx, &dummy.e, event_ready_cb); event_notifier_set(&data.e); g_assert(aio_poll(ctx, false)); @@ -332,10 +332,10 @@ static void test_wait_event_notifier_noflush(void) g_assert_cmpint(dummy.n, ==, 1); g_assert_cmpint(dummy.active, ==, 0); - aio_set_event_notifier(ctx, &dummy.e, NULL, NULL); + aio_set_event_notifier(ctx, &dummy.e, NULL); event_notifier_cleanup(&dummy.e); - aio_set_event_notifier(ctx, &data.e, NULL, NULL); + aio_set_event_notifier(ctx, &data.e, NULL); g_assert(!aio_poll(ctx, false)); g_assert_cmpint(data.n, ==, 2); @@ -515,11 +515,11 @@ static void test_source_set_event_notifier(void) { EventNotifierTestData data = { .n = 0, .active = 0 }; event_notifier_init(&data.e, false); - aio_set_event_notifier(ctx, &data.e, event_ready_cb, NULL); + aio_set_event_notifier(ctx, &data.e, event_ready_cb); while (g_main_context_iteration(NULL, false)); g_assert_cmpint(data.n, ==, 0); - aio_set_event_notifier(ctx, &data.e, NULL, NULL); + aio_set_event_notifier(ctx, &data.e, NULL); while (g_main_context_iteration(NULL, false)); g_assert_cmpint(data.n, ==, 0); event_notifier_cleanup(&data.e); @@ -529,7 +529,7 @@ static void test_source_wait_event_notifier(void) { 
EventNotifierTestData data = { .n = 0, .active = 1 }; event_notifier_init(&data.e, false); - aio_set_event_notifier(ctx, &data.e, event_ready_cb, NULL); + aio_set_event_notifier(ctx, &data.e, event_ready_cb); g_assert(g_main_context_iteration(NULL, false)); g_assert_cmpint(data.n, ==, 0); g_assert_cmpint(data.active, ==, 1); @@ -543,7 +543,7 @@ static void test_source_wait_event_notifier(void) g_assert_cmpint(data.n, ==, 1); g_assert_cmpint(data.active, ==, 0); - aio_set_event_notifier(ctx, &data.e, NULL, NULL); + aio_set_event_notifier(ctx, &data.e, NULL); while (g_main_context_iteration(NULL, false)); g_assert_cmpint(data.n, ==, 1); @@ -554,7 +554,7 @@ static void test_source_flush_event_notifier(void) { EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true }; event_notifier_init(&data.e, false); - aio_set_event_notifier(ctx, &data.e, event_ready_cb, NULL); + aio_set_event_notifier(ctx, &data.e, event_ready_cb); g_assert(g_main_context_iteration(NULL, false)); g_assert_cmpint(data.n, ==, 0); g_assert_cmpint(data.active, ==, 10); @@ -570,7 +570,7 @@ static void test_source_flush_event_notifier(void) g_assert_cmpint(data.active, ==, 0); g_assert(!g_main_context_iteration(NULL, false)); - aio_set_event_notifier(ctx, &data.e, NULL, NULL); + aio_set_event_notifier(ctx, &data.e, NULL); while (g_main_context_iteration(NULL, false)); event_notifier_cleanup(&data.e); } @@ -581,7 +581,7 @@ static void test_source_wait_event_notifier_noflush(void) EventNotifierTestData dummy = { .n = 0, .active = 1 }; event_notifier_init(&data.e, false); - aio_set_event_notifier(ctx, &data.e, event_ready_cb, NULL); + aio_set_event_notifier(ctx, &data.e, event_ready_cb); while (g_main_context_iteration(NULL, false)); g_assert_cmpint(data.n, ==, 0); @@ -594,7 +594,7 @@ static void test_source_wait_event_notifier_noflush(void) /* An active event notifier forces aio_poll to look at EventNotifiers. 
*/ event_notifier_init(&dummy.e, false); - aio_set_event_notifier(ctx, &dummy.e, event_ready_cb, NULL); + aio_set_event_notifier(ctx, &dummy.e, event_ready_cb); event_notifier_set(&data.e); g_assert(g_main_context_iteration(NULL, false)); @@ -614,10 +614,10 @@ static void test_source_wait_event_notifier_noflush(void) g_assert_cmpint(dummy.n, ==, 1); g_assert_cmpint(dummy.active, ==, 0); - aio_set_event_notifier(ctx, &dummy.e, NULL, NULL); + aio_set_event_notifier(ctx, &dummy.e, NULL); event_notifier_cleanup(&dummy.e); - aio_set_event_notifier(ctx, &data.e, NULL, NULL); + aio_set_event_notifier(ctx, &data.e, NULL); while (g_main_context_iteration(NULL, false)); g_assert_cmpint(data.n, ==, 2); diff --git a/thread-pool.c b/thread-pool.c index 096f007..5025567 100644 --- a/thread-pool.c +++ b/thread-pool.c @@ -303,8 +303,7 @@ static void thread_pool_init_one(ThreadPool *pool, AioContext *ctx) QLIST_INIT(&pool->head); QTAILQ_INIT(&pool->request_list); - aio_set_event_notifier(ctx, &pool->notifier, event_notifier_ready, - NULL); + aio_set_event_notifier(ctx, &pool->notifier, event_notifier_ready); } ThreadPool *thread_pool_new(AioContext *ctx) @@ -338,7 +337,7 @@ void thread_pool_free(ThreadPool *pool) qemu_mutex_unlock(&pool->lock); - aio_set_event_notifier(pool->ctx, &pool->notifier, NULL, NULL); + aio_set_event_notifier(pool->ctx, &pool->notifier, NULL); qemu_sem_destroy(&pool->sem); qemu_cond_destroy(&pool->check_cancel); qemu_cond_destroy(&pool->worker_stopped);
The .io_flush() handler no longer exists and has no users. Drop the io_flush argument to aio_set_fd_handler() and related functions. The AioFlushEventNotifierHandler and AioFlushHandler typedefs are no longer used and are dropped too. Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> --- aio-posix.c | 7 ++----- aio-win32.c | 3 +-- async.c | 4 ++-- block/curl.c | 9 ++++----- block/gluster.c | 7 +++---- block/iscsi.c | 3 +-- block/linux-aio.c | 3 +-- block/nbd.c | 11 ++++------- block/rbd.c | 4 ++-- block/sheepdog.c | 18 ++++++++---------- block/ssh.c | 4 ++-- hw/block/dataplane/virtio-blk.c | 8 ++++---- include/block/aio.h | 14 ++------------ main-loop.c | 9 +++------ tests/test-aio.c | 40 ++++++++++++++++++++-------------------- thread-pool.c | 5 ++--- 16 files changed, 61 insertions(+), 88 deletions(-)