@@ -38,6 +38,7 @@ struct QEMUBH {
bool scheduled;
bool idle;
bool deleted;
+ bool canceled;
};
QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
@@ -69,8 +70,15 @@ int aio_bh_poll(AioContext *ctx)
/* Make sure that fetching bh happens before accessing its members */
smp_read_barrier_depends();
next = bh->next;
- if (!bh->deleted && bh->scheduled) {
+ if (bh->scheduled) {
bh->scheduled = 0;
+ if (unlikely(bh->deleted)) {
+ continue;
+ }
+ if (unlikely(bh->canceled)) {
+ bh->canceled = 0;
+ continue;
+ }
/* Paired with write barrier in bh schedule to ensure reading for
* idle & callbacks coming after bh's scheduling.
*/
@@ -133,7 +141,7 @@ void qemu_bh_schedule(QEMUBH *bh)
*/
void qemu_bh_cancel(QEMUBH *bh)
{
- bh->scheduled = 0;
+ bh->canceled = 1;
}
/* This func is async. The bottom half will do the delete action at the final
@@ -141,7 +149,6 @@ void qemu_bh_cancel(QEMUBH *bh)
*/
void qemu_bh_delete(QEMUBH *bh)
{
- bh->scheduled = 0;
bh->deleted = 1;
}
@@ -152,7 +159,7 @@ aio_ctx_prepare(GSource *source, gint *timeout)
QEMUBH *bh;
for (bh = ctx->first_bh; bh; bh = bh->next) {
- if (!bh->deleted && bh->scheduled) {
+ if (!bh->deleted && !bh->canceled && bh->scheduled) {
if (bh->idle) {
/* idle bottom halves will be polled at least
* every 10ms */
@@ -176,7 +183,7 @@ aio_ctx_check(GSource *source)
QEMUBH *bh;
for (bh = ctx->first_bh; bh; bh = bh->next) {
- if (!bh->deleted && bh->scheduled) {
+ if (!bh->deleted && !bh->canceled && bh->scheduled) {
return true;
}
}
When dismissing a bh's scheduling (delete/cancel), we need to reset
bh->scheduled to zero and release the refcount of the object the bh
refers to (the refcounting will be introduced by the next patch).
Currently bh->scheduled is reset to zero by several writers
(qemu_bh_cancel(), qemu_bh_delete() and aio_bh_poll() itself), so
atomic ops would have to be involved, which results in expensive
memory barriers.  With this patch, bh->scheduled is only reset by
aio_bh_poll().

Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
---
 async.c | 17 ++++++++++++-----
 1 file changed, 12 insertions(+), 5 deletions(-)
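A note for reviewers: the single-writer scheme is easier to see in
isolation. Below is a minimal, single-threaded C sketch of the idea
this patch implements (not the QEMU code; struct bh, bh_cancel,
bh_delete, bh_poll_one and hello are invented names for
illustration): consumers only set their own one-shot flag, and the
poller is the sole writer that clears bh->scheduled.

#include <stdbool.h>
#include <stdio.h>

struct bh {
    bool scheduled;
    bool deleted;
    bool canceled;
};

/* Writers other than the poller never touch 'scheduled'; each one
 * only sets its own dedicated flag. */
static void bh_cancel(struct bh *bh) { bh->canceled = true; }
static void bh_delete(struct bh *bh) { bh->deleted = true; }

/* The poller is the sole clearer of 'scheduled': it clears the flag
 * first, then honors any pending delete/cancel request, mirroring
 * the new aio_bh_poll() ordering in this patch. */
static bool bh_poll_one(struct bh *bh, void (*cb)(void))
{
    if (!bh->scheduled) {
        return false;
    }
    bh->scheduled = false;
    if (bh->deleted) {
        return false;              /* reclamation would happen here */
    }
    if (bh->canceled) {
        bh->canceled = false;      /* cancellation is one-shot */
        return false;
    }
    cb();
    return true;
}

static void hello(void) { puts("bh ran"); }

int main(void)
{
    struct bh bh = { .scheduled = true };

    bh_cancel(&bh);
    bh_poll_one(&bh, hello);       /* skipped: pending cancel consumed */

    bh.scheduled = true;
    bh_poll_one(&bh, hello);       /* runs: the cancel was one-shot */

    bh.scheduled = true;
    bh_delete(&bh);
    bh_poll_one(&bh, hello);       /* skipped: bh is marked deleted */
    return 0;
}

Because each flag now has exactly one clearing writer, plain stores
suffice; cross-thread visibility still relies on the barriers already
present in the real code (smp_read_barrier_depends() and the write
barrier paired with it in qemu_bh_schedule()).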