This patch allocates the io queue dynamically so that we can support an
aio_context-wide io queue in the following patch.

Signed-off-by: Ming Lei <ming.lei@canonical.com>
---
 block/linux-aio.c | 66 +++++++++++++++++++++++++++++++++--------------------
 1 file changed, 41 insertions(+), 25 deletions(-)

diff --git a/block/linux-aio.c b/block/linux-aio.c
--- a/block/linux-aio.c
+++ b/block/linux-aio.c
@@ -57,7 +57,7 @@ struct qemu_laio_state {
EventNotifier e;
/* io queue for submit at batch */
- LaioQueue io_q;
+ LaioQueue *io_q;
/* I/O completion processing */
QEMUBH *completion_bh;
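For context, every access in this patch goes through the queue's idx, size,
iocbs, plugged and retry members. A rough sketch of the layout, reconstructed
from those accesses rather than copied from the tree:

    /* Sketch only: reconstructed from the accesses below, not the
     * actual definition earlier in block/linux-aio.c. */
    typedef struct {
        struct iocb *iocbs[MAX_QUEUED_IO]; /* requests staged by ioq_enqueue()    */
        int plugged;                       /* nesting counter from laio_io_plug() */
        unsigned int size;                 /* capacity, i.e. MAX_QUEUED_IO        */
        unsigned int idx;                  /* number of iocbs currently queued    */
        QEMUBH *retry;                     /* bottom half that resubmits on -EAGAIN */
    } LaioQueue;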
@@ -146,8 +146,8 @@ static void qemu_laio_completion_bh(void *opaque)
static void qemu_laio_start_retry(struct qemu_laio_state *s)
{
- if (s->io_q.idx)
- qemu_bh_schedule(s->io_q.retry);
+ if (s->io_q->idx)
+ qemu_bh_schedule(s->io_q->retry);
}
static void qemu_laio_completion_cb(EventNotifier *e)
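The retry bottom half scheduled above is ioq_submit_retry(), named in a hunk
header below. Its body is outside this diff, but it presumably just flushes
whatever is still queued, along the lines of:

    /* Hedged reconstruction; the real body is not part of this diff. */
    static void ioq_submit_retry(void *opaque)
    {
        struct qemu_laio_state *s = opaque;

        ioq_submit(s, false);
    }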
@@ -197,8 +197,8 @@ static void ioq_init(LaioQueue *io_q)
static void abort_queue(struct qemu_laio_state *s)
{
int i;
- for (i = 0; i < s->io_q.idx; i++) {
- struct qemu_laiocb *laiocb = container_of(s->io_q.iocbs[i],
+ for (i = 0; i < s->io_q->idx; i++) {
+ struct qemu_laiocb *laiocb = container_of(s->io_q->iocbs[i],
struct qemu_laiocb,
iocb);
laiocb->ret = -EIO;
@@ -209,14 +209,14 @@ static void abort_queue(struct qemu_laio_state *s)
static int ioq_submit(struct qemu_laio_state *s, bool enqueue)
{
int ret, i = 0;
- int len = s->io_q.idx;
+ int len = s->io_q->idx;
int j = 0;
if (!len) {
return 0;
}
- ret = io_submit(s->ctx, len, s->io_q.iocbs);
+ ret = io_submit(s->ctx, len, s->io_q->iocbs);
if (ret == -EAGAIN) { /* retry in following completion cb */
return 0;
} else if (ret < 0) {
@@ -232,7 +232,7 @@ static int ioq_submit(struct qemu_laio_state *s, bool enqueue)
}
for (i = ret; i < len; i++) {
- s->io_q.iocbs[j++] = s->io_q.iocbs[i];
+ s->io_q->iocbs[j++] = s->io_q->iocbs[i];
}
out:
@@ -240,7 +240,7 @@ static int ioq_submit(struct qemu_laio_state *s, bool enqueue)
* update io queue, for partial completion, retry will be
* started automatically in following completion cb.
*/
- s->io_q.idx -= ret;
+ s->io_q->idx -= ret;
return ret;
}
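To make the partial-submission path concrete, with illustrative numbers:
suppose len == 8 requests are queued and io_submit() accepts only ret == 5.
The loop in the previous hunk compacts the three leftovers to the front of
the array, and the idx -= ret just above records the new depth, so the retry
bottom half can resubmit them after the next completion:

    /* Stand-alone illustration, not QEMU code. */
    int i, j = 0;
    for (i = ret; i < len; i++) {   /* i = 5, 6, 7: unsubmitted requests */
        iocbs[j++] = iocbs[i];      /* shift down, preserving order      */
    }
    idx -= ret;                     /* 8 - 5 == 3 still queued           */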
@@ -253,22 +253,22 @@ static void ioq_submit_retry(void *opaque)
static int ioq_enqueue(struct qemu_laio_state *s, struct iocb *iocb)
{
- unsigned int idx = s->io_q.idx;
+ unsigned int idx = s->io_q->idx;
- if (unlikely(idx == s->io_q.size)) {
+ if (unlikely(idx == s->io_q->size)) {
return -1;
}
- s->io_q.iocbs[idx++] = iocb;
- s->io_q.idx = idx;
+ s->io_q->iocbs[idx++] = iocb;
+ s->io_q->idx = idx;
/* don't submit until next completion for -EAGAIN of non plug case */
- if (unlikely(!s->io_q.plugged)) {
+ if (unlikely(!s->io_q->plugged)) {
return 0;
}
/* submit immediately if queue depth is above 2/3 */
- if (idx > s->io_q.size * 2 / 3) {
+ if (idx > s->io_q->size * 2 / 3) {
return ioq_submit(s, true);
}
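The 2/3 check bounds how far a plugged queue can grow before it is flushed
regardless of the plug. Assuming the queue capacity is MAX_QUEUED_IO == 128
(the constant's value is defined outside this diff):

    /* With s->io_q->size == 128 (assumed), 128 * 2 / 3 == 85 in integer
     * arithmetic, so the 86th enqueued request triggers an early flush
     * even though the caller has not called laio_io_unplug() yet: */
    if (idx > s->io_q->size * 2 / 3) {  /* 86 > 85 */
        return ioq_submit(s, true);
    }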
@@ -279,7 +279,7 @@ void laio_io_plug(BlockDriverState *bs, void *aio_ctx)
{
struct qemu_laio_state *s = aio_ctx;
- s->io_q.plugged++;
+ s->io_q->plugged++;
}
int laio_io_unplug(BlockDriverState *bs, void *aio_ctx, bool unplug)
@@ -287,13 +287,13 @@ int laio_io_unplug(BlockDriverState *bs, void *aio_ctx, bool unplug)
struct qemu_laio_state *s = aio_ctx;
int ret = 0;
- assert(s->io_q.plugged > 0 || !unplug);
+ assert(s->io_q->plugged > 0 || !unplug);
- if (unplug && --s->io_q.plugged > 0) {
+ if (unplug && --s->io_q->plugged > 0) {
return 0;
}
- if (s->io_q.idx > 0) {
+ if (s->io_q->idx > 0) {
ret = ioq_submit(s, false);
}
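Taken together, plug/unplug give callers simple request batching, and the
plugged counter nests, so only the outermost unplug triggers a flush. A
hypothetical caller (illustrative names; the remaining laio_submit()
parameters are elided because the diff truncates its signature):

    laio_io_plug(bs, aio_ctx);
    for (i = 0; i < nreq; i++) {
        laio_submit(bs, aio_ctx, fd, /* ... */);  /* requests stage in io_q */
    }
    laio_io_unplug(bs, aio_ctx, true);  /* plugged drops to 0 -> ioq_submit() */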
@@ -333,10 +333,10 @@ BlockAIOCB *laio_submit(BlockDriverState *bs, void *aio_ctx, int fd,
}
io_set_eventfd(&laiocb->iocb, event_notifier_get_fd(&s->e));
- if (!s->io_q.plugged) {
+ if (!s->io_q->plugged) {
int ret;
- if (!s->io_q.idx) {
+ if (!s->io_q->idx) {
ret = io_submit(s->ctx, 1, &iocbs);
} else {
ret = -EAGAIN;
@@ -364,20 +364,38 @@ out_free_aiocb:
return NULL;
}
+static LaioQueue *laio_alloc_ioq(AioContext *ctx, struct qemu_laio_state *s)
+{
+ LaioQueue *ioq = g_malloc0(sizeof(*ioq));
+
+ ioq_init(ioq);
+ ioq->retry = aio_bh_new(ctx, ioq_submit_retry, s);
+ return ioq;
+}
+
+static void laio_free_ioq(struct qemu_laio_state *s, LaioQueue *ioq)
+{
+ qemu_bh_delete(ioq->retry);
+ g_free(ioq);
+ s->io_q = NULL;
+}
+
void laio_detach_aio_context(void *s_, AioContext *old_context)
{
struct qemu_laio_state *s = s_;
aio_set_event_notifier(old_context, &s->e, NULL);
qemu_bh_delete(s->completion_bh);
- qemu_bh_delete(s->io_q.retry);
+
+ laio_free_ioq(s, s->io_q);
}
void laio_attach_aio_context(void *s_, AioContext *new_context)
{
struct qemu_laio_state *s = s_;
- s->io_q.retry = aio_bh_new(new_context, ioq_submit_retry, s);
+ s->io_q = laio_alloc_ioq(new_context, s);
+
s->completion_bh = aio_bh_new(new_context, qemu_laio_completion_bh, s);
aio_set_event_notifier(new_context, &s->e, qemu_laio_completion_cb);
}
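With the new helpers, the queue and its retry bottom half share their
lifetime with the AioContext attachment instead of being embedded in
qemu_laio_state. The expected pairing, sketched from the functions in this
patch (error handling omitted):

    void *s = laio_init();            /* io context + eventfd                   */
    laio_attach_aio_context(s, ctx);  /* allocates io_q, creates both BHs       */
    /* ... submit and complete I/O ... */
    laio_detach_aio_context(s, ctx);  /* deletes BHs, frees io_q, NULLs s->io_q */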
@@ -395,8 +413,6 @@ void *laio_init(void)
goto out_close_efd;
}
- ioq_init(&s->io_q);
-
return s;
out_close_efd: