@@ -52,6 +52,11 @@ typedef struct {
QEMUBH *retry;
} LaioQueue;
+typedef struct LaioTrackedBs {
+ BlockDriverState *bs;
+ QLIST_ENTRY(LaioTrackedBs) list;
+} LaioTrackedBs;
+
/* lifetime: between aio_attach and aio_detach */
struct qemu_laio_state {
io_context_t ctx;
@@ -65,6 +70,9 @@ struct qemu_laio_state {
struct io_event events[MAX_EVENTS];
int event_idx;
int event_max;
+
+    /* All BS in the list share this 'qemu_laio_state' */
+ QLIST_HEAD(, LaioTrackedBs) tracked_bs;
};
typedef struct {
@@ -433,6 +441,23 @@ void laio_detach_aio_context(void *s_, BlockDriverState *bs,
AioContext *old_context)
{
QemuLaioState *qs = s_;
+ LaioTrackedBs *tbs, *ntbs;
+
+ QLIST_FOREACH_SAFE(tbs, &qs->state->tracked_bs, list, ntbs) {
+ if (tbs->bs == bs) {
+ QLIST_REMOVE(tbs, list);
+ g_free(tbs);
+ }
+ }
+
+ if (!aio_detach_aio_bs(old_context, bs)) {
+ /* assign new master aio bs for the aio context */
+ if (old_context->master_aio_bs == bs) {
+ tbs = QLIST_FIRST(&qs->state->tracked_bs);
+ old_context->master_aio_bs = tbs->bs;
+ }
+ return;
+ }
laio_state_free(qs->state, old_context);
qs->state = NULL;
@@ -442,9 +467,16 @@ void laio_attach_aio_context(void *s_, BlockDriverState *bs,
AioContext *new_context)
{
QemuLaioState *qs = s_;
- struct qemu_laio_state *s = laio_state_alloc(new_context);
+ LaioTrackedBs *tbs = g_malloc0(sizeof(*tbs));
+
+ if (aio_attach_aio_bs(new_context, bs)) {
+ new_context->opaque = qs->state = laio_state_alloc(new_context);
+ } else {
+ qs->state = new_context->opaque;
+ }
- qs->state = s;
+ tbs->bs = bs;
+ QLIST_INSERT_HEAD(&qs->state->tracked_bs, tbs, list);
}
void *laio_init(void)
This patch supports batch IO submission AioContext-wide by sharing a single 'struct qemu_laio_state' instance among all linux-aio backends in the same AioContext. Signed-off-by: Ming Lei <ming.lei@canonical.com> --- block/linux-aio.c | 36 ++++++++++++++++++++++++++++++++++-- 1 file changed, 34 insertions(+), 2 deletions(-)