@@ -177,6 +177,7 @@ aio_ctx_finalize(GSource *source)
aio_set_event_notifier(ctx, &ctx->notifier, NULL, NULL);
event_notifier_cleanup(&ctx->notifier);
g_array_free(ctx->pollfds, TRUE);
+ qemu_free_timerlist(ctx->tl);
}
static GSourceFuncs aio_source_funcs = {
@@ -215,6 +216,12 @@ AioContext *aio_context_new(void)
aio_set_event_notifier(ctx, &ctx->notifier,
(EventNotifierHandler *)
event_notifier_test_and_clear, NULL);
+ /* Assert if we don't have rt_clock yet. If you see this assertion
+ * it means you are using AioContext without having first called
+ * init_clocks() in main().
+ */
+ assert(rt_clock);
+ ctx->tl = qemu_new_timerlist(rt_clock);
return ctx;
}
@@ -41,6 +41,8 @@ typedef struct AioHandler AioHandler;
typedef void QEMUBHFunc(void *opaque);
typedef void IOHandler(void *opaque);
+typedef struct QEMUTimerList QEMUTimerList;
+
typedef struct AioContext {
GSource source;
@@ -69,6 +71,9 @@ typedef struct AioContext {
/* Thread pool for performing work and receiving completion callbacks */
struct ThreadPool *thread_pool;
+
+ /* TimerList for calling timers */
+ QEMUTimerList *tl;
} AioContext;
/* Returns 1 if there are still outstanding AIO requests; 0 otherwise */
@@ -12,6 +12,7 @@
#include <glib.h>
#include "block/aio.h"
+#include "qemu/timer.h"
AioContext *ctx;
@@ -628,6 +629,8 @@ int main(int argc, char **argv)
{
GSource *src;
+ init_clocks();
+
ctx = aio_context_new();
src = aio_get_g_source(ctx);
g_source_attach(src, NULL);
@@ -3,6 +3,7 @@
#include "block/aio.h"
#include "block/thread-pool.h"
#include "block/block.h"
+#include "qemu/timer.h"
static AioContext *ctx;
static ThreadPool *pool;
@@ -205,6 +206,8 @@ int main(int argc, char **argv)
{
int ret;
+ init_clocks();
+
ctx = aio_context_new();
pool = aio_get_thread_pool(ctx);
Add a QEMUTimerList to each AioContext and delete it when the AioContext
is freed.

Signed-off-by: Alex Bligh <alex@alex.org.uk>
---
 async.c                  | 7 +++++++
 include/block/aio.h      | 5 +++++
 tests/test-aio.c         | 3 +++
 tests/test-thread-pool.c | 3 +++
 4 files changed, 18 insertions(+)