@@ -5200,7 +5200,7 @@ fi
if test "$coroutine" = ""; then
if test "$mingw32" = "yes"; then
coroutine=win32
- elif test "$cpu" = "x86_64"; then
+ elif test "$cpu" = "x86_64" || test "$cpu" = "aarch64"; then
coroutine=asm
elif test "$ucontext_works" = "yes"; then
coroutine=ucontext
@@ -17,4 +17,8 @@ U64_PTR = gdb.lookup_type('uint64_t').pointer()
def get_coroutine_regs(addr):
addr = addr.cast(gdb.lookup_type('CoroutineAsm').pointer())
rsp = addr['sp'].cast(U64_PTR)
- return {'sp': rsp, 'pc': rsp.dereference()}
+ arch = gdb.selected_frame().architecture().name().split(':')
+ if arch[0] == 'i386' and arch[1] == 'x86-64':
+ return {'rsp': rsp, 'pc': rsp.dereference()}
+ else:
+ return {'sp': rsp, 'pc': addr['scratch'].cast(U64_PTR) }
@@ -39,7 +39,9 @@ util-obj-$(CONFIG_MEMBARRIER) += sys_membarrier.o
util-obj-y += qemu-coroutine.o qemu-coroutine-lock.o qemu-coroutine-io.o
util-obj-y += qemu-coroutine-sleep.o
util-obj-y += coroutine-$(CONFIG_COROUTINE_BACKEND).o
+ifeq ($(ARCH),x86_64)
coroutine-asm.o-cflags := -mno-red-zone
+endif
util-obj-y += buffer.o
util-obj-y += timed-average.o
util-obj-y += base64.o
@@ -40,6 +40,11 @@ typedef struct {
Coroutine base;
void *sp;
+ /*
+ * aarch64: instruction pointer
+ */
+ void *scratch;
+
void *stack;
size_t stack_size;
@@ -116,6 +121,49 @@ static void start_switch_fiber(void **fake_stack_save,
/* Use "call" to ensure the stack is aligned correctly. */
#define CO_SWITCH_NEW(from, to) CO_SWITCH(from, to, 0, "call coroutine_trampoline")
#define CO_SWITCH_RET(from, to, action) CO_SWITCH(from, to, action, "ret")
+
+#elif defined __aarch64__
+/*
+ * GCC does not support clobbering the frame pointer, so we save it ourselves.
+ * Saving the link register as well generates slightly better code because then
+ * qemu_coroutine_switch can be treated as a leaf procedure.
+ */
+#define CO_SWITCH_RET(from, to, action) ({ \
+ register uintptr_t action_ __asm__("x0") = action; \
+ register void *from_ __asm__("x16") = from; \
+ register void *to_ __asm__("x1") = to; \
+ asm volatile( \
+ ".cfi_remember_state\n" \
+ "stp x29, x30, [sp, #-16]!\n" /* GCC does not save it, do it ourselves */ \
+ ".cfi_adjust_cfa_offset 16\n" \
+ ".cfi_def_cfa_register sp\n" \
+ "adr x30, 2f\n" /* source PC will be after the BR */ \
+ "str x30, [x16, %[SCRATCH]]\n" /* save it */ \
+ "mov x30, sp\n" /* save source SP */ \
+ "str x30, [x16, %[SP]]\n" \
+ "ldr x30, [x1, %[SCRATCH]]\n" /* load destination PC */ \
+ "ldr x1, [x1, %[SP]]\n" /* load destination SP */ \
+ "mov sp, x1\n" \
+ "br x30\n" \
+ "2: \n" \
+ "ldp x29, x30, [sp], #16\n" \
+ ".cfi_restore_state\n" \
+ : "+r" (action_), "+r" (from_), "+r" (to_) \
+ : [SP] "i" (offsetof(CoroutineAsm, sp)), \
+ [SCRATCH] "i" (offsetof(CoroutineAsm, scratch)) \
+ : "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", \
+ "x13", "x14", "x15", "x17", "x18", "x19", "x20", "x21", "x22", "x23", \
+ "x24", "x25", "x26", "x27", "x28", \
+ "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", \
+ "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", \
+ "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "memory"); \
+ action_; \
+})
+
+#define CO_SWITCH_NEW(from, to) do { \
+ (to)->scratch = (void *) coroutine_trampoline; \
+ (void) CO_SWITCH_RET(from, to, (uintptr_t) to); \
+} while(0)
#else
#error coroutine-asm.c not ported to this architecture.
#endif
The speedup is similar to x86, 120 ns vs 180 ns on an APM Mustang. Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> --- configure | 2 +- scripts/qemugdb/coroutine_asm.py | 6 ++++- util/Makefile.objs | 2 ++ util/coroutine-asm.c | 45 ++++++++++++++++++++++++++++++++ 4 files changed, 53 insertions(+), 2 deletions(-)