[12/35] hax: access cpu->interrupt_request with atomics

Message ID: 20180917163103.6113-13-cota@braap.org
State: New
Series: exec: drop BQL from interrupt handling

Commit Message

Emilio Cota Sept. 17, 2018, 4:30 p.m. UTC
I am not familiar with HAX, so I opted for the safe
(and slow) route of performing what are probably many
unnecessary atomic_reads.

Cc: Richard Henderson <rth@twiddle.net>
Cc: Eduardo Habkost <ehabkost@redhat.com>
Signed-off-by: Emilio G. Cota <cota@braap.org>
---
 target/i386/hax-all.c | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)
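
Taken together, the changes follow one small pattern: bits in
cpu->interrupt_request are set with an atomic read-modify-write, and the
field is read with a single atomic load whose snapshot is then tested
locally. As a minimal standalone sketch of that pattern, the __atomic
builtins below approximate what QEMU's atomic_or()/atomic_read() macros
expand to on modern compilers; the struct, constants, and function names
are illustrative stand-ins, not QEMU's definitions:

#include <stdio.h>

enum {
    CPU_INTERRUPT_HARD = 0x0002,    /* illustrative values */
    CPU_INTERRUPT_NMI  = 0x0200,
};

typedef struct {
    int interrupt_request;
} CPUStateSketch;

/* Writer side: set bits with an atomic read-modify-write, as
 * hax_handle_interrupt() now does via atomic_or(). */
static void handle_interrupt(CPUStateSketch *cpu, int mask)
{
    __atomic_fetch_or(&cpu->interrupt_request, mask, __ATOMIC_SEQ_CST);
}

/* Reader side: load the field once, then test bits on the local
 * snapshot, as the HAX_EXIT_HLT hunk does. */
static int hard_or_nmi_pending(CPUStateSketch *cpu)
{
    int interrupt_request = __atomic_load_n(&cpu->interrupt_request,
                                            __ATOMIC_RELAXED);
    return (interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI)) != 0;
}

int main(void)
{
    CPUStateSketch cpu = { 0 };
    handle_interrupt(&cpu, CPU_INTERRUPT_HARD);
    printf("pending: %d\n", hard_or_nmi_pending(&cpu));
    return 0;
}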

Comments

Philippe Mathieu-Daudé Sept. 19, 2018, 9:05 p.m. UTC | #1
On 9/17/18 6:30 PM, Emilio G. Cota wrote:
> I am not familiar with HAX, so I opted for the safe
> (and slow) route of performing what are probably many
> unnecessary atomic_reads.
> 
> Cc: Richard Henderson <rth@twiddle.net>
> Cc: Eduardo Habkost <ehabkost@redhat.com>
> Signed-off-by: Emilio G. Cota <cota@braap.org>

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>

Patch

diff --git a/target/i386/hax-all.c b/target/i386/hax-all.c
index ae8b678db0..56f999d5ba 100644
--- a/target/i386/hax-all.c
+++ b/target/i386/hax-all.c
@@ -293,7 +293,7 @@ int hax_vm_destroy(struct hax_vm *vm)
 
 static void hax_handle_interrupt(CPUState *cpu, int mask)
 {
-    cpu->interrupt_request |= mask;
+    atomic_or(&cpu->interrupt_request, mask);
 
     if (!qemu_cpu_is_self(cpu)) {
         qemu_cpu_kick(cpu);
@@ -427,7 +427,7 @@ static int hax_vcpu_interrupt(CPUArchState *env)
      * Unlike KVM, HAX kernel check for the eflags, instead of qemu
      */
     if (ht->ready_for_interrupt_injection &&
-        (cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
+        (atomic_read(&cpu->interrupt_request) & CPU_INTERRUPT_HARD)) {
         int irq;
 
         irq = cpu_get_pic_interrupt(env);
@@ -441,7 +441,7 @@ static int hax_vcpu_interrupt(CPUArchState *env)
      * interrupt, request an interrupt window exit.  This will
      * cause a return to userspace as soon as the guest is ready to
      * receive interrupts. */
-    if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
+    if (atomic_read(&cpu->interrupt_request) & CPU_INTERRUPT_HARD) {
         ht->request_interrupt_window = 1;
     } else {
         ht->request_interrupt_window = 0;
@@ -482,19 +482,19 @@ static int hax_vcpu_hax_exec(CPUArchState *env)
 
     cpu->halted = 0;
 
-    if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
+    if (atomic_read(&cpu->interrupt_request) & CPU_INTERRUPT_POLL) {
         cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
         apic_poll_irq(x86_cpu->apic_state);
     }
 
-    if (cpu->interrupt_request & CPU_INTERRUPT_INIT) {
+    if (atomic_read(&cpu->interrupt_request) & CPU_INTERRUPT_INIT) {
         DPRINTF("\nhax_vcpu_hax_exec: handling INIT for %d\n",
                 cpu->cpu_index);
         do_cpu_init(x86_cpu);
         hax_vcpu_sync_state(env, 1);
     }
 
-    if (cpu->interrupt_request & CPU_INTERRUPT_SIPI) {
+    if (atomic_read(&cpu->interrupt_request) & CPU_INTERRUPT_SIPI) {
         DPRINTF("hax_vcpu_hax_exec: handling SIPI for %d\n",
                 cpu->cpu_index);
         hax_vcpu_sync_state(env, 0);
@@ -503,6 +503,7 @@ static int hax_vcpu_hax_exec(CPUArchState *env)
     }
 
     do {
+        int interrupt_request;
         int hax_ret;
 
         if (cpu->exit_request) {
@@ -553,8 +554,9 @@ static int hax_vcpu_hax_exec(CPUArchState *env)
             ret = -1;
             break;
         case HAX_EXIT_HLT:
-            if (!(cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
-                !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
+            interrupt_request = atomic_read(&cpu->interrupt_request);
+            if (!(interrupt_request & CPU_INTERRUPT_HARD) &&
+                !(interrupt_request & CPU_INTERRUPT_NMI)) {
                 /* hlt instruction with interrupt disabled is shutdown */
                 env->eflags |= IF_MASK;
                 cpu->halted = 1;
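
Two details of the conversion are worth noting. A plain |= on a field
written by multiple threads compiles to a separate load, OR, and store,
so two racing writers can both read the old value and one bit is
silently lost; the atomic_or() in hax_handle_interrupt() makes that
read-modify-write indivisible. And in the HAX_EXIT_HLT hunk the field is
loaded once into a local so that both bit tests see the same snapshot;
with two separate atomic_read() calls the value could change between the
checks. A sketch of the lost-update interleaving (illustrative, not QEMU
code):

/*
 *   Thread A (HARD)              Thread B (NMI)
 *   tmp = irq;        // reads 0
 *                                tmp = irq;        // reads 0
 *                                irq = tmp | NMI;  // irq == NMI
 *   irq = tmp | HARD;            // irq == HARD, the NMI bit is lost
 */
static void set_bit_safely(int *interrupt_request, int mask)
{
    /* indivisible read-modify-write: no interleaving can drop a bit */
    __atomic_fetch_or(interrupt_request, mask, __ATOMIC_SEQ_CST);
}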