diff mbox series

[v4,2/3] target/arm: Support VSError injection

Message ID 20200218020416.50244-3-gshan@redhat.com
State New
Headers show
Series hw/arm/virt: Simulate NMI Injection | expand

Commit Message

Gavin Shan Feb. 18, 2020, 2:04 a.m. UTC
This adds support for virtual SError injection, which can be used to
inject an SError into a guest running on the emulated hypervisor. The
functionality is enabled only when we're in non-secure mode and
{HCR.TGE, HCR.AMO} are set to {0, 1}. The virtual SError can also be
masked by the PSTATE.A bit. Apart from that, the implementation is
similar to VFIQ.

Signed-off-by: Gavin Shan <gshan@redhat.com>
---
 target/arm/cpu.c       | 48 +++++++++++++++++++++++++++++++++++++++++-
 target/arm/cpu.h       | 13 +++++++-----
 target/arm/helper.c    | 20 +++++++++++++++++-
 target/arm/internals.h | 10 +++++++++
 target/arm/machine.c   |  2 +-
 5 files changed, 85 insertions(+), 8 deletions(-)
diff mbox series

Patch

diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index e5750080bc..5969674941 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -78,7 +78,8 @@  static bool arm_cpu_has_work(CPUState *cs)
         && cs->interrupt_request &
         (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD
          | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ
-         | CPU_INTERRUPT_SERROR | CPU_INTERRUPT_EXITTB);
+         | CPU_INTERRUPT_SERROR | CPU_INTERRUPT_VSERROR
+         | CPU_INTERRUPT_EXITTB);
 }
 
 void arm_register_pre_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
@@ -452,6 +453,12 @@  static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
     case EXCP_SERROR:
        pstate_unmasked = !(env->daif & PSTATE_A);
        break;
+    case EXCP_VSERROR:
+        if (secure || !(hcr_el2 & HCR_AMO) || (hcr_el2 & HCR_TGE)) {
+            /* VSError is only taken when hypervized and non-secure.  */
+            return false;
+        }
+        return !(env->daif & PSTATE_A);
     default:
         g_assert_not_reached();
     }
@@ -550,6 +557,15 @@  bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
         }
     }
 
+    if (interrupt_request & CPU_INTERRUPT_VSERROR) {
+        excp_idx = EXCP_VSERROR;
+        target_el = 1;
+        if (arm_excp_unmasked(cs, excp_idx, target_el,
+                              cur_el, secure, hcr_el2)) {
+            goto found;
+        }
+    }
+
     if (interrupt_request & CPU_INTERRUPT_FIQ) {
         excp_idx = EXCP_FIQ;
         target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
@@ -558,6 +574,7 @@  bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
             goto found;
         }
     }
+
     if (interrupt_request & CPU_INTERRUPT_HARD) {
         excp_idx = EXCP_IRQ;
         target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
@@ -566,6 +583,7 @@  bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
             goto found;
         }
     }
+
     if (interrupt_request & CPU_INTERRUPT_VIRQ) {
         excp_idx = EXCP_VIRQ;
         target_el = 1;
@@ -574,6 +592,7 @@  bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
             goto found;
         }
     }
+
     if (interrupt_request & CPU_INTERRUPT_VFIQ) {
         excp_idx = EXCP_VFIQ;
         target_el = 1;
@@ -672,6 +691,28 @@  void arm_cpu_update_vfiq(ARMCPU *cpu)
     }
 }
 
+void arm_cpu_update_vserror(ARMCPU *cpu)
+{
+    /*
+     * Update the interrupt level for virtual SError, which is the logical
+     * OR of the HCR_EL2.VSE bit and the input line level from the GIC.
+     */
+    CPUARMState *env = &cpu->env;
+    CPUState *cs = CPU(cpu);
+
+    bool new_state = (env->cp15.hcr_el2 & HCR_VSE) ||
+        (env->irq_line_state & CPU_INTERRUPT_VSERROR);
+
+    if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VSERROR) != 0)) {
+        if (new_state) {
+            cpu_interrupt(cs, CPU_INTERRUPT_VSERROR);
+        } else {
+            cpu_reset_interrupt(cs, CPU_INTERRUPT_VSERROR);
+        }
+    }
+}
+
+
 #ifndef CONFIG_USER_ONLY
 static void arm_cpu_set_irq(void *opaque, int irq, int level)
 {
@@ -684,6 +725,7 @@  static void arm_cpu_set_irq(void *opaque, int irq, int level)
         [ARM_CPU_VIRQ] = CPU_INTERRUPT_VIRQ,
         [ARM_CPU_VFIQ] = CPU_INTERRUPT_VFIQ,
         [ARM_CPU_SERROR] = CPU_INTERRUPT_SERROR,
+        [ARM_CPU_VSERROR] = CPU_INTERRUPT_VSERROR,
     };
 
     if (level) {
@@ -710,6 +752,10 @@  static void arm_cpu_set_irq(void *opaque, int irq, int level)
             cpu_reset_interrupt(cs, mask[irq]);
         }
         break;
+    case ARM_CPU_VSERROR:
+        assert(arm_feature(env, ARM_FEATURE_EL2));
+        arm_cpu_update_vserror(cpu);
+        break;
     default:
         g_assert_not_reached();
     }
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 23e9f7ee2d..30056c6dbc 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -50,6 +50,7 @@ 
 #define EXCP_LSERR          21   /* v8M LSERR SecureFault */
 #define EXCP_UNALIGNED      22   /* v7M UNALIGNED UsageFault */
 #define EXCP_SERROR         23   /* SError Interrupt */
+#define EXCP_VSERROR        24   /* Virtual SError Interrupt */
 /* NB: add new EXCP_ defines to the array in arm_log_exception() too */
 
 #define ARMV7M_EXCP_RESET   1
@@ -80,10 +81,11 @@  enum {
 };
 
 /* ARM-specific interrupt pending bits.  */
-#define CPU_INTERRUPT_FIQ     CPU_INTERRUPT_TGT_EXT_1
-#define CPU_INTERRUPT_VIRQ    CPU_INTERRUPT_TGT_EXT_2
-#define CPU_INTERRUPT_VFIQ    CPU_INTERRUPT_TGT_EXT_3
-#define CPU_INTERRUPT_SERROR  CPU_INTERRUPT_TGT_EXT_4
+#define CPU_INTERRUPT_FIQ     CPU_INTERRUPT_TGT_EXT_0
+#define CPU_INTERRUPT_VIRQ    CPU_INTERRUPT_TGT_EXT_1
+#define CPU_INTERRUPT_VFIQ    CPU_INTERRUPT_TGT_EXT_2
+#define CPU_INTERRUPT_SERROR  CPU_INTERRUPT_TGT_EXT_3
+#define CPU_INTERRUPT_VSERROR CPU_INTERRUPT_TGT_EXT_4
 
 /* The usual mapping for an AArch64 system register to its AArch32
  * counterpart is for the 32 bit world to have access to the lower
@@ -105,7 +107,8 @@  enum {
 #define ARM_CPU_VIRQ    2
 #define ARM_CPU_VFIQ    3
 #define ARM_CPU_SERROR  4
-#define ARM_CPU_NUM_IRQ 5
+#define ARM_CPU_VSERROR 5
+#define ARM_CPU_NUM_IRQ 6
 
 /* ARM-specific extra insn start words:
  * 1: Conditional execution bits
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 3f00af4c41..7fa6653f10 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -1969,7 +1969,11 @@  static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
         }
     }
 
-    if (!allow_virt || !(hcr_el2 & HCR_AMO)) {
+    if (allow_virt && (hcr_el2 & HCR_AMO)) {
+        if (cs->interrupt_request & CPU_INTERRUPT_VSERROR) {
+            ret |= CPSR_A;
+        }
+    } else {
         if (cs->interrupt_request & CPU_INTERRUPT_SERROR) {
             ret |= CPSR_A;
         }
@@ -5103,6 +5107,7 @@  static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
     g_assert(qemu_mutex_iothread_locked());
     arm_cpu_update_virq(cpu);
     arm_cpu_update_vfiq(cpu);
+    arm_cpu_update_vserror(cpu);
 }
 
 static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -8605,6 +8610,7 @@  void arm_log_exception(int idx)
             [EXCP_LSERR] = "v8M LSERR UsageFault",
             [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
             [EXCP_SERROR] = "SError Interrupt",
+            [EXCP_VSERROR] = "Virtual SError Interrupt",
         };
 
         if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
@@ -9113,6 +9119,17 @@  static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
         mask = CPSR_A | CPSR_I | CPSR_F;
         offset = 0;
         break;
+    case EXCP_VSERROR:
+        A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
+        A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
+        qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
+                      env->exception.fsr,
+                      (uint32_t)env->exception.vaddress);
+        new_mode = ARM_CPU_MODE_ABT;
+        addr = 0x10;
+        mask = CPSR_A | CPSR_I;
+        offset = 8;
+        break;
     default:
         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
         return; /* Never happens.  Keep compiler happy.  */
@@ -9223,6 +9240,7 @@  static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
         addr += 0x100;
         break;
     case EXCP_SERROR:
+    case EXCP_VSERROR:
         addr += 0x180;
         break;
     default:
diff --git a/target/arm/internals.h b/target/arm/internals.h
index 58c4d707c5..4625bf984e 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -1023,6 +1023,16 @@  void arm_cpu_update_virq(ARMCPU *cpu);
  */
 void arm_cpu_update_vfiq(ARMCPU *cpu);
 
+/**
+ * arm_cpu_update_vserror: Update CPU_INTERRUPT_VSERROR interrupt
+ *
+ * Update the CPU_INTERRUPT_VSERROR bit in cs->interrupt_request, following
+ * a change to either the input virtual SError line from the GIC or the
+ * HCR_EL2.VSE bit. Must be called with the iothread lock held.
+ */
+void arm_cpu_update_vserror(ARMCPU *cpu);
+
+
 /**
  * arm_mmu_idx_el:
  * @env: The cpu environment
diff --git a/target/arm/machine.c b/target/arm/machine.c
index e2ad2f156e..1bc9319f9b 100644
--- a/target/arm/machine.c
+++ b/target/arm/machine.c
@@ -715,7 +715,7 @@  static int cpu_post_load(void *opaque, int version_id)
         env->irq_line_state = cs->interrupt_request &
             (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIQ |
              CPU_INTERRUPT_VIRQ | CPU_INTERRUPT_VFIQ |
-             CPU_INTERRUPT_SERROR);
+             CPU_INTERRUPT_SERROR | CPU_INTERRUPT_VSERROR);
     }
 
     /* Update the values list from the incoming migration data.