
[10/10] target/arm: Implement support for taking exceptions to Hyp mode

Message ID 20180814124254.5229-11-peter.maydell@linaro.org
State New
Series target/arm: Some pieces of support for 32-bit Hyp mode

Commit Message

Peter Maydell Aug. 14, 2018, 12:42 p.m. UTC
Implement the necessary support code for taking exceptions
to Hyp mode in AArch32.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/helper.c | 146 +++++++++++++++++++++++++++++++++++++-------
 1 file changed, 123 insertions(+), 23 deletions(-)
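
As an orientation aid before the diff, here is a small standalone sketch of the entry rules the new arm_cpu_do_interrupt_aarch32_hyp() helper implements: each exception class maps to a fixed offset in the Hyp vector table, offsets below 0x14 are only used when the exception is taken from Hyp mode itself, and the final entry point is HVBAR plus that offset. The enum and function names below are invented for illustration; they are not QEMU identifiers.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative exception classes; these are not QEMU's EXCP_* constants. */
enum hyp_exc {
    HYP_EXC_UNDEF,
    HYP_EXC_SVC,
    HYP_EXC_HVC,
    HYP_EXC_PREFETCH_ABORT,
    HYP_EXC_DATA_ABORT,
    HYP_EXC_HYP_TRAP,
    HYP_EXC_IRQ,
    HYP_EXC_FIQ,
};

/*
 * Pick the vector table offset for an exception taken to Hyp mode.
 * Offsets below 0x14 are only used for exceptions taken from Hyp to Hyp;
 * anything arriving from a lower EL goes through the 0x14 entry point.
 */
static uint32_t hyp_vector_offset(enum hyp_exc exc, bool taken_from_hyp)
{
    uint32_t offset;

    switch (exc) {
    case HYP_EXC_UNDEF:          offset = 0x04; break;
    case HYP_EXC_HVC:            offset = 0x08; break;
    case HYP_EXC_PREFETCH_ABORT: offset = 0x0c; break;
    case HYP_EXC_DATA_ABORT:     offset = 0x10; break;
    case HYP_EXC_IRQ:            offset = 0x18; break;
    case HYP_EXC_FIQ:            offset = 0x1c; break;
    case HYP_EXC_SVC:
    case HYP_EXC_HYP_TRAP:
    default:                     offset = 0x14; break;
    }

    if (!taken_from_hyp && offset < 0x14) {
        offset = 0x14;
    }
    return offset;
}

int main(void)
{
    uint32_t hvbar = 0x80000000;  /* example HVBAR value */

    /* An HVC from EL1 enters through the 0x14 Hyp mode entry point... */
    printf("HVC from EL1: 0x%08" PRIx32 "\n",
           hvbar + hyp_vector_offset(HYP_EXC_HVC, false));
    /* ...but an HVC issued from Hyp mode itself uses the 0x08 entry. */
    printf("HVC from Hyp: 0x%08" PRIx32 "\n",
           hvbar + hyp_vector_offset(HYP_EXC_HVC, true));
    return 0;
}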

Comments

Edgar E. Iglesias Aug. 15, 2018, 10:54 a.m. UTC | #1
On Tue, Aug 14, 2018 at 01:42:54PM +0100, Peter Maydell wrote:
> Implement the necessary support code for taking exceptions
> to Hyp mode in AArch32.

Hi Peter,

A general comment: I think this would be a little easier
to look at if it were split into two patches, one non-functional
change to break out take_aarch32_exception() and then another
patch to add the new logic...

Another comment inline below

> 
> Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
> ---
>  target/arm/helper.c | 146 +++++++++++++++++++++++++++++++++++++-------
>  1 file changed, 123 insertions(+), 23 deletions(-)
> 
> diff --git a/target/arm/helper.c b/target/arm/helper.c
> index 80855302089..167203ac664 100644
> --- a/target/arm/helper.c
> +++ b/target/arm/helper.c
> @@ -8013,6 +8013,123 @@ void aarch64_sync_64_to_32(CPUARMState *env)
>      env->regs[15] = env->pc;
>  }
>  
> +static void take_aarch32_exception(CPUARMState *env, int new_mode,
> +                                   uint32_t mask, uint32_t offset,
> +                                   uint32_t newpc)
> +{
> +    /* Change the CPU state so as to actually take the exception. */
> +    switch_mode(env, new_mode);
> +    /*
> +     * For exceptions taken to AArch32 we must clear the SS bit in both
> +     * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
> +     */
> +    env->uncached_cpsr &= ~PSTATE_SS;
> +    env->spsr = cpsr_read(env);
> +    /* Clear IT bits.  */
> +    env->condexec_bits = 0;
> +    /* Switch to the new mode, and to the correct instruction set.  */
> +    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
> +    /* Set new mode endianness */
> +    env->uncached_cpsr &= ~CPSR_E;
> +    if (env->cp15.sctlr_el[arm_current_el(env)] & SCTLR_EE) {
> +        env->uncached_cpsr |= CPSR_E;
> +    }
> +    env->daif |= mask;
> +
> +    if (new_mode == ARM_CPU_MODE_HYP) {
> +        env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
> +        env->elr_el[2] = env->regs[15];
> +    } else {
> +        /*
> +         * this is a lie, as there was no c1_sys on V4T/V5, but who cares
> +         * and we should just guard the thumb mode on V4
> +         */
> +        if (arm_feature(env, ARM_FEATURE_V4T)) {
> +            env->thumb =
> +                (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
> +        }
> +        env->regs[14] = env->regs[15] + offset;
> +    }
> +    env->regs[15] = newpc;
> +}
> +
> +static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
> +{
> +    /*
> +     * Handle exception entry to Hyp mode; this is sufficiently
> +     * different to entry to other AArch32 modes that we handle it
> +     * separately here.
> +     *
> +     * The vector table entry used is always the 0x14 Hyp mode entry point,
> +     * unless this is an UNDEF/HVC/abort taken from Hyp to Hyp.
> +     * The offset applied to the preferred return address is always zero
> +     * (see DDI0487C.a section G1.12.3).
> +     * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
> +     */
> +    uint32_t addr, mask;
> +    ARMCPU *cpu = ARM_CPU(cs);
> +    CPUARMState *env = &cpu->env;
> +
> +    switch (cs->exception_index) {
> +    case EXCP_UDEF:
> +        addr = 0x04;
> +        break;
> +    case EXCP_SWI:
> +        addr = 0x14;
> +        break;
> +    case EXCP_BKPT:
> +        /* Fall through to prefetch abort.  */
> +    case EXCP_PREFETCH_ABORT:
> +        env->cp15.ifar_s = env->exception.vaddress;
> +        qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n",
> +                      (uint32_t)env->exception.vaddress);
> +        addr = 0x0c;
> +        break;
> +    case EXCP_DATA_ABORT:
> +        env->cp15.dfar_s = env->exception.vaddress;
> +        qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
> +                      (uint32_t)env->exception.vaddress);
> +        addr = 0x10;
> +        break;
> +    case EXCP_IRQ:
> +        addr = 0x18;
> +        break;
> +    case EXCP_FIQ:
> +        addr = 0x1c;
> +        break;
> +    case EXCP_HVC:
> +        addr = 0x08;
> +        break;
> +    case EXCP_HYP_TRAP:
> +        addr = 0x14;
> +    default:
> +        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
> +    }
> +
> +    if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
> +        env->cp15.esr_el[2] = env->exception.syndrome;
> +    }
> +
> +    if (arm_current_el(env) != 2 && addr < 0x14) {
> +        addr = 0x14;
> +    }
> +
> +    mask = 0;
> +    if (!(env->cp15.scr_el3 & SCR_EA)) {
> +        mask |= CPSR_A;
> +    }
> +    if (!(env->cp15.scr_el3 & SCR_IRQ)) {
> +        mask |= CPSR_I;
> +    }
> +    if (!(env->cp15.scr_el3 & SCR_IRQ)) {a
                                 ^^^^^^^
I think this should test for SCR_FIQ.

Other than those two comments I think this looks OK!

Thanks,
Edgar





> +        mask |= CPSR_F;
> +    }
> +
> +    addr += env->cp15.hvbar;
> +
> +    take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr);
> +}
> +
>  static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
>  {
>      ARMCPU *cpu = ARM_CPU(cs);
> @@ -8048,6 +8165,11 @@ static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
>          env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
>      }
>  
> +    if (env->exception.target_el == 2) {
> +        arm_cpu_do_interrupt_aarch32_hyp(cs);
> +        return;
> +    }
> +
>      /* TODO: Vectored interrupt controller.  */
>      switch (cs->exception_index) {
>      case EXCP_UDEF:
> @@ -8155,29 +8277,7 @@ static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
>          env->cp15.scr_el3 &= ~SCR_NS;
>      }
>  
> -    switch_mode (env, new_mode);
> -    /* For exceptions taken to AArch32 we must clear the SS bit in both
> -     * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
> -     */
> -    env->uncached_cpsr &= ~PSTATE_SS;
> -    env->spsr = cpsr_read(env);
> -    /* Clear IT bits.  */
> -    env->condexec_bits = 0;
> -    /* Switch to the new mode, and to the correct instruction set.  */
> -    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
> -    /* Set new mode endianness */
> -    env->uncached_cpsr &= ~CPSR_E;
> -    if (env->cp15.sctlr_el[arm_current_el(env)] & SCTLR_EE) {
> -        env->uncached_cpsr |= CPSR_E;
> -    }
> -    env->daif |= mask;
> -    /* this is a lie, as the was no c1_sys on V4T/V5, but who cares
> -     * and we should just guard the thumb mode on V4 */
> -    if (arm_feature(env, ARM_FEATURE_V4T)) {
> -        env->thumb = (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
> -    }
> -    env->regs[14] = env->regs[15] + offset;
> -    env->regs[15] = addr;
> +    take_aarch32_exception(env, new_mode, mask, offset, addr);
>  }
>  
>  /* Handle exception entry to a target EL which is using AArch64 */
> -- 
> 2.18.0
>
Peter Maydell Aug. 15, 2018, 10:58 a.m. UTC | #2
On 15 August 2018 at 11:54, Edgar E. Iglesias <edgar.iglesias@xilinx.com> wrote:
> On Tue, Aug 14, 2018 at 01:42:54PM +0100, Peter Maydell wrote:
>> Implement the necessary support code for taking exceptions
>> to Hyp mode in AArch32.
>
> Hi Peter,
>
> A general comment: I think this would be a little easier
> to look at if it were split into two patches, one non-functional
> change to break out take_aarch32_exception() and then another
> patch to add the new logic...

Yeah, you're right. I'll split it.

>> +    mask = 0;
>> +    if (!(env->cp15.scr_el3 & SCR_EA)) {
>> +        mask |= CPSR_A;
>> +    }
>> +    if (!(env->cp15.scr_el3 & SCR_IRQ)) {
>> +        mask |= CPSR_I;
>> +    }
>> +    if (!(env->cp15.scr_el3 & SCR_IRQ)) {a
>                                  ^^^^^^^
> I think this should test for SCR_FIQ.

Yep, cut-n-paste error.

thanks
-- PMM
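
For clarity, with the cut-and-paste error above corrected (the third test checking SCR_FIQ rather than SCR_IRQ), the mask setup in arm_cpu_do_interrupt_aarch32_hyp() would presumably read as below. This is a sketch of the expected fix, not a quote from a posted respin of the patch:

    /* Set the A/I/F masks based only on the SCR.EA/IRQ/FIQ values,
     * as described in the comment at the top of the function. */
    mask = 0;
    if (!(env->cp15.scr_el3 & SCR_EA)) {
        mask |= CPSR_A;
    }
    if (!(env->cp15.scr_el3 & SCR_IRQ)) {
        mask |= CPSR_I;
    }
    if (!(env->cp15.scr_el3 & SCR_FIQ)) {
        mask |= CPSR_F;
    }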

Patch

diff --git a/target/arm/helper.c b/target/arm/helper.c
index 80855302089..167203ac664 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -8013,6 +8013,123 @@  void aarch64_sync_64_to_32(CPUARMState *env)
     env->regs[15] = env->pc;
 }
 
+static void take_aarch32_exception(CPUARMState *env, int new_mode,
+                                   uint32_t mask, uint32_t offset,
+                                   uint32_t newpc)
+{
+    /* Change the CPU state so as to actually take the exception. */
+    switch_mode(env, new_mode);
+    /*
+     * For exceptions taken to AArch32 we must clear the SS bit in both
+     * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
+     */
+    env->uncached_cpsr &= ~PSTATE_SS;
+    env->spsr = cpsr_read(env);
+    /* Clear IT bits.  */
+    env->condexec_bits = 0;
+    /* Switch to the new mode, and to the correct instruction set.  */
+    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
+    /* Set new mode endianness */
+    env->uncached_cpsr &= ~CPSR_E;
+    if (env->cp15.sctlr_el[arm_current_el(env)] & SCTLR_EE) {
+        env->uncached_cpsr |= CPSR_E;
+    }
+    env->daif |= mask;
+
+    if (new_mode == ARM_CPU_MODE_HYP) {
+        env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
+        env->elr_el[2] = env->regs[15];
+    } else {
+        /*
+         * this is a lie, as there was no c1_sys on V4T/V5, but who cares
+         * and we should just guard the thumb mode on V4
+         */
+        if (arm_feature(env, ARM_FEATURE_V4T)) {
+            env->thumb =
+                (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
+        }
+        env->regs[14] = env->regs[15] + offset;
+    }
+    env->regs[15] = newpc;
+}
+
+static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
+{
+    /*
+     * Handle exception entry to Hyp mode; this is sufficiently
+     * different to entry to other AArch32 modes that we handle it
+     * separately here.
+     *
+     * The vector table entry used is always the 0x14 Hyp mode entry point,
+     * unless this is an UNDEF/HVC/abort taken from Hyp to Hyp.
+     * The offset applied to the preferred return address is always zero
+     * (see DDI0487C.a section G1.12.3).
+     * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
+     */
+    uint32_t addr, mask;
+    ARMCPU *cpu = ARM_CPU(cs);
+    CPUARMState *env = &cpu->env;
+
+    switch (cs->exception_index) {
+    case EXCP_UDEF:
+        addr = 0x04;
+        break;
+    case EXCP_SWI:
+        addr = 0x14;
+        break;
+    case EXCP_BKPT:
+        /* Fall through to prefetch abort.  */
+    case EXCP_PREFETCH_ABORT:
+        env->cp15.ifar_s = env->exception.vaddress;
+        qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n",
+                      (uint32_t)env->exception.vaddress);
+        addr = 0x0c;
+        break;
+    case EXCP_DATA_ABORT:
+        env->cp15.dfar_s = env->exception.vaddress;
+        qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
+                      (uint32_t)env->exception.vaddress);
+        addr = 0x10;
+        break;
+    case EXCP_IRQ:
+        addr = 0x18;
+        break;
+    case EXCP_FIQ:
+        addr = 0x1c;
+        break;
+    case EXCP_HVC:
+        addr = 0x08;
+        break;
+    case EXCP_HYP_TRAP:
+        addr = 0x14;
+    default:
+        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
+    }
+
+    if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
+        env->cp15.esr_el[2] = env->exception.syndrome;
+    }
+
+    if (arm_current_el(env) != 2 && addr < 0x14) {
+        addr = 0x14;
+    }
+
+    mask = 0;
+    if (!(env->cp15.scr_el3 & SCR_EA)) {
+        mask |= CPSR_A;
+    }
+    if (!(env->cp15.scr_el3 & SCR_IRQ)) {
+        mask |= CPSR_I;
+    }
+    if (!(env->cp15.scr_el3 & SCR_IRQ)) {
+        mask |= CPSR_F;
+    }
+
+    addr += env->cp15.hvbar;
+
+    take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr);
+}
+
 static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
 {
     ARMCPU *cpu = ARM_CPU(cs);
@@ -8048,6 +8165,11 @@  static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
         env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
     }
 
+    if (env->exception.target_el == 2) {
+        arm_cpu_do_interrupt_aarch32_hyp(cs);
+        return;
+    }
+
     /* TODO: Vectored interrupt controller.  */
     switch (cs->exception_index) {
     case EXCP_UDEF:
@@ -8155,29 +8277,7 @@  static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
         env->cp15.scr_el3 &= ~SCR_NS;
     }
 
-    switch_mode (env, new_mode);
-    /* For exceptions taken to AArch32 we must clear the SS bit in both
-     * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
-     */
-    env->uncached_cpsr &= ~PSTATE_SS;
-    env->spsr = cpsr_read(env);
-    /* Clear IT bits.  */
-    env->condexec_bits = 0;
-    /* Switch to the new mode, and to the correct instruction set.  */
-    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
-    /* Set new mode endianness */
-    env->uncached_cpsr &= ~CPSR_E;
-    if (env->cp15.sctlr_el[arm_current_el(env)] & SCTLR_EE) {
-        env->uncached_cpsr |= CPSR_E;
-    }
-    env->daif |= mask;
-    /* this is a lie, as the was no c1_sys on V4T/V5, but who cares
-     * and we should just guard the thumb mode on V4 */
-    if (arm_feature(env, ARM_FEATURE_V4T)) {
-        env->thumb = (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
-    }
-    env->regs[14] = env->regs[15] + offset;
-    env->regs[15] = addr;
+    take_aarch32_exception(env, new_mode, mask, offset, addr);
 }
 
 /* Handle exception entry to a target EL which is using AArch64 */