
[v7,04/32] target-arm: rename arm_current_pl to arm_current_el

Message ID: 1413910544-20150-5-git-send-email-greg.bellows@linaro.org
State: New

Commit Message

Greg Bellows Oct. 21, 2014, 4:55 p.m. UTC
Renamed the arm_current_pl CPU function to more accurately represent that it
returns the ARMv8 EL rather than ARMv7 PL.

Signed-off-by: Greg Bellows <greg.bellows@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
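
Most call sites change mechanically with no behavioural difference; for example, the aa64_daif_access() hunk from helper.c (one of the hunks in the full diff below) is simply:

-    if (arm_current_pl(env) == 0 && !(env->cp15.c1_sys & SCTLR_UMA)) {
+    if (arm_current_el(env) == 0 && !(env->cp15.c1_sys & SCTLR_UMA)) {
         return CP_ACCESS_TRAP;
     }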

Comments

Peter Maydell Oct. 23, 2014, 2:53 p.m. UTC | #1
On 21 October 2014 17:55, Greg Bellows <greg.bellows@linaro.org> wrote:
> Renamed the arm_current_pl CPU function to more accurately represent that it
> returns the ARMv8 EL rather than ARMv7 PL.
>
> Signed-off-by: Greg Bellows <greg.bellows@linaro.org>
> Reviewed-by: Peter Maydell <peter.maydell@linaro.org>

> @@ -1485,7 +1485,7 @@ static void disas_exc(DisasContext *s, uint32_t insn)
>              gen_exception_insn(s, 0, EXCP_SWI, syn_aa64_svc(imm16));
>              break;
>          case 2:
> -            if (s->current_pl == 0) {
> +            if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el == 0) {
>                  unallocated_encoding(s);
>                  break;
>              }
> @@ -1498,7 +1498,7 @@ static void disas_exc(DisasContext *s, uint32_t insn)
>              gen_exception_insn(s, 0, EXCP_HVC, syn_aa64_hvc(imm16));
>              break;
>          case 3:
> -            if (s->current_pl == 0) {
> +            if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->current_el == 0) {
>                  unallocated_encoding(s);
>                  break;
>              }

These hunks introduce spurious changes which break PSCI booting
of AArch64. I will fix up in target-arm.next.

thanks
-- PMM
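
For comparison, a rename-only version of those hunks (a sketch, not necessarily the exact fixup that went into target-arm.next) would keep the existing EL0-only check and just switch the field name:

         case 2:
-            if (s->current_pl == 0) {
+            if (s->current_el == 0) {
                 unallocated_encoding(s);
                 break;
             }

and likewise for the case 3 (SMC) hunk.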

Patch

==========

v6 -> v7
- Fix comment

v5 -> v6
- Renamed DisasContext current_pl field to current_el
- Added comment to arm_current_el on handling v7 PL
- Fixed comments referencing PL

Signed-off-by: Greg Bellows <greg.bellows@linaro.org>
---
 target-arm/cpu.h           | 27 +++++++++++++++------------
 target-arm/helper-a64.c    |  6 +++---
 target-arm/helper.c        | 22 +++++++++++-----------
 target-arm/internals.h     |  2 +-
 target-arm/op_helper.c     | 16 ++++++++--------
 target-arm/translate-a64.c | 16 ++++++++--------
 target-arm/translate.c     |  4 ++--
 target-arm/translate.h     |  4 ++--
 8 files changed, 50 insertions(+), 47 deletions(-)

diff --git a/target-arm/cpu.h b/target-arm/cpu.h
index 44ed6fe..1138539 100644
--- a/target-arm/cpu.h
+++ b/target-arm/cpu.h
@@ -991,7 +991,10 @@  static inline bool cptype_valid(int cptype)
 #define PL1_RW (PL1_R | PL1_W)
 #define PL0_RW (PL0_R | PL0_W)
 
-static inline int arm_current_pl(CPUARMState *env)
+/* Return the current Exception Level (as per ARMv8; note that this differs
+ * from the ARMv7 Privilege Level).
+ */
+static inline int arm_current_el(CPUARMState *env)
 {
     if (env->aarch64) {
         return extract32(env->pstate, 2, 2);
@@ -1001,7 +1004,7 @@  static inline int arm_current_pl(CPUARMState *env)
         return 0;
     }
     /* We don't currently implement the Virtualization or TrustZone
-     * extensions, so PL2 and PL3 don't exist for us.
+     * extensions, so EL2 and EL3 don't exist for us.
      */
     return 1;
 }
@@ -1164,10 +1167,10 @@  static inline bool cpreg_field_is_64bit(const ARMCPRegInfo *ri)
     return (ri->state == ARM_CP_STATE_AA64) || (ri->type & ARM_CP_64BIT);
 }
 
-static inline bool cp_access_ok(int current_pl,
+static inline bool cp_access_ok(int current_el,
                                 const ARMCPRegInfo *ri, int isread)
 {
-    return (ri->access >> ((current_pl * 2) + isread)) & 1;
+    return (ri->access >> ((current_el * 2) + isread)) & 1;
 }
 
 /**
@@ -1231,7 +1234,7 @@  bool write_cpustate_to_list(ARMCPU *cpu);
 static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx)
 {
     CPUARMState *env = cs->env_ptr;
-    unsigned int cur_el = arm_current_pl(env);
+    unsigned int cur_el = arm_current_el(env);
     unsigned int target_el = arm_excp_target_el(cs, excp_idx);
     /* FIXME: Use actual secure state.  */
     bool secure = false;
@@ -1303,7 +1306,7 @@  static inline CPUARMState *cpu_init(const char *cpu_model)
 #define MMU_USER_IDX 0
 static inline int cpu_mmu_index (CPUARMState *env)
 {
-    return arm_current_pl(env);
+    return arm_current_el(env);
 }
 
 /* Return the Exception Level targeted by debug exceptions;
@@ -1316,7 +1319,7 @@  static inline int arm_debug_target_el(CPUARMState *env)
 
 static inline bool aa64_generate_debug_exceptions(CPUARMState *env)
 {
-    if (arm_current_pl(env) == arm_debug_target_el(env)) {
+    if (arm_current_el(env) == arm_debug_target_el(env)) {
         if ((extract32(env->cp15.mdscr_el1, 13, 1) == 0)
             || (env->daif & PSTATE_D)) {
             return false;
@@ -1327,10 +1330,10 @@  static inline bool aa64_generate_debug_exceptions(CPUARMState *env)
 
 static inline bool aa32_generate_debug_exceptions(CPUARMState *env)
 {
-    if (arm_current_pl(env) == 0 && arm_el_is_aa64(env, 1)) {
+    if (arm_current_el(env) == 0 && arm_el_is_aa64(env, 1)) {
         return aa64_generate_debug_exceptions(env);
     }
-    return arm_current_pl(env) != 2;
+    return arm_current_el(env) != 2;
 }
 
 /* Return true if debugging exceptions are currently enabled.
@@ -1460,8 +1463,8 @@  static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
     if (is_a64(env)) {
         *pc = env->pc;
         *flags = ARM_TBFLAG_AARCH64_STATE_MASK
-            | (arm_current_pl(env) << ARM_TBFLAG_AA64_EL_SHIFT);
-        if (fpen == 3 || (fpen == 1 && arm_current_pl(env) != 0)) {
+            | (arm_current_el(env) << ARM_TBFLAG_AA64_EL_SHIFT);
+        if (fpen == 3 || (fpen == 1 && arm_current_el(env) != 0)) {
             *flags |= ARM_TBFLAG_AA64_FPEN_MASK;
         }
         /* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
@@ -1497,7 +1500,7 @@  static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
             || arm_el_is_aa64(env, 1)) {
             *flags |= ARM_TBFLAG_VFPEN_MASK;
         }
-        if (fpen == 3 || (fpen == 1 && arm_current_pl(env) != 0)) {
+        if (fpen == 3 || (fpen == 1 && arm_current_el(env) != 0)) {
             *flags |= ARM_TBFLAG_CPACR_FPEN_MASK;
         }
         /* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
diff --git a/target-arm/helper-a64.c b/target-arm/helper-a64.c
index daf5adc..81066ca 100644
--- a/target-arm/helper-a64.c
+++ b/target-arm/helper-a64.c
@@ -450,7 +450,7 @@  void aarch64_cpu_do_interrupt(CPUState *cs)
     unsigned int new_mode = aarch64_pstate_mode(new_el, true);
     int i;
 
-    if (arm_current_pl(env) < new_el) {
+    if (arm_current_el(env) < new_el) {
         if (env->aarch64) {
             addr += 0x400;
         } else {
@@ -461,7 +461,7 @@  void aarch64_cpu_do_interrupt(CPUState *cs)
     }
 
     arm_log_exception(cs->exception_index);
-    qemu_log_mask(CPU_LOG_INT, "...from EL%d\n", arm_current_pl(env));
+    qemu_log_mask(CPU_LOG_INT, "...from EL%d\n", arm_current_el(env));
     if (qemu_loglevel_mask(CPU_LOG_INT)
         && !excp_is_internal(cs->exception_index)) {
         qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%" PRIx32 "\n",
@@ -503,7 +503,7 @@  void aarch64_cpu_do_interrupt(CPUState *cs)
 
     if (is_a64(env)) {
         env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env);
-        aarch64_save_sp(env, arm_current_pl(env));
+        aarch64_save_sp(env, arm_current_el(env));
         env->elr_el[new_el] = env->pc;
     } else {
         env->banked_spsr[0] = cpsr_read(env);
diff --git a/target-arm/helper.c b/target-arm/helper.c
index c36ee6a..033b18f 100644
--- a/target-arm/helper.c
+++ b/target-arm/helper.c
@@ -571,7 +571,7 @@  static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri)
     /* Performance monitor registers user accessibility is controlled
      * by PMUSERENR.
      */
-    if (arm_current_pl(env) == 0 && !env->cp15.c9_pmuserenr) {
+    if (arm_current_el(env) == 0 && !env->cp15.c9_pmuserenr) {
         return CP_ACCESS_TRAP;
     }
     return CP_ACCESS_OK;
@@ -996,7 +996,7 @@  static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
 
 static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri)
 {
-    if (arm_current_pl(env) == 0 && (env->teecr & 1)) {
+    if (arm_current_el(env) == 0 && (env->teecr & 1)) {
         return CP_ACCESS_TRAP;
     }
     return CP_ACCESS_OK;
@@ -1042,7 +1042,7 @@  static const ARMCPRegInfo v6k_cp_reginfo[] = {
 static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri)
 {
     /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero */
-    if (arm_current_pl(env) == 0 && !extract32(env->cp15.c14_cntkctl, 0, 2)) {
+    if (arm_current_el(env) == 0 && !extract32(env->cp15.c14_cntkctl, 0, 2)) {
         return CP_ACCESS_TRAP;
     }
     return CP_ACCESS_OK;
@@ -1051,7 +1051,7 @@  static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri)
 static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx)
 {
     /* CNT[PV]CT: not visible from PL0 if ELO[PV]CTEN is zero */
-    if (arm_current_pl(env) == 0 &&
+    if (arm_current_el(env) == 0 &&
         !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
         return CP_ACCESS_TRAP;
     }
@@ -1063,7 +1063,7 @@  static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx)
     /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
      * EL0[PV]TEN is zero.
      */
-    if (arm_current_pl(env) == 0 &&
+    if (arm_current_el(env) == 0 &&
         !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
         return CP_ACCESS_TRAP;
     }
@@ -1911,7 +1911,7 @@  static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
 
 static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri)
 {
-    if (arm_current_pl(env) == 0 && !(env->cp15.c1_sys & SCTLR_UMA)) {
+    if (arm_current_el(env) == 0 && !(env->cp15.c1_sys & SCTLR_UMA)) {
         return CP_ACCESS_TRAP;
     }
     return CP_ACCESS_OK;
@@ -1929,7 +1929,7 @@  static CPAccessResult aa64_cacheop_access(CPUARMState *env,
     /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
      * SCTLR_EL1.UCI is set.
      */
-    if (arm_current_pl(env) == 0 && !(env->cp15.c1_sys & SCTLR_UCI)) {
+    if (arm_current_el(env) == 0 && !(env->cp15.c1_sys & SCTLR_UCI)) {
         return CP_ACCESS_TRAP;
     }
     return CP_ACCESS_OK;
@@ -2006,7 +2006,7 @@  static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri)
     /* We don't implement EL2, so the only control on DC ZVA is the
      * bit in the SCTLR which can prohibit access for EL0.
      */
-    if (arm_current_pl(env) == 0 && !(env->cp15.c1_sys & SCTLR_DZE)) {
+    if (arm_current_el(env) == 0 && !(env->cp15.c1_sys & SCTLR_DZE)) {
         return CP_ACCESS_TRAP;
     }
     return CP_ACCESS_OK;
@@ -2366,7 +2366,7 @@  static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri)
     /* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64,
      * but the AArch32 CTR has its own reginfo struct)
      */
-    if (arm_current_pl(env) == 0 && !(env->cp15.c1_sys & SCTLR_UCT)) {
+    if (arm_current_el(env) == 0 && !(env->cp15.c1_sys & SCTLR_UCT)) {
         return CP_ACCESS_TRAP;
     }
     return CP_ACCESS_OK;
@@ -3768,7 +3768,7 @@  unsigned int arm_excp_target_el(CPUState *cs, unsigned int excp_idx)
 {
     ARMCPU *cpu = ARM_CPU(cs);
     CPUARMState *env = &cpu->env;
-    unsigned int cur_el = arm_current_pl(env);
+    unsigned int cur_el = arm_current_el(env);
     unsigned int target_el;
     /* FIXME: Use actual secure state.  */
     bool secure = false;
@@ -4770,7 +4770,7 @@  int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
     int prot;
     int ret, is_user;
     uint32_t syn;
-    bool same_el = (arm_current_pl(env) != 0);
+    bool same_el = (arm_current_el(env) != 0);
 
     is_user = mmu_idx == MMU_USER_IDX;
     ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot,
diff --git a/target-arm/internals.h b/target-arm/internals.h
index 51c5c16..2dff4ff 100644
--- a/target-arm/internals.h
+++ b/target-arm/internals.h
@@ -130,7 +130,7 @@  static inline void aarch64_restore_sp(CPUARMState *env, int el)
 
 static inline void update_spsel(CPUARMState *env, uint32_t imm)
 {
-    unsigned int cur_el = arm_current_pl(env);
+    unsigned int cur_el = arm_current_el(env);
     /* Update PSTATE SPSel bit; this requires us to update the
      * working stack pointer in xregs[31].
      */
diff --git a/target-arm/op_helper.c b/target-arm/op_helper.c
index 464a5ce..6cc3387 100644
--- a/target-arm/op_helper.c
+++ b/target-arm/op_helper.c
@@ -361,7 +361,7 @@  void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm)
      * Note that SPSel is never OK from EL0; we rely on handle_msr_i()
      * to catch that case at translate time.
      */
-    if (arm_current_pl(env) == 0 && !(env->cp15.c1_sys & SCTLR_UMA)) {
+    if (arm_current_el(env) == 0 && !(env->cp15.c1_sys & SCTLR_UMA)) {
         raise_exception(env, EXCP_UDEF);
     }
 
@@ -388,7 +388,7 @@  void HELPER(clear_pstate_ss)(CPUARMState *env)
 void HELPER(pre_hvc)(CPUARMState *env)
 {
     ARMCPU *cpu = arm_env_get_cpu(env);
-    int cur_el = arm_current_pl(env);
+    int cur_el = arm_current_el(env);
     /* FIXME: Use actual secure state.  */
     bool secure = false;
     bool undef;
@@ -428,7 +428,7 @@  void HELPER(pre_hvc)(CPUARMState *env)
 void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
 {
     ARMCPU *cpu = arm_env_get_cpu(env);
-    int cur_el = arm_current_pl(env);
+    int cur_el = arm_current_el(env);
     /* FIXME: Use real secure state.  */
     bool secure = false;
     bool smd = env->cp15.scr_el3 & SCR_SMD;
@@ -463,7 +463,7 @@  void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
 
 void HELPER(exception_return)(CPUARMState *env)
 {
-    int cur_el = arm_current_pl(env);
+    int cur_el = arm_current_el(env);
     unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
     uint32_t spsr = env->banked_spsr[spsr_idx];
     int new_el, i;
@@ -580,7 +580,7 @@  static bool linked_bp_matches(ARMCPU *cpu, int lbn)
 
     switch (bt) {
     case 3: /* linked context ID match */
-        if (arm_current_pl(env) > 1) {
+        if (arm_current_el(env) > 1) {
             /* Context matches never fire in EL2 or (AArch64) EL3 */
             return false;
         }
@@ -660,7 +660,7 @@  static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
      * rely on this behaviour currently.
      * For breakpoints we do want to use the current CPU state.
      */
-    switch (arm_current_pl(env)) {
+    switch (arm_current_el(env)) {
     case 3:
     case 2:
         if (!hmc) {
@@ -747,7 +747,7 @@  void arm_debug_excp_handler(CPUState *cs)
             cs->watchpoint_hit = NULL;
             if (check_watchpoints(cpu)) {
                 bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
-                bool same_el = arm_debug_target_el(env) == arm_current_pl(env);
+                bool same_el = arm_debug_target_el(env) == arm_current_el(env);
 
                 env->exception.syndrome = syn_watchpoint(same_el, 0, wnr);
                 if (extended_addresses_enabled(env)) {
@@ -763,7 +763,7 @@  void arm_debug_excp_handler(CPUState *cs)
         }
     } else {
         if (check_breakpoints(cpu)) {
-            bool same_el = (arm_debug_target_el(env) == arm_current_pl(env));
+            bool same_el = (arm_debug_target_el(env) == arm_current_el(env));
             env->exception.syndrome = syn_breakpoint(same_el);
             if (extended_addresses_enabled(env)) {
                 env->exception.fsr = (1 << 9) | 0x22;
diff --git a/target-arm/translate-a64.c b/target-arm/translate-a64.c
index b15261b..b141452 100644
--- a/target-arm/translate-a64.c
+++ b/target-arm/translate-a64.c
@@ -1226,7 +1226,7 @@  static void handle_msr_i(DisasContext *s, uint32_t insn,
     int op = op1 << 3 | op2;
     switch (op) {
     case 0x05: /* SPSel */
-        if (s->current_pl == 0) {
+        if (s->current_el == 0) {
             unallocated_encoding(s);
             return;
         }
@@ -1323,7 +1323,7 @@  static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
     }
 
     /* Check access permissions */
-    if (!cp_access_ok(s->current_pl, ri, isread)) {
+    if (!cp_access_ok(s->current_el, ri, isread)) {
         unallocated_encoding(s);
         return;
     }
@@ -1362,7 +1362,7 @@  static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
          * guaranteed to be constant by the tb flags.
          */
         tcg_rt = cpu_reg(s, rt);
-        tcg_gen_movi_i64(tcg_rt, s->current_pl << 2);
+        tcg_gen_movi_i64(tcg_rt, s->current_el << 2);
         return;
     case ARM_CP_DC_ZVA:
         /* Writes clear the aligned block of memory which rt points into. */
@@ -1485,7 +1485,7 @@  static void disas_exc(DisasContext *s, uint32_t insn)
             gen_exception_insn(s, 0, EXCP_SWI, syn_aa64_svc(imm16));
             break;
         case 2:
-            if (s->current_pl == 0) {
+            if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el == 0) {
                 unallocated_encoding(s);
                 break;
             }
@@ -1498,7 +1498,7 @@  static void disas_exc(DisasContext *s, uint32_t insn)
             gen_exception_insn(s, 0, EXCP_HVC, syn_aa64_hvc(imm16));
             break;
         case 3:
-            if (s->current_pl == 0) {
+            if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->current_el == 0) {
                 unallocated_encoding(s);
                 break;
             }
@@ -1575,7 +1575,7 @@  static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
         tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
         break;
     case 4: /* ERET */
-        if (s->current_pl == 0) {
+        if (s->current_el == 0) {
             unallocated_encoding(s);
             return;
         }
@@ -10930,7 +10930,7 @@  void gen_intermediate_code_internal_a64(ARMCPU *cpu,
     dc->vec_len = 0;
     dc->vec_stride = 0;
     dc->cp_regs = cpu->cp_regs;
-    dc->current_pl = arm_current_pl(env);
+    dc->current_el = arm_current_el(env);
     dc->features = env->features;
 
     /* Single step state. The code-generation logic here is:
@@ -10951,7 +10951,7 @@  void gen_intermediate_code_internal_a64(ARMCPU *cpu,
     dc->ss_active = ARM_TBFLAG_AA64_SS_ACTIVE(tb->flags);
     dc->pstate_ss = ARM_TBFLAG_AA64_PSTATE_SS(tb->flags);
     dc->is_ldex = false;
-    dc->ss_same_el = (arm_debug_target_el(env) == dc->current_pl);
+    dc->ss_same_el = (arm_debug_target_el(env) == dc->current_el);
 
     init_tmp_a64_array(dc);
 
diff --git a/target-arm/translate.c b/target-arm/translate.c
index 4e764d3..91958b6 100644
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -7074,7 +7074,7 @@  static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
                             ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2));
     if (ri) {
         /* Check access permissions */
-        if (!cp_access_ok(s->current_pl, ri, isread)) {
+        if (!cp_access_ok(s->current_el, ri, isread)) {
             return 1;
         }
 
@@ -11008,7 +11008,7 @@  static inline void gen_intermediate_code_internal(ARMCPU *cpu,
     dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
     dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags);
     dc->cp_regs = cpu->cp_regs;
-    dc->current_pl = arm_current_pl(env);
+    dc->current_el = arm_current_el(env);
     dc->features = env->features;
 
     /* Single step state. The code-generation logic here is:
diff --git a/target-arm/translate.h b/target-arm/translate.h
index 83fbf38..41a9071 100644
--- a/target-arm/translate.h
+++ b/target-arm/translate.h
@@ -29,7 +29,7 @@  typedef struct DisasContext {
      */
     uint32_t svc_imm;
     int aarch64;
-    int current_pl;
+    int current_el;
     GHashTable *cp_regs;
     uint64_t features; /* CPU features bits */
     /* Because unallocated encodings generate different exception syndrome
@@ -68,7 +68,7 @@  static inline int arm_dc_feature(DisasContext *dc, int feature)
 
 static inline int get_mem_index(DisasContext *s)
 {
-    return s->current_pl;
+    return s->current_el;
 }
 
 /* target-specific extra values for is_jmp */
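
Putting the cpu.h hunks back together, the renamed helper after this patch reads roughly as follows (a reconstruction: the USR-mode check in the middle is unchanged context that the diff above does not show, so it is recalled rather than quoted):

/* Return the current Exception Level (as per ARMv8; note that this differs
 * from the ARMv7 Privilege Level).
 */
static inline int arm_current_el(CPUARMState *env)
{
    if (env->aarch64) {
        return extract32(env->pstate, 2, 2);
    }

    if ((env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR) {
        /* Unchanged context, not visible in the hunk above. */
        return 0;
    }
    /* We don't currently implement the Virtualization or TrustZone
     * extensions, so EL2 and EL3 don't exist for us.
     */
    return 1;
}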