[v3,02/15] softmmu: provide softmmu access type enum

Message ID: 1414154549-2102-3-git-send-email-leon.alrae@imgtec.com
State: New

Commit Message

Leon Alrae Oct. 24, 2014, 12:42 p.m. UTC
New MIPS features depend on the access type, and an enum is more convenient than
using the numbers directly.

Signed-off-by: Leon Alrae <leon.alrae@imgtec.com>
---
 include/exec/cpu-common.h |  6 ++++++
 softmmu_template.h        | 26 ++++++++++++++++----------
 2 files changed, 22 insertions(+), 10 deletions(-)
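
For quick context, a minimal, self-contained C sketch (not QEMU code; the helper
access_type_name() is hypothetical) of what the enum buys at a call site: the
bare 0/1/2 constants become self-documenting values that a callee can switch on.

#include <stdio.h>

/* Same values as in the patch; the enum only gives the existing numbers names. */
typedef enum MMUAccessType {
    MMU_DATA_LOAD  = 0,
    MMU_DATA_STORE = 1,
    MMU_INST_FETCH = 2
} MMUAccessType;

/* Hypothetical stand-in for a per-target hook such as tlb_fill() or
 * cpu_unaligned_access(); it only illustrates dispatching on the enum. */
static const char *access_type_name(MMUAccessType access_type)
{
    switch (access_type) {
    case MMU_DATA_LOAD:
        return "data load";
    case MMU_DATA_STORE:
        return "data store";
    case MMU_INST_FETCH:
        return "instruction fetch";
    default:
        return "unknown access type";
    }
}

int main(void)
{
    /* Before this patch a caller would pass the bare number 1 here;
     * with the enum the intent is visible at the call site. */
    printf("faulting access: %s\n", access_type_name(MMU_DATA_STORE));
    return 0;
}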

Comments

Thomas Huth Oct. 24, 2014, 1:59 p.m. UTC | #1
On Fri, 24 Oct 2014 13:42:16 +0100
Leon Alrae <leon.alrae@imgtec.com> wrote:

> New MIPS features depend on the access type, and an enum is more convenient
> than using the numbers directly.
> 
> Signed-off-by: Leon Alrae <leon.alrae@imgtec.com>
> ---
>  include/exec/cpu-common.h |  6 ++++++
>  softmmu_template.h        | 26 ++++++++++++++++----------
>  2 files changed, 22 insertions(+), 10 deletions(-)
> 
> diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
> index e3ec4c8..427b851 100644
> --- a/include/exec/cpu-common.h
> +++ b/include/exec/cpu-common.h
> @@ -26,6 +26,12 @@ typedef struct CPUListState {
>      FILE *file;
>  } CPUListState;
> 
> +typedef enum MMUAccessType {
> +    MMU_DATA_LOAD  = 0,
> +    MMU_DATA_STORE = 1,
> +    MMU_INST_FETCH = 2
> +} MMUAccessType;
> +
>  #if !defined(CONFIG_USER_ONLY)
> 
>  enum device_endian {
> diff --git a/softmmu_template.h b/softmmu_template.h
> index 88e3390..6b4e615 100644
> --- a/softmmu_template.h
> +++ b/softmmu_template.h
> @@ -67,10 +67,10 @@
>  #endif
> 
>  #ifdef SOFTMMU_CODE_ACCESS
> -#define READ_ACCESS_TYPE 2
> +#define READ_ACCESS_TYPE MMU_INST_FETCH
>  #define ADDR_READ addr_code
>  #else
> -#define READ_ACCESS_TYPE 0
> +#define READ_ACCESS_TYPE MMU_DATA_LOAD
>  #define ADDR_READ addr_read
>  #endif
> 
> @@ -396,11 +396,12 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>          != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
>  #ifdef ALIGNED_ONLY
>          if ((addr & (DATA_SIZE - 1)) != 0) {
> -            cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
> +            cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
> +                                 mmu_idx, retaddr);
>          }
>  #endif
>          if (!VICTIM_TLB_HIT(addr_write)) {
> -            tlb_fill(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
> +            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
>          }
>          tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
>      }
> @@ -427,7 +428,8 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>          int i;
>      do_unaligned_access:
>  #ifdef ALIGNED_ONLY
> -        cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
> +        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
> +                             mmu_idx, retaddr);
>  #endif
>          /* XXX: not efficient, but simple */
>          /* Note: relies on the fact that tlb_fill() does not remove the
> @@ -446,7 +448,8 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>      /* Handle aligned access or unaligned access in the same page.  */
>  #ifdef ALIGNED_ONLY
>      if ((addr & (DATA_SIZE - 1)) != 0) {
> -        cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
> +        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
> +                             mmu_idx, retaddr);
>      }
>  #endif
> 
> @@ -474,11 +477,12 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>          != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
>  #ifdef ALIGNED_ONLY
>          if ((addr & (DATA_SIZE - 1)) != 0) {
> -            cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
> +            cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
> +                                 mmu_idx, retaddr);
>          }
>  #endif
>          if (!VICTIM_TLB_HIT(addr_write)) {
> -            tlb_fill(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
> +            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
>          }
>          tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
>      }
> @@ -505,7 +509,8 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>          int i;
>      do_unaligned_access:
>  #ifdef ALIGNED_ONLY
> -        cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
> +        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
> +                             mmu_idx, retaddr);
>  #endif
>          /* XXX: not efficient, but simple */
>          /* Note: relies on the fact that tlb_fill() does not remove the
> @@ -524,7 +529,8 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>      /* Handle aligned access or unaligned access in the same page.  */
>  #ifdef ALIGNED_ONLY
>      if ((addr & (DATA_SIZE - 1)) != 0) {
> -        cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
> +        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
> +                             mmu_idx, retaddr);
>      }
>  #endif
> 

I very much like the idea of getting rid of these "magic" numbers!

Reviewed-by: Thomas Huth <thuth@linux.vnet.ibm.com>

Patch

diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
index e3ec4c8..427b851 100644
--- a/include/exec/cpu-common.h
+++ b/include/exec/cpu-common.h
@@ -26,6 +26,12 @@ typedef struct CPUListState {
     FILE *file;
 } CPUListState;
 
+typedef enum MMUAccessType {
+    MMU_DATA_LOAD  = 0,
+    MMU_DATA_STORE = 1,
+    MMU_INST_FETCH = 2
+} MMUAccessType;
+
 #if !defined(CONFIG_USER_ONLY)
 
 enum device_endian {
diff --git a/softmmu_template.h b/softmmu_template.h
index 88e3390..6b4e615 100644
--- a/softmmu_template.h
+++ b/softmmu_template.h
@@ -67,10 +67,10 @@ 
 #endif
 
 #ifdef SOFTMMU_CODE_ACCESS
-#define READ_ACCESS_TYPE 2
+#define READ_ACCESS_TYPE MMU_INST_FETCH
 #define ADDR_READ addr_code
 #else
-#define READ_ACCESS_TYPE 0
+#define READ_ACCESS_TYPE MMU_DATA_LOAD
 #define ADDR_READ addr_read
 #endif
 
@@ -396,11 +396,12 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
 #ifdef ALIGNED_ONLY
         if ((addr & (DATA_SIZE - 1)) != 0) {
-            cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
+            cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+                                 mmu_idx, retaddr);
         }
 #endif
         if (!VICTIM_TLB_HIT(addr_write)) {
-            tlb_fill(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
+            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
         }
         tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
     }
@@ -427,7 +428,8 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         int i;
     do_unaligned_access:
 #ifdef ALIGNED_ONLY
-        cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
+        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+                             mmu_idx, retaddr);
 #endif
         /* XXX: not efficient, but simple */
         /* Note: relies on the fact that tlb_fill() does not remove the
@@ -446,7 +448,8 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     /* Handle aligned access or unaligned access in the same page.  */
 #ifdef ALIGNED_ONLY
     if ((addr & (DATA_SIZE - 1)) != 0) {
-        cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
+        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+                             mmu_idx, retaddr);
     }
 #endif
 
@@ -474,11 +477,12 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
 #ifdef ALIGNED_ONLY
         if ((addr & (DATA_SIZE - 1)) != 0) {
-            cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
+            cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+                                 mmu_idx, retaddr);
         }
 #endif
         if (!VICTIM_TLB_HIT(addr_write)) {
-            tlb_fill(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
+            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
         }
         tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
     }
@@ -505,7 +509,8 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         int i;
     do_unaligned_access:
 #ifdef ALIGNED_ONLY
-        cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
+        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+                             mmu_idx, retaddr);
 #endif
         /* XXX: not efficient, but simple */
         /* Note: relies on the fact that tlb_fill() does not remove the
@@ -524,7 +529,8 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     /* Handle aligned access or unaligned access in the same page.  */
 #ifdef ALIGNED_ONLY
     if ((addr & (DATA_SIZE - 1)) != 0) {
-        cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
+        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+                             mmu_idx, retaddr);
     }
 #endif