
[5/9] accel/tcg: Hoist CPUClass arg to functions with external linkage

Message ID 20240124101639.30056-6-philmd@linaro.org
State New
Series accel/tcg: Extract some x86-specific code

Commit Message

Philippe Mathieu-Daudé Jan. 24, 2024, 10:16 a.m. UTC
Hoist the CPUClass argument from most of these internal helpers:

 - check_for_breakpoints_slow()
 - check_for_breakpoints()
 - cpu_tb_exec()
 - cpu_exec_enter()
 - cpu_exec_exit()
 - cpu_handle_halt()
 - cpu_handle_debug_exception()
 - cpu_handle_exception()
 - need_replay_interrupt()
 - cpu_handle_interrupt()
 - cpu_loop_exec_tb()
 - cpu_exec_loop()
 - cpu_exec_setjmp()

to the following ones with external linkage:

 - lookup_tb_ptr()
 - cpu_exec_step_atomic()
 - cpu_exec()

Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
---
 accel/tcg/cpu-exec.c | 82 ++++++++++++++++++++------------------------
 1 file changed, 37 insertions(+), 45 deletions(-)
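
The change is mechanical: each static helper that previously called CPU_GET_CLASS(cpu) internally now takes the CPUClass pointer as its first parameter, and the entry points with external linkage resolve it once and pass it down. A condensed sketch of the before/after shape, using cpu_exec_enter() as a stand-in for the whole list above (the full diff is below):

    /* Before: every static helper re-resolves the class itself. */
    static void cpu_exec_enter(CPUState *cpu)
    {
        CPUClass *cc = CPU_GET_CLASS(cpu);   /* QOM lookup on each call */

        if (cc->tcg_ops->cpu_exec_enter) {
            cc->tcg_ops->cpu_exec_enter(cpu);
        }
    }

    /* After: the class pointer is threaded in from the caller. */
    static void cpu_exec_enter(CPUClass *cc, CPUState *cpu)
    {
        if (cc->tcg_ops->cpu_exec_enter) {
            cc->tcg_ops->cpu_exec_enter(cpu);
        }
    }

With that, cpu_exec(), cpu_exec_step_atomic() and helper_lookup_tb_ptr() each do the CPU_GET_CLASS() lookup once near the top and hand the result to their static callees, as the hunks below show.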

Comments

Richard Henderson Jan. 24, 2024, 10:59 p.m. UTC | #1
On 1/24/24 20:16, Philippe Mathieu-Daudé wrote:
> Hoist the CPUClass argument from most of these internal helpers:
> 
>   - check_for_breakpoints_slow()
>   - check_for_breakpoints()
>   - cpu_tb_exec()
>   - cpu_exec_enter()
>   - cpu_exec_exit()
>   - cpu_handle_halt()
>   - cpu_handle_debug_exception()
>   - cpu_handle_exception()
>   - need_replay_interrupt()
>   - cpu_handle_interrupt()
>   - cpu_loop_exec_tb()
>   - cpu_exec_loop()
>   - cpu_exec_setjmp()
> 
> to the following ones with external linkage:
> 
>   - lookup_tb_ptr()
>   - cpu_exec_step_atomic()
>   - cpu_exec()
> 
> Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
> ---
>   accel/tcg/cpu-exec.c | 82 ++++++++++++++++++++------------------------
>   1 file changed, 37 insertions(+), 45 deletions(-)

I'm not so keen on this.  Does it really make a difference?
What about simply making more use of CPUState->cc instead?


r~
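
For context on the suggestion above: CPUState carries a cached class pointer, cpu->cc, filled in when the CPU object is initialized, so the helpers could keep their original signatures and still avoid the repeated CPU_GET_CLASS() lookup. A minimal sketch of that alternative (what Richard is suggesting, not what this patch does), again using cpu_exec_enter() as the example:

    /* Sketch of the CPUState->cc alternative: no extra parameter, and no
     * QOM lookup in the fast path -- just a field load from CPUState. */
    static void cpu_exec_enter(CPUState *cpu)
    {
        CPUClass *cc = cpu->cc;   /* cached when the CPU object was created */

        if (cc->tcg_ops->cpu_exec_enter) {
            cc->tcg_ops->cpu_exec_enter(cpu);
        }
    }
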
Philippe Mathieu-Daudé Jan. 25, 2024, 4:46 a.m. UTC | #2
On 24/1/24 23:59, Richard Henderson wrote:
> On 1/24/24 20:16, Philippe Mathieu-Daudé wrote:
>> Hoist the CPUClass argument from most of these internal helpers:
>>
>>   - check_for_breakpoints_slow()
>>   - check_for_breakpoints()
>>   - cpu_tb_exec()
>>   - cpu_exec_enter()
>>   - cpu_exec_exit()
>>   - cpu_handle_halt()
>>   - cpu_handle_debug_exception()
>>   - cpu_handle_exception()
>>   - need_replay_interrupt()
>>   - cpu_handle_interrupt()
>>   - cpu_loop_exec_tb()
>>   - cpu_exec_loop()
>>   - cpu_exec_setjmp()
>>
>> to the following ones with external linkage:
>>
>>   - lookup_tb_ptr()
>>   - cpu_exec_step_atomic()
>>   - cpu_exec()
>>
>> Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
>> ---
>>   accel/tcg/cpu-exec.c | 82 ++++++++++++++++++++------------------------
>>   1 file changed, 37 insertions(+), 45 deletions(-)
> 
> I'm not so keen on this.  Does it really make a difference?
> What about simply making more use of CPUState->cc instead?

TIL CPUState->cc... Which makes me wonder why this isn't handled
generically via QOM macros.

> 
> 
> r~
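
For context on the remark above: the generic QOM checkers (CPU_GET_CLASS() and friends) resolve the class by going through the Object header on every call, whereas CPUState::cc is a hand-rolled per-instance cache of that result. A purely illustrative sketch of the difference, with simplified stand-in struct and function definitions rather than QEMU's actual ones:

    /* Illustrative only -- simplified stand-ins, not QEMU's definitions. */
    typedef struct ObjectClass { const char *type_name; } ObjectClass;
    typedef struct Object { ObjectClass *class; } Object;

    typedef struct CPUClass { ObjectClass parent_class; /* tcg_ops, ... */ } CPUClass;
    typedef struct CPUState {
        Object parent_obj;
        CPUClass *cc;          /* per-instance cache, set once at init */
    } CPUState;

    /* Generic QOM-style lookup: chases obj->class on every use
     * (and may add a dynamic type check in debug builds). */
    static inline CPUClass *cpu_get_class_generic(CPUState *cpu)
    {
        return (CPUClass *)cpu->parent_obj.class;
    }

    /* Cached variant: a single field load. */
    static inline CPUClass *cpu_get_class_cached(CPUState *cpu)
    {
        return cpu->cc;
    }
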

Patch

diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index d61b285d5e..b10472cbc7 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -324,8 +324,8 @@  static void log_cpu_exec(vaddr pc, CPUState *cpu,
     }
 }
 
-static bool check_for_breakpoints_slow(CPUState *cpu, vaddr pc,
-                                       uint32_t *cflags)
+static bool check_for_breakpoints_slow(CPUClass *cc, CPUState *cpu,
+                                       vaddr pc, uint32_t *cflags)
 {
     CPUBreakpoint *bp;
     bool match_page = false;
@@ -357,7 +357,6 @@  static bool check_for_breakpoints_slow(CPUState *cpu, vaddr pc,
 #ifdef CONFIG_USER_ONLY
                 g_assert_not_reached();
 #else
-                CPUClass *cc = CPU_GET_CLASS(cpu);
                 assert(cc->tcg_ops->debug_check_breakpoint);
                 match_bp = cc->tcg_ops->debug_check_breakpoint(cpu);
 #endif
@@ -390,11 +389,11 @@  static bool check_for_breakpoints_slow(CPUState *cpu, vaddr pc,
     return false;
 }
 
-static inline bool check_for_breakpoints(CPUState *cpu, vaddr pc,
-                                         uint32_t *cflags)
+static inline bool check_for_breakpoints(CPUClass *cc, CPUState *cpu,
+                                         vaddr pc, uint32_t *cflags)
 {
     return unlikely(!QTAILQ_EMPTY(&cpu->breakpoints)) &&
-        check_for_breakpoints_slow(cpu, pc, cflags);
+        check_for_breakpoints_slow(cc, cpu, pc, cflags);
 }
 
 /**
@@ -408,6 +407,7 @@  static inline bool check_for_breakpoints(CPUState *cpu, vaddr pc,
 const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
 {
     CPUState *cpu = env_cpu(env);
+    CPUClass *cc = CPU_GET_CLASS(cpu);
     TranslationBlock *tb;
     vaddr pc;
     uint64_t cs_base;
@@ -416,7 +416,7 @@  const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
     cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
 
     cflags = curr_cflags(cpu);
-    if (check_for_breakpoints(cpu, pc, &cflags)) {
+    if (check_for_breakpoints(cc, cpu, pc, &cflags)) {
         cpu_loop_exit(cpu);
     }
 
@@ -443,7 +443,7 @@  const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
  * affect the impact of CFI in environment with high security requirements
  */
 static inline TranslationBlock * QEMU_DISABLE_CFI
-cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
+cpu_tb_exec(CPUClass *cc, CPUState *cpu, TranslationBlock *itb, int *tb_exit)
 {
     CPUArchState *env = cpu_env(cpu);
     uintptr_t ret;
@@ -476,8 +476,6 @@  cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
          * counter hit zero); we must restore the guest PC to the address
          * of the start of the TB.
          */
-        CPUClass *cc = CPU_GET_CLASS(cpu);
-
         if (cc->tcg_ops->synchronize_from_tb) {
             cc->tcg_ops->synchronize_from_tb(cpu, last_tb);
         } else {
@@ -509,19 +507,15 @@  cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
 }
 
 
-static void cpu_exec_enter(CPUState *cpu)
+static void cpu_exec_enter(CPUClass *cc, CPUState *cpu)
 {
-    CPUClass *cc = CPU_GET_CLASS(cpu);
-
     if (cc->tcg_ops->cpu_exec_enter) {
         cc->tcg_ops->cpu_exec_enter(cpu);
     }
 }
 
-static void cpu_exec_exit(CPUState *cpu)
+static void cpu_exec_exit(CPUClass *cc, CPUState *cpu)
 {
-    CPUClass *cc = CPU_GET_CLASS(cpu);
-
     if (cc->tcg_ops->cpu_exec_exit) {
         cc->tcg_ops->cpu_exec_exit(cpu);
     }
@@ -566,6 +560,7 @@  static void cpu_exec_longjmp_cleanup(CPUState *cpu)
 
 void cpu_exec_step_atomic(CPUState *cpu)
 {
+    CPUClass *cc = CPU_GET_CLASS(cpu);
     CPUArchState *env = cpu_env(cpu);
     TranslationBlock *tb;
     vaddr pc;
@@ -600,11 +595,11 @@  void cpu_exec_step_atomic(CPUState *cpu)
             mmap_unlock();
         }
 
-        cpu_exec_enter(cpu);
+        cpu_exec_enter(cc, cpu);
         /* execute the generated code */
         trace_exec_tb(tb, pc);
-        cpu_tb_exec(cpu, tb, &tb_exit);
-        cpu_exec_exit(cpu);
+        cpu_tb_exec(cc, cpu, tb, &tb_exit);
+        cpu_exec_exit(cc, cpu);
     } else {
         cpu_exec_longjmp_cleanup(cpu);
     }
@@ -673,7 +668,7 @@  static inline void tb_add_jump(TranslationBlock *tb, int n,
     return;
 }
 
-static inline bool cpu_handle_halt(CPUState *cpu)
+static inline bool cpu_handle_halt(CPUClass *cc, CPUState *cpu)
 {
 #ifndef CONFIG_USER_ONLY
     if (cpu->halted) {
@@ -697,9 +692,8 @@  static inline bool cpu_handle_halt(CPUState *cpu)
     return false;
 }
 
-static inline void cpu_handle_debug_exception(CPUState *cpu)
+static inline void cpu_handle_debug_exception(CPUClass *cc, CPUState *cpu)
 {
-    CPUClass *cc = CPU_GET_CLASS(cpu);
     CPUWatchpoint *wp;
 
     if (!cpu->watchpoint_hit) {
@@ -713,7 +707,7 @@  static inline void cpu_handle_debug_exception(CPUState *cpu)
     }
 }
 
-static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
+static inline bool cpu_handle_exception(CPUClass *cc, CPUState *cpu, int *ret)
 {
     if (cpu->exception_index < 0) {
 #ifndef CONFIG_USER_ONLY
@@ -730,7 +724,7 @@  static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
         /* exit request from the cpu execution loop */
         *ret = cpu->exception_index;
         if (*ret == EXCP_DEBUG) {
-            cpu_handle_debug_exception(cpu);
+            cpu_handle_debug_exception(cc, cpu);
         }
         cpu->exception_index = -1;
         return true;
@@ -740,7 +734,6 @@  static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
            which will be handled outside the cpu execution
            loop */
 #if defined(TARGET_I386)
-        CPUClass *cc = CPU_GET_CLASS(cpu);
         cc->tcg_ops->fake_user_interrupt(cpu);
 #endif /* TARGET_I386 */
         *ret = cpu->exception_index;
@@ -748,7 +741,6 @@  static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
         return true;
 #else
         if (replay_exception()) {
-            CPUClass *cc = CPU_GET_CLASS(cpu);
             bql_lock();
             cc->tcg_ops->do_interrupt(cpu);
             bql_unlock();
@@ -761,7 +753,7 @@  static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
                  * next instruction.
                  */
                 *ret = EXCP_DEBUG;
-                cpu_handle_debug_exception(cpu);
+                cpu_handle_debug_exception(cc, cpu);
                 return true;
             }
         } else if (!replay_has_interrupt()) {
@@ -781,7 +773,7 @@  static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
  * "real" interrupt event later. It does not need to be recorded for
  * replay purposes.
  */
-static inline bool need_replay_interrupt(int interrupt_request)
+static inline bool need_replay_interrupt(CPUClass *cc, int interrupt_request)
 {
 #if defined(TARGET_I386)
     return !(interrupt_request & CPU_INTERRUPT_POLL);
@@ -802,7 +794,7 @@  static inline bool icount_exit_request(CPUState *cpu)
     return cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0;
 }
 
-static inline bool cpu_handle_interrupt(CPUState *cpu,
+static inline bool cpu_handle_interrupt(CPUClass *cc, CPUState *cpu,
                                         TranslationBlock **last_tb)
 {
     /*
@@ -870,11 +862,9 @@  static inline bool cpu_handle_interrupt(CPUState *cpu,
            True when it is, and we should restart on a new TB,
            and via longjmp via cpu_loop_exit.  */
         else {
-            CPUClass *cc = CPU_GET_CLASS(cpu);
-
             if (cc->tcg_ops->cpu_exec_interrupt &&
                 cc->tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) {
-                if (need_replay_interrupt(interrupt_request)) {
+                if (need_replay_interrupt(cc, interrupt_request)) {
                     replay_interrupt();
                 }
                 /*
@@ -918,14 +908,15 @@  static inline bool cpu_handle_interrupt(CPUState *cpu,
     return false;
 }
 
-static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
+static inline void cpu_loop_exec_tb(CPUClass *cc, CPUState *cpu,
+                                    TranslationBlock *tb,
                                     vaddr pc, TranslationBlock **last_tb,
                                     int *tb_exit)
 {
     int32_t insns_left;
 
     trace_exec_tb(tb, pc);
-    tb = cpu_tb_exec(cpu, tb, tb_exit);
+    tb = cpu_tb_exec(cc, cpu, tb, tb_exit);
     if (*tb_exit != TB_EXIT_REQUESTED) {
         *last_tb = tb;
         return;
@@ -970,16 +961,16 @@  static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
 /* main execution loop */
 
 static int __attribute__((noinline))
-cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
+cpu_exec_loop(CPUClass *cc, CPUState *cpu, SyncClocks *sc)
 {
     int ret;
 
     /* if an exception is pending, we execute it here */
-    while (!cpu_handle_exception(cpu, &ret)) {
+    while (!cpu_handle_exception(cc, cpu, &ret)) {
         TranslationBlock *last_tb = NULL;
         int tb_exit = 0;
 
-        while (!cpu_handle_interrupt(cpu, &last_tb)) {
+        while (!cpu_handle_interrupt(cc, cpu, &last_tb)) {
             TranslationBlock *tb;
             vaddr pc;
             uint64_t cs_base;
@@ -1001,7 +992,7 @@  cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
                 cpu->cflags_next_tb = -1;
             }
 
-            if (check_for_breakpoints(cpu, pc, &cflags)) {
+            if (check_for_breakpoints(cc, cpu, pc, &cflags)) {
                 break;
             }
 
@@ -1046,7 +1037,7 @@  cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
                 tb_add_jump(last_tb, tb_exit, tb);
             }
 
-            cpu_loop_exec_tb(cpu, tb, pc, &last_tb, &tb_exit);
+            cpu_loop_exec_tb(cc, cpu, tb, pc, &last_tb, &tb_exit);
 
             /* Try to align the host and virtual clocks
                if the guest is in advance */
@@ -1056,30 +1047,31 @@  cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
     return ret;
 }
 
-static int cpu_exec_setjmp(CPUState *cpu, SyncClocks *sc)
+static int cpu_exec_setjmp(CPUClass *cc, CPUState *cpu, SyncClocks *sc)
 {
     /* Prepare setjmp context for exception handling. */
     if (unlikely(sigsetjmp(cpu->jmp_env, 0) != 0)) {
         cpu_exec_longjmp_cleanup(cpu);
     }
 
-    return cpu_exec_loop(cpu, sc);
+    return cpu_exec_loop(cc, cpu, sc);
 }
 
 int cpu_exec(CPUState *cpu)
 {
     int ret;
     SyncClocks sc = { 0 };
+    CPUClass *cc = CPU_GET_CLASS(cpu);
 
     /* replay_interrupt may need current_cpu */
     current_cpu = cpu;
 
-    if (cpu_handle_halt(cpu)) {
+    if (cpu_handle_halt(cc, cpu)) {
         return EXCP_HALTED;
     }
 
     WITH_RCU_READ_LOCK_GUARD() {
-        cpu_exec_enter(cpu);
+        cpu_exec_enter(cc, cpu);
 
         /*
          * Calculate difference between guest clock and host clock.
@@ -1089,9 +1081,9 @@  int cpu_exec(CPUState *cpu)
          */
         init_delay_params(&sc, cpu);
 
-        ret = cpu_exec_setjmp(cpu, &sc);
+        ret = cpu_exec_setjmp(cc, cpu, &sc);
 
-        cpu_exec_exit(cpu);
+        cpu_exec_exit(cc, cpu);
     };
 
     return ret;