[v7,00/73] per-CPU locks

Message ID: 20190304181813.8075-1-cota@braap.org
State: New

Commit Message

Emilio Cota March 4, 2019, 6:17 p.m. UTC
v6: https://lists.gnu.org/archive/html/qemu-devel/2019-01/msg07650.html

All patches in the series have reviews now. Thanks everyone!

I've tested all patches with `make check-qtest -j' for all targets.
The series is checkpatch-clean (just some warnings about __COVERITY__).

You can fetch the series from:
  https://github.com/cota/qemu/tree/cpu-lock-v7

---
v6->v7:

- Rebase on master
  - Add a cpu_halted_set call to arm code that wasn't there in v6

- Add R-b's and Ack's.

- Add comment to patch 3's log to explain why the bitmap is added
  there, even though it only gains a user at the end of the series.

- Fix "prevent deadlock" comments before assertions; use
  "enforce locking order" instead, which is more accurate.

- Add a few more comments, as suggested by Alex.

v6->v7 diff (before rebase) below.

Thanks,

		Emilio
---
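
A note on the "locking order" the changelog refers to: the series' lock
hierarchy is BQL first, cpu->lock second. A thread that holds cpu->lock and
finds it needs the BQL must back out and reacquire in order. A minimal sketch
of that dance (the function below is illustrative, not from the tree;
cpu_mutex_lock/unlock and qemu_mutex_lock_iothread are the series' real names):

    /* Illustrative only: re-establish "BQL before cpu->lock". */
    static void take_bql_with_cpu_lock_held(CPUState *cpu)
    {
        cpu_mutex_unlock(cpu);        /* drop cpu->lock first */
        qemu_mutex_lock_iothread();   /* then take the BQL */
        cpu_mutex_lock(cpu);          /* then retake cpu->lock */
    }

This is exactly the pattern in the cpu-exec.c hunk further down, and it is
what the g_assert(no_cpu_mutex_locked()) in qemu_mutex_lock_iothread_impl()
enforces from the other side.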

Comments

Alex Bennée March 5, 2019, 9:16 a.m. UTC | #1
Emilio G. Cota <cota@braap.org> writes:

> v6: https://lists.gnu.org/archive/html/qemu-devel/2019-01/msg07650.html
>
> All patches in the series have reviews now. Thanks everyone!
>
> I've tested all patches with `make check-qtest -j' for all targets.
> The series is checkpatch-clean (just some warnings about __COVERITY__).
>
> You can fetch the series from:
>   https://github.com/cota/qemu/tree/cpu-lock-v7

Just to say I've applied and tested the whole series, and I'm still
seeing the improvements, so I think it's ready to be picked up:

  Tested-by: Alex Bennée <alex.bennee@linaro.org>


>
> ---
> v6->v7:
>
> - Rebase on master
>   - Add a cpu_halted_set call to arm code that wasn't there in v6
>
> - Add R-b's and Ack's.
>
> - Add comment to patch 3's log to explain why the bitmap is added
>   there, even though it only gains a user at the end of the series.
>
> - Fix "prevent deadlock" comments before assertions; use
>   "enforce locking order" instead, which is more accurate.
>
> - Add a few more comments, as suggested by Alex.
>
> v6->v7 diff (before rebase) below.
>
> Thanks,
>
> 		Emilio
> ---
> diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
> index e4ae04f72c..a513457520 100644
> --- a/accel/tcg/cpu-exec.c
> +++ b/accel/tcg/cpu-exec.c
> @@ -435,7 +435,7 @@ static inline bool cpu_handle_halt_locked(CPUState *cpu)
>              && replay_interrupt()) {
>              X86CPU *x86_cpu = X86_CPU(cpu);
>
> -            /* prevent deadlock; cpu_mutex must be acquired _after_ the BQL */
> +            /* locking order: cpu_mutex must be acquired _after_ the BQL */
>              cpu_mutex_unlock(cpu);
>              qemu_mutex_lock_iothread();
>              cpu_mutex_lock(cpu);
> diff --git a/cpus.c b/cpus.c
> index 4f17fe25bf..82a93f2a5a 100644
> --- a/cpus.c
> +++ b/cpus.c
> @@ -2062,7 +2062,7 @@ void qemu_mutex_lock_iothread_impl(const char *file, int line)
>  {
>      QemuMutexLockFunc bql_lock = atomic_read(&qemu_bql_mutex_lock_func);
>
> -    /* prevent deadlock with CPU mutex */
> +    /* enforce locking order */
>      g_assert(no_cpu_mutex_locked());
>
>      g_assert(!qemu_mutex_iothread_locked());
> diff --git a/include/qom/cpu.h b/include/qom/cpu.h
> index bb0729f969..726cb7b090 100644
> --- a/include/qom/cpu.h
> +++ b/include/qom/cpu.h
> @@ -322,7 +322,8 @@ struct qemu_work_item;
>   * @mem_io_pc: Host Program Counter at which the memory was accessed.
>   * @mem_io_vaddr: Target virtual address at which the memory was accessed.
>   * @kvm_fd: vCPU file descriptor for KVM.
> - * @lock: Lock to prevent multiple access to per-CPU fields.
> + * @lock: Lock to prevent multiple access to per-CPU fields. Must be acquired
> + *        after the BQL.
>   * @cond: Condition variable for per-CPU events.
>   * @work_list: List of pending asynchronous work.
>   * @halted: Nonzero if the CPU is in suspended state.
> @@ -804,6 +805,7 @@ static inline bool cpu_has_work(CPUState *cpu)
>      bool (*func)(CPUState *cpu);
>      bool ret;
>
> +    /* some targets require us to hold the BQL when checking for work */
>      if (cc->has_work_with_iothread_lock) {
>          if (qemu_mutex_iothread_locked()) {
>              func = cc->has_work_with_iothread_lock;
> diff --git a/target/i386/kvm.c b/target/i386/kvm.c
> index 3f3c670897..65a14deb2f 100644
> --- a/target/i386/kvm.c
> +++ b/target/i386/kvm.c
> @@ -3216,6 +3216,10 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
>          qemu_mutex_lock_iothread();
>      }
>
> +    /*
> +     * We might have cleared some bits in cpu->interrupt_request since reading
> +     * it; read it again.
> +     */
>      interrupt_request = cpu_interrupt_request(cpu);
>
>      /* Force the VCPU out of its inner loop to process any INIT requests


--
Alex Bennée
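
For context on the cpu_has_work() hunk quoted above: the series adds a
has_work_with_iothread_lock hook so that a target whose has_work implementation
touches BQL-protected state can declare that requirement; judging by the quoted
context, cpu_has_work() then takes care of invoking the hook with the BQL held.
A hypothetical target would wire it up roughly like this (the foo_* names are
invented; has_work_with_iothread_lock and cpu_interrupt_request() are from the
series):

    /* Hypothetical target code, sketching the new hook's intended use. */
    static bool foo_cpu_has_work(CPUState *cs)
    {
        /* reads BQL-protected device state, hence the new hook */
        return cpu_interrupt_request(cs) & CPU_INTERRUPT_HARD;
    }

    static void foo_cpu_class_init(ObjectClass *oc, void *data)
    {
        CPUClass *cc = CPU_CLASS(oc);

        cc->has_work_with_iothread_lock = foo_cpu_has_work;
    }
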
Richard Henderson March 6, 2019, 7:40 p.m. UTC | #2
On 3/4/19 10:17 AM, Emilio G. Cota wrote:
> v6: https://lists.gnu.org/archive/html/qemu-devel/2019-01/msg07650.html
> 
> All patches in the series have reviews now. Thanks everyone!
> 
> I've tested all patches with `make check-qtest -j' for all targets.
> The series is checkpatch-clean (just some warnings about __COVERITY__).
> 
> You can fetch the series from:
>   https://github.com/cota/qemu/tree/cpu-lock-v7

After I fixed the OSX build errors, Peter reported to me a hang on the x86_64
bios-tables-test with an arm32 host.  I have been able to replicate this.


(gdb) info thr
  Id   Target Id                                       Frame
  1    Thread 0xb35a72e0 (LWP 10966) "qemu-system-x86" 0xb606d138 in
pthread_cond_wait@@GLIBC_2.4 () from /usr/lib/libpthread.so.0
  2    Thread 0xb35a3d30 (LWP 10967) "qemu-system-x86" 0xb5fe70fc in syscall ()
   from /usr/lib/libc.so.6
  3    Thread 0xa07fed30 (LWP 10968) "qemu-system-x86" 0xb5fe0688 in poll ()
   from /usr/lib/libc.so.6
* 4    Thread 0x9fdfed30 (LWP 10969) "qemu-system-x86" 0xb606d138 in
pthread_cond_wait@@GLIBC_2.4 () from /usr/lib/libpthread.so.0

(gdb) thr 1
[Switching to thread 1 (Thread 0xb35a72e0 (LWP 10966))]
#0  0xb606d138 in pthread_cond_wait@@GLIBC_2.4 () from /usr/lib/libpthread.so.0
(gdb) where
#0  0xb606d138 in pthread_cond_wait@@GLIBC_2.4 () from /usr/lib/libpthread.so.0
#1  0x009a66a4 in qemu_cond_wait_impl (cond=cond@entry=0x16727b0,
    mutex=0xcad4c8 <qemu_global_mutex>,
    file=file@entry=0xa057f0 "/home/rth/qemu/qemu/cpus-common.c",
    line=line@entry=166) at /home/rth/qemu/qemu/util/qemu-thread-posix.c:161
#2  0x00692378 in run_on_cpu (cpu=cpu@entry=0x16725a8, func=<optimized out>,
    data=...) at /home/rth/qemu/qemu/cpus-common.c:166
#3  0x00604c18 in vapic_enable_tpr_reporting (enable=<optimized out>)
    at /home/rth/qemu/qemu/hw/i386/kvmvapic.c:507
#4  0x00710160 in qdev_reset_one (dev=<optimized out>, opaque=<optimized out>)
    at /home/rth/qemu/qemu/hw/core/qdev.c:250

(gdb) thr 4
[Switching to thread 4 (Thread 0x9fdfed30 (LWP 10969))]
#0  0xb606d138 in pthread_cond_wait@@GLIBC_2.4 () from /usr/lib/libpthread.so.0
(gdb) where
#0  0xb606d138 in pthread_cond_wait@@GLIBC_2.4 () from /usr/lib/libpthread.so.0
#1  0x009a66a4 in qemu_cond_wait_impl (cond=0x1692ed8,
    mutex=0xcad4c8 <qemu_global_mutex>,
    file=file@entry=0x9d0108 "/home/rth/qemu/qemu/cpus.c",
    line=line@entry=1647) at /home/rth/qemu/qemu/util/qemu-thread-posix.c:161
#2  0x00553070 in qemu_tcg_rr_cpu_thread_fn (arg=<optimized out>)
    at /home/rth/qemu/qemu/cpus.c:1647
#3  0x009a5f20 in qemu_thread_start (args=<optimized out>)
    at /home/rth/qemu/qemu/util/qemu-thread-posix.c:502


I had hoped to be able to reproduce this on another host by forcing rr mode,
but so far no luck.

Thoughts?


r~
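
Reading the two backtraces together: thread 1 (the main thread) is parked
inside run_on_cpu(), waiting for the vCPU to run a queued work item, while
thread 4 (the single-threaded TCG round-robin vCPU thread) is itself asleep
on a condition variable in qemu_tcg_rr_cpu_thread_fn() -- so the work item is
never drained, which looks like a lost wakeup. For reference, run_on_cpu()'s
wait loop is roughly this (a simplified sketch, not the exact cpus-common.c
code):

    void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
    {
        struct qemu_work_item wi = { .func = func, .data = data };

        if (qemu_cpu_is_self(cpu)) {
            func(cpu, data);
            return;
        }
        queue_work_on_cpu(cpu, &wi);   /* enqueue the work item */
        qemu_cpu_kick(cpu);            /* wake the vCPU thread -- or fail to */
        while (!atomic_mb_read(&wi.done)) {
            /* thread 1's frame #1: sleeping here until wi.done flips */
            qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        }
    }

If the kick races with the vCPU thread going to sleep under the new per-CPU
lock, wi.done never flips and both threads wait forever.
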
Peter Maydell March 6, 2019, 7:57 p.m. UTC | #3
On Wed, 6 Mar 2019 at 19:41, Richard Henderson
<richard.henderson@linaro.org> wrote:
> I had hoped that I would have been able to reproduce this on another host by
> forcing rr mode.  But so far no luck.

Have you tried rr's "chaos mode" switch?

thanks
-- PMM
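
(This presumably refers to the rr record-and-replay debugger, whose chaos mode
randomizes scheduling decisions to make rare races more reproducible. Assuming
a typical invocation, and with a hypothetical build path and guest arguments,
something like:

    # rr's chaos mode; -h is the short form of --chaos
    rr record -h ./x86_64-softmmu/qemu-system-x86_64 ...

would be the way to try it.)
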
Emilio Cota March 6, 2019, 11:49 p.m. UTC | #4
On Wed, Mar 06, 2019 at 11:40:57 -0800, Richard Henderson wrote:
> Peter reported to me a hang on the x86_64
> bios-tables-test with an arm32 host.  I have been able to replicate this.
(snip)

I reproduced this on a power7 machine (gcc110). However, after a few
git checkout->build cycles I was not able to reproduce this again (!?).

I currently have very little time to spend on this. Given how close we
are to the soft freeze, I suggest we defer the merge of this series
to post-v4.0.

Thanks,

		Emilio

Patch

diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index e4ae04f72c..a513457520 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -435,7 +435,7 @@ static inline bool cpu_handle_halt_locked(CPUState *cpu)
             && replay_interrupt()) {
             X86CPU *x86_cpu = X86_CPU(cpu);
 
-            /* prevent deadlock; cpu_mutex must be acquired _after_ the BQL */
+            /* locking order: cpu_mutex must be acquired _after_ the BQL */
             cpu_mutex_unlock(cpu);
             qemu_mutex_lock_iothread();
             cpu_mutex_lock(cpu);
diff --git a/cpus.c b/cpus.c
index 4f17fe25bf..82a93f2a5a 100644
--- a/cpus.c
+++ b/cpus.c
@@ -2062,7 +2062,7 @@ void qemu_mutex_lock_iothread_impl(const char *file, int line)
 {
     QemuMutexLockFunc bql_lock = atomic_read(&qemu_bql_mutex_lock_func);
 
-    /* prevent deadlock with CPU mutex */
+    /* enforce locking order */
     g_assert(no_cpu_mutex_locked());
 
     g_assert(!qemu_mutex_iothread_locked());
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index bb0729f969..726cb7b090 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -322,7 +322,8 @@ struct qemu_work_item;
  * @mem_io_pc: Host Program Counter at which the memory was accessed.
  * @mem_io_vaddr: Target virtual address at which the memory was accessed.
  * @kvm_fd: vCPU file descriptor for KVM.
- * @lock: Lock to prevent multiple access to per-CPU fields.
+ * @lock: Lock to prevent multiple access to per-CPU fields. Must be acquired
+ *        after the BQL.
  * @cond: Condition variable for per-CPU events.
  * @work_list: List of pending asynchronous work.
  * @halted: Nonzero if the CPU is in suspended state.
@@ -804,6 +805,7 @@ static inline bool cpu_has_work(CPUState *cpu)
     bool (*func)(CPUState *cpu);
     bool ret;
 
+    /* some targets require us to hold the BQL when checking for work */
     if (cc->has_work_with_iothread_lock) {
         if (qemu_mutex_iothread_locked()) {
             func = cc->has_work_with_iothread_lock;
diff --git a/target/i386/kvm.c b/target/i386/kvm.c
index 3f3c670897..65a14deb2f 100644
--- a/target/i386/kvm.c
+++ b/target/i386/kvm.c
@@ -3216,6 +3216,10 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
         qemu_mutex_lock_iothread();
     }
 
+    /*
+     * We might have cleared some bits in cpu->interrupt_request since reading
+     * it; read it again.
+     */
     interrupt_request = cpu_interrupt_request(cpu);
 
     /* Force the VCPU out of its inner loop to process any INIT requests
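
A closing note on the final hunk: cpu_interrupt_request() is one of the
accessors the series introduces, since interrupt_request moves under the
protection of cpu->lock and direct field reads are no longer safe. Its shape
is roughly the following (an illustrative sketch; the series may add a fast
path for callers that already hold the lock, and accessors such as the
cpu_halted_set() mentioned in the changelog follow the same pattern):

    static inline uint32_t cpu_interrupt_request(CPUState *cpu)
    {
        uint32_t ret;

        cpu_mutex_lock(cpu);
        ret = cpu->interrupt_request;
        cpu_mutex_unlock(cpu);
        return ret;
    }

Hence the comment added in the hunk: some bits may have been cleared since the
earlier read, so the value must be re-read through the accessor rather than
reused.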