
[Trusty,SRU] sched/stop_machine: Fix deadlock between multiple stop_two_cpus()

Message ID 1437680409-5871-1-git-send-email-rafael.tinoco@canonical.com
State New

Commit Message

Rafael David Tinoco July 23, 2015, 7:40 p.m. UTC
From: Peter Zijlstra <peterz@infradead.org>

BugLink: https://bugs.launchpad.net/bugs/1461620

Jiri reported a machine stuck in multi_cpu_stop() with
migrate_swap_stop() as the function and with the following src,dst cpu
pairs: {11,  4} {13, 11} { 4, 13}

                        4       11      13

cpuM: queue(4 ,13)
                        *Ma
cpuN: queue(13,11)
                                *N      Na
                        *M              Mb
cpuO: queue(11, 4)
                        *O      Oa
                                *Nb
                        *Ob

Where *X denotes the cpu running the queueing of cpu-X and X[ab] denotes
the first/second queued work.

You'll observe the top of the workqueue for each cpu (4, 11, 13) to be
work from cpus M, O, N respectively; i.e. each cpu is spinning in the
multi_cpu_stop() work of one pair while its partner cpu is busy running
the work of a different pair, so no pair can ever complete. IOW, deadlock.

Do away with the queueing trickery and introduce lg_double_lock() to
lock both CPUs and fully serialize the stop_two_cpus() callers instead
of the partial (and buggy) serialization we have now.
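
To illustrate the ordering argument, here is a minimal user-space sketch
of the same ordered-acquisition idea, exercised with the reported
{11,4} {13,11} {4,13} pairs. It is not part of the patch, and the helper
names in it (double_lock, cpu_lock, worker) are invented for the example.
Because every caller takes the lower-numbered lock first, the circular
wait shown above cannot form:

/*
 * Illustrative user-space analogue of lg_double_lock(); build with
 * "gcc -pthread sketch.c". Not kernel code.
 */
#include <pthread.h>
#include <stdio.h>

#define NR_CPUS 16

static pthread_mutex_t cpu_lock[NR_CPUS];	/* one lock per "cpu" */

static void double_lock(int cpu1, int cpu2)
{
	/* lock in cpu order, just like lg_double_lock() */
	if (cpu2 < cpu1) {
		int tmp = cpu1;
		cpu1 = cpu2;
		cpu2 = tmp;
	}
	pthread_mutex_lock(&cpu_lock[cpu1]);
	pthread_mutex_lock(&cpu_lock[cpu2]);
}

static void double_unlock(int cpu1, int cpu2)
{
	pthread_mutex_unlock(&cpu_lock[cpu1]);
	pthread_mutex_unlock(&cpu_lock[cpu2]);
}

struct pair {
	int src;
	int dst;
};

/* stand-in for one stop_two_cpus() caller queueing work on both cpus */
static void *worker(void *arg)
{
	struct pair *p = arg;

	double_lock(p->src, p->dst);
	printf("queued both works for cpus %d and %d\n", p->src, p->dst);
	double_unlock(p->src, p->dst);
	return NULL;
}

int main(void)
{
	/* the reported src,dst pairs: {11,4} {13,11} {4,13} */
	struct pair pairs[] = { { 11, 4 }, { 13, 11 }, { 4, 13 } };
	pthread_t threads[3];
	int i;

	for (i = 0; i < NR_CPUS; i++)
		pthread_mutex_init(&cpu_lock[i], NULL);

	for (i = 0; i < 3; i++)
		pthread_create(&threads[i], NULL, worker, &pairs[i]);
	for (i = 0; i < 3; i++)
		pthread_join(threads[i], NULL);

	return 0;
}

With the callers fully serialized like this, both works can be queued
directly from the calling cpu, and the smp_call_function_single()
indirection in the old code is no longer needed.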

Reported-by: Jiri Olsa <jolsa@redhat.com>
OriginalAuthor: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
(cherry picked from commit b17718d02f54b90978d0e0146368b512b11c3e84)
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20150605153023.GH19282@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Rafael David Tinoco <rafael.tinoco@canonical.com>
---
 include/linux/lglock.h  |  5 +++++
 kernel/locking/lglock.c | 22 ++++++++++++++++++++++
 kernel/stop_machine.c   | 42 +++++-------------------------------------
 3 files changed, 32 insertions(+), 37 deletions(-)

Comments

Chris J Arges July 23, 2015, 7:53 p.m. UTC | #1
Clean cherry-pick, testable.
--chris

On 07/23/2015 02:40 PM, Rafael David Tinoco wrote:
> [...]
Tim Gardner July 23, 2015, 7:56 p.m. UTC | #2
Positive test results in that there is no detectable regression, but we
still don't really know that this fixes the original bug. I guess time
will tell.
Luis Henriques July 27, 2015, 3:53 p.m. UTC | #3
Applied to Trusty master-next branch.

Cheers,
--
Luís

Patch

diff --git a/include/linux/lglock.h b/include/linux/lglock.h
index 96549abe..81fa989 100644
--- a/include/linux/lglock.h
+++ b/include/linux/lglock.h
@@ -50,10 +50,15 @@ struct lglock {
 	static struct lglock name = { .lock = &name ## _lock }
 
 void lg_lock_init(struct lglock *lg, char *name);
+
 void lg_local_lock(struct lglock *lg);
 void lg_local_unlock(struct lglock *lg);
 void lg_local_lock_cpu(struct lglock *lg, int cpu);
 void lg_local_unlock_cpu(struct lglock *lg, int cpu);
+
+void lg_double_lock(struct lglock *lg, int cpu1, int cpu2);
+void lg_double_unlock(struct lglock *lg, int cpu1, int cpu2);
+
 void lg_global_lock(struct lglock *lg);
 void lg_global_unlock(struct lglock *lg);
 
diff --git a/kernel/locking/lglock.c b/kernel/locking/lglock.c
index 86ae2ae..951cfcd 100644
--- a/kernel/locking/lglock.c
+++ b/kernel/locking/lglock.c
@@ -60,6 +60,28 @@ void lg_local_unlock_cpu(struct lglock *lg, int cpu)
 }
 EXPORT_SYMBOL(lg_local_unlock_cpu);
 
+void lg_double_lock(struct lglock *lg, int cpu1, int cpu2)
+{
+	BUG_ON(cpu1 == cpu2);
+
+	/* lock in cpu order, just like lg_global_lock */
+	if (cpu2 < cpu1)
+		swap(cpu1, cpu2);
+
+	preempt_disable();
+	lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
+	arch_spin_lock(per_cpu_ptr(lg->lock, cpu1));
+	arch_spin_lock(per_cpu_ptr(lg->lock, cpu2));
+}
+
+void lg_double_unlock(struct lglock *lg, int cpu1, int cpu2)
+{
+	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
+	arch_spin_unlock(per_cpu_ptr(lg->lock, cpu1));
+	arch_spin_unlock(per_cpu_ptr(lg->lock, cpu2));
+	preempt_enable();
+}
+
 void lg_global_lock(struct lglock *lg)
 {
 	int i;
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 01fbae5..13ef6e0 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -211,25 +211,6 @@ static int multi_cpu_stop(void *data)
 	return err;
 }
 
-struct irq_cpu_stop_queue_work_info {
-	int cpu1;
-	int cpu2;
-	struct cpu_stop_work *work1;
-	struct cpu_stop_work *work2;
-};
-
-/*
- * This function is always run with irqs and preemption disabled.
- * This guarantees that both work1 and work2 get queued, before
- * our local migrate thread gets the chance to preempt us.
- */
-static void irq_cpu_stop_queue_work(void *arg)
-{
-	struct irq_cpu_stop_queue_work_info *info = arg;
-	cpu_stop_queue_work(info->cpu1, info->work1);
-	cpu_stop_queue_work(info->cpu2, info->work2);
-}
-
 /**
  * stop_two_cpus - stops two cpus
  * @cpu1: the cpu to stop
@@ -245,7 +226,6 @@ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *
 {
 	struct cpu_stop_done done;
 	struct cpu_stop_work work1, work2;
-	struct irq_cpu_stop_queue_work_info call_args;
 	struct multi_stop_data msdata;
 
 	preempt_disable();
@@ -262,13 +242,6 @@ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *
 		.done = &done
 	};
 
-	call_args = (struct irq_cpu_stop_queue_work_info){
-		.cpu1 = cpu1,
-		.cpu2 = cpu2,
-		.work1 = &work1,
-		.work2 = &work2,
-	};
-
 	cpu_stop_init_done(&done, 2);
 	set_state(&msdata, MULTI_STOP_PREPARE);
 
@@ -285,16 +258,11 @@ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *
 		return -ENOENT;
 	}
 
-	lg_local_lock(&stop_cpus_lock);
-	/*
-	 * Queuing needs to be done by the lowest numbered CPU, to ensure
-	 * that works are always queued in the same order on every CPU.
-	 * This prevents deadlocks.
-	 */
-	smp_call_function_single(min(cpu1, cpu2),
-				 &irq_cpu_stop_queue_work,
-				 &call_args, 1);
-	lg_local_unlock(&stop_cpus_lock);
+	lg_double_lock(&stop_cpus_lock, cpu1, cpu2);
+	cpu_stop_queue_work(cpu1, &work1);
+	cpu_stop_queue_work(cpu2, &work2);
+	lg_double_unlock(&stop_cpus_lock, cpu1, cpu2);
+
 	preempt_enable();
 
 	wait_for_completion(&done.completion);