
powerpc: Fix deadlock in icswx code

Message ID 20110915054315.5e5ae062@kryten (mailing list archive)
State Accepted, archived
Delegated to: Benjamin Herrenschmidt

Commit Message

Anton Blanchard Sept. 14, 2011, 7:43 p.m. UTC
The icswx code introduced an A-B B-A deadlock:

     CPU0                    CPU1
     ----                    ----
lock(&anon_vma->mutex);
                             lock(&mm->mmap_sem);
                             lock(&anon_vma->mutex);
lock(&mm->mmap_sem);

Instead of using the mmap_sem to keep mm_users constant, take the
page table spinlock.

Signed-off-by: Anton Blanchard <anton@samba.org>
Cc: <stable@kernel.org>
---
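
For illustration, a minimal userspace sketch of the A-B/B-A ordering inversion shown above. Two pthread mutexes stand in for anon_vma->mutex and mm->mmap_sem, and the two threads play CPU0 and CPU1; the names and the harness are hypothetical, not the kernel code path. Build with: cc -pthread abba.c -o abba

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t anon_vma_mutex = PTHREAD_MUTEX_INITIALIZER;	/* lock "A" */
static pthread_mutex_t mmap_sem       = PTHREAD_MUTEX_INITIALIZER;	/* lock "B" */
static pthread_barrier_t barrier;

static void *cpu0(void *arg)
{
	pthread_mutex_lock(&anon_vma_mutex);	/* CPU0: lock(A) */
	pthread_barrier_wait(&barrier);		/* both sides now hold their first lock */
	pthread_mutex_lock(&mmap_sem);		/* CPU0: wants B -> blocks forever */
	return NULL;
}

static void *cpu1(void *arg)
{
	pthread_mutex_lock(&mmap_sem);		/* CPU1: lock(B) */
	pthread_barrier_wait(&barrier);
	pthread_mutex_lock(&anon_vma_mutex);	/* CPU1: wants A -> blocks forever */
	return NULL;
}

int main(void)
{
	pthread_t t0, t1;

	pthread_barrier_init(&barrier, NULL, 2);
	pthread_create(&t0, NULL, cpu0, NULL);
	pthread_create(&t1, NULL, cpu1, NULL);

	sleep(2);	/* by now neither thread can take its second lock */
	printf("A-B/B-A deadlock reproduced: both threads are stuck\n");
	_exit(0);	/* the threads never finish, so exit without joining */
}

The patch replaces the mmap_sem usage in use_cop()/drop_cop() with mm->page_table_lock, which removes mmap_sem from these paths entirely and breaks the cycle.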

Comments

Jimi Xenidis Sept. 15, 2011, 1:09 p.m. UTC | #1
On Sep 14, 2011, at 2:43 PM, Anton Blanchard wrote:


Hi Anton,
It would really help me a lot if you could review and maybe merge this with my earlier patch that splits this file.
  <http://patchwork.ozlabs.org/patch/109103/>
All it does is split... I promise.
You don't have to take the other stuff... yet :)

-JX


Anton Blanchard Sept. 27, 2011, 10:04 a.m. UTC | #2
Hi Jimi,

> It would really help me a lot if you could review and maybe merge
> this with my earlier patch that splits this file.
> <http://patchwork.ozlabs.org/patch/109103/> All it does is split.. I
> promise. You don't have to take the other stuff.. yet :)

Sorry it took so long to get to this. Looking at it now.

Anton

Patch

diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c
index 3bafc3d..4ff587e 100644
--- a/arch/powerpc/mm/mmu_context_hash64.c
+++ b/arch/powerpc/mm/mmu_context_hash64.c
@@ -136,8 +136,8 @@ int use_cop(unsigned long acop, struct mm_struct *mm)
 	if (!mm || !acop)
 		return -EINVAL;
 
-	/* We need to make sure mm_users doesn't change */
-	down_read(&mm->mmap_sem);
+	/* The page_table_lock ensures mm_users won't change under us */
+	spin_lock(&mm->page_table_lock);
 	spin_lock(mm->context.cop_lockp);
 
 	if (mm->context.cop_pid == COP_PID_NONE) {
@@ -164,7 +164,7 @@ int use_cop(unsigned long acop, struct mm_struct *mm)
 
 out:
 	spin_unlock(mm->context.cop_lockp);
-	up_read(&mm->mmap_sem);
+	spin_unlock(&mm->page_table_lock);
 
 	return ret;
 }
@@ -185,8 +185,8 @@ void drop_cop(unsigned long acop, struct mm_struct *mm)
 	if (WARN_ON_ONCE(!mm))
 		return;
 
-	/* We need to make sure mm_users doesn't change */
-	down_read(&mm->mmap_sem);
+	/* The page_table_lock ensures mm_users won't change under us */
+	spin_lock(&mm->page_table_lock);
 	spin_lock(mm->context.cop_lockp);
 
 	mm->context.acop &= ~acop;
@@ -213,7 +213,7 @@ void drop_cop(unsigned long acop, struct mm_struct *mm)
 	}
 
 	spin_unlock(mm->context.cop_lockp);
-	up_read(&mm->mmap_sem);
+	spin_unlock(&mm->page_table_lock);
 }
 EXPORT_SYMBOL_GPL(drop_cop);
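
The replacement comment in both hunks, "The page_table_lock ensures mm_users won't change under us", relies on a simple pattern: a reader pins a shared count by holding the same lock that every writer of the count takes. Below is a minimal userspace sketch of that pattern, with a pthread mutex standing in for the page table spinlock and a plain integer for mm_users; the names and the harness are hypothetical, not the kernel implementation. Build with: cc -pthread pin_count.c -o pin_count

#include <assert.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t page_table_lock = PTHREAD_MUTEX_INITIALIZER;
static int mm_users = 1;

static void *writer(void *arg)
{
	/* every path that changes the count takes the same lock */
	for (int i = 0; i < 1000; i++) {
		pthread_mutex_lock(&page_table_lock);
		mm_users++;
		mm_users--;
		pthread_mutex_unlock(&page_table_lock);
	}
	return NULL;
}

static void *reader(void *arg)
{
	/* like use_cop(): while we hold the lock, the count cannot move under us */
	for (int i = 0; i < 100; i++) {
		pthread_mutex_lock(&page_table_lock);
		int before = mm_users;
		usleep(100);
		assert(mm_users == before);
		pthread_mutex_unlock(&page_table_lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&w, NULL, writer, NULL);
	pthread_create(&r, NULL, reader, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	printf("mm_users stayed constant whenever the reader held the lock\n");
	return 0;
}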