
[13/26] ppc: Convert mmu context allocation to new IDA API

Message ID 20180621212835.5636-14-willy@infradead.org
State Not Applicable
Series None

Commit Message

Matthew Wilcox (Oracle) June 21, 2018, 9:28 p.m. UTC
ida_alloc_range is the perfect fit for this use case.  Eliminates
a custom spinlock, a call to ida_pre_get and a local check for the
allocated ID exceeding a maximum.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
---
 arch/powerpc/mm/mmu_context_book3s64.c | 44 +++-----------------------
 1 file changed, 4 insertions(+), 40 deletions(-)
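
For readers unfamiliar with the two IDA interfaces, here is a rough sketch of
the calling conventions being swapped (illustration only, not part of the
patch; variable names are placeholders, return values as documented for
lib/idr.c at the time):

	/* Old API: caller preloads, loops on -EAGAIN, and range-checks by hand. */
	if (!ida_pre_get(&ida, GFP_KERNEL))		/* 0 means allocation failure */
		return -ENOMEM;
	err = ida_get_new_above(&ida, min_id, &id);	/* 0 on success, -EAGAIN => retry */

	/* New API: a single call; retrying, internal locking and the
	 * [min_id, max_id] bound are all handled inside the IDA.
	 */
	id = ida_alloc_range(&ida, min_id, max_id, GFP_KERNEL);
	if (id < 0)					/* -ENOMEM or -ENOSPC */
		return id;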

Comments

Nicholas Piggin June 22, 2018, 2:15 a.m. UTC | #1
On Thu, 21 Jun 2018 14:28:22 -0700
Matthew Wilcox <willy@infradead.org> wrote:

> ida_alloc_range is the perfect fit for this use case.  Eliminates
> a custom spinlock, a call to ida_pre_get and a local check for the
> allocated ID exceeding a maximum.
> 
> Signed-off-by: Matthew Wilcox <willy@infradead.org>
> ---
>  arch/powerpc/mm/mmu_context_book3s64.c | 44 +++-----------------------
>  1 file changed, 4 insertions(+), 40 deletions(-)
> 
> diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
> index f3d4b4a0e561..5a0cf2cc8ba0 100644
> --- a/arch/powerpc/mm/mmu_context_book3s64.c
> +++ b/arch/powerpc/mm/mmu_context_book3s64.c
> @@ -26,48 +26,16 @@
>  #include <asm/mmu_context.h>
>  #include <asm/pgalloc.h>
>  
> -static DEFINE_SPINLOCK(mmu_context_lock);
>  static DEFINE_IDA(mmu_context_ida);
>  
>  static int alloc_context_id(int min_id, int max_id)
>  {
> -	int index, err;
> -
> -again:
> -	if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
> -		return -ENOMEM;
> -
> -	spin_lock(&mmu_context_lock);
> -	err = ida_get_new_above(&mmu_context_ida, min_id, &index);
> -	spin_unlock(&mmu_context_lock);
> -
> -	if (err == -EAGAIN)
> -		goto again;
> -	else if (err)
> -		return err;
> -
> -	if (index > max_id) {
> -		spin_lock(&mmu_context_lock);
> -		ida_remove(&mmu_context_ida, index);
> -		spin_unlock(&mmu_context_lock);
> -		return -ENOMEM;
> -	}
> -
> -	return index;
> +	return ida_alloc_range(&mmu_context_ida, min_id, max_id, GFP_KERNEL);
>  }
>  
>  void hash__reserve_context_id(int id)
>  {
> -	int rc, result = 0;
> -
> -	do {
> -		if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
> -			break;
> -
> -		spin_lock(&mmu_context_lock);
> -		rc = ida_get_new_above(&mmu_context_ida, id, &result);
> -		spin_unlock(&mmu_context_lock);
> -	} while (rc == -EAGAIN);
> +	int result = ida_alloc_range(&mmu_context_ida, id, id, GFP_KERNEL);
>  
>  	WARN(result != id, "mmu: Failed to reserve context id %d (rc %d)\n", id, result);
>  }
> @@ -172,9 +140,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
>  
>  void __destroy_context(int context_id)
>  {
> -	spin_lock(&mmu_context_lock);
> -	ida_remove(&mmu_context_ida, context_id);
> -	spin_unlock(&mmu_context_lock);
> +	ida_free(&mmu_context_ida, context_id);
>  }
>  EXPORT_SYMBOL_GPL(__destroy_context);
>  
> @@ -182,13 +148,11 @@ static void destroy_contexts(mm_context_t *ctx)
>  {
>  	int index, context_id;
>  
> -	spin_lock(&mmu_context_lock);
>  	for (index = 0; index < ARRAY_SIZE(ctx->extended_id); index++) {
>  		context_id = ctx->extended_id[index];
>  		if (context_id)
> -			ida_remove(&mmu_context_ida, context_id);
> +			ida_free(&mmu_context_ida, context_id);
>  	}
> -	spin_unlock(&mmu_context_lock);
>  }
>  
>  static void pte_frag_destroy(void *pte_frag)

This hunk should be okay because the mmu_context_lock does not protect
the extended_id array, right Aneesh?

Reviewed-by: Nicholas Piggin <npiggin@gmail.com>

Thanks,
Nick
Matthew Wilcox (Oracle) June 22, 2018, 4:38 a.m. UTC | #2
On Fri, Jun 22, 2018 at 12:15:11PM +1000, Nicholas Piggin wrote:
> On Thu, 21 Jun 2018 14:28:22 -0700
> Matthew Wilcox <willy@infradead.org> wrote:
> >  static int alloc_context_id(int min_id, int max_id)
...
> > -	spin_lock(&mmu_context_lock);
> > -	err = ida_get_new_above(&mmu_context_ida, min_id, &index);
> > -	spin_unlock(&mmu_context_lock);
...
> > @@ -182,13 +148,11 @@ static void destroy_contexts(mm_context_t *ctx)
> >  {
> >  	int index, context_id;
> >  
> > -	spin_lock(&mmu_context_lock);
> >  	for (index = 0; index < ARRAY_SIZE(ctx->extended_id); index++) {
> >  		context_id = ctx->extended_id[index];
> >  		if (context_id)
> > -			ida_remove(&mmu_context_ida, context_id);
> > +			ida_free(&mmu_context_ida, context_id);
> >  	}
> > -	spin_unlock(&mmu_context_lock);
> >  }
> >  
> >  static void pte_frag_destroy(void *pte_frag)
> 
> This hunk should be okay because the mmu_context_lock does not protect
> the extended_id array, right Aneesh?

That's my understanding.  The code today does this:

static inline int alloc_extended_context(struct mm_struct *mm,
                                         unsigned long ea)
{
        int context_id;

        int index = ea >> MAX_EA_BITS_PER_CONTEXT;

        context_id = hash__alloc_context_id();
        if (context_id < 0)
                return context_id;

        VM_WARN_ON(mm->context.extended_id[index]);
        mm->context.extended_id[index] = context_id;

so it's not currently protected by this lock.  I suppose we are currently
protected from destroy_contexts() being called twice simultaneously, but
you'll notice that we don't zero the array elements in destroy_contexts(),
so if we somehow had a code path which could call it concurrently, we'd
be seeing warnings when the second caller tried to remove the context
IDs from the IDA.  I deduced that something else must be preventing
this situation from occurring (like, oh, I don't know, this function only
being called on process exit, so implicitly only called once per context).
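
A hypothetical defensive variant of destroy_contexts(), for illustration only
(zeroing each slot so a repeat call would skip it rather than double-free;
this is not part of the patch and the patch does not need it):

	static void destroy_contexts(mm_context_t *ctx)
	{
		int index, context_id;

		for (index = 0; index < ARRAY_SIZE(ctx->extended_id); index++) {
			context_id = ctx->extended_id[index];
			if (context_id) {
				ida_free(&mmu_context_ida, context_id);
				/* Clear the slot so a second call finds nothing to free. */
				ctx->extended_id[index] = 0;
			}
		}
	}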

> Reviewed-by: Nicholas Piggin <npiggin@gmail.com>

Thanks.
Nicholas Piggin June 22, 2018, 4:53 a.m. UTC | #3
On Thu, 21 Jun 2018 21:38:15 -0700
Matthew Wilcox <willy@infradead.org> wrote:

> On Fri, Jun 22, 2018 at 12:15:11PM +1000, Nicholas Piggin wrote:
> > On Thu, 21 Jun 2018 14:28:22 -0700
> > Matthew Wilcox <willy@infradead.org> wrote:  
> > >  static int alloc_context_id(int min_id, int max_id)  
> ...
> > > -	spin_lock(&mmu_context_lock);
> > > -	err = ida_get_new_above(&mmu_context_ida, min_id, &index);
> > > -	spin_unlock(&mmu_context_lock);  
> ...
> > > @@ -182,13 +148,11 @@ static void destroy_contexts(mm_context_t *ctx)
> > >  {
> > >  	int index, context_id;
> > >  
> > > -	spin_lock(&mmu_context_lock);
> > >  	for (index = 0; index < ARRAY_SIZE(ctx->extended_id); index++) {
> > >  		context_id = ctx->extended_id[index];
> > >  		if (context_id)
> > > -			ida_remove(&mmu_context_ida, context_id);
> > > +			ida_free(&mmu_context_ida, context_id);
> > >  	}
> > > -	spin_unlock(&mmu_context_lock);
> > >  }
> > >  
> > >  static void pte_frag_destroy(void *pte_frag)  
> > 
> > This hunk should be okay because the mmu_context_lock does not protect
> > the extended_id array, right Aneesh?  
> 
> That's my understanding.  The code today does this:
> 
> static inline int alloc_extended_context(struct mm_struct *mm,
>                                          unsigned long ea)
> {
>         int context_id;
> 
>         int index = ea >> MAX_EA_BITS_PER_CONTEXT;
> 
>         context_id = hash__alloc_context_id();
>         if (context_id < 0)
>                 return context_id;
> 
>         VM_WARN_ON(mm->context.extended_id[index]);
>         mm->context.extended_id[index] = context_id;
> 
> so it's not currently protected by this lock.  I suppose we are currently
> protected from destroy_contexts() being called twice simultaneously, but
> you'll notice that we don't zero the array elements in destroy_contexts(),
> so if we somehow had a code path which could call it concurrently, we'd
> be seeing warnings when the second caller tried to remove the context

Yeah that'd be an existing bug.

> IDs from the IDA.  I deduced that something else must be preventing
> this situation from occurring (like, oh, I don't know, this function only
> being called on process exit, so implicitly only called once per context).

I think that's exactly right.

Thanks,
Nick
Aneesh Kumar K.V June 22, 2018, 5:47 a.m. UTC | #4
Nicholas Piggin <npiggin@gmail.com> writes:

> On Thu, 21 Jun 2018 14:28:22 -0700
> Matthew Wilcox <willy@infradead.org> wrote:
>
>> ida_alloc_range is the perfect fit for this use case.  Eliminates
>> a custom spinlock, a call to ida_pre_get and a local check for the
>> allocated ID exceeding a maximum.
>> 
>> Signed-off-by: Matthew Wilcox <willy@infradead.org>
>> ---
>>  arch/powerpc/mm/mmu_context_book3s64.c | 44 +++-----------------------
>>  1 file changed, 4 insertions(+), 40 deletions(-)
>> 
>> diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
>> index f3d4b4a0e561..5a0cf2cc8ba0 100644
>> --- a/arch/powerpc/mm/mmu_context_book3s64.c
>> +++ b/arch/powerpc/mm/mmu_context_book3s64.c
>> @@ -26,48 +26,16 @@
>>  #include <asm/mmu_context.h>
>>  #include <asm/pgalloc.h>
>>  
>> -static DEFINE_SPINLOCK(mmu_context_lock);
>>  static DEFINE_IDA(mmu_context_ida);
>>  
>>  static int alloc_context_id(int min_id, int max_id)
>>  {
>> -	int index, err;
>> -
>> -again:
>> -	if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
>> -		return -ENOMEM;
>> -
>> -	spin_lock(&mmu_context_lock);
>> -	err = ida_get_new_above(&mmu_context_ida, min_id, &index);
>> -	spin_unlock(&mmu_context_lock);
>> -
>> -	if (err == -EAGAIN)
>> -		goto again;
>> -	else if (err)
>> -		return err;
>> -
>> -	if (index > max_id) {
>> -		spin_lock(&mmu_context_lock);
>> -		ida_remove(&mmu_context_ida, index);
>> -		spin_unlock(&mmu_context_lock);
>> -		return -ENOMEM;
>> -	}
>> -
>> -	return index;
>> +	return ida_alloc_range(&mmu_context_ida, min_id, max_id, GFP_KERNEL);
>>  }
>>  
>>  void hash__reserve_context_id(int id)
>>  {
>> -	int rc, result = 0;
>> -
>> -	do {
>> -		if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
>> -			break;
>> -
>> -		spin_lock(&mmu_context_lock);
>> -		rc = ida_get_new_above(&mmu_context_ida, id, &result);
>> -		spin_unlock(&mmu_context_lock);
>> -	} while (rc == -EAGAIN);
>> +	int result = ida_alloc_range(&mmu_context_ida, id, id, GFP_KERNEL);
>>  
>>  	WARN(result != id, "mmu: Failed to reserve context id %d (rc %d)\n", id, result);
>>  }
>> @@ -172,9 +140,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
>>  
>>  void __destroy_context(int context_id)
>>  {
>> -	spin_lock(&mmu_context_lock);
>> -	ida_remove(&mmu_context_ida, context_id);
>> -	spin_unlock(&mmu_context_lock);
>> +	ida_free(&mmu_context_ida, context_id);
>>  }
>>  EXPORT_SYMBOL_GPL(__destroy_context);
>>  
>> @@ -182,13 +148,11 @@ static void destroy_contexts(mm_context_t *ctx)
>>  {
>>  	int index, context_id;
>>  
>> -	spin_lock(&mmu_context_lock);
>>  	for (index = 0; index < ARRAY_SIZE(ctx->extended_id); index++) {
>>  		context_id = ctx->extended_id[index];
>>  		if (context_id)
>> -			ida_remove(&mmu_context_ida, context_id);
>> +			ida_free(&mmu_context_ida, context_id);
>>  	}
>> -	spin_unlock(&mmu_context_lock);
>>  }
>>  
>>  static void pte_frag_destroy(void *pte_frag)
>
> This hunk should be okay because the mmu_context_lock does not protect
> the extended_id array, right Aneesh?

Yes. This is called at process exit, so there should be no parallel
calls. On the allocation side we are protected by mmap_sem; we allocate
the extended_id entries when doing mmap.

-aneesh
Aneesh Kumar K.V June 22, 2018, 5:47 a.m. UTC | #5
Matthew Wilcox <willy@infradead.org> writes:



> this situation from occurring (like, oh, I don't know, this function only
> being called on process exit, so implicitly only called once per context).
>

That is correct.

Patch

diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index f3d4b4a0e561..5a0cf2cc8ba0 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -26,48 +26,16 @@ 
 #include <asm/mmu_context.h>
 #include <asm/pgalloc.h>
 
-static DEFINE_SPINLOCK(mmu_context_lock);
 static DEFINE_IDA(mmu_context_ida);
 
 static int alloc_context_id(int min_id, int max_id)
 {
-	int index, err;
-
-again:
-	if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
-		return -ENOMEM;
-
-	spin_lock(&mmu_context_lock);
-	err = ida_get_new_above(&mmu_context_ida, min_id, &index);
-	spin_unlock(&mmu_context_lock);
-
-	if (err == -EAGAIN)
-		goto again;
-	else if (err)
-		return err;
-
-	if (index > max_id) {
-		spin_lock(&mmu_context_lock);
-		ida_remove(&mmu_context_ida, index);
-		spin_unlock(&mmu_context_lock);
-		return -ENOMEM;
-	}
-
-	return index;
+	return ida_alloc_range(&mmu_context_ida, min_id, max_id, GFP_KERNEL);
 }
 
 void hash__reserve_context_id(int id)
 {
-	int rc, result = 0;
-
-	do {
-		if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
-			break;
-
-		spin_lock(&mmu_context_lock);
-		rc = ida_get_new_above(&mmu_context_ida, id, &result);
-		spin_unlock(&mmu_context_lock);
-	} while (rc == -EAGAIN);
+	int result = ida_alloc_range(&mmu_context_ida, id, id, GFP_KERNEL);
 
 	WARN(result != id, "mmu: Failed to reserve context id %d (rc %d)\n", id, result);
 }
@@ -172,9 +140,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 
 void __destroy_context(int context_id)
 {
-	spin_lock(&mmu_context_lock);
-	ida_remove(&mmu_context_ida, context_id);
-	spin_unlock(&mmu_context_lock);
+	ida_free(&mmu_context_ida, context_id);
 }
 EXPORT_SYMBOL_GPL(__destroy_context);
 
@@ -182,13 +148,11 @@ static void destroy_contexts(mm_context_t *ctx)
 {
 	int index, context_id;
 
-	spin_lock(&mmu_context_lock);
 	for (index = 0; index < ARRAY_SIZE(ctx->extended_id); index++) {
 		context_id = ctx->extended_id[index];
 		if (context_id)
-			ida_remove(&mmu_context_ida, context_id);
+			ida_free(&mmu_context_ida, context_id);
 	}
-	spin_unlock(&mmu_context_lock);
 }
 
 static void pte_frag_destroy(void *pte_frag)