
[v3] net/irda: fix lockdep annotation

Message ID 20170119.112736.1928027614533032571.davem@davemloft.net
State RFC, archived
Delegated to: David Miller

Commit Message

David Miller Jan. 19, 2017, 4:27 p.m. UTC
From: Dmitry Vyukov <dvyukov@google.com>
Date: Thu, 19 Jan 2017 11:05:36 +0100

> Thanks for looking into it! This particular issue bothers my fuzzers
> considerably. I agree that removing recursion is better.
> So how do we proceed? Will you mail this as a real patch?

Someone needs to test this:
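
For context: the lockdep report comes from hashbin_delete() invoking the caller-supplied free_func while holding hb_spinlock; if that callback tears down another hashbin, a second lock of the same lockdep class is acquired under the first. The old code silenced the report with spin_lock_irqsave_nested() and a global hashbin_lock_depth counter; the patch instead drops the lock around the callback so no nested acquisition happens at all. A minimal, purely illustrative sketch of the kind of callback that causes the recursion (my_node and children are hypothetical names, not from the IrDA code, assuming the usual FREE_FUNC signature of a single pointer argument):

	/*
	 * Hypothetical FREE_FUNC that re-enters hashbin code: deleting a
	 * nested hashbin takes another hb_spinlock of the same lockdep
	 * class while the outer hashbin_delete() still holds its own.
	 */
	struct my_node {
		irda_queue_t q;		/* hashbin linkage, kept first */
		hashbin_t *children;	/* nested hashbin, illustrative */
	};

	static void my_free_node(void *arg)
	{
		struct my_node *n = arg;

		hashbin_delete(n->children, my_free_node);
		kfree(n);
	}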

Comments

Dmitry Vyukov Jan. 20, 2017, 10:53 p.m. UTC | #1
On Thu, Jan 19, 2017 at 5:27 PM, David Miller <davem@davemloft.net> wrote:
> From: Dmitry Vyukov <dvyukov@google.com>
> Date: Thu, 19 Jan 2017 11:05:36 +0100
>
>> Thanks for looking into it! This particular issue bothers my fuzzers
>> considerably. I agree that removing recursion is better.
>> So how do we proceed? Will you mail this as a real patch?
>
> Someone needs to test this:


I've stress-tested this with the fuzzer for a day.
It gets rid of the lockdep warning, and I did not notice any other
related crashes.

Tested-by: Dmitry Vyukov <dvyukov@google.com>

Thanks!


> diff --git a/net/irda/irqueue.c b/net/irda/irqueue.c
> index acbe61c..160dc89 100644
> --- a/net/irda/irqueue.c
> +++ b/net/irda/irqueue.c
> @@ -383,9 +383,6 @@ EXPORT_SYMBOL(hashbin_new);
>   *    for deallocating this structure if it's complex. If not the user can
>   *    just supply kfree, which should take care of the job.
>   */
> -#ifdef CONFIG_LOCKDEP
> -static int hashbin_lock_depth = 0;
> -#endif
>  int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
>  {
>         irda_queue_t* queue;
> @@ -396,22 +393,27 @@ int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
>         IRDA_ASSERT(hashbin->magic == HB_MAGIC, return -1;);
>
>         /* Synchronize */
> -       if ( hashbin->hb_type & HB_LOCK ) {
> -               spin_lock_irqsave_nested(&hashbin->hb_spinlock, flags,
> -                                        hashbin_lock_depth++);
> -       }
> +       if (hashbin->hb_type & HB_LOCK)
> +               spin_lock_irqsave(&hashbin->hb_spinlock, flags);
>
>         /*
>          *  Free the entries in the hashbin, TODO: use hashbin_clear when
>          *  it has been shown to work
>          */
>         for (i = 0; i < HASHBIN_SIZE; i ++ ) {
> -               queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]);
> -               while (queue ) {
> -                       if (free_func)
> -                               (*free_func)(queue);
> -                       queue = dequeue_first(
> -                               (irda_queue_t**) &hashbin->hb_queue[i]);
> +               while (1) {
> +                       queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]);
> +
> +                       if (!queue)
> +                               break;
> +
> +                       if (free_func) {
> +                               if (hashbin->hb_type & HB_LOCK)
> +                                       spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
> +                               free_func(queue);
> +                               if (hashbin->hb_type & HB_LOCK)
> +                                       spin_lock_irqsave(&hashbin->hb_spinlock, flags);
> +                       }
>                 }
>         }
>
> @@ -420,12 +422,8 @@ int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
>         hashbin->magic = ~HB_MAGIC;
>
>         /* Release lock */
> -       if ( hashbin->hb_type & HB_LOCK) {
> +       if (hashbin->hb_type & HB_LOCK)
>                 spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
> -#ifdef CONFIG_LOCKDEP
> -               hashbin_lock_depth--;
> -#endif
> -       }
>
>         /*
>          *  Free the hashbin structure
Dmitry Vyukov Feb. 17, 2017, 9:13 p.m. UTC | #2
On Fri, Jan 20, 2017 at 11:53 PM, Dmitry Vyukov <dvyukov@google.com> wrote:
> On Thu, Jan 19, 2017 at 5:27 PM, David Miller <davem@davemloft.net> wrote:
>> From: Dmitry Vyukov <dvyukov@google.com>
>> Date: Thu, 19 Jan 2017 11:05:36 +0100
>>
>>> Thanks for looking into it! This particular issue bothers my fuzzers
>>> considerably. I agree that removing recursion is better.
>>> So how do we proceed? Will you mail this as a real patch?
>>
>> Someone needs to test this:
>
>
> I've stress-tested this with the fuzzer for a day.
> It gets rid of the lockdep warning, and I did not notice any other
> related crashes.
>
> Tested-by: Dmitry Vyukov <dvyukov@google.com>
>
> Thanks!


Hello David,

Have you merged this into some tree? I can't find it. If not, please
queue it for the next release.
This hurts us badly, but I don't want to turn off lockdep entirely.

Thanks


Patch

diff --git a/net/irda/irqueue.c b/net/irda/irqueue.c
index acbe61c..160dc89 100644
--- a/net/irda/irqueue.c
+++ b/net/irda/irqueue.c
@@ -383,9 +383,6 @@  EXPORT_SYMBOL(hashbin_new);
  *    for deallocating this structure if it's complex. If not the user can
  *    just supply kfree, which should take care of the job.
  */
-#ifdef CONFIG_LOCKDEP
-static int hashbin_lock_depth = 0;
-#endif
 int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
 {
 	irda_queue_t* queue;
@@ -396,22 +393,27 @@  int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
 	IRDA_ASSERT(hashbin->magic == HB_MAGIC, return -1;);
 
 	/* Synchronize */
-	if ( hashbin->hb_type & HB_LOCK ) {
-		spin_lock_irqsave_nested(&hashbin->hb_spinlock, flags,
-					 hashbin_lock_depth++);
-	}
+	if (hashbin->hb_type & HB_LOCK)
+		spin_lock_irqsave(&hashbin->hb_spinlock, flags);
 
 	/*
 	 *  Free the entries in the hashbin, TODO: use hashbin_clear when
 	 *  it has been shown to work
 	 */
 	for (i = 0; i < HASHBIN_SIZE; i ++ ) {
-		queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]);
-		while (queue ) {
-			if (free_func)
-				(*free_func)(queue);
-			queue = dequeue_first(
-				(irda_queue_t**) &hashbin->hb_queue[i]);
+		while (1) {
+			queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]);
+
+			if (!queue)
+				break;
+
+			if (free_func) {
+				if (hashbin->hb_type & HB_LOCK)
+					spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
+				free_func(queue);
+				if (hashbin->hb_type & HB_LOCK)
+					spin_lock_irqsave(&hashbin->hb_spinlock, flags);
+			}
 		}
 	}
 
@@ -420,12 +422,8 @@  int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
 	hashbin->magic = ~HB_MAGIC;
 
 	/* Release lock */
-	if ( hashbin->hb_type & HB_LOCK) {
+	if (hashbin->hb_type & HB_LOCK)
 		spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
-#ifdef CONFIG_LOCKDEP
-		hashbin_lock_depth--;
-#endif
-	}
 
 	/*
 	 *  Free the hashbin structure
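
The pattern the patch applies, reduced to a standalone sketch (the helper drain_list and its signature are illustrative, not the irqueue.c API): each element is unlinked while the lock is held, then the lock is dropped for the duration of the user callback, so the callback is free to take locks of the same lockdep class without nesting under this one.

	#include <linux/list.h>
	#include <linux/spinlock.h>

	/*
	 * Sketch only: drain a spinlock-protected list, releasing the lock
	 * around the caller-supplied callback.  The element is already
	 * unlinked when the lock is dropped, so no other path can reach it
	 * through the list while the callback runs.
	 */
	static void drain_list(struct list_head *head, spinlock_t *lock,
			       void (*free_cb)(struct list_head *))
	{
		struct list_head *item;
		unsigned long flags;

		spin_lock_irqsave(lock, flags);
		while (!list_empty(head)) {
			item = head->next;
			list_del(item);

			spin_unlock_irqrestore(lock, flags);
			free_cb(item);	/* may itself take locks of this class */
			spin_lock_irqsave(lock, flags);
		}
		spin_unlock_irqrestore(lock, flags);
	}

This mirrors what the patch does with dequeue_first(): the entry is taken off hb_queue[i] before hb_spinlock is released, which is what makes it safe to call free_func() unlocked.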