
[v3,1/2] rtmutex: allow specifying a subclass for nested locking

Message ID 20180524135240.10881-2-peda@axentia.se
State Awaiting Upstream
Series Re: Problem: lockdep warning with nested instances of i2c-mux

Commit Message

Peter Rosin May 24, 2018, 1:52 p.m. UTC
Needed for lockdep annotation of nested rt_mutex locks.

Signed-off-by: Peter Rosin <peda@axentia.se>
---
 include/linux/rtmutex.h  |  7 +++++++
 kernel/locking/rtmutex.c | 29 +++++++++++++++++++++++++----
 2 files changed, 32 insertions(+), 4 deletions(-)
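
For context, a minimal sketch of how a caller might use the new subclass
parameter. The mux structure, the mux_depth() helper and the locking order
are illustrative assumptions modeled on the nested i2c-mux case that
motivated this series, not code from the series itself:

#include <linux/rtmutex.h>

/* Illustrative sketch, not from this series. */
struct mux {
	struct rt_mutex bus_lock;
	struct mux *parent;	/* NULL for the root mux */
};

/* Illustrative helper: nesting depth of a mux; the root is depth 0. */
static unsigned int mux_depth(struct mux *mux)
{
	unsigned int depth = 0;

	while ((mux = mux->parent))
		depth++;

	return depth;
}

/*
 * Lock a chain of muxes from the root outwards. All bus_lock instances
 * share one lock class, so each nesting level gets its own lockdep
 * subclass; without that, lockdep reports a false possible deadlock as
 * soon as a child is locked while its parent is already held. Note that
 * lockdep supports only MAX_LOCKDEP_SUBCLASSES (8) subclasses, so this
 * scheme is limited to shallow nesting.
 */
static void mux_lock_chain(struct mux *mux)
{
	if (mux->parent)
		mux_lock_chain(mux->parent);

	rt_mutex_lock_nested(&mux->bus_lock, mux_depth(mux));
}

An unnested caller keeps using plain rt_mutex_lock(), which the header
change below maps to subclass 0.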

Comments

Joel Fernandes May 28, 2018, 5:19 a.m. UTC | #1
On Thu, May 24, 2018 at 03:52:39PM +0200, Peter Rosin wrote:
> Needed for lockdep annotation of nested rt_mutex locks.
> 
> Signed-off-by: Peter Rosin <peda@axentia.se>
> ---
>  include/linux/rtmutex.h  |  7 +++++++
>  kernel/locking/rtmutex.c | 29 +++++++++++++++++++++++++----
>  2 files changed, 32 insertions(+), 4 deletions(-)
> 
> diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
> index 1b92a28dd672..6fd615a0eea9 100644
> --- a/include/linux/rtmutex.h
> +++ b/include/linux/rtmutex.h
> @@ -106,7 +106,14 @@ static inline int rt_mutex_is_locked(struct rt_mutex *lock)
>  extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key);
>  extern void rt_mutex_destroy(struct rt_mutex *lock);
>  
> +#ifdef CONFIG_DEBUG_LOCK_ALLOC
> +extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass);
> +#define rt_mutex_lock(lock) rt_mutex_lock_nested(lock, 0)
> +#else
>  extern void rt_mutex_lock(struct rt_mutex *lock);
> +#define rt_mutex_lock_nested(lock, subclass) rt_mutex_lock(lock)
> +#endif
> +
>  extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
>  extern int rt_mutex_timed_lock(struct rt_mutex *lock,
>  			       struct hrtimer_sleeper *timeout);
> diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
:
>  }
>  
> +static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass)
> +{
> +	might_sleep();
> +
> +	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
> +	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
> +}
> +
> +#ifdef CONFIG_DEBUG_LOCK_ALLOC
> +/**
> + * rt_mutex_lock_nested - lock a rt_mutex

This ifdef seems consistent with other nested locking primitives, but it's
kind of confusing.

The Kconfig.debug for DEBUG_LOCK_ALLOC says:

config DEBUG_LOCK_ALLOC
	bool "Lock debugging: detect incorrect freeing of live locks"
	[...]
	help
	 This feature will check whether any held lock (spinlock, rwlock,
	 mutex or rwsem) is incorrectly freed by the kernel, via any of the
	 memory-freeing routines (kfree(), kmem_cache_free(), free_pages(),
	 vfree(), etc.), whether a live lock is incorrectly reinitialized via
	 spin_lock_init()/mutex_init()/etc., or whether there is any lock
	 held during task exit.

Shouldn't this ideally be ifdef'd under PROVE_LOCKING, for this and the other
locking primitives? Any idea what the reason is? I know PROVE_LOCKING selects
DEBUG_LOCK_ALLOC, but still...

thanks!

- Joel
Peter Zijlstra May 28, 2018, 7:17 a.m. UTC | #2
On Sun, May 27, 2018 at 10:19:36PM -0700, Joel Fernandes wrote:

> > +static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass)
> > +{
> > +	might_sleep();
> > +
> > +	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
> > +	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
> > +}
> > +
> > +#ifdef CONFIG_DEBUG_LOCK_ALLOC
> > +/**
> > + * rt_mutex_lock_nested - lock a rt_mutex
> 
> This ifdef seems consistent with other nested locking primitives, but it's
> kind of confusing.
> 
> The Kconfig.debug for DEBUG_LOCK_ALLOC says:
> 
> config DEBUG_LOCK_ALLOC
> 	bool "Lock debugging: detect incorrect freeing of live locks"
> 	[...]
> 	help
> 	 This feature will check whether any held lock (spinlock, rwlock,
> 	 mutex or rwsem) is incorrectly freed by the kernel, via any of the
> 	 memory-freeing routines (kfree(), kmem_cache_free(), free_pages(),
> 	 vfree(), etc.), whether a live lock is incorrectly reinitialized via
> 	 spin_lock_init()/mutex_init()/etc., or whether there is any lock
> 	 held during task exit.
> 
> Shouldn't this ideally be ifdef'd under PROVE_LOCKING, for this and the other
> locking primitives? Any idea what the reason is? I know PROVE_LOCKING selects
> DEBUG_LOCK_ALLOC, but still...

No, the reason is that DEBUG_LOCK_ALLOC needs the lockdep hooks to know
which locks are held, so it can warn when we try to free a held one.
PROVE_LOCKING builds upon that.

The locking primitives should key off of DEBUG_LOCK_ALLOC for
introducing the hooks.
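
For reference, the layering Peter describes is visible in the Kconfig
entries themselves. Lightly abridged from lib/Kconfig.debug of that era
(the exact select lists vary between kernel versions): DEBUG_LOCK_ALLOC
pulls in the lockdep hooks via LOCKDEP, and PROVE_LOCKING builds on top
of it.

config DEBUG_LOCK_ALLOC
	bool "Lock debugging: detect incorrect freeing of live locks"
	select LOCKDEP
	[...]

config PROVE_LOCKING
	bool "Lock debugging: prove locking correctness"
	select LOCKDEP
	select DEBUG_LOCK_ALLOC
	[...]
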
Joel Fernandes May 28, 2018, 8:51 p.m. UTC | #3
On Mon, May 28, 2018 at 09:17:51AM +0200, Peter Zijlstra wrote:
> On Sun, May 27, 2018 at 10:19:36PM -0700, Joel Fernandes wrote:
> 
> > > +static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass)
> > > +{
> > > +	might_sleep();
> > > +
> > > +	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
> > > +	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
> > > +}
> > > +
> > > +#ifdef CONFIG_DEBUG_LOCK_ALLOC
> > > +/**
> > > + * rt_mutex_lock_nested - lock a rt_mutex
> > 
> > This ifdef seems consistent with other nested locking primitives, but it's
> > kind of confusing.
> > 
> > The Kconfig.debug for DEBUG_LOCK_ALLOC says:
> > 
> > config DEBUG_LOCK_ALLOC
> > 	bool "Lock debugging: detect incorrect freeing of live locks"
> > 	[...]
> > 	help
> > 	 This feature will check whether any held lock (spinlock, rwlock,
> > 	 mutex or rwsem) is incorrectly freed by the kernel, via any of the
> > 	 memory-freeing routines (kfree(), kmem_cache_free(), free_pages(),
> > 	 vfree(), etc.), whether a live lock is incorrectly reinitialized via
> > 	 spin_lock_init()/mutex_init()/etc., or whether there is any lock
> > 	 held during task exit.
> > 
> > Shouldn't this ideally be ifdef'd under PROVE_LOCKING, for this and the other
> > locking primitives? Any idea what the reason is? I know PROVE_LOCKING selects
> > DEBUG_LOCK_ALLOC, but still...
> 
> No, the reason is that DEBUG_LOCK_ALLOC needs the lockdep hooks to know
> which locks are held, so it can warn when we try to free a held one.
> PROVE_LOCKING builds upon that.
> 
> The locking primitives should key off of DEBUG_LOCK_ALLOC for
> introducing the hooks.

Got it, thanks for the clarification, Peter!

Regards,

 -Joel
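
One note before the patch itself: __rt_mutex_lock() passes the subclass
straight into the mutex_acquire() lockdep annotation. Roughly, abridged
from include/linux/lockdep.h of that era (when CONFIG_DEBUG_LOCK_ALLOC
is not set, these macros compile away to no-ops):

#define mutex_acquire(l, s, t, i) \
	lock_acquire_exclusive(l, s, t, NULL, i)

#define lock_acquire_exclusive(l, s, t, n, i) \
	lock_acquire(l, s, t, 0, 1, n, i)

The s argument is the subclass, so rt_mutex_lock_nested(lock, n) records
the acquisition against subclass n of the lock's class, which is what the
nested annotation in the i2c-mux follow-up patch relies on.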

Patch

diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index 1b92a28dd672..6fd615a0eea9 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -106,7 +106,14 @@ static inline int rt_mutex_is_locked(struct rt_mutex *lock)
 extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key);
 extern void rt_mutex_destroy(struct rt_mutex *lock);
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass);
+#define rt_mutex_lock(lock) rt_mutex_lock_nested(lock, 0)
+#else
 extern void rt_mutex_lock(struct rt_mutex *lock);
+#define rt_mutex_lock_nested(lock, subclass) rt_mutex_lock(lock)
+#endif
+
 extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
 extern int rt_mutex_timed_lock(struct rt_mutex *lock,
 			       struct hrtimer_sleeper *timeout);
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 4f014be7a4b8..2823d4163a37 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1465,6 +1465,29 @@ rt_mutex_fastunlock(struct rt_mutex *lock,
 		rt_mutex_postunlock(&wake_q);
 }
 
+static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass)
+{
+	might_sleep();
+
+	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
+}
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+/**
+ * rt_mutex_lock_nested - lock a rt_mutex
+ *
+ * @lock: the rt_mutex to be locked
+ * @subclass: the lockdep subclass
+ */
+void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
+{
+	__rt_mutex_lock(lock, subclass);
+}
+EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
+#endif
+
+#ifndef CONFIG_DEBUG_LOCK_ALLOC
 /**
  * rt_mutex_lock - lock a rt_mutex
  *
@@ -1472,12 +1495,10 @@ rt_mutex_fastunlock(struct rt_mutex *lock,
  */
 void __sched rt_mutex_lock(struct rt_mutex *lock)
 {
-	might_sleep();
-
-	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
+	__rt_mutex_lock(lock, 0);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock);
+#endif
 
 /**
  * rt_mutex_lock_interruptible - lock a rt_mutex interruptible