@@ -225,9 +225,13 @@ static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
return (u32)(((u64) val * ep_ro) >> 32);
}
-#if defined(CONFIG_MMU) && \
- (defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP))
+#if defined(CONFIG_MMU) && defined(CONFIG_PROVE_LOCKING)
void might_fault(void);
+#elif defined(CONFIG_MMU) && defined(CONFIG_DEBUG_ATOMIC_SLEEP)
+static inline void might_fault(void)
+{
+ __might_sleep(__FILE__, __LINE__, 0);
+}
#else
static inline void might_fault(void) { }
#endif
@@ -3699,7 +3699,7 @@ void print_vma_addr(char *prefix, unsigned long ip)
up_read(&mm->mmap_sem);
}
-#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
+#ifdef CONFIG_PROVE_LOCKING
void might_fault(void)
{
/*
@@ -3711,17 +3711,14 @@ void might_fault(void)
if (segment_eq(get_fs(), KERNEL_DS))
return;
+ __might_sleep(__FILE__, __LINE__, 0);
+
/*
* it would be nicer only to annotate paths which are not under
* pagefault_disable, however that requires a larger audit and
* providing helpers like get_user_atomic.
*/
- if (in_atomic())
- return;
-
- __might_sleep(__FILE__, __LINE__, 0);
-
- if (current->mm)
+ if (!in_atomic() && current->mm)
might_lock_read(&current->mm->mmap_sem);
}
EXPORT_SYMBOL(might_fault);
Commit 662bbcb2747c2422cf98d3d97619509379eee466 disabled "sleep in atomic" checks for all user access code (that uses might_fault()). That change effectively disabled CONFIG_DEBUG_ATOMIC_SLEEP for all user access functions. However, this is a valuable debugging aid that we want to keep. If user memory is to be accessed while pagefault_disabled() is set, the atomic variants of copy_(to|from)_user can be used instead.

This patch reverts commit 662bbcb2747c2422cf98d3d97619509379eee466, taking care of the !MMU optimization.

Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
---
 include/linux/kernel.h | 8 ++++++--
 mm/memory.c            | 11 ++++-------
 2 files changed, 10 insertions(+), 9 deletions(-)
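
For illustration only (not part of the patch), the pattern the message
refers to could look roughly like this; the helper name peek_user_word()
is made up for this sketch, while pagefault_disable()/pagefault_enable()
and __copy_from_user_inatomic() are the existing kernel primitives:

#include <linux/uaccess.h>
#include <linux/errno.h>

/*
 * Sketch: read one word of user memory with page faults disabled.
 * A plain copy_from_user() would trigger the restored might_fault()
 * check here, so the atomic variant is used; it returns the number
 * of bytes it could not copy instead of sleeping on a fault.
 */
static int peek_user_word(const void __user *uaddr, unsigned long *val)
{
	unsigned long left;

	pagefault_disable();
	left = __copy_from_user_inatomic(val, uaddr, sizeof(*val));
	pagefault_enable();

	return left ? -EFAULT : 0;
}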