Patchwork [4/4] powerpc: Add vm debug code to catch errors

login
register
mail settings
Submitter Aneesh Kumar K.V
Date Feb. 14, 2013, 8:36 a.m.
Message ID <1360830983-1812-4-git-send-email-aneesh.kumar@linux.vnet.ibm.com>
Download mbox | patch
Permalink /patch/220398/
State Superseded
Headers show

Comments

Aneesh Kumar K.V - Feb. 14, 2013, 8:36 a.m.
From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>

We need to make sure that we don't have higher bits of the kernel effective
address set. That would result in multiple kernel segments having the same
proto vsid. Add debug code to make sure we capture this.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
---
 arch/powerpc/include/asm/mmu-hash64.h |   49 +++++++++++++++++++++++++++++----
 1 file changed, 43 insertions(+), 6 deletions(-)
Paul Mackerras - Feb. 15, 2013, 4:46 a.m.
On Thu, Feb 14, 2013 at 02:06:23PM +0530, Aneesh Kumar K.V wrote:
> From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
> 
> We need to make sure that we don't have higher bits of the kernel effective
> address set. That would result in multiple kernel segments having the same
> proto vsid. Add debug code to make sure we capture this.

I'm not sure that WARN_ON is the best way to handle this, since this
code is called from low levels of the MMU management code, and a
WARN_ON will cause a trap that could possibly come at a time when we
can't handle a trap very well.  I think instead these functions should
return a vsid of 0 for bad addresses, and the callers should detect
that case and handle it appropriately, e.g. hash_page() should return
an error.

Paul.

Patch

diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 0e08252..3e297ea 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -22,6 +22,13 @@ 
  */
 #include <asm/pgtable-ppc64.h>
 
+#ifdef CONFIG_DEBUG_VM
+#ifndef __ASSEMBLY__
+#include <asm/udbg.h>
+#include <asm/bug.h>
+#endif
+#endif
+
 /*
  * Segment table
  */
@@ -522,11 +529,32 @@  static inline int user_segment_size(unsigned long addr)
 static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
 				     int ssize)
 {
-	if (ssize == MMU_SEGSIZE_256M)
-		return vsid_scramble((context << USER_ESID_BITS)
-				     | (ea >> SID_SHIFT), 256M);
-	return vsid_scramble((context << USER_ESID_BITS_1T)
-			     | (ea >> SID_SHIFT_1T), 1T);
+	if (ssize == MMU_SEGSIZE_256M) {
+		context = context << USER_ESID_BITS;
+		ea = ea >> SID_SHIFT;
+#ifdef CONFIG_DEBUG_VM
+		/*
+		 * context and ea should not overlap.
+		 */
+		if (context & ea) {
+			udbg_printf("Overlapping bits %lx %lx\n", context, ea);
+			WARN_ON(1);
+		}
+#endif
+		return vsid_scramble(context | ea, 256M);
+	}
+	context = context << USER_ESID_BITS_1T;
+	ea = ea >> SID_SHIFT_1T;
+#ifdef CONFIG_DEBUG_VM
+	/*
+	 * context and ea should not overlap.
+	 */
+	if (context & ea) {
+		udbg_printf("Overlapping bits for 1T %lx %lx\n", context, ea);
+		WARN_ON(1);
+	}
+#endif
+	return vsid_scramble(context | ea, 1T);
 }
 
 /*
@@ -540,11 +568,20 @@  static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
  */
 static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
 {
+	unsigned int c_index;
 	unsigned long context;
 	/*
 	 * kernel take the top 4 context from the available range
 	 */
-	context = (MAX_CONTEXT - 4) +  ((ea >> 60) - 0xc);
+	c_index =   ((ea >> 60) - 0xc);
+	context = (MAX_CONTEXT - 4) + c_index;
+#ifdef CONFIG_DEBUG_VM
+	/*
+	 * Drop the c_index related bits from ea, so we get
+	 * non overlapping context and ea.
+	 */
+	ea = ea - ((0xcUL + c_index) << 60);
+#endif
 	return get_vsid(context, ea, ssize);
 }
 #endif /* __ASSEMBLY__ */