Patchwork [RFC] sparc32: make srmmu helpers leon compatible

login
register
mail settings
Submitter Sam Ravnborg
Date May 19, 2012, 8:03 p.m.
Message ID <20120519200321.GA7617@merkur.ravnborg.org>
Download mbox | patch
Permalink /patch/160202/
State RFC
Delegated to: David Miller
Headers show

Comments

Sam Ravnborg - May 19, 2012, 8:03 p.m.
I am trying to make the sparc32 kernel compatible with
leon and sun4m, sun4d boxes at the same time.

The stuff in arch/sparc/mm/ looked like the simplest
to fix so I looked at that first.
Some of the helper functions used ASI_M_MMUREGS which
is redefined to 0x19 in the LEON case - so these functions
need some special care.

I was originally planning to use the cpuid_patch support
to deal with the differences - but this is only in place
_after_ we have initialised the mmu.

But sparc_cpu_model is set before we initialize the mmu,
so we could use that.

I came up with the following that I am not too happy with...
Some of the functions are really not used by leon, but I
converted all to be complete.

Any better ideas?

Could we call per_cpu_patch() before paging_init() maybe?

The patch below depends on other changes I only have in my
tree - to be submitted if/when ready.

	Sam


--
To unsubscribe from this list: send the line "unsubscribe sparclinux" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
David Miller - May 19, 2012, 10:25 p.m.
From: Sam Ravnborg <sam@ravnborg.org>
Date: Sat, 19 May 2012 22:03:21 +0200

> Could we call per_cpu_patch() before paging_init() maybe?

You definitely want to patch this since it's just the ASI value in the
loads and stores.

But the reason we have to invoke per_cpu_patch() after paging_init() is
that we need the cache flushing operations hooked up.

Otherwise we can't flush the instruction cache properly.

Actually...

The way we do that now is overkill.  We only needed to use the MMU
cache ops when we had sun4c around because sun4c lacked support for
the "flush" instruction.

But all sun4m and later chips have it so we can use it
unconditionally.

So in the per_cpu_patch() code, get rid of the cache ops invocation,
and instead execute a "flush %reg" after each of the instruction patch
assignments, where %reg is set to the address of the instruction that
was stored into.

Perhaps take the flushi() definition from asm/cacheflush_64.h and
place it into asm/cacheflush.h, then you can simply use that.

We can then use it to do things like make kprobes and jump_label
work on sparc32.
--
To unsubscribe from this list: send the line "unsubscribe sparclinux" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Patch

diff --git a/arch/sparc/include/asm/cpu_type.h b/arch/sparc/include/asm/cpu_type.h
index 84d7d83..5a8b206 100644
--- a/arch/sparc/include/asm/cpu_type.h
+++ b/arch/sparc/include/asm/cpu_type.h
@@ -1,6 +1,7 @@ 
 #ifndef __ASM_CPU_TYPE_H
 #define __ASM_CPU_TYPE_H
 
+#ifndef __ASSEMBLY__
 /*
  * Sparc (general) CPU types
  */
@@ -25,4 +26,5 @@  extern enum sparc_cpu sparc_cpu_model;
 
 #endif
 
+#endif /* __ASSEMBLY__ */
 #endif /* __ASM_CPU_TYPE_H */
diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
index cb82870..4b50085 100644
--- a/arch/sparc/include/asm/pgtsrmmu.h
+++ b/arch/sparc/include/asm/pgtsrmmu.h
@@ -7,6 +7,7 @@ 
 #ifndef _SPARC_PGTSRMMU_H
 #define _SPARC_PGTSRMMU_H
 
+#include <asm/cpu_type.h>
 #include <asm/page.h>
 
 #ifdef __ASSEMBLY__
@@ -151,42 +152,81 @@  extern void *srmmu_nocache_pool;
 static inline unsigned int srmmu_get_mmureg(void)
 {
         unsigned int retval;
-	__asm__ __volatile__("lda [%%g0] %1, %0\n\t" :
-			     "=r" (retval) :
-			     "i" (ASI_M_MMUREGS));
+	if (sparc_cpu_model != sparc_leon) {
+		__asm__ __volatile__("lda [%%g0] %1, %0\n\t" :
+				     "=r" (retval) :
+				     "i" (ASI_M_MMUREGS));
+	} else {
+		__asm__ __volatile__("lda [%%g0] %1, %0\n\t" :
+				     "=r" (retval) :
+				     "i" (ASI_LEON_MMUREGS));
+	}
+
 	return retval;
 }
 
 static inline void srmmu_set_mmureg(unsigned long regval)
 {
-	__asm__ __volatile__("sta %0, [%%g0] %1\n\t" : :
-			     "r" (regval), "i" (ASI_M_MMUREGS) : "memory");
+	if (sparc_cpu_model != sparc_leon) {
+		__asm__ __volatile__("sta %0, [%%g0] %1\n\t" : :
+				     "r" (regval),
+				     "i" (ASI_M_MMUREGS) :
+				     "memory");
+	} else {
+		__asm__ __volatile__("sta %0, [%%g0] %1\n\t" : :
+				     "r" (regval),
+				     "i" (ASI_LEON_MMUREGS) :
+				     "memory");
+	}
 
 }
 
 static inline void srmmu_set_ctable_ptr(unsigned long paddr)
 {
 	paddr = ((paddr >> 4) & SRMMU_CTX_PMASK);
-	__asm__ __volatile__("sta %0, [%1] %2\n\t" : :
-			     "r" (paddr), "r" (SRMMU_CTXTBL_PTR),
-			     "i" (ASI_M_MMUREGS) :
-			     "memory");
+
+	if (sparc_cpu_model != sparc_leon) {
+		__asm__ __volatile__("sta %0, [%1] %2\n\t" : :
+				     "r" (paddr), "r" (SRMMU_CTXTBL_PTR),
+				     "i" (ASI_M_MMUREGS) :
+				     "memory");
+	} else {
+		__asm__ __volatile__("sta %0, [%1] %2\n\t" : :
+				     "r" (paddr), "r" (SRMMU_CTXTBL_PTR),
+				     "i" (ASI_LEON_MMUREGS) :
+				     "memory");
+	}
 }
 
 static inline void srmmu_set_context(int context)
 {
-	__asm__ __volatile__("sta %0, [%1] %2\n\t" : :
-			     "r" (context), "r" (SRMMU_CTX_REG),
-			     "i" (ASI_M_MMUREGS) : "memory");
+	if (sparc_cpu_model != sparc_leon) {
+		__asm__ __volatile__("sta %0, [%1] %2\n\t" : :
+				     "r" (context), "r" (SRMMU_CTX_REG),
+				     "i" (ASI_M_MMUREGS) : "memory");
+	} else {
+		__asm__ __volatile__("sta %0, [%1] %2\n\t" : :
+				     "r" (context), "r" (SRMMU_CTX_REG),
+				     "i" (ASI_LEON_MMUREGS) : "memory");
+	}
 }
 
 static inline int srmmu_get_context(void)
 {
 	register int retval;
-	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
-			     "=r" (retval) :
-			     "r" (SRMMU_CTX_REG),
-			     "i" (ASI_M_MMUREGS));
+
+	if (sparc_cpu_model != sparc_leon) {
+		__asm__ __volatile__("lda [%1] %2, %0\n\t" :
+				     "=r" (retval) :
+				     "r" (SRMMU_CTX_REG),
+				     "i" (ASI_M_MMUREGS));
+	} else {
+		__asm__ __volatile__("lda [%1] %2, %0\n\t" :
+				     "=r" (retval) :
+				     "r" (SRMMU_CTX_REG),
+				     "i" (ASI_LEON_MMUREGS));
+	}
+
 	return retval;
 }
 
@@ -194,9 +234,15 @@  static inline unsigned int srmmu_get_fstatus(void)
 {
 	unsigned int retval;
 
-	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
-			     "=r" (retval) :
-			     "r" (SRMMU_FAULT_STATUS), "i" (ASI_M_MMUREGS));
+	if (sparc_cpu_model != sparc_leon) {
+		__asm__ __volatile__("lda [%1] %2, %0\n\t" :
+				     "=r" (retval) :
+				     "r" (SRMMU_FAULT_STATUS), "i" (ASI_M_MMUREGS));
+	} else {
+		__asm__ __volatile__("lda [%1] %2, %0\n\t" :
+				     "=r" (retval) :
+				     "r" (SRMMU_FAULT_STATUS), "i" (ASI_LEON_MMUREGS));
+	}
 	return retval;
 }
 
@@ -204,9 +250,15 @@  static inline unsigned int srmmu_get_faddr(void)
 {
 	unsigned int retval;
 
-	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
-			     "=r" (retval) :
-			     "r" (SRMMU_FAULT_ADDR), "i" (ASI_M_MMUREGS));
+	if (sparc_cpu_model != sparc_leon) {
+		__asm__ __volatile__("lda [%1] %2, %0\n\t" :
+				     "=r" (retval) :
+				     "r" (SRMMU_FAULT_ADDR), "i" (ASI_M_MMUREGS));
+	} else {
+		__asm__ __volatile__("lda [%1] %2, %0\n\t" :
+				     "=r" (retval) :
+				     "r" (SRMMU_FAULT_ADDR), "i" (ASI_LEON_MMUREGS));
+	}
 	return retval;
 }