@@ -59,6 +59,7 @@ _GLOBAL(slb_miss_kernel_load_linear)
BEGIN_FTR_SECTION
b slb_finish_load
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
+ srdi r10,r10,40-28 /* get 1T ESID */
b slb_finish_load_1T
1:
@@ -88,6 +89,7 @@ _GLOBAL(slb_miss_kernel_load_vmemmap)
BEGIN_FTR_SECTION
b slb_finish_load
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
+ srdi r10,r10,40-28 /* get 1T ESID */
b slb_finish_load_1T
0: /* user address: proto-VSID = context << 15 | ESID. First check
@@ -155,13 +157,16 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
ld r9,PACACONTEXTID(r13)
BEGIN_FTR_SECTION
cmpldi r10,0x1000
+ bge 9f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
rldimi r10,r9,USER_ESID_BITS,0
+ b slb_finish_load
BEGIN_FTR_SECTION
- bge slb_finish_load_1T
+9:
+ srdi r10,r10,40-28 /* get 1T ESID */
+ rldimi r10,r9,USER_ESID_BITS,0
+ b slb_finish_load_1T
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
- b slb_finish_load
-
8: /* invalid EA */
li r10,0 /* BAD_VSID */
li r11,SLB_VSID_USER /* flags don't much matter */
@@ -292,7 +297,6 @@ _GLOBAL(slb_compare_rr_to_size)
* r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9
*/
slb_finish_load_1T:
- srdi r10,r10,40-28 /* get 1T ESID */
ASM_VSID_SCRAMBLE(r10,r9,1T)
/*
* bits above VSID_BITS_1T need to be ignored from r10
--
1.7.10
From 43792d9e2c394370c71623f2769d11ff98090918 Mon Sep 17 00:00:00 2001
From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Date: Tue, 24 Jul 2012 11:40:29 +0530
Subject: [PATCH 2/2] arch/powerpc: properly isolate kernel and user
proto-VSID
The proto-VSID space is divided into two classes:
User: 0 to 2^(CONTEXT_BITS + USER_ESID_BITS) -1
kernel: 2^(CONTEXT_BITS + USER_ESID_BITS) to 2^(VSID_BITS) - 1
This patch implements the isolation described above.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
---
arch/powerpc/include/asm/mmu-hash64.h | 32 ++++++++++++++++++++++++--------
arch/powerpc/kernel/exceptions-64s.S | 4 +++-
arch/powerpc/mm/slb_low.S | 10 ++++++++++
3 files changed, 37 insertions(+), 9 deletions(-)
@@ -516,9 +516,19 @@ typedef struct {
/* This is only valid for addresses >= PAGE_OFFSET */
static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
{
- if (ssize == MMU_SEGSIZE_256M)
- return vsid_scramble(ea >> SID_SHIFT, 256M);
- return vsid_scramble(ea >> SID_SHIFT_1T, 1T);
+ unsigned long proto_vsid;
+ /*
+ * We need to make sure proto_vsid for the kernel is
+ * >= 2^(CONTEXT_BITS + USER_ESID_BITS[_1T])
+ */
+ if (ssize == MMU_SEGSIZE_256M) {
+ proto_vsid = ea >> SID_SHIFT;
+ proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS));
+ return vsid_scramble(proto_vsid, 256M);
+ }
+ proto_vsid = ea >> SID_SHIFT_1T;
+ proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS_1T));
+ return vsid_scramble(proto_vsid, 1T);
}
/* Returns the segment size indicator for a user address */
@@ -534,11 +544,17 @@ static inline int user_segment_size(unsigned long addr)
static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
int ssize)
{
- if (ssize == MMU_SEGSIZE_256M)
- return vsid_scramble((context << USER_ESID_BITS)
- | (ea >> SID_SHIFT), 256M);
- return vsid_scramble((context << USER_ESID_BITS_1T)
- | (ea >> SID_SHIFT_1T), 1T);
+ unsigned long proto_vsid;
+ if (ssize == MMU_SEGSIZE_256M) {
+ proto_vsid = ((context << USER_ESID_BITS) |(ea >> SID_SHIFT));
+ /* truncate this to 37 bits */
+ proto_vsid &= (1UL << (CONTEXT_BITS + USER_ESID_BITS)) - 1;
+ return vsid_scramble(proto_vsid, 256M);
+ }
+ proto_vsid = ((context << USER_ESID_BITS_1T) | (ea >> SID_SHIFT_1T));
+ /* truncate this to 25 bits */
+ proto_vsid &= (1UL << (CONTEXT_BITS + USER_ESID_BITS_1T)) - 1;
+ return vsid_scramble( proto_vsid, 1T);
}
#endif /* __ASSEMBLY__ */
@@ -958,7 +958,9 @@ _GLOBAL(do_stab_bolted)
rldimi r10,r11,7,52 /* r10 = first ste of the group */
/* Calculate VSID */
- /* This is a kernel address, so protovsid = ESID */
+ /* This is a kernel address, so protovsid = ESID | 1 << 37 */
+ li r9,0x1
+ rldimi r11,r9,(CONTEXT_BITS + USER_ESID_BITS),0
ASM_VSID_SCRAMBLE(r11, r9, 256M)
rldic r9,r11,12,16 /* r9 = vsid << 12 */
@@ -57,9 +57,13 @@ _GLOBAL(slb_allocate_realmode)
_GLOBAL(slb_miss_kernel_load_linear)
li r11,0
BEGIN_FTR_SECTION
+ li r9,0x1
+ rldimi r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0
b slb_finish_load
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
srdi r10,r10,40-28 /* get 1T ESID */
+ li r9,0x1
+ rldimi r10,r9,(CONTEXT_BITS + USER_ESID_BITS_1T),0
b slb_finish_load_1T
1:
@@ -87,9 +91,13 @@ _GLOBAL(slb_miss_kernel_load_vmemmap)
li r11,0
6:
BEGIN_FTR_SECTION
+ li r9,0x1
+ rldimi r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0
b slb_finish_load
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
srdi r10,r10,40-28 /* get 1T ESID */
+ li r9,0x1
+ rldimi r10,r9,(CONTEXT_BITS + USER_ESID_BITS_1T),0
b slb_finish_load_1T
0: /* user address: proto-VSID = context << 15 | ESID. First check
@@ -160,11 +168,13 @@ BEGIN_FTR_SECTION
bge 9f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
rldimi r10,r9,USER_ESID_BITS,0
+ clrldi r10,r10,(64 - (CONTEXT_BITS + USER_ESID_BITS))
b slb_finish_load
BEGIN_FTR_SECTION
9:
srdi r10,r10,40-28 /* get 1T ESID */
rldimi r10,r9,USER_ESID_BITS,0
+ clrldi r10,r10,(64 - (CONTEXT_BITS + USER_ESID_BITS_1T))
b slb_finish_load_1T
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
8: /* invalid EA */