Patchwork [-V8,0/11] arch/powerpc: Add 64TB support to ppc64

login
register
mail settings
Submitter Aneesh Kumar K.V
Date Sept. 7, 2012, 5:42 a.m.
Message ID <871uiexuau.fsf@linux.vnet.ibm.com>
Download mbox | patch
Permalink /patch/182322/
State Superseded
Headers show

Comments

Aneesh Kumar K.V - Sept. 7, 2012, 5:42 a.m.
Benjamin Herrenschmidt <benh@kernel.crashing.org> writes:

> On Thu, 2012-09-06 at 20:59 +0530, Aneesh Kumar K.V wrote:
>> Hi,
>> 
>> This patchset includes patches for supporting 64TB with ppc64. I haven't booted
>> this on hardware with 64TB memory yet. But they boot fine on real hardware with
>> less memory. Changes extend VSID bits to 38 bits for a 256MB segment
>> and 26 bits for 1TB segments.
>
> Your series breaks the embedded 64-bit build. You seem to be hard wiring
> dependencies on slice stuff all over 64-bit stuff regardless of the MMU
> type or the value of CONFIG_MM_SLICES.
>
> Also all these:
>
>> +/* 4 bits per slice and we have one slice per 1TB */
>> +#if 0 /* We can't directly include pgtable.h hence this hack */
>> +#define SLICE_ARRAY_SIZE  (PGTABLE_RANGE >> 41)
>> +#else
>> +/* Right now we only support 64TB */
>> +#define SLICE_ARRAY_SIZE  32
>> +#endif
>
> Things are just too horrible. Find a different way of doing it, if
> necessary create a new range define somewhere, whatever but don't leave
> that crap as-is, it's too wrong.
>
> Dropping the series for now.
>

How about the change below? If you are OK with moving the range details to a
new header, I can fold this into patch 7 and send a new series.

-aneesh
Benjamin Herrenschmidt - Sept. 7, 2012, 7:53 a.m.
On Fri, 2012-09-07 at 11:12 +0530, Aneesh Kumar K.V wrote:

> 
> diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
> index 428f23e..057a12a 100644
> --- a/arch/powerpc/include/asm/mmu-hash64.h
> +++ b/arch/powerpc/include/asm/mmu-hash64.h
> @@ -14,6 +14,7 @@
>  
>  #include <asm/asm-compat.h>
>  #include <asm/page.h>
> +#include <asm/pgtable-ppc64-range.h>

Nah, that's all too gross... I think the right thing to do is to move
the slice stuff out of page_64.h

>  /*
>   * Segment table
> @@ -415,12 +416,7 @@ extern void slb_set_size(u16 size);
>  	add	rt,rt,rx
>  
>  /* 4 bits per slice and we have one slice per 1TB */
> -#if 0 /* We can't directly include pgtable.h hence this hack */
>  #define SLICE_ARRAY_SIZE  (PGTABLE_RANGE >> 41)
> -#else
> -/* Right now we only support 64TB */
> -#define SLICE_ARRAY_SIZE  32
> -#endif
>  
>  #ifndef __ASSEMBLY__
>  
> diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
> index b55beb4..01ab518 100644
> --- a/arch/powerpc/include/asm/page_64.h
> +++ b/arch/powerpc/include/asm/page_64.h
> @@ -78,16 +78,14 @@ extern u64 ppc64_pft_size;
>  #define GET_LOW_SLICE_INDEX(addr)	((addr) >> SLICE_LOW_SHIFT)
>  #define GET_HIGH_SLICE_INDEX(addr)	((addr) >> SLICE_HIGH_SHIFT)
>  
> -/* 1 bit per slice and we have one slice per 1TB */
> -#if 0 /* We can't directly include pgtable.h hence this hack */
> -#define SLICE_MASK_SIZE (PGTABLE_RANGE >> 43)
> -#else
> -/*
> +/* 1 bit per slice and we have one slice per 1TB
>   * Right now we support only 64TB.
>   * IF we change this we will have to change the type
>   * of high_slices
>   */
>  #define SLICE_MASK_SIZE 8
> +#if (PGTABLE_RANGE >> 43) > SLICE_MASK_SIZE
> +#error PGTABLE_RANGE exceeds slice_mask high_slices size
>  #endif
>  
>  #ifndef __ASSEMBLY__
> diff --git a/arch/powerpc/include/asm/pgtable-ppc64-range.h b/arch/powerpc/include/asm/pgtable-ppc64-range.h
> new file mode 100644
> index 0000000..04a825c
> --- /dev/null
> +++ b/arch/powerpc/include/asm/pgtable-ppc64-range.h
> @@ -0,0 +1,16 @@
> +#ifndef _ASM_POWERPC_PGTABLE_PPC64_RANGE_H_
> +#define _ASM_POWERPC_PGTABLE_PPC64_RANGE_H_
> +
> +#ifdef CONFIG_PPC_64K_PAGES
> +#include <asm/pgtable-ppc64-64k.h>
> +#else
> +#include <asm/pgtable-ppc64-4k.h>
> +#endif
> +
> +/*
> + * Size of EA range mapped by our pagetables.
> + */
> +#define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
> +			    PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
> +#define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE)
> +#endif
> diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
> index dea953f..ee783b4 100644
> --- a/arch/powerpc/include/asm/pgtable-ppc64.h
> +++ b/arch/powerpc/include/asm/pgtable-ppc64.h
> @@ -13,13 +13,7 @@
>  
>  #define FIRST_USER_ADDRESS	0
>  
> -/*
> - * Size of EA range mapped by our pagetables.
> - */
> -#define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
> -                	    PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
> -#define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE)
> -
> +#include <asm/pgtable-ppc64-range.h>
>  
>  /* Some sanity checking */
>  #if TASK_SIZE_USER64 > PGTABLE_RANGE
> @@ -32,14 +26,6 @@
>  #endif
>  #endif
>  
> -#if (PGTABLE_RANGE >> 41) > SLICE_ARRAY_SIZE
> -#error PGTABLE_RANGE exceeds SLICE_ARRAY_SIZE
> -#endif
> -
> -#if (PGTABLE_RANGE >> 43) > SLICE_MASK_SIZE
> -#error PGTABLE_RANGE exceeds slice_mask high_slices size
> -#endif
> -
>  /*
>   * Define the address range of the kernel non-linear virtual area
>   */

Ben.

Patch

diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 428f23e..057a12a 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -14,6 +14,7 @@ 
 
 #include <asm/asm-compat.h>
 #include <asm/page.h>
+#include <asm/pgtable-ppc64-range.h>
 
 /*
  * Segment table
@@ -415,12 +416,7 @@  extern void slb_set_size(u16 size);
 	add	rt,rt,rx
 
 /* 4 bits per slice and we have one slice per 1TB */
-#if 0 /* We can't directly include pgtable.h hence this hack */
 #define SLICE_ARRAY_SIZE  (PGTABLE_RANGE >> 41)
-#else
-/* Right now we only support 64TB */
-#define SLICE_ARRAY_SIZE  32
-#endif
 
 #ifndef __ASSEMBLY__
 
diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
index b55beb4..01ab518 100644
--- a/arch/powerpc/include/asm/page_64.h
+++ b/arch/powerpc/include/asm/page_64.h
@@ -78,16 +78,14 @@  extern u64 ppc64_pft_size;
 #define GET_LOW_SLICE_INDEX(addr)	((addr) >> SLICE_LOW_SHIFT)
 #define GET_HIGH_SLICE_INDEX(addr)	((addr) >> SLICE_HIGH_SHIFT)
 
-/* 1 bit per slice and we have one slice per 1TB */
-#if 0 /* We can't directly include pgtable.h hence this hack */
-#define SLICE_MASK_SIZE (PGTABLE_RANGE >> 43)
-#else
-/*
+/* 1 bit per slice and we have one slice per 1TB
  * Right now we support only 64TB.
  * IF we change this we will have to change the type
  * of high_slices
  */
 #define SLICE_MASK_SIZE 8
+#if (PGTABLE_RANGE >> 43) > SLICE_MASK_SIZE
+#error PGTABLE_RANGE exceeds slice_mask high_slices size
 #endif
 
 #ifndef __ASSEMBLY__
diff --git a/arch/powerpc/include/asm/pgtable-ppc64-range.h b/arch/powerpc/include/asm/pgtable-ppc64-range.h
new file mode 100644
index 0000000..04a825c
--- /dev/null
+++ b/arch/powerpc/include/asm/pgtable-ppc64-range.h
@@ -0,0 +1,16 @@ 
+#ifndef _ASM_POWERPC_PGTABLE_PPC64_RANGE_H_
+#define _ASM_POWERPC_PGTABLE_PPC64_RANGE_H_
+
+#ifdef CONFIG_PPC_64K_PAGES
+#include <asm/pgtable-ppc64-64k.h>
+#else
+#include <asm/pgtable-ppc64-4k.h>
+#endif
+
+/*
+ * Size of EA range mapped by our pagetables.
+ */
+#define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
+			    PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
+#define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE)
+#endif
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index dea953f..ee783b4 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -13,13 +13,7 @@ 
 
 #define FIRST_USER_ADDRESS	0
 
-/*
- * Size of EA range mapped by our pagetables.
- */
-#define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
-                	    PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
-#define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE)
-
+#include <asm/pgtable-ppc64-range.h>
 
 /* Some sanity checking */
 #if TASK_SIZE_USER64 > PGTABLE_RANGE
@@ -32,14 +26,6 @@ 
 #endif
 #endif
 
-#if (PGTABLE_RANGE >> 41) > SLICE_ARRAY_SIZE
-#error PGTABLE_RANGE exceeds SLICE_ARRAY_SIZE
-#endif
-
-#if (PGTABLE_RANGE >> 43) > SLICE_MASK_SIZE
-#error PGTABLE_RANGE exceeds slice_mask high_slices size
-#endif
-
 /*
  * Define the address range of the kernel non-linear virtual area
  */