
[V2,34/68] powerpc/mm/radix: Add radix callback for vmemmap and map_kernel page

Message ID 1460182444-2468-35-git-send-email-aneesh.kumar@linux.vnet.ibm.com (mailing list archive)
State Superseded

Commit Message

Aneesh Kumar K.V April 9, 2016, 6:13 a.m. UTC
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
---
 arch/powerpc/include/asm/book3s/64/pgtable.h | 12 ++++++++++++
 arch/powerpc/include/asm/book3s/64/radix.h   |  6 ++++++
 arch/powerpc/mm/pgtable-radix.c              | 20 ++++++++++++++++++++
 3 files changed, 38 insertions(+)

Comments

Balbir Singh April 21, 2016, 1:46 p.m. UTC | #1
On 09/04/16 16:13, Aneesh Kumar K.V wrote:
> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
> ---
>  arch/powerpc/include/asm/book3s/64/pgtable.h | 12 ++++++++++++
>  arch/powerpc/include/asm/book3s/64/radix.h   |  6 ++++++
>  arch/powerpc/mm/pgtable-radix.c              | 20 ++++++++++++++++++++
>  3 files changed, 38 insertions(+)
> 
> diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
> index 623911ed64e9..c16037116625 100644
> --- a/arch/powerpc/include/asm/book3s/64/pgtable.h
> +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
> @@ -725,6 +725,13 @@ void pgtable_cache_init(void);
>  static inline int map_kernel_page(unsigned long ea, unsigned long pa,
>  				  unsigned long flags)
>  {
> +	if (radix_enabled()) {
> +#if defined(CONFIG_PPC_RADIX_MMU) && defined(DEBUG_VM)
Do you need the defined(CONFIG_PPC_RADIX_MMU)? Is radix_enabled() not sufficient?
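If radix_enabled() already compiles away to 0 when CONFIG_PPC_RADIX_MMU is off, something like the following looks sufficient (untested sketch; also assuming CONFIG_DEBUG_VM is the symbol intended here rather than DEBUG_VM):

	if (radix_enabled()) {
#ifdef CONFIG_DEBUG_VM
		/* sanity check: radix maps this range with PAGE_SIZE entries */
		unsigned long page_size = 1 << mmu_psize_defs[mmu_io_psize].shift;

		WARN(page_size != PAGE_SIZE, "I/O page size != PAGE_SIZE");
#endif
		return map_radix_kernel_page(ea, pa, __pgprot(flags), PAGE_SIZE);
	}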

> +		unsigned long page_size = 1 << mmu_psize_defs[mmu_io_psize].shift;
> +		WARN((page_size != PAGE_SIZE), "I/O page size != PAGE_SIZE");
> +#endif
> +		return map_radix_kernel_page(ea, pa, __pgprot(flags), PAGE_SIZE);
> +	}
>  	return hlmap_kernel_page(ea, pa, flags);
>  }
>  
> @@ -732,6 +739,8 @@ static inline int __meminit vmemmap_create_mapping(unsigned long start,
>  						   unsigned long page_size,
>  						   unsigned long phys)
>  {
> +	if (radix_enabled())
> +		return rvmemmap_create_mapping(start, page_size, phys);
>  	return hlvmemmap_create_mapping(start, page_size, phys);
>  }
>  
> @@ -739,6 +748,9 @@ static inline int __meminit vmemmap_create_mapping(unsigned long start,
>  static inline void vmemmap_remove_mapping(unsigned long start,
>  					  unsigned long page_size)
>  {
> +
> +	if (radix_enabled())
> +		return rvmemmap_remove_mapping(start, page_size);
>  	return hlvmemmap_remove_mapping(start, page_size);
>  }
>  #endif
> diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
> index 806dc2195f85..040c4a56d07b 100644
> --- a/arch/powerpc/include/asm/book3s/64/radix.h
> +++ b/arch/powerpc/include/asm/book3s/64/radix.h
> @@ -133,6 +133,12 @@ static inline int rpmd_trans_huge(pmd_t pmd)
>  
>  #endif
>  
> +extern int __meminit rvmemmap_create_mapping(unsigned long start,
> +					     unsigned long page_size,
> +					     unsigned long phys);
> +extern void rvmemmap_remove_mapping(unsigned long start,
> +				    unsigned long page_size);
> +
>  extern int map_radix_kernel_page(unsigned long ea, unsigned long pa,
>  				 pgprot_t flags, unsigned int psz);
>  #endif /* __ASSEMBLY__ */
> diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
> index 5737769469b3..a5d0ae5630b3 100644
> --- a/arch/powerpc/mm/pgtable-radix.c
> +++ b/arch/powerpc/mm/pgtable-radix.c
> @@ -342,3 +342,23 @@ void rsetup_initial_memory_limit(phys_addr_t first_memblock_base,
>  	/* Finally limit subsequent allocations */
>  	memblock_set_current_limit(first_memblock_base + first_memblock_size);
>  }
> +
> +#ifdef CONFIG_SPARSEMEM_VMEMMAP
> +int __meminit rvmemmap_create_mapping(unsigned long start,
> +				      unsigned long page_size,
> +				      unsigned long phys)
> +{
> +	/* Create a PTE encoding */
> +	unsigned long flags = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_KERNEL_RW;
> +
> +	BUG_ON(map_radix_kernel_page(start, phys, __pgprot(flags), page_size));
> +	return 0;
> +}
> +
> +#ifdef CONFIG_MEMORY_HOTPLUG
> +void rvmemmap_remove_mapping(unsigned long start, unsigned long page_size)
> +{
> +	/* FIXME!! intel does more. We should free page tables mapping vmemmap ? */

You mean x86/x86_64? Are we not going to remove this entry from the kernel page table?
Will we just leave it around?
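Even if freeing the page-table pages themselves is left for later, clearing the entry and flushing seems doable. A rough, untested sketch (hypothetical helper name; assumes the vmemmap was created as a single entry of page_size by map_radix_kernel_page, and leaves the intermediate page-table pages allocated, which is the part x86's remove_pagetable() additionally handles):

static void rvmemmap_clear_mapping(unsigned long start, unsigned long page_size)
{
	pgd_t *pgd = pgd_offset_k(start);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return;
	pud = pud_offset(pgd, start);
	if (pud_none(*pud))
		return;
	pmd = pmd_offset(pud, start);
	if (pmd_none(*pmd))
		return;

	if (page_size == PMD_SIZE) {
		/* vmemmap backed by a huge page: clear the pmd entry itself */
		pmd_clear(pmd);
	} else {
		/* PAGE_SIZE mapping: clear the pte */
		pte_t *pte = pte_offset_kernel(pmd, start);

		pte_clear(&init_mm, start, pte);
	}
	flush_tlb_kernel_range(start, start + page_size);
}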

> +}
> +#endif
> +#endif
>

Patch

diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 623911ed64e9..c16037116625 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -725,6 +725,13 @@ void pgtable_cache_init(void);
 static inline int map_kernel_page(unsigned long ea, unsigned long pa,
 				  unsigned long flags)
 {
+	if (radix_enabled()) {
+#if defined(CONFIG_PPC_RADIX_MMU) && defined(DEBUG_VM)
+		unsigned long page_size = 1 << mmu_psize_defs[mmu_io_psize].shift;
+		WARN((page_size != PAGE_SIZE), "I/O page size != PAGE_SIZE");
+#endif
+		return map_radix_kernel_page(ea, pa, __pgprot(flags), PAGE_SIZE);
+	}
 	return hlmap_kernel_page(ea, pa, flags);
 }
 
@@ -732,6 +739,8 @@ static inline int __meminit vmemmap_create_mapping(unsigned long start,
 						   unsigned long page_size,
 						   unsigned long phys)
 {
+	if (radix_enabled())
+		return rvmemmap_create_mapping(start, page_size, phys);
 	return hlvmemmap_create_mapping(start, page_size, phys);
 }
 
@@ -739,6 +748,9 @@ static inline int __meminit vmemmap_create_mapping(unsigned long start,
 static inline void vmemmap_remove_mapping(unsigned long start,
 					  unsigned long page_size)
 {
+
+	if (radix_enabled())
+		return rvmemmap_remove_mapping(start, page_size);
 	return hlvmemmap_remove_mapping(start, page_size);
 }
 #endif
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index 806dc2195f85..040c4a56d07b 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -133,6 +133,12 @@ static inline int rpmd_trans_huge(pmd_t pmd)
 
 #endif
 
+extern int __meminit rvmemmap_create_mapping(unsigned long start,
+					     unsigned long page_size,
+					     unsigned long phys);
+extern void rvmemmap_remove_mapping(unsigned long start,
+				    unsigned long page_size);
+
 extern int map_radix_kernel_page(unsigned long ea, unsigned long pa,
 				 pgprot_t flags, unsigned int psz);
 #endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index 5737769469b3..a5d0ae5630b3 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -342,3 +342,23 @@ void rsetup_initial_memory_limit(phys_addr_t first_memblock_base,
 	/* Finally limit subsequent allocations */
 	memblock_set_current_limit(first_memblock_base + first_memblock_size);
 }
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+int __meminit rvmemmap_create_mapping(unsigned long start,
+				      unsigned long page_size,
+				      unsigned long phys)
+{
+	/* Create a PTE encoding */
+	unsigned long flags = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_KERNEL_RW;
+
+	BUG_ON(map_radix_kernel_page(start, phys, __pgprot(flags), page_size));
+	return 0;
+}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+void rvmemmap_remove_mapping(unsigned long start, unsigned long page_size)
+{
+	/* FIXME!! intel does more. We should free page tables mapping vmemmap ? */
+}
+#endif
+#endif