diff mbox series

[1/2] powerpc/hugetlb/cma: Allocate gigantic hugetlb pages using CMA

Message ID 20200713150749.25245-1-aneesh.kumar@linux.ibm.com (mailing list archive)
State Accepted
Commit ef26b76d1af61b90eb0dd3da58ad4f97d8e028f8
Headers show
Series [1/2] powerpc/hugetlb/cma: Allocate gigantic hugetlb pages using CMA | expand

Commit Message

Aneesh Kumar K V July 13, 2020, 3:07 p.m. UTC
commit: cf11e85fc08c ("mm: hugetlb: optionally allocate gigantic hugepages using cma")
added support for allocating gigantic hugepages using CMA. This patch
enables the same for powerpc

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
---
 arch/powerpc/include/asm/hugetlb.h |  7 +++++++
 arch/powerpc/kernel/setup-common.c |  3 +++
 arch/powerpc/mm/hugetlbpage.c      | 18 ++++++++++++++++++
 3 files changed, 28 insertions(+)

Comments

Michael Ellerman July 30, 2020, 12:50 p.m. UTC | #1
On Mon, 13 Jul 2020 20:37:48 +0530, Aneesh Kumar K.V wrote:
> commit: cf11e85fc08c ("mm: hugetlb: optionally allocate gigantic hugepages using cma")
> added support for allocating gigantic hugepages using CMA. This patch
> enables the same for powerpc

Applied to powerpc/next.

[1/2] powerpc/hugetlb/cma: Allocate gigantic hugetlb pages using CMA
      https://git.kernel.org/powerpc/c/ef26b76d1af61b90eb0dd3da58ad4f97d8e028f8
[2/2] powerpc/kvm/cma: Improve kernel log during boot
      https://git.kernel.org/powerpc/c/a5a8b258da7861009240b57687dfef47af91b406

cheers
diff mbox series

Patch

diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 551a9d4d3958..013165e62618 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -57,6 +57,7 @@  int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 			       unsigned long addr, pte_t *ptep,
 			       pte_t pte, int dirty);
 
+void gigantic_hugetlb_cma_reserve(void) __init;
 #include <asm-generic/hugetlb.h>
 
 #else /* ! CONFIG_HUGETLB_PAGE */
@@ -71,6 +72,12 @@  static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
 {
 	return NULL;
 }
+
+
+static inline void __init gigantic_hugetlb_cma_reserve(void)
+{
+}
+
 #endif /* CONFIG_HUGETLB_PAGE */
 
 #endif /* _ASM_POWERPC_HUGETLB_H */
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 9d3faac53295..b198b0ff25bc 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -928,6 +928,9 @@  void __init setup_arch(char **cmdline_p)
 	/* Reserve large chunks of memory for use by CMA for KVM. */
 	kvm_cma_reserve();
 
+	/* Reserve large chunks of memory for use by CMA for hugetlb */
+	gigantic_hugetlb_cma_reserve();
+
 	klp_init_thread_info(&init_task);
 
 	init_mm.start_code = (unsigned long)_stext;
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index e9bfbccd975d..26292544630f 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -684,3 +684,21 @@  void flush_dcache_icache_hugepage(struct page *page)
 		}
 	}
 }
+
+void __init gigantic_hugetlb_cma_reserve(void)
+{
+	unsigned long order = 0;
+
+	if (radix_enabled())
+		order = PUD_SHIFT - PAGE_SHIFT;
+	else if (!firmware_has_feature(FW_FEATURE_LPAR) && mmu_psize_defs[MMU_PAGE_16G].shift)
+		/*
+		 * On pseries (LPAR), 16G pages are instead reserved via the
+		 * ibm,expected#pages device-tree property, so skip CMA there.
+		 */
+		order = mmu_psize_to_shift(MMU_PAGE_16G) - PAGE_SHIFT;
+
+	if (order) {
+		VM_WARN_ON(order < MAX_ORDER);
+		hugetlb_cma_reserve(order);
+	}
+}