
[RFC,5/7] mm/follow_page_mask: Add support for hugetlb pgd entries.

Message ID 1491314675-15787-5-git-send-email-aneesh.kumar@linux.vnet.ibm.com (mailing list archive)
State Not Applicable

Commit Message

Aneesh Kumar K.V April 4, 2017, 2:04 p.m. UTC
ppc64 supports pgd hugetlb entries. Add code to handle hugetlb pgd entries in
follow_page_mask() so that ppc64 can switch to it to handle hugetlb entries.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
---
 include/linux/hugetlb.h | 3 +++
 mm/gup.c                | 7 +++++++
 mm/hugetlb.c            | 9 +++++++++
 3 files changed, 19 insertions(+)
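
The subpage arithmetic the new follow_huge_pgd() relies on is
pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT): pte_page()
of the leaf entry gives the first base page of the PGD-mapped huge page, and
the second term is the offset into it, counted in base pages. A standalone
sketch of that computation (not part of the patch; the page and PGD shift
values below are hypothetical, chosen only for illustration):

#include <stdio.h>

int main(void)
{
	/* Hypothetical geometry: 64K base pages, 16GB mapped per PGD entry. */
	const unsigned long page_shift  = 16;
	const unsigned long pgdir_shift = 34;
	const unsigned long pgdir_mask  = ~((1UL << pgdir_shift) - 1);

	unsigned long address = 0x123456780000UL;	/* hypothetical address */

	/*
	 * Mirrors the patch:
	 *   pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT)
	 * i.e. the offset within the PGD-mapped huge page, in base pages.
	 */
	unsigned long subpage = (address & ~pgdir_mask) >> page_shift;

	printf("subpage index within the huge page: %lu\n", subpage);	/* 22136 */
	return 0;
}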

Comments

Anshuman Khandual April 7, 2017, 11:56 a.m. UTC | #1
On 04/04/2017 07:34 PM, Aneesh Kumar K.V wrote:
> ppc64 supports pgd hugetlb entries. Add code to handle hugetlb pgd entries in
> follow_page_mask() so that ppc64 can switch to it to handle hugetlb entries.
> 
> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>

This is exactly what two of the patches I posted last year proposed,
with a bit more descriptive commit messages. Making the
follow_page_mask() function aware of PGD-based HugeTLB can be sent
separately to core MM. I will send it out to the mm list soon for
fresh consideration, which will make this series one patch shorter.

https://patchwork.ozlabs.org/patch/595033/
https://patchwork.ozlabs.org/patch/595037/

Patch

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index d3a4be0022d8..04b73a9c8b4b 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -124,6 +124,9 @@  struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 				pmd_t *pmd, int flags);
 struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
 				pud_t *pud, int flags);
+struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
+			     pgd_t *pgd, int flags);
+
 int pmd_huge(pmd_t pmd);
 int pud_huge(pud_t pud);
 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
diff --git a/mm/gup.c b/mm/gup.c
index 0e18fd5f65b4..74a25e33dddb 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -382,6 +382,13 @@  struct page *follow_page_mask(struct vm_area_struct *vma,
 	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
 		return no_page_table(vma, flags);
 
+	if (pgd_huge(*pgd)) {
+		page = follow_huge_pgd(mm, address, pgd, flags);
+		if (page)
+			return page;
+		return no_page_table(vma, flags);
+	}
+
 	if (is_hugepd(__hugepd(pgd_val(*pgd)))) {
 		page = follow_huge_pd(vma, address,
 				      __hugepd(pgd_val(*pgd)), flags,
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index b02faa1079bd..eb39a7496de7 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4702,6 +4702,15 @@  follow_huge_pud(struct mm_struct *mm, unsigned long address,
 	return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
 }
 
+struct page * __weak
+follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags)
+{
+	if (flags & FOLL_GET)
+		return NULL;
+
+	return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
+}
+
 #ifdef CONFIG_MEMORY_FAILURE
 
 /*
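
A note on the design choice: follow_huge_pgd() is added as a __weak default,
following the pattern of the neighbouring follow_huge_* helpers in
mm/hugetlb.c, so an architecture that needs different behaviour can provide
its own strong definition and it will be linked in instead. A purely
hypothetical override (not the ppc64 code from this series) might look like
the sketch below; it only marks where arch-specific checks would go:

struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
			     pgd_t *pgd, int flags)
{
	/* As in the generic version, taking a reference is not supported. */
	if (flags & FOLL_GET)
		return NULL;

	/*
	 * An architecture could insert its own sanity checks on the leaf
	 * entry here before computing the subpage.
	 */
	return pte_page(*(pte_t *)pgd) +
		((address & ~PGDIR_MASK) >> PAGE_SHIFT);
}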