@@ -75,6 +75,23 @@ static inline pte_t pte_mknuma(pte_t pte)
return pte;
}
+#define change_pmd_protnuma change_pmd_protnuma
+static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp, int prot_numa)
+{
+ /*
+ * Unlike the generic pmd_mknuma()/pmd_mknonnuma() helpers, this
+ * implementation does not touch the _PAGE_PRESENT bit; only
+ * _PAGE_NUMA is set or cleared here.
+ */
+ unsigned long pmdval = pmd_val(*pmdp);
+
+ if (prot_numa)
+ pmdval |= _PAGE_NUMA;
+ else
+ pmdval &= ~_PAGE_NUMA;
+ pmd_set(pmdp, pmdval);
+}
+
#define pmd_numa pmd_numa
static inline int pmd_numa(pmd_t pmd)
{
@@ -697,6 +697,18 @@ static inline pmd_t pmd_mknuma(pmd_t pmd)
return pmd_clear_flags(pmd, _PAGE_PRESENT);
}
#endif
+
+#ifndef change_pmd_protnuma
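+/*
+ * Generic fallback for architectures that do not provide their own
+ * change_pmd_protnuma(). The address is aligned down to the PMD
+ * boundary and callers are expected to hold mm->page_table_lock.
+ */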
+static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmd, int prot_numa)
+{
+ if (prot_numa)
+ set_pmd_at(mm, addr & PMD_MASK, pmd, pmd_mknuma(*pmd));
+ else
+ set_pmd_at(mm, addr & PMD_MASK, pmd, pmd_mknonnuma(*pmd));
+}
+
+#endif
#else
extern int pte_numa(pte_t pte);
extern int pmd_numa(pmd_t pmd);
@@ -704,6 +716,8 @@ extern pte_t pte_mknonnuma(pte_t pte);
extern pmd_t pmd_mknonnuma(pmd_t pmd);
extern pte_t pte_mknuma(pte_t pte);
extern pmd_t pmd_mknuma(pmd_t pmd);
+extern void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmd, int prot_numa);
#endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */
#else
static inline int pmd_numa(pmd_t pmd)
@@ -735,6 +749,12 @@ static inline pmd_t pmd_mknuma(pmd_t pmd)
{
return pmd;
}
+
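+/*
+ * There should be no callers of change_pmd_protnuma() when
+ * CONFIG_NUMA_BALANCING is disabled, so reaching this stub is a bug.
+ */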
+static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmd, int prot_numa)
+{
+ BUG();
+}
#endif /* CONFIG_NUMA_BALANCING */
#endif /* CONFIG_MMU */
@@ -3605,7 +3605,7 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
spin_lock(&mm->page_table_lock);
pmd = *pmdp;
if (pmd_numa(pmd)) {
- set_pmd_at(mm, _addr, pmdp, pmd_mknonnuma(pmd));
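+ /* mm->page_table_lock is held here; clear the NUMA bit via the helper */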
+ change_pmd_protnuma(mm, _addr, pmdp, 0);
numa = true;
}
spin_unlock(&mm->page_table_lock);
@@ -112,22 +112,6 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
return pages;
}
-#ifdef CONFIG_NUMA_BALANCING
-static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
- pmd_t *pmd)
-{
- spin_lock(&mm->page_table_lock);
- set_pmd_at(mm, addr & PMD_MASK, pmd, pmd_mknuma(*pmd));
- spin_unlock(&mm->page_table_lock);
-}
-#else
-static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
- pmd_t *pmd)
-{
- BUG();
-}
-#endif /* CONFIG_NUMA_BALANCING */
-
static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
pud_t *pud, unsigned long addr, unsigned long end,
pgprot_t newprot, int dirty_accountable, int prot_numa)
@@ -161,8 +145,12 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
* node. This allows a regular PMD to be handled as one fault
* and effectively batches the taking of the PTL
*/
- if (prot_numa && all_same_node)
- change_pmd_protnuma(vma->vm_mm, addr, pmd);
+ if (prot_numa && all_same_node) {
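+ /*
+ * change_pmd_protnuma() leaves locking to its callers, so
+ * take the page table lock around the PMD update here.
+ */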
+ spin_lock(&vma->vm_mm->page_table_lock);
+ change_pmd_protnuma(vma->vm_mm, addr, pmd, 1);
+ spin_unlock(&vma->vm_mm->page_table_lock);
+ }
} while (pmd++, addr = next, addr != end);
return pages;