Patchwork [4/4] powernv: kvm: make the handling of _PAGE_NUMA faster for guest

login
register
mail settings
Submitter Liu Ping Fan
Date Dec. 11, 2013, 8:47 a.m.
Message ID <1386751674-14136-5-git-send-email-pingfank@linux.vnet.ibm.com>
Download mbox | patch
Permalink /patch/299880/
State New
Headers show

Comments

Liu Ping Fan - Dec. 11, 2013, 8:47 a.m.
The periodic check of _PAGE_NUMA can happen on a page that is already
correctly placed. In that case, when the guest tries to set up an HPTE
in real mode, we try to resolve the NUMA fault in real mode as well,
since switching between guest context and host context costs too much.

Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
---
 arch/powerpc/kvm/book3s_hv_rm_mmu.c | 32 ++++++++++++++++++++++++++++++--
 1 file changed, 30 insertions(+), 2 deletions(-)

Patch

diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index ae46052..a06b199 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -179,6 +179,11 @@  long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 	unsigned int writing;
 	unsigned long mmu_seq;
 	unsigned long rcbits;
+	struct mm_struct *mm = kvm->mm;
+	struct vm_area_struct *vma;
+	int page_nid, target_nid;
+	struct page *test_page;
+	pte_t *ptep;
 
 	psize = hpte_page_size(pteh, ptel);
 	if (!psize)
@@ -234,8 +239,26 @@  long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 
 		/* Look up the Linux PTE for the backing page */
 		pte_size = psize;
-		pte = lookup_linux_pte(pgdir, hva, writing, &pte_size, NULL);
-		if (pte_present(pte) && !pte_numa(pte)) {
+		pte = lookup_linux_pte(pgdir, hva, writing, &pte_size, &ptep);
+		if (pte_present(pte)) {
+			if (pte_numa(pte)) {
+				/* If fail, let gup handle it */
+				if (unlikely(!down_read_trylock(&mm->mmap_sem)))
+					goto pte_check;
+
+				vma = find_vma(mm, hva);
+				up_read(&mm->mmap_sem);
+				test_page = pte_page(pte);
+				page_nid = page_to_nid(test_page);
+				target_nid = numa_migrate_prep(test_page, vma,
+							 hva, page_nid);
+				put_page(test_page);
+				if (unlikely(target_nid != -1)) {
+					/* If fail, let gup handle it */
+					goto pte_check;
+				}
+			}
+
 			if (writing && !pte_write(pte))
 				/* make the actual HPTE be read-only */
 				ptel = hpte_make_readonly(ptel);
@@ -244,6 +267,7 @@  long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 		}
 	}
 
+pte_check:
 	if (pte_size < psize)
 		return H_PARAMETER;
 	if (pa && pte_size > psize)
@@ -339,6 +363,10 @@  long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 			pteh &= ~HPTE_V_VALID;
 			unlock_rmap(rmap);
 		} else {
+			if (pte_numa(pte) && pa) {
+				pte = pte_mknonnuma(pte);
+				*ptep = pte;
+			}
 			kvmppc_add_revmap_chain(kvm, rev, rmap, pte_index,
 						realmode);
 			/* Only set R/C in real HPTE if already set in *rmap */