
[v2,08/11] powerpc/kvm/book3s_hv: Applies counting method to monitor lockless pgtbl walks

Message ID 20190920195047.7703-9-leonardo@linux.ibm.com (mailing list archive)
State Superseded, archived
Series Introduces new count-based method for monitoring lockless pagetable walks

Checks

Context                       Check    Description
snowpatch_ozlabs/apply_patch  warning  Failed to apply on branch next (39d90246212423715005d06e4deac033174bae44)
snowpatch_ozlabs/apply_patch  fail     Failed to apply to any branch

Commit Message

Leonardo Bras Sept. 20, 2019, 7:50 p.m. UTC
Applies the counting-based method for monitoring all book3s_hv-related
functions that do lockless pagetable walks.

Signed-off-by: Leonardo Bras <leonardo@linux.ibm.com>
---
 arch/powerpc/kvm/book3s_hv_nested.c | 8 ++++++++
 arch/powerpc/kvm/book3s_hv_rm_mmu.c | 9 ++++++++-
 2 files changed, 16 insertions(+), 1 deletion(-)
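
For orientation, a minimal sketch of one plausible shape for these counting
helpers, assuming mm_struct grew an atomic counter of in-flight walks; the
field name and the exact barriers here are illustrative, not taken from this
series:

/*
 * Illustrative only: a hypothetical per-mm counter of in-flight
 * lockless page-table walks.
 */
static inline void start_lockless_pgtbl_walk(struct mm_struct *mm)
{
	/* Publish this walker before it dereferences any page-table entry. */
	atomic_inc(&mm->lockless_pgtbl_walk_count);
	smp_mb__after_atomic();
}

static inline void end_lockless_pgtbl_walk(struct mm_struct *mm)
{
	/* Order the walk's loads before the counter is dropped. */
	smp_mb__before_atomic();
	atomic_dec(&mm->lockless_pgtbl_walk_count);
}

Code that is about to free page-table pages could then wait for such a
counter to drain instead of depending only on IPIs reaching every walker.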

Comments

John Hubbard Sept. 23, 2019, 8:47 p.m. UTC | #1
On 9/20/19 12:50 PM, Leonardo Bras wrote:
> Applies the counting-based method for monitoring all book3s_hv-related
> functions that do lockless pagetable walks.
> 
> Signed-off-by: Leonardo Bras <leonardo@linux.ibm.com>
> ---
>  arch/powerpc/kvm/book3s_hv_nested.c | 8 ++++++++
>  arch/powerpc/kvm/book3s_hv_rm_mmu.c | 9 ++++++++-
>  2 files changed, 16 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
> index 735e0ac6f5b2..ed68e57af3a3 100644
> --- a/arch/powerpc/kvm/book3s_hv_nested.c
> +++ b/arch/powerpc/kvm/book3s_hv_nested.c
> @@ -804,6 +804,7 @@ static void kvmhv_update_nest_rmap_rc(struct kvm *kvm, u64 n_rmap,
>  		return;
>  
>  	/* Find the pte */
> +	start_lockless_pgtbl_walk(kvm->mm);

Here, it appears that interrupts are not disabled, so I'm no longer sure
this will work properly. This definitely needs documentation somewhere
explaining why it is OK, since the previous discussions don't apply if
interrupts are left on.


thanks,
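
For background on the concern above: the earlier safety argument relied on
interrupts being disabled, since on powerpc the page-table freeing path
serializes against lockless walkers by sending IPIs, which an IRQs-off
section holds off. A walker that keeps that protection alongside the counter
might look like the following sketch (the function name and structure are
hypothetical, not from the series):

static pte_t walk_counted_irqs_off(struct kvm *kvm, pgd_t *pgdir,
				   unsigned long hva)
{
	unsigned long flags;
	unsigned int shift;
	pte_t *ptep, pte = __pte(0);

	local_irq_save(flags);		/* hold off pte-lookup IPIs */
	start_lockless_pgtbl_walk(kvm->mm);
	ptep = __find_linux_pte(pgdir, hva, NULL, &shift);
	if (ptep)
		pte = READ_ONCE(*ptep);
	end_lockless_pgtbl_walk(kvm->mm);
	local_irq_restore(flags);

	return pte;
}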

Patch

diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
index 735e0ac6f5b2..ed68e57af3a3 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -804,6 +804,7 @@ static void kvmhv_update_nest_rmap_rc(struct kvm *kvm, u64 n_rmap,
 		return;
 
 	/* Find the pte */
+	start_lockless_pgtbl_walk(kvm->mm);
 	ptep = __find_linux_pte(gp->shadow_pgtable, gpa, NULL, &shift);
 	/*
 	 * If the pte is present and the pfn is still the same, update the pte.
@@ -815,6 +816,7 @@ static void kvmhv_update_nest_rmap_rc(struct kvm *kvm, u64 n_rmap,
 		__radix_pte_update(ptep, clr, set);
 		kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid);
 	}
+	end_lockless_pgtbl_walk(kvm->mm);
 }
 
 /*
@@ -854,10 +856,12 @@ static void kvmhv_remove_nest_rmap(struct kvm *kvm, u64 n_rmap,
 		return;
 
 	/* Find and invalidate the pte */
+	start_lockless_pgtbl_walk(kvm->mm);
 	ptep = __find_linux_pte(gp->shadow_pgtable, gpa, NULL, &shift);
 	/* Don't spuriously invalidate ptes if the pfn has changed */
 	if (ptep && pte_present(*ptep) && ((pte_val(*ptep) & mask) == hpa))
 		kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);
+	end_lockless_pgtbl_walk(kvm->mm);
 }
 
 static void kvmhv_remove_nest_rmap_list(struct kvm *kvm, unsigned long *rmapp,
@@ -921,6 +925,7 @@ static bool kvmhv_invalidate_shadow_pte(struct kvm_vcpu *vcpu,
 	int shift;
 
 	spin_lock(&kvm->mmu_lock);
+	start_lockless_pgtbl_walk(kvm->mm);
 	ptep = __find_linux_pte(gp->shadow_pgtable, gpa, NULL, &shift);
 	if (!shift)
 		shift = PAGE_SHIFT;
@@ -928,6 +933,7 @@ static bool kvmhv_invalidate_shadow_pte(struct kvm_vcpu *vcpu,
 		kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);
 		ret = true;
 	}
+	end_lockless_pgtbl_walk(kvm->mm);
 	spin_unlock(&kvm->mmu_lock);
 
 	if (shift_ret)
@@ -1362,11 +1368,13 @@ static long int __kvmhv_nested_page_fault(struct kvm_run *run,
 	/* See if can find translation in our partition scoped tables for L1 */
 	pte = __pte(0);
 	spin_lock(&kvm->mmu_lock);
+	start_lockless_pgtbl_walk(kvm->mm);
 	pte_p = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
 	if (!shift)
 		shift = PAGE_SHIFT;
 	if (pte_p)
 		pte = *pte_p;
+	end_lockless_pgtbl_walk(kvm->mm);
 	spin_unlock(&kvm->mmu_lock);
 
 	if (!pte_present(pte) || (writing && !(pte_val(pte) & _PAGE_WRITE))) {
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 63e0ce91e29d..53ca67492211 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -258,6 +258,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 	 * If called in real mode we have MSR_EE = 0. Otherwise
 	 * we disable irq above.
 	 */
+	start_lockless_pgtbl_walk(kvm->mm);
 	ptep = __find_linux_pte(pgdir, hva, NULL, &hpage_shift);
 	if (ptep) {
 		pte_t pte;
@@ -311,6 +312,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 		ptel &= ~(HPTE_R_W|HPTE_R_I|HPTE_R_G);
 		ptel |= HPTE_R_M;
 	}
+	end_lockless_pgtbl_walk(kvm->mm);
 
 	/* Find and lock the HPTEG slot to use */
  do_insert:
@@ -886,10 +888,15 @@ static int kvmppc_get_hpa(struct kvm_vcpu *vcpu, unsigned long gpa,
 	hva = __gfn_to_hva_memslot(memslot, gfn);
 
 	/* Try to find the host pte for that virtual address */
+	start_lockless_pgtbl_walk(kvm->mm);
 	ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
-	if (!ptep)
+	if (!ptep) {
+		end_lockless_pgtbl_walk(kvm->mm);
 		return H_TOO_HARD;
+	}
 	pte = kvmppc_read_update_linux_pte(ptep, writing);
+	end_lockless_pgtbl_walk(kvm->mm);
+
 	if (!pte_present(pte))
 		return H_TOO_HARD;
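
The walker-side markers above presumably pair with a drain step on the side
that frees or splits page tables; one hypothetical counterpart, matching the
illustrative counter sketched earlier:

static inline void wait_for_lockless_pgtbl_walks(struct mm_struct *mm)
{
	/* Pairs with the barriers in start/end_lockless_pgtbl_walk(). */
	smp_mb();
	while (atomic_read(&mm->lockless_pgtbl_walk_count))
		cpu_relax();
}

Note also the kvmppc_get_hpa() hunk above: once a walk has started, every
exit path must call end_lockless_pgtbl_walk(), which is why the !ptep branch
gained its own call before returning H_TOO_HARD.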