[v6,07/11] powerpc/kvm/e500: Use functions to track lockless pgtbl walks

Message ID 20200206030900.147032-8-leonardo@linux.ibm.com
State Not Applicable
Series Introduces new functions for tracking lockless pagetable walks

Commit Message

Leonardo Bras Feb. 6, 2020, 3:08 a.m. UTC
Applies the new functions for tracking lockless pgtable walks to
kvmppc_e500_shadow_map().

Fixes the place where local_irq_restore() is called: previously, if ptep
was NULL, local_irq_restore() would never be called, leaving interrupts
disabled.

local_irq_{save,restore} is already inside {begin,end}_lockless_pgtbl_walk,
so there is no need to repeat it here.

The variable that saves the irq mask was renamed from flags to irq_mask
so it doesn't lose meaning now that it's no longer passed directly to
the local_irq_* functions.

Signed-off-by: Leonardo Bras <leonardo@linux.ibm.com>
---
 arch/powerpc/kvm/e500_mmu_host.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)
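
For reference, the helpers do the IRQ masking themselves. A minimal sketch
of the contract the commit message describes (the bodies here are
illustrative only; the helpers introduced earlier in this series may do
additional bookkeeping):

	static inline unsigned long begin_lockless_pgtbl_walk(void)
	{
		unsigned long irq_mask;

		/* Disable interrupts so the page tables being walked
		 * cannot be freed while we dereference them. */
		local_irq_save(irq_mask);
		return irq_mask;
	}

	static inline void end_lockless_pgtbl_walk(unsigned long irq_mask)
	{
		local_irq_restore(irq_mask);
	}

The value returned by begin_lockless_pgtbl_walk() is what the caller now
stores in irq_mask instead of passing flags to local_irq_save() directly.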

Comments

Christophe Leroy Feb. 6, 2020, 6:18 a.m. UTC | #1
On 06/02/2020 at 04:08, Leonardo Bras wrote:
> Applies the new functions for tracking lockless pgtable walks to
> kvmppc_e500_shadow_map().
> 
> Fixes the place where local_irq_restore() is called: previously, if ptep
> was NULL, local_irq_restore() would never be called, leaving interrupts
> disabled.
> 
> local_irq_{save,restore} is already inside {begin,end}_lockless_pgtbl_walk,
> so there is no need to repeat it here.
> 
> The variable that saves the irq mask was renamed from flags to irq_mask
> so it doesn't lose meaning now that it's no longer passed directly to
> the local_irq_* functions.
> 
> Signed-off-by: Leonardo Bras <leonardo@linux.ibm.com>
> ---
>   arch/powerpc/kvm/e500_mmu_host.c | 9 +++++----
>   1 file changed, 5 insertions(+), 4 deletions(-)
> 
> diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
> index 425d13806645..3dcf11f77256 100644
> --- a/arch/powerpc/kvm/e500_mmu_host.c
> +++ b/arch/powerpc/kvm/e500_mmu_host.c
> @@ -336,7 +336,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
>   	pte_t *ptep;
>   	unsigned int wimg = 0;
>   	pgd_t *pgdir;
> -	unsigned long flags;
> +	unsigned long irq_mask;
>   
>   	/* used to check for invalidations in progress */
>   	mmu_seq = kvm->mmu_notifier_seq;
> @@ -473,7 +473,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
>   	 * We are holding kvm->mmu_lock so a notifier invalidate
>   	 * can't run hence pfn won't change.
>   	 */
> -	local_irq_save(flags);
> +	irq_mask = begin_lockless_pgtbl_walk();
>   	ptep = find_linux_pte(pgdir, hva, NULL, NULL);
>   	if (ptep) {
>   		pte_t pte = READ_ONCE(*ptep);
> @@ -481,15 +481,16 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
>   		if (pte_present(pte)) {
>   			wimg = (pte_val(pte) >> PTE_WIMGE_SHIFT) &
>   				MAS2_WIMGE_MASK;
> -			local_irq_restore(flags);
>   		} else {
> -			local_irq_restore(flags);
> +			end_lockless_pgtbl_walk(irq_mask);
>   			pr_err_ratelimited("%s: pte not present: gfn %lx,pfn %lx\n",
>   					   __func__, (long)gfn, pfn);
>   			ret = -EINVAL;
>   			goto out;
>   		}
>   	}
> +	end_lockless_pgtbl_walk(irq_mask);
> +

I don't really like unbalanced begin/end.

Something like the following would be cleaner:


begin_lockless_pgtbl_walk()
ptep = find()
if (ptep) {
	pte = READ_ONCE()
	if (pte_present(pte))
		wimg = ...
	else
		ret = -EINVAL;
}
end_lockless_pgtbl_walk()

if (ret) {
	pr_err_rate...()
	goto out;
}
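
Fleshed out against the hunk above, that would read roughly as follows (a
sketch only; it assumes ret is initialized to 0 earlier in
kvmppc_e500_shadow_map(), as in the existing code):

	irq_mask = begin_lockless_pgtbl_walk();
	ptep = find_linux_pte(pgdir, hva, NULL, NULL);
	if (ptep) {
		pte_t pte = READ_ONCE(*ptep);

		/* Only a present pte carries valid WIMGE bits. */
		if (pte_present(pte))
			wimg = (pte_val(pte) >> PTE_WIMGE_SHIFT) &
				MAS2_WIMGE_MASK;
		else
			ret = -EINVAL;
	}
	end_lockless_pgtbl_walk(irq_mask);

	if (ret) {
		pr_err_ratelimited("%s: pte not present: gfn %lx,pfn %lx\n",
				   __func__, (long)gfn, pfn);
		goto out;
	}

This also moves the ratelimited printk out of the interrupts-disabled
window.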



>   	kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
>   
>   	kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
> 

Christophe
Leonardo Bras Feb. 7, 2020, 3:10 a.m. UTC | #2
Hello Christophe, 

On Thu, 2020-02-06 at 07:18 +0100, Christophe Leroy wrote:
> 
> I don't really like unbalanced begin/end.
> 
> Something like the following would be cleaner:
> 
> 
> begin_lockless_pgtbl_walk()
> ptep = find()
> if (ptep) {
> 	pte = READ_ONCE()
> 	if (pte_present(pte))
> 		wimg = ...
> 	else
> 		ret = -EINVAL;
> }
> end_lockless_pgtbl_walk()
> 
> if (ret) {
> 	pr_err_rate...()
> 	goto out;
> }
> 
> 

Sure, looks better that way. I will change that for v7.

Thanks for the feedback,

Leonardo Bras

Patch

diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 425d13806645..3dcf11f77256 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -336,7 +336,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 	pte_t *ptep;
 	unsigned int wimg = 0;
 	pgd_t *pgdir;
-	unsigned long flags;
+	unsigned long irq_mask;
 
 	/* used to check for invalidations in progress */
 	mmu_seq = kvm->mmu_notifier_seq;
@@ -473,7 +473,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 	 * We are holding kvm->mmu_lock so a notifier invalidate
 	 * can't run hence pfn won't change.
 	 */
-	local_irq_save(flags);
+	irq_mask = begin_lockless_pgtbl_walk();
 	ptep = find_linux_pte(pgdir, hva, NULL, NULL);
 	if (ptep) {
 		pte_t pte = READ_ONCE(*ptep);
@@ -481,15 +481,16 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 		if (pte_present(pte)) {
 			wimg = (pte_val(pte) >> PTE_WIMGE_SHIFT) &
 				MAS2_WIMGE_MASK;
-			local_irq_restore(flags);
 		} else {
-			local_irq_restore(flags);
+			end_lockless_pgtbl_walk(irq_mask);
 			pr_err_ratelimited("%s: pte not present: gfn %lx,pfn %lx\n",
 					   __func__, (long)gfn, pfn);
 			ret = -EINVAL;
 			goto out;
 		}
 	}
+	end_lockless_pgtbl_walk(irq_mask);
+
 	kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
 
 	kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,