diff mbox series

[2/3] KVM: PPC: Book3S HV: Implement virtual mode H_PAGE_INIT handler

Message ID 20190319040435.10716-2-sjitindarsingh@gmail.com
State Superseded
Headers show
Series [1/3] KVM: PPC: Implement kvmppc_copy_guest() to perform in place copy of guest memory | expand

Commit Message

Suraj Jitindar Singh March 19, 2019, 4:04 a.m. UTC
Implement a virtual mode handler for the H_CALL H_PAGE_INIT which can be
used to zero or copy a guest page. The page is defined to be 4k and must
be 4k aligned.

The in-kernel handler halves the time to handle this H_CALL compared to
handling it in userspace for a radix guest.

Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
---
 arch/powerpc/kvm/book3s_hv.c | 39 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 39 insertions(+)

Comments

Alexey Kardashevskiy March 19, 2019, 6:53 a.m. UTC | #1
On 19/03/2019 15:04, Suraj Jitindar Singh wrote:
> Implement a virtual mode handler for the H_CALL H_PAGE_INIT which can be
> used to zero or copy a guest page. The page is defined to be 4k and must
> be 4k aligned.
> 
> The in-kernel handler halves the time to handle this H_CALL compared to
> handling it in userspace for a radix guest.
> 
> Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
> ---
>  arch/powerpc/kvm/book3s_hv.c | 39 +++++++++++++++++++++++++++++++++++++++
>  1 file changed, 39 insertions(+)
> 
> diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
> index 7179ea783f4f..fa29cc4900c2 100644
> --- a/arch/powerpc/kvm/book3s_hv.c
> +++ b/arch/powerpc/kvm/book3s_hv.c
> @@ -883,6 +883,39 @@ static int kvmppc_copy_guest(struct kvm *kvm, gpa_t to, gpa_t from,
>  	return 0;
>  }
>  
> +static int kvmppc_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,



> +			      unsigned long dest, unsigned long src)
> +{
> +	int ret = H_FUNCTION;
> +	u64 pg_sz = 1UL << 12;	/* 4K page size */
> +	u64 pg_mask = pg_sz - 1;


Ditch pg_sz and just use SZ_4K? The page size is not going to change
here ever.


> +
> +	/* Check for invalid flags (H_PAGE_SET_LOANED covers all CMO flags) */
> +	if (flags & ~(H_ICACHE_INVALIDATE | H_ICACHE_SYNCHRONIZE |
> +		      H_ZERO_PAGE | H_COPY_PAGE | H_PAGE_SET_LOANED))
> +		return H_PARAMETER;
> +
> +	/* dest (and src if copy_page flag set) must be page aligned */
> +	if ((dest & pg_mask) || ((flags & H_COPY_PAGE) && (src & pg_mask)))
> +		return H_PARAMETER;
> +
> +	/* zero and/or copy the page as determined by the flags */
> +	if (flags & H_ZERO_PAGE) {
> +		ret = kvm_clear_guest(vcpu->kvm, dest, pg_sz);
> +		if (ret < 0)
> +			return H_PARAMETER;
> +	}
> +	if (flags & H_COPY_PAGE) {


If H_COPY_PAGE is set, is there any good reason to do kvm_clear_guest
first even if H_ZERO_PAGE is set? It should not happen in practice and
it is no harm but also no sense.

> +		ret = kvmppc_copy_guest(vcpu->kvm, dest, src, pg_sz);
> +		if (ret < 0)
> +			return H_PARAMETER;
> +	}


else if (src)
	return H_PARAMETER;



> +
> +	/* We can ignore the remaining flags */
> +
> +	return H_SUCCESS;
> +}
> +
>  static int kvm_arch_vcpu_yield_to(struct kvm_vcpu *target)
>  {
>  	struct kvmppc_vcore *vcore = target->arch.vcore;
> @@ -1083,6 +1116,11 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
>  		if (nesting_enabled(vcpu->kvm))
>  			ret = kvmhv_copy_tofrom_guest_nested(vcpu);
>  		break;
> +	case H_PAGE_INIT:
> +		ret = kvmppc_h_page_init(vcpu, kvmppc_get_gpr(vcpu, 4),
> +					 kvmppc_get_gpr(vcpu, 5),
> +					 kvmppc_get_gpr(vcpu, 6));
> +		break;
>  	default:
>  		return RESUME_HOST;
>  	}
> @@ -1127,6 +1165,7 @@ static int kvmppc_hcall_impl_hv(unsigned long cmd)
>  	case H_IPOLL:
>  	case H_XIRR_X:
>  #endif
> +	case H_PAGE_INIT:
>  		return 1;
>  	}
>  
>
Suraj Jitindar Singh March 22, 2019, 5:04 a.m. UTC | #2
On Tue, 2019-03-19 at 17:53 +1100, Alexey Kardashevskiy wrote:
> 
> On 19/03/2019 15:04, Suraj Jitindar Singh wrote:
> > Implement a virtual mode handler for the H_CALL H_PAGE_INIT which
> > can be
> > used to zero or copy a guest page. The page is defined to be 4k and
> > must
> > be 4k aligned.
> > 
> > The in-kernel handler halves the time to handle this H_CALL
> > compared to
> > handling it in userspace for a radix guest.
> > 
> > Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
> > ---
> >  arch/powerpc/kvm/book3s_hv.c | 39
> > +++++++++++++++++++++++++++++++++++++++
> >  1 file changed, 39 insertions(+)
> > 
> > diff --git a/arch/powerpc/kvm/book3s_hv.c
> > b/arch/powerpc/kvm/book3s_hv.c
> > index 7179ea783f4f..fa29cc4900c2 100644
> > --- a/arch/powerpc/kvm/book3s_hv.c
> > +++ b/arch/powerpc/kvm/book3s_hv.c
> > @@ -883,6 +883,39 @@ static int kvmppc_copy_guest(struct kvm *kvm,
> > gpa_t to, gpa_t from,
> >  	return 0;
> >  }
> >  
> > +static int kvmppc_h_page_init(struct kvm_vcpu *vcpu, unsigned long
> > flags,
> 
> 
> 
> > +			      unsigned long dest, unsigned long
> > src)
> > +{
> > +	int ret = H_FUNCTION;
> > +	u64 pg_sz = 1UL << 12;	/* 4K page size */
> > +	u64 pg_mask = pg_sz - 1;
> 
> 
> Ditch pg_sz and just use SZ_4K? The page size is not going to change
> here ever.

Yep, will do. Didn't realise that was a thing

> 
> 
> > +
> > +	/* Check for invalid flags (H_PAGE_SET_LOANED covers all
> > CMO flags) */
> > +	if (flags & ~(H_ICACHE_INVALIDATE | H_ICACHE_SYNCHRONIZE |
> > +		      H_ZERO_PAGE | H_COPY_PAGE |
> > H_PAGE_SET_LOANED))
> > +		return H_PARAMETER;
> > +
> > +	/* dest (and src if copy_page flag set) must be page
> > aligned */
> > +	if ((dest & pg_mask) || ((flags & H_COPY_PAGE) && (src &
> > pg_mask)))
> > +		return H_PARAMETER;
> > +
> > +	/* zero and/or copy the page as determined by the flags */
> > +	if (flags & H_ZERO_PAGE) {
> > +		ret = kvm_clear_guest(vcpu->kvm, dest, pg_sz);
> > +		if (ret < 0)
> > +			return H_PARAMETER;
> > +	}
> > +	if (flags & H_COPY_PAGE) {
> 
> 
> If H_COPY_PAGE is set, is there any good reason to do kvm_clear_guest
> first even if H_ZERO_PAGE is set? It should not happen in practice
> and
> it is no harm but also no sense.

True, if both are set then we can get away with only doing copy since
it will have the same effect.

> 
> > +		ret = kvmppc_copy_guest(vcpu->kvm, dest, src,
> > pg_sz);
> > +		if (ret < 0)
> > +			return H_PARAMETER;
> > +	}
> 
> 
> else if (src)
> 	return H_PARAMETER;

I'm not sure that's technically an error so I'm going to leave it out.

> 
> 
> 
> > +
> > +	/* We can ignore the remaining flags */
> > +
> > +	return H_SUCCESS;
> > +}
> > +
> >  static int kvm_arch_vcpu_yield_to(struct kvm_vcpu *target)
> >  {
> >  	struct kvmppc_vcore *vcore = target->arch.vcore;
> > @@ -1083,6 +1116,11 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu
> > *vcpu)
> >  		if (nesting_enabled(vcpu->kvm))
> >  			ret =
> > kvmhv_copy_tofrom_guest_nested(vcpu);
> >  		break;
> > +	case H_PAGE_INIT:
> > +		ret = kvmppc_h_page_init(vcpu,
> > kvmppc_get_gpr(vcpu, 4),
> > +					 kvmppc_get_gpr(vcpu, 5),
> > +					 kvmppc_get_gpr(vcpu, 6));
> > +		break;
> >  	default:
> >  		return RESUME_HOST;
> >  	}
> > @@ -1127,6 +1165,7 @@ static int kvmppc_hcall_impl_hv(unsigned long
> > cmd)
> >  	case H_IPOLL:
> >  	case H_XIRR_X:
> >  #endif
> > +	case H_PAGE_INIT:
> >  		return 1;
> >  	}
> >  
> > 
> 
>
diff mbox series

Patch

diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 7179ea783f4f..fa29cc4900c2 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -883,6 +883,39 @@  static int kvmppc_copy_guest(struct kvm *kvm, gpa_t to, gpa_t from,
 	return 0;
 }
 
+/*
+ * Virtual-mode handler for the H_PAGE_INIT hcall: zero and/or copy a
+ * single 4K-aligned 4K guest page, per the flags in r4.
+ * Returns H_SUCCESS, or H_PARAMETER on bad flags/alignment/gpa.
+ */
+static int kvmppc_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
+			      unsigned long dest, unsigned long src)
+{
+	u64 pg_sz = SZ_4K;	/* hcall is defined for 4K pages only */
+	u64 pg_mask = SZ_4K - 1;
+	int ret;
+
+	/* Check for invalid flags (H_PAGE_SET_LOANED covers all CMO flags) */
+	if (flags & ~(H_ICACHE_INVALIDATE | H_ICACHE_SYNCHRONIZE |
+		      H_ZERO_PAGE | H_COPY_PAGE | H_PAGE_SET_LOANED))
+		return H_PARAMETER;
+
+	/* dest (and src if copy_page flag set) must be page aligned */
+	if ((dest & pg_mask) || ((flags & H_COPY_PAGE) && (src & pg_mask)))
+		return H_PARAMETER;
+
+	/* A copy overwrites the whole page, so when both flags are set
+	 * the preceding zero would be redundant — do the copy only. */
+	if (flags & H_COPY_PAGE) {
+		ret = kvmppc_copy_guest(vcpu->kvm, dest, src, pg_sz);
+		if (ret < 0)
+			return H_PARAMETER;
+	} else if (flags & H_ZERO_PAGE) {
+		ret = kvm_clear_guest(vcpu->kvm, dest, pg_sz);
+		if (ret < 0)
+			return H_PARAMETER;
+	}
+
+	/* We can ignore the remaining (icache/CMO) flags */
+
+	return H_SUCCESS;
+}
+
 static int kvm_arch_vcpu_yield_to(struct kvm_vcpu *target)
 {
 	struct kvmppc_vcore *vcore = target->arch.vcore;
@@ -1083,6 +1116,11 @@  int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 		if (nesting_enabled(vcpu->kvm))
 			ret = kvmhv_copy_tofrom_guest_nested(vcpu);
 		break;
+	case H_PAGE_INIT:
+		ret = kvmppc_h_page_init(vcpu, kvmppc_get_gpr(vcpu, 4),
+					 kvmppc_get_gpr(vcpu, 5),
+					 kvmppc_get_gpr(vcpu, 6));
+		break;
 	default:
 		return RESUME_HOST;
 	}
@@ -1127,6 +1165,7 @@  static int kvmppc_hcall_impl_hv(unsigned long cmd)
 	case H_IPOLL:
 	case H_XIRR_X:
 #endif
+	case H_PAGE_INIT:
 		return 1;
 	}