
[v2,1/4] KVM: PPC: Allow nested guest creation when L0 hv_guest_state > L1

Message ID: 20201124105953.39325-2-ravi.bangoria@linux.ibm.com (mailing list archive)
State: Not Applicable
Series: KVM: PPC: Power10 2nd DAWR enablement

Checks

Context Check Description
snowpatch_ozlabs/apply_patch warning Failed to apply on branch powerpc/merge (7c94b5d4e9d328a69d43a11d7e3dfd7a6d762cb6)
snowpatch_ozlabs/apply_patch warning Failed to apply on branch powerpc/next (3cea11cd5e3b00d91caf0b4730194039b45c5891)
snowpatch_ozlabs/apply_patch warning Failed to apply on branch linus/master (d5beb3140f91b1c8a3d41b14d729aefa4dcc58bc)
snowpatch_ozlabs/apply_patch warning Failed to apply on branch powerpc/fixes (b6b79dd53082db11070b4368d85dd6699ff0b063)
snowpatch_ozlabs/apply_patch warning Failed to apply on branch linux-next (d9137320ac06f526fe3f9a3fdf07a3b14201068a)
snowpatch_ozlabs/apply_patch fail Failed to apply to any branch

Commit Message

Ravi Bangoria Nov. 24, 2020, 10:59 a.m. UTC
On powerpc, the L1 hypervisor enlists the help of L0 via the
H_ENTER_NESTED hcall to load L2 guest state into the CPU. L1
prepares the L2 state in a struct hv_guest_state and passes a
pointer to it via the hcall. Using that pointer, L0 reads/writes
that state directly from/to L1 memory, so L0 must know L1's
hv_guest_state layout. Currently the version field is used for
this: if L0's hv_guest_state.version != L1's
hv_guest_state.version, L0 won't allow the nested KVM guest.
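
For reference, the L1 side of this exchange looks roughly like the
sketch below (simplified, not part of this patch; the equivalent
kernel code lives in kvmhv_p9_guest_entry() in book3s_hv.c, and
plpar_hcall_norets() is the generic hcall wrapper):

	struct hv_guest_state hvregs;
	long trap;

	/* Collect L2 state; version tells L0 which layout this is. */
	kvmhv_save_hv_regs(vcpu, &hvregs);
	hvregs.version = HV_GUEST_STATE_VERSION;

	/*
	 * Both arguments are guest-physical addresses in L1 memory;
	 * L0 reads, and later writes back, these structures in place.
	 */
	trap = plpar_hcall_norets(H_ENTER_NESTED, __pa(&hvregs),
				  __pa(&vcpu->arch.regs));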

This restriction can be loosened a bit. L0 can be taught to
understand older layouts of hv_guest_state, provided new members
are only ever added at the end of the structure. That is, a nested
guest can be allowed even when L0's hv_guest_state.version is
greater than L1's; the other way around remains impossible.
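
Since members are only ever appended, every older layout is a
prefix of the newer one, so L0 can accept any version up to its
own. A minimal sketch of the resulting check (the helper name here
is illustrative; it mirrors the hv_guest_state_size() helper added
below):

	/* Sketch: can L0 understand an L1-provided layout? */
	static bool l1_layout_supported(u64 l1_version)
	{
		/* L1 newer than L0: unknown trailing members, reject. */
		if (l1_version > HV_GUEST_STATE_VERSION)
			return false;
		/* Older or equal: L1's members are a prefix of L0's. */
		return hv_guest_state_size(l1_version) > 0;
	}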

Signed-off-by: Ravi Bangoria <ravi.bangoria@linux.ibm.com>
---
 arch/powerpc/include/asm/hvcall.h   | 17 +++++++--
 arch/powerpc/kvm/book3s_hv_nested.c | 53 ++++++++++++++++++++++++-----
 2 files changed, 59 insertions(+), 11 deletions(-)

Comments

Fabiano Rosas Dec. 9, 2020, 2:25 p.m. UTC | #1
Ravi Bangoria <ravi.bangoria@linux.ibm.com> writes:

> On powerpc, the L1 hypervisor enlists the help of L0 via the
> H_ENTER_NESTED hcall to load L2 guest state into the CPU. L1
> prepares the L2 state in a struct hv_guest_state and passes a
> pointer to it via the hcall. Using that pointer, L0 reads/writes
> that state directly from/to L1 memory, so L0 must know L1's
> hv_guest_state layout. Currently the version field is used for
> this: if L0's hv_guest_state.version != L1's
> hv_guest_state.version, L0 won't allow the nested KVM guest.
>
> This restriction can be loosened a bit. L0 can be taught to
> understand older layouts of hv_guest_state, provided new members
> are only ever added at the end of the structure. That is, a nested
> guest can be allowed even when L0's hv_guest_state.version is
> greater than L1's; the other way around remains impossible.
>
> Signed-off-by: Ravi Bangoria <ravi.bangoria@linux.ibm.com>

Reviewed-by: Fabiano Rosas <farosas@linux.ibm.com>

> ---
>  arch/powerpc/include/asm/hvcall.h   | 17 +++++++--
>  arch/powerpc/kvm/book3s_hv_nested.c | 53 ++++++++++++++++++++++++-----
>  2 files changed, 59 insertions(+), 11 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
> index fbb377055471..a7073fddb657 100644
> --- a/arch/powerpc/include/asm/hvcall.h
> +++ b/arch/powerpc/include/asm/hvcall.h
> @@ -524,9 +524,12 @@ struct h_cpu_char_result {
>  	u64 behaviour;
>  };
>
> -/* Register state for entering a nested guest with H_ENTER_NESTED */
> +/*
> + * Register state for entering a nested guest with H_ENTER_NESTED.
> + * New member must be added at the end.
> + */
>  struct hv_guest_state {
> -	u64 version;		/* version of this structure layout */
> +	u64 version;		/* version of this structure layout, must be first */
>  	u32 lpid;
>  	u32 vcpu_token;
>  	/* These registers are hypervisor privileged (at least for writing) */
> @@ -560,6 +563,16 @@ struct hv_guest_state {
>  /* Latest version of hv_guest_state structure */
>  #define HV_GUEST_STATE_VERSION	1
>
> +static inline int hv_guest_state_size(unsigned int version)
> +{
> +	switch (version) {
> +	case 1:
> +		return offsetofend(struct hv_guest_state, ppr);
> +	default:
> +		return -1;
> +	}
> +}
> +
>  #endif /* __ASSEMBLY__ */
>  #endif /* __KERNEL__ */
>  #endif /* _ASM_POWERPC_HVCALL_H */
> diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
> index 33b58549a9aa..2b433c3bacea 100644
> --- a/arch/powerpc/kvm/book3s_hv_nested.c
> +++ b/arch/powerpc/kvm/book3s_hv_nested.c
> @@ -215,6 +215,45 @@ static void kvmhv_nested_mmio_needed(struct kvm_vcpu *vcpu, u64 regs_ptr)
>  	}
>  }
>
> +static int kvmhv_read_guest_state_and_regs(struct kvm_vcpu *vcpu,
> +					   struct hv_guest_state *l2_hv,
> +					   struct pt_regs *l2_regs,
> +					   u64 hv_ptr, u64 regs_ptr)
> +{
> +	int size;
> +
> +	if (kvm_vcpu_read_guest(vcpu, hv_ptr, &(l2_hv->version),
> +				sizeof(l2_hv->version)))
> +		return -1;
> +
> +	if (kvmppc_need_byteswap(vcpu))
> +		l2_hv->version = swab64(l2_hv->version);
> +
> +	size = hv_guest_state_size(l2_hv->version);
> +	if (size < 0)
> +		return -1;
> +
> +	return kvm_vcpu_read_guest(vcpu, hv_ptr, l2_hv, size) ||
> +		kvm_vcpu_read_guest(vcpu, regs_ptr, l2_regs,
> +				    sizeof(struct pt_regs));
> +}
> +
> +static int kvmhv_write_guest_state_and_regs(struct kvm_vcpu *vcpu,
> +					    struct hv_guest_state *l2_hv,
> +					    struct pt_regs *l2_regs,
> +					    u64 hv_ptr, u64 regs_ptr)
> +{
> +	int size;
> +
> +	size = hv_guest_state_size(l2_hv->version);
> +	if (size < 0)
> +		return -1;
> +
> +	return kvm_vcpu_write_guest(vcpu, hv_ptr, l2_hv, size) ||
> +		kvm_vcpu_write_guest(vcpu, regs_ptr, l2_regs,
> +				     sizeof(struct pt_regs));
> +}
> +
>  long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
>  {
>  	long int err, r;
> @@ -235,17 +274,15 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
>  	hv_ptr = kvmppc_get_gpr(vcpu, 4);
>  	regs_ptr = kvmppc_get_gpr(vcpu, 5);
>  	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
> -	err = kvm_vcpu_read_guest(vcpu, hv_ptr, &l2_hv,
> -				  sizeof(struct hv_guest_state)) ||
> -		kvm_vcpu_read_guest(vcpu, regs_ptr, &l2_regs,
> -				    sizeof(struct pt_regs));
> +	err = kvmhv_read_guest_state_and_regs(vcpu, &l2_hv, &l2_regs,
> +					      hv_ptr, regs_ptr);
>  	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
>  	if (err)
>  		return H_PARAMETER;
>
>  	if (kvmppc_need_byteswap(vcpu))
>  		byteswap_hv_regs(&l2_hv);
> -	if (l2_hv.version != HV_GUEST_STATE_VERSION)
> +	if (l2_hv.version > HV_GUEST_STATE_VERSION)
>  		return H_P2;
>
>  	if (kvmppc_need_byteswap(vcpu))
> @@ -325,10 +362,8 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
>  		byteswap_pt_regs(&l2_regs);
>  	}
>  	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
> -	err = kvm_vcpu_write_guest(vcpu, hv_ptr, &l2_hv,
> -				   sizeof(struct hv_guest_state)) ||
> -		kvm_vcpu_write_guest(vcpu, regs_ptr, &l2_regs,
> -				   sizeof(struct pt_regs));
> +	err = kvmhv_write_guest_state_and_regs(vcpu, &l2_hv, &l2_regs,
> +					       hv_ptr, regs_ptr);
>  	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
>  	if (err)
>  		return H_AUTHORITY;

Patch

diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index fbb377055471..a7073fddb657 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -524,9 +524,12 @@ struct h_cpu_char_result {
 	u64 behaviour;
 };
 
-/* Register state for entering a nested guest with H_ENTER_NESTED */
+/*
+ * Register state for entering a nested guest with H_ENTER_NESTED.
+ * New member must be added at the end.
+ */
 struct hv_guest_state {
-	u64 version;		/* version of this structure layout */
+	u64 version;		/* version of this structure layout, must be first */
 	u32 lpid;
 	u32 vcpu_token;
 	/* These registers are hypervisor privileged (at least for writing) */
@@ -560,6 +563,16 @@ struct hv_guest_state {
 /* Latest version of hv_guest_state structure */
 #define HV_GUEST_STATE_VERSION	1
 
+static inline int hv_guest_state_size(unsigned int version)
+{
+	switch (version) {
+	case 1:
+		return offsetofend(struct hv_guest_state, ppr);
+	default:
+		return -1;
+	}
+}
+
 #endif /* __ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_HVCALL_H */
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
index 33b58549a9aa..2b433c3bacea 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -215,6 +215,45 @@ static void kvmhv_nested_mmio_needed(struct kvm_vcpu *vcpu, u64 regs_ptr)
 	}
 }
 
+static int kvmhv_read_guest_state_and_regs(struct kvm_vcpu *vcpu,
+					   struct hv_guest_state *l2_hv,
+					   struct pt_regs *l2_regs,
+					   u64 hv_ptr, u64 regs_ptr)
+{
+	int size;
+
+	if (kvm_vcpu_read_guest(vcpu, hv_ptr, &(l2_hv->version),
+				sizeof(l2_hv->version)))
+		return -1;
+
+	if (kvmppc_need_byteswap(vcpu))
+		l2_hv->version = swab64(l2_hv->version);
+
+	size = hv_guest_state_size(l2_hv->version);
+	if (size < 0)
+		return -1;
+
+	return kvm_vcpu_read_guest(vcpu, hv_ptr, l2_hv, size) ||
+		kvm_vcpu_read_guest(vcpu, regs_ptr, l2_regs,
+				    sizeof(struct pt_regs));
+}
+
+static int kvmhv_write_guest_state_and_regs(struct kvm_vcpu *vcpu,
+					    struct hv_guest_state *l2_hv,
+					    struct pt_regs *l2_regs,
+					    u64 hv_ptr, u64 regs_ptr)
+{
+	int size;
+
+	size = hv_guest_state_size(l2_hv->version);
+	if (size < 0)
+		return -1;
+
+	return kvm_vcpu_write_guest(vcpu, hv_ptr, l2_hv, size) ||
+		kvm_vcpu_write_guest(vcpu, regs_ptr, l2_regs,
+				     sizeof(struct pt_regs));
+}
+
 long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
 {
 	long int err, r;
@@ -235,17 +274,15 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
 	hv_ptr = kvmppc_get_gpr(vcpu, 4);
 	regs_ptr = kvmppc_get_gpr(vcpu, 5);
 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
-	err = kvm_vcpu_read_guest(vcpu, hv_ptr, &l2_hv,
-				  sizeof(struct hv_guest_state)) ||
-		kvm_vcpu_read_guest(vcpu, regs_ptr, &l2_regs,
-				    sizeof(struct pt_regs));
+	err = kvmhv_read_guest_state_and_regs(vcpu, &l2_hv, &l2_regs,
+					      hv_ptr, regs_ptr);
 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 	if (err)
 		return H_PARAMETER;
 
 	if (kvmppc_need_byteswap(vcpu))
 		byteswap_hv_regs(&l2_hv);
-	if (l2_hv.version != HV_GUEST_STATE_VERSION)
+	if (l2_hv.version > HV_GUEST_STATE_VERSION)
 		return H_P2;
 
 	if (kvmppc_need_byteswap(vcpu))
@@ -325,10 +362,8 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
 		byteswap_pt_regs(&l2_regs);
 	}
 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
-	err = kvm_vcpu_write_guest(vcpu, hv_ptr, &l2_hv,
-				   sizeof(struct hv_guest_state)) ||
-		kvm_vcpu_write_guest(vcpu, regs_ptr, &l2_regs,
-				   sizeof(struct pt_regs));
+	err = kvmhv_write_guest_state_and_regs(vcpu, &l2_hv, &l2_regs,
+					       hv_ptr, regs_ptr);
 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 	if (err)
 		return H_AUTHORITY;