
[RFC,v2,2/6] KVM: PPC: Add fpr getters and setters

Message ID 20230605064848.12319-3-jpn@linux.vnet.ibm.com
State New
Series KVM: PPC: Nested PAPR guests

Commit Message

Jordan Niethe June 5, 2023, 6:48 a.m. UTC
Add wrappers for fpr registers to prepare for supporting PAPR nested
guests.

Signed-off-by: Jordan Niethe <jpn@linux.vnet.ibm.com>
---
 arch/powerpc/include/asm/kvm_book3s.h | 31 +++++++++++++++++++++++++++
 arch/powerpc/include/asm/kvm_booke.h  | 10 +++++++++
 arch/powerpc/kvm/book3s.c             | 16 +++++++-------
 arch/powerpc/kvm/emulate_loadstore.c  |  2 +-
 arch/powerpc/kvm/powerpc.c            | 22 +++++++++----------
 5 files changed, 61 insertions(+), 20 deletions(-)
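
For context, the value of routing all FPR state access through these
wrappers is that a later patch in the series can change what the
accessors do in one place rather than at every call site. As a rough
sketch of where this is heading for PAPR nested guests
(kvmhv_nestedv2_mark_dirty() and KVMPPC_GSID_VSRS() are assumed,
illustrative names, not functions this patch adds), the setter could
later grow a dirty-tracking hook:

	/* Sketch only: a possible future form of the setter, assuming a
	 * hypothetical dirty-tracking hook for PAPR nested (nestedv2)
	 * guests. */
	static inline void kvmppc_set_fpr(struct kvm_vcpu *vcpu, int i, u64 val)
	{
		vcpu->arch.fp.fpr[i][TS_FPROFFSET] = val;
		/* Tell the L0 hypervisor this register changed and must be
		 * pushed back before the next guest entry (hypothetical helper). */
		kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSRS(i));
	}

Every conversion in book3s.c, emulate_loadstore.c and powerpc.c below
would then pick such a change up for free.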

Comments

Nicholas Piggin June 7, 2023, 7:55 a.m. UTC | #1
On Mon Jun 5, 2023 at 4:48 PM AEST, Jordan Niethe wrote:
> Add wrappers for fpr registers to prepare for supporting PAPR nested
> guests.
>
> Signed-off-by: Jordan Niethe <jpn@linux.vnet.ibm.com>
> ---
>  arch/powerpc/include/asm/kvm_book3s.h | 31 +++++++++++++++++++++++++++
>  arch/powerpc/include/asm/kvm_booke.h  | 10 +++++++++
>  arch/powerpc/kvm/book3s.c             | 16 +++++++-------
>  arch/powerpc/kvm/emulate_loadstore.c  |  2 +-
>  arch/powerpc/kvm/powerpc.c            | 22 +++++++++----------
>  5 files changed, 61 insertions(+), 20 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
> index 4e91f54a3f9f..a632e79639f0 100644
> --- a/arch/powerpc/include/asm/kvm_book3s.h
> +++ b/arch/powerpc/include/asm/kvm_book3s.h
> @@ -413,6 +413,37 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
>  	return vcpu->arch.fault_dar;
>  }
>  
> +static inline u64 kvmppc_get_fpr(struct kvm_vcpu *vcpu, int i)
> +{
> +	return vcpu->arch.fp.fpr[i][TS_FPROFFSET];
> +}
> +
> +static inline void kvmppc_set_fpr(struct kvm_vcpu *vcpu, int i, u64 val)
> +{
> +	vcpu->arch.fp.fpr[i][TS_FPROFFSET] = val;
> +}
> +
> +static inline u64 kvmppc_get_fpscr(struct kvm_vcpu *vcpu)
> +{
> +	return vcpu->arch.fp.fpscr;
> +}
> +
> +static inline void kvmppc_set_fpscr(struct kvm_vcpu *vcpu, u64 val)
> +{
> +	vcpu->arch.fp.fpscr = val;
> +}
> +
> +
> +static inline u64 kvmppc_get_vsx_fpr(struct kvm_vcpu *vcpu, int i, int j)
> +{
> +	return vcpu->arch.fp.fpr[i][j];
> +}
> +
> +static inline void kvmppc_set_vsx_fpr(struct kvm_vcpu *vcpu, int i, int j, u64 val)
> +{
> +	vcpu->arch.fp.fpr[i][j] = val;
> +}
> +
>  #define BOOK3S_WRAPPER_SET(reg, size)					\
>  static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
>  {									\
> diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
> index 0c3401b2e19e..7c3291aa8922 100644
> --- a/arch/powerpc/include/asm/kvm_booke.h
> +++ b/arch/powerpc/include/asm/kvm_booke.h
> @@ -89,6 +89,16 @@ static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
>  	return vcpu->arch.regs.nip;
>  }
>  
> +static inline void kvmppc_set_fpr(struct kvm_vcpu *vcpu, int i, u64 val)
> +{
> +	vcpu->arch.fp.fpr[i][TS_FPROFFSET] = val;
> +}
> +
> +static inline u64 kvmppc_get_fpr(struct kvm_vcpu *vcpu, int i)
> +{
> +	return vcpu->arch.fp.fpr[i][TS_FPROFFSET];
> +}
> +
>  #ifdef CONFIG_BOOKE
>  static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
>  {
> diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
> index 2fe31b518886..6cd20ab9e94e 100644
> --- a/arch/powerpc/kvm/book3s.c
> +++ b/arch/powerpc/kvm/book3s.c
> @@ -636,17 +636,17 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
>  			break;
>  		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
>  			i = id - KVM_REG_PPC_FPR0;
> -			*val = get_reg_val(id, VCPU_FPR(vcpu, i));
> +			*val = get_reg_val(id, kvmppc_get_fpr(vcpu, i));
>  			break;
>  		case KVM_REG_PPC_FPSCR:
> -			*val = get_reg_val(id, vcpu->arch.fp.fpscr);
> +			*val = get_reg_val(id, kvmppc_get_fpscr(vcpu));
>  			break;
>  #ifdef CONFIG_VSX
>  		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
>  			if (cpu_has_feature(CPU_FTR_VSX)) {
>  				i = id - KVM_REG_PPC_VSR0;
> -				val->vsxval[0] = vcpu->arch.fp.fpr[i][0];
> -				val->vsxval[1] = vcpu->arch.fp.fpr[i][1];
> +				val->vsxval[0] = kvmppc_get_vsx_fpr(vcpu, i, 0);
> +				val->vsxval[1] = kvmppc_get_vsx_fpr(vcpu, i, 1);
>  			} else {
>  				r = -ENXIO;
>  			}
> @@ -724,7 +724,7 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
>  			break;
>  		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
>  			i = id - KVM_REG_PPC_FPR0;
> -			VCPU_FPR(vcpu, i) = set_reg_val(id, *val);
> +			kvmppc_set_fpr(vcpu, i, set_reg_val(id, *val));
>  			break;
>  		case KVM_REG_PPC_FPSCR:
>  			vcpu->arch.fp.fpscr = set_reg_val(id, *val);
> @@ -733,8 +733,8 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
>  		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
>  			if (cpu_has_feature(CPU_FTR_VSX)) {
>  				i = id - KVM_REG_PPC_VSR0;
> -				vcpu->arch.fp.fpr[i][0] = val->vsxval[0];
> -				vcpu->arch.fp.fpr[i][1] = val->vsxval[1];
> +				kvmppc_set_vsx_fpr(vcpu, i, 0, val->vsxval[0]);
> +				kvmppc_set_vsx_fpr(vcpu, i, 1, val->vsxval[1]);
>  			} else {
>  				r = -ENXIO;
>  			}
> @@ -765,7 +765,7 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
>  			break;
>  #endif /* CONFIG_KVM_XIVE */
>  		case KVM_REG_PPC_FSCR:
> -			vcpu->arch.fscr = set_reg_val(id, *val);
> +			kvmppc_set_fscr(vcpu, set_reg_val(id, *val));
>  			break;
>  		case KVM_REG_PPC_TAR:
>  			kvmppc_set_tar(vcpu, set_reg_val(id, *val));
> diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c
> index 059c08ae0340..e6e66c3792f8 100644
> --- a/arch/powerpc/kvm/emulate_loadstore.c
> +++ b/arch/powerpc/kvm/emulate_loadstore.c
> @@ -250,7 +250,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
>  				vcpu->arch.mmio_sp64_extend = 1;
>  
>  			emulated = kvmppc_handle_store(vcpu,
> -					VCPU_FPR(vcpu, op.reg), size, 1);
> +					kvmppc_get_fpr(vcpu, op.reg), size, 1);
>  
>  			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
>  				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);
> diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
> index ca9793c3d437..7f913e68342a 100644
> --- a/arch/powerpc/kvm/powerpc.c
> +++ b/arch/powerpc/kvm/powerpc.c
> @@ -938,7 +938,7 @@ static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
>  		val.vsxval[offset] = gpr;
>  		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
>  	} else {
> -		VCPU_VSX_FPR(vcpu, index, offset) = gpr;
> +		kvmppc_set_vsx_fpr(vcpu, index, offset, gpr);
>  	}
>  }
>  

Is there a particular reason some reg sets are broken into their own
patches? Looking at this hunk you would think the VR one got missed, but
it's in its own patch.

Not really a big deal, but I wouldn't mind them all in one patch. Or at
least the FP/VR/VSR in one since they're quite regular and similar.

Thanks,
Nick
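
Nick's point that the FP/VR/VSR accessors are quite regular suggests
they could also be macro-generated, in the style of the
BOOK3S_WRAPPER_SET macro visible in the same header. A minimal sketch of
that alternative (not something this patch is confirmed to do):

	/* Sketch: generating the VSX dword accessors with a macro,
	 * following the existing BOOK3S_WRAPPER_SET pattern;
	 * illustrative only. */
	#define VSX_DWORD_ACCESSOR(reg)					\
	static inline u64 kvmppc_get_##reg(struct kvm_vcpu *vcpu,	\
					   int i, int j)		\
	{								\
		return vcpu->arch.fp.fpr[i][j];				\
	}								\
	static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu,	\
					    int i, int j, u64 val)	\
	{								\
		vcpu->arch.fp.fpr[i][j] = val;				\
	}

	VSX_DWORD_ACCESSOR(vsx_fpr)	/* kvmppc_{get,set}_vsx_fpr() */

The open-coded versions in the patch are easier to grep for, which is a
reasonable argument for keeping them spelled out.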
Jordan Niethe June 10, 2023, 1:54 a.m. UTC | #2
On Wed, Jun 7, 2023 at 5:56 PM Nicholas Piggin <npiggin@gmail.com> wrote:
[snip]
>
> Is there a particular reason some reg sets are broken into their own
> patches? Looking at this hunk you would think the VR one got missed, but
> it's in its own patch.
>
> Not really a big deal, but I wouldn't mind them all in one patch. Or at
> least the FP/VR/VSR in one since they're quite regular and similar.

There's not really a reason.

Originally I had things even more broken apart but then thought one
patch made more sense. Part way through squashing the patches I had a
change of heart and thought I'd see if people had a preference.

I'll just finish the squashing for the next series.

Thanks,
Jordan

Patch

diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 4e91f54a3f9f..a632e79639f0 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -413,6 +413,37 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
 	return vcpu->arch.fault_dar;
 }
 
+static inline u64 kvmppc_get_fpr(struct kvm_vcpu *vcpu, int i)
+{
+	return vcpu->arch.fp.fpr[i][TS_FPROFFSET];
+}
+
+static inline void kvmppc_set_fpr(struct kvm_vcpu *vcpu, int i, u64 val)
+{
+	vcpu->arch.fp.fpr[i][TS_FPROFFSET] = val;
+}
+
+static inline u64 kvmppc_get_fpscr(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.fp.fpscr;
+}
+
+static inline void kvmppc_set_fpscr(struct kvm_vcpu *vcpu, u64 val)
+{
+	vcpu->arch.fp.fpscr = val;
+}
+
+
+static inline u64 kvmppc_get_vsx_fpr(struct kvm_vcpu *vcpu, int i, int j)
+{
+	return vcpu->arch.fp.fpr[i][j];
+}
+
+static inline void kvmppc_set_vsx_fpr(struct kvm_vcpu *vcpu, int i, int j, u64 val)
+{
+	vcpu->arch.fp.fpr[i][j] = val;
+}
+
 #define BOOK3S_WRAPPER_SET(reg, size)					\
 static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
 {									\
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
index 0c3401b2e19e..7c3291aa8922 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -89,6 +89,16 @@ static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
 	return vcpu->arch.regs.nip;
 }
 
+static inline void kvmppc_set_fpr(struct kvm_vcpu *vcpu, int i, u64 val)
+{
+	vcpu->arch.fp.fpr[i][TS_FPROFFSET] = val;
+}
+
+static inline u64 kvmppc_get_fpr(struct kvm_vcpu *vcpu, int i)
+{
+	return vcpu->arch.fp.fpr[i][TS_FPROFFSET];
+}
+
 #ifdef CONFIG_BOOKE
 static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
 {
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 2fe31b518886..6cd20ab9e94e 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -636,17 +636,17 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
 			break;
 		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
 			i = id - KVM_REG_PPC_FPR0;
-			*val = get_reg_val(id, VCPU_FPR(vcpu, i));
+			*val = get_reg_val(id, kvmppc_get_fpr(vcpu, i));
 			break;
 		case KVM_REG_PPC_FPSCR:
-			*val = get_reg_val(id, vcpu->arch.fp.fpscr);
+			*val = get_reg_val(id, kvmppc_get_fpscr(vcpu));
 			break;
 #ifdef CONFIG_VSX
 		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
 			if (cpu_has_feature(CPU_FTR_VSX)) {
 				i = id - KVM_REG_PPC_VSR0;
-				val->vsxval[0] = vcpu->arch.fp.fpr[i][0];
-				val->vsxval[1] = vcpu->arch.fp.fpr[i][1];
+				val->vsxval[0] = kvmppc_get_vsx_fpr(vcpu, i, 0);
+				val->vsxval[1] = kvmppc_get_vsx_fpr(vcpu, i, 1);
 			} else {
 				r = -ENXIO;
 			}
@@ -724,7 +724,7 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
 			break;
 		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
 			i = id - KVM_REG_PPC_FPR0;
-			VCPU_FPR(vcpu, i) = set_reg_val(id, *val);
+			kvmppc_set_fpr(vcpu, i, set_reg_val(id, *val));
 			break;
 		case KVM_REG_PPC_FPSCR:
 			vcpu->arch.fp.fpscr = set_reg_val(id, *val);
@@ -733,8 +733,8 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
 		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
 			if (cpu_has_feature(CPU_FTR_VSX)) {
 				i = id - KVM_REG_PPC_VSR0;
-				vcpu->arch.fp.fpr[i][0] = val->vsxval[0];
-				vcpu->arch.fp.fpr[i][1] = val->vsxval[1];
+				kvmppc_set_vsx_fpr(vcpu, i, 0, val->vsxval[0]);
+				kvmppc_set_vsx_fpr(vcpu, i, 1, val->vsxval[1]);
 			} else {
 				r = -ENXIO;
 			}
@@ -765,7 +765,7 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
 			break;
 #endif /* CONFIG_KVM_XIVE */
 		case KVM_REG_PPC_FSCR:
-			vcpu->arch.fscr = set_reg_val(id, *val);
+			kvmppc_set_fscr(vcpu, set_reg_val(id, *val));
 			break;
 		case KVM_REG_PPC_TAR:
 			kvmppc_set_tar(vcpu, set_reg_val(id, *val));
diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c
index 059c08ae0340..e6e66c3792f8 100644
--- a/arch/powerpc/kvm/emulate_loadstore.c
+++ b/arch/powerpc/kvm/emulate_loadstore.c
@@ -250,7 +250,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
 				vcpu->arch.mmio_sp64_extend = 1;
 
 			emulated = kvmppc_handle_store(vcpu,
-					VCPU_FPR(vcpu, op.reg), size, 1);
+					kvmppc_get_fpr(vcpu, op.reg), size, 1);
 
 			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
 				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index ca9793c3d437..7f913e68342a 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -938,7 +938,7 @@ static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
 		val.vsxval[offset] = gpr;
 		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
 	} else {
-		VCPU_VSX_FPR(vcpu, index, offset) = gpr;
+		kvmppc_set_vsx_fpr(vcpu, index, offset, gpr);
 	}
 }
 
@@ -954,8 +954,8 @@ static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
 		val.vsxval[1] = gpr;
 		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
 	} else {
-		VCPU_VSX_FPR(vcpu, index, 0) = gpr;
-		VCPU_VSX_FPR(vcpu, index, 1) = gpr;
+		kvmppc_set_vsx_fpr(vcpu, index, 0, gpr);
+		kvmppc_set_vsx_fpr(vcpu, index, 1, gpr);
 	}
 }
 
@@ -974,8 +974,8 @@ static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
 	} else {
 		val.vsx32val[0] = gpr;
 		val.vsx32val[1] = gpr;
-		VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0];
-		VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0];
+		kvmppc_set_vsx_fpr(vcpu, index, 0, val.vsxval[0]);
+		kvmppc_set_vsx_fpr(vcpu, index, 1, val.vsxval[0]);
 	}
 }
 
@@ -997,9 +997,9 @@ static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
 	} else {
 		dword_offset = offset / 2;
 		word_offset = offset % 2;
-		val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
+		val.vsxval[0] = kvmppc_get_vsx_fpr(vcpu, index, dword_offset);
 		val.vsx32val[word_offset] = gpr32;
-		VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
+		kvmppc_set_vsx_fpr(vcpu, index, dword_offset, val.vsxval[0]);
 	}
 }
 #endif /* CONFIG_VSX */
@@ -1194,14 +1194,14 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu)
 		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
 			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);
 
-		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
+		kvmppc_set_fpr(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK, gpr);
 		break;
 #ifdef CONFIG_PPC_BOOK3S
 	case KVM_MMIO_REG_QPR:
 		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
 		break;
 	case KVM_MMIO_REG_FQPR:
-		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
+		kvmppc_set_fpr(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK, gpr);
 		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
 		break;
 #endif
@@ -1419,7 +1419,7 @@ static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
 		}
 
 		if (rs < 32) {
-			*val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
+			*val = kvmppc_get_vsx_fpr(vcpu, rs, vsx_offset);
 		} else {
 			reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
 			*val = reg.vsxval[vsx_offset];
@@ -1438,7 +1438,7 @@ static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
 		if (rs < 32) {
 			dword_offset = vsx_offset / 2;
 			word_offset = vsx_offset % 2;
-			reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
+			reg.vsxval[0] = kvmppc_get_vsx_fpr(vcpu, rs, dword_offset);
 			*val = reg.vsx32val[word_offset];
 		} else {
 			reg.vval = VCPU_VSX_VR(vcpu, rs - 32);