[v3,2/6] KVM: PPC: Rename accessor generator macros

Message ID 20230807014553.1168699-3-jniethe5@gmail.com
State New
Series KVM: PPC: Nested APIv2 guest support

Commit Message

Jordan Niethe Aug. 7, 2023, 1:45 a.m. UTC
More "wrapper" style accessor-generating macros will be introduced for
nestedv2 guest support. Rename the existing macros with more descriptive
names now so that there is a consistent naming convention.

Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
---
v3:
  - New to series
---
 arch/powerpc/include/asm/kvm_ppc.h | 60 +++++++++++++++---------------
 1 file changed, 30 insertions(+), 30 deletions(-)
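
As a rough illustration of what these generators produce (reconstructed from
the macro bodies in the diff below, not part of the patch), a use such as
KVMPPC_VCPU_SHARED_REGS_ACESSOR(sprg4, 64) expands to a getter/setter pair
over the guest-endian copy of the register in the vcpu shared page:

static inline u64 kvmppc_get_sprg4(struct kvm_vcpu *vcpu)
{
	/* the shared-page registers are stored in guest endianness */
	if (kvmppc_shared_big_endian(vcpu))
		return be64_to_cpu(vcpu->arch.shared->sprg4);
	else
		return le64_to_cpu(vcpu->arch.shared->sprg4);
}

static inline void kvmppc_set_sprg4(struct kvm_vcpu *vcpu, u64 val)
{
	if (kvmppc_shared_big_endian(vcpu))
		vcpu->arch.shared->sprg4 = cpu_to_be64(val);
	else
		vcpu->arch.shared->sprg4 = cpu_to_le64(val);
}

The KVMPPC_BOOKE_HV_SPRNG_ACESSOR generators instead emit plain
mfspr()/mtspr() wrappers on the given bookehv_spr.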

Comments

Nicholas Piggin Aug. 14, 2023, 8:27 a.m. UTC | #1
On Mon Aug 7, 2023 at 11:45 AM AEST, Jordan Niethe wrote:
> More "wrapper" style accessor-generating macros will be introduced for
> nestedv2 guest support. Rename the existing macros with more descriptive
> names now so that there is a consistent naming convention.
>
> Signed-off-by: Jordan Niethe <jniethe5@gmail.com>

> ---
> v3:
>   - New to series
> ---
>  arch/powerpc/include/asm/kvm_ppc.h | 60 +++++++++++++++---------------
>  1 file changed, 30 insertions(+), 30 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
> index d16d80ad2ae4..b66084a81dd0 100644
> --- a/arch/powerpc/include/asm/kvm_ppc.h
> +++ b/arch/powerpc/include/asm/kvm_ppc.h
> @@ -927,19 +927,19 @@ static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
>  #endif
>  }
>  
> -#define SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
> +#define KVMPPC_BOOKE_HV_SPRNG_ACESSOR_GET(reg, bookehv_spr)		\
>  static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
>  {									\
>  	return mfspr(bookehv_spr);					\
>  }									\
>  
> -#define SPRNG_WRAPPER_SET(reg, bookehv_spr)				\
> +#define KVMPPC_BOOKE_HV_SPRNG_ACESSOR_SET(reg, bookehv_spr)		\
>  static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val)	\
>  {									\
>  	mtspr(bookehv_spr, val);						\
>  }									\
>  
> -#define SHARED_WRAPPER_GET(reg, size)					\
> +#define KVMPPC_VCPU_SHARED_REGS_ACESSOR_GET(reg, size)			\
>  static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
>  {									\
>  	if (kvmppc_shared_big_endian(vcpu))				\
> @@ -948,7 +948,7 @@ static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
>  	       return le##size##_to_cpu(vcpu->arch.shared->reg);	\
>  }									\
>  
> -#define SHARED_WRAPPER_SET(reg, size)					\
> +#define KVMPPC_VCPU_SHARED_REGS_ACESSOR_SET(reg, size)			\
>  static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
>  {									\
>  	if (kvmppc_shared_big_endian(vcpu))				\
> @@ -957,36 +957,36 @@ static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
>  	       vcpu->arch.shared->reg = cpu_to_le##size(val);		\
>  }									\
>  
> -#define SHARED_WRAPPER(reg, size)					\
> -	SHARED_WRAPPER_GET(reg, size)					\
> -	SHARED_WRAPPER_SET(reg, size)					\
> +#define KVMPPC_VCPU_SHARED_REGS_ACESSOR(reg, size)					\
> +	KVMPPC_VCPU_SHARED_REGS_ACESSOR_GET(reg, size)					\
> +	KVMPPC_VCPU_SHARED_REGS_ACESSOR_SET(reg, size)					\
>  
> -#define SPRNG_WRAPPER(reg, bookehv_spr)					\
> -	SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
> -	SPRNG_WRAPPER_SET(reg, bookehv_spr)				\
> +#define KVMPPC_BOOKE_HV_SPRNG_ACESSOR(reg, bookehv_spr)					\
> +	KVMPPC_BOOKE_HV_SPRNG_ACESSOR_GET(reg, bookehv_spr)				\
> +	KVMPPC_BOOKE_HV_SPRNG_ACESSOR_SET(reg, bookehv_spr)				\
>  
>  #ifdef CONFIG_KVM_BOOKE_HV
>  
> -#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
> -	SPRNG_WRAPPER(reg, bookehv_spr)					\
> +#define KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(reg, size, bookehv_spr)	\
> +	KVMPPC_BOOKE_HV_SPRNG_ACESSOR(reg, bookehv_spr)			\
>  
>  #else
>  
> -#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
> -	SHARED_WRAPPER(reg, size)					\
> +#define KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(reg, size, bookehv_spr)	\
> +	KVMPPC_VCPU_SHARED_REGS_ACESSOR(reg, size)			\

Not the greatest name I've ever seen :D Hard to be concise and
consistent though, this is an odd one.

Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
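
(For context on why the name mentions both generators: depending on
CONFIG_KVM_BOOKE_HV, KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR falls
back to one of the two plain generators above. For example,
KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(srr0, 64, SPRN_GSRR0)
roughly produces the following getter, with a matching setter in each case;
this is an illustration only, not part of the patch.)

#ifdef CONFIG_KVM_BOOKE_HV
/* Book E HV: read the guest SPR directly; the size argument is unused here */
static inline ulong kvmppc_get_srr0(struct kvm_vcpu *vcpu)
{
	return mfspr(SPRN_GSRR0);
}
#else
/* otherwise: read the copy kept in the vcpu shared page, in guest endianness */
static inline u64 kvmppc_get_srr0(struct kvm_vcpu *vcpu)
{
	if (kvmppc_shared_big_endian(vcpu))
		return be64_to_cpu(vcpu->arch.shared->srr0);
	else
		return le64_to_cpu(vcpu->arch.shared->srr0);
}
#endif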

Jordan Niethe Aug. 16, 2023, 3:20 a.m. UTC | #2
On 14/8/23 6:27 pm, Nicholas Piggin wrote:
> On Mon Aug 7, 2023 at 11:45 AM AEST, Jordan Niethe wrote:
>> [...]
>> -#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
>> -	SHARED_WRAPPER(reg, size)					\
>> +#define KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(reg, size, bookehv_spr)	\
>> +	KVMPPC_VCPU_SHARED_REGS_ACESSOR(reg, size)			\
> 
> Not the greatest name I've ever seen :D Hard to be concise and
> consistent though, this is an odd one.

Yes, it is a bit wordy.

> 
> Reviewed-by: Nicholas Piggin <npiggin@gmail.com>

Thanks.

Patch

diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index d16d80ad2ae4..b66084a81dd0 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -927,19 +927,19 @@  static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
 #endif
 }
 
-#define SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
+#define KVMPPC_BOOKE_HV_SPRNG_ACESSOR_GET(reg, bookehv_spr)		\
 static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
 {									\
 	return mfspr(bookehv_spr);					\
 }									\
 
-#define SPRNG_WRAPPER_SET(reg, bookehv_spr)				\
+#define KVMPPC_BOOKE_HV_SPRNG_ACESSOR_SET(reg, bookehv_spr)		\
 static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val)	\
 {									\
 	mtspr(bookehv_spr, val);						\
 }									\
 
-#define SHARED_WRAPPER_GET(reg, size)					\
+#define KVMPPC_VCPU_SHARED_REGS_ACESSOR_GET(reg, size)			\
 static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
 {									\
 	if (kvmppc_shared_big_endian(vcpu))				\
@@ -948,7 +948,7 @@  static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
 	       return le##size##_to_cpu(vcpu->arch.shared->reg);	\
 }									\
 
-#define SHARED_WRAPPER_SET(reg, size)					\
+#define KVMPPC_VCPU_SHARED_REGS_ACESSOR_SET(reg, size)			\
 static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
 {									\
 	if (kvmppc_shared_big_endian(vcpu))				\
@@ -957,36 +957,36 @@  static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
 	       vcpu->arch.shared->reg = cpu_to_le##size(val);		\
 }									\
 
-#define SHARED_WRAPPER(reg, size)					\
-	SHARED_WRAPPER_GET(reg, size)					\
-	SHARED_WRAPPER_SET(reg, size)					\
+#define KVMPPC_VCPU_SHARED_REGS_ACESSOR(reg, size)					\
+	KVMPPC_VCPU_SHARED_REGS_ACESSOR_GET(reg, size)					\
+	KVMPPC_VCPU_SHARED_REGS_ACESSOR_SET(reg, size)					\
 
-#define SPRNG_WRAPPER(reg, bookehv_spr)					\
-	SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
-	SPRNG_WRAPPER_SET(reg, bookehv_spr)				\
+#define KVMPPC_BOOKE_HV_SPRNG_ACESSOR(reg, bookehv_spr)					\
+	KVMPPC_BOOKE_HV_SPRNG_ACESSOR_GET(reg, bookehv_spr)				\
+	KVMPPC_BOOKE_HV_SPRNG_ACESSOR_SET(reg, bookehv_spr)				\
 
 #ifdef CONFIG_KVM_BOOKE_HV
 
-#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
-	SPRNG_WRAPPER(reg, bookehv_spr)					\
+#define KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(reg, size, bookehv_spr)	\
+	KVMPPC_BOOKE_HV_SPRNG_ACESSOR(reg, bookehv_spr)			\
 
 #else
 
-#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
-	SHARED_WRAPPER(reg, size)					\
+#define KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(reg, size, bookehv_spr)	\
+	KVMPPC_VCPU_SHARED_REGS_ACESSOR(reg, size)			\
 
 #endif
 
-SHARED_WRAPPER(critical, 64)
-SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
-SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1)
-SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2)
-SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3)
-SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
-SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1)
-SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR)
-SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
-SHARED_WRAPPER_GET(msr, 64)
+KVMPPC_VCPU_SHARED_REGS_ACESSOR(critical, 64)
+KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg0, 64, SPRN_GSPRG0)
+KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg1, 64, SPRN_GSPRG1)
+KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg2, 64, SPRN_GSPRG2)
+KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg3, 64, SPRN_GSPRG3)
+KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(srr0, 64, SPRN_GSRR0)
+KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(srr1, 64, SPRN_GSRR1)
+KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(dar, 64, SPRN_GDEAR)
+KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(esr, 64, SPRN_GESR)
+KVMPPC_VCPU_SHARED_REGS_ACESSOR_GET(msr, 64)
 static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
 {
 	if (kvmppc_shared_big_endian(vcpu))
@@ -994,12 +994,12 @@  static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
 	else
 	       vcpu->arch.shared->msr = cpu_to_le64(val);
 }
-SHARED_WRAPPER(dsisr, 32)
-SHARED_WRAPPER(int_pending, 32)
-SHARED_WRAPPER(sprg4, 64)
-SHARED_WRAPPER(sprg5, 64)
-SHARED_WRAPPER(sprg6, 64)
-SHARED_WRAPPER(sprg7, 64)
+KVMPPC_VCPU_SHARED_REGS_ACESSOR(dsisr, 32)
+KVMPPC_VCPU_SHARED_REGS_ACESSOR(int_pending, 32)
+KVMPPC_VCPU_SHARED_REGS_ACESSOR(sprg4, 64)
+KVMPPC_VCPU_SHARED_REGS_ACESSOR(sprg5, 64)
+KVMPPC_VCPU_SHARED_REGS_ACESSOR(sprg6, 64)
+KVMPPC_VCPU_SHARED_REGS_ACESSOR(sprg7, 64)
 
 static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
 {