diff mbox

[kernel] KVM: PPC: Create a virtual-mode only TCE table handlers

Message ID 1458269442-24292-1-git-send-email-aik@ozlabs.ru
State Accepted
Headers show

Commit Message

Alexey Kardashevskiy March 18, 2016, 2:50 a.m. UTC
Upcoming in-kernel VFIO acceleration needs different handling in real
and virtual modes which makes it hard to support both modes in
the same handler.

This creates a copy of kvmppc_rm_h_stuff_tce and kvmppc_rm_h_put_tce
in addition to the existing kvmppc_rm_h_put_tce_indirect.

This also fixes linker breakage when only PR KVM was selected (leaving
HV KVM off): the kvmppc_h_put_tce/kvmppc_h_stuff_tce functions
would not compile at all and the link would fail.

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
---
 arch/powerpc/kvm/book3s_64_vio.c        | 52 +++++++++++++++++++++++++++++++++
 arch/powerpc/kvm/book3s_64_vio_hv.c     |  8 ++---
 arch/powerpc/kvm/book3s_hv_rmhandlers.S |  4 +--
 3 files changed, 57 insertions(+), 7 deletions(-)

Comments

Christian Zigotzky March 18, 2016, 9:04 a.m. UTC | #1
Hi Alexey,

Many thanks for your really fast response. I successfully compiled the 
latest Git kernel with your new patch today. After that, I successfully 
tested it with Mac-on-Linux/PR KVM. Mac OS X Tiger PowerPC (virtual G3 
CPU) works fantastic in MoL/PR KVM with the latest patched Git kernel.

Screenshot: https://plus.google.com/115515624056477014971/posts/hsQ2m6rYBdr

Have a nice day and thanks a lot to all for your hard work.

Cheers,

Christian

On 18 March 2016 at 03:50 AM, Alexey Kardashevskiy wrote:
> Upcoming in-kernel VFIO acceleration needs different handling in real
> and virtual modes which makes it hard to support both modes in
> the same handler.
>
> This creates a copy of kvmppc_rm_h_stuff_tce and kvmppc_rm_h_put_tce
> in addition to the existing kvmppc_rm_h_put_tce_indirect.
>
> This also fixes linker breakage when only PR KVM was selected (leaving
> HV KVM off): the kvmppc_h_put_tce/kvmppc_h_stuff_tce functions
> would not compile at all and the link would fail.
>
> Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
> ---
>   arch/powerpc/kvm/book3s_64_vio.c        | 52 +++++++++++++++++++++++++++++++++
>   arch/powerpc/kvm/book3s_64_vio_hv.c     |  8 ++---
>   arch/powerpc/kvm/book3s_hv_rmhandlers.S |  4 +--
>   3 files changed, 57 insertions(+), 7 deletions(-)
>
> diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
> index 2c2d103..75469f6 100644
> --- a/arch/powerpc/kvm/book3s_64_vio.c
> +++ b/arch/powerpc/kvm/book3s_64_vio.c
> @@ -209,6 +209,32 @@ fail:
>   	return ret;
>   }
>   
> +long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
> +		      unsigned long ioba, unsigned long tce)
> +{
> +	struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn);
> +	long ret;
> +
> +	/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
> +	/* 	    liobn, ioba, tce); */
> +
> +	if (!stt)
> +		return H_TOO_HARD;
> +
> +	ret = kvmppc_ioba_validate(stt, ioba, 1);
> +	if (ret != H_SUCCESS)
> +		return ret;
> +
> +	ret = kvmppc_tce_validate(stt, tce);
> +	if (ret != H_SUCCESS)
> +		return ret;
> +
> +	kvmppc_tce_put(stt, ioba >> stt->page_shift, tce);
> +
> +	return H_SUCCESS;
> +}
> +EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);
> +
>   long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
>   		unsigned long liobn, unsigned long ioba,
>   		unsigned long tce_list, unsigned long npages)
> @@ -264,3 +290,29 @@ unlock_exit:
>   	return ret;
>   }
>   EXPORT_SYMBOL_GPL(kvmppc_h_put_tce_indirect);
> +
> +long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
> +		unsigned long liobn, unsigned long ioba,
> +		unsigned long tce_value, unsigned long npages)
> +{
> +	struct kvmppc_spapr_tce_table *stt;
> +	long i, ret;
> +
> +	stt = kvmppc_find_table(vcpu, liobn);
> +	if (!stt)
> +		return H_TOO_HARD;
> +
> +	ret = kvmppc_ioba_validate(stt, ioba, npages);
> +	if (ret != H_SUCCESS)
> +		return ret;
> +
> +	/* Check permission bits only to allow userspace poison TCE for debug */
> +	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
> +		return H_PARAMETER;
> +
> +	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
> +		kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);
> +
> +	return H_SUCCESS;
> +}
> +EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);
> diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
> index 44be73e..18f0227 100644
> --- a/arch/powerpc/kvm/book3s_64_vio_hv.c
> +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
> @@ -180,8 +180,8 @@ long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
>   EXPORT_SYMBOL_GPL(kvmppc_gpa_to_ua);
>   
>   #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
> -long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
> -		      unsigned long ioba, unsigned long tce)
> +long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
> +		unsigned long ioba, unsigned long tce)
>   {
>   	struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn);
>   	long ret;
> @@ -204,7 +204,6 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
>   
>   	return H_SUCCESS;
>   }
> -EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);
>   
>   static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu,
>   		unsigned long ua, unsigned long *phpa)
> @@ -296,7 +295,7 @@ unlock_exit:
>   	return ret;
>   }
>   
> -long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
> +long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
>   		unsigned long liobn, unsigned long ioba,
>   		unsigned long tce_value, unsigned long npages)
>   {
> @@ -320,7 +319,6 @@ long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
>   
>   	return H_SUCCESS;
>   }
> -EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);
>   
>   long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
>   		      unsigned long ioba)
> diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
> index 85b32f1..81b0b51 100644
> --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
> +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
> @@ -1942,7 +1942,7 @@ hcall_real_table:
>   	.long	DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
>   	.long	DOTSYM(kvmppc_h_protect) - hcall_real_table
>   	.long	DOTSYM(kvmppc_h_get_tce) - hcall_real_table
> -	.long	DOTSYM(kvmppc_h_put_tce) - hcall_real_table
> +	.long	DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table
>   	.long	0		/* 0x24 - H_SET_SPRG0 */
>   	.long	DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
>   	.long	0		/* 0x2c */
> @@ -2020,7 +2020,7 @@ hcall_real_table:
>   	.long	0		/* 0x12c */
>   	.long	0		/* 0x130 */
>   	.long	DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
> -	.long	DOTSYM(kvmppc_h_stuff_tce) - hcall_real_table
> +	.long	DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table
>   	.long	DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table
>   	.long	0		/* 0x140 */
>   	.long	0		/* 0x144 */

--
To unsubscribe from this list: send the line "unsubscribe kvm-ppc" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Paul Mackerras March 18, 2016, 9:16 a.m. UTC | #2
On Fri, Mar 18, 2016 at 01:50:42PM +1100, Alexey Kardashevskiy wrote:
> Upcoming in-kernel VFIO acceleration needs different handling in real
> and virtual modes which makes it hard to support both modes in
> the same handler.
> 
> This creates a copy of kvmppc_rm_h_stuff_tce and kvmppc_rm_h_put_tce
> in addition to the existing kvmppc_rm_h_put_tce_indirect.
> 
> This also fixes linker breakage when only PR KVM was selected (leaving
> HV KVM off): the kvmppc_h_put_tce/kvmppc_h_stuff_tce functions
> would not compile at all and the link would fail.
> 
> Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>

Acked-by: Paul Mackerras <paulus@samba.org>

Paolo, will you take this directly, or do you want me to generate
a pull request?

Paul.
--
To unsubscribe from this list: send the line "unsubscribe kvm-ppc" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Paolo Bonzini March 18, 2016, 9:57 a.m. UTC | #3
On 18/03/2016 10:16, Paul Mackerras wrote:
> On Fri, Mar 18, 2016 at 01:50:42PM +1100, Alexey Kardashevskiy wrote:
>> Upcoming in-kernel VFIO acceleration needs different handling in real
>> and virtual modes which makes it hard to support both modes in
>> the same handler.
>>
>> This creates a copy of kvmppc_rm_h_stuff_tce and kvmppc_rm_h_put_tce
>> in addition to the existing kvmppc_rm_h_put_tce_indirect.
>>
>> This also fixes linker breakage when only PR KVM was selected (leaving
>> HV KVM off): the kvmppc_h_put_tce/kvmppc_h_stuff_tce functions
>> would not compile at all and the link would fail.
>>
>> Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
> 
> Acked-by: Paul Mackerras <paulus@samba.org>
> 
> Paolo, will you take this directly, or do you want me to generate
> a pull request?

I can take it directly.

Paolo
--
To unsubscribe from this list: send the line "unsubscribe kvm-ppc" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Christian Zigotzky March 19, 2016, 1:30 p.m. UTC | #4
Hi All,

Just for info: I successfully tested the latest Git kernel with the 
patched PR KVM today. I booted a ubuntu MATE 16.04 LTS PowerPC live DVD 
with a virtual Power Mac G3 machine today.

Screenshot: https://plus.google.com/115515624056477014971/posts/HAuxJT4WGPr

Have a nice day.

Cheers,

Christian
--
To unsubscribe from this list: send the line "unsubscribe kvm-ppc" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
diff mbox

Patch

diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index 2c2d103..75469f6 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -209,6 +209,32 @@  fail:
 	return ret;
 }
 
+long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+		      unsigned long ioba, unsigned long tce)
+{
+	struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn);
+	long ret;
+
+	/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
+	/* 	    liobn, ioba, tce); */
+
+	if (!stt)
+		return H_TOO_HARD;
+
+	ret = kvmppc_ioba_validate(stt, ioba, 1);
+	if (ret != H_SUCCESS)
+		return ret;
+
+	ret = kvmppc_tce_validate(stt, tce);
+	if (ret != H_SUCCESS)
+		return ret;
+
+	kvmppc_tce_put(stt, ioba >> stt->page_shift, tce);
+
+	return H_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);
+
 long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 		unsigned long liobn, unsigned long ioba,
 		unsigned long tce_list, unsigned long npages)
@@ -264,3 +290,29 @@  unlock_exit:
 	return ret;
 }
 EXPORT_SYMBOL_GPL(kvmppc_h_put_tce_indirect);
+
+long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
+		unsigned long liobn, unsigned long ioba,
+		unsigned long tce_value, unsigned long npages)
+{
+	struct kvmppc_spapr_tce_table *stt;
+	long i, ret;
+
+	stt = kvmppc_find_table(vcpu, liobn);
+	if (!stt)
+		return H_TOO_HARD;
+
+	ret = kvmppc_ioba_validate(stt, ioba, npages);
+	if (ret != H_SUCCESS)
+		return ret;
+
+	/* Check permission bits only to allow userspace poison TCE for debug */
+	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
+		return H_PARAMETER;
+
+	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
+		kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);
+
+	return H_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index 44be73e..18f0227 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -180,8 +180,8 @@  long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
 EXPORT_SYMBOL_GPL(kvmppc_gpa_to_ua);
 
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
-		      unsigned long ioba, unsigned long tce)
+long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+		unsigned long ioba, unsigned long tce)
 {
 	struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn);
 	long ret;
@@ -204,7 +204,6 @@  long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 
 	return H_SUCCESS;
 }
-EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);
 
 static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu,
 		unsigned long ua, unsigned long *phpa)
@@ -296,7 +295,7 @@  unlock_exit:
 	return ret;
 }
 
-long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
+long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
 		unsigned long liobn, unsigned long ioba,
 		unsigned long tce_value, unsigned long npages)
 {
@@ -320,7 +319,6 @@  long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
 
 	return H_SUCCESS;
 }
-EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);
 
 long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 		      unsigned long ioba)
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 85b32f1..81b0b51 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1942,7 +1942,7 @@  hcall_real_table:
 	.long	DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
 	.long	DOTSYM(kvmppc_h_protect) - hcall_real_table
 	.long	DOTSYM(kvmppc_h_get_tce) - hcall_real_table
-	.long	DOTSYM(kvmppc_h_put_tce) - hcall_real_table
+	.long	DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table
 	.long	0		/* 0x24 - H_SET_SPRG0 */
 	.long	DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
 	.long	0		/* 0x2c */
@@ -2020,7 +2020,7 @@  hcall_real_table:
 	.long	0		/* 0x12c */
 	.long	0		/* 0x130 */
 	.long	DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
-	.long	DOTSYM(kvmppc_h_stuff_tce) - hcall_real_table
+	.long	DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table
 	.long	DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table
 	.long	0		/* 0x140 */
 	.long	0		/* 0x144 */