Patchwork [14/27] KVM: PPC: Magic Page BookE support

login
register
mail settings
Submitter Alexander Graf
Date July 1, 2010, 10:42 a.m.
Message ID <1277980982-12433-15-git-send-email-agraf@suse.de>
Download mbox | patch
Permalink /patch/57531/
State Not Applicable
Headers show

Comments

Alexander Graf - July 1, 2010, 10:42 a.m.
As we now have Book3s support for the magic page, we also need BookE to
join in on the party.

This patch implements generic magic page logic for BookE and specific
TLB logic for e500. I didn't have any 440 around, so I didn't dare to
blindly try and write up broken code.

Signed-off-by: Alexander Graf <agraf@suse.de>
---
 arch/powerpc/kvm/booke.c    |   29 +++++++++++++++++++++++++++++
 arch/powerpc/kvm/e500_tlb.c |   19 +++++++++++++++++--
 2 files changed, 46 insertions(+), 2 deletions(-)
Josh Boyer - July 1, 2010, 11:18 a.m.
On Thu, Jul 01, 2010 at 12:42:49PM +0200, Alexander Graf wrote:
>As we now have Book3s support for the magic page, we also need BookE to
>join in on the party.
>
>This patch implements generic magic page logic for BookE and specific
>TLB logic for e500. I didn't have any 440 around, so I didn't dare to
>blindly try and write up broken code.

Is this the only patch in the series that needs 440-specific code?  Also,
does 440 KVM still work after this series is applied, even without the code
that is missing from this patch?

josh
Alexander Graf - July 1, 2010, 12:25 p.m.
Josh Boyer wrote:
> On Thu, Jul 01, 2010 at 12:42:49PM +0200, Alexander Graf wrote:
>   
>> As we now have Book3s support for the magic page, we also need BookE to
>> join in on the party.
>>
>> This patch implements generic magic page logic for BookE and specific
>> TLB logic for e500. I didn't have any 440 around, so I didn't dare to
>> blindly try and write up broken code.
>>     
>
> Is this the only patch in the series that needs 440-specific code?  Also,
> does 440 KVM still work after this series is applied, even without the code
> that is missing from this patch?
>   

Yes, pretty much. The rest of the code is generic. But 440 should easily
just work with this patch set. If you have one to try it out, please
give it a try. I can even prepare a 440 enabling patch so you could
verify if it works.

Alex
Liu Yu-B13201 - July 12, 2010, 11:24 a.m.
> -----Original Message-----
> From: kvm-owner@vger.kernel.org 
> [mailto:kvm-owner@vger.kernel.org] On Behalf Of Alexander Graf
> Sent: Thursday, July 01, 2010 6:43 PM
> To: kvm-ppc@vger.kernel.org
> Cc: KVM list; linuxppc-dev
> Subject: [PATCH 14/27] KVM: PPC: Magic Page BookE support
> 
> As we now have Book3s support for the magic page, we also 
> need BookE to
> join in on the party.
> 
> This patch implements generic magic page logic for BookE and specific
> TLB logic for e500. I didn't have any 440 around, so I didn't dare to
> blindly try and write up broken code.
> 
> Signed-off-by: Alexander Graf <agraf@suse.de>
> ---
>  arch/powerpc/kvm/booke.c    |   29 +++++++++++++++++++++++++++++
>  arch/powerpc/kvm/e500_tlb.c |   19 +++++++++++++++++--
>  2 files changed, 46 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
> index 0f8ff9d..9609207 100644
> --- a/arch/powerpc/kvm/booke.c
> +++ b/arch/powerpc/kvm/booke.c

> @@ -380,6 +406,9 @@ int kvmppc_handle_exit(struct kvm_run 
> *run, struct kvm_vcpu *vcpu,
>  		gpa_t gpaddr;
>  		gfn_t gfn;
>  
> +		if (kvmppc_dtlb_magic_page(vcpu, eaddr))
> +			break;
> +
>  		/* Check the guest TLB. */
>  		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
>  		if (gtlb_index < 0) {

How about moving this part into the TLB search fail path?

Patch

diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 0f8ff9d..9609207 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -244,6 +244,31 @@  void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
 		vcpu->arch.shared->int_pending = 0;
 }
 
+/* Check whether a DTLB miss hit the guest's magic (paravirt shared) page;
+   if so, map it via kvmppc_mmu_map(), account the exit, and return non-zero
+   so the caller can skip the normal guest TLB search. Returns 0 otherwise. */
+int kvmppc_dtlb_magic_page(struct kvm_vcpu *vcpu, ulong eaddr)
+{
+	ulong mp_ea = vcpu->arch.magic_page_ea;
+	ulong gpaddr = vcpu->arch.magic_page_pa;
+	int gtlb_index = 11 | (1 << 16); /* Arbitrary victim slot; (1 << 16) presumably selects TLB1 — confirm encoding */
+
+	/* No magic page configured for this guest — nothing to do */
+	if(likely(!mp_ea))
+		return 0;
+
+	/* Compare 4k page frames: is the faulting EA on the magic page? */
+	if(likely((eaddr >> 12) != (mp_ea >> 12)))
+		return 0;
+
+	/* The magic page is only mapped for the kernel (!MSR_PR), never user mode */
+	if(vcpu->arch.shared->msr & MSR_PR)
+		return 0;
+
+	kvmppc_mmu_map(vcpu, vcpu->arch.magic_page_ea, gpaddr, gtlb_index);
+	kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
+
+	return 1;
+}
+
 /**
  * kvmppc_handle_exit
  *
@@ -311,6 +336,7 @@  int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			r = RESUME_HOST;
 			break;
 		case EMULATE_FAIL:
+		case EMULATE_DO_MMIO:
 			/* XXX Deliver Program interrupt to guest. */
 			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
 			       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
@@ -380,6 +406,9 @@  int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		gpa_t gpaddr;
 		gfn_t gfn;
 
+		if (kvmppc_dtlb_magic_page(vcpu, eaddr))
+			break;
+
 		/* Check the guest TLB. */
 		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
 		if (gtlb_index < 0) {
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index 66845a5..f5582ca 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -295,9 +295,22 @@  static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 	struct page *new_page;
 	struct tlbe *stlbe;
 	hpa_t hpaddr;
+	u32 mas2 = gtlbe->mas2;
+	u32 mas3 = gtlbe->mas3;
 
 	stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];
 
+	if ((vcpu_e500->vcpu.arch.magic_page_ea) &&
+	    ((vcpu_e500->vcpu.arch.magic_page_pa >> PAGE_SHIFT) == gfn) &&
+	    !(vcpu_e500->vcpu.arch.shared->msr & MSR_PR)) {
+		mas2 = 0;
+		mas3 = E500_TLB_SUPER_PERM_MASK;
+		hpaddr = virt_to_phys(vcpu_e500->vcpu.arch.shared);
+		new_page = pfn_to_page(hpaddr >> PAGE_SHIFT);
+		get_page(new_page);
+		goto mapped;
+	}
+
 	/* Get reference to new page. */
 	new_page = gfn_to_page(vcpu_e500->vcpu.kvm, gfn);
 	if (is_error_page(new_page)) {
@@ -305,6 +318,8 @@  static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 		kvm_release_page_clean(new_page);
 		return;
 	}
+
+mapped:
 	hpaddr = page_to_phys(new_page);
 
 	/* Drop reference to old page. */
@@ -316,10 +331,10 @@  static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 	stlbe->mas1 = MAS1_TSIZE(BOOK3E_PAGESZ_4K)
 		| MAS1_TID(get_tlb_tid(gtlbe)) | MAS1_TS | MAS1_VALID;
 	stlbe->mas2 = (gvaddr & MAS2_EPN)
-		| e500_shadow_mas2_attrib(gtlbe->mas2,
+		| e500_shadow_mas2_attrib(mas2,
 				vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
 	stlbe->mas3 = (hpaddr & MAS3_RPN)
-		| e500_shadow_mas3_attrib(gtlbe->mas3,
+		| e500_shadow_mas3_attrib(mas3,
 				vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
 	stlbe->mas7 = (hpaddr >> 32) & MAS7_RPN;