diff mbox

[RFC,1/2] KVM: PPC: Book3S HV: Make virtual processor area registration more robust

Message ID 20111220102257.GC5626@bloggs.ozlabs.ibm.com (mailing list archive)
State Not Applicable
Headers show

Commit Message

Paul Mackerras Dec. 20, 2011, 10:22 a.m. UTC
The PAPR API allows three sorts of per-virtual-processor areas to be
registered (VPA, SLB shadow buffer, and dispatch trace log), and
furthermore, these can be registered and unregistered for another
virtual CPU.  Currently we just update the vcpu fields pointing to
these areas at the time of registration or unregistration.  If this
is done on another vcpu, there is the possibility that the target vcpu
is using those fields at the time and could end up using a bogus
pointer and corrupting memory.

This fixes the race by making the target cpu itself do the update, so
we can be sure that the update happens at a time when the fields aren't
being used.  These are updated from a set of 'next_*' fields, which
are protected by a spinlock.  (We could have just taken the spinlock
when using the vpa, slb_shadow or dtl fields, but that would mean
taking the spinlock on every guest entry and exit.)

The code in do_h_register_vpa now takes the spinlock and updates the
'next_*' fields.  There is also a set of '*_pending' flags to indicate
that an update is pending.

This also changes 'struct dtl' (which was undefined) to 'struct dtl_entry',
which is what the rest of the kernel uses.

Signed-off-by: Paul Mackerras <paulus@samba.org>
---
 arch/powerpc/include/asm/kvm_host.h |   15 +++-
 arch/powerpc/kvm/book3s_hv.c        |  167 +++++++++++++++++++++++++----------
 2 files changed, 131 insertions(+), 51 deletions(-)

Comments

Alexander Graf Jan. 16, 2012, 1:04 p.m. UTC | #1
On 20.12.2011, at 11:22, Paul Mackerras wrote:

> The PAPR API allows three sorts of per-virtual-processor areas to be
> registered (VPA, SLB shadow buffer, and dispatch trace log), and
> furthermore, these can be registered and unregistered for another
> virtual CPU.  Currently we just update the vcpu fields pointing to
> these areas at the time of registration or unregistration.  If this
> is done on another vcpu, there is the possibility that the target vcpu
> is using those fields at the time and could end up using a bogus
> pointer and corrupting memory.
> 
> This fixes the race by making the target cpu itself do the update, so
> we can be sure that the update happens at a time when the fields aren't
> being used.  These are updated from a set of 'next_*' fields, which
> are protected by a spinlock.  (We could have just taken the spinlock
> when using the vpa, slb_shadow or dtl fields, but that would mean
> taking the spinlock on every guest entry and exit.)
> 
> The code in do_h_register_vpa now takes the spinlock and updates the
> 'next_*' fields.  There is also a set of '*_pending' flags to indicate
> that an update is pending.
> 
> This also changes 'struct dtl' (which was undefined) to 'struct dtl_entry',
> which is what the rest of the kernel uses.
> 
> Signed-off-by: Paul Mackerras <paulus@samba.org>
> ---
> arch/powerpc/include/asm/kvm_host.h |   15 +++-
> arch/powerpc/kvm/book3s_hv.c        |  167 +++++++++++++++++++++++++----------
> 2 files changed, 131 insertions(+), 51 deletions(-)
> 
> diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
> index 1cb6e52..b1126c1 100644
> --- a/arch/powerpc/include/asm/kvm_host.h
> +++ b/arch/powerpc/include/asm/kvm_host.h
> @@ -82,7 +82,7 @@ struct kvm_vcpu;
> 
> struct lppaca;
> struct slb_shadow;
> -struct dtl;
> +struct dtl_entry;
> 
> struct kvm_vm_stat {
> 	u32 remote_tlb_flush;
> @@ -449,9 +449,18 @@ struct kvm_vcpu_arch {
> 	u32 last_inst;
> 
> 	struct lppaca *vpa;
> +	struct lppaca *next_vpa;
> 	struct slb_shadow *slb_shadow;
> -	struct dtl *dtl;
> -	struct dtl *dtl_end;
> +	struct slb_shadow *next_slb_shadow;
> +	struct dtl_entry *dtl;
> +	struct dtl_entry *dtl_end;
> +	struct dtl_entry *dtl_ptr;
> +	struct dtl_entry *next_dtl;
> +	struct dtl_entry *next_dtl_end;
> +	u8 vpa_pending;
> +	u8 slb_shadow_pending;
> +	u8 dtl_pending;
> +	spinlock_t vpa_update_lock;
> 
> 	wait_queue_head_t *wqp;
> 	struct kvmppc_vcore *vcore;
> diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
> index c11d960..6f6e88d 100644
> --- a/arch/powerpc/kvm/book3s_hv.c
> +++ b/arch/powerpc/kvm/book3s_hv.c
> @@ -140,7 +140,7 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
> {
> 	struct kvm *kvm = vcpu->kvm;
> 	unsigned long len, nb;
> -	void *va;
> +	void *va, *free_va, *tvpa, *dtl, *ss;
> 	struct kvm_vcpu *tvcpu;
> 	int err = H_PARAMETER;
> 
> @@ -152,6 +152,8 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
> 	flags &= 7;
> 	if (flags == 0 || flags == 4)

This could probably use a new variable name. Also, what do 0 and 4 mean? Constant defines would be nice here.

> 		return H_PARAMETER;
> +	free_va = va = NULL;
> +	len = 0;
> 	if (flags < 4) {
> 		if (vpa & 0x7f)
> 			return H_PARAMETER;
> @@ -165,65 +167,122 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,

[pasted from real source]
>                 va = kvmppc_pin_guest_page(kvm, vpa, &nb);

Here you're pinning the page, setting va to that temporarily available address.

[...]

> 			len = *(unsigned short *)(va + 4);

This is a condition on (flags <= 1). We bail out on flags == 0 a few lines up. Just move this whole thing into the respective function handlers.

> 		else
> 			len = *(unsigned int *)(va + 4);

va + 4 isn't really descriptive. Is this a defined struct? Why not actually define one which you can just read data from? Or at least make this a define too. Reading random numbers in code is barely readable.

> +		free_va = va;

Now free_va is the temporarily available address.

> 		if (len > nb)
> 			goto out_unpin;
> -		switch (flags) {
> -		case 1:		/* register VPA */
> -			if (len < 640)
> -				goto out_unpin;
> -			if (tvcpu->arch.vpa)
> -				kvmppc_unpin_guest_page(kvm, vcpu->arch.vpa);
> -			tvcpu->arch.vpa = va;
> -			init_vpa(vcpu, va);
> -			break;
> -		case 2:		/* register DTL */
> -			if (len < 48)
> -				goto out_unpin;
> -			len -= len % 48;
> -			if (tvcpu->arch.dtl)
> -				kvmppc_unpin_guest_page(kvm, vcpu->arch.dtl);
> -			tvcpu->arch.dtl = va;
> -			tvcpu->arch.dtl_end = va + len;
> +	}
> +
> +	spin_lock(&tvcpu->arch.vpa_update_lock);
> +
> +	switch (flags) {
> +	case 1:		/* register VPA */

Yeah, these could also use defines :)


> +		if (len < 640)
> 			break;
> -		case 3:		/* register SLB shadow buffer */
> -			if (len < 16)
> -				goto out_unpin;
> -			if (tvcpu->arch.slb_shadow)
> -				kvmppc_unpin_guest_page(kvm, vcpu->arch.slb_shadow);
> -			tvcpu->arch.slb_shadow = va;
> +		free_va = tvcpu->arch.next_vpa;
> +		tvcpu->arch.next_vpa = va;

Now you're setting next_vpa to this temporarily available address? But next_vpa will be used after va is getting free'd, no? Or is that why you have free_va?

Wouldn't it be easier to just map it every time we actually use it and only shove the GPA around? We could basically save ourselves a lot of the logic here.


Alex
Paul Mackerras Jan. 17, 2012, 5:56 a.m. UTC | #2
On Mon, Jan 16, 2012 at 02:04:29PM +0100, Alexander Graf wrote:
> 
> On 20.12.2011, at 11:22, Paul Mackerras wrote:

> > @@ -152,6 +152,8 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
> > 	flags &= 7;
> > 	if (flags == 0 || flags == 4)
> 
> This could probably use a new variable name. Also, what do 0 and 4 mean? Constant defines would be nice here.

Those constants are defined in PAPR as being a subfunction code
indicating what sort of area and whether it is to be registered or
unregistered.  I'll make up some names for them.

> [pasted from real source]
> >                 va = kvmppc_pin_guest_page(kvm, vpa, &nb);
> 
> Here you're pinning the page, setting va to that temporarily available address.

Well, it's not just temporarily available, it's available until we
unpin it, since we increment the page count, which inhibits migration.

> > 			len = *(unsigned int *)(va + 4);
> 
> va + 4 isn't really descriptive. Is this a defined struct? Why not actually define one which you can just read data from? Or at least make this a define too. Reading random numbers in code is barely readable.

It's not really a struct, at least not one that is used for anything
else.  PAPR defines that the length of the buffer has to be placed in
the second 32-bit word at registration time.

> 
> > +		free_va = va;
> 
> Now free_va is the temporarily available address.
...
> > +		free_va = tvcpu->arch.next_vpa;
> > +		tvcpu->arch.next_vpa = va;
> 
> Now you're setting next_vpa to this temporarily available address? But next_vpa will be used after va is getting free'd, no? Or is that why you have free_va?

Yes; here we are freeing any previously-set value of next_vpa.  The
idea of free_va is that it is initially set to va so that we correctly
unpin va if any error occurs.  But if there is no error, va gets put
into next_vpa and we free anything that was previously in next_vpa
instead.

> 
> Wouldn't it be easier to just map it every time we actually use it and only shove the GPA around? We could basically save ourselves a lot of the logic here.

There are fields in the VPA that we really want to be able to access
from real mode, for instance the fields that indicate whether we need
to save the FPR and/or VR values.  As far as the DTL is concerned, we
could in fact use copy_to_user to access it, so it doesn't strictly
need to be pinned.  We don't currently use the slb_shadow buffer, but
if we did we would need to access it from real mode, since we would be
reading it in order to set up guest SLB entries.

The other thing is that the VPA registration/unregistration is only
done a few times in the life of the guest, whereas we use the VPAs
constantly while the guest is running.  So it is more efficient to do
more of the work at registration time to make it quicker to access the
VPAs.

I'll send revised patches.  There's a small change I want to make to
patch 2 to avoid putting a very large stolen time value in the first
entry that gets put in after the DTL is registered, which can happen
currently if the DTL gets registered some time after the guest started
running.

Paul.
Alexander Graf Jan. 17, 2012, 9:27 a.m. UTC | #3
On 17.01.2012, at 06:56, Paul Mackerras <paulus@samba.org> wrote:

> On Mon, Jan 16, 2012 at 02:04:29PM +0100, Alexander Graf wrote:
>> 
>> On 20.12.2011, at 11:22, Paul Mackerras wrote:
> 
>>> @@ -152,6 +152,8 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
>>>    flags &= 7;
>>>    if (flags == 0 || flags == 4)
>> 
>> This could probably use a new variable name. Also, what do 0 and 4 mean? Constant defines would be nice here.
> 
> Those constants are defined in PAPR as being a subfunction code
> indicating what sort of area and whether it is to be registered or
> unregistered.  I'll make up some names for them.
> 
>> [pasted from real source]
>>>                va = kvmppc_pin_guest_page(kvm, vpa, &nb);
>> 
>> Here you're pinning the page, setting va to that temporarily available address.
> 
> Well, it's not just temporarily available, it's available until we
> unpin it, since we increment the page count, which inhibits migration.
> 
>>>            len = *(unsigned int *)(va + 4);
>> 
>> va + 4 isn't really descriptive. Is this a defined struct? Why not actually define one which you can just read data from? Or at least make this a define too. Reading random numbers in code is barely readable.
> 
> It's not really a struct, at least not one that is used for anything
> else.  PAPR defines that the length of the buffer has to be placed in
> the second 32-bit word at registration time.
> 
>> 
>>> +        free_va = va;
>> 
>> Now free_va is the temporarily available address.
> ...
>>> +        free_va = tvcpu->arch.next_vpa;
>>> +        tvcpu->arch.next_vpa = va;
>> 
>> Now you're setting next_vpa to this temporarily available address? But next_vpa will be used after va is getting free'd, no? Or is that why you have free_va?
> 
> Yes; here we are freeing any previously-set value of next_vpa.  The
> idea of free_va is that it is initially set to va so that we correctly
> unpin va if any error occurs.  But if there is no error, va gets put
> into next_vpa and we free anything that was previously in next_vpa
> instead.
> 
>> 
>> Wouldn't it be easier to just map it every time we actually use it and only shove the GPA around? We could basically save ourselves a lot of the logic here.
> 
> There are fields in the VPA that we really want to be able to access
> from real mode, for instance the fields that indicate whether we need
> to save the FPR and/or VR values.  As far as the DTL is concerned, we
> could in fact use copy_to_user to access it, so it doesn't strictly
> need to be pinned.  We don't currently use the slb_shadow buffer, but
> if we did we would need to access it from real mode, since we would be
> reading it in order to set up guest SLB entries.
> 
> The other thing is that the VPA registration/unregistration is only
> done a few times in the life of the guest, whereas we use the VPAs
> constantly while the guest is running.  So it is more efficient to do
> more of the work at registration time to make it quicker to access the
> VPAs.

The thing I was getting at was not the map during the lifetime, but the map during registration. Currently we have:

1) Set VPA to x
2) Assign feature y to VPA
3) Use VPA

1 and 2 are the slow path, 3 occurs more frequently. So we want 3 to be fast. 1 and 2 don't matter that much wrt performance.

You are currently mapping the VPA at step 1, which gets you into this map/unmap mess trying to free the previous mapping. If you moved the map to step 2 and only stored the GPA at step 1, all map+unmap operations except for final unmaps would be in one spot, so you wouldn't need to construct this big complex state machine.

I hope that makes it more clear :)

Alex

> 
> I'll send revised patches.  There's a small change I want to make to
> patch 2 to avoid putting a very large stolen time value in the first
> entry that gets put in after the DTL is registered, which can happen
> currently if the DTL gets registered some time after the guest started
> running.
> 
> Paul.
Paul Mackerras Jan. 17, 2012, 11:31 a.m. UTC | #4
On Tue, Jan 17, 2012 at 10:27:26AM +0100, Alexander Graf wrote:

> The thing I was getting at was not the map during the lifetime, but
> the map during registration. Currently we have:
> 
> 1) Set VPA to x
> 2) Assign feature y to VPA
> 3) Use VPA
> 
> 1 and 2 are the slow path, 3 occurs more frequently. So we want 3 to
> be fast. 1 and 2 don't matter that much wrt performance.
> 
> You are currently mapping the VPA at step 1, which gets you into this
> map/unmap mess trying to free the previous mapping. If you moved the
> map to step 2 and only stored the GPA at step 1, all map+unmap
> operations except for final unmaps would be in one spot, so you
> wouldn't need to construct this big complex state machine.

That might simplify things - I'll try it and see.  The worry with
doing the map/pin at 2 is that if anything goes wrong we no longer
have the opportunity to return an error for the H_REGISTER_VPA call,
so I'll have to at least do some checking in 1, leading to possibly
more code overall.

Paul.
Alexander Graf Jan. 17, 2012, 12:19 p.m. UTC | #5
On 17.01.2012, at 12:31, Paul Mackerras wrote:

> On Tue, Jan 17, 2012 at 10:27:26AM +0100, Alexander Graf wrote:
> 
>> The thing I was getting at was not the map during the lifetime, but
>> the map during registration. Currently we have:
>> 
>> 1) Set VPA to x
>> 2) Assign feature y to VPA
>> 3) Use VPA
>> 
>> 1 and 2 are the slow path, 3 occurs more frequently. So we want 3 to
>> be fast. 1 and 2 don't matter that much wrt performance.
>> 
>> You are currently mapping the VPA at step 1, which gets you into this
>> map/unmap mess trying to free the previous mapping. If you moved the
>> map to step 2 and only stored the GPA at step 1, all map+unmap
>> operations except for final unmaps would be in one spot, so you
>> wouldn't need to construct this big complex state machine.
> 
> That might simplify things - I'll try it and see.  The worry with
> doing the map/pin at 2 is that if anything goes wrong we no longer
> have the opportunity to return an error for the H_REGISTER_VPA call,
> so I'll have to at least do some checking in 1, leading to possibly
> more code overall.

Well, then map and unmap it in step 1 and map it in step 2 again. We're in the slow path so performance isn't critical. Readability and maintainability however are :)


Alex
diff mbox

Patch

diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 1cb6e52..b1126c1 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -82,7 +82,7 @@  struct kvm_vcpu;
 
 struct lppaca;
 struct slb_shadow;
-struct dtl;
+struct dtl_entry;
 
 struct kvm_vm_stat {
 	u32 remote_tlb_flush;
@@ -449,9 +449,18 @@  struct kvm_vcpu_arch {
 	u32 last_inst;
 
 	struct lppaca *vpa;
+	struct lppaca *next_vpa;
 	struct slb_shadow *slb_shadow;
-	struct dtl *dtl;
-	struct dtl *dtl_end;
+	struct slb_shadow *next_slb_shadow;
+	struct dtl_entry *dtl;
+	struct dtl_entry *dtl_end;
+	struct dtl_entry *dtl_ptr;
+	struct dtl_entry *next_dtl;
+	struct dtl_entry *next_dtl_end;
+	u8 vpa_pending;
+	u8 slb_shadow_pending;
+	u8 dtl_pending;
+	spinlock_t vpa_update_lock;
 
 	wait_queue_head_t *wqp;
 	struct kvmppc_vcore *vcore;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index c11d960..6f6e88d 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -140,7 +140,7 @@  static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
 {
 	struct kvm *kvm = vcpu->kvm;
 	unsigned long len, nb;
-	void *va;
+	void *va, *free_va, *tvpa, *dtl, *ss;
 	struct kvm_vcpu *tvcpu;
 	int err = H_PARAMETER;
 
@@ -152,6 +152,8 @@  static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
 	flags &= 7;
 	if (flags == 0 || flags == 4)
 		return H_PARAMETER;
+	free_va = va = NULL;
+	len = 0;
 	if (flags < 4) {
 		if (vpa & 0x7f)
 			return H_PARAMETER;
@@ -165,65 +167,122 @@  static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
 			len = *(unsigned short *)(va + 4);
 		else
 			len = *(unsigned int *)(va + 4);
+		free_va = va;
 		if (len > nb)
 			goto out_unpin;
-		switch (flags) {
-		case 1:		/* register VPA */
-			if (len < 640)
-				goto out_unpin;
-			if (tvcpu->arch.vpa)
-				kvmppc_unpin_guest_page(kvm, vcpu->arch.vpa);
-			tvcpu->arch.vpa = va;
-			init_vpa(vcpu, va);
-			break;
-		case 2:		/* register DTL */
-			if (len < 48)
-				goto out_unpin;
-			len -= len % 48;
-			if (tvcpu->arch.dtl)
-				kvmppc_unpin_guest_page(kvm, vcpu->arch.dtl);
-			tvcpu->arch.dtl = va;
-			tvcpu->arch.dtl_end = va + len;
+	}
+
+	spin_lock(&tvcpu->arch.vpa_update_lock);
+
+	switch (flags) {
+	case 1:		/* register VPA */
+		if (len < 640)
 			break;
-		case 3:		/* register SLB shadow buffer */
-			if (len < 16)
-				goto out_unpin;
-			if (tvcpu->arch.slb_shadow)
-				kvmppc_unpin_guest_page(kvm, vcpu->arch.slb_shadow);
-			tvcpu->arch.slb_shadow = va;
+		free_va = tvcpu->arch.next_vpa;
+		tvcpu->arch.next_vpa = va;
+		tvcpu->arch.vpa_pending = 1;
+		init_vpa(tvcpu, va);
+		err = 0;
+		break;
+	case 2:		/* register DTL */
+		if (len < 48)
 			break;
+		len -= len % 48;
+		tvpa = tvcpu->arch.vpa;
+		if (tvcpu->arch.vpa_pending)
+			tvpa = tvcpu->arch.next_vpa;
+		err = H_RESOURCE;
+		if (tvpa) {
+			free_va = tvcpu->arch.next_dtl;
+			tvcpu->arch.next_dtl = va;
+			tvcpu->arch.next_dtl_end = va + len;
+			tvcpu->arch.dtl_pending = 1;
+			err = 0;
 		}
-	} else {
-		switch (flags) {
-		case 5:		/* unregister VPA */
-			if (tvcpu->arch.slb_shadow || tvcpu->arch.dtl)
-				return H_RESOURCE;
-			if (!tvcpu->arch.vpa)
-				break;
-			kvmppc_unpin_guest_page(kvm, tvcpu->arch.vpa);
-			tvcpu->arch.vpa = NULL;
-			break;
-		case 6:		/* unregister DTL */
-			if (!tvcpu->arch.dtl)
-				break;
-			kvmppc_unpin_guest_page(kvm, tvcpu->arch.dtl);
-			tvcpu->arch.dtl = NULL;
-			break;
-		case 7:		/* unregister SLB shadow buffer */
-			if (!tvcpu->arch.slb_shadow)
-				break;
-			kvmppc_unpin_guest_page(kvm, tvcpu->arch.slb_shadow);
-			tvcpu->arch.slb_shadow = NULL;
+		break;
+	case 3:		/* register SLB shadow buffer */
+		if (len < 16)
 			break;
+		tvpa = tvcpu->arch.vpa;
+		if (tvcpu->arch.vpa_pending)
+			tvpa = tvcpu->arch.next_vpa;
+		err = H_RESOURCE;
+		if (tvpa) {
+			free_va = tvcpu->arch.next_slb_shadow;
+			tvcpu->arch.next_slb_shadow = va;
+			tvcpu->arch.slb_shadow_pending = 1;
+			err = 0;
+		}
+		break;
+
+	case 5:		/* unregister VPA */
+		dtl = tvcpu->arch.dtl;
+		if (tvcpu->arch.dtl_pending)
+			dtl = tvcpu->arch.next_dtl;
+		ss = tvcpu->arch.slb_shadow;
+		if (tvcpu->arch.slb_shadow_pending)
+			ss = tvcpu->arch.next_slb_shadow;
+		err = H_RESOURCE;
+		if (!dtl && !ss) {
+			free_va = tvcpu->arch.next_vpa;
+			tvcpu->arch.next_vpa = NULL;
+			tvcpu->arch.vpa_pending = 1;
+			err = 0;
 		}
+		break;
+	case 6:		/* unregister DTL */
+		free_va = tvcpu->arch.next_dtl;
+		tvcpu->arch.next_dtl = NULL;
+		tvcpu->arch.dtl_pending = 1;
+		err = 0;
+		break;
+	case 7:		/* unregister SLB shadow buffer */
+		free_va = tvcpu->arch.next_slb_shadow;
+		tvcpu->arch.next_slb_shadow = NULL;
+		tvcpu->arch.slb_shadow_pending = 1;
+		err = 0;
+		break;
 	}
-	return H_SUCCESS;
 
+	spin_unlock(&tvcpu->arch.vpa_update_lock);
  out_unpin:
-	kvmppc_unpin_guest_page(kvm, va);
+	if (free_va)
+		kvmppc_unpin_guest_page(kvm, free_va);
 	return err;
 }
 
+static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
+{
+	struct kvm *kvm = vcpu->kvm;
+
+	spin_lock(&vcpu->arch.vpa_update_lock);
+	if (vcpu->arch.vpa_pending) {
+		if (vcpu->arch.vpa)
+			kvmppc_unpin_guest_page(kvm, vcpu->arch.vpa);
+		vcpu->arch.vpa = vcpu->arch.next_vpa;
+		vcpu->arch.next_vpa = NULL;
+		vcpu->arch.vpa_pending = 0;
+	}
+	if (vcpu->arch.slb_shadow_pending) {
+		if (vcpu->arch.slb_shadow)
+			kvmppc_unpin_guest_page(kvm, vcpu->arch.slb_shadow);
+		vcpu->arch.slb_shadow = vcpu->arch.next_slb_shadow;
+		vcpu->arch.next_slb_shadow = NULL;
+		vcpu->arch.slb_shadow_pending = 0;
+	}
+	if (vcpu->arch.dtl_pending) {
+		if (vcpu->arch.dtl)
+			kvmppc_unpin_guest_page(kvm, vcpu->arch.dtl);
+		vcpu->arch.dtl = vcpu->arch.dtl_ptr = vcpu->arch.next_dtl;
+		vcpu->arch.dtl_end = vcpu->arch.next_dtl_end;
+		vcpu->arch.next_dtl = NULL;
+		vcpu->arch.dtl_pending = 0;
+		if (vcpu->arch.vpa)	/* (should always be non-NULL) */
+			vcpu->arch.vpa->dtl_idx = 0;
+	}
+	spin_unlock(&vcpu->arch.vpa_update_lock);
+}
+
 int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 {
 	unsigned long req = kvmppc_get_gpr(vcpu, 3);
@@ -509,12 +568,20 @@  out:
 
 void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
 {
+	spin_lock(&vcpu->arch.vpa_update_lock);
 	if (vcpu->arch.dtl)
 		kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.dtl);
+	if (vcpu->arch.dtl_pending && vcpu->arch.next_dtl)
+		kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.next_dtl);
 	if (vcpu->arch.slb_shadow)
 		kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.slb_shadow);
+	if (vcpu->arch.slb_shadow_pending && vcpu->arch.next_slb_shadow)
+		kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.next_slb_shadow);
 	if (vcpu->arch.vpa)
 		kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.vpa);
+	if (vcpu->arch.vpa_pending && vcpu->arch.next_vpa)
+		kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.next_vpa);
+	spin_unlock(&vcpu->arch.vpa_update_lock);
 	kvm_vcpu_uninit(vcpu);
 	kfree(vcpu);
 }
@@ -681,8 +748,12 @@  static int kvmppc_run_core(struct kvmppc_vcore *vc)
 	vc->in_guest = 0;
 	vc->pcpu = smp_processor_id();
 	vc->napping_threads = 0;
-	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
+	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
 		kvmppc_start_thread(vcpu);
+		if (vcpu->arch.vpa_pending || vcpu->arch.slb_shadow_pending ||
+		    vcpu->arch.dtl_pending)
+			kvmppc_update_vpas(vcpu);
+	}
 
 	preempt_disable();
 	spin_unlock(&vc->lock);