[RFC,v3,1/9] KVM: s390: optimize detection of started vcpus

Message ID: 20170821203530.9266-2-rkrcmar@redhat.com
State: New

Commit Message

Radim Krčmář Aug. 21, 2017, 8:35 p.m. UTC
We can add a variable instead of scanning all online VCPUs to know how
many are started.  We can't trivially tell which VCPU is the last one,
though.

Suggested-by: Cornelia Huck <cohuck@redhat.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
---
 arch/s390/include/asm/kvm_host.h |  1 +
 arch/s390/kvm/kvm-s390.c         | 39 +++++++++++++++------------------------
 2 files changed, 16 insertions(+), 24 deletions(-)
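
[Editor's note: for orientation, these are the IBS transitions the new
counter has to drive, as implemented in the patch below:

    /*
     * start, 0 -> 1: enable IBS on the starting VCPU (it runs alone)
     * start, 1 -> 2: disable IBS on all VCPUs
     * stop,  2 -> 1: enable IBS on the single VCPU left started;
     *                finding that VCPU still requires a scan, hence
     *                "we can't trivially tell which VCPU is the last one"
     */
]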

Comments

Christian Borntraeger Aug. 22, 2017, 7:26 a.m. UTC | #1
On 08/21/2017 10:35 PM, Radim Krčmář wrote:
> We can add a variable instead of scanning all online VCPUs to know how
> many are started.  We can't trivially tell which VCPU is the last one,
> though.
> 
> Suggested-by: Cornelia Huck <cohuck@redhat.com>
> Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>



> @@ -3453,16 +3447,12 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
> 
>  void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
>  {
> -	int i, online_vcpus, started_vcpus = 0;

Here you remove i.

> -	struct kvm_vcpu *started_vcpu = NULL;
> -
>  	if (is_vcpu_stopped(vcpu))
>  		return;
> 
>  	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
>  	/* Only one cpu at a time may enter/leave the STOPPED state. */
>  	spin_lock(&vcpu->kvm->arch.start_stop_lock);
> -	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
> 
>  	/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
>  	kvm_s390_clear_stop_irq(vcpu);
> @@ -3470,19 +3460,20 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
>  	atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
>  	__disable_ibs_on_vcpu(vcpu);
> 
> -	for (i = 0; i < online_vcpus; i++) {
> -		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
> -			started_vcpus++;
> -			started_vcpu = vcpu->kvm->vcpus[i];
> -		}
> -	}
> +	vcpu->kvm->arch.started_vcpus--;
> +
> +	if (vcpu->kvm->arch.started_vcpus == 1) {
> +		struct kvm_vcpu *started_vcpu;
> 
> -	if (started_vcpus == 1) {
>  		/*
> -		 * As we only have one VCPU left, we want to enable the
> -		 * IBS facility for that VCPU to speed it up.
> +		 * As we only have one VCPU left, we want to enable the IBS
> +		 * facility for that VCPU to speed it up.
>  		 */
> -		__enable_ibs_on_vcpu(started_vcpu);
> +		kvm_for_each_vcpu(i, started_vcpu, vcpu->kvm)

But here you still need i, so this will not compile.
> +			if (!is_vcpu_stopped(started_vcpu)) {
> +				__enable_ibs_on_vcpu(started_vcpu);
> +				break;
> +			}
>  	}
> 
>  	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
>
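
[Editor's note: a minimal sketch of the fix Christian's comments point
at: the new kvm_for_each_vcpu() loop still uses i, so the iterator must
stay declared. Illustrative only, not a hunk posted in this thread:

	if (vcpu->kvm->arch.started_vcpus == 1) {
		struct kvm_vcpu *started_vcpu;
		unsigned int i;	/* kvm_for_each_vcpu() iterator */

		/*
		 * As we only have one VCPU left, we want to enable the IBS
		 * facility for that VCPU to speed it up.
		 */
		kvm_for_each_vcpu(i, started_vcpu, vcpu->kvm)
			if (!is_vcpu_stopped(started_vcpu)) {
				__enable_ibs_on_vcpu(started_vcpu);
				break;
			}
	}
]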
David Hildenbrand Aug. 22, 2017, 11:31 a.m. UTC | #2
On 21.08.2017 22:35, Radim Krčmář wrote:
> We can add a variable instead of scanning all online VCPUs to know how
> many are started.  We can't trivially tell which VCPU is the last one,
> though.

You could keep the started vcpus in a list. Then you could drop the
unsigned started_vcpus counter:

No started vcpus: start pointer is NULL (empty list)
Single started vcpu: only one element in the list (easy to check)
More than one started vcpu: more than one element in the list (easy to check)
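
[Editor's note: as an illustration of that idea, a minimal sketch using
<linux/list.h>; the field names started_list and started_node are
hypothetical, and all updates happen under the existing start_stop_lock:

#include <linux/list.h>

struct kvm_arch {
	/* ... */
	spinlock_t start_stop_lock;
	struct list_head started_list;	/* hypothetical new field */
};

struct kvm_vcpu_arch {
	/* ... */
	struct list_head started_node;	/* hypothetical new field */
};

/* Both called with start_stop_lock held. */
static void set_vcpu_started(struct kvm_vcpu *vcpu)
{
	list_add(&vcpu->arch.started_node, &vcpu->kvm->arch.started_list);
}

static void set_vcpu_stopped(struct kvm_vcpu *vcpu)
{
	list_del(&vcpu->arch.started_node);
}

/*
 * The three cases become cheap list predicates:
 *   none started:    list_empty(&kvm->arch.started_list)
 *   exactly one:     list_is_singular(&kvm->arch.started_list)
 *   more than one:   neither of the above
 * and the 2->1 transition no longer needs a scan:
 */
static struct kvm_vcpu *last_started_vcpu(struct kvm *kvm)
{
	return list_first_entry(&kvm->arch.started_list,
				struct kvm_vcpu, arch.started_node);
}
]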


> 
> Suggested-by: Cornelia Huck <cohuck@redhat.com>
> Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
> ---
>  arch/s390/include/asm/kvm_host.h |  1 +
>  arch/s390/kvm/kvm-s390.c         | 39 +++++++++++++++------------------------
>  2 files changed, 16 insertions(+), 24 deletions(-)
> 
> diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
> index a409d5991934..be0d0bdf585b 100644
> --- a/arch/s390/include/asm/kvm_host.h
> +++ b/arch/s390/include/asm/kvm_host.h
> @@ -735,6 +735,7 @@ struct kvm_arch{
>  	struct mutex ipte_mutex;
>  	struct ratelimit_state sthyi_limit;
>  	spinlock_t start_stop_lock;
> +	unsigned started_vcpus;
>  	struct sie_page2 *sie_page2;
>  	struct kvm_s390_cpu_model model;
>  	struct kvm_s390_crypto crypto;
> diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
> index 9f23a9e81a91..1534778a3c66 100644
> --- a/arch/s390/kvm/kvm-s390.c
> +++ b/arch/s390/kvm/kvm-s390.c
> @@ -3414,25 +3414,17 @@ static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
>  
>  void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
>  {
> -	int i, online_vcpus, started_vcpus = 0;
> -
>  	if (!is_vcpu_stopped(vcpu))
>  		return;
>  
>  	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
>  	/* Only one cpu at a time may enter/leave the STOPPED state. */
>  	spin_lock(&vcpu->kvm->arch.start_stop_lock);
> -	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
>  
> -	for (i = 0; i < online_vcpus; i++) {
> -		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
> -			started_vcpus++;
> -	}
> -
> -	if (started_vcpus == 0) {
> +	if (vcpu->kvm->arch.started_vcpus == 0) {
>  		/* we're the only active VCPU -> speed it up */
>  		__enable_ibs_on_vcpu(vcpu);
> -	} else if (started_vcpus == 1) {
> +	} else if (vcpu->kvm->arch.started_vcpus == 1) {
>  		/*
>  		 * As we are starting a second VCPU, we have to disable
>  		 * the IBS facility on all VCPUs to remove potentially
> @@ -3441,6 +3433,8 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
>  		__disable_ibs_on_all_vcpus(vcpu->kvm);
>  	}
>  
> +	vcpu->kvm->arch.started_vcpus++;
> +
>  	atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
>  	/*
>  	 * Another VCPU might have used IBS while we were offline.
> @@ -3453,16 +3447,12 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
>  
>  void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
>  {
> -	int i, online_vcpus, started_vcpus = 0;
> -	struct kvm_vcpu *started_vcpu = NULL;
> -
>  	if (is_vcpu_stopped(vcpu))
>  		return;
>  
>  	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
>  	/* Only one cpu at a time may enter/leave the STOPPED state. */
>  	spin_lock(&vcpu->kvm->arch.start_stop_lock);
> -	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
>  
>  	/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
>  	kvm_s390_clear_stop_irq(vcpu);
> @@ -3470,19 +3460,20 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
>  	atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
>  	__disable_ibs_on_vcpu(vcpu);
>  
> -	for (i = 0; i < online_vcpus; i++) {
> -		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
> -			started_vcpus++;
> -			started_vcpu = vcpu->kvm->vcpus[i];
> -		}
> -	}
> +	vcpu->kvm->arch.started_vcpus--;
> +
> +	if (vcpu->kvm->arch.started_vcpus == 1) {
> +		struct kvm_vcpu *started_vcpu;
>  
> -	if (started_vcpus == 1) {
>  		/*
> -		 * As we only have one VCPU left, we want to enable the
> -		 * IBS facility for that VCPU to speed it up.
> +		 * As we only have one VCPU left, we want to enable the IBS
> +		 * facility for that VCPU to speed it up.
>  		 */
> -		__enable_ibs_on_vcpu(started_vcpu);
> +		kvm_for_each_vcpu(i, started_vcpu, vcpu->kvm)
> +			if (!is_vcpu_stopped(started_vcpu)) {
> +				__enable_ibs_on_vcpu(started_vcpu);
> +				break;
> +			}
>  	}
>  
>  	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
>
Cornelia Huck Aug. 29, 2017, 11:23 a.m. UTC | #3
On Tue, 22 Aug 2017 13:31:27 +0200
David Hildenbrand <david@redhat.com> wrote:

> On 21.08.2017 22:35, Radim Krčmář wrote:
> > We can add a variable instead of scanning all online VCPUs to know how
> > many are started.  We can't trivially tell which VCPU is the last one,
> > though.  
> 
> You could keep the started vcpus in a list. Then you could drop the
> unsigned started_vcpus counter:
> 
> No started vcpus: start pointer is NULL (empty list)
> Single started vcpu: only one element in the list (easy to check)
> More than one started vcpu: more than one element in the list (easy to check)

I'm not sure the added complication of keeping a list buys us much
here: We only have the "look for the last vcpu not stopped" operation
for the 2->1 transition.
Cornelia Huck Aug. 29, 2017, 11:26 a.m. UTC | #4
On Mon, 21 Aug 2017 22:35:22 +0200
Radim Krčmář <rkrcmar@redhat.com> wrote:

> We can add a variable instead of scanning all online VCPUs to know how
> many are started.  We can't trivially tell which VCPU is the last one,
> though.
> 
> Suggested-by: Cornelia Huck <cohuck@redhat.com>
> Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
> ---
>  arch/s390/include/asm/kvm_host.h |  1 +
>  arch/s390/kvm/kvm-s390.c         | 39 +++++++++++++++------------------------
>  2 files changed, 16 insertions(+), 24 deletions(-)

Modulo the bug noticed by Christian, I think this looks good.
David Hildenbrand Aug. 29, 2017, 12:05 p.m. UTC | #5
On 29.08.2017 13:23, Cornelia Huck wrote:
> On Tue, 22 Aug 2017 13:31:27 +0200
> David Hildenbrand <david@redhat.com> wrote:
> 
>> On 21.08.2017 22:35, Radim Krčmář wrote:
>>> We can add a variable instead of scanning all online VCPUs to know how
>>> many are started.  We can't trivially tell which VCPU is the last one,
>>> though.  
>>
>> You could keep the started vcpus in a list. Then you could drop the
>> unsigned started_vcpus counter:
>>
>> No started vcpus: start pointer is NULL (empty list)
>> Single started vcpu: only one element in the list (easy to check)
>> More than one started vcpu: more than one element in the list (easy to check)
> 
> I'm not sure the added complication of keeping a list buys us much
> here: We only have the "look for the last vcpu not stopped" operation
> for the 2->1 transition.
> 

That is wrong, we also have to know the last remaining (started) VCPU.
For that, right now we have to iterate over all VCPUs.

There shouldn't be much complexity. We already perform changes under a
lock, so it is as simple as adding/removing from the list.

Detecting the transitions boils down to looking at two pointers.
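
[Editor's note: the relevant <linux/list.h> helpers (simplified) are
indeed exactly such pointer comparisons:

static inline int list_empty(const struct list_head *head)
{
	return head->next == head;
}

static inline int list_is_singular(const struct list_head *head)
{
	return !list_empty(head) && (head->next == head->prev);
}
]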
Cornelia Huck Aug. 29, 2017, 12:42 p.m. UTC | #6
On Tue, 29 Aug 2017 14:05:40 +0200
David Hildenbrand <david@redhat.com> wrote:

> On 29.08.2017 13:23, Cornelia Huck wrote:
> > On Tue, 22 Aug 2017 13:31:27 +0200
> > David Hildenbrand <david@redhat.com> wrote:
> >   
> >> On 21.08.2017 22:35, Radim Krčmář wrote:  
> >>> We can add a variable instead of scanning all online VCPUs to know how
> >>> many are started.  We can't trivially tell which VCPU is the last one,
> >>> though.    
> >>
> >> You could keep the started vcpus in a list. Then you could drop the
> >> unsigned started_vcpus counter:
> >>
> >> No started vcpus: start pointer is NULL (empty list)
> >> Single started vcpu: only one element in the list (easy to check)
> >> More than one started vcpu: more than one element in the list (easy to check)
> > 
> > I'm not sure the added complication of keeping a list buys us much
> > here: We only have the "look for the last vcpu not stopped" operation
> > for the 2->1 transition.
> >   
> 
> That is wrong, we also have to know the last remaining (started) VCPU.
> For that, right now we have to iterate over all VCPUs.

Yes, but that's only for the 2->1 transition... and all in all that is
still much better than before.

> 
> There shouldn't be much complexity. We already perform changes under a
> lock, so it is as simple as adding/removing from the list.
> 
> Detecting the transitions boils down to looking at two pointers.

Having a private vcpu list feels a bit wrong... and I don't really see
much benefit.

Patch

diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index a409d5991934..be0d0bdf585b 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -735,6 +735,7 @@  struct kvm_arch{
 	struct mutex ipte_mutex;
 	struct ratelimit_state sthyi_limit;
 	spinlock_t start_stop_lock;
+	unsigned started_vcpus;
 	struct sie_page2 *sie_page2;
 	struct kvm_s390_cpu_model model;
 	struct kvm_s390_crypto crypto;
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 9f23a9e81a91..1534778a3c66 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -3414,25 +3414,17 @@  static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
 
 void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
 {
-	int i, online_vcpus, started_vcpus = 0;
-
 	if (!is_vcpu_stopped(vcpu))
 		return;
 
 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
 	/* Only one cpu at a time may enter/leave the STOPPED state. */
 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
-	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
 
-	for (i = 0; i < online_vcpus; i++) {
-		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
-			started_vcpus++;
-	}
-
-	if (started_vcpus == 0) {
+	if (vcpu->kvm->arch.started_vcpus == 0) {
 		/* we're the only active VCPU -> speed it up */
 		__enable_ibs_on_vcpu(vcpu);
-	} else if (started_vcpus == 1) {
+	} else if (vcpu->kvm->arch.started_vcpus == 1) {
 		/*
 		 * As we are starting a second VCPU, we have to disable
 		 * the IBS facility on all VCPUs to remove potentially
@@ -3441,6 +3433,8 @@  void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
 		__disable_ibs_on_all_vcpus(vcpu->kvm);
 	}
 
+	vcpu->kvm->arch.started_vcpus++;
+
 	atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 	/*
 	 * Another VCPU might have used IBS while we were offline.
@@ -3453,16 +3447,12 @@  void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
 
 void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
 {
-	int i, online_vcpus, started_vcpus = 0;
-	struct kvm_vcpu *started_vcpu = NULL;
-
 	if (is_vcpu_stopped(vcpu))
 		return;
 
 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
 	/* Only one cpu at a time may enter/leave the STOPPED state. */
 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
-	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
 
 	/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
 	kvm_s390_clear_stop_irq(vcpu);
@@ -3470,19 +3460,20 @@  void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
 	atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 	__disable_ibs_on_vcpu(vcpu);
 
-	for (i = 0; i < online_vcpus; i++) {
-		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
-			started_vcpus++;
-			started_vcpu = vcpu->kvm->vcpus[i];
-		}
-	}
+	vcpu->kvm->arch.started_vcpus--;
+
+	if (vcpu->kvm->arch.started_vcpus == 1) {
+		struct kvm_vcpu *started_vcpu;
 
-	if (started_vcpus == 1) {
 		/*
-		 * As we only have one VCPU left, we want to enable the
-		 * IBS facility for that VCPU to speed it up.
+		 * As we only have one VCPU left, we want to enable the IBS
+		 * facility for that VCPU to speed it up.
 		 */
-		__enable_ibs_on_vcpu(started_vcpu);
+		kvm_for_each_vcpu(i, started_vcpu, vcpu->kvm)
+			if (!is_vcpu_stopped(started_vcpu)) {
+				__enable_ibs_on_vcpu(started_vcpu);
+				break;
+			}
 	}
 
 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);