
[SRU,Disco] arm64: KVM: Always set ICH_HCR_EL2.EN if GICv4 is enabled

Message ID: 20190523190034.17226-1-dann.frazier@canonical.com
State: New
Series: [SRU,Disco] arm64: KVM: Always set ICH_HCR_EL2.EN if GICv4 is enabled

Commit Message

dann frazier May 23, 2019, 7 p.m. UTC
From: Marc Zyngier <marc.zyngier@arm.com>

BugLink: https://bugs.launchpad.net/bugs/1829942

The normal interrupt flow is not to enable the vgic when no virtual
interrupt is to be injected (i.e. the LRs are empty). But when a guest
is likely to use GICv4 for LPIs, we absolutely need to switch it on
at all times. Otherwise, VLPIs only get delivered when there is something
in the LRs, which doesn't happen very often.

Reported-by: Nianyao Tang <tangnianyao@huawei.com>
Tested-by: Shameerali Kolothum Thodi <shameerali.kolothum.thodi@huawei.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
(cherry picked from commit ca71228b42a96908eca7658861eafacd227856c9)
Signed-off-by: dann frazier <dann.frazier@canonical.com>
---
 virt/kvm/arm/hyp/vgic-v3-sr.c |  4 ++--
 virt/kvm/arm/vgic/vgic.c      | 14 ++++++++++----
 2 files changed, 12 insertions(+), 6 deletions(-)
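
In short, the hyp save/restore paths now consider GICv4 residency as well as
the number of used LRs. A minimal annotated sketch of the restore-side check,
copied from the vgic-v3-sr.c hunk in the patch below with comments added (the
save side gains the same condition):

	/*
	 * With this change, ICH_HCR_EL2 (and therefore its EN bit) is
	 * written whenever the vcpu's vPE belongs to an ITS VM, i.e.
	 * GICv4 direct injection is in use, even if no LRs are
	 * populated. VLPIs can then be delivered at any time instead
	 * of only while software-injected interrupts are pending.
	 */
	if (used_lrs || cpu_if->its_vpe.its_vm) {
		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
		/* ... restore whatever LRs are actually in use ... */
	}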

Comments

Connor Kuehl May 28, 2019, 9:13 p.m. UTC | #1
On 5/23/19 12:00 PM, dann frazier wrote:
> From: Marc Zyngier <marc.zyngier@arm.com>
> 
> BugLink: https://bugs.launchpad.net/bugs/1829942
> 
> The normal interrupt flow is not to enable the vgic when no virtual
> interrupt is to be injected (i.e. the LRs are empty). But when a guest
> is likely to use GICv4 for LPIs, we absolutely need to switch it on
> at all times. Otherwise, VLPIs only get delivered when there is something
> in the LRs, which doesn't happen very often.
> 
> Reported-by: Nianyao Tang <tangnianyao@huawei.com>
> Tested-by: Shameerali Kolothum Thodi <shameerali.kolothum.thodi@huawei.com>
> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
> (cherry picked from commit ca71228b42a96908eca7658861eafacd227856c9)
> Signed-off-by: dann frazier <dann.frazier@canonical.com>

Acked-by: Connor Kuehl <connor.kuehl@canonical.com>

Kleber Sacilotto de Souza June 4, 2019, 1:45 p.m. UTC | #2
On 5/23/19 9:00 PM, dann frazier wrote:
> From: Marc Zyngier <marc.zyngier@arm.com>
> 
> BugLink: https://bugs.launchpad.net/bugs/1829942
> 
> The normal interrupt flow is not to enable the vgic when no virtual
> interrupt is to be injected (i.e. the LRs are empty). But when a guest
> is likely to use GICv4 for LPIs, we absolutely need to switch it on
> at all times. Otherwise, VLPIs only get delivered when there is something
> in the LRs, which doesn't happen very often.
> 
> Reported-by: Nianyao Tang <tangnianyao@huawei.com>
> Tested-by: Shameerali Kolothum Thodi <shameerali.kolothum.thodi@huawei.com>
> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
> (cherry picked from commit ca71228b42a96908eca7658861eafacd227856c9)
> Signed-off-by: dann frazier <dann.frazier@canonical.com>

For Cosmic and Disco:

Acked-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>

Kleber Sacilotto de Souza June 5, 2019, 10:56 a.m. UTC | #3
On 5/23/19 9:00 PM, dann frazier wrote:
> From: Marc Zyngier <marc.zyngier@arm.com>
> 
> BugLink: https://bugs.launchpad.net/bugs/1829942
> 
> The normal interrupt flow is not to enable the vgic when no virtual
> interrupt is to be injected (i.e. the LRs are empty). But when a guest
> is likely to use GICv4 for LPIs, we absolutely need to switch it on
> at all times. Otherwise, VLPIs only get delivered when there is something
> in the LRs, which doesn't happen very often.
> 
> Reported-by: Nianyao Tang <tangnianyao@huawei.com>
> Tested-by: Shameerali Kolothum Thodi <shameerali.kolothum.thodi@huawei.com>
> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
> (cherry picked from commit ca71228b42a96908eca7658861eafacd227856c9)
> Signed-off-by: dann frazier <dann.frazier@canonical.com>

This patch has already been applied to Disco as part of the
update to the 5.0.12 upstream stable release (LP: #1830934).

Thanks,
Kleber


Patch

diff --git a/virt/kvm/arm/hyp/vgic-v3-sr.c b/virt/kvm/arm/hyp/vgic-v3-sr.c
index 9652c453480f5..3c3f7cda95c75 100644
--- a/virt/kvm/arm/hyp/vgic-v3-sr.c
+++ b/virt/kvm/arm/hyp/vgic-v3-sr.c
@@ -222,7 +222,7 @@  void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
 		}
 	}
 
-	if (used_lrs) {
+	if (used_lrs || cpu_if->its_vpe.its_vm) {
 		int i;
 		u32 elrsr;
 
@@ -247,7 +247,7 @@  void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
 	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
 	int i;
 
-	if (used_lrs) {
+	if (used_lrs || cpu_if->its_vpe.its_vm) {
 		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
 
 		for (i = 0; i < used_lrs; i++)
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index abd9c73526778..3af69f2a38667 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -867,15 +867,21 @@  void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 	 * either observe the new interrupt before or after doing this check,
 	 * and introducing additional synchronization mechanism doesn't change
 	 * this.
+	 *
+	 * Note that we still need to go through the whole thing if anything
+	 * can be directly injected (GICv4).
 	 */
-	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
+	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head) &&
+	    !vgic_supports_direct_msis(vcpu->kvm))
 		return;
 
 	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
 
-	raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
-	vgic_flush_lr_state(vcpu);
-	raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
+	if (!list_empty(&vcpu->arch.vgic_cpu.ap_list_head)) {
+		raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
+		vgic_flush_lr_state(vcpu);
+		raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
+	}
 
 	if (can_access_vgic_from_kernel())
 		vgic_restore_state(vcpu);
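
For reference, an annotated sketch of the resulting flush path in
kvm_vgic_flush_hwstate(), assembled from the vgic.c hunk above with
explanatory comments added:

	/*
	 * When GICv4 direct MSI injection is available, an empty ap_list
	 * is no longer enough to skip the flush: the vgic must still be
	 * enabled so that ICH_HCR_EL2.EN stays set and VLPIs keep being
	 * delivered to the guest.
	 */
	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head) &&
	    !vgic_supports_direct_msis(vcpu->kvm))
		return;

	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

	/* Only take the ap_list lock when there are interrupts to flush. */
	if (!list_empty(&vcpu->arch.vgic_cpu.ap_list_head)) {
		raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
		vgic_flush_lr_state(vcpu);
		raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
	}

	if (can_access_vgic_from_kernel())
		vgic_restore_state(vcpu);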