[SRU,Cosmic] arm64: KVM: Always set ICH_HCR_EL2.EN if GICv4 is enabled

Message ID 20190523190034.17226-2-dann.frazier@canonical.com
State New
Series [SRU,Cosmic] arm64: KVM: Always set ICH_HCR_EL2.EN if GICv4 is enabled

Commit Message

dann frazier May 23, 2019, 7 p.m. UTC
From: Marc Zyngier <marc.zyngier@arm.com>

BugLink: https://bugs.launchpad.net/bugs/1829942

The normal interrupt flow is not to enable the vgic when no virtual
interrupt is to be injected (i.e. the LRs are empty). But when a guest
is likely to use GICv4 for LPIs, we absolutely need to switch it on
at all times. Otherwise, VLPIs only get delivered when there is something
in the LRs, which doesn't happen very often.

Reported-by: Nianyao Tang <tangnianyao@huawei.com>
Tested-by: Shameerali Kolothum Thodi <shameerali.kolothum.thodi@huawei.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
(backported from commit ca71228b42a96908eca7658861eafacd227856c9)
[ dannf: Fixed to apply by retaining the spin_lock calls, which were later
  changed to raw_spin_lock calls upstream ]
Signed-off-by: dann frazier <dann.frazier@canonical.com>
---
 virt/kvm/arm/hyp/vgic-v3-sr.c |  4 ++--
 virt/kvm/arm/vgic/vgic.c      | 14 ++++++++++----
 2 files changed, 12 insertions(+), 6 deletions(-)
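
For context, the shadow value being restored already has the enable bit
set: in kernels of this vintage, vgic_v3_enable() initializes it roughly
as

	/* virt/kvm/arm/vgic/vgic-v3.c (approximate excerpt) */
	vgic_v3->vgic_hcr = ICH_HCR_EN;

so the bug is not a missing EN bit, but that the hyp save/restore code
gates every ICH_* access on the number of occupied list registers. A
condensed sketch of the restore-side change (names as in the diff below):

	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;

	/* Before: ICH_HCR_EL2 (and with it the EN bit) only reaches
	 * the hardware when at least one list register is in use. */
	if (used_lrs)
		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);

	/* After: also program it whenever the VM has a GICv4 vPE,
	 * which is exactly when its_vpe.its_vm is non-NULL. */
	if (used_lrs || cpu_if->its_vpe.its_vm)
		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);

A directly-injected VLPI bypasses the LRs entirely, so with empty LRs
the EN bit was never set and delivery stalled until an unrelated
interrupt happened to occupy an LR.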

Comments

Kleber Sacilotto de Souza June 5, 2019, 10:55 a.m. UTC | #1
On 5/23/19 9:00 PM, dann frazier wrote:
> From: Marc Zyngier <marc.zyngier@arm.com>
> 
> [...]

Applied to cosmic/master-next branch.

Thanks,
Kleber

Patch

diff --git a/virt/kvm/arm/hyp/vgic-v3-sr.c b/virt/kvm/arm/hyp/vgic-v3-sr.c
index 616e5a433ab0f..e20e797a1b69b 100644
--- a/virt/kvm/arm/hyp/vgic-v3-sr.c
+++ b/virt/kvm/arm/hyp/vgic-v3-sr.c
@@ -222,7 +222,7 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
 		}
 	}
 
-	if (used_lrs) {
+	if (used_lrs || cpu_if->its_vpe.its_vm) {
 		int i;
 		u32 elrsr;
 
@@ -247,7 +247,7 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
 	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
 	int i;
 
-	if (used_lrs) {
+	if (used_lrs || cpu_if->its_vpe.its_vm) {
 		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
 
 		for (i = 0; i < used_lrs; i++)
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index 7007468e24712..f48662234cd41 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -871,15 +871,21 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 	 * either observe the new interrupt before or after doing this check,
 	 * and introducing additional synchronization mechanism doesn't change
 	 * this.
+	 *
+	 * Note that we still need to go through the whole thing if anything
+	 * can be directly injected (GICv4).
 	 */
-	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
+	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head) &&
+	    !vgic_supports_direct_msis(vcpu->kvm))
 		return;
 
 	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
 
-	spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
-	vgic_flush_lr_state(vcpu);
-	spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
+	if (!list_empty(&vcpu->arch.vgic_cpu.ap_list_head)) {
+		spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
+		vgic_flush_lr_state(vcpu);
+		spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
+	}
 
 	if (can_access_vgic_from_kernel())
 		vgic_restore_state(vcpu);
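
On the flush side, the early-out in kvm_vgic_flush_hwstate() now also
asks whether the VM can receive directly-injected MSIs at all. For
reference, in kernels of this vintage the helper is approximately

	/* virt/kvm/arm/vgic/vgic.h (approximate excerpt) */
	static inline bool vgic_supports_direct_msis(struct kvm *kvm)
	{
		return kvm_vgic_global_state.has_gicv4 && vgic_has_its(kvm);
	}

so the slow path is only forced for VMs that have both GICv4 support
and an ITS, and the ap_list lock is still taken only when the list is
non-empty.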