[09/14] KVM: PPC: Book3S HV: Enable KVM real mode handling of passthrough IRQs

Message ID: 1456512032-31286-10-git-send-email-warrier@linux.vnet.ibm.com (mailing list archive)
State: Superseded
Delegated to: Paul Mackerras

Commit Message

Suresh E. Warrier Feb. 26, 2016, 6:40 p.m. UTC
The KVM real mode passthrough handling code only searches the
"cached" entries of the passthrough IRQ map when checking for
passthrough IRQs that can be redirected to the guest.
This patch enables KVM real mode handling of passthrough IRQs
by turning on caching of selected passthrough IRQs. Currently,
we follow a simple policy and cache a passthrough IRQ the first
time its virtual IRQ is injected into the guest (see the
ics_deliver_irq() hunk below).

Since we have a limit of 16 cache entries per guest, this limits
the number of passthrough IRQs handled in KVM real mode to 16.
This should work well for the common case of VMs with a small
number of passthrough adapters or SR-IOV VFs. In the future, we
can increase the number of cached entries, but we would then need
a faster search/filtering mechanism for looking up an IRQ in the
map of cached passthrough IRQs.
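
To illustrate why the small cache keeps the real mode path cheap,
the sketch below shows the kind of bounded lookup real mode can
afford. This is not the code from this series: the structure and
names (pirq_cache, pirq_cache_lookup, PIRQ_CACHE_SIZE, host_irq,
guest_gsi) are hypothetical stand-ins for the actual passthrough
IRQ map.

#define PIRQ_CACHE_SIZE	16	/* matches the per-guest limit above */

/* Hypothetical entry: one passthrough IRQ redirectable in real mode. */
struct pirq_cache_entry {
	unsigned long host_irq;		/* host hardware IRQ number */
	unsigned long guest_gsi;	/* IRQ number the guest sees */
};

struct pirq_cache {
	unsigned int n_cached;		/* valid entries, at most PIRQ_CACHE_SIZE */
	struct pirq_cache_entry entry[PIRQ_CACHE_SIZE];
};

/*
 * Return the guest GSI for a cached host IRQ, or -1 if the IRQ is
 * not cached (real mode would then punt the interrupt to the host).
 * A linear scan over at most 16 entries is cheap enough for real
 * mode; a larger cache would need hashing or a sorted lookup.
 */
static long pirq_cache_lookup(const struct pirq_cache *cache,
			      unsigned long host_irq)
{
	unsigned int i;

	for (i = 0; i < cache->n_cached; i++)
		if (cache->entry[i].host_irq == host_irq)
			return cache->entry[i].guest_gsi;
	return -1;
}

Anything much beyond a few dozen entries would want the faster
search/filtering mechanism mentioned above.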

Signed-off-by: Suresh Warrier <warrier@linux.vnet.ibm.com>
---
 arch/powerpc/include/asm/kvm_host.h |  1 +
 arch/powerpc/include/asm/kvm_ppc.h  |  2 ++
 arch/powerpc/kvm/book3s.c           | 10 +++++++++
 arch/powerpc/kvm/book3s_hv.c        |  4 ++++
 arch/powerpc/kvm/book3s_xics.c      | 41 +++++++++++++++++++++++++++++++++++++
 arch/powerpc/kvm/book3s_xics.h      |  2 ++
 6 files changed, 60 insertions(+)

Patch

diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index fc10248..558d195 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -63,6 +63,7 @@ extern int kvm_unmap_hva_range(struct kvm *kvm,
 extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
 extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+extern int kvmppc_cache_passthru_irq(struct kvm *kvm, int guest_gsi);
 
 static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
 							 unsigned long address)
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index b19bb30..93531cc 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -484,6 +484,8 @@ extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
 extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
 			struct kvm_vcpu *vcpu, u32 cpu);
 extern void kvmppc_xics_ipi_action(void);
+extern void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long irq);
+extern void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long irq);
 extern int h_ipi_redirect;
 #else
 static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 2492b7e..1b4f5bd 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -953,6 +953,16 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
 		kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
 }
 
+int kvmppc_cache_passthru_irq(struct kvm *kvm, int irq)
+{
+	int r = 0;
+
+	if (kvm->arch.kvm_ops->cache_passthru_irq)
+		r = kvm->arch.kvm_ops->cache_passthru_irq(kvm, irq);
+
+	return r;
+}
+
 static int kvmppc_book3s_init(void)
 {
 	int r;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index cc5aea96..487657f 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -3468,6 +3468,8 @@ static int kvmppc_set_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
 
 	pimap->n_mapped++;
 
+	kvmppc_xics_set_mapped(kvm, guest_gsi);
+
 	if (!kvm->arch.pimap)
 		kvm->arch.pimap = pimap;
 
@@ -3522,6 +3524,8 @@ static int kvmppc_clr_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
 	if (i != pimap->n_mapped)
 		pimap->mapped[i] = pimap->mapped[pimap->n_mapped];
 
+	kvmppc_xics_clr_mapped(kvm, guest_gsi);
+
 	/*
 	 * We don't free this structure even when the count goes to
 	 * zero. The structure is freed when we destroy the VM.
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index be23f88..b90570c 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -88,6 +88,18 @@ static int ics_deliver_irq(struct kvmppc_xics *xics, u32 irq, u32 level)
 		return -EINVAL;
 
 	/*
+	 * If this is a mapped passthrough IRQ that is not cached,
+	 * add it to the cached IRQ map so that real mode KVM
+	 * will redirect this directly to the guest where possible.
+	 * Currently, we will cache a passthrough IRQ the first time
+	 * we inject it into the guest.
+	 */
+	if (state->pmapped && !state->pcached) {
+		if (kvmppc_cache_passthru_irq(xics->kvm, irq) == 0)
+			state->pcached = 1;
+	}
+
+	/*
 	 * We set state->asserted locklessly. This should be fine as
 	 * we are the only setter, thus concurrent access is undefined
 	 * to begin with.
@@ -1410,3 +1422,32 @@ int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
 {
 	return pin;
 }
+
+void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long irq)
+{
+	struct kvmppc_xics *xics = kvm->arch.xics;
+	struct kvmppc_ics *ics;
+	u16 idx;
+
+	ics = kvmppc_xics_find_ics(xics, irq, &idx);
+	if (!ics)
+		return;
+
+	ics->irq_state[idx].pmapped = 1;
+}
+EXPORT_SYMBOL_GPL(kvmppc_xics_set_mapped);
+
+void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long irq)
+{
+	struct kvmppc_xics *xics = kvm->arch.xics;
+	struct kvmppc_ics *ics;
+	u16 idx;
+
+	ics = kvmppc_xics_find_ics(xics, irq, &idx);
+	if (!ics)
+		return;
+
+	ics->irq_state[idx].pmapped = 0;
+	ics->irq_state[idx].pcached = 0;
+}
+EXPORT_SYMBOL_GPL(kvmppc_xics_clr_mapped);
diff --git a/arch/powerpc/kvm/book3s_xics.h b/arch/powerpc/kvm/book3s_xics.h
index 56ea44f..de560f1 100644
--- a/arch/powerpc/kvm/book3s_xics.h
+++ b/arch/powerpc/kvm/book3s_xics.h
@@ -41,6 +41,8 @@ struct ics_irq_state {
 	u8  masked_pending;
 	u8  asserted; /* Only for LSI */
 	u8  exists;
+	u8  pmapped;
+	u8  pcached;
 };
 
 /* Atomic ICP state, updated with a single compare & swap */