Patchwork [RFC,17/45] qemu-kvm: Track MSIRoutingCache in KVM routing table

login
register
mail settings
Submitter Jan Kiszka
Date Oct. 17, 2011, 9:27 a.m.
Message ID <76a09d29c3ff24930642b36302ce41dd96824ca1.1318843693.git.jan.kiszka@siemens.com>
Download mbox | patch
Permalink /patch/120144/
State New
Headers show

Comments

Jan Kiszka - Oct. 17, 2011, 9:27 a.m.
Keep a link from the internal KVM routing table to potential MSI routing
cache entries. So far, the link is only used to invalidate the cache
content whenever the entry is dropped. It will allow us to build MSI
routing entries on demand and flush existing ones on table overflow.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
---
 hw/device-assignment.c |    4 ++--
 kvm-all.c              |    1 +
 qemu-kvm.c             |   25 ++++++++++++++++++-------
 qemu-kvm.h             |    3 ++-
 4 files changed, 23 insertions(+), 10 deletions(-)
Avi Kivity - Oct. 17, 2011, 11:13 a.m.
On 10/17/2011 11:27 AM, Jan Kiszka wrote:
> Keep a link from the internal KVM routing table to potential MSI routing
> cache entries. The link is used so far whenever the entry is dropped to
> invalidate the cache content. It will allow us to build MSI routing
> entries on demand and flush existing ones on table overflow.
>

Does this not require a destructor for MSIRoutingCache?
Jan Kiszka - Oct. 17, 2011, 11:25 a.m.
On 2011-10-17 13:13, Avi Kivity wrote:
> On 10/17/2011 11:27 AM, Jan Kiszka wrote:
>> Keep a link from the internal KVM routing table to potential MSI routing
>> cache entries. The link is used so far whenever the entry is dropped to
>> invalidate the cache content. It will allow us to build MSI routing
>> entries on demand and flush existing ones on table overflow.
>>
> 
> Does this not require a destructor for MSIRoutingCache?

Yes, kvm_msi_cache_invalidate. Cache providers are responsible for
invalidating used caches before freeing them. That also drops the
reference established here.

Jan
Avi Kivity - Oct. 17, 2011, 12:15 p.m.
On 10/17/2011 01:25 PM, Jan Kiszka wrote:
> On 2011-10-17 13:13, Avi Kivity wrote:
> > On 10/17/2011 11:27 AM, Jan Kiszka wrote:
> >> Keep a link from the internal KVM routing table to potential MSI routing
> >> cache entries. The link is used so far whenever the entry is dropped to
> >> invalidate the cache content. It will allow us to build MSI routing
> >> entries on demand and flush existing ones on table overflow.
> >>
> > 
> > Does this not require a destructor for MSIRoutingCache?
>
> Yes, kvm_msi_cache_invalidate. Cache providers are responsible for
> invalidating used caches before freeing them. That also drops the
> reference established here.

Ah, apic.c's cache is static.  It looked to me as if you're immediately
leaking references to it, but you aren't.

Patch

diff --git a/hw/device-assignment.c b/hw/device-assignment.c
index 11efd16..07e9f5a 100644
--- a/hw/device-assignment.c
+++ b/hw/device-assignment.c
@@ -951,7 +951,7 @@  static void assigned_dev_update_msi(PCIDevice *pci_dev)
         }
         assigned_dev->entry->gsi = r;
 
-        kvm_add_routing_entry(assigned_dev->entry);
+        kvm_add_routing_entry(assigned_dev->entry, NULL);
         if (kvm_commit_irq_routes() < 0) {
             perror("assigned_dev_update_msi: kvm_commit_irq_routes");
             assigned_dev->cap.state &= ~ASSIGNED_DEVICE_MSI_ENABLED;
@@ -1039,7 +1039,7 @@  static int assigned_dev_update_msix_mmio(PCIDevice *pci_dev)
         adev->entry[entries_nr].u.msi.address_hi = msg_upper_addr;
         adev->entry[entries_nr].u.msi.data = msg_data;
         DEBUG("MSI-X data 0x%x, MSI-X addr_lo 0x%x\n!", msg_data, msg_addr);
-	kvm_add_routing_entry(&adev->entry[entries_nr]);
+        kvm_add_routing_entry(&adev->entry[entries_nr], NULL);
 
         msix_entry.gsi = adev->entry[entries_nr].gsi;
         msix_entry.entry = i;
diff --git a/kvm-all.c b/kvm-all.c
index c34263b..c4186a5 100644
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -81,6 +81,7 @@  struct KVMState
     int irqchip_inject_ioctl;
 #ifdef KVM_CAP_IRQ_ROUTING
     struct kvm_irq_routing *irq_routes;
+    MSIRoutingCache **msi_cache;
     int nr_allocated_irq_routes;
 #endif
     void *used_gsi_bitmap;
diff --git a/qemu-kvm.c b/qemu-kvm.c
index 253cf75..13d4f90 100644
--- a/qemu-kvm.c
+++ b/qemu-kvm.c
@@ -253,7 +253,8 @@  int kvm_has_gsi_routing(void)
     return r;
 }
 
-int kvm_add_routing_entry(struct kvm_irq_routing_entry *entry)
+int kvm_add_routing_entry(struct kvm_irq_routing_entry *entry,
+                          MSIRoutingCache *msi_cache)
 {
 #ifdef KVM_CAP_IRQ_ROUTING
     KVMState *s = kvm_state;
@@ -274,6 +275,8 @@  int kvm_add_routing_entry(struct kvm_irq_routing_entry *entry)
         }
         s->nr_allocated_irq_routes = n;
         s->irq_routes = z;
+
+        s->msi_cache = g_realloc(s->msi_cache, sizeof(*s->msi_cache) * n);
     }
     n = s->irq_routes->nr++;
     new = &s->irq_routes->entries[n];
@@ -282,6 +285,7 @@  int kvm_add_routing_entry(struct kvm_irq_routing_entry *entry)
     new->type = entry->type;
     new->flags = entry->flags;
     new->u = entry->u;
+    s->msi_cache[n] = msi_cache;
 
     set_gsi(s, entry->gsi);
 
@@ -301,7 +305,7 @@  int kvm_add_irq_route(int gsi, int irqchip, int pin)
     e.flags = 0;
     e.u.irqchip.irqchip = irqchip;
     e.u.irqchip.pin = pin;
-    return kvm_add_routing_entry(&e);
+    return kvm_add_routing_entry(&e, NULL);
 #else
     return -ENOSYS;
 #endif
@@ -312,6 +316,7 @@  int kvm_del_routing_entry(struct kvm_irq_routing_entry *entry)
 #ifdef KVM_CAP_IRQ_ROUTING
     KVMState *s = kvm_state;
     struct kvm_irq_routing_entry *e, *p;
+    MSIRoutingCache *cache;
     int i, gsi, found = 0;
 
     gsi = entry->gsi;
@@ -324,8 +329,6 @@  int kvm_del_routing_entry(struct kvm_irq_routing_entry *entry)
                     if (e->u.irqchip.irqchip ==
                         entry->u.irqchip.irqchip
                         && e->u.irqchip.pin == entry->u.irqchip.pin) {
-                        p = &s->irq_routes->entries[--s->irq_routes->nr];
-                        *e = *p;
                         found = 1;
                     }
                     break;
@@ -336,8 +339,6 @@  int kvm_del_routing_entry(struct kvm_irq_routing_entry *entry)
                         && e->u.msi.address_hi ==
                         entry->u.msi.address_hi
                         && e->u.msi.data == entry->u.msi.data) {
-                        p = &s->irq_routes->entries[--s->irq_routes->nr];
-                        *e = *p;
                         found = 1;
                     }
                     break;
@@ -346,6 +347,16 @@  int kvm_del_routing_entry(struct kvm_irq_routing_entry *entry)
                 break;
             }
             if (found) {
+                s->irq_routes->nr--;
+                p = &s->irq_routes->entries[s->irq_routes->nr];
+                *e = *p;
+
+                cache = s->msi_cache[i];
+                if (cache) {
+                    cache->type = MSI_ROUTE_NONE;
+                }
+                s->msi_cache[i] = s->msi_cache[s->irq_routes->nr];
+
                 /* If there are no other users of this GSI
                  * mark it available in the bitmap */
                 for (i = 0; i < s->irq_routes->nr; i++) {
@@ -469,7 +480,7 @@  int kvm_msi_message_add(MSIMessage *msg, MSIRoutingCache *cache)
     cache->kvm_irqfd = -1;
 
     kvm_msi_routing_entry(&e, cache);
-    return kvm_add_routing_entry(&e);
+    return kvm_add_routing_entry(&e, cache);
 }
 
 int kvm_msi_message_del(MSIRoutingCache *cache)
diff --git a/qemu-kvm.h b/qemu-kvm.h
index 68a921e..b2ae5da 100644
--- a/qemu-kvm.h
+++ b/qemu-kvm.h
@@ -188,7 +188,8 @@  struct kvm_irq_routing_entry;
  * Adds a filled routing entry to the temporary irq routing table. Nothing is
  * committed to the running VM.
  */
-int kvm_add_routing_entry(struct kvm_irq_routing_entry *entry);
+int kvm_add_routing_entry(struct kvm_irq_routing_entry *entry,
+                          MSIRoutingCache *msi_cache);
 
 /*!
  * \brief Removes a routing from the temporary irq routing table