xen-mapcache: remember the last page address rather than the last address_index

Submitted by Stefano Stabellini on June 6, 2011, 6:02 p.m.

Details

Message ID alpine.DEB.2.00.1106061902150.12963@kaball-desktop
State New
Headers show

Commit Message

Stefano Stabellini June 6, 2011, 6:02 p.m.
A single address_index corresponds to multiple pages that might or
might not be mapped.
It is better to just remember the last page address for the sake of this
optimization, so that we are sure that it is mapped.

Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>

Patch hide | download patch | download mbox

diff --git a/xen-mapcache.c b/xen-mapcache.c
index fac47cd..80c430f 100644
--- a/xen-mapcache.c
+++ b/xen-mapcache.c
@@ -62,7 +62,7 @@  typedef struct MapCache {
     QTAILQ_HEAD(map_cache_head, MapCacheRev) locked_entries;
 
     /* For most cases (>99.9%), the page address is the same. */
-    target_phys_addr_t last_address_index;
+    target_phys_addr_t last_address_page;
     uint8_t *last_address_vaddr;
     unsigned long max_mcache_size;
     unsigned int mcache_bucket_shift;
@@ -87,7 +87,7 @@  void qemu_map_cache_init(void)
     mapcache = qemu_mallocz(sizeof (MapCache));
 
     QTAILQ_INIT(&mapcache->locked_entries);
-    mapcache->last_address_index = -1;
+    mapcache->last_address_page = -1;
 
     getrlimit(RLIMIT_AS, &rlimit_as);
     if (rlimit_as.rlim_max < MCACHE_MAX_SIZE) {
@@ -173,7 +173,7 @@  uint8_t *qemu_map_cache(target_phys_addr_t phys_addr, target_phys_addr_t size, u
 
     trace_qemu_map_cache(phys_addr);
 
-    if (address_index == mapcache->last_address_index && !lock && !__size) {
+    if ((phys_addr >> XC_PAGE_SHIFT) == mapcache->last_address_page && !lock && !__size) {
         trace_qemu_map_cache_return(mapcache->last_address_vaddr + address_offset);
         return mapcache->last_address_vaddr + address_offset;
     }
@@ -210,18 +210,18 @@  uint8_t *qemu_map_cache(target_phys_addr_t phys_addr, target_phys_addr_t size, u
 
     if(!test_bits(address_offset >> XC_PAGE_SHIFT, size >> XC_PAGE_SHIFT,
                 entry->valid_mapping)) {
-        mapcache->last_address_index = -1;
+        mapcache->last_address_page = -1;
         trace_qemu_map_cache_return(NULL);
         return NULL;
     }
 
-    mapcache->last_address_index = address_index;
+    mapcache->last_address_page = phys_addr >> XC_PAGE_SHIFT;
     mapcache->last_address_vaddr = entry->vaddr_base;
     if (lock) {
         MapCacheRev *reventry = qemu_mallocz(sizeof(MapCacheRev));
         entry->lock++;
         reventry->vaddr_req = mapcache->last_address_vaddr + address_offset;
-        reventry->paddr_index = mapcache->last_address_index;
+        reventry->paddr_index = address_index;
         reventry->size = entry->size;
         QTAILQ_INSERT_HEAD(&mapcache->locked_entries, reventry, next);
     }
@@ -278,7 +278,7 @@  void qemu_invalidate_entry(uint8_t *buffer)
     int found = 0;
 
     if (mapcache->last_address_vaddr == buffer) {
-        mapcache->last_address_index = -1;
+        mapcache->last_address_page = -1;
     }
 
     QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
@@ -357,7 +357,7 @@  void qemu_invalidate_map_cache(void)
         entry->valid_mapping = NULL;
     }
 
-    mapcache->last_address_index = -1;
+    mapcache->last_address_page = -1;
     mapcache->last_address_vaddr = NULL;
 
     mapcache_unlock();