
block: Always give priority to unused entries in the qcow2 L2 cache

Message ID 20150206134629.GA1775@igalia.com
State New

Commit Message

Alberto Garcia Feb. 6, 2015, 1:46 p.m. UTC
On Fri, Feb 06, 2015 at 12:18:07PM +0100, Kevin Wolf wrote:

> > I wanted to measure the actual impact of this, so I modified the
> > current code to implement a quick and dirty LRU algorithm and made
> > some numbers.
> 
> Cool, thanks. I think switching to an LRU algorithm is an option
> that might make sense for the general case, so this is something to
> consider.

I'm sharing the implementation anyway in case you want to give it a
look.

> I was going to suggest a test run for a different scenario, like
> with many interleaved sequential reads, but thinking a bit more
> about how to do it so that the eviction algorithm could make a
> significant change, it occurred to me that it might not be all that
> easy (and therefore possibly not practically relevant either).

Yes, that was exactly my point.

Thanks a lot for your comments and quick review,

Berto
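
(For reference, here is a minimal, standalone sketch of the counter-based
LRU replacement that the patch below implements. The LruEntry/LruCache
types and the helper names are illustrative only, not the actual qcow2
structures.)

#include <limits.h>

typedef struct {
    unsigned lru_count;   /* access stamp; 0 means "never used" */
    int      ref;         /* entry is pinned while ref > 0 */
} LruEntry;

typedef struct {
    LruEntry *entries;
    int       size;
    unsigned  max_lru_count;  /* highest stamp handed out so far */
} LruCache;

/* Stamp an entry as most recently used; reset all stamps before the
 * global counter can overflow. */
static void lru_touch(LruCache *c, int i)
{
    c->entries[i].lru_count = ++c->max_lru_count;

    if (c->max_lru_count == UINT_MAX) {
        int f;
        for (f = 0; f < c->size; f++) {
            c->entries[f].lru_count = 0;
        }
        c->max_lru_count = 0;
    }
}

/* Pick a victim: the unpinned entry with the smallest stamp. Entries
 * that have never been used (stamp 0) always win, so unused cache
 * slots are filled before anything is evicted. */
static int lru_find_victim(const LruCache *c)
{
    unsigned min_count = UINT_MAX;
    int min_index = -1;
    int i;

    for (i = 0; i < c->size; i++) {
        if (c->entries[i].ref) {
            continue;
        }
        if (c->entries[i].lru_count < min_count) {
            min_index = i;
            min_count = c->entries[i].lru_count;
        }
    }
    return min_index;   /* -1 if every entry is pinned */
}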

Patch

diff --git a/block/qcow2-cache.c b/block/qcow2-cache.c
index b115549..0dd0676 100644
--- a/block/qcow2-cache.c
+++ b/block/qcow2-cache.c
@@ -28,11 +28,11 @@ 
 #include "trace.h"
 
 typedef struct Qcow2CachedTable {
-    void*   table;
-    int64_t offset;
-    bool    dirty;
-    int     cache_hits;
-    int     ref;
+    void*    table;
+    int64_t  offset;
+    bool     dirty;
+    unsigned lru_count;
+    int      ref;
 } Qcow2CachedTable;
 
 struct Qcow2Cache {
@@ -40,6 +40,7 @@  struct Qcow2Cache {
     struct Qcow2Cache*      depends;
     int                     size;
     bool                    depends_on_flush;
+    unsigned                max_lru_count;
 };
 
 Qcow2Cache *qcow2_cache_create(BlockDriverState *bs, int num_tables)
@@ -228,16 +229,18 @@  int qcow2_cache_empty(BlockDriverState *bs, Qcow2Cache *c)
     for (i = 0; i < c->size; i++) {
         assert(c->entries[i].ref == 0);
         c->entries[i].offset = 0;
-        c->entries[i].cache_hits = 0;
+        c->entries[i].lru_count = 0;
     }
 
+    c->max_lru_count = 0;
+
     return 0;
 }
 
 static int qcow2_cache_find_entry_to_replace(Qcow2Cache *c)
 {
     int i;
-    int min_count = INT_MAX;
+    unsigned min_count = UINT_MAX;
     int min_index = -1;
 
 
@@ -246,15 +249,9 @@  static int qcow2_cache_find_entry_to_replace(Qcow2Cache *c)
             continue;
         }
 
-        if (c->entries[i].cache_hits < min_count) {
+        if (c->entries[i].lru_count < min_count) {
             min_index = i;
-            min_count = c->entries[i].cache_hits;
-        }
-
-        /* Give newer hits priority */
-        /* TODO Check how to optimize the replacement strategy */
-        if (c->entries[i].cache_hits > 1) {
-            c->entries[i].cache_hits /= 2;
+            min_count = c->entries[i].lru_count;
         }
     }
 
@@ -310,17 +307,26 @@  static int qcow2_cache_do_get(BlockDriverState *bs, Qcow2Cache *c,
         }
     }
 
-    /* Give the table some hits for the start so that it won't be replaced
-     * immediately. The number 32 is completely arbitrary. */
-    c->entries[i].cache_hits = 32;
+    c->entries[i].lru_count = ++c->max_lru_count;
     c->entries[i].offset = offset;
 
     /* And return the right table */
 found:
-    c->entries[i].cache_hits++;
+    if (c->entries[i].lru_count < c->max_lru_count) {
+        c->entries[i].lru_count = ++c->max_lru_count;
+    }
     c->entries[i].ref++;
     *table = c->entries[i].table;
 
+    /* Reset LRU counters before they overflow */
+    if (c->max_lru_count == UINT_MAX) {
+        unsigned f;
+        for (f = 0; f < c->size; f++) {
+            c->entries[f].lru_count = 0;
+        }
+        c->max_lru_count = 0;
+    }
+
     trace_qcow2_cache_get_done(qemu_coroutine_self(),
                                c == s->l2_table_cache, i);