From patchwork Wed Apr 11 18:49:04 2012
X-Patchwork-Id: 151852
From: Orit Wasserman <owasserm@redhat.com>
To: qemu-devel@nongnu.org
Cc: aliguori@us.ibm.com, quintela@redhat.com, Petter Svard, stefanha@gmail.com,
    blauwirbel@gmail.com, Orit Wasserman, Benoit Hudzia, avi@redhat.com,
    Aidan Shribman
Subject: [Qemu-devel] [PATCH v9 01/10] Add cache handling functions
Date: Wed, 11 Apr 2012 21:49:04 +0300
Message-Id: <1334170153-9503-2-git-send-email-owasserm@redhat.com>
In-Reply-To: <1334170153-9503-1-git-send-email-owasserm@redhat.com>
References: <1334170153-9503-1-git-send-email-owasserm@redhat.com>

Add an LRU page cache mechanism. Pages are accessed by their address.

Signed-off-by: Orit Wasserman <owasserm@redhat.com>
Signed-off-by: Benoit Hudzia
Signed-off-by: Petter Svard
Signed-off-by: Aidan Shribman
---
 arch_init.c |  220 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 220 insertions(+), 0 deletions(-)

diff --git a/arch_init.c b/arch_init.c
index 595badf..2e534f1 100644
--- a/arch_init.c
+++ b/arch_init.c
@@ -28,6 +28,7 @@
 #include <sys/types.h>
 #include <sys/mman.h>
 #endif
+#include <assert.h>
 #include "config.h"
 #include "monitor.h"
 #include "sysemu.h"
@@ -44,6 +45,14 @@
 #include "exec-memory.h"
 #include "hw/pcspk.h"
 
+#ifdef DEBUG_ARCH_INIT
+#define DPRINTF(fmt, ...) \
+    do { fprintf(stdout, "arch_init: " fmt, ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) \
+    do { } while (0)
+#endif
+
 #ifdef TARGET_SPARC
 int graphic_width = 1024;
 int graphic_height = 768;
@@ -127,6 +136,217 @@ static int is_dup_page(uint8_t *page)
     return 1;
 }
 
+/***********************************************************/
+/* Page cache for storing previous pages as a basis for XBZRLE compression */
+#define CACHE_N_WAY 2 /* 2-way associative cache */
+
+typedef struct CacheItem {
+    ram_addr_t it_addr;
+    unsigned long it_age;
+    uint8_t *it_data;
+} CacheItem;
+
+typedef struct CacheBucket {
+    CacheItem bkt_item[CACHE_N_WAY];
+} CacheBucket;
+
+static CacheBucket *page_cache;
+static int64_t cache_num_buckets;
+static uint64_t cache_max_item_age;
+static int64_t cache_num_items;
+
+static void cache_init(int64_t num_bytes);
+static void cache_fini(void);
+static int cache_is_cached(ram_addr_t addr);
+static int cache_get_oldest(CacheBucket *buck);
+static int cache_get_newest(CacheBucket *buck, ram_addr_t addr);
+static void cache_insert(ram_addr_t addr, uint8_t *pdata, int use_buffer);
+static unsigned long cache_get_cache_pos(ram_addr_t address);
+static CacheItem *cache_item_get(unsigned long pos, int item);
+static void cache_resize(int64_t new_size);
+
+/***********************************************************/
+/* XBZRLE page cache implementation */
+static CacheItem *cache_item_get(unsigned long pos, int item)
+{
+    assert(page_cache);
+    return &page_cache[pos].bkt_item[item];
+}
+
+static void cache_init(int64_t num_bytes)
+{
+    int i;
+
+    cache_num_items = 0;
+    cache_max_item_age = 0;
+    cache_num_buckets = num_bytes / (TARGET_PAGE_SIZE * CACHE_N_WAY);
+    assert(cache_num_buckets);
+    DPRINTF("Setting cache buckets to %" PRId64 "\n", cache_num_buckets);
+
+    assert(!page_cache);
+    page_cache = (CacheBucket *)g_malloc(cache_num_buckets *
+                                         sizeof(CacheBucket));
+
+    for (i = 0; i < cache_num_buckets; i++) {
+        int j;
+        for (j = 0; j < CACHE_N_WAY; j++) {
+            CacheItem *it = cache_item_get(i, j);
+            it->it_data = NULL;
+            it->it_age = 0;
+            it->it_addr = -1;
+        }
+    }
+}
+
+static void cache_fini(void)
+{
+    int i;
+
+    assert(page_cache);
+
+    for (i = 0; i < cache_num_buckets; i++) {
+        int j;
+        for (j = 0; j < CACHE_N_WAY; j++) {
+            CacheItem *it = cache_item_get(i, j);
+            g_free(it->it_data);
+            it->it_data = NULL;
+        }
+    }
+
+    g_free(page_cache);
+    page_cache = NULL;
+}
+
+static unsigned long cache_get_cache_pos(ram_addr_t address)
+{
+    unsigned long pos;
+
+    assert(cache_num_buckets);
+    pos = (address / TARGET_PAGE_SIZE) & (cache_num_buckets - 1);
+    return pos;
+}
+
+static int cache_get_newest(CacheBucket *buck, ram_addr_t addr)
+{
+    unsigned long big = 0;
+    int big_pos = -1;
+    int j;
+
+    assert(page_cache);
+
+    for (j = 0; j < CACHE_N_WAY; j++) {
+        CacheItem *it = &buck->bkt_item[j];
+
+        if (it->it_addr != addr) {
+            continue;
+        }
+
+        if (!j || it->it_age > big) {
+            big = it->it_age;
+            big_pos = j;
+        }
+    }
+
+    return big_pos;
+}
+
+static int cache_get_oldest(CacheBucket *buck)
+{
+    unsigned long small = 0;
+    int small_pos = -1;
+    int j;
+
+    assert(page_cache);
+
+    for (j = 0; j < CACHE_N_WAY; j++) {
+        CacheItem *it = &buck->bkt_item[j];
+
+        if (!j || it->it_age < small) {
+            small = it->it_age;
+            small_pos = j;
+        }
+    }
+
+    return small_pos;
+}
+
+static int cache_is_cached(ram_addr_t addr)
+{
+    unsigned long pos = cache_get_cache_pos(addr);
+
+    assert(page_cache);
+    CacheBucket *bucket = &page_cache[pos];
+    return cache_get_newest(bucket, addr);
+}
+
+static void cache_insert(ram_addr_t addr, uint8_t *pdata, int use_buffer)
+{
+    unsigned long pos;
+    int slot = -1;
+    CacheBucket *bucket;
+
+    pos = cache_get_cache_pos(addr);
+    assert(page_cache);
+    bucket = &page_cache[pos];
+    slot = cache_get_oldest(bucket); /* evict LRU */
+
+    /* actual update of entry */
+    CacheItem *it = cache_item_get(pos, slot);
+    if (!it->it_data) {
+        if (!use_buffer) {
+            it->it_data = g_malloc(TARGET_PAGE_SIZE);
+        }
+        cache_num_items++;
+    }
+
+    if (!use_buffer) {
+        memcpy(it->it_data, pdata, TARGET_PAGE_SIZE);
+    } else {
+        it->it_data = pdata; /* take ownership of the caller's buffer */
+    }
+    it->it_age = ++cache_max_item_age;
+    it->it_addr = addr;
+}
+
+static void cache_resize(int64_t new_size)
+{
+    int64_t new_num_buckets = new_size / (TARGET_PAGE_SIZE * CACHE_N_WAY);
+    CacheBucket *old_page_cache = page_cache;
+    int i;
+    int64_t old_num_buckets = cache_num_buckets;
+
+    /* same size */
+    if (new_num_buckets == cache_num_buckets) {
+        return;
+    }
+    /* cache was not initialized */
+    if (page_cache == NULL) {
+        return;
+    }
+
+    /* create a new cache */
+    page_cache = NULL;
+    cache_init(new_size);
+
+    /* move all data from the old cache */
+    for (i = 0; i < old_num_buckets; i++) {
+        int j;
+        for (j = 0; j < CACHE_N_WAY; j++) {
+            CacheItem *it = &old_page_cache[i].bkt_item[j];
+            if (it->it_addr != -1) {
+                /* check for collision; if there is one, keep the first value */
+                if (cache_is_cached(it->it_addr) != -1) {
+                    g_free(it->it_data);
+                } else {
+                    cache_insert(it->it_addr, it->it_data, 1);
+                }
+            }
+        }
+    }
+
+    g_free(old_page_cache);
+}
+
 static RAMBlock *last_block;
 static ram_addr_t last_offset;
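
P.S. (not part of the patch, for reviewers only): a minimal sketch of how a
migration-side caller might drive this cache API. The helper names
save_page_with_cache() and xbzrle_cache_example(), and the 64 MB / 128 MB
sizes, are made up for illustration; the code assumes it sits in arch_init.c
next to the static cache functions above. Note that the example sizes keep
the bucket count a power of two, which the mask in cache_get_cache_pos()
relies on.

/* Illustration only -- hypothetical caller of the cache API above. */
static void save_page_with_cache(ram_addr_t current_addr, uint8_t *page)
{
    int slot = cache_is_cached(current_addr);

    if (slot != -1) {
        /* A previous copy of this page is cached; an XBZRLE encoder would
         * diff 'page' against it here before sending. */
        uint8_t *old = cache_item_get(cache_get_cache_pos(current_addr),
                                      slot)->it_data;
        (void)old;
    }

    /* Refresh the cache with the current contents; use_buffer == 0 makes
     * cache_insert() keep its own copy of the page. */
    cache_insert(current_addr, page, 0);
}

/* Hypothetical setup/teardown around a migration run. */
static void xbzrle_cache_example(void)
{
    cache_init(64 * 1024 * 1024);    /* e.g. 64 MB worth of cached pages */
    /* ... call save_page_with_cache() for each page sent ... */
    cache_resize(128 * 1024 * 1024); /* the cache can be grown on the fly */
    cache_fini();
}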