From patchwork Wed May 22 03:16:46 2013
X-Patchwork-Submitter: Fam Zheng
X-Patchwork-Id: 245493
From: Fam Zheng <famz@redhat.com>
To: qemu-devel@nongnu.org
Date: Wed, 22 May 2013 11:16:46 +0800
Message-Id: <1369192610-25003-7-git-send-email-famz@redhat.com>
In-Reply-To: <1369192610-25003-1-git-send-email-famz@redhat.com>
References: <1369192610-25003-1-git-send-email-famz@redhat.com>
Subject: [Qemu-devel] [PATCH v4 06/10] curl: introduce CURLDataCache

The data buffer used to be contained in CURLState: the two were allocated
and freed together. This patch separates them by introducing a dedicated
cache list in BDRVCURLState. The benefit is that we can now release a
CURLState (and its associated sockets) while keeping the fetched data
around for later use, which also simplifies the prefetch and buffer logic
to some degree.

Note: there is already a page cache in the guest kernel, so why cache data
here? Because we don't want to submit an http/ftp/* request for every 2 KB
of a sequential read, yet there are crude guests that issue such small I/O
requests, which results in horrible performance. GRUB/isolinux loading a
kernel is a typical case, and we work around it with a prefetch cache.
This is what curl.c has been doing all along; this patch just refactors
the buffer handling.
Signed-off-by: Fam Zheng <famz@redhat.com>
---
 block/curl.c | 136 +++++++++++++++++++++++++++++------------------------------
 1 file changed, 67 insertions(+), 69 deletions(-)

diff --git a/block/curl.c b/block/curl.c
index 4fd5bb9..e387ae1 100644
--- a/block/curl.c
+++ b/block/curl.c
@@ -43,10 +43,6 @@
 #define SECTOR_SIZE 512
 #define READ_AHEAD_SIZE (256 * 1024)
 
-#define FIND_RET_NONE 0
-#define FIND_RET_OK 1
-#define FIND_RET_WAIT 2
-
 struct BDRVCURLState;
 
 typedef struct CURLAIOCB {
@@ -61,6 +57,16 @@ typedef struct CURLAIOCB {
     size_t end;
 } CURLAIOCB;
 
+typedef struct CURLDataCache {
+    char *data;
+    size_t base_pos;
+    size_t data_len;
+    size_t write_pos;
+    /* Ref count for CURLState */
+    int use_count;
+    QLIST_ENTRY(CURLDataCache) next;
+} CURLDataCache;
+
 typedef struct CURLState
 {
     struct BDRVCURLState *s;
@@ -90,6 +96,8 @@ typedef struct BDRVCURLState {
     char *url;
     size_t readahead_size;
    QEMUTimer *timer;
+    /* List of data cache ordered by access, freed from tail */
+    QLIST_HEAD(, CURLDataCache) cache;
     /* Whether http server accept range in header */
     bool accept_range;
 } BDRVCURLState;
@@ -98,6 +106,19 @@ static void curl_clean_state(CURLState *s);
 static void curl_fd_handler(void *arg);
 static int curl_aio_flush(void *opaque);
 
+static CURLDataCache *curl_find_cache(BDRVCURLState *bs,
+                                      size_t start, size_t len)
+{
+    CURLDataCache *c;
+    QLIST_FOREACH(c, &bs->cache, next) {
+        if (start >= c->base_pos &&
+            start + len <= c->base_pos + c->write_pos) {
+            return c;
+        }
+    }
+    return NULL;
+}
+
 static int curl_sock_cb(CURL *curl, curl_socket_t fd, int action,
                         void *s, void *sp)
 {
@@ -181,6 +202,23 @@ static int curl_multi_timer_cb(CURLM *multi, long timeout_ms, void *s)
     return 0;
 }
 
+static void curl_complete_io(BDRVCURLState *bs, CURLAIOCB *acb,
+                             CURLDataCache *cache)
+{
+    size_t aio_base = acb->sector_num * SECTOR_SIZE;
+    size_t aio_bytes = acb->nb_sectors * SECTOR_SIZE;
+    size_t off = aio_base - cache->base_pos;
+
+    qemu_iovec_from_buf(acb->qiov, 0, cache->data + off, aio_bytes);
+    acb->common.cb(acb->common.opaque, 0);
+    DPRINTF("AIO Request OK: %10zd %10zd\n", aio_base, aio_bytes);
+    qemu_aio_release(acb);
+    acb = NULL;
+    /* Move cache next in the list */
+    QLIST_REMOVE(cache, next);
+    QLIST_INSERT_HEAD(&bs->cache, cache, next);
+}
+
 static size_t curl_read_cb(void *ptr, size_t size, size_t nmemb, void *opaque)
 {
     CURLState *s = ((CURLState*)opaque);
@@ -214,59 +252,6 @@ read_end:
     return realsize;
 }
 
-static int curl_find_buf(BDRVCURLState *s, size_t start, size_t len,
-                         CURLAIOCB *acb)
-{
-    int i;
-    size_t end = start + len;
-
-    for (i=0; i<CURL_NUM_STATES; i++) {
-        CURLState *state = &s->states[i];
-        size_t buf_end = (state->buf_start + state->buf_off);
-        size_t buf_fend = (state->buf_start + state->buf_len);
-
-        if (!state->orig_buf)
-            continue;
-        if (!state->buf_off)
-            continue;
-
-        // Does the existing buffer cover our section?
-        if ((start >= state->buf_start) &&
-            (start <= buf_end) &&
-            (end >= state->buf_start) &&
-            (end <= buf_end))
-        {
-            char *buf = state->orig_buf + (start - state->buf_start);
-
-            qemu_iovec_from_buf(acb->qiov, 0, buf, len);
-            acb->common.cb(acb->common.opaque, 0);
-
-            return FIND_RET_OK;
-        }
-
-        // Wait for unfinished chunks
-        if ((start >= state->buf_start) &&
-            (start <= buf_fend) &&
-            (end >= state->buf_start) &&
-            (end <= buf_fend))
-        {
-            int j;
-
-            acb->start = start - state->buf_start;
-            acb->end = acb->start + len;
-
-            for (j=0; j<CURL_NUM_ACB; j++) {
-                if (!state->acb[j]) {
-                    state->acb[j] = acb;
-                    return FIND_RET_WAIT;
-                }
-            }
-        }
-    }
-
-    return FIND_RET_NONE;
-}
-
 static void curl_fd_handler(void *arg)
 {
     CURLSockInfo *sock = (CURLSockInfo *)arg;
@@ -299,7 +284,9 @@ static void curl_fd_handler(void *arg)
             case CURLMSG_DONE:
             {
                 CURLState *state = NULL;
-                curl_easy_getinfo(msg->easy_handle, CURLINFO_PRIVATE, (char**)&state);
+                curl_easy_getinfo(msg->easy_handle,
+                                  CURLINFO_PRIVATE,
+                                  (char **)&state);
 
                 /* ACBs for successful messages get completed in curl_read_cb */
                 if (msg->data.result != CURLE_OK) {
@@ -495,6 +482,7 @@ static int curl_open(BlockDriverState *bs, QDict *options, int flags)
     }
 
     QLIST_INIT(&s->socks);
+    QLIST_INIT(&s->cache);
 
     DPRINTF("CURL: Opening %s\n", file);
     s->url = g_strdup(file);
@@ -589,26 +577,24 @@ static const AIOCBInfo curl_aiocb_info = {
 static void curl_readv_bh_cb(void *p)
 {
     CURLState *state;
-
+    CURLDataCache *cache = NULL;
     CURLAIOCB *acb = p;
     BDRVCURLState *s = acb->common.bs->opaque;
+    size_t aio_base, aio_bytes;
 
     qemu_bh_delete(acb->bh);
     acb->bh = NULL;
 
+    aio_base = acb->sector_num * SECTOR_SIZE;
+    aio_bytes = acb->nb_sectors * SECTOR_SIZE;
+
     size_t start = acb->sector_num * SECTOR_SIZE;
     size_t end;
 
-    // In case we have the requested data already (e.g. read-ahead),
-    // we can just call the callback and be done.
-    switch (curl_find_buf(s, start, acb->nb_sectors * SECTOR_SIZE, acb)) {
-        case FIND_RET_OK:
-            qemu_aio_release(acb);
-            // fall through
-        case FIND_RET_WAIT:
-            return;
-        default:
-            break;
+    cache = curl_find_cache(s, aio_base, aio_bytes);
+    if (cache) {
+        curl_complete_io(s, acb, cache);
+        return;
     }
 
     // No cache found, so let's start a new request
@@ -691,6 +677,18 @@ static void curl_close(BlockDriverState *bs)
     if (s->multi)
         curl_multi_cleanup(s->multi);
 
+    while (!QLIST_EMPTY(&s->cache)) {
+        CURLDataCache *cache = QLIST_FIRST(&s->cache);
+        assert(cache->use_count == 0);
+        if (cache->data) {
+            g_free(cache->data);
+            cache->data = NULL;
+        }
+        QLIST_REMOVE(cache, next);
+        g_free(cache);
+        cache = NULL;
+    }
+
     while (!QLIST_EMPTY(&s->socks)) {
         CURLSockInfo *sock = QLIST_FIRST(&s->socks);
         QLIST_REMOVE(sock, next);
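
As a side note for reviewers, the stand-alone sketch below models the
lookup-plus-reordering behaviour described in the commit message: a read is
served from a cache entry only if it falls entirely within the entry's
already-written range, and a hit moves that entry to the list head so that
cold entries drift toward the tail, which is the end freed first. This is an
illustration only, not part of the patch; the Cache type, find_cache() and
the sizes in main() are made up here, while the patch itself uses
CURLDataCache, curl_find_cache(), curl_complete_io() and QEMU's QLIST macros.

/* Illustrative sketch, not part of the patch. */
#include <stdio.h>
#include <stdlib.h>

typedef struct Cache {
    char *data;           /* fetched bytes                    */
    size_t base_pos;      /* file offset of data[0]           */
    size_t write_pos;     /* how many bytes are valid so far  */
    struct Cache *next;
} Cache;

/* A request [start, start+len) hits only if it lies entirely inside the
 * valid part of one entry, mirroring curl_find_cache() in the patch. */
static Cache *find_cache(Cache **head, size_t start, size_t len)
{
    for (Cache **pp = head; *pp; pp = &(*pp)->next) {
        Cache *c = *pp;
        if (start >= c->base_pos &&
            start + len <= c->base_pos + c->write_pos) {
            /* Move the hit to the list head, so rarely used entries
             * drift to the tail, where they would be freed first. */
            *pp = c->next;
            c->next = *head;
            *head = c;
            return c;
        }
    }
    return NULL;
}

int main(void)
{
    /* One 256 KiB read-ahead region starting at offset 0, half filled. */
    Cache c = { .data = calloc(1, 256 * 1024),
                .base_pos = 0, .write_pos = 128 * 1024, .next = NULL };
    Cache *head = &c;

    /* A small 2 KiB guest read inside the valid range is a hit ... */
    printf("read at 4K:   %s\n", find_cache(&head, 4096, 2048) ? "hit" : "miss");
    /* ... while a read past write_pos still needs a new curl request. */
    printf("read at 200K: %s\n", find_cache(&head, 200 * 1024, 2048) ? "hit" : "miss");

    free(c.data);
    return 0;
}

Keeping the list in most-recently-used order is what allows the real code to
free from the tail without tracking any timestamps.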