From patchwork Thu Jun 6 06:25:52 2013 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Fam Zheng X-Patchwork-Id: 249283 Return-Path: X-Original-To: incoming@patchwork.ozlabs.org Delivered-To: patchwork-incoming@bilbo.ozlabs.org Received: from lists.gnu.org (lists.gnu.org [IPv6:2001:4830:134:3::11]) (using TLSv1 with cipher AES256-SHA (256/256 bits)) (Client did not present a certificate) by ozlabs.org (Postfix) with ESMTPS id 9F3492C02B7 for ; Thu, 6 Jun 2013 16:30:35 +1000 (EST) Received: from localhost ([::1]:41197 helo=lists.gnu.org) by lists.gnu.org with esmtp (Exim 4.71) (envelope-from ) id 1UkTiT-0006ji-Md for incoming@patchwork.ozlabs.org; Thu, 06 Jun 2013 02:30:33 -0400 Received: from eggs.gnu.org ([2001:4830:134:3::10]:41651) by lists.gnu.org with esmtp (Exim 4.71) (envelope-from ) id 1UkTfD-0001l5-Qk for qemu-devel@nongnu.org; Thu, 06 Jun 2013 02:27:18 -0400 Received: from Debian-exim by eggs.gnu.org with spam-scanned (Exim 4.71) (envelope-from ) id 1UkTf6-0004QS-Vl for qemu-devel@nongnu.org; Thu, 06 Jun 2013 02:27:11 -0400 Received: from mx1.redhat.com ([209.132.183.28]:10007) by eggs.gnu.org with esmtp (Exim 4.71) (envelope-from ) id 1UkTf6-0004Q1-Oh for qemu-devel@nongnu.org; Thu, 06 Jun 2013 02:27:04 -0400 Received: from int-mx10.intmail.prod.int.phx2.redhat.com (int-mx10.intmail.prod.int.phx2.redhat.com [10.5.11.23]) by mx1.redhat.com (8.14.4/8.14.4) with ESMTP id r566R4sE023144 (version=TLSv1/SSLv3 cipher=DHE-RSA-AES256-SHA bits=256 verify=OK) for ; Thu, 6 Jun 2013 02:27:04 -0400 Received: from localhost.nay.redhat.com ([10.66.7.14]) by int-mx10.intmail.prod.int.phx2.redhat.com (8.14.4/8.14.4) with ESMTP id r566Qh5M009870; Thu, 6 Jun 2013 02:27:01 -0400 From: Fam Zheng To: qemu-devel@nongnu.org Date: Thu, 6 Jun 2013 14:25:52 +0800 Message-Id: <1370499959-8916-7-git-send-email-famz@redhat.com> In-Reply-To: <1370499959-8916-1-git-send-email-famz@redhat.com> References: 
<1370499959-8916-1-git-send-email-famz@redhat.com> X-Scanned-By: MIMEDefang 2.68 on 10.5.11.23 X-detected-operating-system: by eggs.gnu.org: GNU/Linux 3.x X-Received-From: 209.132.183.28 Cc: kwolf@redhat.com, jcody@redhat.com, Fam Zheng , rjones@redhat.com, stefanha@redhat.com Subject: [Qemu-devel] [PATCH v7 06/13] curl: introduce CURLDataCache X-BeenThere: qemu-devel@nongnu.org X-Mailman-Version: 2.1.14 Precedence: list List-Id: List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org Sender: qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org The data buffer was contained in CURLState; they were allocated and freed together. This patch tries to isolate them by introducing a dedicated cache list in BDRVCURLState. The benefit is that we can now release the CURLState (and associated sockets) while keeping the fetched data for later use, and it simplifies the prefetch and buffer logic to some degree. Note: There's already a page cache in the guest kernel, so why cache data here? Because we don't want to submit an http/ftp/* request for every 2KB in a sequential read, yet there are crude guests that send small IO reqs, which would result in horrible performance. GRUB/isolinux loading a kernel is a typical case, and we work around this with a prefetch cache. This is what curl.c has been doing all along. This patch just refactors the buffer. 
Signed-off-by: Fam Zheng --- block/curl.c | 137 ++++++++++++++++++++++++++++------------------------------- 1 file changed, 66 insertions(+), 71 deletions(-) diff --git a/block/curl.c b/block/curl.c index bb46c8f..a99d8b5 100644 --- a/block/curl.c +++ b/block/curl.c @@ -43,10 +43,6 @@ #define SECTOR_SIZE 512 #define READ_AHEAD_SIZE (256 * 1024) -#define FIND_RET_NONE 0 -#define FIND_RET_OK 1 -#define FIND_RET_WAIT 2 - struct BDRVCURLState; typedef struct CURLAIOCB { @@ -61,6 +57,14 @@ typedef struct CURLAIOCB { size_t end; } CURLAIOCB; +typedef struct CURLDataCache { + char *data; + int64_t base_pos; + size_t data_len; + int64_t write_pos; + QLIST_ENTRY(CURLDataCache) next; +} CURLDataCache; + typedef struct CURLState { struct BDRVCURLState *s; @@ -90,6 +94,8 @@ typedef struct BDRVCURLState { char *url; size_t readahead_size; QEMUTimer *timer; + /* List of data cache ordered by access, freed from tail */ + QLIST_HEAD(, CURLDataCache) cache; /* Whether http server accept range in header */ bool accept_range; } BDRVCURLState; @@ -98,6 +104,19 @@ static void curl_clean_state(CURLState *s); static void curl_fd_handler(void *arg); static int curl_aio_flush(void *opaque); +static CURLDataCache *curl_find_cache(BDRVCURLState *bs, + int64_t start, size_t len) +{ + CURLDataCache *c; + QLIST_FOREACH(c, &bs->cache, next) { + if (start >= c->base_pos && + start + len <= c->base_pos + c->write_pos) { + return c; + } + } + return NULL; +} + static int curl_sock_cb(CURL *curl, curl_socket_t fd, int action, void *userp, void *sockp) { @@ -181,6 +200,23 @@ static int curl_multi_timer_cb(CURLM *multi, long timeout_ms, void *s_) return 0; } +static void curl_complete_io(BDRVCURLState *bs, CURLAIOCB *acb, + CURLDataCache *cache) +{ + int64_t aio_base = acb->sector_num * SECTOR_SIZE; + size_t aio_bytes = acb->nb_sectors * SECTOR_SIZE; + int64_t off = aio_base - cache->base_pos; + + qemu_iovec_from_buf(acb->qiov, 0, cache->data + off, aio_bytes); + acb->common.cb(acb->common.opaque, 0); 
+ DPRINTF("AIO Request OK: " PRId64 "%10zd\n", aio_base, aio_bytes); + qemu_aio_release(acb); + acb = NULL; + /* Move cache next in the list */ + QLIST_REMOVE(cache, next); + QLIST_INSERT_HEAD(&bs->cache, cache, next); +} + static size_t curl_read_cb(void *ptr, size_t size, size_t nmemb, void *opaque) { CURLState *s = ((CURLState*)opaque); @@ -214,59 +250,6 @@ read_end: return realsize; } -static int curl_find_buf(BDRVCURLState *s, size_t start, size_t len, - CURLAIOCB *acb) -{ - int i; - size_t end = start + len; - - for (i=0; istates[i]; - size_t buf_end = (state->buf_start + state->buf_off); - size_t buf_fend = (state->buf_start + state->buf_len); - - if (!state->orig_buf) - continue; - if (!state->buf_off) - continue; - - // Does the existing buffer cover our section? - if ((start >= state->buf_start) && - (start <= buf_end) && - (end >= state->buf_start) && - (end <= buf_end)) - { - char *buf = state->orig_buf + (start - state->buf_start); - - qemu_iovec_from_buf(acb->qiov, 0, buf, len); - acb->common.cb(acb->common.opaque, 0); - - return FIND_RET_OK; - } - - // Wait for unfinished chunks - if ((start >= state->buf_start) && - (start <= buf_fend) && - (end >= state->buf_start) && - (end <= buf_fend)) - { - int j; - - acb->start = start - state->buf_start; - acb->end = acb->start + len; - - for (j=0; jacb[j]) { - state->acb[j] = acb; - return FIND_RET_WAIT; - } - } - } - } - - return FIND_RET_NONE; -} - static void curl_fd_handler(void *arg) { CURLSockInfo *sock = (CURLSockInfo *)arg; @@ -299,7 +282,9 @@ static void curl_fd_handler(void *arg) case CURLMSG_DONE: { CURLState *state = NULL; - curl_easy_getinfo(msg->easy_handle, CURLINFO_PRIVATE, (char**)&state); + curl_easy_getinfo(msg->easy_handle, + CURLINFO_PRIVATE, + (char **)&state); /* ACBs for successful messages get completed in curl_read_cb */ if (msg->data.result != CURLE_OK) { @@ -495,6 +480,7 @@ static int curl_open(BlockDriverState *bs, QDict *options, int flags) } QLIST_INIT(&s->socks); + 
QLIST_INIT(&s->cache); DPRINTF("CURL: Opening %s\n", file); s->url = g_strdup(file); @@ -589,26 +575,24 @@ static const AIOCBInfo curl_aiocb_info = { static void curl_readv_bh_cb(void *p) { CURLState *state; - + CURLDataCache *cache = NULL; CURLAIOCB *acb = p; BDRVCURLState *s = acb->common.bs->opaque; + int64_t aio_base, aio_bytes; + int64_t start, end; qemu_bh_delete(acb->bh); acb->bh = NULL; - size_t start = acb->sector_num * SECTOR_SIZE; - size_t end; + aio_base = acb->sector_num * SECTOR_SIZE; + aio_bytes = acb->nb_sectors * SECTOR_SIZE; - // In case we have the requested data already (e.g. read-ahead), - // we can just call the callback and be done. - switch (curl_find_buf(s, start, acb->nb_sectors * SECTOR_SIZE, acb)) { - case FIND_RET_OK: - qemu_aio_release(acb); - // fall through - case FIND_RET_WAIT: - return; - default: - break; + start = acb->sector_num * SECTOR_SIZE; + + cache = curl_find_cache(s, aio_base, aio_bytes); + if (cache) { + curl_complete_io(s, acb, cache); + return; } // No cache found, so let's start a new request @@ -691,6 +675,17 @@ static void curl_close(BlockDriverState *bs) if (s->multi) curl_multi_cleanup(s->multi); + while (!QLIST_EMPTY(&s->cache)) { + CURLDataCache *cache = QLIST_FIRST(&s->cache); + if (cache->data) { + g_free(cache->data); + cache->data = NULL; + } + QLIST_REMOVE(cache, next); + g_free(cache); + cache = NULL; + } + while (!QLIST_EMPTY(&s->socks)) { CURLSockInfo *sock = QLIST_FIRST(&s->socks); QLIST_REMOVE(sock, next);