From patchwork Tue May 14 02:26:25 2013 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Fam Zheng X-Patchwork-Id: 243565 Return-Path: X-Original-To: incoming@patchwork.ozlabs.org Delivered-To: patchwork-incoming@bilbo.ozlabs.org Received: from lists.gnu.org (lists.gnu.org [208.118.235.17]) (using TLSv1 with cipher AES256-SHA (256/256 bits)) (Client did not present a certificate) by ozlabs.org (Postfix) with ESMTPS id CAE542C00B0 for ; Tue, 14 May 2013 12:28:56 +1000 (EST) Received: from localhost ([::1]:59100 helo=lists.gnu.org) by lists.gnu.org with esmtp (Exim 4.71) (envelope-from ) id 1Uc4z0-0005dd-U3 for incoming@patchwork.ozlabs.org; Mon, 13 May 2013 22:28:54 -0400 Received: from eggs.gnu.org ([208.118.235.92]:59074) by lists.gnu.org with esmtp (Exim 4.71) (envelope-from ) id 1Uc4xL-0003Iz-Ta for qemu-devel@nongnu.org; Mon, 13 May 2013 22:27:14 -0400 Received: from Debian-exim by eggs.gnu.org with spam-scanned (Exim 4.71) (envelope-from ) id 1Uc4xI-0003Sv-Uo for qemu-devel@nongnu.org; Mon, 13 May 2013 22:27:11 -0400 Received: from mx1.redhat.com ([209.132.183.28]:11748) by eggs.gnu.org with esmtp (Exim 4.71) (envelope-from ) id 1Uc4xI-0003Sh-NP for qemu-devel@nongnu.org; Mon, 13 May 2013 22:27:08 -0400 Received: from int-mx02.intmail.prod.int.phx2.redhat.com (int-mx02.intmail.prod.int.phx2.redhat.com [10.5.11.12]) by mx1.redhat.com (8.14.4/8.14.4) with ESMTP id r4E2R8Gb012881 (version=TLSv1/SSLv3 cipher=DHE-RSA-AES256-SHA bits=256 verify=OK) for ; Mon, 13 May 2013 22:27:08 -0400 Received: from fam-laptop.nay.redhat.com ([10.66.7.14]) by int-mx02.intmail.prod.int.phx2.redhat.com (8.13.8/8.13.8) with ESMTP id r4E2QV5k014423; Mon, 13 May 2013 22:27:04 -0400 From: Fam Zheng To: qemu-devel@nongnu.org Date: Tue, 14 May 2013 10:26:25 +0800 Message-Id: <1368498390-20738-7-git-send-email-famz@redhat.com> In-Reply-To: <1368498390-20738-1-git-send-email-famz@redhat.com> References: 
<1368498390-20738-1-git-send-email-famz@redhat.com> X-Scanned-By: MIMEDefang 2.67 on 10.5.11.12 X-detected-operating-system: by eggs.gnu.org: GNU/Linux 3.x X-Received-From: 209.132.183.28 Cc: kwolf@redhat.com, jcody@redhat.com, Fam Zheng , stefanha@redhat.com Subject: [Qemu-devel] [PATCH v2 06/11] curl: introduce CURLDataCache X-BeenThere: qemu-devel@nongnu.org X-Mailman-Version: 2.1.14 Precedence: list List-Id: List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org Sender: qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org The data buffer was contained by CURLState; they were allocated and freed together. This patch tries to isolate them by introducing a dedicated cache list to BDRVCURLState. The benefit is that we can now release the CURLState (and associated sockets) while keeping the fetched data for later use, and it simplifies the prefetch and buffer logic to some degree. Signed-off-by: Fam Zheng --- block/curl.c | 123 ++++++++++++++++++++++++++--------------------------------- 1 file changed, 54 insertions(+), 69 deletions(-) diff --git a/block/curl.c b/block/curl.c index 61d0d6f..0c4d865 100644 --- a/block/curl.c +++ b/block/curl.c @@ -43,10 +43,6 @@ #define SECTOR_SIZE 512 #define READ_AHEAD_SIZE (256 * 1024) -#define FIND_RET_NONE 0 -#define FIND_RET_OK 1 -#define FIND_RET_WAIT 2 - struct BDRVCURLState; typedef struct CURLAIOCB { @@ -61,6 +57,16 @@ typedef struct CURLAIOCB { size_t end; } CURLAIOCB; +typedef struct CURLDataCache { + char *data; + size_t base_pos; + size_t data_len; + size_t write_pos; + /* Ref count for CURLState */ + int use_count; + QLIST_ENTRY(CURLDataCache) next; +} CURLDataCache; + typedef struct CURLState { struct BDRVCURLState *s; @@ -91,6 +97,8 @@ typedef struct BDRVCURLState { char *url; size_t readahead_size; QEMUTimer *timer; + /* List of data cache ordered by access, freed from tail */ + QLIST_HEAD(, CURLDataCache) cache; /* Whether http server 
accept range in header */ bool accept_range; } BDRVCURLState; @@ -99,6 +107,19 @@ static void curl_clean_state(CURLState *s); static void curl_fd_handler(void *arg); static int curl_aio_flush(void *opaque); +static CURLDataCache *curl_find_cache(BDRVCURLState *bs, + size_t start, size_t len) +{ + CURLDataCache *c; + QLIST_FOREACH(c, &bs->cache, next) { + if (start >= c->base_pos && + start + len <= c->base_pos + c->write_pos) { + return c; + } + } + return NULL; +} + static int curl_sock_cb(CURL *curl, curl_socket_t fd, int action, void *s, void *sp) { @@ -182,6 +203,23 @@ static int curl_multi_timer_cb(CURLM *multi, long timeout_ms, void *s) return 0; } +static void curl_complete_io(BDRVCURLState *bs, CURLAIOCB *acb, + CURLDataCache *cache) +{ + size_t aio_base = acb->sector_num * SECTOR_SIZE; + size_t aio_bytes = acb->nb_sectors * SECTOR_SIZE; + size_t off = aio_base - cache->base_pos; + + qemu_iovec_from_buf(acb->qiov, 0, cache->data + off, aio_bytes); + acb->common.cb(acb->common.opaque, 0); + DPRINTF("AIO Request OK: %10zd %10zd\n", aio_base, aio_bytes); + qemu_aio_release(acb); + acb = NULL; + /* Move cache next in the list */ + QLIST_REMOVE(cache, next); + QLIST_INSERT_HEAD(&bs->cache, cache, next); +} + static size_t curl_read_cb(void *ptr, size_t size, size_t nmemb, void *opaque) { CURLState *s = ((CURLState*)opaque); @@ -215,59 +253,6 @@ read_end: return realsize; } -static int curl_find_buf(BDRVCURLState *s, size_t start, size_t len, - CURLAIOCB *acb) -{ - int i; - size_t end = start + len; - - for (i=0; istates[i]; - size_t buf_end = (state->buf_start + state->buf_off); - size_t buf_fend = (state->buf_start + state->buf_len); - - if (!state->orig_buf) - continue; - if (!state->buf_off) - continue; - - // Does the existing buffer cover our section? 
- if ((start >= state->buf_start) && - (start <= buf_end) && - (end >= state->buf_start) && - (end <= buf_end)) - { - char *buf = state->orig_buf + (start - state->buf_start); - - qemu_iovec_from_buf(acb->qiov, 0, buf, len); - acb->common.cb(acb->common.opaque, 0); - - return FIND_RET_OK; - } - - // Wait for unfinished chunks - if ((start >= state->buf_start) && - (start <= buf_fend) && - (end >= state->buf_start) && - (end <= buf_fend)) - { - int j; - - acb->start = start - state->buf_start; - acb->end = acb->start + len; - - for (j=0; jacb[j]) { - state->acb[j] = acb; - return FIND_RET_WAIT; - } - } - } - } - - return FIND_RET_NONE; -} - static void curl_fd_handler(void *arg) { CURLSockInfo *sock = (CURLSockInfo *)arg; @@ -300,7 +285,9 @@ static void curl_fd_handler(void *arg) case CURLMSG_DONE: { CURLState *state = NULL; - curl_easy_getinfo(msg->easy_handle, CURLINFO_PRIVATE, (char**)&state); + curl_easy_getinfo(msg->easy_handle, + CURLINFO_PRIVATE, + (char **)&state); /* ACBs for successful messages get completed in curl_read_cb */ if (msg->data.result != CURLE_OK) { @@ -586,26 +573,24 @@ static const AIOCBInfo curl_aiocb_info = { static void curl_readv_bh_cb(void *p) { CURLState *state; - + CURLDataCache *cache = NULL; CURLAIOCB *acb = p; BDRVCURLState *s = acb->common.bs->opaque; + size_t aio_base, aio_bytes; qemu_bh_delete(acb->bh); acb->bh = NULL; + aio_base = acb->sector_num * SECTOR_SIZE; + aio_bytes = acb->nb_sectors * SECTOR_SIZE; + size_t start = acb->sector_num * SECTOR_SIZE; size_t end; - // In case we have the requested data already (e.g. read-ahead), - // we can just call the callback and be done. - switch (curl_find_buf(s, start, acb->nb_sectors * SECTOR_SIZE, acb)) { - case FIND_RET_OK: - qemu_aio_release(acb); - // fall through - case FIND_RET_WAIT: - return; - default: - break; + cache = curl_find_cache(s, aio_base, aio_bytes); + if (cache) { + curl_complete_io(s, acb, cache); + return; } // No cache found, so let's start a new request