From patchwork Wed Feb 27 00:47:09 2013
X-Patchwork-Submitter: Keith Busch
X-Patchwork-Id: 223452
From: Keith Busch <keith.busch@intel.com>
To: qemu-devel@nongnu.org
Date: Tue, 26 Feb 2013 17:47:09 -0700
Message-Id: <1361926034-21824-7-git-send-email-keith.busch@intel.com>
In-Reply-To: <1361926034-21824-1-git-send-email-keith.busch@intel.com>
References: <1361926034-21824-1-git-send-email-keith.busch@intel.com>
Cc: Keith Busch <keith.busch@intel.com>
Subject: [Qemu-devel] [PATCH 06/11] QEMU NVMe: Implement flush and dsm

Signed-off-by: Keith Busch <keith.busch@intel.com>
---
 hw/nvme.c |   72 +++++++++++++++++++++++++++++++++++++++++++++++++++++++------
 hw/nvme.h |    2 +
 2 files changed, 67 insertions(+), 7 deletions(-)

diff --git a/hw/nvme.c b/hw/nvme.c
index 69136e0..087fce9 100644
--- a/hw/nvme.c
+++ b/hw/nvme.c
@@ -327,7 +327,6 @@ static void nvme_rw_cb(void *opaque, int ret)
     n = sq->ctrl;
     cq = n->cq[sq->cqid];
     qemu_sglist_destroy(&req->qsg);
-    req->aiocb = NULL;
 
     nvme_update_stats(ns, req->nlb, req->rw);
     if (!req->rw) {
@@ -391,10 +390,28 @@ static void nvme_dsm_dealloc(NvmeNamespace *ns, uint64_t slba, uint64_t nlb)
     }
 }
 
+static void nvme_dsm_cb(void *opaque, int ret)
+{
+    NvmeRequest *req = opaque;
+    NvmeSQueue *sq = req->sq;
+    NvmeCtrl *n = sq->ctrl;
+    NvmeCQueue *cq = n->cq[sq->cqid];
+
+    if (ret && !req->cqe.status) {
+        req->cqe.status = NVME_INTERNAL_DEV_ERROR << 1;
+    }
+    if (!(--req->aio_count)) {
+        g_free(req->aiocb_dsm);
+        nvme_enqueue_req_completion(cq, req);
+    }
+}
+
 static uint16_t nvme_dsm(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
     NvmeRequest *req)
 {
     uint16_t nr = (cmd->cdw10 & 0xff) + 1;
+    uint8_t lba_index = NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas);
+    uint8_t data_shift = ns->id_ns.lbaf[lba_index].ds;
     NvmeDsmRange range[nr];
 
     if (nvme_dma_prp(cmd->prp1, cmd->prp2, sizeof(range), n, (uint8_t *)range,
@@ -406,17 +423,55 @@ static uint16_t nvme_dsm(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
         int i;
         uint64_t slba;
         uint32_t nlb;
+        req->aiocb_dsm = g_malloc(nr * sizeof (*req->aiocb_dsm));
+        req->aio_count = nr;
         for (i = 0; i < nr; i++) {
             slba = range[i].slba;
             nlb = range[i].nlb;
             if (slba + nlb > ns->id_ns.ncap) {
-                return NVME_LBA_RANGE | NVME_DNR;
+                req->aio_count -= (nr - i);
+                if (req->aio_count) {
+                    req->cqe.status = NVME_LBA_RANGE | NVME_DNR;
+                    break;
+                }
+                else {
+                    g_free(req->aiocb_dsm);
+                    return NVME_LBA_RANGE | NVME_DNR;
+                }
             }
             nvme_dsm_dealloc(ns, slba, nlb);
-            /* TODO: send bdrv_aio_discard request */
+            req->aiocb_dsm[i] = bdrv_aio_discard(n->conf.bs,
+                ns->start_block + (slba << (data_shift - 9)),
+                (nlb + 1) << (data_shift - 9), nvme_dsm_cb, req);
         }
     }
-    return NVME_SUCCESS;
+    if (!req->aio_count) {
+        return NVME_SUCCESS;
+    }
+    return NVME_NO_COMPLETE;
+}
+
+static void nvme_flush_cb(void *opaque, int ret)
+{
+    NvmeRequest *req = opaque;
+    NvmeSQueue *sq = req->sq;
+    NvmeCtrl *n = sq->ctrl;
+    NvmeCQueue *cq = n->cq[sq->cqid];
+
+    if (!ret) {
+        req->cqe.status = NVME_SUCCESS << 1;
+    } else {
+        req->cqe.status = NVME_INTERNAL_DEV_ERROR << 1;
+    }
+    nvme_enqueue_req_completion(cq, req);
+}
+
+static uint16_t nvme_flush(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
+    NvmeRequest *req)
+{
+    req->ns = ns;
+    bdrv_aio_flush(n->conf.bs, nvme_flush_cb, req);
+    return NVME_NO_COMPLETE;
 }
 
 static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
@@ -429,7 +484,7 @@ static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
     ns = &n->namespaces[cmd->nsid - 1];
     switch (cmd->opcode) {
     case NVME_CMD_FLUSH:
-        return NVME_SUCCESS;
+        return nvme_flush(n, ns, cmd, req);
     case NVME_CMD_WRITE:
     case NVME_CMD_READ:
         return nvme_rw(n, ns, cmd, req);
@@ -466,8 +521,9 @@ static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeCmd *cmd)
     sq = n->sq[c->qid];
     while (!QTAILQ_EMPTY(&sq->out_req_list)) {
         req = QTAILQ_FIRST(&sq->out_req_list);
-        assert(req->aiocb);
-        bdrv_aio_cancel(req->aiocb);
+        if (req->aiocb) {
+            bdrv_aio_cancel(req->aiocb);
+        }
     }
     if (!nvme_check_cqid(n, sq->cqid)) {
         NvmeCQueue *cq = n->cq[sq->cqid];
@@ -1069,6 +1125,8 @@ static void nvme_sq_process(void *opaque)
         QTAILQ_INSERT_TAIL(&sq->out_req_list, req, entry);
         memset(&req->cqe, 0, sizeof(req->cqe));
         req->cqe.cid = cmd.cid;
+        req->aiocb = NULL;
+        req->aiocb_dsm = NULL;
 
         status = sq->id ? nvme_io_cmd(n, &cmd, req) :
             nvme_admin_cmd(n, &cmd, req);
diff --git a/hw/nvme.h b/hw/nvme.h
index 964e91d..4dabb49 100644
--- a/hw/nvme.h
+++ b/hw/nvme.h
@@ -591,9 +591,11 @@ typedef struct NvmeRequest {
     struct NvmeSQueue *sq;
     struct NvmeNamespace *ns;
     BlockDriverAIOCB *aiocb;
+    BlockDriverAIOCB **aiocb_dsm;
     uint64_t slba;
     uint16_t rw;
     uint16_t nlb;
+    uint16_t aio_count;
     NvmeCqe cqe;
     QEMUSGList qsg;
     QTAILQ_ENTRY(NvmeRequest)entry;
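
A note on units, for readers following the dsm path: the DSM ranges are expressed in namespace logical blocks of (1 << data_shift) bytes, while bdrv_aio_discard() takes 512-byte block-layer sectors, hence the shifts by (data_shift - 9) in nvme_dsm() above. The standalone sketch below is not part of the patch; it just reproduces that conversion with made-up sample values (start_block, data_shift, slba, nlb are all hypothetical here):

/* Illustrative only -- not part of hw/nvme.c.  Mirrors the arguments the
 * patch passes to bdrv_aio_discard() in nvme_dsm(). */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t start_block = 2048; /* hypothetical namespace offset, in 512-byte sectors */
    uint8_t data_shift = 12;     /* hypothetical 4 KiB logical block size */
    uint64_t slba = 16;          /* range start from a DSM range entry, in LBAs */
    uint32_t nlb = 7;            /* range length field from the same entry */

    /* Same expressions as the bdrv_aio_discard() call in nvme_dsm(). */
    uint64_t sector_num = start_block + (slba << (data_shift - 9));
    uint64_t nb_sectors = (uint64_t)(nlb + 1) << (data_shift - 9);

    printf("discard sector_num=%" PRIu64 " nb_sectors=%" PRIu64 "\n",
           sector_num, nb_sectors);
    return 0;
}

With 512-byte logical blocks data_shift is 9, so the shift degenerates to zero and LBAs map one-to-one onto block-layer sectors.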