From patchwork Tue Aug 8 14:29:44 2017
X-Patchwork-Id: 799214
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
To: qemu-block@nongnu.org, qemu-devel@nongnu.org
Cc: kwolf@redhat.com, vsementsov@virtuozzo.com, mreitz@redhat.com, den@openvz.org, pbonzini@redhat.com
Date: Tue, 8 Aug 2017 17:29:44 +0300
Message-Id: <20170808142944.145159-1-vsementsov@virtuozzo.com>
Subject: [Qemu-devel] [PATCH for 2.10] block/nbd-client: always return EIO on and after the first io channel error

Do not communicate after the first error, to avoid sending further requests
through a broken channel. The only exception is nbd_client_close(), which
still tries to send NBD_CMD_DISC anyway.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
---

Hi all.

Here is a patch fixing the problem noted in
  [PATCH 06/17] block/nbd-client: fix nbd_read_reply_entry
and
  [PATCH 17/17] block/nbd-client: always return EIO on and after the first io channel error
and discussed on the list.

If it is applied to 2.10, I will rebase my 'nbd client refactoring and fixing'
series on top of it (for 2.11). If not, I would prefer not to rebase that
series, so in that case do not apply this patch for 2.11.
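
For reviewers who want the idea without reading the whole diff: the patch
introduces a sticky per-session flag (eio_to_all) that is set on the first
channel error and checked by every I/O path, which then fails with -EIO
instead of touching the possibly broken channel. The toy program below is
only a simplified sketch of that pattern; every name except eio_to_all is
made up for illustration and is not QEMU code:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for an NBD client session. */
typedef struct {
    bool eio_to_all;   /* set on the first channel error, never cleared */
} DemoSession;

/* Stand-in for a transport write; fails on the second call to simulate a
 * channel that breaks mid-stream. */
static int demo_channel_send(int *calls)
{
    return (*calls)++ == 1 ? -EPIPE : 0;
}

static int demo_request(DemoSession *s, int *calls)
{
    if (s->eio_to_all) {
        return -EIO;            /* never touch a possibly broken channel */
    }
    if (demo_channel_send(calls) < 0) {
        s->eio_to_all = true;   /* first failure poisons the session */
        return -EIO;
    }
    return 0;
}

int main(void)
{
    DemoSession s = { .eio_to_all = false };
    int calls = 0;

    for (int i = 0; i < 3; i++) {
        printf("request %d -> %d\n", i, demo_request(&s, &calls));
    }
    return 0;
}

Compiled and run, it prints 0 for the first request and -EIO for the failing
request and for every request after it, which is the behaviour the patch
establishes for the NBD client session (with the single exception of
NBD_CMD_DISC at close time).
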
 block/nbd-client.h |  1 +
 block/nbd-client.c | 58 +++++++++++++++++++++++++++++++++++++-----------------
 2 files changed, 41 insertions(+), 18 deletions(-)

diff --git a/block/nbd-client.h b/block/nbd-client.h
index df80771357..28db9922c8 100644
--- a/block/nbd-client.h
+++ b/block/nbd-client.h
@@ -29,6 +29,7 @@ typedef struct NBDClientSession {
 
     Coroutine *recv_coroutine[MAX_NBD_REQUESTS];
     NBDReply reply;
+    bool eio_to_all;
 } NBDClientSession;
 
 NBDClientSession *nbd_get_client_session(BlockDriverState *bs);
diff --git a/block/nbd-client.c b/block/nbd-client.c
index 25dd28406b..1282b2484e 100644
--- a/block/nbd-client.c
+++ b/block/nbd-client.c
@@ -49,6 +49,8 @@ static void nbd_teardown_connection(BlockDriverState *bs)
 {
     NBDClientSession *client = nbd_get_client_session(bs);
 
+    client->eio_to_all = true;
+
     if (!client->ioc) { /* Already closed */
         return;
     }
@@ -74,12 +76,16 @@ static coroutine_fn void nbd_read_reply_entry(void *opaque)
     Error *local_err = NULL;
 
     for (;;) {
+        if (s->eio_to_all) {
+            break;
+        }
+
         assert(s->reply.handle == 0);
         ret = nbd_receive_reply(s->ioc, &s->reply, &local_err);
         if (ret < 0) {
             error_report_err(local_err);
         }
-        if (ret <= 0) {
+        if (ret <= 0 || s->eio_to_all) {
             break;
         }
 
@@ -107,6 +113,7 @@ static coroutine_fn void nbd_read_reply_entry(void *opaque)
         qemu_coroutine_yield();
     }
 
+    s->eio_to_all = true;
     nbd_recv_coroutines_enter_all(s);
     s->read_reply_co = NULL;
 }
@@ -118,6 +125,10 @@ static int nbd_co_send_request(BlockDriverState *bs,
     NBDClientSession *s = nbd_get_client_session(bs);
     int rc, ret, i;
 
+    if (s->eio_to_all) {
+        return -EIO;
+    }
+
     qemu_co_mutex_lock(&s->send_mutex);
     while (s->in_flight == MAX_NBD_REQUESTS) {
         qemu_co_queue_wait(&s->free_sema, &s->send_mutex);
@@ -135,15 +146,15 @@ static int nbd_co_send_request(BlockDriverState *bs,
     assert(i < MAX_NBD_REQUESTS);
     request->handle = INDEX_TO_HANDLE(s, i);
 
-    if (!s->ioc) {
+    if (s->eio_to_all) {
         qemu_co_mutex_unlock(&s->send_mutex);
-        return -EPIPE;
+        return -EIO;
     }
 
     if (qiov) {
         qio_channel_set_cork(s->ioc, true);
         rc = nbd_send_request(s->ioc, request);
-        if (rc >= 0) {
+        if (rc >= 0 && !s->eio_to_all) {
             ret = nbd_rwv(s->ioc, qiov->iov, qiov->niov, request->len, false,
                           NULL);
             if (ret != request->len) {
@@ -155,7 +166,8 @@ static int nbd_co_send_request(BlockDriverState *bs,
         rc = nbd_send_request(s->ioc, request);
     }
     qemu_co_mutex_unlock(&s->send_mutex);
-    return rc;
+
+    return s->eio_to_all ? -EIO : rc;
 }
 
 static void nbd_co_receive_reply(NBDClientSession *s,
@@ -169,13 +181,13 @@ static void nbd_co_receive_reply(NBDClientSession *s,
     qemu_coroutine_yield();
     *reply = s->reply;
     if (reply->handle != request->handle ||
-        !s->ioc) {
+        !s->ioc || s->eio_to_all) {
         reply->error = EIO;
     } else {
         if (qiov && reply->error == 0) {
             ret = nbd_rwv(s->ioc, qiov->iov, qiov->niov, request->len, true,
                           NULL);
-            if (ret != request->len) {
+            if (ret != request->len || s->eio_to_all) {
                 reply->error = EIO;
             }
         }
@@ -225,8 +237,10 @@ int nbd_client_co_preadv(BlockDriverState *bs, uint64_t offset,
     } else {
         nbd_co_receive_reply(client, &request, &reply, qiov);
     }
-    nbd_coroutine_end(bs, &request);
-    return -reply.error;
+    if (request.handle != 0) {
+        nbd_coroutine_end(bs, &request);
+    }
+    return client->eio_to_all ? -EIO : -reply.error;
 }
 
 int nbd_client_co_pwritev(BlockDriverState *bs, uint64_t offset,
@@ -254,8 +268,10 @@ int nbd_client_co_pwritev(BlockDriverState *bs, uint64_t offset,
     } else {
         nbd_co_receive_reply(client, &request, &reply, NULL);
     }
-    nbd_coroutine_end(bs, &request);
-    return -reply.error;
+    if (request.handle != 0) {
+        nbd_coroutine_end(bs, &request);
+    }
+    return client->eio_to_all ? -EIO : -reply.error;
 }
 
 int nbd_client_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
@@ -288,8 +304,10 @@ int nbd_client_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
     } else {
         nbd_co_receive_reply(client, &request, &reply, NULL);
     }
-    nbd_coroutine_end(bs, &request);
-    return -reply.error;
+    if (request.handle != 0) {
+        nbd_coroutine_end(bs, &request);
+    }
+    return client->eio_to_all ? -EIO : -reply.error;
 }
 
 int nbd_client_co_flush(BlockDriverState *bs)
@@ -312,8 +330,10 @@ int nbd_client_co_flush(BlockDriverState *bs)
     } else {
         nbd_co_receive_reply(client, &request, &reply, NULL);
     }
-    nbd_coroutine_end(bs, &request);
-    return -reply.error;
+    if (request.handle != 0) {
+        nbd_coroutine_end(bs, &request);
+    }
+    return client->eio_to_all ? -EIO : -reply.error;
 }
 
 int nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset, int bytes)
@@ -337,9 +357,10 @@ int nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset, int bytes)
     } else {
         nbd_co_receive_reply(client, &request, &reply, NULL);
     }
-    nbd_coroutine_end(bs, &request);
-    return -reply.error;
-
+    if (request.handle != 0) {
+        nbd_coroutine_end(bs, &request);
+    }
+    return client->eio_to_all ? -EIO : -reply.error;
 }
 
 void nbd_client_detach_aio_context(BlockDriverState *bs)
@@ -384,6 +405,7 @@ int nbd_client_init(BlockDriverState *bs,
     logout("session init %s\n", export);
     qio_channel_set_blocking(QIO_CHANNEL(sioc), true, NULL);
 
+    client->eio_to_all = false;
     client->info.request_sizes = true;
     ret = nbd_receive_negotiate(QIO_CHANNEL(sioc), export, tlscreds, hostname,