| Message ID | 1315495505-28906-13-git-send-email-pbonzini@redhat.com |
|---|---|
| State | New |
| Headers | show |
On 08/09/11 16:25, Paolo Bonzini wrote: > qemu-nbd has a limit of slightly less than 1M per request. Work > around this in the nbd block driver. > > Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> > --- > block/nbd.c | 52 ++++++++++++++++++++++++++++++++++++++++++++++------ > 1 files changed, 46 insertions(+), 6 deletions(-) > > diff --git a/block/nbd.c b/block/nbd.c > index 5a75263..468a517 100644 > --- a/block/nbd.c > +++ b/block/nbd.c > @@ -213,8 +213,9 @@ static int nbd_open(BlockDriverState *bs, const char* filename, int flags) > return result; > } > > -static int nbd_co_readv(BlockDriverState *bs, int64_t sector_num, > - int nb_sectors, QEMUIOVector *qiov) > +static int nbd_co_readv_1(BlockDriverState *bs, int64_t sector_num, > + int nb_sectors, QEMUIOVector *qiov, > + int offset) > { > BDRVNBDState *s = bs->opaque; > struct nbd_request request; > @@ -241,7 +242,7 @@ static int nbd_co_readv(BlockDriverState *bs, int64_t sector_num, > reply.error = EIO; > goto done; > } > - if (qemu_co_recvv(s->sock, qiov->iov, request.len, 0) != request.len) { > + if (qemu_co_recvv(s->sock, qiov->iov, request.len, offset) != request.len) { > reply.error = EIO; > } > > @@ -251,8 +252,9 @@ done: > > } > > -static int nbd_co_writev(BlockDriverState *bs, int64_t sector_num, > - int nb_sectors, QEMUIOVector *qiov) > +static int nbd_co_writev_1(BlockDriverState *bs, int64_t sector_num, > + int nb_sectors, QEMUIOVector *qiov, > + int offset) > { > BDRVNBDState *s = bs->opaque; > struct nbd_request request; > @@ -273,7 +275,7 @@ static int nbd_co_writev(BlockDriverState *bs, int64_t sector_num, > reply.error = errno; > goto done; > } > - ret = qemu_co_sendv(s->sock, qiov->iov, request.len, 0); > + ret = qemu_co_sendv(s->sock, qiov->iov, request.len, offset); > if (ret != request.len) { > reply.error = EIO; > goto done; > @@ -291,6 +293,44 @@ done: > return -reply.error; > } > > +/* qemu-nbd has a limit of slightly less than 1M per request. 
For safety, > + * transfer at most 512K per request. */ > +#define NBD_MAX_SECTORS 1024 As far as I'm aware, the limit of 1MiB - header size is common to all NBD servers. I'm not aware of anything at all that'll fail on a 768K request but succeed in the exact same circumstances on a 512K request. Again, this is a performance consideration - each request is relatively slow, so you don't want them to be unnecessarily small. > + > +static int nbd_co_readv(BlockDriverState *bs, int64_t sector_num, > + int nb_sectors, QEMUIOVector *qiov) > +{ > + int offset = 0; > + int ret; > + while (nb_sectors > NBD_MAX_SECTORS) { > + ret = nbd_co_readv_1(bs, sector_num, NBD_MAX_SECTORS, qiov, offset); > + if (ret < 0) { > + return ret; > + } > + offset += NBD_MAX_SECTORS * 512; > + sector_num += NBD_MAX_SECTORS; > + nb_sectors -= NBD_MAX_SECTORS; > + } > + return nbd_co_readv_1(bs, sector_num, nb_sectors, qiov, offset); > +} > + > +static int nbd_co_writev(BlockDriverState *bs, int64_t sector_num, > + int nb_sectors, QEMUIOVector *qiov) > +{ > + int offset = 0; > + int ret; > + while (nb_sectors > NBD_MAX_SECTORS) { > + ret = nbd_co_writev_1(bs, sector_num, NBD_MAX_SECTORS, qiov, offset); > + if (ret < 0) { > + return ret; > + } > + offset += NBD_MAX_SECTORS * 512; > + sector_num += NBD_MAX_SECTORS; > + nb_sectors -= NBD_MAX_SECTORS; > + } > + return nbd_co_writev_1(bs, sector_num, nb_sectors, qiov, offset); > +} > + > static int nbd_co_flush(BlockDriverState *bs) > { > BDRVNBDState *s = bs->opaque;
On 09/09/2011 04:52 PM, Nicholas Thomas wrote: >> > +/* qemu-nbd has a limit of slightly less than 1M per request. For safety, >> > + * transfer at most 512K per request. */ >> > +#define NBD_MAX_SECTORS 1024 > > As far as I'm aware, the limit of 1MiB - header size is common to all > NBD servers. I'm not aware of anything at all that'll fail on a 768K > request but succeed in the exact same circumstances on a 512K request. > Again, this is a performance consideration - each request is relatively > slow, so you don't want them to be unnecessarily small. Yes, it should probably be bumped to 1536 or 2040 (to keep requests 4k-aligned). I wasn't sure about the limit. I've never seen requests that big anyway. Paolo
diff --git a/block/nbd.c b/block/nbd.c index 5a75263..468a517 100644 --- a/block/nbd.c +++ b/block/nbd.c @@ -213,8 +213,9 @@ static int nbd_open(BlockDriverState *bs, const char* filename, int flags) return result; } -static int nbd_co_readv(BlockDriverState *bs, int64_t sector_num, - int nb_sectors, QEMUIOVector *qiov) +static int nbd_co_readv_1(BlockDriverState *bs, int64_t sector_num, + int nb_sectors, QEMUIOVector *qiov, + int offset) { BDRVNBDState *s = bs->opaque; struct nbd_request request; @@ -241,7 +242,7 @@ static int nbd_co_readv(BlockDriverState *bs, int64_t sector_num, reply.error = EIO; goto done; } - if (qemu_co_recvv(s->sock, qiov->iov, request.len, 0) != request.len) { + if (qemu_co_recvv(s->sock, qiov->iov, request.len, offset) != request.len) { reply.error = EIO; } @@ -251,8 +252,9 @@ done: } -static int nbd_co_writev(BlockDriverState *bs, int64_t sector_num, - int nb_sectors, QEMUIOVector *qiov) +static int nbd_co_writev_1(BlockDriverState *bs, int64_t sector_num, + int nb_sectors, QEMUIOVector *qiov, + int offset) { BDRVNBDState *s = bs->opaque; struct nbd_request request; @@ -273,7 +275,7 @@ static int nbd_co_writev(BlockDriverState *bs, int64_t sector_num, reply.error = errno; goto done; } - ret = qemu_co_sendv(s->sock, qiov->iov, request.len, 0); + ret = qemu_co_sendv(s->sock, qiov->iov, request.len, offset); if (ret != request.len) { reply.error = EIO; goto done; @@ -291,6 +293,44 @@ done: return -reply.error; } +/* qemu-nbd has a limit of slightly less than 1M per request. For safety, + * transfer at most 512K per request. 
*/ +#define NBD_MAX_SECTORS 1024 + +static int nbd_co_readv(BlockDriverState *bs, int64_t sector_num, + int nb_sectors, QEMUIOVector *qiov) +{ + int offset = 0; + int ret; + while (nb_sectors > NBD_MAX_SECTORS) { + ret = nbd_co_readv_1(bs, sector_num, NBD_MAX_SECTORS, qiov, offset); + if (ret < 0) { + return ret; + } + offset += NBD_MAX_SECTORS * 512; + sector_num += NBD_MAX_SECTORS; + nb_sectors -= NBD_MAX_SECTORS; + } + return nbd_co_readv_1(bs, sector_num, nb_sectors, qiov, offset); +} + +static int nbd_co_writev(BlockDriverState *bs, int64_t sector_num, + int nb_sectors, QEMUIOVector *qiov) +{ + int offset = 0; + int ret; + while (nb_sectors > NBD_MAX_SECTORS) { + ret = nbd_co_writev_1(bs, sector_num, NBD_MAX_SECTORS, qiov, offset); + if (ret < 0) { + return ret; + } + offset += NBD_MAX_SECTORS * 512; + sector_num += NBD_MAX_SECTORS; + nb_sectors -= NBD_MAX_SECTORS; + } + return nbd_co_writev_1(bs, sector_num, nb_sectors, qiov, offset); +} + static int nbd_co_flush(BlockDriverState *bs) { BDRVNBDState *s = bs->opaque;
qemu-nbd has a limit of slightly less than 1M per request. Work around this in the nbd block driver. Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> --- block/nbd.c | 52 ++++++++++++++++++++++++++++++++++++++++++++++------ 1 files changed, 46 insertions(+), 6 deletions(-)