9p: init_in_iov_from_pdu can truncate the size

Message ID 20191219004251.23763-1-sstabellini@kernel.org
State New
Series 9p: init_in_iov_from_pdu can truncate the size

Commit Message

Stefano Stabellini Dec. 19, 2019, 12:42 a.m. UTC
From: Stefano Stabellini <stefano.stabellini@xilinx.com>

init_in_iov_from_pdu might not be able to allocate the full buffer size
requested, which comes from the client and could be larger than the
transport has available at the time of the request. Specifically, this
can happen with read operations, with the client requesting a read up to
the max allowed, which might be more than the transport has available at
the time.

Today both the Xen and Virtio implementations of init_in_iov_from_pdu
throw an error in that case.

Instead, change the V9fsTransport interface so that the size becomes a
pointer and can be limited by the implementation of
init_in_iov_from_pdu.

Change both the Xen and Virtio implementations to set the size to the
size of the buffer they managed to allocate, instead of throwing an
error.

Signed-off-by: Stefano Stabellini <stefano.stabellini@xilinx.com>
CC: groug@kaod.org
CC: anthony.perard@citrix.com
---
 hw/9pfs/9p.c               | 22 +++++++++++++++-------
 hw/9pfs/9p.h               |  2 +-
 hw/9pfs/virtio-9p-device.c | 10 +++-------
 hw/9pfs/xen-9p-backend.c   | 12 ++++--------
 4 files changed, 23 insertions(+), 23 deletions(-)

Comments

Christian Schoenebeck Dec. 19, 2019, 5:08 p.m. UTC | #1
On Thursday, 19 December 2019 01:42:51 CET Stefano Stabellini wrote:
> From: Stefano Stabellini <stefano.stabellini@xilinx.com>
> 
> init_in_iov_from_pdu might not be able to allocate the full buffer size
> requested, which comes from the client and could be larger than the
> transport has available at the time of the request. Specifically, this
> can happen with read operations, with the client requesting a read up to
> the max allowed, which might be more than the transport has available at
> the time.

I haven't looked thoroughly at this yet, but that's about addressing a 
temporary, not a permanent transport buffer size limitation, right? Because if 
it was a permanent one, then probably an adjusted (lowered) msize should be 
returned on R_version response to client as well.
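
To illustrate, a permanent limit would rather be handled by clamping the
negotiated msize in the R_version handler, roughly like this (hypothetical
sketch; 'client_requested_msize' and 'transport_max_in_size' are made-up
names, not existing fields):

    /* hypothetical: never advertise more msize than the transport can map */
    s->msize = MIN(client_requested_msize, transport_max_in_size);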

I wonder why I never triggered this issue, because I was experimenting with 
huge msize values for 9pfs performance checks. Was there anything specific to 
trigger this issue?

> Today both the Xen and Virtio implementations of init_in_iov_from_pdu
> throw an error in that case.
> 
> Instead, change the V9fsTransport interface so that the size becomes a
> pointer and can be limited by the implementation of
> init_in_iov_from_pdu.
> 
> Change both the Xen and Virtio implementations to set the size to the
> size of the buffer they managed to allocate, instead of throwing an
> error.
> 
> Signed-off-by: Stefano Stabellini <stefano.stabellini@xilinx.com>
> CC: groug@kaod.org
> CC: anthony.perard@citrix.com
> ---
>  hw/9pfs/9p.c               | 22 +++++++++++++++-------
>  hw/9pfs/9p.h               |  2 +-
>  hw/9pfs/virtio-9p-device.c | 10 +++-------
>  hw/9pfs/xen-9p-backend.c   | 12 ++++--------
>  4 files changed, 23 insertions(+), 23 deletions(-)
> 
> diff --git a/hw/9pfs/9p.c b/hw/9pfs/9p.c
> index bdf7919abf..d6c89ce608 100644
> --- a/hw/9pfs/9p.c
> +++ b/hw/9pfs/9p.c
> @@ -1682,22 +1682,30 @@ out_nofid:
>   * with qemu_iovec_destroy().
>   */
>  static void v9fs_init_qiov_from_pdu(QEMUIOVector *qiov, V9fsPDU *pdu,
> -                                    size_t skip, size_t size,
> +                                    size_t skip, size_t *size,
>                                      bool is_write)
>  {
>      QEMUIOVector elem;
>      struct iovec *iov;
>      unsigned int niov;
> +    size_t alloc_size = *size + skip;
> 
>      if (is_write) {
> -        pdu->s->transport->init_out_iov_from_pdu(pdu, &iov, &niov, size +
> skip); +        pdu->s->transport->init_out_iov_from_pdu(pdu, &iov, &niov,
> alloc_size); } else {
> -        pdu->s->transport->init_in_iov_from_pdu(pdu, &iov, &niov, size +
> skip); +        pdu->s->transport->init_in_iov_from_pdu(pdu, &iov, &niov,
> &alloc_size); +    }
> +
> +    if (alloc_size < skip)
> +    {
> +        *size = 0;
> +    } else {
> +        *size = alloc_size - skip;
>      }
>

Code style nitpicking:  

ERROR: that open brace { should be on the previous line
#56: FILE: hw/9pfs/9p.c:1699:
+    if (alloc_size < skip)
+    {

> 
>  static int v9fs_xattr_read(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp,
> @@ -1722,7 +1730,7 @@ static int v9fs_xattr_read(V9fsState *s, V9fsPDU *pdu,
> V9fsFidState *fidp, }
>      offset += err;
> 
> -    v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset, read_count, false);
> +    v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset, &read_count, false);
>      err = v9fs_pack(qiov_full.iov, qiov_full.niov, 0,
>                      ((char *)fidp->fs.xattr.value) + off,
>                      read_count);
> @@ -1852,7 +1860,7 @@ static void coroutine_fn v9fs_read(void *opaque)
>          QEMUIOVector qiov;
>          int32_t len;
> 
> -        v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset + 4, max_count,
> false); +        v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset + 4,
> &max_count, false); qemu_iovec_init(&qiov, qiov_full.niov);
>          do {
>              qemu_iovec_reset(&qiov);
> @@ -2085,7 +2093,7 @@ static void coroutine_fn v9fs_write(void *opaque)
>          return;
>      }
>      offset += err;
> -    v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset, count, true);
> +    v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset, &count, true);
>      trace_v9fs_write(pdu->tag, pdu->id, fid, off, count, qiov_full.niov);
> 
>      fidp = get_fid(pdu, fid);
> diff --git a/hw/9pfs/9p.h b/hw/9pfs/9p.h
> index 8883761b2c..50f7e21da6 100644
> --- a/hw/9pfs/9p.h
> +++ b/hw/9pfs/9p.h
> @@ -365,7 +365,7 @@ struct V9fsTransport {
>      ssize_t     (*pdu_vunmarshal)(V9fsPDU *pdu, size_t offset, const char
> *fmt, va_list ap);
>      void        (*init_in_iov_from_pdu)(V9fsPDU *pdu, struct iovec **piov,
> -                                        unsigned int *pniov, size_t size);
> +                                        unsigned int *pniov, size_t *size);
> void        (*init_out_iov_from_pdu)(V9fsPDU *pdu, struct iovec **piov,
> unsigned int *pniov, size_t size); void        (*push_and_notify)(V9fsPDU
> *pdu);
> diff --git a/hw/9pfs/virtio-9p-device.c b/hw/9pfs/virtio-9p-device.c
> index 775e8ff766..68873c3f5f 100644
> --- a/hw/9pfs/virtio-9p-device.c
> +++ b/hw/9pfs/virtio-9p-device.c
> @@ -145,19 +145,15 @@ static ssize_t virtio_pdu_vunmarshal(V9fsPDU *pdu,
> size_t offset, }
> 
>  static void virtio_init_in_iov_from_pdu(V9fsPDU *pdu, struct iovec **piov,
> -                                        unsigned int *pniov, size_t size)
> +                                        unsigned int *pniov, size_t *size)
>  {
>      V9fsState *s = pdu->s;
>      V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
>      VirtQueueElement *elem = v->elems[pdu->idx];
>      size_t buf_size = iov_size(elem->in_sg, elem->in_num);
> 
> -    if (buf_size < size) {
> -        VirtIODevice *vdev = VIRTIO_DEVICE(v);
> -
> -        virtio_error(vdev,
> -                     "VirtFS reply type %d needs %zu bytes, buffer has
> %zu", -                     pdu->id + 1, size, buf_size);
> +    if (buf_size < *size) {
> +        *size = buf_size;
>      }
> 
>      *piov = elem->in_sg;
> diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c
> index 3f54a21c76..3994a356d4 100644
> --- a/hw/9pfs/xen-9p-backend.c
> +++ b/hw/9pfs/xen-9p-backend.c
> @@ -187,7 +187,7 @@ static void xen_9pfs_init_out_iov_from_pdu(V9fsPDU *pdu,
> static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu,
>                                            struct iovec **piov,
>                                            unsigned int *pniov,
> -                                          size_t size)
> +                                          size_t *size)
>  {
>      Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
>      Xen9pfsRing *ring = &xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings];
> @@ -197,15 +197,11 @@ static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU
> *pdu, g_free(ring->sg);
> 
>      ring->sg = g_malloc0(sizeof(*ring->sg) * 2);
> -    xen_9pfs_in_sg(ring, ring->sg, &num, pdu->idx, size);
> +    xen_9pfs_in_sg(ring, ring->sg, &num, pdu->idx, *size);
> 
>      buf_size = iov_size(ring->sg, num);
> -    if (buf_size  < size) {
> -        xen_pv_printf(&xen_9pfs->xendev, 0, "Xen 9pfs request type %d"
> -                "needs %zu bytes, buffer has %zu\n", pdu->id, size,
> -                buf_size);
> -        xen_be_set_state(&xen_9pfs->xendev, XenbusStateClosing);
> -        xen_9pfs_disconnect(&xen_9pfs->xendev);
> +    if (buf_size  < *size) {
> +        *size = buf_size;
>      }
> 
>      *piov = ring->sg;

Best regards,
Christian Schoenebeck
Stefano Stabellini Dec. 19, 2019, 10:36 p.m. UTC | #2
On Thu, 19 Dec 2019, Christian Schoenebeck wrote:
> On Thursday, 19 December 2019 01:42:51 CET Stefano Stabellini wrote:
> > From: Stefano Stabellini <stefano.stabellini@xilinx.com>
> > 
> > init_in_iov_from_pdu might not be able to allocate the full buffer size
> > requested, which comes from the client and could be larger than the
> > transport has available at the time of the request. Specifically, this
> > can happen with read operations, with the client requesting a read up to
> > the max allowed, which might be more than the transport has available at
> > the time.
> 
> I haven't looked thoroughly at this yet, but that's about addressing a 
> temporary, not a permanent transport buffer size limitation, right? 

Yes, that is correct.


> Because if it was a permanent one, then probably an adjusted (lowered)
> msize should be returned on R_version response to client as well.
> 
> I wonder why I never triggered this issue, because I was experimenting with 
> huge msize values for 9pfs performance checks. Was there anything specific to 
> trigger this issue?

Lots of heavy usage by a Java application booting. Nothing like Java to
stress the system :-)


> > Today both the Xen and Virtio implementations of init_in_iov_from_pdu
> > throw an error in that case.
> > 
> > Instead, change the V9fsTransport interface so that the size becomes a
> > pointer and can be limited by the implementation of
> > init_in_iov_from_pdu.
> > 
> > Change both the Xen and Virtio implementations to set the size to the
> > size of the buffer they managed to allocate, instead of throwing an
> > error.
> > 
> > Signed-off-by: Stefano Stabellini <stefano.stabellini@xilinx.com>
> > CC: groug@kaod.org
> > CC: anthony.perard@citrix.com
> > ---
> >  hw/9pfs/9p.c               | 22 +++++++++++++++-------
> >  hw/9pfs/9p.h               |  2 +-
> >  hw/9pfs/virtio-9p-device.c | 10 +++-------
> >  hw/9pfs/xen-9p-backend.c   | 12 ++++--------
> >  4 files changed, 23 insertions(+), 23 deletions(-)
> > 
> > diff --git a/hw/9pfs/9p.c b/hw/9pfs/9p.c
> > index bdf7919abf..d6c89ce608 100644
> > --- a/hw/9pfs/9p.c
> > +++ b/hw/9pfs/9p.c
> > @@ -1682,22 +1682,30 @@ out_nofid:
> >   * with qemu_iovec_destroy().
> >   */
> >  static void v9fs_init_qiov_from_pdu(QEMUIOVector *qiov, V9fsPDU *pdu,
> > -                                    size_t skip, size_t size,
> > +                                    size_t skip, size_t *size,
> >                                      bool is_write)
> >  {
> >      QEMUIOVector elem;
> >      struct iovec *iov;
> >      unsigned int niov;
> > +    size_t alloc_size = *size + skip;
> > 
> >      if (is_write) {
> > -        pdu->s->transport->init_out_iov_from_pdu(pdu, &iov, &niov, size +
> > skip); +        pdu->s->transport->init_out_iov_from_pdu(pdu, &iov, &niov,
> > alloc_size); } else {
> > -        pdu->s->transport->init_in_iov_from_pdu(pdu, &iov, &niov, size +
> > skip); +        pdu->s->transport->init_in_iov_from_pdu(pdu, &iov, &niov,
> > &alloc_size); +    }
> > +
> > +    if (alloc_size < skip)
> > +    {
> > +        *size = 0;
> > +    } else {
> > +        *size = alloc_size - skip;
> >      }
> >
> 
> Code style nitpicking:  
> 
> ERROR: that open brace { should be on the previous line
> #56: FILE: hw/9pfs/9p.c:1699:
> +    if (alloc_size < skip)
> +    {

Oops, sorry! I can fix that.


> > 
> >  static int v9fs_xattr_read(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp,
> > @@ -1722,7 +1730,7 @@ static int v9fs_xattr_read(V9fsState *s, V9fsPDU *pdu,
> > V9fsFidState *fidp, }
> >      offset += err;
> > 
> > -    v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset, read_count, false);
> > +    v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset, &read_count, false);
> >      err = v9fs_pack(qiov_full.iov, qiov_full.niov, 0,
> >                      ((char *)fidp->fs.xattr.value) + off,
> >                      read_count);
> > @@ -1852,7 +1860,7 @@ static void coroutine_fn v9fs_read(void *opaque)
> >          QEMUIOVector qiov;
> >          int32_t len;
> > 
> > -        v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset + 4, max_count,
> > false); +        v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset + 4,
> > &max_count, false); qemu_iovec_init(&qiov, qiov_full.niov);
> >          do {
> >              qemu_iovec_reset(&qiov);
> > @@ -2085,7 +2093,7 @@ static void coroutine_fn v9fs_write(void *opaque)
> >          return;
> >      }
> >      offset += err;
> > -    v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset, count, true);
> > +    v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset, &count, true);
> >      trace_v9fs_write(pdu->tag, pdu->id, fid, off, count, qiov_full.niov);
> > 
> >      fidp = get_fid(pdu, fid);
> > diff --git a/hw/9pfs/9p.h b/hw/9pfs/9p.h
> > index 8883761b2c..50f7e21da6 100644
> > --- a/hw/9pfs/9p.h
> > +++ b/hw/9pfs/9p.h
> > @@ -365,7 +365,7 @@ struct V9fsTransport {
> >      ssize_t     (*pdu_vunmarshal)(V9fsPDU *pdu, size_t offset, const char
> > *fmt, va_list ap);
> >      void        (*init_in_iov_from_pdu)(V9fsPDU *pdu, struct iovec **piov,
> > -                                        unsigned int *pniov, size_t size);
> > +                                        unsigned int *pniov, size_t *size);
> > void        (*init_out_iov_from_pdu)(V9fsPDU *pdu, struct iovec **piov,
> > unsigned int *pniov, size_t size); void        (*push_and_notify)(V9fsPDU
> > *pdu);
> > diff --git a/hw/9pfs/virtio-9p-device.c b/hw/9pfs/virtio-9p-device.c
> > index 775e8ff766..68873c3f5f 100644
> > --- a/hw/9pfs/virtio-9p-device.c
> > +++ b/hw/9pfs/virtio-9p-device.c
> > @@ -145,19 +145,15 @@ static ssize_t virtio_pdu_vunmarshal(V9fsPDU *pdu,
> > size_t offset, }
> > 
> >  static void virtio_init_in_iov_from_pdu(V9fsPDU *pdu, struct iovec **piov,
> > -                                        unsigned int *pniov, size_t size)
> > +                                        unsigned int *pniov, size_t *size)
> >  {
> >      V9fsState *s = pdu->s;
> >      V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
> >      VirtQueueElement *elem = v->elems[pdu->idx];
> >      size_t buf_size = iov_size(elem->in_sg, elem->in_num);
> > 
> > -    if (buf_size < size) {
> > -        VirtIODevice *vdev = VIRTIO_DEVICE(v);
> > -
> > -        virtio_error(vdev,
> > -                     "VirtFS reply type %d needs %zu bytes, buffer has
> > %zu", -                     pdu->id + 1, size, buf_size);
> > +    if (buf_size < *size) {
> > +        *size = buf_size;
> >      }
> > 
> >      *piov = elem->in_sg;
> > diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c
> > index 3f54a21c76..3994a356d4 100644
> > --- a/hw/9pfs/xen-9p-backend.c
> > +++ b/hw/9pfs/xen-9p-backend.c
> > @@ -187,7 +187,7 @@ static void xen_9pfs_init_out_iov_from_pdu(V9fsPDU *pdu,
> > static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu,
> >                                            struct iovec **piov,
> >                                            unsigned int *pniov,
> > -                                          size_t size)
> > +                                          size_t *size)
> >  {
> >      Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
> >      Xen9pfsRing *ring = &xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings];
> > @@ -197,15 +197,11 @@ static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU
> > *pdu, g_free(ring->sg);
> > 
> >      ring->sg = g_malloc0(sizeof(*ring->sg) * 2);
> > -    xen_9pfs_in_sg(ring, ring->sg, &num, pdu->idx, size);
> > +    xen_9pfs_in_sg(ring, ring->sg, &num, pdu->idx, *size);
> > 
> >      buf_size = iov_size(ring->sg, num);
> > -    if (buf_size  < size) {
> > -        xen_pv_printf(&xen_9pfs->xendev, 0, "Xen 9pfs request type %d"
> > -                "needs %zu bytes, buffer has %zu\n", pdu->id, size,
> > -                buf_size);
> > -        xen_be_set_state(&xen_9pfs->xendev, XenbusStateClosing);
> > -        xen_9pfs_disconnect(&xen_9pfs->xendev);
> > +    if (buf_size  < *size) {
> > +        *size = buf_size;
> >      }
> > 
> >      *piov = ring->sg;
> 
> Best regards,
> Christian Schoenebeck
> 
> 
>
Christian Schoenebeck Dec. 20, 2019, 12:31 p.m. UTC | #3
On Thursday, 19 December 2019 23:36:07 CET Stefano Stabellini wrote:
> On Thu, 19 Dec 2019, Christian Schoenebeck wrote:
> > On Thursday, 19 December 2019 01:42:51 CET Stefano Stabellini wrote:
> > > From: Stefano Stabellini <stefano.stabellini@xilinx.com>
> > > 
> > > init_in_iov_from_pdu might not be able to allocate the full buffer size
> > > requested, which comes from the client and could be larger than the
> > > transport has available at the time of the request. Specifically, this
> > > can happen with read operations, with the client requesting a read up to
> > > the max allowed, which might be more than the transport has available at
> > > the time.
> > 
> > I haven't looked thoroughly at this yet, but that's about addressing a
> > temporary, not a permanent transport buffer size limitation, right?
> 
> Yes, that is correct.

One more thing ...

> diff --git a/hw/9pfs/virtio-9p-device.c b/hw/9pfs/virtio-9p-device.c
> index 775e8ff766..68873c3f5f 100644
> --- a/hw/9pfs/virtio-9p-device.c
> +++ b/hw/9pfs/virtio-9p-device.c
> @@ -145,19 +145,15 @@ static ssize_t virtio_pdu_vunmarshal(V9fsPDU *pdu,
> size_t offset, }
> 
>  static void virtio_init_in_iov_from_pdu(V9fsPDU *pdu, struct iovec **piov,
> -                                        unsigned int *pniov, size_t size)
> +                                        unsigned int *pniov, size_t *size)
>  {
>      V9fsState *s = pdu->s;
>      V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
>      VirtQueueElement *elem = v->elems[pdu->idx];
>      size_t buf_size = iov_size(elem->in_sg, elem->in_num);
> 
> -    if (buf_size < size) {
> -        VirtIODevice *vdev = VIRTIO_DEVICE(v);
> -
> -        virtio_error(vdev,
> -                     "VirtFS reply type %d needs %zu bytes, buffer has
> %zu", -                     pdu->id + 1, size, buf_size);
> +    if (buf_size < *size) {
> +        *size = buf_size;
>      }
> 
>      *piov = elem->in_sg;

There could be a problem here: what happens if the currently available
transport buffer size is extremely small, i.e. less than P9_IOHDRSZ? I am not
sure that would be handled safely everywhere. So maybe it would make sense to
keep treating a transport buffer size < P9_IOHDRSZ as an error case here?
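
To illustrate what I mean, such an error case could simply keep the error
path that this patch removes, e.g. for the virtio hunk above (rough sketch
only, not meant as the final code):

    size_t buf_size = iov_size(elem->in_sg, elem->in_num);

    /* not even a 9p header fits: keep treating this as a fatal error */
    if (buf_size < P9_IOHDRSZ) {
        VirtIODevice *vdev = VIRTIO_DEVICE(v);

        virtio_error(vdev,
                     "VirtFS reply type %d: buffer has only %zu bytes, "
                     "too small for a 9p header", pdu->id + 1, buf_size);
    }
    /* otherwise just truncate to what the transport has available now */
    if (buf_size < *size) {
        *size = buf_size;
    }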

Best regards,
Christian Schoenebeck
Greg Kurz Jan. 6, 2020, 1:42 p.m. UTC | #4
On Wed, 18 Dec 2019 16:42:51 -0800
Stefano Stabellini <sstabellini@kernel.org> wrote:

> From: Stefano Stabellini <stefano.stabellini@xilinx.com>
> 
> init_in_iov_from_pdu might not be able to allocate the full buffer size
> requested, which comes from the client and could be larger than the
> transport has available at the time of the request. Specifically, this
> can happen with read operations, with the client requesting a read up to
> the max allowed, which might be more than the transport has available at
> the time.
> 
> Today both the Xen and Virtio implementations of init_in_iov_from_pdu
> throw an error in that case.
> 
> Instead, change the V9fsTransport interface so that the size becomes a
> pointer and can be limited by the implementation of
> init_in_iov_from_pdu.
> 
> Change both the Xen and Virtio implementations to set the size to the
> size of the buffer they managed to allocate, instead of throwing an
> error.
> 
> Signed-off-by: Stefano Stabellini <stefano.stabellini@xilinx.com>
> CC: groug@kaod.org
> CC: anthony.perard@citrix.com
> ---
>  hw/9pfs/9p.c               | 22 +++++++++++++++-------
>  hw/9pfs/9p.h               |  2 +-
>  hw/9pfs/virtio-9p-device.c | 10 +++-------
>  hw/9pfs/xen-9p-backend.c   | 12 ++++--------
>  4 files changed, 23 insertions(+), 23 deletions(-)
> 
> diff --git a/hw/9pfs/9p.c b/hw/9pfs/9p.c
> index bdf7919abf..d6c89ce608 100644
> --- a/hw/9pfs/9p.c
> +++ b/hw/9pfs/9p.c
> @@ -1682,22 +1682,30 @@ out_nofid:
>   * with qemu_iovec_destroy().
>   */
>  static void v9fs_init_qiov_from_pdu(QEMUIOVector *qiov, V9fsPDU *pdu,
> -                                    size_t skip, size_t size,
> +                                    size_t skip, size_t *size,
>                                      bool is_write)
>  {
>      QEMUIOVector elem;
>      struct iovec *iov;
>      unsigned int niov;
> +    size_t alloc_size = *size + skip;
>  
>      if (is_write) {
> -        pdu->s->transport->init_out_iov_from_pdu(pdu, &iov, &niov, size + skip);
> +        pdu->s->transport->init_out_iov_from_pdu(pdu, &iov, &niov, alloc_size);
>      } else {
> -        pdu->s->transport->init_in_iov_from_pdu(pdu, &iov, &niov, size + skip);
> +        pdu->s->transport->init_in_iov_from_pdu(pdu, &iov, &niov, &alloc_size);
> +    }
> +
> +    if (alloc_size < skip)
> +    {
> +        *size = 0;
> +    } else {
> +        *size = alloc_size - skip;
>      }
>      qemu_iovec_init_external(&elem, iov, niov);
>      qemu_iovec_init(qiov, niov);
> -    qemu_iovec_concat(qiov, &elem, skip, size);
> +    qemu_iovec_concat(qiov, &elem, skip, *size);
>  }
>  
>  static int v9fs_xattr_read(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp,
> @@ -1722,7 +1730,7 @@ static int v9fs_xattr_read(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp,
>      }
>      offset += err;
>  
> -    v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset, read_count, false);
> +    v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset, &read_count, false);


>      err = v9fs_pack(qiov_full.iov, qiov_full.niov, 0,
>                      ((char *)fidp->fs.xattr.value) + off,
>                      read_count);
> @@ -1852,7 +1860,7 @@ static void coroutine_fn v9fs_read(void *opaque)
>          QEMUIOVector qiov;
>          int32_t len;
>  
> -        v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset + 4, max_count, false);
> +        v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset + 4, &max_count, false);

This doesn't build:

hw/9pfs/9p.c: In function ‘v9fs_read’:
hw/9pfs/9p.c:2271:62: error: passing argument 4 of ‘v9fs_init_qiov_from_pdu’ from incompatible pointer type [-Werror=incompatible-pointer-types]
 2271 |         v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset + 4, &max_count, false);
      |                                                              ^~~~~~~~~~
      |                                                              |
      |                                                              uint32_t * {aka unsigned int *}
hw/9pfs/9p.c:2093:58: note: expected ‘size_t *’ {aka ‘long unsigned int *’} but argument is of type ‘uint32_t *’ {aka ‘unsigned int *’}
 2093 |                                     size_t skip, size_t *size,
      |                                                  ~~~~~~~~^~~~

>          qemu_iovec_init(&qiov, qiov_full.niov);
>          do {
>              qemu_iovec_reset(&qiov);
> @@ -2085,7 +2093,7 @@ static void coroutine_fn v9fs_write(void *opaque)
>          return;
>      }
>      offset += err;
> -    v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset, count, true);
> +    v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset, &count, true);

Same here.

hw/9pfs/9p.c: In function ‘v9fs_write’:
hw/9pfs/9p.c:2527:54: error: passing argument 4 of ‘v9fs_init_qiov_from_pdu’ from incompatible pointer type [-Werror=incompatible-pointer-types]
 2527 |     v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset, &count, true);
      |                                                      ^~~~~~
      |                                                      |
      |                                                      uint32_t * {aka unsigned int *}
hw/9pfs/9p.c:2093:58: note: expected ‘size_t *’ {aka ‘long unsigned int *’} but argument is of type ‘uint32_t *’ {aka ‘unsigned int *’}
 2093 |                                     size_t skip, size_t *size,
      |                                                  ~~~~~~~~^~~~

>      trace_v9fs_write(pdu->tag, pdu->id, fid, off, count, qiov_full.niov);
>  
>      fidp = get_fid(pdu, fid);
> diff --git a/hw/9pfs/9p.h b/hw/9pfs/9p.h
> index 8883761b2c..50f7e21da6 100644
> --- a/hw/9pfs/9p.h
> +++ b/hw/9pfs/9p.h
> @@ -365,7 +365,7 @@ struct V9fsTransport {
>      ssize_t     (*pdu_vunmarshal)(V9fsPDU *pdu, size_t offset, const char *fmt,
>                                    va_list ap);
>      void        (*init_in_iov_from_pdu)(V9fsPDU *pdu, struct iovec **piov,
> -                                        unsigned int *pniov, size_t size);
> +                                        unsigned int *pniov, size_t *size);
>      void        (*init_out_iov_from_pdu)(V9fsPDU *pdu, struct iovec **piov,
>                                           unsigned int *pniov, size_t size);
>      void        (*push_and_notify)(V9fsPDU *pdu);
> diff --git a/hw/9pfs/virtio-9p-device.c b/hw/9pfs/virtio-9p-device.c
> index 775e8ff766..68873c3f5f 100644
> --- a/hw/9pfs/virtio-9p-device.c
> +++ b/hw/9pfs/virtio-9p-device.c
> @@ -145,19 +145,15 @@ static ssize_t virtio_pdu_vunmarshal(V9fsPDU *pdu, size_t offset,
>  }
>  
>  static void virtio_init_in_iov_from_pdu(V9fsPDU *pdu, struct iovec **piov,
> -                                        unsigned int *pniov, size_t size)
> +                                        unsigned int *pniov, size_t *size)
>  {
>      V9fsState *s = pdu->s;
>      V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
>      VirtQueueElement *elem = v->elems[pdu->idx];
>      size_t buf_size = iov_size(elem->in_sg, elem->in_num);
>  
> -    if (buf_size < size) {
> -        VirtIODevice *vdev = VIRTIO_DEVICE(v);
> -
> -        virtio_error(vdev,
> -                     "VirtFS reply type %d needs %zu bytes, buffer has %zu",
> -                     pdu->id + 1, size, buf_size);
> +    if (buf_size < *size) {
> +        *size = buf_size;
>      }

As suggested by Christian in some other mail, it could still make sense to
raise the error if there isn't even enough space to pack a 9p message header.

>  
>  
>      *piov = elem->in_sg;
> diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c
> index 3f54a21c76..3994a356d4 100644
> --- a/hw/9pfs/xen-9p-backend.c
> +++ b/hw/9pfs/xen-9p-backend.c
> @@ -187,7 +187,7 @@ static void xen_9pfs_init_out_iov_from_pdu(V9fsPDU *pdu,
>  static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu,
>                                            struct iovec **piov,
>                                            unsigned int *pniov,
> -                                          size_t size)
> +                                          size_t *size)
>  {
>      Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
>      Xen9pfsRing *ring = &xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings];
> @@ -197,15 +197,11 @@ static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu,
>      g_free(ring->sg);
>  
>      ring->sg = g_malloc0(sizeof(*ring->sg) * 2);
> -    xen_9pfs_in_sg(ring, ring->sg, &num, pdu->idx, size);
> +    xen_9pfs_in_sg(ring, ring->sg, &num, pdu->idx, *size);
>  

This chunk doesn't apply on master:

<<<<<<<
    ring->sg = g_new0(struct iovec, 2);
    xen_9pfs_in_sg(ring, ring->sg, &num, pdu->idx, size);
=======
    ring->sg = g_malloc0(sizeof(*ring->sg) * 2);
    xen_9pfs_in_sg(ring, ring->sg, &num, pdu->idx, *size);
>>>>>>>


>      buf_size = iov_size(ring->sg, num);
> -    if (buf_size  < size) {
> -        xen_pv_printf(&xen_9pfs->xendev, 0, "Xen 9pfs request type %d"
> -                "needs %zu bytes, buffer has %zu\n", pdu->id, size,
> -                buf_size);
> -        xen_be_set_state(&xen_9pfs->xendev, XenbusStateClosing);
> -        xen_9pfs_disconnect(&xen_9pfs->xendev);
> +    if (buf_size  < *size) {
> +        *size = buf_size;
>      }
>  
>      *piov = ring->sg;
Christian Schoenebeck Jan. 6, 2020, 3:24 p.m. UTC | #5
On Monday, 6 January 2020 14:42:54 CET Greg Kurz wrote:
> > diff --git a/hw/9pfs/virtio-9p-device.c b/hw/9pfs/virtio-9p-device.c
> > index 775e8ff766..68873c3f5f 100644
> > --- a/hw/9pfs/virtio-9p-device.c
> > +++ b/hw/9pfs/virtio-9p-device.c
> > @@ -145,19 +145,15 @@ static ssize_t virtio_pdu_vunmarshal(V9fsPDU *pdu,
> > size_t offset,> 
> >  }
> >  
> >  static void virtio_init_in_iov_from_pdu(V9fsPDU *pdu, struct iovec
> >  **piov,
> > 
> > -                                        unsigned int *pniov, size_t size)
> > +                                        unsigned int *pniov, size_t
> > *size)
> > 
> >  {
> >  
> >      V9fsState *s = pdu->s;
> >      V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
> >      VirtQueueElement *elem = v->elems[pdu->idx];
> >      size_t buf_size = iov_size(elem->in_sg, elem->in_num);
> > 
> > -    if (buf_size < size) {
> > -        VirtIODevice *vdev = VIRTIO_DEVICE(v);
> > -
> > -        virtio_error(vdev,
> > -                     "VirtFS reply type %d needs %zu bytes, buffer has
> > %zu", -                     pdu->id + 1, size, buf_size);
> > +    if (buf_size < *size) {
> > +        *size = buf_size;
> > 
> >      }
> 
> As suggested by Christian in some other mail, it could still make sense to
> raise the error if there isn't even enough space to pack a 9p message
> header.

Another option: instead of handling this as a hard error (which they probably
want to avoid in their use case), put the handler to sleep for a while by
calling qemu_co_sleep_ns_wakeable() in this case. A bit later the transport
would eventually have the required buffer size and the handler could continue
the request without an error.
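
To make the idea concrete, the Xen backend for example could re-probe the
ring after a short sleep, since ring space frees up as the frontend consumes
replies. A rough sketch only (it uses plain qemu_co_sleep_ns() instead of the
wakeable variant, picks an arbitrary 1 ms delay, assumes we are always in
coroutine context here, and ignores the ordering problem discussed below):

    buf_size = iov_size(ring->sg, num);
    while (buf_size < *size) {
        /* wait for the frontend to consume replies and free ring space */
        qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 1 * 1000 * 1000);
        xen_9pfs_in_sg(ring, ring->sg, &num, pdu->idx, *size);
        buf_size = iov_size(ring->sg, num);
    }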

But this would require more care. For instance, subsequent request handlers
would need to check whether a handler is already asleep, and if so either wake
it up or go to sleep as well, so that the order in which the server processes
requests does not get messed up.

Best regards,
Christian Schoenebeck
Greg Kurz Jan. 6, 2020, 5:31 p.m. UTC | #6
On Mon, 06 Jan 2020 16:24:18 +0100
Christian Schoenebeck <qemu_oss@crudebyte.com> wrote:

> On Monday, 6 January 2020 14:42:54 CET Greg Kurz wrote:
> > > diff --git a/hw/9pfs/virtio-9p-device.c b/hw/9pfs/virtio-9p-device.c
> > > index 775e8ff766..68873c3f5f 100644
> > > --- a/hw/9pfs/virtio-9p-device.c
> > > +++ b/hw/9pfs/virtio-9p-device.c
> > > @@ -145,19 +145,15 @@ static ssize_t virtio_pdu_vunmarshal(V9fsPDU *pdu,
> > > size_t offset,> 
> > >  }
> > >  
> > >  static void virtio_init_in_iov_from_pdu(V9fsPDU *pdu, struct iovec
> > >  **piov,
> > > 
> > > -                                        unsigned int *pniov, size_t size)
> > > +                                        unsigned int *pniov, size_t
> > > *size)
> > > 
> > >  {
> > >  
> > >      V9fsState *s = pdu->s;
> > >      V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
> > >      VirtQueueElement *elem = v->elems[pdu->idx];
> > >      size_t buf_size = iov_size(elem->in_sg, elem->in_num);
> > > 
> > > -    if (buf_size < size) {
> > > -        VirtIODevice *vdev = VIRTIO_DEVICE(v);
> > > -
> > > -        virtio_error(vdev,
> > > -                     "VirtFS reply type %d needs %zu bytes, buffer has
> > > %zu", -                     pdu->id + 1, size, buf_size);
> > > +    if (buf_size < *size) {
> > > +        *size = buf_size;
> > > 
> > >      }
> > 
> > As suggested by Christian in some other mail, it could still make sense to
> > raise the error if there isn't even enough space to pack a 9p message
> > header.
> 
> Another option: instead of handling this as a hard error (which they probably
> want to avoid in their use case), put the handler to sleep for a while by
> calling qemu_co_sleep_ns_wakeable() in this case. A bit later the transport
> would eventually have the required buffer size and the handler could continue
> the request without an error.
> 

Waiting for an arbitrary amount of time (how much?) and retrying doesn't give
any guarantee either that things will go smoothly. If anything, I'd rather have
the transport wake up the request when more buffer space becomes available.

> But this would require more care. For instance, subsequent request handlers
> would need to check whether a handler is already asleep, and if so either wake
> it up or go to sleep as well, so that the order in which the server processes
> requests does not get messed up.
> 

And so on... i.e. we would need to handle a queue of sleeping requests IIUC.
I'm not really a fan of going this way to address what looks like a corner case.

> Best regards,
> Christian Schoenebeck
> 
>
Stefano Stabellini Jan. 6, 2020, 7:30 p.m. UTC | #7
On Mon, 6 Jan 2020, Greg Kurz wrote:
> On Mon, 06 Jan 2020 16:24:18 +0100
> Christian Schoenebeck <qemu_oss@crudebyte.com> wrote:
> 
> > On Monday, 6 January 2020 14:42:54 CET Greg Kurz wrote:
> > > > diff --git a/hw/9pfs/virtio-9p-device.c b/hw/9pfs/virtio-9p-device.c
> > > > index 775e8ff766..68873c3f5f 100644
> > > > --- a/hw/9pfs/virtio-9p-device.c
> > > > +++ b/hw/9pfs/virtio-9p-device.c
> > > > @@ -145,19 +145,15 @@ static ssize_t virtio_pdu_vunmarshal(V9fsPDU *pdu,
> > > > size_t offset,> 
> > > >  }
> > > >  
> > > >  static void virtio_init_in_iov_from_pdu(V9fsPDU *pdu, struct iovec
> > > >  **piov,
> > > > 
> > > > -                                        unsigned int *pniov, size_t size)
> > > > +                                        unsigned int *pniov, size_t
> > > > *size)
> > > > 
> > > >  {
> > > >  
> > > >      V9fsState *s = pdu->s;
> > > >      V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
> > > >      VirtQueueElement *elem = v->elems[pdu->idx];
> > > >      size_t buf_size = iov_size(elem->in_sg, elem->in_num);
> > > > 
> > > > -    if (buf_size < size) {
> > > > -        VirtIODevice *vdev = VIRTIO_DEVICE(v);
> > > > -
> > > > -        virtio_error(vdev,
> > > > -                     "VirtFS reply type %d needs %zu bytes, buffer has
> > > > %zu", -                     pdu->id + 1, size, buf_size);
> > > > +    if (buf_size < *size) {
> > > > +        *size = buf_size;
> > > > 
> > > >      }
> > > 
> > > As suggested by Christian in some other mail, it could still make sense to
> > > raise the error if there isn't even enough space to pack a 9p message
> > > header.
> > 
> > Another option: instead of handling this as a hard error (which they probably
> > want to avoid in their use case), put the handler to sleep for a while by
> > calling qemu_co_sleep_ns_wakeable() in this case. A bit later the transport
> > would eventually have the required buffer size and the handler could continue
> > the request without an error.
> > 
> 
> Waiting for an arbitrary amount of time (how much?) and retrying doesn't give
> any guarantee either that things will go smoothly. If anything, I'd rather have
> the transport wake up the request when more buffer space becomes available.
> 
> > But this would require more care. For instance, subsequent request handlers
> > would need to check whether a handler is already asleep, and if so either wake
> > it up or go to sleep as well, so that the order in which the server processes
> > requests does not get messed up.
> > 
> 
> And so on... i.e. we would need to handle a queue of sleeping requests IIUC.
> I'm not really a fan of going this way to address what looks like a corner case.

I agree with you that handling request queueing is complex and overkill
for this. As I don't have a better suggestion, I am going to go with
retaining the error if the allocated buf_size is less than P9_IOHDRSZ.
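
For the Xen backend that presumably means keeping the existing disconnect
path for the < P9_IOHDRSZ case, roughly like this (sketch only, reusing the
code removed below):

    buf_size = iov_size(ring->sg, num);
    if (buf_size < P9_IOHDRSZ) {
        xen_pv_printf(&xen_9pfs->xendev, 0, "Xen 9pfs request type %d: "
                      "buffer has only %zu bytes, too small for a 9p header\n",
                      pdu->id, buf_size);
        xen_be_set_state(&xen_9pfs->xendev, XenbusStateClosing);
        xen_9pfs_disconnect(&xen_9pfs->xendev);
    } else if (buf_size < *size) {
        *size = buf_size;
    }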

Patch

diff --git a/hw/9pfs/9p.c b/hw/9pfs/9p.c
index bdf7919abf..d6c89ce608 100644
--- a/hw/9pfs/9p.c
+++ b/hw/9pfs/9p.c
@@ -1682,22 +1682,30 @@  out_nofid:
  * with qemu_iovec_destroy().
  */
 static void v9fs_init_qiov_from_pdu(QEMUIOVector *qiov, V9fsPDU *pdu,
-                                    size_t skip, size_t size,
+                                    size_t skip, size_t *size,
                                     bool is_write)
 {
     QEMUIOVector elem;
     struct iovec *iov;
     unsigned int niov;
+    size_t alloc_size = *size + skip;
 
     if (is_write) {
-        pdu->s->transport->init_out_iov_from_pdu(pdu, &iov, &niov, size + skip);
+        pdu->s->transport->init_out_iov_from_pdu(pdu, &iov, &niov, alloc_size);
     } else {
-        pdu->s->transport->init_in_iov_from_pdu(pdu, &iov, &niov, size + skip);
+        pdu->s->transport->init_in_iov_from_pdu(pdu, &iov, &niov, &alloc_size);
+    }
+
+    if (alloc_size < skip)
+    {
+        *size = 0;
+    } else {
+        *size = alloc_size - skip;
     }
 
     qemu_iovec_init_external(&elem, iov, niov);
     qemu_iovec_init(qiov, niov);
-    qemu_iovec_concat(qiov, &elem, skip, size);
+    qemu_iovec_concat(qiov, &elem, skip, *size);
 }
 
 static int v9fs_xattr_read(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp,
@@ -1722,7 +1730,7 @@  static int v9fs_xattr_read(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp,
     }
     offset += err;
 
-    v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset, read_count, false);
+    v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset, &read_count, false);
     err = v9fs_pack(qiov_full.iov, qiov_full.niov, 0,
                     ((char *)fidp->fs.xattr.value) + off,
                     read_count);
@@ -1852,7 +1860,7 @@  static void coroutine_fn v9fs_read(void *opaque)
         QEMUIOVector qiov;
         int32_t len;
 
-        v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset + 4, max_count, false);
+        v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset + 4, &max_count, false);
         qemu_iovec_init(&qiov, qiov_full.niov);
         do {
             qemu_iovec_reset(&qiov);
@@ -2085,7 +2093,7 @@  static void coroutine_fn v9fs_write(void *opaque)
         return;
     }
     offset += err;
-    v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset, count, true);
+    v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset, &count, true);
     trace_v9fs_write(pdu->tag, pdu->id, fid, off, count, qiov_full.niov);
 
     fidp = get_fid(pdu, fid);
diff --git a/hw/9pfs/9p.h b/hw/9pfs/9p.h
index 8883761b2c..50f7e21da6 100644
--- a/hw/9pfs/9p.h
+++ b/hw/9pfs/9p.h
@@ -365,7 +365,7 @@  struct V9fsTransport {
     ssize_t     (*pdu_vunmarshal)(V9fsPDU *pdu, size_t offset, const char *fmt,
                                   va_list ap);
     void        (*init_in_iov_from_pdu)(V9fsPDU *pdu, struct iovec **piov,
-                                        unsigned int *pniov, size_t size);
+                                        unsigned int *pniov, size_t *size);
     void        (*init_out_iov_from_pdu)(V9fsPDU *pdu, struct iovec **piov,
                                          unsigned int *pniov, size_t size);
     void        (*push_and_notify)(V9fsPDU *pdu);
diff --git a/hw/9pfs/virtio-9p-device.c b/hw/9pfs/virtio-9p-device.c
index 775e8ff766..68873c3f5f 100644
--- a/hw/9pfs/virtio-9p-device.c
+++ b/hw/9pfs/virtio-9p-device.c
@@ -145,19 +145,15 @@  static ssize_t virtio_pdu_vunmarshal(V9fsPDU *pdu, size_t offset,
 }
 
 static void virtio_init_in_iov_from_pdu(V9fsPDU *pdu, struct iovec **piov,
-                                        unsigned int *pniov, size_t size)
+                                        unsigned int *pniov, size_t *size)
 {
     V9fsState *s = pdu->s;
     V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
     VirtQueueElement *elem = v->elems[pdu->idx];
     size_t buf_size = iov_size(elem->in_sg, elem->in_num);
 
-    if (buf_size < size) {
-        VirtIODevice *vdev = VIRTIO_DEVICE(v);
-
-        virtio_error(vdev,
-                     "VirtFS reply type %d needs %zu bytes, buffer has %zu",
-                     pdu->id + 1, size, buf_size);
+    if (buf_size < *size) {
+        *size = buf_size;
     }
 
     *piov = elem->in_sg;
diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c
index 3f54a21c76..3994a356d4 100644
--- a/hw/9pfs/xen-9p-backend.c
+++ b/hw/9pfs/xen-9p-backend.c
@@ -187,7 +187,7 @@  static void xen_9pfs_init_out_iov_from_pdu(V9fsPDU *pdu,
 static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu,
                                           struct iovec **piov,
                                           unsigned int *pniov,
-                                          size_t size)
+                                          size_t *size)
 {
     Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
     Xen9pfsRing *ring = &xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings];
@@ -197,15 +197,11 @@  static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu,
     g_free(ring->sg);
 
     ring->sg = g_malloc0(sizeof(*ring->sg) * 2);
-    xen_9pfs_in_sg(ring, ring->sg, &num, pdu->idx, size);
+    xen_9pfs_in_sg(ring, ring->sg, &num, pdu->idx, *size);
 
     buf_size = iov_size(ring->sg, num);
-    if (buf_size  < size) {
-        xen_pv_printf(&xen_9pfs->xendev, 0, "Xen 9pfs request type %d"
-                "needs %zu bytes, buffer has %zu\n", pdu->id, size,
-                buf_size);
-        xen_be_set_state(&xen_9pfs->xendev, XenbusStateClosing);
-        xen_9pfs_disconnect(&xen_9pfs->xendev);
+    if (buf_size  < *size) {
+        *size = buf_size;
     }
 
     *piov = ring->sg;