diff mbox

[v2,1/2] virtio-net: enable configurable tx queue size

Message ID 1497610119-45041-1-git-send-email-wei.w.wang@intel.com
State New
Headers show

Commit Message

Wang, Wei W June 16, 2017, 10:48 a.m. UTC
This patch enables the virtio-net tx queue size to be configurable
between 256 (the default queue size) and 1024 by the user when the
vhost-user backend is used.

Currently, the maximum tx queue size for other backends is 512 due
to the following limitations:
- QEMU backend: the QEMU backend implementation in some cases may
send 1024+1 iovs to writev.
- Vhost_net backend: there are possibilities that the guest sends
a vring_desc of memory which crosses a MemoryRegion thereby
generating more than 1024 iovs in total after translation from
guest-physical address in the backend.

Signed-off-by: Wei Wang <wei.w.wang@intel.com>
---
 hw/net/virtio-net.c            | 46 ++++++++++++++++++++++++++++++++++--------
 include/hw/virtio/virtio-net.h |  1 +
 2 files changed, 39 insertions(+), 8 deletions(-)

Comments

Michael S. Tsirkin June 16, 2017, 2:19 p.m. UTC | #1
On Fri, Jun 16, 2017 at 06:48:38PM +0800, Wei Wang wrote:
> This patch enables the virtio-net tx queue size to be configurable
> between 256 (the default queue size) and 1024 by the user when the
> vhost-user backend is used.
> 
> Currently, the maximum tx queue size for other backends is 512 due
> to the following limitations:
> - QEMU backend: the QEMU backend implementation in some cases may
> send 1024+1 iovs to writev.
> - Vhost_net backend: there are possibilities that the guest sends
> a vring_desc of memory which corsses a MemoryRegion thereby
> generating more than 1024 iovs in total after translattion from
> guest-physical address in the backend.
> 
> Signed-off-by: Wei Wang <wei.w.wang@intel.com>
> ---
>  hw/net/virtio-net.c            | 46 ++++++++++++++++++++++++++++++++++--------
>  include/hw/virtio/virtio-net.h |  1 +
>  2 files changed, 39 insertions(+), 8 deletions(-)
> 
> diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> index 7d091c9..e1a08fd 100644
> --- a/hw/net/virtio-net.c
> +++ b/hw/net/virtio-net.c
> @@ -33,8 +33,11 @@
>  
>  /* previously fixed value */
>  #define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
> +#define VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE 256
> +
>  /* for now, only allow larger queues; with virtio-1, guest can downsize */
>  #define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE
> +#define VIRTIO_NET_TX_QUEUE_MIN_SIZE VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE
>  
>  /*
>   * Calculate the number of bytes up to and including the given 'field' of
> @@ -1491,18 +1494,33 @@ static void virtio_net_tx_bh(void *opaque)
>  static void virtio_net_add_queue(VirtIONet *n, int index)
>  {
>      VirtIODevice *vdev = VIRTIO_DEVICE(n);
> +    NetClientState *nc = qemu_get_queue(n->nic);
>  
>      n->vqs[index].rx_vq = virtio_add_queue(vdev, n->net_conf.rx_queue_size,
>                                             virtio_net_handle_rx);
> +
> +    /*
> +     * Currently, backends other than vhost-user don't support 1024 queue
> +     * size.
> +     */
> +    if (n->net_conf.tx_queue_size == VIRTQUEUE_MAX_SIZE &&
> +        nc->peer->info->type != NET_CLIENT_DRIVER_VHOST_USER) {
> +        fprintf(stderr, "warning: %s: queue size %d not supported\n",
> +                __func__, n->net_conf.tx_queue_size);

OK but it's best to avoid fprintfs. Queue size is easy to see so
I don't think we need it. I've dropped this from the patch for now,
feel free to send a separate patch re-adding this.

> +        n->net_conf.tx_queue_size = VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
> +    }
> +
>      if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
>          n->vqs[index].tx_vq =
> -            virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
> +            virtio_add_queue(vdev, n->net_conf.tx_queue_size,
> +                             virtio_net_handle_tx_timer);
>          n->vqs[index].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
>                                                virtio_net_tx_timer,
>                                                &n->vqs[index]);
>      } else {
>          n->vqs[index].tx_vq =
> -            virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
> +            virtio_add_queue(vdev, n->net_conf.tx_queue_size,
> +                             virtio_net_handle_tx_bh);
>          n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]);
>      }
>  
> @@ -1910,6 +1928,17 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
>          return;
>      }
>  
> +    if (n->net_conf.tx_queue_size < VIRTIO_NET_TX_QUEUE_MIN_SIZE ||
> +        n->net_conf.tx_queue_size > VIRTQUEUE_MAX_SIZE ||
> +        !is_power_of_2(n->net_conf.tx_queue_size)) {
> +        error_setg(errp, "Invalid tx_queue_size (= %" PRIu16 "), "
> +                   "must be a power of 2 between %d and %d",
> +                   n->net_conf.tx_queue_size, VIRTIO_NET_TX_QUEUE_MIN_SIZE,
> +                   VIRTQUEUE_MAX_SIZE);
> +        virtio_cleanup(vdev);
> +        return;
> +    }
> +
>      n->max_queues = MAX(n->nic_conf.peers.queues, 1);
>      if (n->max_queues * 2 + 1 > VIRTIO_QUEUE_MAX) {
>          error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
> @@ -1930,17 +1959,11 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
>          error_report("Defaulting to \"bh\"");
>      }
>  
> -    for (i = 0; i < n->max_queues; i++) {
> -        virtio_net_add_queue(n, i);
> -    }
> -
> -    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
>      qemu_macaddr_default_if_unset(&n->nic_conf.macaddr);
>      memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac));
>      n->status = VIRTIO_NET_S_LINK_UP;
>      n->announce_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
>                                       virtio_net_announce_timer, n);
> -
>      if (n->netclient_type) {
>          /*
>           * Happen when virtio_net_set_netclient_name has been called.
> @@ -1952,6 +1975,11 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
>                                object_get_typename(OBJECT(dev)), dev->id, n);
>      }
>  
> +    for (i = 0; i < n->max_queues; i++) {
> +        virtio_net_add_queue(n, i);
> +    }
> +    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
> +
>      peer_test_vnet_hdr(n);
>      if (peer_has_vnet_hdr(n)) {
>          for (i = 0; i < n->max_queues; i++) {
> @@ -2089,6 +2117,8 @@ static Property virtio_net_properties[] = {
>      DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
>      DEFINE_PROP_UINT16("rx_queue_size", VirtIONet, net_conf.rx_queue_size,
>                         VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE),
> +    DEFINE_PROP_UINT16("tx_queue_size", VirtIONet, net_conf.tx_queue_size,
> +                       VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE),
>      DEFINE_PROP_UINT16("host_mtu", VirtIONet, net_conf.mtu, 0),
>      DEFINE_PROP_END_OF_LIST(),
>  };
> diff --git a/include/hw/virtio/virtio-net.h b/include/hw/virtio/virtio-net.h
> index 1eec9a2..fd944ba 100644
> --- a/include/hw/virtio/virtio-net.h
> +++ b/include/hw/virtio/virtio-net.h
> @@ -36,6 +36,7 @@ typedef struct virtio_net_conf
>      int32_t txburst;
>      char *tx;
>      uint16_t rx_queue_size;
> +    uint16_t tx_queue_size;
>      uint16_t mtu;
>  } virtio_net_conf;
>  
> -- 
> 2.7.4
Michael S. Tsirkin June 16, 2017, 2:31 p.m. UTC | #2
On Fri, Jun 16, 2017 at 06:48:38PM +0800, Wei Wang wrote:
> This patch enables the virtio-net tx queue size to be configurable
> between 256 (the default queue size) and 1024 by the user when the
> vhost-user backend is used.
> 
> Currently, the maximum tx queue size for other backends is 512 due
> to the following limitations:
> - QEMU backend: the QEMU backend implementation in some cases may
> send 1024+1 iovs to writev.
> - Vhost_net backend: there are possibilities that the guest sends
> a vring_desc of memory which corsses a MemoryRegion thereby
> generating more than 1024 iovs in total after translattion from
> guest-physical address in the backend.
> 
> Signed-off-by: Wei Wang <wei.w.wang@intel.com>
> ---
>  hw/net/virtio-net.c            | 46 ++++++++++++++++++++++++++++++++++--------
>  include/hw/virtio/virtio-net.h |  1 +
>  2 files changed, 39 insertions(+), 8 deletions(-)
> 
> diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> index 7d091c9..e1a08fd 100644
> --- a/hw/net/virtio-net.c
> +++ b/hw/net/virtio-net.c
> @@ -33,8 +33,11 @@
>  
>  /* previously fixed value */
>  #define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
> +#define VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE 256
> +
>  /* for now, only allow larger queues; with virtio-1, guest can downsize */
>  #define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE
> +#define VIRTIO_NET_TX_QUEUE_MIN_SIZE VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE
>  
>  /*
>   * Calculate the number of bytes up to and including the given 'field' of
> @@ -1491,18 +1494,33 @@ static void virtio_net_tx_bh(void *opaque)
>  static void virtio_net_add_queue(VirtIONet *n, int index)
>  {
>      VirtIODevice *vdev = VIRTIO_DEVICE(n);
> +    NetClientState *nc = qemu_get_queue(n->nic);
>  
>      n->vqs[index].rx_vq = virtio_add_queue(vdev, n->net_conf.rx_queue_size,
>                                             virtio_net_handle_rx);
> +
> +    /*
> +     * Currently, backends other than vhost-user don't support 1024 queue
> +     * size.
> +     */
> +    if (n->net_conf.tx_queue_size == VIRTQUEUE_MAX_SIZE &&
> +        nc->peer->info->type != NET_CLIENT_DRIVER_VHOST_USER) {
> +        fprintf(stderr, "warning: %s: queue size %d not supported\n",
> +                __func__, n->net_conf.tx_queue_size);
> +        n->net_conf.tx_queue_size = VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
> +    }
> +

Also, I suspect we can get here with no peer, and above will crash.
It seems ugly to do this on each virtio_net_add_queue.
How about moving this to realize?




>      if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
>          n->vqs[index].tx_vq =
> -            virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
> +            virtio_add_queue(vdev, n->net_conf.tx_queue_size,
> +                             virtio_net_handle_tx_timer);
>          n->vqs[index].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
>                                                virtio_net_tx_timer,
>                                                &n->vqs[index]);
>      } else {
>          n->vqs[index].tx_vq =
> -            virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
> +            virtio_add_queue(vdev, n->net_conf.tx_queue_size,
> +                             virtio_net_handle_tx_bh);
>          n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]);
>      }
>  
> @@ -1910,6 +1928,17 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
>          return;
>      }
>  
> +    if (n->net_conf.tx_queue_size < VIRTIO_NET_TX_QUEUE_MIN_SIZE ||
> +        n->net_conf.tx_queue_size > VIRTQUEUE_MAX_SIZE ||
> +        !is_power_of_2(n->net_conf.tx_queue_size)) {
> +        error_setg(errp, "Invalid tx_queue_size (= %" PRIu16 "), "
> +                   "must be a power of 2 between %d and %d",
> +                   n->net_conf.tx_queue_size, VIRTIO_NET_TX_QUEUE_MIN_SIZE,
> +                   VIRTQUEUE_MAX_SIZE);
> +        virtio_cleanup(vdev);
> +        return;
> +    }
> +
>      n->max_queues = MAX(n->nic_conf.peers.queues, 1);
>      if (n->max_queues * 2 + 1 > VIRTIO_QUEUE_MAX) {
>          error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
> @@ -1930,17 +1959,11 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
>          error_report("Defaulting to \"bh\"");
>      }
>  
> -    for (i = 0; i < n->max_queues; i++) {
> -        virtio_net_add_queue(n, i);
> -    }
> -
> -    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
>      qemu_macaddr_default_if_unset(&n->nic_conf.macaddr);
>      memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac));
>      n->status = VIRTIO_NET_S_LINK_UP;
>      n->announce_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
>                                       virtio_net_announce_timer, n);
> -
>      if (n->netclient_type) {
>          /*
>           * Happen when virtio_net_set_netclient_name has been called.
> @@ -1952,6 +1975,11 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
>                                object_get_typename(OBJECT(dev)), dev->id, n);
>      }
>  
> +    for (i = 0; i < n->max_queues; i++) {
> +        virtio_net_add_queue(n, i);
> +    }
> +    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
> +
>      peer_test_vnet_hdr(n);
>      if (peer_has_vnet_hdr(n)) {
>          for (i = 0; i < n->max_queues; i++) {
> @@ -2089,6 +2117,8 @@ static Property virtio_net_properties[] = {
>      DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
>      DEFINE_PROP_UINT16("rx_queue_size", VirtIONet, net_conf.rx_queue_size,
>                         VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE),
> +    DEFINE_PROP_UINT16("tx_queue_size", VirtIONet, net_conf.tx_queue_size,
> +                       VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE),
>      DEFINE_PROP_UINT16("host_mtu", VirtIONet, net_conf.mtu, 0),
>      DEFINE_PROP_END_OF_LIST(),
>  };
> diff --git a/include/hw/virtio/virtio-net.h b/include/hw/virtio/virtio-net.h
> index 1eec9a2..fd944ba 100644
> --- a/include/hw/virtio/virtio-net.h
> +++ b/include/hw/virtio/virtio-net.h
> @@ -36,6 +36,7 @@ typedef struct virtio_net_conf
>      int32_t txburst;
>      char *tx;
>      uint16_t rx_queue_size;
> +    uint16_t tx_queue_size;
>      uint16_t mtu;
>  } virtio_net_conf;
>  
> -- 
> 2.7.4
Wang, Wei W June 17, 2017, 8:38 a.m. UTC | #3
On 06/16/2017 10:31 PM, Michael S. Tsirkin wrote:
> On Fri, Jun 16, 2017 at 06:48:38PM +0800, Wei Wang wrote:
>> This patch enables the virtio-net tx queue size to be configurable
>> between 256 (the default queue size) and 1024 by the user when the
>> vhost-user backend is used.
>>
>> Currently, the maximum tx queue size for other backends is 512 due
>> to the following limitations:
>> - QEMU backend: the QEMU backend implementation in some cases may
>> send 1024+1 iovs to writev.
>> - Vhost_net backend: there are possibilities that the guest sends
>> a vring_desc of memory which corsses a MemoryRegion thereby
>> generating more than 1024 iovs in total after translattion from
>> guest-physical address in the backend.
>>
>> Signed-off-by: Wei Wang <wei.w.wang@intel.com>
>> ---
>>   hw/net/virtio-net.c            | 46 ++++++++++++++++++++++++++++++++++--------
>>   include/hw/virtio/virtio-net.h |  1 +
>>   2 files changed, 39 insertions(+), 8 deletions(-)
>>
>> diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
>> index 7d091c9..e1a08fd 100644
>> --- a/hw/net/virtio-net.c
>> +++ b/hw/net/virtio-net.c
>> @@ -33,8 +33,11 @@
>>   
>>   /* previously fixed value */
>>   #define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
>> +#define VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE 256
>> +
>>   /* for now, only allow larger queues; with virtio-1, guest can downsize */
>>   #define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE
>> +#define VIRTIO_NET_TX_QUEUE_MIN_SIZE VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE
>>   
>>   /*
>>    * Calculate the number of bytes up to and including the given 'field' of
>> @@ -1491,18 +1494,33 @@ static void virtio_net_tx_bh(void *opaque)
>>   static void virtio_net_add_queue(VirtIONet *n, int index)
>>   {
>>       VirtIODevice *vdev = VIRTIO_DEVICE(n);
>> +    NetClientState *nc = qemu_get_queue(n->nic);
>>   
>>       n->vqs[index].rx_vq = virtio_add_queue(vdev, n->net_conf.rx_queue_size,
>>                                              virtio_net_handle_rx);
>> +
>> +    /*
>> +     * Currently, backends other than vhost-user don't support 1024 queue
>> +     * size.
>> +     */
>> +    if (n->net_conf.tx_queue_size == VIRTQUEUE_MAX_SIZE &&
>> +        nc->peer->info->type != NET_CLIENT_DRIVER_VHOST_USER) {
>> +        fprintf(stderr, "warning: %s: queue size %d not supported\n",
>> +                __func__, n->net_conf.tx_queue_size);
>> +        n->net_conf.tx_queue_size = VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
>> +    }
>> +
> Also, I suspect we can get here with no peer, and above will crash.
> It seems ugly to do this on each virtio_net_add_queue.
> How about moving this to realize?

The code has been re-arranged to make sure nc->peer is ready before
it's used, but I agree that it looks better to move the above to realize().

Best,
Wei
Michael S. Tsirkin June 22, 2017, 2 p.m. UTC | #4
On Sat, Jun 17, 2017 at 04:38:03PM +0800, Wei Wang wrote:
> On 06/16/2017 10:31 PM, Michael S. Tsirkin wrote:
> > On Fri, Jun 16, 2017 at 06:48:38PM +0800, Wei Wang wrote:
> > > This patch enables the virtio-net tx queue size to be configurable
> > > between 256 (the default queue size) and 1024 by the user when the
> > > vhost-user backend is used.
> > > 
> > > Currently, the maximum tx queue size for other backends is 512 due
> > > to the following limitations:
> > > - QEMU backend: the QEMU backend implementation in some cases may
> > > send 1024+1 iovs to writev.
> > > - Vhost_net backend: there are possibilities that the guest sends
> > > a vring_desc of memory which corsses a MemoryRegion thereby
> > > generating more than 1024 iovs in total after translattion from
> > > guest-physical address in the backend.
> > > 
> > > Signed-off-by: Wei Wang <wei.w.wang@intel.com>
> > > ---
> > >   hw/net/virtio-net.c            | 46 ++++++++++++++++++++++++++++++++++--------
> > >   include/hw/virtio/virtio-net.h |  1 +
> > >   2 files changed, 39 insertions(+), 8 deletions(-)
> > > 
> > > diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> > > index 7d091c9..e1a08fd 100644
> > > --- a/hw/net/virtio-net.c
> > > +++ b/hw/net/virtio-net.c
> > > @@ -33,8 +33,11 @@
> > >   /* previously fixed value */
> > >   #define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
> > > +#define VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE 256
> > > +
> > >   /* for now, only allow larger queues; with virtio-1, guest can downsize */
> > >   #define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE
> > > +#define VIRTIO_NET_TX_QUEUE_MIN_SIZE VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE
> > >   /*
> > >    * Calculate the number of bytes up to and including the given 'field' of
> > > @@ -1491,18 +1494,33 @@ static void virtio_net_tx_bh(void *opaque)
> > >   static void virtio_net_add_queue(VirtIONet *n, int index)
> > >   {
> > >       VirtIODevice *vdev = VIRTIO_DEVICE(n);
> > > +    NetClientState *nc = qemu_get_queue(n->nic);
> > >       n->vqs[index].rx_vq = virtio_add_queue(vdev, n->net_conf.rx_queue_size,
> > >                                              virtio_net_handle_rx);
> > > +
> > > +    /*
> > > +     * Currently, backends other than vhost-user don't support 1024 queue
> > > +     * size.
> > > +     */
> > > +    if (n->net_conf.tx_queue_size == VIRTQUEUE_MAX_SIZE &&
> > > +        nc->peer->info->type != NET_CLIENT_DRIVER_VHOST_USER) {
> > > +        fprintf(stderr, "warning: %s: queue size %d not supported\n",
> > > +                __func__, n->net_conf.tx_queue_size);
> > > +        n->net_conf.tx_queue_size = VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
> > > +    }
> > > +
> > Also, I suspect we can get here with no peer, and above will crash.
> > It seems ugly to do this on each virtio_net_add_queue.
> > How about moving this to realize?
> 
> The code has been re-arranged to make sure nc->peer is ready before
> it's used, but I agree that it looks better to move the above to realize().
> 
> Best,
> Wei

ping

The issues left are minor, let's make progress and merge this asap
Wang, Wei W June 23, 2017, 12:46 a.m. UTC | #5
On 06/22/2017 10:00 PM, Michael S. Tsirkin wrote:
> On Sat, Jun 17, 2017 at 04:38:03PM +0800, Wei Wang wrote:
>> On 06/16/2017 10:31 PM, Michael S. Tsirkin wrote:
>>> On Fri, Jun 16, 2017 at 06:48:38PM +0800, Wei Wang wrote:
>>>> This patch enables the virtio-net tx queue size to be configurable
>>>> between 256 (the default queue size) and 1024 by the user when the
>>>> vhost-user backend is used.
>>>>
>>>> Currently, the maximum tx queue size for other backends is 512 due
>>>> to the following limitations:
>>>> - QEMU backend: the QEMU backend implementation in some cases may
>>>> send 1024+1 iovs to writev.
>>>> - Vhost_net backend: there are possibilities that the guest sends
>>>> a vring_desc of memory which corsses a MemoryRegion thereby
>>>> generating more than 1024 iovs in total after translattion from
>>>> guest-physical address in the backend.
>>>>
>>>> Signed-off-by: Wei Wang <wei.w.wang@intel.com>
>>>> ---
>>>>    hw/net/virtio-net.c            | 46 ++++++++++++++++++++++++++++++++++--------
>>>>    include/hw/virtio/virtio-net.h |  1 +
>>>>    2 files changed, 39 insertions(+), 8 deletions(-)
>>>>
>>>> diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
>>>> index 7d091c9..e1a08fd 100644
>>>> --- a/hw/net/virtio-net.c
>>>> +++ b/hw/net/virtio-net.c
>>>> @@ -33,8 +33,11 @@
>>>>    /* previously fixed value */
>>>>    #define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
>>>> +#define VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE 256
>>>> +
>>>>    /* for now, only allow larger queues; with virtio-1, guest can downsize */
>>>>    #define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE
>>>> +#define VIRTIO_NET_TX_QUEUE_MIN_SIZE VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE
>>>>    /*
>>>>     * Calculate the number of bytes up to and including the given 'field' of
>>>> @@ -1491,18 +1494,33 @@ static void virtio_net_tx_bh(void *opaque)
>>>>    static void virtio_net_add_queue(VirtIONet *n, int index)
>>>>    {
>>>>        VirtIODevice *vdev = VIRTIO_DEVICE(n);
>>>> +    NetClientState *nc = qemu_get_queue(n->nic);
>>>>        n->vqs[index].rx_vq = virtio_add_queue(vdev, n->net_conf.rx_queue_size,
>>>>                                               virtio_net_handle_rx);
>>>> +
>>>> +    /*
>>>> +     * Currently, backends other than vhost-user don't support 1024 queue
>>>> +     * size.
>>>> +     */
>>>> +    if (n->net_conf.tx_queue_size == VIRTQUEUE_MAX_SIZE &&
>>>> +        nc->peer->info->type != NET_CLIENT_DRIVER_VHOST_USER) {
>>>> +        fprintf(stderr, "warning: %s: queue size %d not supported\n",
>>>> +                __func__, n->net_conf.tx_queue_size);
>>>> +        n->net_conf.tx_queue_size = VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
>>>> +    }
>>>> +
>>> Also, I suspect we can get here with no peer, and above will crash.
>>> It seems ugly to do this on each virtio_net_add_queue.
>>> How about moving this to realize?
>> The code has been re-arranged to make sure nc->peer is ready before
>> it's used, but I agree that it looks better to move the above to realize().
>>
>> Best,
>> Wei
> ping
>
> The issues left are minor, let's make progress and merge this asap
>

OK. I'll send out the new version soon.


Best,
Wei
diff mbox

Patch

diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 7d091c9..e1a08fd 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -33,8 +33,11 @@ 
 
 /* previously fixed value */
 #define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
+#define VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE 256
+
 /* for now, only allow larger queues; with virtio-1, guest can downsize */
 #define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE
+#define VIRTIO_NET_TX_QUEUE_MIN_SIZE VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE
 
 /*
  * Calculate the number of bytes up to and including the given 'field' of
@@ -1491,18 +1494,33 @@  static void virtio_net_tx_bh(void *opaque)
 static void virtio_net_add_queue(VirtIONet *n, int index)
 {
     VirtIODevice *vdev = VIRTIO_DEVICE(n);
+    NetClientState *nc = qemu_get_queue(n->nic);
 
     n->vqs[index].rx_vq = virtio_add_queue(vdev, n->net_conf.rx_queue_size,
                                            virtio_net_handle_rx);
+
+    /*
+     * Currently, backends other than vhost-user don't support 1024 queue
+     * size.
+     */
+    if (n->net_conf.tx_queue_size == VIRTQUEUE_MAX_SIZE &&
+        nc->peer->info->type != NET_CLIENT_DRIVER_VHOST_USER) {
+        fprintf(stderr, "warning: %s: queue size %d not supported\n",
+                __func__, n->net_conf.tx_queue_size);
+        n->net_conf.tx_queue_size = VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
+    }
+
     if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
         n->vqs[index].tx_vq =
-            virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
+            virtio_add_queue(vdev, n->net_conf.tx_queue_size,
+                             virtio_net_handle_tx_timer);
         n->vqs[index].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                               virtio_net_tx_timer,
                                               &n->vqs[index]);
     } else {
         n->vqs[index].tx_vq =
-            virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
+            virtio_add_queue(vdev, n->net_conf.tx_queue_size,
+                             virtio_net_handle_tx_bh);
         n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]);
     }
 
@@ -1910,6 +1928,17 @@  static void virtio_net_device_realize(DeviceState *dev, Error **errp)
         return;
     }
 
+    if (n->net_conf.tx_queue_size < VIRTIO_NET_TX_QUEUE_MIN_SIZE ||
+        n->net_conf.tx_queue_size > VIRTQUEUE_MAX_SIZE ||
+        !is_power_of_2(n->net_conf.tx_queue_size)) {
+        error_setg(errp, "Invalid tx_queue_size (= %" PRIu16 "), "
+                   "must be a power of 2 between %d and %d",
+                   n->net_conf.tx_queue_size, VIRTIO_NET_TX_QUEUE_MIN_SIZE,
+                   VIRTQUEUE_MAX_SIZE);
+        virtio_cleanup(vdev);
+        return;
+    }
+
     n->max_queues = MAX(n->nic_conf.peers.queues, 1);
     if (n->max_queues * 2 + 1 > VIRTIO_QUEUE_MAX) {
         error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
@@ -1930,17 +1959,11 @@  static void virtio_net_device_realize(DeviceState *dev, Error **errp)
         error_report("Defaulting to \"bh\"");
     }
 
-    for (i = 0; i < n->max_queues; i++) {
-        virtio_net_add_queue(n, i);
-    }
-
-    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
     qemu_macaddr_default_if_unset(&n->nic_conf.macaddr);
     memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac));
     n->status = VIRTIO_NET_S_LINK_UP;
     n->announce_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                      virtio_net_announce_timer, n);
-
     if (n->netclient_type) {
         /*
          * Happen when virtio_net_set_netclient_name has been called.
@@ -1952,6 +1975,11 @@  static void virtio_net_device_realize(DeviceState *dev, Error **errp)
                               object_get_typename(OBJECT(dev)), dev->id, n);
     }
 
+    for (i = 0; i < n->max_queues; i++) {
+        virtio_net_add_queue(n, i);
+    }
+    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
+
     peer_test_vnet_hdr(n);
     if (peer_has_vnet_hdr(n)) {
         for (i = 0; i < n->max_queues; i++) {
@@ -2089,6 +2117,8 @@  static Property virtio_net_properties[] = {
     DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
     DEFINE_PROP_UINT16("rx_queue_size", VirtIONet, net_conf.rx_queue_size,
                        VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE),
+    DEFINE_PROP_UINT16("tx_queue_size", VirtIONet, net_conf.tx_queue_size,
+                       VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE),
     DEFINE_PROP_UINT16("host_mtu", VirtIONet, net_conf.mtu, 0),
     DEFINE_PROP_END_OF_LIST(),
 };
diff --git a/include/hw/virtio/virtio-net.h b/include/hw/virtio/virtio-net.h
index 1eec9a2..fd944ba 100644
--- a/include/hw/virtio/virtio-net.h
+++ b/include/hw/virtio/virtio-net.h
@@ -36,6 +36,7 @@  typedef struct virtio_net_conf
     int32_t txburst;
     char *tx;
     uint16_t rx_queue_size;
+    uint16_t tx_queue_size;
     uint16_t mtu;
 } virtio_net_conf;