[V7,08/16] virtio: introduce bus specific queue limit

Message ID 1429770109-23873-9-git-send-email-jasowang@redhat.com
State New

Commit Message

Jason Wang April 23, 2015, 6:21 a.m. UTC
This patch introduces a bus specific queue limit. It will be useful
for increasing the limit for one bus without disturbing the other
buses.

Cc: Michael S. Tsirkin <mst@redhat.com>
Cc: Alexander Graf <agraf@suse.de>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Cornelia Huck <cornelia.huck@de.ibm.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
---
 hw/char/virtio-serial-bus.c    |  2 +-
 hw/net/virtio-net.c            |  4 ++--
 hw/s390x/s390-virtio-bus.c     |  1 +
 hw/s390x/virtio-ccw.c          |  1 +
 hw/scsi/virtio-scsi.c          |  4 ++--
 hw/virtio/virtio-mmio.c        |  1 +
 hw/virtio/virtio-pci.c         |  1 +
 hw/virtio/virtio.c             | 40 +++++++++++++++++++++++++---------------
 include/hw/virtio/virtio-bus.h |  1 +
 include/hw/virtio/virtio.h     |  1 +
 10 files changed, 36 insertions(+), 20 deletions(-)
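
In short, the limit moves out of a single global macro and into
VirtioBusClass, so each transport can declare its own value and devices
query it at run time. A condensed sketch of the mechanism (the field,
helper and class_init assignment are taken from the diff quoted in the
discussion below; surrounding QEMU headers and types are assumed):

    /* include/hw/virtio/virtio-bus.h (sketch) */
    typedef struct VirtioBusClass {
        /* ... notify, get_features, save/load hooks, ... */
        uint16_t queue_max;              /* per-transport virtqueue limit */
    } VirtioBusClass;

    /* hw/virtio/virtio.c (sketch): devices ask the bus for its limit */
    int virtio_get_queue_max(VirtIODevice *vdev)
    {
        BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
        VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

        return k->queue_max;
    }

    /* each transport sets its own value in its bus class_init, e.g.: */
    static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
    {
        VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);

        /* ... existing callback assignments ... */
        k->queue_max = VIRTIO_PCI_QUEUE_MAX;  /* may later diverge per bus */
    }

Device code such as the virtio-net and virtio-scsi realize checks then
compares against virtio_get_queue_max(vdev) instead of the macro, as the
hunks quoted below show.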

Comments

Michael S. Tsirkin April 27, 2015, 11:05 a.m. UTC | #1
On Thu, Apr 23, 2015 at 02:21:41PM +0800, Jason Wang wrote:
> This patch introduces a bus specific queue limitation. It will be
> useful for increasing the limit for one of the bus without disturbing
> other buses.
> 
> Cc: Michael S. Tsirkin <mst@redhat.com>
> Cc: Alexander Graf <agraf@suse.de>
> Cc: Richard Henderson <rth@twiddle.net>
> Cc: Cornelia Huck <cornelia.huck@de.ibm.com>
> Cc: Christian Borntraeger <borntraeger@de.ibm.com>
> Cc: Paolo Bonzini <pbonzini@redhat.com>
> Signed-off-by: Jason Wang <jasowang@redhat.com>
> Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>

Is this still needed if you drop the attempt to
keep the limit around for old machine types?

> ---
>  hw/char/virtio-serial-bus.c    |  2 +-
>  hw/net/virtio-net.c            |  4 ++--
>  hw/s390x/s390-virtio-bus.c     |  1 +
>  hw/s390x/virtio-ccw.c          |  1 +
>  hw/scsi/virtio-scsi.c          |  4 ++--
>  hw/virtio/virtio-mmio.c        |  1 +
>  hw/virtio/virtio-pci.c         |  1 +
>  hw/virtio/virtio.c             | 40 +++++++++++++++++++++++++---------------
>  include/hw/virtio/virtio-bus.h |  1 +
>  include/hw/virtio/virtio.h     |  1 +
>  10 files changed, 36 insertions(+), 20 deletions(-)
> 
> diff --git a/hw/char/virtio-serial-bus.c b/hw/char/virtio-serial-bus.c
> index e336bdb..0694831 100644
> --- a/hw/char/virtio-serial-bus.c
> +++ b/hw/char/virtio-serial-bus.c
> @@ -973,7 +973,7 @@ static void virtio_serial_device_realize(DeviceState *dev, Error **errp)
>      }
>  
>      /* Each port takes 2 queues, and one pair is for the control queue */
> -    max_supported_ports = VIRTIO_PCI_QUEUE_MAX / 2 - 1;
> +    max_supported_ports = virtio_get_queue_max(vdev) / 2 - 1;
>  
>      if (vser->serial.max_virtserial_ports > max_supported_ports) {
>          error_setg(errp, "maximum ports supported: %u", max_supported_ports);
> diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> index b6fac9c..bf286f5 100644
> --- a/hw/net/virtio-net.c
> +++ b/hw/net/virtio-net.c
> @@ -1588,10 +1588,10 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
>      virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size);
>  
>      n->max_queues = MAX(n->nic_conf.peers.queues, 1);
> -    if (n->max_queues * 2 + 1 > VIRTIO_PCI_QUEUE_MAX) {
> +    if (n->max_queues * 2 + 1 > virtio_get_queue_max(vdev)) {
>          error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
>                     "must be a postive integer less than %d.",
> -                   n->max_queues, (VIRTIO_PCI_QUEUE_MAX - 1) / 2);
> +                   n->max_queues, (virtio_get_queue_max(vdev) - 1) / 2);
>          virtio_cleanup(vdev);
>          return;
>      }
> diff --git a/hw/s390x/s390-virtio-bus.c b/hw/s390x/s390-virtio-bus.c
> index 047c963..2b41e32 100644
> --- a/hw/s390x/s390-virtio-bus.c
> +++ b/hw/s390x/s390-virtio-bus.c
> @@ -715,6 +715,7 @@ static void virtio_s390_bus_class_init(ObjectClass *klass, void *data)
>      bus_class->max_dev = 1;
>      k->notify = virtio_s390_notify;
>      k->get_features = virtio_s390_get_features;
> +    k->queue_max = VIRTIO_PCI_QUEUE_MAX;
>  }
>  
>  static const TypeInfo virtio_s390_bus_info = {
> diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c
> index 0434f56..590eed5 100644
> --- a/hw/s390x/virtio-ccw.c
> +++ b/hw/s390x/virtio-ccw.c
> @@ -1715,6 +1715,7 @@ static void virtio_ccw_bus_class_init(ObjectClass *klass, void *data)
>      k->load_queue = virtio_ccw_load_queue;
>      k->save_config = virtio_ccw_save_config;
>      k->load_config = virtio_ccw_load_config;
> +    k->queue_max = VIRTIO_PCI_QUEUE_MAX;
>  }
>  
>  static const TypeInfo virtio_ccw_bus_info = {
> diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c
> index c9bea06..fbdde2b 100644
> --- a/hw/scsi/virtio-scsi.c
> +++ b/hw/scsi/virtio-scsi.c
> @@ -826,10 +826,10 @@ void virtio_scsi_common_realize(DeviceState *dev, Error **errp,
>                  sizeof(VirtIOSCSIConfig));
>  
>      if (s->conf.num_queues == 0 ||
> -            s->conf.num_queues > VIRTIO_PCI_QUEUE_MAX - 2) {
> +            s->conf.num_queues > virtio_get_queue_max(vdev) - 2) {
>          error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
>                           "must be a positive integer less than %d.",
> -                   s->conf.num_queues, VIRTIO_PCI_QUEUE_MAX - 2);
> +                   s->conf.num_queues, virtio_get_queue_max(vdev) - 2);
>          virtio_cleanup(vdev);
>          return;
>      }
> diff --git a/hw/virtio/virtio-mmio.c b/hw/virtio/virtio-mmio.c
> index 10123f3..2ae6942 100644
> --- a/hw/virtio/virtio-mmio.c
> +++ b/hw/virtio/virtio-mmio.c
> @@ -403,6 +403,7 @@ static void virtio_mmio_bus_class_init(ObjectClass *klass, void *data)
>      k->device_plugged = virtio_mmio_device_plugged;
>      k->has_variable_vring_alignment = true;
>      bus_class->max_dev = 1;
> +    k->queue_max = VIRTIO_PCI_QUEUE_MAX;
>  }
>  
>  static const TypeInfo virtio_mmio_bus_info = {
> diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
> index c7c3f72..075b13b 100644
> --- a/hw/virtio/virtio-pci.c
> +++ b/hw/virtio/virtio-pci.c
> @@ -1498,6 +1498,7 @@ static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
>      k->vmstate_change = virtio_pci_vmstate_change;
>      k->device_plugged = virtio_pci_device_plugged;
>      k->device_unplugged = virtio_pci_device_unplugged;
> +    k->queue_max = VIRTIO_PCI_QUEUE_MAX;
>  }
>  
>  static const TypeInfo virtio_pci_bus_info = {
> diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
> index 17c1260..bbb224f 100644
> --- a/hw/virtio/virtio.c
> +++ b/hw/virtio/virtio.c
> @@ -541,6 +541,14 @@ void virtio_update_irq(VirtIODevice *vdev)
>      virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
>  }
>  
> +int virtio_get_queue_max(VirtIODevice *vdev)
> +{
> +    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
> +    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
> +
> +    return k->queue_max;
> +}
> +
>  void virtio_set_status(VirtIODevice *vdev, uint8_t val)
>  {
>      VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
> @@ -599,7 +607,7 @@ void virtio_reset(void *opaque)
>      vdev->config_vector = VIRTIO_NO_VECTOR;
>      virtio_notify_vector(vdev, vdev->config_vector);
>  
> -    for(i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
> +    for (i = 0; i < virtio_get_queue_max(vdev); i++) {
>          vdev->vq[i].vring.desc = 0;
>          vdev->vq[i].vring.avail = 0;
>          vdev->vq[i].vring.used = 0;
> @@ -738,7 +746,8 @@ int virtio_queue_get_num(VirtIODevice *vdev, int n)
>  int virtio_queue_get_id(VirtQueue *vq)
>  {
>      VirtIODevice *vdev = vq->vdev;
> -    assert(vq >= &vdev->vq[0] && vq < &vdev->vq[VIRTIO_PCI_QUEUE_MAX]);
> +
> +    assert(vq >= &vdev->vq[0] && vq < &vdev->vq[virtio_get_queue_max(vdev)]);
>      return vq - &vdev->vq[0];
>  }
>  
> @@ -774,28 +783,29 @@ void virtio_queue_notify(VirtIODevice *vdev, int n)
>  
>  uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
>  {
> -    return n < VIRTIO_PCI_QUEUE_MAX ? vdev->vq[n].vector :
> +    return n < virtio_get_queue_max(vdev) ? vdev->vq[n].vector :
>          VIRTIO_NO_VECTOR;
>  }
>  
>  void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
>  {
> -    if (n < VIRTIO_PCI_QUEUE_MAX)
> +    if (n < virtio_get_queue_max(vdev))
>          vdev->vq[n].vector = vector;
>  }
>  
>  VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
>                              void (*handle_output)(VirtIODevice *, VirtQueue *))
>  {
> -    int i;
> +    int i, queue_max = virtio_get_queue_max(vdev);
>  
> -    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
> +    for (i = 0; i < queue_max; i++) {
>          if (vdev->vq[i].vring.num == 0)
>              break;
>      }
>  
> -    if (i == VIRTIO_PCI_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
> +    if (i == queue_max || queue_size > VIRTQUEUE_MAX_SIZE) {
>          abort();
> +    }
>  
>      vdev->vq[i].vring.num = queue_size;
>      vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
> @@ -806,7 +816,7 @@ VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
>  
>  void virtio_del_queue(VirtIODevice *vdev, int n)
>  {
> -    if (n < 0 || n >= VIRTIO_PCI_QUEUE_MAX) {
> +    if (n < 0 || n >= virtio_get_queue_max(vdev)) {
>          abort();
>      }
>  
> @@ -916,14 +926,14 @@ void virtio_save(VirtIODevice *vdev, QEMUFile *f)
>      qemu_put_be32(f, vdev->config_len);
>      qemu_put_buffer(f, vdev->config, vdev->config_len);
>  
> -    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
> +    for (i = 0; i < k->queue_max; i++) {
>          if (vdev->vq[i].vring.num == 0)
>              break;
>      }
>  
>      qemu_put_be32(f, i);
>  
> -    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
> +    for (i = 0; i < k->queue_max; i++) {
>          if (vdev->vq[i].vring.num == 0)
>              break;
>  
> @@ -988,7 +998,7 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
>      qemu_get_8s(f, &vdev->status);
>      qemu_get_8s(f, &vdev->isr);
>      qemu_get_be16s(f, &vdev->queue_sel);
> -    if (vdev->queue_sel >= VIRTIO_PCI_QUEUE_MAX) {
> +    if (vdev->queue_sel >= k->queue_max) {
>          return -1;
>      }
>      qemu_get_be32s(f, &features);
> @@ -1015,7 +1025,7 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
>  
>      num = qemu_get_be32(f);
>  
> -    if (num > VIRTIO_PCI_QUEUE_MAX) {
> +    if (num > k->queue_max) {
>          error_report("Invalid number of PCI queues: 0x%x", num);
>          return -1;
>      }
> @@ -1125,15 +1135,15 @@ void virtio_instance_init_common(Object *proxy_obj, void *data,
>  void virtio_init(VirtIODevice *vdev, const char *name,
>                   uint16_t device_id, size_t config_size)
>  {
> -    int i;
> +    int i, queue_max = virtio_get_queue_max(vdev);
>      vdev->device_id = device_id;
>      vdev->status = 0;
>      vdev->isr = 0;
>      vdev->queue_sel = 0;
>      vdev->config_vector = VIRTIO_NO_VECTOR;
> -    vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_PCI_QUEUE_MAX);
> +    vdev->vq = g_malloc0(sizeof(VirtQueue) * queue_max);
>      vdev->vm_running = runstate_is_running();
> -    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
> +    for (i = 0; i < queue_max; i++) {
>          vdev->vq[i].vector = VIRTIO_NO_VECTOR;
>          vdev->vq[i].vdev = vdev;
>          vdev->vq[i].queue_index = i;
> diff --git a/include/hw/virtio/virtio-bus.h b/include/hw/virtio/virtio-bus.h
> index 0d2e7b4..4da8022 100644
> --- a/include/hw/virtio/virtio-bus.h
> +++ b/include/hw/virtio/virtio-bus.h
> @@ -68,6 +68,7 @@ typedef struct VirtioBusClass {
>       * Note that changing this will break migration for this transport.
>       */
>      bool has_variable_vring_alignment;
> +    uint16_t queue_max;
>  } VirtioBusClass;
>  
>  struct VirtioBusState {
> diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
> index d95f8b6..91fd673 100644
> --- a/include/hw/virtio/virtio.h
> +++ b/include/hw/virtio/virtio.h
> @@ -179,6 +179,7 @@ void virtio_queue_notify(VirtIODevice *vdev, int n);
>  uint16_t virtio_queue_vector(VirtIODevice *vdev, int n);
>  void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector);
>  void virtio_set_status(VirtIODevice *vdev, uint8_t val);
> +int virtio_get_queue_max(VirtIODevice *vdev);
>  void virtio_reset(void *opaque);
>  void virtio_update_irq(VirtIODevice *vdev);
>  int virtio_set_features(VirtIODevice *vdev, uint32_t val);
> -- 
> 2.1.0
Jason Wang April 28, 2015, 3:14 a.m. UTC | #2
On Mon, Apr 27, 2015 at 7:05 PM, Michael S. Tsirkin <mst@redhat.com> 
wrote:
> On Thu, Apr 23, 2015 at 02:21:41PM +0800, Jason Wang wrote:
>>  This patch introduces a bus specific queue limitation. It will be
>>  useful for increasing the limit for one of the bus without 
>> disturbing
>>  other buses.
>>  
>>  Cc: Michael S. Tsirkin <mst@redhat.com>
>>  Cc: Alexander Graf <agraf@suse.de>
>>  Cc: Richard Henderson <rth@twiddle.net>
>>  Cc: Cornelia Huck <cornelia.huck@de.ibm.com>
>>  Cc: Christian Borntraeger <borntraeger@de.ibm.com>
>>  Cc: Paolo Bonzini <pbonzini@redhat.com>
>>  Signed-off-by: Jason Wang <jasowang@redhat.com>
>>  Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
> 
> Is this still needed if you drop the attempt to
> keep the limit around for old machine types?

If we agree to drop it, we probably need a transport-specific macro.

Michael S. Tsirkin April 28, 2015, 5:13 a.m. UTC | #3
On Tue, Apr 28, 2015 at 11:14:04AM +0800, Jason Wang wrote:
> 
> 
> On Mon, Apr 27, 2015 at 7:05 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
> >On Thu, Apr 23, 2015 at 02:21:41PM +0800, Jason Wang wrote:
> >> This patch introduces a bus specific queue limitation. It will be
> >> useful for increasing the limit for one of the bus without disturbing
> >> other buses.
> >> Cc: Michael S. Tsirkin <mst@redhat.com>
> >> Cc: Alexander Graf <agraf@suse.de>
> >> Cc: Richard Henderson <rth@twiddle.net>
> >> Cc: Cornelia Huck <cornelia.huck@de.ibm.com>
> >> Cc: Christian Borntraeger <borntraeger@de.ibm.com>
> >> Cc: Paolo Bonzini <pbonzini@redhat.com>
> >> Signed-off-by: Jason Wang <jasowang@redhat.com>
> >> Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
> >
> >Is this still needed if you drop the attempt to
> >keep the limit around for old machine types?
> 
> If we agree to drop, we probably need transport specific macro.

You mean just rename VIRTIO_PCI_QUEUE_MAX to VIRTIO_QUEUE_MAX?
Fine, why not.

Jason Wang April 28, 2015, 6:13 a.m. UTC | #4
On Tue, Apr 28, 2015 at 1:13 PM, Michael S. Tsirkin <mst@redhat.com> 
wrote:
> On Tue, Apr 28, 2015 at 11:14:04AM +0800, Jason Wang wrote:
>>  
>>  
>>  On Mon, Apr 27, 2015 at 7:05 PM, Michael S. Tsirkin 
>> <mst@redhat.com> wrote:
>>  >On Thu, Apr 23, 2015 at 02:21:41PM +0800, Jason Wang wrote:
>>  >> This patch introduces a bus specific queue limitation. It will be
>>  >> useful for increasing the limit for one of the bus without 
>> disturbing
>>  >> other buses.
>>  >> Cc: Michael S. Tsirkin <mst@redhat.com>
>>  >> Cc: Alexander Graf <agraf@suse.de>
>>  >> Cc: Richard Henderson <rth@twiddle.net>
>>  >> Cc: Cornelia Huck <cornelia.huck@de.ibm.com>
>>  >> Cc: Christian Borntraeger <borntraeger@de.ibm.com>
>>  >> Cc: Paolo Bonzini <pbonzini@redhat.com>
>>  >> Signed-off-by: Jason Wang <jasowang@redhat.com>
>>  >> Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
>>  >
>>  >Is this still needed if you drop the attempt to
>>  >keep the limit around for old machine types?
>>  
>>  If we agree to drop, we probably need transport specific macro.
> 
> You mean just rename VIRTIO_PCI_QUEUE_MAX to VIRTIO_QUEUE_MAX?
> Fine, why not.

I mean keeping VIRTIO_PCI_QUEUE_MAX for pci only and just increasing the
pci limit, and introducing e.g. VIRTIO_PCI_QUEUE_CCW for ccw and keeping
it at 64. To my understanding, it's not safe to increase the limit for
all other transports, as Cornelia pointed out in V1:
http://permalink.gmane.org/gmane.comp.emulators.qemu/318245.
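
For illustration only, the split described above might look roughly like
the sketch below; the ccw macro name and the idea that only the PCI value
would later be raised are assumptions based on this message, not code
from the series:

    /* Hypothetical sketch of the proposal above, not part of this series. */
    #define VIRTIO_PCI_QUEUE_MAX  64   /* pci keeps its macro; only its
                                        * value would be raised later */
    #define VIRTIO_CCW_QUEUE_MAX  64   /* illustrative name for the ccw
                                        * macro mentioned above; stays 64 */

    static void virtio_ccw_bus_class_init(ObjectClass *klass, void *data)
    {
        VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);

        /* ... existing hooks ... */
        k->queue_max = VIRTIO_CCW_QUEUE_MAX;  /* instead of the pci macro */
    }
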
Michael S. Tsirkin April 28, 2015, 7:14 a.m. UTC | #5
On Tue, Apr 28, 2015 at 02:13:20PM +0800, Jason Wang wrote:
> 
> 
> On Tue, Apr 28, 2015 at 1:13 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
> >On Tue, Apr 28, 2015 at 11:14:04AM +0800, Jason Wang wrote:
> >>     On Mon, Apr 27, 2015 at 7:05 PM, Michael S. Tsirkin
> >><mst@redhat.com> wrote:
> >> >On Thu, Apr 23, 2015 at 02:21:41PM +0800, Jason Wang wrote:
> >> >> This patch introduces a bus specific queue limitation. It will be
> >> >> useful for increasing the limit for one of the bus without
> >>disturbing
> >> >> other buses.
> >> >> Cc: Michael S. Tsirkin <mst@redhat.com>
> >> >> Cc: Alexander Graf <agraf@suse.de>
> >> >> Cc: Richard Henderson <rth@twiddle.net>
> >> >> Cc: Cornelia Huck <cornelia.huck@de.ibm.com>
> >> >> Cc: Christian Borntraeger <borntraeger@de.ibm.com>
> >> >> Cc: Paolo Bonzini <pbonzini@redhat.com>
> >> >> Signed-off-by: Jason Wang <jasowang@redhat.com>
> >> >> Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
> >> >
> >> >Is this still needed if you drop the attempt to
> >> >keep the limit around for old machine types?
> >> If we agree to drop, we probably need transport specific macro.
> >
> >You mean just rename VIRTIO_PCI_QUEUE_MAX to VIRTIO_QUEUE_MAX?
> >Fine, why not.
> 
> I mean keeping VIRTIO_PCI_QUEUE_MAX for pci only and just increase pci
> limit. And introduce e.g VIRTIO_PCI_QUEUE_CCW for ccw and keep it as 64.
> Since to my understanding, it's not safe to increase the limit for all other
> transports which was pointed out by Cornelia in V1:
> http://permalink.gmane.org/gmane.comp.emulators.qemu/318245.

I think all you need is to add a check to CCW_CMD_SET_IND:
limit it to 64 for legacy interrupts only.
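
Very roughly, the kind of check suggested here might look like the sketch
below, to be called from the CCW_CMD_SET_IND handler; the helper name,
the way configured queues are counted and the error value are assumptions
(QEMU headers and <errno.h> assumed), not code from this series:

    /* Hypothetical sketch of the suggested check, not code from this series. */
    #define CLASSIC_INDICATOR_BITS 64  /* classic indicators: one bit per queue */

    static int virtio_ccw_check_classic_ind(VirtIODevice *vdev)
    {
        int n = 0;

        /* Count the virtqueues the device actually set up (vring.num != 0),
         * walking the queue array the same way virtio_save() does. */
        while (n < virtio_get_queue_max(vdev) &&
               virtio_queue_get_num(vdev, n) != 0) {
            n++;
        }

        /* Per-subchannel (classic) indicators cannot address more than 64
         * queues, so refuse SET_IND and require adapter interrupts instead. */
        return n > CLASSIC_INDICATOR_BITS ? -ENOSYS : 0;
    }
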
Cornelia Huck April 28, 2015, 8:04 a.m. UTC | #6
On Tue, 28 Apr 2015 09:14:07 +0200
"Michael S. Tsirkin" <mst@redhat.com> wrote:

> On Tue, Apr 28, 2015 at 02:13:20PM +0800, Jason Wang wrote:
> > 
> > 
> > On Tue, Apr 28, 2015 at 1:13 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
> > >On Tue, Apr 28, 2015 at 11:14:04AM +0800, Jason Wang wrote:
> > >>     On Mon, Apr 27, 2015 at 7:05 PM, Michael S. Tsirkin
> > >><mst@redhat.com> wrote:
> > >> >On Thu, Apr 23, 2015 at 02:21:41PM +0800, Jason Wang wrote:
> > >> >> This patch introduces a bus specific queue limitation. It will be
> > >> >> useful for increasing the limit for one of the bus without
> > >>disturbing
> > >> >> other buses.
> > >> >> Cc: Michael S. Tsirkin <mst@redhat.com>
> > >> >> Cc: Alexander Graf <agraf@suse.de>
> > >> >> Cc: Richard Henderson <rth@twiddle.net>
> > >> >> Cc: Cornelia Huck <cornelia.huck@de.ibm.com>
> > >> >> Cc: Christian Borntraeger <borntraeger@de.ibm.com>
> > >> >> Cc: Paolo Bonzini <pbonzini@redhat.com>
> > >> >> Signed-off-by: Jason Wang <jasowang@redhat.com>
> > >> >> Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
> > >> >
> > >> >Is this still needed if you drop the attempt to
> > >> >keep the limit around for old machine types?
> > >> If we agree to drop, we probably need transport specific macro.
> > >
> > >You mean just rename VIRTIO_PCI_QUEUE_MAX to VIRTIO_QUEUE_MAX?
> > >Fine, why not.
> > 
> > I mean keeping VIRTIO_PCI_QUEUE_MAX for pci only and just increase pci
> > limit. And introduce e.g VIRTIO_PCI_QUEUE_CCW for ccw and keep it as 64.
> > Since to my understanding, it's not safe to increase the limit for all other
> > transports which was pointed out by Cornelia in V1:
> > http://permalink.gmane.org/gmane.comp.emulators.qemu/318245.
> 
> I think all you need is add a check to CCW_CMD_SET_IND:
> limit to 64 for legacy interrupts only.

It isn't that easy.

What is easy is to add a check to the guest driver that fails setup for
devices with more than 64 queues not using adapter interrupts.

On the host side, we're lacking information when interpreting
CCW_CMD_SET_IND (the command does not contain a queue count, and the
actual number of virtqueues is not readily available.) We also can't
fence off when setting up the vqs, as this happens before we know which
kind of indicators the guest wants to use.

More importantly, we haven't even speced what we want to do in this
case. Do we want to reject SET_IND for devices with more than 64
queues? (Probably yes.)

All this involves more work, and I'd prefer to do Jason's changes
instead as this gives us some more time to figure this out properly.

And we haven't even considered s390-virtio yet, which I really want to
touch as little as possible :)
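
For the guest-side check called "easy" above, a hypothetical sketch of
what the Linux virtio_ccw driver could do is shown below; the structure
and field names (in particular using_airq) are assumptions, not existing
driver code:

    /* Hypothetical guest (Linux virtio_ccw) sketch; names are assumptions. */
    static int virtio_ccw_check_nvqs(struct virtio_ccw_device *vcdev,
                                     unsigned int nvqs)
    {
        /* Classic per-device indicators are one 64-bit field, one bit per
         * virtqueue; only adapter (airq) indicators can address more. */
        if (nvqs > 64 && !vcdev->using_airq)
            return -EINVAL;

        return 0;
    }
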
Michael S. Tsirkin April 28, 2015, 8:16 a.m. UTC | #7
On Tue, Apr 28, 2015 at 10:04:15AM +0200, Cornelia Huck wrote:
> On Tue, 28 Apr 2015 09:14:07 +0200
> "Michael S. Tsirkin" <mst@redhat.com> wrote:
> 
> > On Tue, Apr 28, 2015 at 02:13:20PM +0800, Jason Wang wrote:
> > > 
> > > 
> > > On Tue, Apr 28, 2015 at 1:13 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
> > > >On Tue, Apr 28, 2015 at 11:14:04AM +0800, Jason Wang wrote:
> > > >>     On Mon, Apr 27, 2015 at 7:05 PM, Michael S. Tsirkin
> > > >><mst@redhat.com> wrote:
> > > >> >On Thu, Apr 23, 2015 at 02:21:41PM +0800, Jason Wang wrote:
> > > >> >> This patch introduces a bus specific queue limitation. It will be
> > > >> >> useful for increasing the limit for one of the bus without
> > > >>disturbing
> > > >> >> other buses.
> > > >> >> Cc: Michael S. Tsirkin <mst@redhat.com>
> > > >> >> Cc: Alexander Graf <agraf@suse.de>
> > > >> >> Cc: Richard Henderson <rth@twiddle.net>
> > > >> >> Cc: Cornelia Huck <cornelia.huck@de.ibm.com>
> > > >> >> Cc: Christian Borntraeger <borntraeger@de.ibm.com>
> > > >> >> Cc: Paolo Bonzini <pbonzini@redhat.com>
> > > >> >> Signed-off-by: Jason Wang <jasowang@redhat.com>
> > > >> >> Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
> > > >> >
> > > >> >Is this still needed if you drop the attempt to
> > > >> >keep the limit around for old machine types?
> > > >> If we agree to drop, we probably need transport specific macro.
> > > >
> > > >You mean just rename VIRTIO_PCI_QUEUE_MAX to VIRTIO_QUEUE_MAX?
> > > >Fine, why not.
> > > 
> > > I mean keeping VIRTIO_PCI_QUEUE_MAX for pci only and just increase pci
> > > limit. And introduce e.g VIRTIO_PCI_QUEUE_CCW for ccw and keep it as 64.
> > > Since to my understanding, it's not safe to increase the limit for all other
> > > transports which was pointed out by Cornelia in V1:
> > > http://permalink.gmane.org/gmane.comp.emulators.qemu/318245.
> > 
> > I think all you need is add a check to CCW_CMD_SET_IND:
> > limit to 64 for legacy interrupts only.
> 
> It isn't that easy.
> 
> What is easy is to add a check to the guest driver that fails setup for
> devices with more than 64 queues not using adapter interrupts.
> 
> On the host side, we're lacking information when interpreting
> CCW_CMD_SET_IND (the command does not contain a queue count, and the
> actual number of virtqueues is not readily available.)

Why isn't it available? All devices call virtio_add_queue
as appropriate. Just fail legacy adaptors.

> We also can't
> fence off when setting up the vqs, as this happens before we know which
> kind of indicators the guest wants to use.
> 
> More importantly, we haven't even speced what we want to do in this
> case. Do we want to reject SET_IND for devices with more than 64
> queues? (Probably yes.)
> 
> All this involves more work, and I'd prefer to do Jason's changes
> instead as this gives us some more time to figure this out properly.
> 
> And we haven't even considered s390-virtio yet, which I really want to
> touch as little as possible :)

Well this patch does touch it anyway :)
For s390 just check and fail at init if you like.
Cornelia Huck April 28, 2015, 10:40 a.m. UTC | #8
On Tue, 28 Apr 2015 10:16:04 +0200
"Michael S. Tsirkin" <mst@redhat.com> wrote:

> On Tue, Apr 28, 2015 at 10:04:15AM +0200, Cornelia Huck wrote:
> > On Tue, 28 Apr 2015 09:14:07 +0200
> > "Michael S. Tsirkin" <mst@redhat.com> wrote:
> > 
> > > On Tue, Apr 28, 2015 at 02:13:20PM +0800, Jason Wang wrote:
> > > > 
> > > > 
> > > > On Tue, Apr 28, 2015 at 1:13 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
> > > > >On Tue, Apr 28, 2015 at 11:14:04AM +0800, Jason Wang wrote:
> > > > >>     On Mon, Apr 27, 2015 at 7:05 PM, Michael S. Tsirkin
> > > > >><mst@redhat.com> wrote:
> > > > >> >On Thu, Apr 23, 2015 at 02:21:41PM +0800, Jason Wang wrote:
> > > > >> >> This patch introduces a bus specific queue limitation. It will be
> > > > >> >> useful for increasing the limit for one of the bus without
> > > > >>disturbing
> > > > >> >> other buses.
> > > > >> >> Cc: Michael S. Tsirkin <mst@redhat.com>
> > > > >> >> Cc: Alexander Graf <agraf@suse.de>
> > > > >> >> Cc: Richard Henderson <rth@twiddle.net>
> > > > >> >> Cc: Cornelia Huck <cornelia.huck@de.ibm.com>
> > > > >> >> Cc: Christian Borntraeger <borntraeger@de.ibm.com>
> > > > >> >> Cc: Paolo Bonzini <pbonzini@redhat.com>
> > > > >> >> Signed-off-by: Jason Wang <jasowang@redhat.com>
> > > > >> >> Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
> > > > >> >
> > > > >> >Is this still needed if you drop the attempt to
> > > > >> >keep the limit around for old machine types?
> > > > >> If we agree to drop, we probably need transport specific macro.
> > > > >
> > > > >You mean just rename VIRTIO_PCI_QUEUE_MAX to VIRTIO_QUEUE_MAX?
> > > > >Fine, why not.
> > > > 
> > > > I mean keeping VIRTIO_PCI_QUEUE_MAX for pci only and just increase pci
> > > > limit. And introduce e.g VIRTIO_PCI_QUEUE_CCW for ccw and keep it as 64.
> > > > Since to my understanding, it's not safe to increase the limit for all other
> > > > transports which was pointed out by Cornelia in V1:
> > > > http://permalink.gmane.org/gmane.comp.emulators.qemu/318245.
> > > 
> > > I think all you need is add a check to CCW_CMD_SET_IND:
> > > limit to 64 for legacy interrupts only.
> > 
> > It isn't that easy.
> > 
> > What is easy is to add a check to the guest driver that fails setup for
> > devices with more than 64 queues not using adapter interrupts.
> > 
> > On the host side, we're lacking information when interpreting
> > CCW_CMD_SET_IND (the command does not contain a queue count, and the
> > actual number of virtqueues is not readily available.)
> 
> Why isn't it available? All devices call virtio_add_queue
> as appropriate. Just fail legacy adaptors.

Because we don't know what the guest is going to use? It is free to
use per-subchannel indicators, even if it is operating in virtio-1 mode.

> 
> > We also can't
> > fence off when setting up the vqs, as this happens before we know which
> > kind of indicators the guest wants to use.
> > 
> > More importantly, we haven't even speced what we want to do in this
> > case. Do we want to reject SET_IND for devices with more than 64
> > queues? (Probably yes.)
> > 
> > All this involves more work, and I'd prefer to do Jason's changes
> > instead as this gives us some more time to figure this out properly.
> > 
> > And we haven't even considered s390-virtio yet, which I really want to
> > touch as little as possible :)
> 
> Well this patch does touch it anyway :)

But only small, self-evident changes.

> For s390 just check and fail at init if you like.

What about devices that may change their number of queues? I'd really
prefer large queue numbers to be fenced off in the individual
devices, and for that they need to be able to grab a transport-specific
queue limit.
Michael S. Tsirkin April 28, 2015, 10:55 a.m. UTC | #9
On Tue, Apr 28, 2015 at 12:40:07PM +0200, Cornelia Huck wrote:
> On Tue, 28 Apr 2015 10:16:04 +0200
> "Michael S. Tsirkin" <mst@redhat.com> wrote:
> 
> > On Tue, Apr 28, 2015 at 10:04:15AM +0200, Cornelia Huck wrote:
> > > On Tue, 28 Apr 2015 09:14:07 +0200
> > > "Michael S. Tsirkin" <mst@redhat.com> wrote:
> > > 
> > > > On Tue, Apr 28, 2015 at 02:13:20PM +0800, Jason Wang wrote:
> > > > > 
> > > > > 
> > > > > On Tue, Apr 28, 2015 at 1:13 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
> > > > > >On Tue, Apr 28, 2015 at 11:14:04AM +0800, Jason Wang wrote:
> > > > > >>     On Mon, Apr 27, 2015 at 7:05 PM, Michael S. Tsirkin
> > > > > >><mst@redhat.com> wrote:
> > > > > >> >On Thu, Apr 23, 2015 at 02:21:41PM +0800, Jason Wang wrote:
> > > > > >> >> This patch introduces a bus specific queue limitation. It will be
> > > > > >> >> useful for increasing the limit for one of the bus without
> > > > > >>disturbing
> > > > > >> >> other buses.
> > > > > >> >> Cc: Michael S. Tsirkin <mst@redhat.com>
> > > > > >> >> Cc: Alexander Graf <agraf@suse.de>
> > > > > >> >> Cc: Richard Henderson <rth@twiddle.net>
> > > > > >> >> Cc: Cornelia Huck <cornelia.huck@de.ibm.com>
> > > > > >> >> Cc: Christian Borntraeger <borntraeger@de.ibm.com>
> > > > > >> >> Cc: Paolo Bonzini <pbonzini@redhat.com>
> > > > > >> >> Signed-off-by: Jason Wang <jasowang@redhat.com>
> > > > > >> >> Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
> > > > > >> >
> > > > > >> >Is this still needed if you drop the attempt to
> > > > > >> >keep the limit around for old machine types?
> > > > > >> If we agree to drop, we probably need transport specific macro.
> > > > > >
> > > > > >You mean just rename VIRTIO_PCI_QUEUE_MAX to VIRTIO_QUEUE_MAX?
> > > > > >Fine, why not.
> > > > > 
> > > > > I mean keeping VIRTIO_PCI_QUEUE_MAX for pci only and just increase pci
> > > > > limit. And introduce e.g VIRTIO_PCI_QUEUE_CCW for ccw and keep it as 64.
> > > > > Since to my understanding, it's not safe to increase the limit for all other
> > > > > transports which was pointed out by Cornelia in V1:
> > > > > http://permalink.gmane.org/gmane.comp.emulators.qemu/318245.
> > > > 
> > > > I think all you need is add a check to CCW_CMD_SET_IND:
> > > > limit to 64 for legacy interrupts only.
> > > 
> > > It isn't that easy.
> > > 
> > > What is easy is to add a check to the guest driver that fails setup for
> > > devices with more than 64 queues not using adapter interrupts.
> > > 
> > > On the host side, we're lacking information when interpreting
> > > CCW_CMD_SET_IND (the command does not contain a queue count, and the
> > > actual number of virtqueues is not readily available.)
> > 
> > Why isn't it available? All devices call virtio_add_queue
> > as appropriate. Just fail legacy adaptors.
> 
> Because we don't know what the guest is going to use? It is free to
> use per-subchannel indicators, even if it is operating in virtio-1 mode.
> > 
> > > We also can't
> > > fence off when setting up the vqs, as this happens before we know which
> > > kind of indicators the guest wants to use.
> > > 
> > > More importantly, we haven't even speced what we want to do in this
> > > case. Do we want to reject SET_IND for devices with more than 64
> > > queues? (Probably yes.)
> > > 
> > > All this involves more work, and I'd prefer to do Jason's changes
> > > instead as this gives us some more time to figure this out properly.
> > > 
> > > And we haven't even considered s390-virtio yet, which I really want to
> > > touch as little as possible :)
> > 
> > Well this patch does touch it anyway :)
> 
> But only small, self-evident changes.
> 

Sorry, I don't see what you are trying to say.
There's no chance legacy interrupts work with > 64 queues.
Guests should have validated the # of queues, and not
attempted to use >64 queues. Looks like there's no
such validation in guest, right?

Solution - don't specify this configuration with legacy guests.

Modern guests work, so there's value in supporting such a
configuration in QEMU; I don't see why we must deny it there.

> > For s390 just check and fail at init if you like.
> 
> What about devices that may change their number of queues? I'd really
> prefer large queue numbers to be fenced off in the the individual
> devices, and for that they need to be able to grab a transport-specific
> queue limit.

This is why I don't want bus-specific limits in the core:
it just makes it too easy to sweep dirt under the carpet.
s390 is legacy - fine, but don't perpetuate the issue
in devices.
Cornelia Huck April 28, 2015, 11:39 a.m. UTC | #10
On Tue, 28 Apr 2015 12:55:40 +0200
"Michael S. Tsirkin" <mst@redhat.com> wrote:

> On Tue, Apr 28, 2015 at 12:40:07PM +0200, Cornelia Huck wrote:
> > On Tue, 28 Apr 2015 10:16:04 +0200
> > "Michael S. Tsirkin" <mst@redhat.com> wrote:
> > 
> > > On Tue, Apr 28, 2015 at 10:04:15AM +0200, Cornelia Huck wrote:
> > > > On Tue, 28 Apr 2015 09:14:07 +0200
> > > > "Michael S. Tsirkin" <mst@redhat.com> wrote:
> > > > 
> > > > > On Tue, Apr 28, 2015 at 02:13:20PM +0800, Jason Wang wrote:
> > > > > > 
> > > > > > 
> > > > > > On Tue, Apr 28, 2015 at 1:13 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
> > > > > > >On Tue, Apr 28, 2015 at 11:14:04AM +0800, Jason Wang wrote:
> > > > > > >>     On Mon, Apr 27, 2015 at 7:05 PM, Michael S. Tsirkin
> > > > > > >><mst@redhat.com> wrote:
> > > > > > >> >On Thu, Apr 23, 2015 at 02:21:41PM +0800, Jason Wang wrote:
> > > > > > >> >> This patch introduces a bus specific queue limitation. It will be
> > > > > > >> >> useful for increasing the limit for one of the bus without
> > > > > > >>disturbing
> > > > > > >> >> other buses.
> > > > > > >> >> Cc: Michael S. Tsirkin <mst@redhat.com>
> > > > > > >> >> Cc: Alexander Graf <agraf@suse.de>
> > > > > > >> >> Cc: Richard Henderson <rth@twiddle.net>
> > > > > > >> >> Cc: Cornelia Huck <cornelia.huck@de.ibm.com>
> > > > > > >> >> Cc: Christian Borntraeger <borntraeger@de.ibm.com>
> > > > > > >> >> Cc: Paolo Bonzini <pbonzini@redhat.com>
> > > > > > >> >> Signed-off-by: Jason Wang <jasowang@redhat.com>
> > > > > > >> >> Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
> > > > > > >> >
> > > > > > >> >Is this still needed if you drop the attempt to
> > > > > > >> >keep the limit around for old machine types?
> > > > > > >> If we agree to drop, we probably need transport specific macro.
> > > > > > >
> > > > > > >You mean just rename VIRTIO_PCI_QUEUE_MAX to VIRTIO_QUEUE_MAX?
> > > > > > >Fine, why not.
> > > > > > 
> > > > > > I mean keeping VIRTIO_PCI_QUEUE_MAX for pci only and just increase pci
> > > > > > limit. And introduce e.g VIRTIO_PCI_QUEUE_CCW for ccw and keep it as 64.
> > > > > > Since to my understanding, it's not safe to increase the limit for all other
> > > > > > transports which was pointed out by Cornelia in V1:
> > > > > > http://permalink.gmane.org/gmane.comp.emulators.qemu/318245.
> > > > > 
> > > > > I think all you need is add a check to CCW_CMD_SET_IND:
> > > > > limit to 64 for legacy interrupts only.
> > > > 
> > > > It isn't that easy.
> > > > 
> > > > What is easy is to add a check to the guest driver that fails setup for
> > > > devices with more than 64 queues not using adapter interrupts.
> > > > 
> > > > On the host side, we're lacking information when interpreting
> > > > CCW_CMD_SET_IND (the command does not contain a queue count, and the
> > > > actual number of virtqueues is not readily available.)
> > > 
> > > Why isn't it available? All devices call virtio_add_queue
> > > as appropriate. Just fail legacy adaptors.
> > 
> > Because we don't know what the guest is going to use? It is free to
> > use per-subchannel indicators, even if it is operating in virtio-1 mode.
> > > 
> > > > We also can't
> > > > fence off when setting up the vqs, as this happens before we know which
> > > > kind of indicators the guest wants to use.
> > > > 
> > > > More importantly, we haven't even speced what we want to do in this
> > > > case. Do we want to reject SET_IND for devices with more than 64
> > > > queues? (Probably yes.)
> > > > 
> > > > All this involves more work, and I'd prefer to do Jason's changes
> > > > instead as this gives us some more time to figure this out properly.
> > > > 
> > > > And we haven't even considered s390-virtio yet, which I really want to
> > > > touch as little as possible :)
> > > 
> > > Well this patch does touch it anyway :)
> > 
> > But only small, self-evident changes.
> > 
> 
> Sorry, I don't see what you are trying to say.
> There's no chance legacy interrupts work with > 64 queues.
> Guests should have validated the # of queues, and not
> attempted to use >64 queues. Looks like there's no
> such validation in guest, right?

I have no idea whether > 64 queues would work with s390-virtio - it
might well work, but I'm not willing to expend any effort on verifying
that.

> 
> Solution - don't specify this configuration with legacy guests.
> 
> Modern guests work so there's value in supporting such
> configuration in QEMU, I don't see why we must deny it in QEMU.

What is "legacy guest" in your context? A guest running with the legacy
transport or a guest using ccw but not virtio-1? A ccw guest using
adapter interrupts but not virtio-1 should be fine.

> 
> > > For s390 just check and fail at init if you like.
> > 
> > What about devices that may change their number of queues? I'd really
> > prefer large queue numbers to be fenced off in the individual
> > devices, and for that they need to be able to grab a transport-specific
> > queue limit.
> 
> > This is why I don't want bus-specific limits in core;
> it just makes it too easy to sweep dirt under the carpet.
> s390 is legacy - fine, but don't perpetuate the issue
> in devices.

What is "swept under the carpet" here? A device can have min(max queues
from transport, max queues from device type) queues. I think it's
easier to refuse instantiating with too many queues per device type (as
most will be fine with 64 queues), so I don't want that code in the
transport (beyond making the limit available).
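
(A minimal sketch of the device-side check described above, assuming only the
virtio_get_queue_max() helper added by the patch at the end of this thread;
the MyDevice type, its num_queues field, and my_device_realize() are
hypothetical names used purely for illustration, mirroring what virtio-net
and virtio-scsi do in the patch.)

#include "hw/virtio/virtio.h"
#include "qapi/error.h"

/* Hypothetical device state: only the fields needed for the check. */
typedef struct MyDevice {
    VirtIODevice parent_obj;
    unsigned int num_queues;       /* would come from a qdev property */
} MyDevice;

static void my_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    MyDevice *d = (MyDevice *)vdev;                /* parent_obj is first */
    int queue_max = virtio_get_queue_max(vdev);    /* bus-specific limit */

    /* Reserve two slots for control/event queues, as virtio-scsi does. */
    if (d->num_queues == 0 || d->num_queues > queue_max - 2) {
        error_setg(errp, "Invalid number of queues (= %u), must be a "
                   "positive integer less than %d",
                   d->num_queues, queue_max - 2);
        return;
    }
    /* ... virtio_init() and the virtio_add_queue() calls would follow ... */
}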

For s390 I'd like in the end:
- s390-virtio: legacy - keep it working as best-can-do, so I'd prefer
  to keep it at 64 queues, even if more might work
- virtio-ccw, devices in legacy or virtio-1 mode: works with adapter
  interrupts, so let's fence off setting per-subchannel indicators if a
  device has more than 64 queues (needs work and a well thought-out
  rejection mechanism)

That's _in the end_: I'd like to keep ccw at 64 queues _for now_ so
that we don't have a rushed interface change - and at the same time, I
don't want to hold off pci. Makes sense?
Michael S. Tsirkin April 28, 2015, 12:47 p.m. UTC | #11
On Tue, Apr 28, 2015 at 01:39:51PM +0200, Cornelia Huck wrote:
> On Tue, 28 Apr 2015 12:55:40 +0200
> "Michael S. Tsirkin" <mst@redhat.com> wrote:
> 
> > On Tue, Apr 28, 2015 at 12:40:07PM +0200, Cornelia Huck wrote:
> > > On Tue, 28 Apr 2015 10:16:04 +0200
> > > "Michael S. Tsirkin" <mst@redhat.com> wrote:
> > > 
> > > > On Tue, Apr 28, 2015 at 10:04:15AM +0200, Cornelia Huck wrote:
> > > > > On Tue, 28 Apr 2015 09:14:07 +0200
> > > > > "Michael S. Tsirkin" <mst@redhat.com> wrote:
> > > > > 
> > > > > > On Tue, Apr 28, 2015 at 02:13:20PM +0800, Jason Wang wrote:
> > > > > > > 
> > > > > > > 
> > > > > > > On Tue, Apr 28, 2015 at 1:13 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
> > > > > > > >On Tue, Apr 28, 2015 at 11:14:04AM +0800, Jason Wang wrote:
> > > > > > > >>     On Mon, Apr 27, 2015 at 7:05 PM, Michael S. Tsirkin
> > > > > > > >><mst@redhat.com> wrote:
> > > > > > > >> >On Thu, Apr 23, 2015 at 02:21:41PM +0800, Jason Wang wrote:
> > > > > > > >> >> This patch introduces a bus specific queue limitation. It will be
> > > > > > > >> >> useful for increasing the limit for one of the bus without
> > > > > > > >>disturbing
> > > > > > > >> >> other buses.
> > > > > > > >> >> Cc: Michael S. Tsirkin <mst@redhat.com>
> > > > > > > >> >> Cc: Alexander Graf <agraf@suse.de>
> > > > > > > >> >> Cc: Richard Henderson <rth@twiddle.net>
> > > > > > > >> >> Cc: Cornelia Huck <cornelia.huck@de.ibm.com>
> > > > > > > >> >> Cc: Christian Borntraeger <borntraeger@de.ibm.com>
> > > > > > > >> >> Cc: Paolo Bonzini <pbonzini@redhat.com>
> > > > > > > >> >> Signed-off-by: Jason Wang <jasowang@redhat.com>
> > > > > > > >> >> Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
> > > > > > > >> >
> > > > > > > >> >Is this still needed if you drop the attempt to
> > > > > > > >> >keep the limit around for old machine types?
> > > > > > > >> If we agree to drop, we probably need transport specific macro.
> > > > > > > >
> > > > > > > >You mean just rename VIRTIO_PCI_QUEUE_MAX to VIRTIO_QUEUE_MAX?
> > > > > > > >Fine, why not.
> > > > > > > 
> > > > > > > I mean keeping VIRTIO_PCI_QUEUE_MAX for pci only and just increase pci
> > > > > > > limit. And introduce e.g VIRTIO_PCI_QUEUE_CCW for ccw and keep it as 64.
> > > > > > > Since to my understanding, it's not safe to increase the limit for all other
> > > > > > > transports which was pointed out by Cornelia in V1:
> > > > > > > http://permalink.gmane.org/gmane.comp.emulators.qemu/318245.
> > > > > > 
> > > > > > I think all you need is add a check to CCW_CMD_SET_IND:
> > > > > > limit to 64 for legacy interrupts only.
> > > > > 
> > > > > It isn't that easy.
> > > > > 
> > > > > What is easy is to add a check to the guest driver that fails setup for
> > > > > devices with more than 64 queues not using adapter interrupts.
> > > > > 
> > > > > On the host side, we're lacking information when interpreting
> > > > > CCW_CMD_SET_IND (the command does not contain a queue count, and the
> > > > > actual number of virtqueues is not readily available.)
> > > > 
> > > > Why isn't it available? All devices call virtio_add_queue
> > > > as appropriate. Just fail legacy adaptors.
> > > 
> > > Because we don't know what the guest is going to use? It is free to
> > > use per-subchannel indicators, even if it is operating in virtio-1 mode.
> > > > 
> > > > > We also can't
> > > > > fence off when setting up the vqs, as this happens before we know which
> > > > > kind of indicators the guest wants to use.
> > > > > 
> > > > > More importantly, we haven't even speced what we want to do in this
> > > > > case. Do we want to reject SET_IND for devices with more than 64
> > > > > queues? (Probably yes.)
> > > > > 
> > > > > All this involves more work, and I'd prefer to do Jason's changes
> > > > > instead as this gives us some more time to figure this out properly.
> > > > > 
> > > > > And we haven't even considered s390-virtio yet, which I really want to
> > > > > touch as little as possible :)
> > > > 
> > > > Well this patch does touch it anyway :)
> > > 
> > > But only small, self-evident changes.
> > > 
> > 
> > Sorry, I don't see what you are trying to say.
> > There's no chance legacy interrupts work with > 64 queues.
> > Guests should have validated the # of queues, and not
> > attempted to use >64 queues. Looks like there's no
> > such validation in guest, right?
> 
> I have no idea whether > 64 queues would work with s390-virtio - it
> might well work, but I'm not willing to expend any effort on verifying
> that.

Well this doesn't mean we won't make any changes, ever,
just so we can reduce verification costs.
Let's make the change everywhere; if we see issues,
we'll backtrack.

> > 
> > Solution - don't specify this configuration with legacy guests.
> > 
> > Modern guests work so there's value in supporting such
> > configuration in QEMU, I don't see why we must deny it in QEMU.
> 
> What is "legacy guest" in your context? A guest running with the legacy
> transport or a guest using ccw but not virtio-1? A ccw guest using
> adapter interrupts but not virtio-1 should be fine.

A guest not using adapter interrupts.

> > 
> > > > For s390 just check and fail at init if you like.
> > > 
> > > What about devices that may change their number of queues? I'd really
> > > prefer large queue numbers to be fenced off in the individual
> > > devices, and for that they need to be able to grab a transport-specific
> > > queue limit.
> > 
> > > This is why I don't want bus-specific limits in core;
> > it just makes it too easy to sweep dirt under the carpet.
> > s390 is legacy - fine, but don't perpetuate the issue
> > in devices.
> 
> What is "swept under the carpet" here? A device can have min(max queues
> from transport, max queues from device type) queues. I think it's
> easier to refuse instantiating with too many queues per device type (as
> most will be fine with 64 queues), so I don't want that code in the
> transport (beyond making the limit available).
> 
> For s390 I'd like in the end:
> - s390-virtio: legacy - keep it working as best-can-do, so I'd prefer
>   to keep it at 64 queues, even if more might work
> - virtio-ccw, devices in legacy or virtio-1 mode: works with adapter
>   interrupts, so let's fence off setting per-subchannel indicators if a
>   device has more than 64 queues (needs work and a well thought-out
>   rejection mechanism)
> 
> That's _in the end_: I'd like to keep ccw at 64 queues _for now_ so
> that we don't have a rushed interface change - and at the same time, I
> don't want to hold off pci. Makes sense?

If you want to fail configurations with > 64 queues in ccw or s390,
that's fine by me. I don't want workarounds for these bugs in virtio
core though. So transports should not have a say in how many queues can
be supported, but they can fail configurations they can't support if
they want to.
Cornelia Huck April 28, 2015, 1:33 p.m. UTC | #12
On Tue, 28 Apr 2015 14:47:11 +0200
"Michael S. Tsirkin" <mst@redhat.com> wrote:

> On Tue, Apr 28, 2015 at 01:39:51PM +0200, Cornelia Huck wrote:
> > On Tue, 28 Apr 2015 12:55:40 +0200
> > "Michael S. Tsirkin" <mst@redhat.com> wrote:
> > 
> > > On Tue, Apr 28, 2015 at 12:40:07PM +0200, Cornelia Huck wrote:
> > > > On Tue, 28 Apr 2015 10:16:04 +0200
> > > > "Michael S. Tsirkin" <mst@redhat.com> wrote:
> > > > 
> > > > > On Tue, Apr 28, 2015 at 10:04:15AM +0200, Cornelia Huck wrote:
> > > > > > On Tue, 28 Apr 2015 09:14:07 +0200
> > > > > > "Michael S. Tsirkin" <mst@redhat.com> wrote:
> > > > > > 
> > > > > > > On Tue, Apr 28, 2015 at 02:13:20PM +0800, Jason Wang wrote:
> > > > > > > > 
> > > > > > > > 
> > > > > > > > On Tue, Apr 28, 2015 at 1:13 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
> > > > > > > > >On Tue, Apr 28, 2015 at 11:14:04AM +0800, Jason Wang wrote:
> > > > > > > > >>     On Mon, Apr 27, 2015 at 7:05 PM, Michael S. Tsirkin
> > > > > > > > >><mst@redhat.com> wrote:
> > > > > > > > >> >On Thu, Apr 23, 2015 at 02:21:41PM +0800, Jason Wang wrote:
> > > > > > > > >> >> This patch introduces a bus specific queue limitation. It will be
> > > > > > > > >> >> useful for increasing the limit for one of the bus without
> > > > > > > > >>disturbing
> > > > > > > > >> >> other buses.
> > > > > > > > >> >> Cc: Michael S. Tsirkin <mst@redhat.com>
> > > > > > > > >> >> Cc: Alexander Graf <agraf@suse.de>
> > > > > > > > >> >> Cc: Richard Henderson <rth@twiddle.net>
> > > > > > > > >> >> Cc: Cornelia Huck <cornelia.huck@de.ibm.com>
> > > > > > > > >> >> Cc: Christian Borntraeger <borntraeger@de.ibm.com>
> > > > > > > > >> >> Cc: Paolo Bonzini <pbonzini@redhat.com>
> > > > > > > > >> >> Signed-off-by: Jason Wang <jasowang@redhat.com>
> > > > > > > > >> >> Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
> > > > > > > > >> >
> > > > > > > > >> >Is this still needed if you drop the attempt to
> > > > > > > > >> >keep the limit around for old machine types?
> > > > > > > > >> If we agree to drop, we probably need transport specific macro.
> > > > > > > > >
> > > > > > > > >You mean just rename VIRTIO_PCI_QUEUE_MAX to VIRTIO_QUEUE_MAX?
> > > > > > > > >Fine, why not.
> > > > > > > > 
> > > > > > > > I mean keeping VIRTIO_PCI_QUEUE_MAX for pci only and just increase pci
> > > > > > > > limit. And introduce e.g VIRTIO_PCI_QUEUE_CCW for ccw and keep it as 64.
> > > > > > > > Since to my understanding, it's not safe to increase the limit for all other
> > > > > > > > transports which was pointed out by Cornelia in V1:
> > > > > > > > http://permalink.gmane.org/gmane.comp.emulators.qemu/318245.
> > > > > > > 
> > > > > > > I think all you need is add a check to CCW_CMD_SET_IND:
> > > > > > > limit to 64 for legacy interrupts only.
> > > > > > 
> > > > > > It isn't that easy.
> > > > > > 
> > > > > > What is easy is to add a check to the guest driver that fails setup for
> > > > > > devices with more than 64 queues not using adapter interrupts.
> > > > > > 
> > > > > > On the host side, we're lacking information when interpreting
> > > > > > CCW_CMD_SET_IND (the command does not contain a queue count, and the
> > > > > > actual number of virtqueues is not readily available.)
> > > > > 
> > > > > Why isn't it available? All devices call virtio_add_queue
> > > > > as appropriate. Just fail legacy adaptors.
> > > > 
> > > > Because we don't know what the guest is going to use? It is free to
> > > > use per-subchannel indicators, even if it is operating in virtio-1 mode.
> > > > > 
> > > > > > We also can't
> > > > > > fence off when setting up the vqs, as this happens before we know which
> > > > > > kind of indicators the guest wants to use.
> > > > > > 
> > > > > > More importantly, we haven't even speced what we want to do in this
> > > > > > case. Do we want to reject SET_IND for devices with more than 64
> > > > > > queues? (Probably yes.)
> > > > > > 
> > > > > > All this involves more work, and I'd prefer to do Jason's changes
> > > > > > instead as this gives us some more time to figure this out properly.
> > > > > > 
> > > > > > And we haven't even considered s390-virtio yet, which I really want to
> > > > > > touch as little as possible :)
> > > > > 
> > > > > Well this patch does touch it anyway :)
> > > > 
> > > > But only small, self-evident changes.
> > > > 
> > > 
> > > Sorry, I don't see what you are trying to say.
> > > There's no chance legacy interrupts work with > 64 queues.
> > > Guests should have validated the # of queues, and not
> > > attempted to use >64 queues. Looks like there's no
> > > such validation in guest, right?
> > 
> > I have no idea whether > 64 queues would work with s390-virtio - it
> > might well work, but I'm not willing to expend any effort on verifying
> > that.
> 
> Well this doesn't mean we won't make any changes, ever,
> just so we can reduce verification costs.
> Let's make the change everywhere; if we see issues,
> we'll backtrack.

I don't like knowingly breaking things. And I know
that some virtio-ccw setups will break.

> 
> > > 
> > > Solution - don't specify this configuration with legacy guests.
> > > 
> > > Modern guests work so there's value in supporting such
> > > configuration in QEMU, I don't see why we must deny it in QEMU.
> > 
> > What is "legacy guest" in your context? A guest running with the legacy
> > transport or a guest using ccw but not virtio-1? A ccw guest using
> > adapter interrupts but not virtio-1 should be fine.
> 
> A guest not using adapter interrupts.

There's nothing about that that's per-guest. It is a choice per-device.
In fact, the Linux guest driver falls back to classic interrupts if it
fails to setup adapter interrupts for a device - and this might happen
for large guests when the host adapter routing table is full.

> 
> > > 
> > > > > For s390 just check and fail at init if you like.
> > > > 
> > > > What about devices that may change their number of queues? I'd really
> > > > prefer large queue numbers to be fenced off in the individual
> > > > devices, and for that they need to be able to grab a transport-specific
> > > > queue limit.
> > > 
> > > > This is why I don't want bus-specific limits in core;
> > > it just makes it too easy to sweep dirt under the carpet.
> > > s390 is legacy - fine, but don't perpetuate the issue
> > > in devices.
> > 
> > What is "swept under the carpet" here? A device can have min(max queues
> > from transport, max queues from device type) queues. I think it's
> > easier to refuse instantiating with too many queues per device type (as
> > most will be fine with 64 queues), so I don't want that code in the
> > transport (beyond making the limit available).
> > 
> > For s390 I'd like in the end:
> > - s390-virtio: legacy - keep it working as best-can-do, so I'd prefer
> >   to keep it at 64 queues, even if more might work
> > - virtio-ccw, devices in legacy or virtio-1 mode: works with adapter
> >   interrupts, so let's fence off setting per-subchannel indicators if a
> >   device has more than 64 queues (needs work and a well thought-out
> >   rejection mechanism)
> > 
> > That's _in the end_: I'd like to keep ccw at 64 queues _for now_ so
> > that we don't have a rushed interface change - and at the same time, I
> > don't want to hold off pci. Makes sense?
> 
> If you want to fail configurations with > 64 queues in ccw or s390,
> that's fine by me. I don't want workarounds for these bugs in virtio
> core though. So transports should not have a say in how many queues can
> be supported, but they can fail configurations they can't support if
> they want to.

Eh, isn't that a contradiction? Failing a configuration means that the
transport does indeed have a say?
Michael S. Tsirkin April 28, 2015, 2:40 p.m. UTC | #13
On Tue, Apr 28, 2015 at 03:33:37PM +0200, Cornelia Huck wrote:
> On Tue, 28 Apr 2015 14:47:11 +0200
> "Michael S. Tsirkin" <mst@redhat.com> wrote:
> 
> > On Tue, Apr 28, 2015 at 01:39:51PM +0200, Cornelia Huck wrote:
> > > On Tue, 28 Apr 2015 12:55:40 +0200
> > > "Michael S. Tsirkin" <mst@redhat.com> wrote:
> > > 
> > > > On Tue, Apr 28, 2015 at 12:40:07PM +0200, Cornelia Huck wrote:
> > > > > On Tue, 28 Apr 2015 10:16:04 +0200
> > > > > "Michael S. Tsirkin" <mst@redhat.com> wrote:
> > > > > 
> > > > > > On Tue, Apr 28, 2015 at 10:04:15AM +0200, Cornelia Huck wrote:
> > > > > > > On Tue, 28 Apr 2015 09:14:07 +0200
> > > > > > > "Michael S. Tsirkin" <mst@redhat.com> wrote:
> > > > > > > 
> > > > > > > > On Tue, Apr 28, 2015 at 02:13:20PM +0800, Jason Wang wrote:
> > > > > > > > > 
> > > > > > > > > 
> > > > > > > > > On Tue, Apr 28, 2015 at 1:13 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
> > > > > > > > > >On Tue, Apr 28, 2015 at 11:14:04AM +0800, Jason Wang wrote:
> > > > > > > > > >>     On Mon, Apr 27, 2015 at 7:05 PM, Michael S. Tsirkin
> > > > > > > > > >><mst@redhat.com> wrote:
> > > > > > > > > >> >On Thu, Apr 23, 2015 at 02:21:41PM +0800, Jason Wang wrote:
> > > > > > > > > >> >> This patch introduces a bus specific queue limitation. It will be
> > > > > > > > > >> >> useful for increasing the limit for one of the bus without
> > > > > > > > > >>disturbing
> > > > > > > > > >> >> other buses.
> > > > > > > > > >> >> Cc: Michael S. Tsirkin <mst@redhat.com>
> > > > > > > > > >> >> Cc: Alexander Graf <agraf@suse.de>
> > > > > > > > > >> >> Cc: Richard Henderson <rth@twiddle.net>
> > > > > > > > > >> >> Cc: Cornelia Huck <cornelia.huck@de.ibm.com>
> > > > > > > > > >> >> Cc: Christian Borntraeger <borntraeger@de.ibm.com>
> > > > > > > > > >> >> Cc: Paolo Bonzini <pbonzini@redhat.com>
> > > > > > > > > >> >> Signed-off-by: Jason Wang <jasowang@redhat.com>
> > > > > > > > > >> >> Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
> > > > > > > > > >> >
> > > > > > > > > >> >Is this still needed if you drop the attempt to
> > > > > > > > > >> >keep the limit around for old machine types?
> > > > > > > > > >> If we agree to drop, we probably need transport specific macro.
> > > > > > > > > >
> > > > > > > > > >You mean just rename VIRTIO_PCI_QUEUE_MAX to VIRTIO_QUEUE_MAX?
> > > > > > > > > >Fine, why not.
> > > > > > > > > 
> > > > > > > > > I mean keeping VIRTIO_PCI_QUEUE_MAX for pci only and just increase pci
> > > > > > > > > limit. And introduce e.g VIRTIO_PCI_QUEUE_CCW for ccw and keep it as 64.
> > > > > > > > > Since to my understanding, it's not safe to increase the limit for all other
> > > > > > > > > transports which was pointed out by Cornelia in V1:
> > > > > > > > > http://permalink.gmane.org/gmane.comp.emulators.qemu/318245.
> > > > > > > > 
> > > > > > > > I think all you need is add a check to CCW_CMD_SET_IND:
> > > > > > > > limit to 64 for legacy interrupts only.
> > > > > > > 
> > > > > > > It isn't that easy.
> > > > > > > 
> > > > > > > What is easy is to add a check to the guest driver that fails setup for
> > > > > > > devices with more than 64 queues not using adapter interrupts.
> > > > > > > 
> > > > > > > On the host side, we're lacking information when interpreting
> > > > > > > CCW_CMD_SET_IND (the command does not contain a queue count, and the
> > > > > > > actual number of virtqueues is not readily available.)
> > > > > > 
> > > > > > Why isn't it available? All devices call virtio_add_queue
> > > > > > as appropriate. Just fail legacy adaptors.
> > > > > 
> > > > > Because we don't know what the guest is going to use? It is free to
> > > > > use per-subchannel indicators, even if it is operating in virtio-1 mode.
> > > > > > 
> > > > > > > We also can't
> > > > > > > fence off when setting up the vqs, as this happens before we know which
> > > > > > > kind of indicators the guest wants to use.
> > > > > > > 
> > > > > > > More importantly, we haven't even speced what we want to do in this
> > > > > > > case. Do we want to reject SET_IND for devices with more than 64
> > > > > > > queues? (Probably yes.)
> > > > > > > 
> > > > > > > All this involves more work, and I'd prefer to do Jason's changes
> > > > > > > instead as this gives us some more time to figure this out properly.
> > > > > > > 
> > > > > > > And we haven't even considered s390-virtio yet, which I really want to
> > > > > > > touch as little as possible :)
> > > > > > 
> > > > > > Well this patch does touch it anyway :)
> > > > > 
> > > > > But only small, self-evident changes.
> > > > > 
> > > > 
> > > > Sorry, I don't see what you are trying to say.
> > > > There's no chance legacy interrupts work with > 64 queues.
> > > > Guests should have validated the # of queues, and not
> > > > attempted to use >64 queues. Looks like there's no
> > > > such validation in guest, right?
> > > 
> > > I have no idea whether > 64 queues would work with s390-virtio - it
> > > might well work, but I'm not willing to expend any effort on verifying
> > > that.
> > 
> > Well this doesn't mean we won't make any changes, ever,
> > just so we can reduce verification costs.
> > Let's make the change everywhere; if we see issues,
> > we'll backtrack.
> 
> I don't like knowingly breaking things. And I know
> that some virtio-ccw setups will break.
> 
> > 
> > > > 
> > > > Solution - don't specify this configuration with legacy guests.
> > > > 
> > > > Modern guests work so there's value in supporting such
> > > > configuration in QEMU, I don't see why we must deny it in QEMU.
> > > 
> > > What is "legacy guest" in your context? A guest running with the legacy
> > > transport or a guest using ccw but not virtio-1? A ccw guest using
> > > adapter interrupts but not virtio-1 should be fine.
> > 
> > A guest not using adapter interrupts.
> 
> There's nothing about that that's per-guest. It is a choice per-device.
> In fact, the Linux guest driver falls back to classic interrupts if it
> fails to setup adapter interrupts for a device - and this might happen
> for large guests when the host adapter routing table is full.
> 
> > 
> > > > 
> > > > > > For s390 just check and fail at init if you like.
> > > > > 
> > > > > What about devices that may change their number of queues? I'd really
> > > > > prefer large queue numbers to be fenced off in the individual
> > > > > devices, and for that they need to be able to grab a transport-specific
> > > > > queue limit.
> > > > 
> > > > > This is why I don't want bus-specific limits in core;
> > > > it just makes it too easy to sweep dirt under the carpet.
> > > > s390 is legacy - fine, but don't perpetuate the issue
> > > > in devices.
> > > 
> > > What is "swept under the carpet" here? A device can have min(max queues
> > > from transport, max queues from device type) queues. I think it's
> > > easier to refuse instantiating with too many queues per device type (as
> > > most will be fine with 64 queues), so I don't want that code in the
> > > transport (beyond making the limit available).
> > > 
> > > For s390 I'd like in the end:
> > > - s390-virtio: legacy - keep it working as best-can-do, so I'd prefer
> > >   to keep it at 64 queues, even if more might work
> > > - virtio-ccw, devices in legacy or virtio-1 mode: works with adapter
> > >   interrupts, so let's fence off setting per-subchannel indicators if a
> > >   device has more than 64 queues (needs work and a well thought-out
> > >   rejection mechanism)
> > > 
> > > That's _in the end_: I'd like to keep ccw at 64 queues _for now_ so
> > > that we don't have a rushed interface change - and at the same time, I
> > > don't want to hold off pci. Makes sense?
> > 
> > If you want to fail configurations with > 64 queues in ccw or s390,
> > that's fine by me. I don't want workarounds for these bugs in virtio
> > core though. So transports should not have a say in how many queues can
> > be supported, but they can fail configurations they can't support if
> > they want to.
> 
> Eh, isn't that a contradiction? Failing a configuration means that the
> transport does indeed have a say?

I'm fine with a general capability that lets the transport check the device
and fail init, for whatever reason.
E.g., can we teach the plugged callback to fail?
I don't want to mess up the core with knowledge about specific transport
bugs such as arbitrary limits on the # of queues.
Jason Wang May 13, 2015, 7:51 a.m. UTC | #14
On 04/28/2015 10:40 PM, Michael S. Tsirkin wrote:
> On Tue, Apr 28, 2015 at 03:33:37PM +0200, Cornelia Huck wrote:
>> On Tue, 28 Apr 2015 14:47:11 +0200
>> "Michael S. Tsirkin" <mst@redhat.com> wrote:
>>
>>> On Tue, Apr 28, 2015 at 01:39:51PM +0200, Cornelia Huck wrote:
>>>> On Tue, 28 Apr 2015 12:55:40 +0200
>>>> "Michael S. Tsirkin" <mst@redhat.com> wrote:
>>>>
>>>>> On Tue, Apr 28, 2015 at 12:40:07PM +0200, Cornelia Huck wrote:
>>>>>> On Tue, 28 Apr 2015 10:16:04 +0200
>>>>>> "Michael S. Tsirkin" <mst@redhat.com> wrote:
>>>>>>
>>>>>>> On Tue, Apr 28, 2015 at 10:04:15AM +0200, Cornelia Huck wrote:
>>>>>>>> On Tue, 28 Apr 2015 09:14:07 +0200
>>>>>>>> "Michael S. Tsirkin" <mst@redhat.com> wrote:
>>>>>>>>
>>>>>>>>> On Tue, Apr 28, 2015 at 02:13:20PM +0800, Jason Wang wrote:
>>>>>>>>>>
>>>>>>>>>> On Tue, Apr 28, 2015 at 1:13 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
>>>>>>>>>>> On Tue, Apr 28, 2015 at 11:14:04AM +0800, Jason Wang wrote:
>>>>>>>>>>>>     On Mon, Apr 27, 2015 at 7:05 PM, Michael S. Tsirkin
>>>>>>>>>>>> <mst@redhat.com> wrote:
>>>>>>>>>>>>> On Thu, Apr 23, 2015 at 02:21:41PM +0800, Jason Wang wrote:
>>>>>>>>>>>>>> This patch introduces a bus specific queue limitation. It will be
>>>>>>>>>>>>>> useful for increasing the limit for one of the bus without
>>>>>>>>>>>> disturbing
>>>>>>>>>>>>>> other buses.
>>>>>>>>>>>>>> Cc: Michael S. Tsirkin <mst@redhat.com>
>>>>>>>>>>>>>> Cc: Alexander Graf <agraf@suse.de>
>>>>>>>>>>>>>> Cc: Richard Henderson <rth@twiddle.net>
>>>>>>>>>>>>>> Cc: Cornelia Huck <cornelia.huck@de.ibm.com>
>>>>>>>>>>>>>> Cc: Christian Borntraeger <borntraeger@de.ibm.com>
>>>>>>>>>>>>>> Cc: Paolo Bonzini <pbonzini@redhat.com>
>>>>>>>>>>>>>> Signed-off-by: Jason Wang <jasowang@redhat.com>
>>>>>>>>>>>>>> Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
>>>>>>>>>>>>> Is this still needed if you drop the attempt to
>>>>>>>>>>>>> keep the limit around for old machine types?
>>>>>>>>>>>> If we agree to drop, we probably need transport specific macro.
>>>>>>>>>>> You mean just rename VIRTIO_PCI_QUEUE_MAX to VIRTIO_QUEUE_MAX?
>>>>>>>>>>> Fine, why not.
>>>>>>>>>> I mean keeping VIRTIO_PCI_QUEUE_MAX for pci only and just increase pci
>>>>>>>>>> limit. And introduce e.g VIRTIO_PCI_QUEUE_CCW for ccw and keep it as 64.
>>>>>>>>>> Since to my understanding, it's not safe to increase the limit for all other
>>>>>>>>>> transports which was pointed out by Cornelia in V1:
>>>>>>>>>> http://permalink.gmane.org/gmane.comp.emulators.qemu/318245.
>>>>>>>>> I think all you need is add a check to CCW_CMD_SET_IND:
>>>>>>>>> limit to 64 for legacy interrupts only.
>>>>>>>> It isn't that easy.
>>>>>>>>
>>>>>>>> What is easy is to add a check to the guest driver that fails setup for
>>>>>>>> devices with more than 64 queues not using adapter interrupts.
>>>>>>>>
>>>>>>>> On the host side, we're lacking information when interpreting
>>>>>>>> CCW_CMD_SET_IND (the command does not contain a queue count, and the
>>>>>>>> actual number of virtqueues is not readily available.)
>>>>>>> Why isn't it available? All devices call virtio_add_queue
>>>>>>> as appropriate. Just fail legacy adaptors.
>>>>>> Because we don't know what the guest is going to use? It is free to
>>>>>> use per-subchannel indicators, even if it is operating in virtio-1 mode.
>>>>>>>> We also can't
>>>>>>>> fence off when setting up the vqs, as this happens before we know which
>>>>>>>> kind of indicators the guest wants to use.
>>>>>>>>
>>>>>>>> More importantly, we haven't even speced what we want to do in this
>>>>>>>> case. Do we want to reject SET_IND for devices with more than 64
>>>>>>>> queues? (Probably yes.)
>>>>>>>>
>>>>>>>> All this involves more work, and I'd prefer to do Jason's changes
>>>>>>>> instead as this gives us some more time to figure this out properly.
>>>>>>>>
>>>>>>>> And we haven't even considered s390-virtio yet, which I really want to
>>>>>>>> touch as little as possible :)
>>>>>>> Well this patch does touch it anyway :)
>>>>>> But only small, self-evident changes.
>>>>>>
>>>>> Sorry, I don't see what you are trying to say.
>>>>> There's no chance legacy interrupts work with > 64 queues.
>>>>> Guests should have validated the # of queues, and not
>>>>> attempted to use >64 queues. Looks like there's no
>>>>> such validation in guest, right?
>>>> I have no idea whether > 64 queues would work with s390-virtio - it
>>>> might well work, but I'm not willing to expend any effort on verifying
>>>> that.
>>> Well this doesn't mean we won't make any changes, ever,
>>> just so we can reduce verification costs.
>>> Let's make the change everywhere; if we see issues,
>>> we'll backtrack.
>> I don't like knowingly breaking things. And I know
>> that some virtio-ccw setups will break.
>>
>>>>> Solution - don't specify this configuration with legacy guests.
>>>>>
>>>>> Modern guests work so there's value in supporting such
>>>>> configuration in QEMU, I don't see why we must deny it in QEMU.
>>>> What is "legacy guest" in your context? A guest running with the legacy
>>>> transport or a guest using ccw but not virtio-1? A ccw guest using
>>>> adapter interrupts but not virtio-1 should be fine.
>>> A guest not using adapter interrupts.
>> There's nothing about that that's per-guest. It is a choice per-device.
>> In fact, the Linux guest driver falls back to classic interrupts if it
>> fails to setup adapter interrupts for a device - and this might happen
>> for large guests when the host adapter routing table is full.
>>
>>>>>>> For s390 just check and fail at init if you like.
>>>>>> What about devices that may change their number of queues? I'd really
>>>>>> prefer large queue numbers to be fenced off in the individual
>>>>>> devices, and for that they need to be able to grab a transport-specific
>>>>>> queue limit.
>>>>> This is why I don't want bus-specific limits in core;
>>>>> it just makes it too easy to sweep dirt under the carpet.
>>>>> s390 is legacy - fine, but don't perpetuate the issue
>>>>> in devices.
>>>> What is "swept under the carpet" here? A device can have min(max queues
>>>> from transport, max queues from device type) queues. I think it's
>>>> easier to refuse instantiating with too many queues per device type (as
>>>> most will be fine with 64 queues), so I don't want that code in the
>>>> transport (beyond making the limit available).
>>>>
>>>> For s390 I'd like in the end:
>>>> - s390-virtio: legacy - keep it working as best-can-do, so I'd prefer
>>>>   to keep it at 64 queues, even if more might work
>>>> - virtio-ccw, devices in legacy or virtio-1 mode: works with adapter
>>>>   interrupts, so let's fence off setting per-subchannel indicators if a
>>>>   device has more than 64 queues (needs work and a well thought-out
>>>>   rejection mechanism)
>>>>
>>>> That's _in the end_: I'd like to keep ccw at 64 queues _for now_ so
>>>> that we don't have a rushed interface change - and at the same time, I
>>>> don't want to hold off pci. Makes sense?
>>> If you want to fail configurations with > 64 queues in ccw or s390,
>>> that's fine by me. I don't want workarounds for these bugs in virtio
>>> core though. So transports should not have a say in how many queues can
>>> be supported, but they can fail configurations they can't support if
>>> they want to.
>> Eh, isn't that a contradiction? Failing a configuration means that the
>> transport does indeed have a say?
> I'm fine with a general capability that lets the transport check the device
> and fail init, for whatever reason.
> E.g., can we teach the plugged callback to fail?

Looks like we can (and for s390, we would need to add a callback just for
this check). That just moves the transport-specific limit into
k->device_plugged (my patch checks k->queue_max). I don't see an obvious
difference.
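
(For reference, the alternative being discussed, letting the transport veto a
device it cannot support from its plugged callback, might look roughly like
the sketch below for hw/s390x/virtio-ccw.c. It assumes that device_plugged
gains an Error ** parameter and that a virtio_get_num_queues() helper
returning the number of virtqueues the device created is available;
VIRTIO_CCW_QUEUE_MAX is a hypothetical transport-local constant. None of this
is part of the patch that follows.)

/* Sketch only: reject a device with more queues than the ccw transport can
 * handle at plug time, instead of having the core honour a per-bus limit. */
static void virtio_ccw_device_plugged(DeviceState *d, Error **errp)
{
    VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
    VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
    int n = virtio_get_num_queues(vdev);           /* assumed helper */

    if (n > VIRTIO_CCW_QUEUE_MAX) {
        error_setg(errp, "The number of virtqueues %d exceeds the ccw limit %d",
                   n, VIRTIO_CCW_QUEUE_MAX);
    }
}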

Patch

diff --git a/hw/char/virtio-serial-bus.c b/hw/char/virtio-serial-bus.c
index e336bdb..0694831 100644
--- a/hw/char/virtio-serial-bus.c
+++ b/hw/char/virtio-serial-bus.c
@@ -973,7 +973,7 @@  static void virtio_serial_device_realize(DeviceState *dev, Error **errp)
     }
 
     /* Each port takes 2 queues, and one pair is for the control queue */
-    max_supported_ports = VIRTIO_PCI_QUEUE_MAX / 2 - 1;
+    max_supported_ports = virtio_get_queue_max(vdev) / 2 - 1;
 
     if (vser->serial.max_virtserial_ports > max_supported_ports) {
         error_setg(errp, "maximum ports supported: %u", max_supported_ports);
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index b6fac9c..bf286f5 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -1588,10 +1588,10 @@  static void virtio_net_device_realize(DeviceState *dev, Error **errp)
     virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size);
 
     n->max_queues = MAX(n->nic_conf.peers.queues, 1);
-    if (n->max_queues * 2 + 1 > VIRTIO_PCI_QUEUE_MAX) {
+    if (n->max_queues * 2 + 1 > virtio_get_queue_max(vdev)) {
         error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
                    "must be a postive integer less than %d.",
-                   n->max_queues, (VIRTIO_PCI_QUEUE_MAX - 1) / 2);
+                   n->max_queues, (virtio_get_queue_max(vdev) - 1) / 2);
         virtio_cleanup(vdev);
         return;
     }
diff --git a/hw/s390x/s390-virtio-bus.c b/hw/s390x/s390-virtio-bus.c
index 047c963..2b41e32 100644
--- a/hw/s390x/s390-virtio-bus.c
+++ b/hw/s390x/s390-virtio-bus.c
@@ -715,6 +715,7 @@  static void virtio_s390_bus_class_init(ObjectClass *klass, void *data)
     bus_class->max_dev = 1;
     k->notify = virtio_s390_notify;
     k->get_features = virtio_s390_get_features;
+    k->queue_max = VIRTIO_PCI_QUEUE_MAX;
 }
 
 static const TypeInfo virtio_s390_bus_info = {
diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c
index 0434f56..590eed5 100644
--- a/hw/s390x/virtio-ccw.c
+++ b/hw/s390x/virtio-ccw.c
@@ -1715,6 +1715,7 @@  static void virtio_ccw_bus_class_init(ObjectClass *klass, void *data)
     k->load_queue = virtio_ccw_load_queue;
     k->save_config = virtio_ccw_save_config;
     k->load_config = virtio_ccw_load_config;
+    k->queue_max = VIRTIO_PCI_QUEUE_MAX;
 }
 
 static const TypeInfo virtio_ccw_bus_info = {
diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c
index c9bea06..fbdde2b 100644
--- a/hw/scsi/virtio-scsi.c
+++ b/hw/scsi/virtio-scsi.c
@@ -826,10 +826,10 @@  void virtio_scsi_common_realize(DeviceState *dev, Error **errp,
                 sizeof(VirtIOSCSIConfig));
 
     if (s->conf.num_queues == 0 ||
-            s->conf.num_queues > VIRTIO_PCI_QUEUE_MAX - 2) {
+            s->conf.num_queues > virtio_get_queue_max(vdev) - 2) {
         error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
                          "must be a positive integer less than %d.",
-                   s->conf.num_queues, VIRTIO_PCI_QUEUE_MAX - 2);
+                   s->conf.num_queues, virtio_get_queue_max(vdev) - 2);
         virtio_cleanup(vdev);
         return;
     }
diff --git a/hw/virtio/virtio-mmio.c b/hw/virtio/virtio-mmio.c
index 10123f3..2ae6942 100644
--- a/hw/virtio/virtio-mmio.c
+++ b/hw/virtio/virtio-mmio.c
@@ -403,6 +403,7 @@  static void virtio_mmio_bus_class_init(ObjectClass *klass, void *data)
     k->device_plugged = virtio_mmio_device_plugged;
     k->has_variable_vring_alignment = true;
     bus_class->max_dev = 1;
+    k->queue_max = VIRTIO_PCI_QUEUE_MAX;
 }
 
 static const TypeInfo virtio_mmio_bus_info = {
diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index c7c3f72..075b13b 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -1498,6 +1498,7 @@  static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
     k->vmstate_change = virtio_pci_vmstate_change;
     k->device_plugged = virtio_pci_device_plugged;
     k->device_unplugged = virtio_pci_device_unplugged;
+    k->queue_max = VIRTIO_PCI_QUEUE_MAX;
 }
 
 static const TypeInfo virtio_pci_bus_info = {
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 17c1260..bbb224f 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -541,6 +541,14 @@  void virtio_update_irq(VirtIODevice *vdev)
     virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
 }
 
+int virtio_get_queue_max(VirtIODevice *vdev)
+{
+    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
+    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
+
+    return k->queue_max;
+}
+
 void virtio_set_status(VirtIODevice *vdev, uint8_t val)
 {
     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
@@ -599,7 +607,7 @@  void virtio_reset(void *opaque)
     vdev->config_vector = VIRTIO_NO_VECTOR;
     virtio_notify_vector(vdev, vdev->config_vector);
 
-    for(i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
+    for (i = 0; i < virtio_get_queue_max(vdev); i++) {
         vdev->vq[i].vring.desc = 0;
         vdev->vq[i].vring.avail = 0;
         vdev->vq[i].vring.used = 0;
@@ -738,7 +746,8 @@  int virtio_queue_get_num(VirtIODevice *vdev, int n)
 int virtio_queue_get_id(VirtQueue *vq)
 {
     VirtIODevice *vdev = vq->vdev;
-    assert(vq >= &vdev->vq[0] && vq < &vdev->vq[VIRTIO_PCI_QUEUE_MAX]);
+
+    assert(vq >= &vdev->vq[0] && vq < &vdev->vq[virtio_get_queue_max(vdev)]);
     return vq - &vdev->vq[0];
 }
 
@@ -774,28 +783,29 @@  void virtio_queue_notify(VirtIODevice *vdev, int n)
 
 uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
 {
-    return n < VIRTIO_PCI_QUEUE_MAX ? vdev->vq[n].vector :
+    return n < virtio_get_queue_max(vdev) ? vdev->vq[n].vector :
         VIRTIO_NO_VECTOR;
 }
 
 void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
 {
-    if (n < VIRTIO_PCI_QUEUE_MAX)
+    if (n < virtio_get_queue_max(vdev))
         vdev->vq[n].vector = vector;
 }
 
 VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                             void (*handle_output)(VirtIODevice *, VirtQueue *))
 {
-    int i;
+    int i, queue_max = virtio_get_queue_max(vdev);
 
-    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
+    for (i = 0; i < queue_max; i++) {
         if (vdev->vq[i].vring.num == 0)
             break;
     }
 
-    if (i == VIRTIO_PCI_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
+    if (i == queue_max || queue_size > VIRTQUEUE_MAX_SIZE) {
         abort();
+    }
 
     vdev->vq[i].vring.num = queue_size;
     vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
@@ -806,7 +816,7 @@  VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
 
 void virtio_del_queue(VirtIODevice *vdev, int n)
 {
-    if (n < 0 || n >= VIRTIO_PCI_QUEUE_MAX) {
+    if (n < 0 || n >= virtio_get_queue_max(vdev)) {
         abort();
     }
 
@@ -916,14 +926,14 @@  void virtio_save(VirtIODevice *vdev, QEMUFile *f)
     qemu_put_be32(f, vdev->config_len);
     qemu_put_buffer(f, vdev->config, vdev->config_len);
 
-    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
+    for (i = 0; i < k->queue_max; i++) {
         if (vdev->vq[i].vring.num == 0)
             break;
     }
 
     qemu_put_be32(f, i);
 
-    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
+    for (i = 0; i < k->queue_max; i++) {
         if (vdev->vq[i].vring.num == 0)
             break;
 
@@ -988,7 +998,7 @@  int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
     qemu_get_8s(f, &vdev->status);
     qemu_get_8s(f, &vdev->isr);
     qemu_get_be16s(f, &vdev->queue_sel);
-    if (vdev->queue_sel >= VIRTIO_PCI_QUEUE_MAX) {
+    if (vdev->queue_sel >= k->queue_max) {
         return -1;
     }
     qemu_get_be32s(f, &features);
@@ -1015,7 +1025,7 @@  int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
 
     num = qemu_get_be32(f);
 
-    if (num > VIRTIO_PCI_QUEUE_MAX) {
+    if (num > k->queue_max) {
         error_report("Invalid number of PCI queues: 0x%x", num);
         return -1;
     }
@@ -1125,15 +1135,15 @@  void virtio_instance_init_common(Object *proxy_obj, void *data,
 void virtio_init(VirtIODevice *vdev, const char *name,
                  uint16_t device_id, size_t config_size)
 {
-    int i;
+    int i, queue_max = virtio_get_queue_max(vdev);
     vdev->device_id = device_id;
     vdev->status = 0;
     vdev->isr = 0;
     vdev->queue_sel = 0;
     vdev->config_vector = VIRTIO_NO_VECTOR;
-    vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_PCI_QUEUE_MAX);
+    vdev->vq = g_malloc0(sizeof(VirtQueue) * queue_max);
     vdev->vm_running = runstate_is_running();
-    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
+    for (i = 0; i < queue_max; i++) {
         vdev->vq[i].vector = VIRTIO_NO_VECTOR;
         vdev->vq[i].vdev = vdev;
         vdev->vq[i].queue_index = i;
diff --git a/include/hw/virtio/virtio-bus.h b/include/hw/virtio/virtio-bus.h
index 0d2e7b4..4da8022 100644
--- a/include/hw/virtio/virtio-bus.h
+++ b/include/hw/virtio/virtio-bus.h
@@ -68,6 +68,7 @@  typedef struct VirtioBusClass {
      * Note that changing this will break migration for this transport.
      */
     bool has_variable_vring_alignment;
+    uint16_t queue_max;
 } VirtioBusClass;
 
 struct VirtioBusState {
diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
index d95f8b6..91fd673 100644
--- a/include/hw/virtio/virtio.h
+++ b/include/hw/virtio/virtio.h
@@ -179,6 +179,7 @@  void virtio_queue_notify(VirtIODevice *vdev, int n);
 uint16_t virtio_queue_vector(VirtIODevice *vdev, int n);
 void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector);
 void virtio_set_status(VirtIODevice *vdev, uint8_t val);
+int virtio_get_queue_max(VirtIODevice *vdev);
 void virtio_reset(void *opaque);
 void virtio_update_irq(VirtIODevice *vdev);
 int virtio_set_features(VirtIODevice *vdev, uint32_t val);