
[2/2] vhost-scsi: Add support for a worker thread per virtqueue

Message ID 20231114003644.7026-3-michael.christie@oracle.com
State New
Series vhost-scsi: Support worker ioctls

Commit Message

Mike Christie Nov. 14, 2023, 12:36 a.m. UTC
This adds support for vhost-scsi to be able to create a worker thread
per virtqueue. Right now for vhost-net we get a worker thread per
tx/rx virtqueue pair which scales nicely as we add more virtqueues and
CPUs, but for scsi we get the single worker thread that's shared by all
virtqueues. When trying to send IO to more than 2 virtqueues the single
thread becomes a bottleneck.

This patch adds a new setting, virtqueue_workers, which can be set to:

1: Existing behavior where we get the single thread.
-1: Create a worker per IO virtqueue.
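
(For illustration only, not part of the patch: with a vhost-scsi-pci device,
whose properties alias those of vhost-scsi, the setting would be passed on the
command line roughly as below; the wwpn and queue count are placeholders.)

  -device vhost-scsi-pci,wwpn=naa.5001405324af0985,num_queues=4,virtqueue_workers=-1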

Signed-off-by: Mike Christie <michael.christie@oracle.com>
---
 hw/scsi/vhost-scsi.c            | 68 +++++++++++++++++++++++++++++++++
 include/hw/virtio/virtio-scsi.h |  1 +
 2 files changed, 69 insertions(+)

Comments

Stefano Garzarella Nov. 15, 2023, 11:43 a.m. UTC | #1
On Mon, Nov 13, 2023 at 06:36:44PM -0600, Mike Christie wrote:
>This adds support for vhost-scsi to be able to create a worker thread
>per virtqueue. Right now for vhost-net we get a worker thread per
>tx/rx virtqueue pair which scales nicely as we add more virtqueues and
>CPUs, but for scsi we get the single worker thread that's shared by all
>virtqueues. When trying to send IO to more than 2 virtqueues the single
>thread becomes a bottleneck.
>
>This patch adds a new setting, virtqueue_workers, which can be set to:
>
>1: Existing behavior where we get the single thread.
>-1: Create a worker per IO virtqueue.

I find this setting a bit odd. What about a boolean instead?

`per_virtqueue_workers`:
     false: Existing behavior where we get the single thread.
     true: Create a worker per IO virtqueue.
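
(For illustration, a minimal sketch of what the property definition could look
like with a bool; the per_virtqueue_workers name and field are only this
suggestion, not something in the patch:

    DEFINE_PROP_BOOL("per_virtqueue_workers", VirtIOSCSICommon,
                     conf.per_virtqueue_workers, false),

with the int virtqueue_workers field in VirtIOSCSIConf replaced by a bool.)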

>
>Signed-off-by: Mike Christie <michael.christie@oracle.com>
>---
> hw/scsi/vhost-scsi.c            | 68 +++++++++++++++++++++++++++++++++
> include/hw/virtio/virtio-scsi.h |  1 +
> 2 files changed, 69 insertions(+)
>
>diff --git a/hw/scsi/vhost-scsi.c b/hw/scsi/vhost-scsi.c
>index 3126df9e1d9d..5cf669b6563b 100644
>--- a/hw/scsi/vhost-scsi.c
>+++ b/hw/scsi/vhost-scsi.c
>@@ -31,6 +31,9 @@
> #include "qemu/cutils.h"
> #include "sysemu/sysemu.h"
>
>+#define VHOST_SCSI_WORKER_PER_VQ    -1
>+#define VHOST_SCSI_WORKER_DEF        1
>+
> /* Features supported by host kernel. */
> static const int kernel_feature_bits[] = {
>     VIRTIO_F_NOTIFY_ON_EMPTY,
>@@ -165,6 +168,62 @@ static const VMStateDescription vmstate_virtio_vhost_scsi = {
>     .pre_save = vhost_scsi_pre_save,
> };
>
>+static int vhost_scsi_set_workers(VHostSCSICommon *vsc, int workers_cnt)
>+{
>+    struct vhost_dev *dev = &vsc->dev;
>+    struct vhost_vring_worker vq_worker;
>+    struct vhost_worker_state worker;
>+    int i, ret;
>+
>+    /* Use default worker */
>+    if (workers_cnt == VHOST_SCSI_WORKER_DEF ||
>+        dev->nvqs == VHOST_SCSI_VQ_NUM_FIXED + 1) {
>+        return 0;
>+    }
>+
>+    if (workers_cnt != VHOST_SCSI_WORKER_PER_VQ) {
>+        return -EINVAL;
>+    }
>+
>+    /*
>+     * ctl/evt share the first worker since it will be rare for them
>+     * to send cmds while IO is running.
>+     */
>+    for (i = VHOST_SCSI_VQ_NUM_FIXED + 1; i < dev->nvqs; i++) {
>+        memset(&worker, 0, sizeof(worker));
>+
>+        ret = dev->vhost_ops->vhost_new_worker(dev, &worker);

Should we call vhost_free_worker() in the vhost_scsi_unrealize() or are
workers automatically freed when `vhostfd` is closed?

The rest LGTM.

Thanks,
Stefano

>+        if (ret == -ENOTTY) {
>+            /*
>+             * worker ioctls are not implemented so just ignore and
>+             * continue device setup.
>+             */
>+            ret = 0;
>+            break;
>+        } else if (ret) {
>+            break;
>+        }
>+
>+        memset(&vq_worker, 0, sizeof(vq_worker));
>+        vq_worker.worker_id = worker.worker_id;
>+        vq_worker.index = i;
>+
>+        ret = dev->vhost_ops->vhost_attach_vring_worker(dev, &vq_worker);
>+        if (ret == -ENOTTY) {
>+            /*
>+             * It's a bug for the kernel to have supported the worker creation
>+             * ioctl but not attach.
>+             */
>+            dev->vhost_ops->vhost_free_worker(dev, &worker);
>+            break;
>+        } else if (ret) {
>+            break;
>+        }
>+    }
>+
>+    return ret;
>+}
>+
> static void vhost_scsi_realize(DeviceState *dev, Error **errp)
> {
>     VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
>@@ -232,6 +291,13 @@ static void vhost_scsi_realize(DeviceState *dev, Error **errp)
>         goto free_vqs;
>     }
>
>+    ret = vhost_scsi_set_workers(vsc, vs->conf.virtqueue_workers);
>+    if (ret < 0) {
>+        error_setg(errp, "vhost-scsi: vhost worker setup failed: %s",
>+                   strerror(-ret));
>+        goto free_vqs;
>+    }
>+
>     /* At present, channel and lun both are 0 for bootable vhost-scsi disk */
>     vsc->channel = 0;
>     vsc->lun = 0;
>@@ -297,6 +363,8 @@ static Property vhost_scsi_properties[] = {
>                                                  VIRTIO_SCSI_F_T10_PI,
>                                                  false),
>     DEFINE_PROP_BOOL("migratable", VHostSCSICommon, migratable, false),
>+    DEFINE_PROP_INT32("virtqueue_workers", VirtIOSCSICommon,
>+                      conf.virtqueue_workers, VHOST_SCSI_WORKER_DEF),
>     DEFINE_PROP_END_OF_LIST(),
> };
>
>diff --git a/include/hw/virtio/virtio-scsi.h b/include/hw/virtio/virtio-scsi.h
>index 779568ab5d28..f70624ece564 100644
>--- a/include/hw/virtio/virtio-scsi.h
>+++ b/include/hw/virtio/virtio-scsi.h
>@@ -51,6 +51,7 @@ typedef struct virtio_scsi_config VirtIOSCSIConfig;
> struct VirtIOSCSIConf {
>     uint32_t num_queues;
>     uint32_t virtqueue_size;
>+    int virtqueue_workers;
>     bool seg_max_adjust;
>     uint32_t max_sectors;
>     uint32_t cmd_per_lun;
>-- 
>2.34.1
>
Stefan Hajnoczi Nov. 15, 2023, 12:57 p.m. UTC | #2
On Wed, Nov 15, 2023 at 12:43:02PM +0100, Stefano Garzarella wrote:
> On Mon, Nov 13, 2023 at 06:36:44PM -0600, Mike Christie wrote:
> > This adds support for vhost-scsi to be able to create a worker thread
> > per virtqueue. Right now for vhost-net we get a worker thread per
> > tx/rx virtqueue pair which scales nicely as we add more virtqueues and
> > CPUs, but for scsi we get the single worker thread that's shared by all
> > virtqueues. When trying to send IO to more than 2 virtqueues the single
> > thread becomes a bottleneck.
> > 
> > This patch adds a new setting, virtqueue_workers, which can be set to:
> > 
> > 1: Existing behavior where we get the single thread.
> > -1: Create a worker per IO virtqueue.
> 
> I find this setting a bit odd. What about a boolean instead?
> 
> `per_virtqueue_workers`:
>     false: Existing behavior where we get the single thread.
>     true: Create a worker per IO virtqueue.

Me too, I thought there would be round-robin assignment for 1 <
worker_cnt < (dev->nvqs - VHOST_SCSI_VQ_NUM_FIXED) but instead only 1
and -1 have any meaning.

Do you want to implement round-robin assignment?
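
(For reference, a rough sketch of what round-robin assignment could look like,
reusing the worker ioctl wrappers from this patch; illustrative only, with the
-ENOTTY fallback and most error handling elided:

static int vhost_scsi_set_workers_rr(VHostSCSICommon *vsc, int workers_cnt)
{
    struct vhost_dev *dev = &vsc->dev;
    struct vhost_worker_state *workers;
    struct vhost_vring_worker vq_worker;
    int i, ret = 0;

    /* Create the requested number of workers up front. */
    workers = g_new0(struct vhost_worker_state, workers_cnt);
    for (i = 0; i < workers_cnt; i++) {
        ret = dev->vhost_ops->vhost_new_worker(dev, &workers[i]);
        if (ret) {
            goto out;
        }
    }

    /* Spread the IO virtqueues across the workers round-robin. */
    for (i = VHOST_SCSI_VQ_NUM_FIXED + 1; i < dev->nvqs; i++) {
        memset(&vq_worker, 0, sizeof(vq_worker));
        vq_worker.worker_id =
            workers[(i - VHOST_SCSI_VQ_NUM_FIXED - 1) % workers_cnt].worker_id;
        vq_worker.index = i;

        ret = dev->vhost_ops->vhost_attach_vring_worker(dev, &vq_worker);
        if (ret) {
            goto out;
        }
    }

out:
    g_free(workers);
    return ret;
}
)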

> 
> > 
> > Signed-off-by: Mike Christie <michael.christie@oracle.com>
> > ---
> > hw/scsi/vhost-scsi.c            | 68 +++++++++++++++++++++++++++++++++
> > include/hw/virtio/virtio-scsi.h |  1 +
> > 2 files changed, 69 insertions(+)
> > 
> > diff --git a/hw/scsi/vhost-scsi.c b/hw/scsi/vhost-scsi.c
> > index 3126df9e1d9d..5cf669b6563b 100644
> > --- a/hw/scsi/vhost-scsi.c
> > +++ b/hw/scsi/vhost-scsi.c
> > @@ -31,6 +31,9 @@
> > #include "qemu/cutils.h"
> > #include "sysemu/sysemu.h"
> > 
> > +#define VHOST_SCSI_WORKER_PER_VQ    -1
> > +#define VHOST_SCSI_WORKER_DEF        1
> > +
> > /* Features supported by host kernel. */
> > static const int kernel_feature_bits[] = {
> >     VIRTIO_F_NOTIFY_ON_EMPTY,
> > @@ -165,6 +168,62 @@ static const VMStateDescription vmstate_virtio_vhost_scsi = {
> >     .pre_save = vhost_scsi_pre_save,
> > };
> > 
> > +static int vhost_scsi_set_workers(VHostSCSICommon *vsc, int workers_cnt)
> > +{
> > +    struct vhost_dev *dev = &vsc->dev;
> > +    struct vhost_vring_worker vq_worker;
> > +    struct vhost_worker_state worker;
> > +    int i, ret;
> > +
> > +    /* Use default worker */
> > +    if (workers_cnt == VHOST_SCSI_WORKER_DEF ||
> > +        dev->nvqs == VHOST_SCSI_VQ_NUM_FIXED + 1) {
> > +        return 0;
> > +    }
> > +
> > +    if (workers_cnt != VHOST_SCSI_WORKER_PER_VQ) {
> > +        return -EINVAL;
> > +    }
> > +
> > +    /*
> > +     * ctl/evt share the first worker since it will be rare for them
> > +     * to send cmds while IO is running.
> > +     */
> > +    for (i = VHOST_SCSI_VQ_NUM_FIXED + 1; i < dev->nvqs; i++) {
> > +        memset(&worker, 0, sizeof(worker));
> > +
> > +        ret = dev->vhost_ops->vhost_new_worker(dev, &worker);
> 
> Should we call vhost_free_worker() in the vhost_scsi_unrealize() or are
> workers automatically freed when `vhostfd` is closed?
> 
> The rest LGTM.
> 
> Thanks,
> Stefano
> 
> > +        if (ret == -ENOTTY) {
> > +            /*
> > +             * worker ioctls are not implemented so just ignore and
> > +             * continue device setup.
> > +             */
> > +            ret = 0;
> > +            break;
> > +        } else if (ret) {
> > +            break;
> > +        }
> > +
> > +        memset(&vq_worker, 0, sizeof(vq_worker));
> > +        vq_worker.worker_id = worker.worker_id;
> > +        vq_worker.index = i;
> > +
> > +        ret = dev->vhost_ops->vhost_attach_vring_worker(dev, &vq_worker);
> > +        if (ret == -ENOTTY) {
> > +            /*
> > +             * It's a bug for the kernel to have supported the worker creation
> > +             * ioctl but not attach.
> > +             */
> > +            dev->vhost_ops->vhost_free_worker(dev, &worker);
> > +            break;
> > +        } else if (ret) {
> > +            break;
> > +        }
> > +    }
> > +
> > +    return ret;
> > +}
> > +
> > static void vhost_scsi_realize(DeviceState *dev, Error **errp)
> > {
> >     VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
> > @@ -232,6 +291,13 @@ static void vhost_scsi_realize(DeviceState *dev, Error **errp)
> >         goto free_vqs;
> >     }
> > 
> > +    ret = vhost_scsi_set_workers(vsc, vs->conf.virtqueue_workers);
> > +    if (ret < 0) {
> > +        error_setg(errp, "vhost-scsi: vhost worker setup failed: %s",
> > +                   strerror(-ret));
> > +        goto free_vqs;
> > +    }
> > +
> >     /* At present, channel and lun both are 0 for bootable vhost-scsi disk */
> >     vsc->channel = 0;
> >     vsc->lun = 0;
> > @@ -297,6 +363,8 @@ static Property vhost_scsi_properties[] = {
> >                                                  VIRTIO_SCSI_F_T10_PI,
> >                                                  false),
> >     DEFINE_PROP_BOOL("migratable", VHostSCSICommon, migratable, false),
> > +    DEFINE_PROP_INT32("virtqueue_workers", VirtIOSCSICommon,
> > +                      conf.virtqueue_workers, VHOST_SCSI_WORKER_DEF),
> >     DEFINE_PROP_END_OF_LIST(),
> > };
> > 
> > diff --git a/include/hw/virtio/virtio-scsi.h b/include/hw/virtio/virtio-scsi.h
> > index 779568ab5d28..f70624ece564 100644
> > --- a/include/hw/virtio/virtio-scsi.h
> > +++ b/include/hw/virtio/virtio-scsi.h
> > @@ -51,6 +51,7 @@ typedef struct virtio_scsi_config VirtIOSCSIConfig;
> > struct VirtIOSCSIConf {
> >     uint32_t num_queues;
> >     uint32_t virtqueue_size;
> > +    int virtqueue_workers;
> >     bool seg_max_adjust;
> >     uint32_t max_sectors;
> >     uint32_t cmd_per_lun;
> > -- 
> > 2.34.1
> > 
>
Mike Christie Nov. 15, 2023, 4:48 p.m. UTC | #3
On 11/15/23 5:43 AM, Stefano Garzarella wrote:
> On Mon, Nov 13, 2023 at 06:36:44PM -0600, Mike Christie wrote:
>> This adds support for vhost-scsi to be able to create a worker thread
>> per virtqueue. Right now for vhost-net we get a worker thread per
>> tx/rx virtqueue pair which scales nicely as we add more virtqueues and
>> CPUs, but for scsi we get the single worker thread that's shared by all
>> virtqueues. When trying to send IO to more than 2 virtqueues the single
>> thread becomes a bottleneck.
>>
>> This patch adds a new setting, virtqueue_workers, which can be set to:
>>
>> 1: Existing behavior where we get the single thread.
>> -1: Create a worker per IO virtqueue.
> 
> I find this setting a bit odd. What about a boolean instead?
> 
> `per_virtqueue_workers`:
>     false: Existing behavior where we get the single thread.
>     true: Create a worker per IO virtqueue.

Sounds good.


> 
>>
>> Signed-off-by: Mike Christie <michael.christie@oracle.com>
>> ---
>> hw/scsi/vhost-scsi.c            | 68 +++++++++++++++++++++++++++++++++
>> include/hw/virtio/virtio-scsi.h |  1 +
>> 2 files changed, 69 insertions(+)
>>
>> diff --git a/hw/scsi/vhost-scsi.c b/hw/scsi/vhost-scsi.c
>> index 3126df9e1d9d..5cf669b6563b 100644
>> --- a/hw/scsi/vhost-scsi.c
>> +++ b/hw/scsi/vhost-scsi.c
>> @@ -31,6 +31,9 @@
>> #include "qemu/cutils.h"
>> #include "sysemu/sysemu.h"
>>
>> +#define VHOST_SCSI_WORKER_PER_VQ    -1
>> +#define VHOST_SCSI_WORKER_DEF        1
>> +
>> /* Features supported by host kernel. */
>> static const int kernel_feature_bits[] = {
>>     VIRTIO_F_NOTIFY_ON_EMPTY,
>> @@ -165,6 +168,62 @@ static const VMStateDescription vmstate_virtio_vhost_scsi = {
>>     .pre_save = vhost_scsi_pre_save,
>> };
>>
>> +static int vhost_scsi_set_workers(VHostSCSICommon *vsc, int workers_cnt)
>> +{
>> +    struct vhost_dev *dev = &vsc->dev;
>> +    struct vhost_vring_worker vq_worker;
>> +    struct vhost_worker_state worker;
>> +    int i, ret;
>> +
>> +    /* Use default worker */
>> +    if (workers_cnt == VHOST_SCSI_WORKER_DEF ||
>> +        dev->nvqs == VHOST_SCSI_VQ_NUM_FIXED + 1) {
>> +        return 0;
>> +    }
>> +
>> +    if (workers_cnt != VHOST_SCSI_WORKER_PER_VQ) {
>> +        return -EINVAL;
>> +    }
>> +
>> +    /*
>> +     * ctl/evt share the first worker since it will be rare for them
>> +     * to send cmds while IO is running.
>> +     */
>> +    for (i = VHOST_SCSI_VQ_NUM_FIXED + 1; i < dev->nvqs; i++) {
>> +        memset(&worker, 0, sizeof(worker));
>> +
>> +        ret = dev->vhost_ops->vhost_new_worker(dev, &worker);
> 
> Should we call vhost_free_worker() in the vhost_scsi_unrealize() or are
> workers automatically freed when `vhostfd` is closed?
> 

All worker threads are freed automatically like how the default worker
created from VHOST_SET_OWNER is freed on close.
Mike Christie Nov. 15, 2023, 4:56 p.m. UTC | #4
On 11/15/23 6:57 AM, Stefan Hajnoczi wrote:
> On Wed, Nov 15, 2023 at 12:43:02PM +0100, Stefano Garzarella wrote:
>> On Mon, Nov 13, 2023 at 06:36:44PM -0600, Mike Christie wrote:
>>> This adds support for vhost-scsi to be able to create a worker thread
>>> per virtqueue. Right now for vhost-net we get a worker thread per
>>> tx/rx virtqueue pair which scales nicely as we add more virtqueues and
>>> CPUs, but for scsi we get the single worker thread that's shared by all
>>> virtqueues. When trying to send IO to more than 2 virtqueues the single
> >>> thread becomes a bottleneck.
>>>
>>> This patch adds a new setting, virtqueue_workers, which can be set to:
>>>
>>> 1: Existing behavior whre we get the single thread.
>>> -1: Create a worker per IO virtqueue.
>>
>> I find this setting a bit odd. What about a boolean instead?
>>
>> `per_virtqueue_workers`:
>>     false: Existing behavior whre we get the single thread.
>>     true: Create a worker per IO virtqueue.
> 
> Me too, I thought there would be round-robin assignment for 1 <
> worker_cnt < (dev->nvqs - VHOST_SCSI_VQ_NUM_FIXED) but instead only 1
> and -1 have any meaning.
> 
> Do you want to implement round-robin assignment?
> 

It was an int because I originally did round robin but at some point
dropped it. I found that our users at least:

1. Are used to configuring number of virtqueues.
2. In the userspace guest OS are used to checking the queue to CPU
mappings to figure out how their app should optimize itself.

So users would just do a virtqueue per vCPU or if trying to reduce
mem usage would do N virtqueues < vCPUs. For both cases they just did the
worker per virtqueue.

However, I left it an int in case in the future someone wanted
the feature.
Stefan Hajnoczi Nov. 15, 2023, 5:24 p.m. UTC | #5
On Wed, 15 Nov 2023 at 11:57, Mike Christie <michael.christie@oracle.com> wrote:
>
> On 11/15/23 6:57 AM, Stefan Hajnoczi wrote:
> > On Wed, Nov 15, 2023 at 12:43:02PM +0100, Stefano Garzarella wrote:
> >> On Mon, Nov 13, 2023 at 06:36:44PM -0600, Mike Christie wrote:
> >>> This adds support for vhost-scsi to be able to create a worker thread
> >>> per virtqueue. Right now for vhost-net we get a worker thread per
> >>> tx/rx virtqueue pair which scales nicely as we add more virtqueues and
> >>> CPUs, but for scsi we get the single worker thread that's shared by all
> >>> virtqueues. When trying to send IO to more than 2 virtqueues the single
> >>> thread becomes a bottleneck.
> >>>
> >>> This patch adds a new setting, virtqueue_workers, which can be set to:
> >>>
> >>> 1: Existing behavior where we get the single thread.
> >>> -1: Create a worker per IO virtqueue.
> >>
> >> I find this setting a bit odd. What about a boolean instead?
> >>
> >> `per_virtqueue_workers`:
> >>     false: Existing behavior where we get the single thread.
> >>     true: Create a worker per IO virtqueue.
> >
> > Me too, I thought there would be round-robin assignment for 1 <
> > worker_cnt < (dev->nvqs - VHOST_SCSI_VQ_NUM_FIXED) but instead only 1
> > and -1 have any meaning.
> >
> > Do you want to implement round-robin assignment?
> >
>
> It was an int because I originally did round robin but at some point
> dropped it. I found that our users at least:
>
> 1. Are used to configuring number of virtqueues.
> 2. In the userspace guest OS are used to checking the queue to CPU
> mappings to figure out how their app should optimize itself.
>
> So users would just do a virtqueue per vCPU or if trying to reduce
> mem usage would do N virtqueues < vCPUs. For both cases they just did the
> worker per virtqueue.
>
> However, I left it an int in case in the future someone wanted
> the feature.

Okay. I have a slight preference for a vq_workers bool parameter in
that case. An int parameter can be added later if the number of
workers needs to be specified. But if you want to keep -1/1 then I
guess it's fine unless someone else has a strong opinion.

Stefan

Patch

diff --git a/hw/scsi/vhost-scsi.c b/hw/scsi/vhost-scsi.c
index 3126df9e1d9d..5cf669b6563b 100644
--- a/hw/scsi/vhost-scsi.c
+++ b/hw/scsi/vhost-scsi.c
@@ -31,6 +31,9 @@ 
 #include "qemu/cutils.h"
 #include "sysemu/sysemu.h"
 
+#define VHOST_SCSI_WORKER_PER_VQ    -1
+#define VHOST_SCSI_WORKER_DEF        1
+
 /* Features supported by host kernel. */
 static const int kernel_feature_bits[] = {
     VIRTIO_F_NOTIFY_ON_EMPTY,
@@ -165,6 +168,62 @@  static const VMStateDescription vmstate_virtio_vhost_scsi = {
     .pre_save = vhost_scsi_pre_save,
 };
 
+static int vhost_scsi_set_workers(VHostSCSICommon *vsc, int workers_cnt)
+{
+    struct vhost_dev *dev = &vsc->dev;
+    struct vhost_vring_worker vq_worker;
+    struct vhost_worker_state worker;
+    int i, ret;
+
+    /* Use default worker */
+    if (workers_cnt == VHOST_SCSI_WORKER_DEF ||
+        dev->nvqs == VHOST_SCSI_VQ_NUM_FIXED + 1) {
+        return 0;
+    }
+
+    if (workers_cnt != VHOST_SCSI_WORKER_PER_VQ) {
+        return -EINVAL;
+    }
+
+    /*
+     * ctl/evt share the first worker since it will be rare for them
+     * to send cmds while IO is running.
+     */
+    for (i = VHOST_SCSI_VQ_NUM_FIXED + 1; i < dev->nvqs; i++) {
+        memset(&worker, 0, sizeof(worker));
+
+        ret = dev->vhost_ops->vhost_new_worker(dev, &worker);
+        if (ret == -ENOTTY) {
+            /*
+             * worker ioctls are not implemented so just ignore and
+             * continue device setup.
+             */
+            ret = 0;
+            break;
+        } else if (ret) {
+            break;
+        }
+
+        memset(&vq_worker, 0, sizeof(vq_worker));
+        vq_worker.worker_id = worker.worker_id;
+        vq_worker.index = i;
+
+        ret = dev->vhost_ops->vhost_attach_vring_worker(dev, &vq_worker);
+        if (ret == -ENOTTY) {
+            /*
+             * It's a bug for the kernel to have supported the worker creation
+             * ioctl but not attach.
+             */
+            dev->vhost_ops->vhost_free_worker(dev, &worker);
+            break;
+        } else if (ret) {
+            break;
+        }
+    }
+
+    return ret;
+}
+
 static void vhost_scsi_realize(DeviceState *dev, Error **errp)
 {
     VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
@@ -232,6 +291,13 @@  static void vhost_scsi_realize(DeviceState *dev, Error **errp)
         goto free_vqs;
     }
 
+    ret = vhost_scsi_set_workers(vsc, vs->conf.virtqueue_workers);
+    if (ret < 0) {
+        error_setg(errp, "vhost-scsi: vhost worker setup failed: %s",
+                   strerror(-ret));
+        goto free_vqs;
+    }
+
     /* At present, channel and lun both are 0 for bootable vhost-scsi disk */
     vsc->channel = 0;
     vsc->lun = 0;
@@ -297,6 +363,8 @@  static Property vhost_scsi_properties[] = {
                                                  VIRTIO_SCSI_F_T10_PI,
                                                  false),
     DEFINE_PROP_BOOL("migratable", VHostSCSICommon, migratable, false),
+    DEFINE_PROP_INT32("virtqueue_workers", VirtIOSCSICommon,
+                      conf.virtqueue_workers, VHOST_SCSI_WORKER_DEF),
     DEFINE_PROP_END_OF_LIST(),
 };
 
diff --git a/include/hw/virtio/virtio-scsi.h b/include/hw/virtio/virtio-scsi.h
index 779568ab5d28..f70624ece564 100644
--- a/include/hw/virtio/virtio-scsi.h
+++ b/include/hw/virtio/virtio-scsi.h
@@ -51,6 +51,7 @@  typedef struct virtio_scsi_config VirtIOSCSIConfig;
 struct VirtIOSCSIConf {
     uint32_t num_queues;
     uint32_t virtqueue_size;
+    int virtqueue_workers;
     bool seg_max_adjust;
     uint32_t max_sectors;
     uint32_t cmd_per_lun;