
[V5,1/6] vhost: introduce vhost_vring_call

Message ID 20200731065533.4144-2-lingshan.zhu@intel.com
State Not Applicable
Delegated to: David Miller
Series IRQ offloading for vDPA

Commit Message

Zhu, Lingshan July 31, 2020, 6:55 a.m. UTC
This commit introduces struct vhost_vring_call, which replaces the
raw struct eventfd_ctx *call_ctx in struct vhost_virtqueue.
Besides the eventfd_ctx, it contains a spin lock and an
irq_bypass_producer.

Signed-off-by: Zhu Lingshan <lingshan.zhu@intel.com>
Suggested-by: Jason Wang <jasowang@redhat.com>
---
 drivers/vhost/vdpa.c  |  4 ++--
 drivers/vhost/vhost.c | 22 ++++++++++++++++------
 drivers/vhost/vhost.h |  9 ++++++++-
 3 files changed, 26 insertions(+), 9 deletions(-)
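
For context, the embedded irq_bypass_producer is only consumed later in the
series, when a vDPA parent device's interrupt is wired into the irq bypass
framework. A rough sketch of that kind of usage, with a hypothetical helper
name that is not part of this patch, could look like:

/*
 * Illustrative sketch only: publish a virtqueue interrupt as an irq bypass
 * producer, using the call eventfd ctx as the token a consumer matches on.
 */
static void vhost_vq_setup_irq_bypass(struct vhost_virtqueue *vq, int irq)
{
	if (!vq->call_ctx.ctx)
		return;

	vq->call_ctx.producer.token = vq->call_ctx.ctx;
	vq->call_ctx.producer.irq = irq;
	irq_bypass_register_producer(&vq->call_ctx.producer);
}

static void vhost_vq_teardown_irq_bypass(struct vhost_virtqueue *vq)
{
	irq_bypass_unregister_producer(&vq->call_ctx.producer);
}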

Comments

Jason Wang Aug. 4, 2020, 8:38 a.m. UTC | #1
On 2020/7/31 2:55 PM, Zhu Lingshan wrote:
> This commit introduces struct vhost_vring_call which replaced
> raw struct eventfd_ctx *call_ctx in struct vhost_virtqueue.
> Besides eventfd_ctx, it contains a spin lock and an
> irq_bypass_producer in its structure.
>
> Signed-off-by: Zhu Lingshan <lingshan.zhu@intel.com>
> Suggested-by: Jason Wang <jasowang@redhat.com>
> ---
>   drivers/vhost/vdpa.c  |  4 ++--
>   drivers/vhost/vhost.c | 22 ++++++++++++++++------
>   drivers/vhost/vhost.h |  9 ++++++++-
>   3 files changed, 26 insertions(+), 9 deletions(-)
>
> diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
> index a54b60d6623f..df3cf386b0cd 100644
> --- a/drivers/vhost/vdpa.c
> +++ b/drivers/vhost/vdpa.c
> @@ -96,7 +96,7 @@ static void handle_vq_kick(struct vhost_work *work)
>   static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
>   {
>   	struct vhost_virtqueue *vq = private;
> -	struct eventfd_ctx *call_ctx = vq->call_ctx;
> +	struct eventfd_ctx *call_ctx = vq->call_ctx.ctx;
>   
>   	if (call_ctx)
>   		eventfd_signal(call_ctx, 1);
> @@ -382,7 +382,7 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
>   		break;
>   
>   	case VHOST_SET_VRING_CALL:
> -		if (vq->call_ctx) {
> +		if (vq->call_ctx.ctx) {
>   			cb.callback = vhost_vdpa_virtqueue_cb;
>   			cb.private = vq;
>   		} else {
> diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
> index d7b8df3edffc..9f1a845a9302 100644
> --- a/drivers/vhost/vhost.c
> +++ b/drivers/vhost/vhost.c
> @@ -298,6 +298,13 @@ static void vhost_vq_meta_reset(struct vhost_dev *d)
>   		__vhost_vq_meta_reset(d->vqs[i]);
>   }
>   
> +static void vhost_vring_call_reset(struct vhost_vring_call *call_ctx)
> +{
> +	call_ctx->ctx = NULL;
> +	memset(&call_ctx->producer, 0x0, sizeof(struct irq_bypass_producer));
> +	spin_lock_init(&call_ctx->ctx_lock);
> +}
> +
>   static void vhost_vq_reset(struct vhost_dev *dev,
>   			   struct vhost_virtqueue *vq)
>   {
> @@ -319,13 +326,13 @@ static void vhost_vq_reset(struct vhost_dev *dev,
>   	vq->log_base = NULL;
>   	vq->error_ctx = NULL;
>   	vq->kick = NULL;
> -	vq->call_ctx = NULL;
>   	vq->log_ctx = NULL;
>   	vhost_reset_is_le(vq);
>   	vhost_disable_cross_endian(vq);
>   	vq->busyloop_timeout = 0;
>   	vq->umem = NULL;
>   	vq->iotlb = NULL;
> +	vhost_vring_call_reset(&vq->call_ctx);
>   	__vhost_vq_meta_reset(vq);
>   }
>   
> @@ -685,8 +692,8 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
>   			eventfd_ctx_put(dev->vqs[i]->error_ctx);
>   		if (dev->vqs[i]->kick)
>   			fput(dev->vqs[i]->kick);
> -		if (dev->vqs[i]->call_ctx)
> -			eventfd_ctx_put(dev->vqs[i]->call_ctx);
> +		if (dev->vqs[i]->call_ctx.ctx)
> +			eventfd_ctx_put(dev->vqs[i]->call_ctx.ctx);
>   		vhost_vq_reset(dev, dev->vqs[i]);
>   	}
>   	vhost_dev_free_iovecs(dev);
> @@ -1629,7 +1636,10 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
>   			r = PTR_ERR(ctx);
>   			break;
>   		}
> -		swap(ctx, vq->call_ctx);
> +
> +		spin_lock(&vq->call_ctx.ctx_lock);
> +		swap(ctx, vq->call_ctx.ctx);
> +		spin_unlock(&vq->call_ctx.ctx_lock);
>   		break;
>   	case VHOST_SET_VRING_ERR:
>   		if (copy_from_user(&f, argp, sizeof f)) {
> @@ -2440,8 +2450,8 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
>   void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
>   {
>   	/* Signal the Guest tell them we used something up. */
> -	if (vq->call_ctx && vhost_notify(dev, vq))
> -		eventfd_signal(vq->call_ctx, 1);
> +	if (vq->call_ctx.ctx && vhost_notify(dev, vq))
> +		eventfd_signal(vq->call_ctx.ctx, 1);
>   }
>   EXPORT_SYMBOL_GPL(vhost_signal);
>   
> diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
> index c8e96a095d3b..38eb1aa3b68d 100644
> --- a/drivers/vhost/vhost.h
> +++ b/drivers/vhost/vhost.h
> @@ -13,6 +13,7 @@
>   #include <linux/virtio_ring.h>
>   #include <linux/atomic.h>
>   #include <linux/vhost_iotlb.h>
> +#include <linux/irqbypass.h>
>   
>   struct vhost_work;
>   typedef void (*vhost_work_fn_t)(struct vhost_work *work);
> @@ -60,6 +61,12 @@ enum vhost_uaddr_type {
>   	VHOST_NUM_ADDRS = 3,
>   };
>   
> +struct vhost_vring_call {
> +	struct eventfd_ctx *ctx;
> +	struct irq_bypass_producer producer;
> +	spinlock_t ctx_lock;


It's not clear to me why we need ctx_lock here.

Thanks


> +};
> +
>   /* The virtqueue structure describes a queue attached to a device. */
>   struct vhost_virtqueue {
>   	struct vhost_dev *dev;
> @@ -72,7 +79,7 @@ struct vhost_virtqueue {
>   	vring_used_t __user *used;
>   	const struct vhost_iotlb_map *meta_iotlb[VHOST_NUM_ADDRS];
>   	struct file *kick;
> -	struct eventfd_ctx *call_ctx;
> +	struct vhost_vring_call call_ctx;
>   	struct eventfd_ctx *error_ctx;
>   	struct eventfd_ctx *log_ctx;
>
Jason Wang Aug. 4, 2020, 8:53 a.m. UTC | #2
On 2020/8/4 4:42 PM, Zhu, Lingshan wrote:
>
>
> On 8/4/2020 4:38 PM, Jason Wang wrote:
>>
>> On 2020/7/31 2:55 PM, Zhu Lingshan wrote:
>>> [...]
>>>   +struct vhost_vring_call {
>>> +    struct eventfd_ctx *ctx;
>>> +    struct irq_bypass_producer producer;
>>> +    spinlock_t ctx_lock;
>>
>>
>> It's not clear to me why we need ctx_lock here.
>>
>> Thanks
> Hi Jason,
>
> we use this lock to protect the eventfd_ctx and irq from race conditions,


We don't support irq notification from the vDPA device driver in this 
version; do we still have a race condition?

Thanks


>   are you suggesting a better name?
>
> Thanks
Michael S. Tsirkin Aug. 4, 2020, 9:21 a.m. UTC | #3
On Tue, Aug 04, 2020 at 04:53:39PM +0800, Jason Wang wrote:
> 
> > On 2020/8/4 4:42 PM, Zhu, Lingshan wrote:
> > 
> > 
> > On 8/4/2020 4:38 PM, Jason Wang wrote:
> > > 
> > > On 2020/7/31 2:55 PM, Zhu Lingshan wrote:
> > > > [...]
> > > >   +struct vhost_vring_call {
> > > > +    struct eventfd_ctx *ctx;
> > > > +    struct irq_bypass_producer producer;
> > > > +    spinlock_t ctx_lock;
> > > 
> > > 
> > > It's not clear to me why we need ctx_lock here.
> > > 
> > > Thanks
> > Hi Jason,
> > 
> > we use this lock to protect the eventfd_ctx and irq from race conditions,
> 
> 
> We don't support irq notification from vDPA device driver in this version,
> do we still have race condition?
> 
> Thanks

Jason, I'm not sure what you are trying to say here.


Jason Wang Aug. 5, 2020, 2:16 a.m. UTC | #4
On 2020/8/4 5:21 PM, Michael S. Tsirkin wrote:
>>>>>    +struct vhost_vring_call {
>>>>> +    struct eventfd_ctx *ctx;
>>>>> +    struct irq_bypass_producer producer;
>>>>> +    spinlock_t ctx_lock;
>>>> It's not clear to me why we need ctx_lock here.
>>>>
>>>> Thanks
>>> Hi Jason,
>>>
>>> we use this lock to protect the eventfd_ctx and irq from race conditions,
>> We don't support irq notification from vDPA device driver in this version,
>> do we still have race condition?
>>
>> Thanks
> Jason I'm not sure what you are trying to say here.


I meant we changed the API from V4 so the driver won't notify us if the 
irq is changed.

Then it looks to me there's no need for the ctx_lock; everything could be 
synchronized with the vq mutex.

Thanks

>
>
Jason Wang Aug. 5, 2020, 2:20 a.m. UTC | #5
On 2020/8/4 5:21 PM, Zhu, Lingshan wrote:
>>> Hi Jason,
>>>
>>> we use this lock to protect the eventfd_ctx and irq from race 
>>> conditions,
>>
>>
>> We don't support irq notification from vDPA device driver in this 
>> version, do we still have race condition?
> as we discussed before:
> (1) if the vendor changes the IRQ after DRIVER_OK; though they should not do this, they can.
> (2) if user space changes the ctx.
>
> Thanks


Yes, but then everything happens in the context of a syscall (ioctl), so the 
vq mutex is sufficient, I guess?

Thanks
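
To make the two scenarios above concrete, here is a rough sketch of the kind
of update the ctx_lock is meant to serialize against the VHOST_SET_VRING_CALL
swap shown in the patch (hypothetical helper name, not taken from this series):

/*
 * Illustrative sketch only: an IRQ update driven by the vDPA parent should
 * not publish a producer token while userspace is swapping call_ctx.ctx in
 * VHOST_SET_VRING_CALL, hence both paths take ctx_lock.
 */
static void vhost_vq_update_call_irq(struct vhost_virtqueue *vq, int irq)
{
	spin_lock(&vq->call_ctx.ctx_lock);
	vq->call_ctx.producer.irq = irq;
	/* keep the token consistent with whatever ctx is currently installed */
	vq->call_ctx.producer.token = vq->call_ctx.ctx;
	spin_unlock(&vq->call_ctx.ctx_lock);
}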
Jason Wang Aug. 5, 2020, 5:53 a.m. UTC | #6
On 2020/8/5 1:49 PM, Zhu, Lingshan wrote:
>
>
> On 8/5/2020 10:16 AM, Jason Wang wrote:
>>
>> On 2020/8/4 5:21 PM, Michael S. Tsirkin wrote:
>>>>>>>    +struct vhost_vring_call {
>>>>>>> +    struct eventfd_ctx *ctx;
>>>>>>> +    struct irq_bypass_producer producer;
>>>>>>> +    spinlock_t ctx_lock;
>>>>>> It's not clear to me why we need ctx_lock here.
>>>>>>
>>>>>> Thanks
>>>>> Hi Jason,
>>>>>
>>>>> we use this lock to protect the eventfd_ctx and irq from race 
>>>>> conditions,
>>>> We don't support irq notification from vDPA device driver in this 
>>>> version,
>>>> do we still have race condition?
>>>>
>>>> Thanks
>>> Jason I'm not sure what you are trying to say here.
>>
>>
>> I meant we change the API from V4 so driver won't notify us if irq is 
>> changed.
>>
>> Then it looks to me there's no need for the ctx_lock, everyhing could 
>> be synchronized with vq mutex.
>>
>> Thanks
> From V4 to V5 there are only some minor improvements and bug fixes; get_vq_irq() stays almost untouched, and a mutex can work for this. However, I see the vq mutex is used in many scenarios.
> We only use this lock to protect the producer information; could this help reduce coupling and act as defensive code against bugs?


I think not; the vq mutex is used to protect all vq-related data structures, and 
introducing another lock will increase the complexity.

Thanks


>
> Thanks
>>
>>>
>>>
>>
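
For comparison, the simplification argued for here would drop ctx_lock and rely
on the vq mutex alone. A minimal sketch of the same hypothetical helper as in
the earlier illustration, assuming the IRQ update runs in process context and
that vhost_vring_ioctl() holds vq->mutex around VHOST_SET_VRING_CALL:

/*
 * Illustrative sketch only: serialize against the VHOST_SET_VRING_CALL swap
 * with vq->mutex instead of a dedicated spinlock.
 */
static void vhost_vq_update_call_irq(struct vhost_virtqueue *vq, int irq)
{
	mutex_lock(&vq->mutex);
	vq->call_ctx.producer.irq = irq;
	vq->call_ctx.producer.token = vq->call_ctx.ctx;
	mutex_unlock(&vq->mutex);
}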
Michael S. Tsirkin Aug. 10, 2020, 1:37 p.m. UTC | #7
On Wed, Aug 05, 2020 at 10:16:16AM +0800, Jason Wang wrote:
> 
> On 2020/8/4 5:21 PM, Michael S. Tsirkin wrote:
> > > > > >    +struct vhost_vring_call {
> > > > > > +    struct eventfd_ctx *ctx;
> > > > > > +    struct irq_bypass_producer producer;
> > > > > > +    spinlock_t ctx_lock;
> > > > > It's not clear to me why we need ctx_lock here.
> > > > > 
> > > > > Thanks
> > > > Hi Jason,
> > > > 
> > > > we use this lock to protect the eventfd_ctx and irq from race conditions,
> > > We don't support irq notification from vDPA device driver in this version,
> > > do we still have race condition?
> > > 
> > > Thanks
> > Jason I'm not sure what you are trying to say here.
> 
> 
> I meant we change the API from V4 so driver won't notify us if irq is
> changed.
> 
> Then it looks to me there's no need for the ctx_lock, everyhing could be
> synchronized with vq mutex.
> 
> Thanks

Jason, do you want to post a cleanup patch simplifying the code along these
lines?

Thanks,


> > 
> >
Jason Wang Aug. 11, 2020, 2:53 a.m. UTC | #8
On 2020/8/10 9:37 PM, Michael S. Tsirkin wrote:
> On Wed, Aug 05, 2020 at 10:16:16AM +0800, Jason Wang wrote:
>> On 2020/8/4 5:21 PM, Michael S. Tsirkin wrote:
>>>>>>>     +struct vhost_vring_call {
>>>>>>> +    struct eventfd_ctx *ctx;
>>>>>>> +    struct irq_bypass_producer producer;
>>>>>>> +    spinlock_t ctx_lock;
>>>>>> It's not clear to me why we need ctx_lock here.
>>>>>>
>>>>>> Thanks
>>>>> Hi Jason,
>>>>>
>>>>> we use this lock to protect the eventfd_ctx and irq from race conditions,
>>>> We don't support irq notification from vDPA device driver in this version,
>>>> do we still have race condition?
>>>>
>>>> Thanks
>>> Jason I'm not sure what you are trying to say here.
>>
>> I meant we change the API from V4 so driver won't notify us if irq is
>> changed.
>>
>> Then it looks to me there's no need for the ctx_lock, everyhing could be
>> synchronized with vq mutex.
>>
>> Thanks
> Jason do you want to post a cleanup patch simplifying code along these
> lines?


Ling Shan promised to post a patch to fix this.

Thanks


>
> Thanks,
>
>
>>>

Patch

diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index a54b60d6623f..df3cf386b0cd 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -96,7 +96,7 @@  static void handle_vq_kick(struct vhost_work *work)
 static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
 {
 	struct vhost_virtqueue *vq = private;
-	struct eventfd_ctx *call_ctx = vq->call_ctx;
+	struct eventfd_ctx *call_ctx = vq->call_ctx.ctx;
 
 	if (call_ctx)
 		eventfd_signal(call_ctx, 1);
@@ -382,7 +382,7 @@  static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
 		break;
 
 	case VHOST_SET_VRING_CALL:
-		if (vq->call_ctx) {
+		if (vq->call_ctx.ctx) {
 			cb.callback = vhost_vdpa_virtqueue_cb;
 			cb.private = vq;
 		} else {
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index d7b8df3edffc..9f1a845a9302 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -298,6 +298,13 @@  static void vhost_vq_meta_reset(struct vhost_dev *d)
 		__vhost_vq_meta_reset(d->vqs[i]);
 }
 
+static void vhost_vring_call_reset(struct vhost_vring_call *call_ctx)
+{
+	call_ctx->ctx = NULL;
+	memset(&call_ctx->producer, 0x0, sizeof(struct irq_bypass_producer));
+	spin_lock_init(&call_ctx->ctx_lock);
+}
+
 static void vhost_vq_reset(struct vhost_dev *dev,
 			   struct vhost_virtqueue *vq)
 {
@@ -319,13 +326,13 @@  static void vhost_vq_reset(struct vhost_dev *dev,
 	vq->log_base = NULL;
 	vq->error_ctx = NULL;
 	vq->kick = NULL;
-	vq->call_ctx = NULL;
 	vq->log_ctx = NULL;
 	vhost_reset_is_le(vq);
 	vhost_disable_cross_endian(vq);
 	vq->busyloop_timeout = 0;
 	vq->umem = NULL;
 	vq->iotlb = NULL;
+	vhost_vring_call_reset(&vq->call_ctx);
 	__vhost_vq_meta_reset(vq);
 }
 
@@ -685,8 +692,8 @@  void vhost_dev_cleanup(struct vhost_dev *dev)
 			eventfd_ctx_put(dev->vqs[i]->error_ctx);
 		if (dev->vqs[i]->kick)
 			fput(dev->vqs[i]->kick);
-		if (dev->vqs[i]->call_ctx)
-			eventfd_ctx_put(dev->vqs[i]->call_ctx);
+		if (dev->vqs[i]->call_ctx.ctx)
+			eventfd_ctx_put(dev->vqs[i]->call_ctx.ctx);
 		vhost_vq_reset(dev, dev->vqs[i]);
 	}
 	vhost_dev_free_iovecs(dev);
@@ -1629,7 +1636,10 @@  long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
 			r = PTR_ERR(ctx);
 			break;
 		}
-		swap(ctx, vq->call_ctx);
+
+		spin_lock(&vq->call_ctx.ctx_lock);
+		swap(ctx, vq->call_ctx.ctx);
+		spin_unlock(&vq->call_ctx.ctx_lock);
 		break;
 	case VHOST_SET_VRING_ERR:
 		if (copy_from_user(&f, argp, sizeof f)) {
@@ -2440,8 +2450,8 @@  static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 {
 	/* Signal the Guest tell them we used something up. */
-	if (vq->call_ctx && vhost_notify(dev, vq))
-		eventfd_signal(vq->call_ctx, 1);
+	if (vq->call_ctx.ctx && vhost_notify(dev, vq))
+		eventfd_signal(vq->call_ctx.ctx, 1);
 }
 EXPORT_SYMBOL_GPL(vhost_signal);
 
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index c8e96a095d3b..38eb1aa3b68d 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -13,6 +13,7 @@ 
 #include <linux/virtio_ring.h>
 #include <linux/atomic.h>
 #include <linux/vhost_iotlb.h>
+#include <linux/irqbypass.h>
 
 struct vhost_work;
 typedef void (*vhost_work_fn_t)(struct vhost_work *work);
@@ -60,6 +61,12 @@  enum vhost_uaddr_type {
 	VHOST_NUM_ADDRS = 3,
 };
 
+struct vhost_vring_call {
+	struct eventfd_ctx *ctx;
+	struct irq_bypass_producer producer;
+	spinlock_t ctx_lock;
+};
+
 /* The virtqueue structure describes a queue attached to a device. */
 struct vhost_virtqueue {
 	struct vhost_dev *dev;
@@ -72,7 +79,7 @@  struct vhost_virtqueue {
 	vring_used_t __user *used;
 	const struct vhost_iotlb_map *meta_iotlb[VHOST_NUM_ADDRS];
 	struct file *kick;
-	struct eventfd_ctx *call_ctx;
+	struct vhost_vring_call call_ctx;
 	struct eventfd_ctx *error_ctx;
 	struct eventfd_ctx *log_ctx;