
[v5,09/31] vhost: fix calling vhost_dev_cleanup() after vhost_dev_init()

Message ID 5796072A.8080208@samsung.com
State New

Commit Message

Ilya Maximets July 25, 2016, 12:33 p.m. UTC
On 21.07.2016 11:57, Marc-André Lureau wrote:
> From: Marc-André Lureau <marcandre.lureau@redhat.com>
> 
> vhost_net_init() calls vhost_dev_init() and, in case of failure, calls
> vhost_dev_cleanup() directly. However, the structure is already
> partially cleaned up on error, so calling vhost_dev_cleanup() again will
> call vhost_virtqueue_cleanup() on already-cleaned queues, causing a
> potential double-close. Instead, adjust dev->nvqs and simplify
> vhost_dev_init() so that it calls vhost_dev_cleanup() rather than
> vhost_virtqueue_cleanup().
> 
> Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
> ---
>  hw/virtio/vhost.c | 13 ++++---------
>  1 file changed, 4 insertions(+), 9 deletions(-)
> 
> diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
> index 9400b47..c61302a 100644
> --- a/hw/virtio/vhost.c
> +++ b/hw/virtio/vhost.c
> @@ -1047,7 +1047,8 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
>      for (i = 0; i < hdev->nvqs; ++i) {
>          r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
>          if (r < 0) {
> -            goto fail_vq;
> +            hdev->nvqs = i;
> +            goto fail;
>          }
>      }
>  
> @@ -1104,19 +1105,13 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
>      memory_listener_register(&hdev->memory_listener, &address_space_memory);
>      QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
>      return 0;
> +
>  fail_busyloop:
>      while (--i >= 0) {
>          vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
>      }
> -    i = hdev->nvqs;
> -fail_vq:
> -    while (--i >= 0) {
> -        vhost_virtqueue_cleanup(hdev->vqs + i);
> -    }
>  fail:
> -    r = -errno;
> -    hdev->vhost_ops->vhost_backend_cleanup(hdev);
> -    QLIST_REMOVE(hdev, entry);
> +    vhost_dev_cleanup(hdev);
>      return r;
>  }
>  
> 

This patch introduces closing of fd 0 on backend init failure, or on any
other error before the virtqueue init loop, because
'vhost_virtqueue_cleanup()' is called on uninitialized virtqueues.
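
To make the failure mode concrete, here is a minimal compilable sketch
(hypothetical, simplified struct and names; not QEMU's actual
vhost_virtqueue): a zero-initialized queue still holds fd 0 in its
notifier fields, so "cleaning" it ends up calling close(0) and silently
closes stdin.

----------------------------------------------------------------------
#include <string.h>
#include <unistd.h>

struct vq_sketch {
    int kick_fd;                /* set to a real fd by a successful init */
    int call_fd;
};

static void vq_cleanup_sketch(struct vq_sketch *vq)
{
    close(vq->kick_fd);
    close(vq->call_fd);
}

int main(void)
{
    struct vq_sketch vq;

    memset(&vq, 0, sizeof(vq)); /* the queue was never initialized */
    vq_cleanup_sketch(&vq);     /* both fds are 0: close(0) hits stdin */
    return 0;
}
----------------------------------------------------------------------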

I'm suggesting the following fixup:

----------------------------------------------------------------------

@@ -1069,10 +1071,9 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
         goto fail;
     }
 
-    for (i = 0; i < hdev->nvqs; ++i) {
+    for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) {
         r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
         if (r < 0) {
-            hdev->nvqs = i;
             goto fail;
         }
     }
@@ -1136,6 +1137,7 @@ fail_busyloop:
         vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
     }
 fail:
+    hdev->nvqs = n_initialized_vqs;
     vhost_dev_cleanup(hdev);
     return r;
 }
----------------------------------------------------------------------
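
In general form, the fixup is the "count what you initialized" cleanup
pattern: track how many resources were actually set up and let a single
failure label consume that count, instead of patching the total at every
error site. A self-contained sketch of the pattern (generic code, not
QEMU's):

----------------------------------------------------------------------
#include <stdio.h>

struct res_sketch { int initialized; };

/* Pretend the third init fails, like a failing vhost_virtqueue_init(). */
static int res_init_sketch(struct res_sketch *r, int i)
{
    if (i == 2) {
        return -1;
    }
    r->initialized = 1;
    return 0;
}

static void res_cleanup_sketch(struct res_sketch *r)
{
    r->initialized = 0;
}

static int init_all_sketch(struct res_sketch *v, int want, int *count)
{
    int i, r, n_initialized = 0;

    for (i = 0; i < want; ++i, ++n_initialized) {
        r = res_init_sketch(&v[i], i);
        if (r < 0) {
            goto fail;
        }
    }
    *count = want;
    return 0;

fail:
    *count = n_initialized;     /* clamp to what actually succeeded */
    for (i = 0; i < *count; ++i) {
        res_cleanup_sketch(&v[i]);
    }
    return r;
}

int main(void)
{
    struct res_sketch v[4] = { { 0 } };
    int n = 0;

    /* Prints r=-1 after cleaning up exactly the 2 initialized entries. */
    printf("r=%d, n=%d\n", init_all_sketch(v, 4, &n), n);
    return 0;
}
----------------------------------------------------------------------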

Best regards, Ilya Maximets.

Comments

Marc-André Lureau July 25, 2016, 12:45 p.m. UTC | #1
Hi

----- Original Message -----
> On 21.07.2016 11:57, Marc-André Lureau wrote:
> > From: Marc-André Lureau <marcandre.lureau@redhat.com>
> > 
> > vhost_net_init() calls vhost_dev_init() and, in case of failure, calls
> > vhost_dev_cleanup() directly. However, the structure is already
> > partially cleaned up on error, so calling vhost_dev_cleanup() again will
> > call vhost_virtqueue_cleanup() on already-cleaned queues, causing a
> > potential double-close. Instead, adjust dev->nvqs and simplify
> > vhost_dev_init() so that it calls vhost_dev_cleanup() rather than
> > vhost_virtqueue_cleanup().
> > 
> > Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
> > ---
> >  hw/virtio/vhost.c | 13 ++++---------
> >  1 file changed, 4 insertions(+), 9 deletions(-)
> > 
> > diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
> > index 9400b47..c61302a 100644
> > --- a/hw/virtio/vhost.c
> > +++ b/hw/virtio/vhost.c
> > @@ -1047,7 +1047,8 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
> >      for (i = 0; i < hdev->nvqs; ++i) {
> >          r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
> >          if (r < 0) {
> > -            goto fail_vq;
> > +            hdev->nvqs = i;
> > +            goto fail;
> >          }
> >      }
> >  
> > @@ -1104,19 +1105,13 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
> >      memory_listener_register(&hdev->memory_listener, &address_space_memory);
> >      QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
> >      return 0;
> > +
> >  fail_busyloop:
> >      while (--i >= 0) {
> >          vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
> >      }
> > -    i = hdev->nvqs;
> > -fail_vq:
> > -    while (--i >= 0) {
> > -        vhost_virtqueue_cleanup(hdev->vqs + i);
> > -    }
> >  fail:
> > -    r = -errno;
> > -    hdev->vhost_ops->vhost_backend_cleanup(hdev);
> > -    QLIST_REMOVE(hdev, entry);
> > +    vhost_dev_cleanup(hdev);
> >      return r;
> >  }
> >  
> > 
> 
> This patch introduces closing of fd 0 on backend init failure, or on any
> other error before the virtqueue init loop, because
> 'vhost_virtqueue_cleanup()' is called on uninitialized virtqueues.
> 
> I'm suggesting the following fixup:
> 
> ----------------------------------------------------------------------
> diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
> index 6175d8b..d7428c5 100644
> --- a/hw/virtio/vhost.c
> +++ b/hw/virtio/vhost.c
> @@ -1038,8 +1038,9 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
>                     VhostBackendType backend_type, uint32_t busyloop_timeout)
>  {
>      uint64_t features;
> -    int i, r;
> +    int i, r, n_initialized_vqs;
>  
> +    n_initialized_vqs = 0;
>      hdev->migration_blocker = NULL;
>  
>      r = vhost_set_backend_type(hdev, backend_type);
> 
> @@ -1069,10 +1071,9 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
>          goto fail;
>      }
>  
> -    for (i = 0; i < hdev->nvqs; ++i) {
> +    for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) {
>          r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
>          if (r < 0) {
> -            hdev->nvqs = i;

Isn't that assignment doing the same thing?

btw, thanks for the review

>              goto fail;
>          }
>      }
> @@ -1136,6 +1137,7 @@ fail_busyloop:
>          vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
>      }
>  fail:
> +    hdev->nvqs = n_initialized_vqs;
>      vhost_dev_cleanup(hdev);
>      return r;
>  }
> ----------------------------------------------------------------------
> 
> Best regards, Ilya Maximets.
>
Ilya Maximets July 25, 2016, 12:52 p.m. UTC | #2
On 25.07.2016 15:45, Marc-André Lureau wrote:
> Hi
> 
> ----- Original Message -----
>> On 21.07.2016 11:57, Marc-André Lureau wrote:
>>> From: Marc-André Lureau <marcandre.lureau@redhat.com>
>>>
>>> vhost_net_init() calls vhost_dev_init() and, in case of failure, calls
>>> vhost_dev_cleanup() directly. However, the structure is already
>>> partially cleaned up on error, so calling vhost_dev_cleanup() again will
>>> call vhost_virtqueue_cleanup() on already-cleaned queues, causing a
>>> potential double-close. Instead, adjust dev->nvqs and simplify
>>> vhost_dev_init() so that it calls vhost_dev_cleanup() rather than
>>> vhost_virtqueue_cleanup().
>>>
>>> Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
>>> ---
>>>  hw/virtio/vhost.c | 13 ++++---------
>>>  1 file changed, 4 insertions(+), 9 deletions(-)
>>>
>>> diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
>>> index 9400b47..c61302a 100644
>>> --- a/hw/virtio/vhost.c
>>> +++ b/hw/virtio/vhost.c
>>> @@ -1047,7 +1047,8 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
>>>      for (i = 0; i < hdev->nvqs; ++i) {
>>>          r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
>>>          if (r < 0) {
>>> -            goto fail_vq;
>>> +            hdev->nvqs = i;
>>> +            goto fail;
>>>          }
>>>      }
>>>  
>>> @@ -1104,19 +1105,13 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
>>>      memory_listener_register(&hdev->memory_listener, &address_space_memory);
>>>      QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
>>>      return 0;
>>> +
>>>  fail_busyloop:
>>>      while (--i >= 0) {
>>>          vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
>>>      }
>>> -    i = hdev->nvqs;
>>> -fail_vq:
>>> -    while (--i >= 0) {
>>> -        vhost_virtqueue_cleanup(hdev->vqs + i);
>>> -    }
>>>  fail:
>>> -    r = -errno;
>>> -    hdev->vhost_ops->vhost_backend_cleanup(hdev);
>>> -    QLIST_REMOVE(hdev, entry);
>>> +    vhost_dev_cleanup(hdev);
>>>      return r;
>>>  }
>>>  
>>>
>>
>> This patch introduces closing of fd 0 on backend init failure, or on any
>> other error before the virtqueue init loop, because
>> 'vhost_virtqueue_cleanup()' is called on uninitialized virtqueues.
>>
>> I'm suggesting the following fixup:
>>
>> ----------------------------------------------------------------------
>> diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
>> index 6175d8b..d7428c5 100644
>> --- a/hw/virtio/vhost.c
>> +++ b/hw/virtio/vhost.c
>> @@ -1038,8 +1038,9 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
>>                     VhostBackendType backend_type, uint32_t busyloop_timeout)
>>  {
>>      uint64_t features;
>> -    int i, r;
>> +    int i, r, n_initialized_vqs;
>>  
>> +    n_initialized_vqs = 0;
>>      hdev->migration_blocker = NULL;
>>  
>>      r = vhost_set_backend_type(hdev, backend_type);
>>
>> @@ -1069,10 +1071,9 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
>>          goto fail;
>>      }
>>  
>> -    for (i = 0; i < hdev->nvqs; ++i) {
>> +    for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) {
>>          r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
>>          if (r < 0) {
>> -            hdev->nvqs = i;
> 
> Isn't that assignment doing the same thing?

Yes.
But then an assignment to zero (hdev->nvqs = 0) would be required before
all the previous 'goto fail;' statements. I think that's not a clean
solution.

> btw, thanks for the review
> 
>>              goto fail;
>>          }
>>      }
>> @@ -1136,6 +1137,7 @@ fail_busyloop:
>>          vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
>>      }
>>  fail:
>> +    hdev->nvqs = n_initialized_vqs;
>>      vhost_dev_cleanup(hdev);
>>      return r;
>>  }
>> ----------------------------------------------------------------------
>>
>> Best regards, Ilya Maximets.
>>
> 
>
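
Ilya's objection in #2, illustrated (an editorial sketch with hypothetical
helper names, not the real code): with only the in-loop "hdev->nvqs = i"
assignment, a failure before the loop reaches fail: while nvqs still holds
the caller's value, so every earlier goto would also need an explicit
"hdev->nvqs = 0". The counter clamps once, on every path.

----------------------------------------------------------------------
#include <stdio.h>

struct dev_sketch { int nvqs; };

static int backend_init_sketch(void) { return -1; } /* early failure */
static int vq_init_sketch(int i) { (void)i; return 0; }
static void dev_cleanup_sketch(struct dev_sketch *d) { (void)d; }

static int dev_init_sketch(struct dev_sketch *d, int want)
{
    int i, r, n_initialized_vqs = 0;

    d->nvqs = want;

    r = backend_init_sketch();
    if (r < 0) {
        goto fail;               /* the vq loop below never runs */
    }

    for (i = 0; i < d->nvqs; ++i, ++n_initialized_vqs) {
        r = vq_init_sketch(i);
        if (r < 0) {
            goto fail;
        }
    }
    return 0;

fail:
    d->nvqs = n_initialized_vqs; /* 0 here, so cleanup touches no queue */
    dev_cleanup_sketch(d);
    return r;
}

int main(void)
{
    struct dev_sketch d;

    /* Prints r=-1, nvqs=0: no uninitialized queue is ever cleaned. */
    printf("r=%d, nvqs=%d\n", dev_init_sketch(&d, 4), d.nvqs);
    return 0;
}
----------------------------------------------------------------------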
Marc-André Lureau July 25, 2016, 1:05 p.m. UTC | #3
Hi

----- Original Message -----
> On 25.07.2016 15:45, Marc-André Lureau wrote:
> > Hi
> > 
> > ----- Original Message -----
> >> On 21.07.2016 11:57, Marc-André Lureau wrote:
> >>> From: Marc-André Lureau <marcandre.lureau@redhat.com>
> >>>
> >>> vhost_net_init() calls vhost_dev_init() and, in case of failure, calls
> >>> vhost_dev_cleanup() directly. However, the structure is already
> >>> partially cleaned up on error, so calling vhost_dev_cleanup() again will
> >>> call vhost_virtqueue_cleanup() on already-cleaned queues, causing a
> >>> potential double-close. Instead, adjust dev->nvqs and simplify
> >>> vhost_dev_init() so that it calls vhost_dev_cleanup() rather than
> >>> vhost_virtqueue_cleanup().
> >>>
> >>> Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
> >>> ---
> >>>  hw/virtio/vhost.c | 13 ++++---------
> >>>  1 file changed, 4 insertions(+), 9 deletions(-)
> >>>
> >>> diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
> >>> index 9400b47..c61302a 100644
> >>> --- a/hw/virtio/vhost.c
> >>> +++ b/hw/virtio/vhost.c
> >>> @@ -1047,7 +1047,8 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
> >>>      for (i = 0; i < hdev->nvqs; ++i) {
> >>>          r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
> >>>          if (r < 0) {
> >>> -            goto fail_vq;
> >>> +            hdev->nvqs = i;
> >>> +            goto fail;
> >>>          }
> >>>      }
> >>>  
> >>> @@ -1104,19 +1105,13 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
> >>>      memory_listener_register(&hdev->memory_listener, &address_space_memory);
> >>>      QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
> >>>      return 0;
> >>> +
> >>>  fail_busyloop:
> >>>      while (--i >= 0) {
> >>>          vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
> >>>      }
> >>> -    i = hdev->nvqs;
> >>> -fail_vq:
> >>> -    while (--i >= 0) {
> >>> -        vhost_virtqueue_cleanup(hdev->vqs + i);
> >>> -    }
> >>>  fail:
> >>> -    r = -errno;
> >>> -    hdev->vhost_ops->vhost_backend_cleanup(hdev);
> >>> -    QLIST_REMOVE(hdev, entry);
> >>> +    vhost_dev_cleanup(hdev);
> >>>      return r;
> >>>  }
> >>>  
> >>>
> >>
> >> This patch introduces closing of fd 0 on backend init failure, or on any
> >> other error before the virtqueue init loop, because
> >> 'vhost_virtqueue_cleanup()' is called on uninitialized virtqueues.
> >>
> >> I'm suggesting the following fixup:
> >>
> >> ----------------------------------------------------------------------
> >> diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
> >> index 6175d8b..d7428c5 100644
> >> --- a/hw/virtio/vhost.c
> >> +++ b/hw/virtio/vhost.c
> >> @@ -1038,8 +1038,9 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
> >>                     VhostBackendType backend_type, uint32_t busyloop_timeout)
> >>  {
> >>      uint64_t features;
> >> -    int i, r;
> >> +    int i, r, n_initialized_vqs;
> >>  
> >> +    n_initialized_vqs = 0;
> >>      hdev->migration_blocker = NULL;
> >>  
> >>      r = vhost_set_backend_type(hdev, backend_type);
> >>
> >> @@ -1069,10 +1071,9 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
> >>          goto fail;
> >>      }
> >>  
> >> -    for (i = 0; i < hdev->nvqs; ++i) {
> >> +    for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) {
> >>          r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
> >>          if (r < 0) {
> >> -            hdev->nvqs = i;
> > 
> > Isn't that assignment doing the same thing?
> 
> Yes.
> But then an assignment to zero (hdev->nvqs = 0) would be required before
> all the previous 'goto fail;' statements. I think that's not a clean
> solution.
> 

Good point, I'll squash your change. Should I add your Signed-off-by?

thanks

> > btw, thanks for the review
> > 
> >>              goto fail;
> >>          }
> >>      }
> >> @@ -1136,6 +1137,7 @@ fail_busyloop:
> >>          vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
> >>      }
> >>  fail:
> >> +    hdev->nvqs = n_initialized_vqs;
> >>      vhost_dev_cleanup(hdev);
> >>      return r;
> >>  }
> >> ----------------------------------------------------------------------
> >>
> >> Best regards, Ilya Maximets.
> >>
> > 
> > 
>
Ilya Maximets July 25, 2016, 1:14 p.m. UTC | #4
On 25.07.2016 16:05, Marc-André Lureau wrote:
> Hi
> 
> ----- Original Message -----
>> On 25.07.2016 15:45, Marc-André Lureau wrote:
>>> Hi
>>>
>>> ----- Original Message -----
>>>> On 21.07.2016 11:57, Marc-André Lureau wrote:
>>>>> From: Marc-André Lureau <marcandre.lureau@redhat.com>
>>>>>
>>>>> vhost_net_init() calls vhost_dev_init() and, in case of failure, calls
>>>>> vhost_dev_cleanup() directly. However, the structure is already
>>>>> partially cleaned up on error, so calling vhost_dev_cleanup() again will
>>>>> call vhost_virtqueue_cleanup() on already-cleaned queues, causing a
>>>>> potential double-close. Instead, adjust dev->nvqs and simplify
>>>>> vhost_dev_init() so that it calls vhost_dev_cleanup() rather than
>>>>> vhost_virtqueue_cleanup().
>>>>>
>>>>> Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
>>>>> ---
>>>>>  hw/virtio/vhost.c | 13 ++++---------
>>>>>  1 file changed, 4 insertions(+), 9 deletions(-)
>>>>>
>>>>> diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
>>>>> index 9400b47..c61302a 100644
>>>>> --- a/hw/virtio/vhost.c
>>>>> +++ b/hw/virtio/vhost.c
>>>>> @@ -1047,7 +1047,8 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
>>>>>      for (i = 0; i < hdev->nvqs; ++i) {
>>>>>          r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
>>>>>          if (r < 0) {
>>>>> -            goto fail_vq;
>>>>> +            hdev->nvqs = i;
>>>>> +            goto fail;
>>>>>          }
>>>>>      }
>>>>>  
>>>>> @@ -1104,19 +1105,13 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
>>>>>      memory_listener_register(&hdev->memory_listener, &address_space_memory);
>>>>>      QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
>>>>>      return 0;
>>>>> +
>>>>>  fail_busyloop:
>>>>>      while (--i >= 0) {
>>>>>          vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
>>>>>      }
>>>>> -    i = hdev->nvqs;
>>>>> -fail_vq:
>>>>> -    while (--i >= 0) {
>>>>> -        vhost_virtqueue_cleanup(hdev->vqs + i);
>>>>> -    }
>>>>>  fail:
>>>>> -    r = -errno;
>>>>> -    hdev->vhost_ops->vhost_backend_cleanup(hdev);
>>>>> -    QLIST_REMOVE(hdev, entry);
>>>>> +    vhost_dev_cleanup(hdev);
>>>>>      return r;
>>>>>  }
>>>>>  
>>>>>
>>>>
>>>> This patch introduces closing of fd 0 on backend init failure, or on any
>>>> other error before the virtqueue init loop, because
>>>> 'vhost_virtqueue_cleanup()' is called on uninitialized virtqueues.
>>>>
>>>> I'm suggesting the following fixup:
>>>>
>>>> ----------------------------------------------------------------------
>>>> diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
>>>> index 6175d8b..d7428c5 100644
>>>> --- a/hw/virtio/vhost.c
>>>> +++ b/hw/virtio/vhost.c
>>>> @@ -1038,8 +1038,9 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
>>>>                     VhostBackendType backend_type, uint32_t busyloop_timeout)
>>>>  {
>>>>      uint64_t features;
>>>> -    int i, r;
>>>> +    int i, r, n_initialized_vqs;
>>>>  
>>>> +    n_initialized_vqs = 0;
>>>>      hdev->migration_blocker = NULL;
>>>>  
>>>>      r = vhost_set_backend_type(hdev, backend_type);
>>>>
>>>> @@ -1069,10 +1071,9 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
>>>>          goto fail;
>>>>      }
>>>>  
>>>> -    for (i = 0; i < hdev->nvqs; ++i) {
>>>> +    for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) {
>>>>          r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
>>>>          if (r < 0) {
>>>> -            hdev->nvqs = i;
>>>
>>> Isn't that assignment doing the same thing?
>>
>> Yes.
>> But then an assignment to zero (hdev->nvqs = 0) would be required before
>> all the previous 'goto fail;' statements. I think that's not a clean
>> solution.
>>
> 
> Good point, I'll squash your change.

Thanks for fixing it.

> Should I add your Signed-off-by?

I don't mind if you want to.

Best regards, Ilya Maximets.

Patch

diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index 6175d8b..d7428c5 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -1038,8 +1038,9 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                    VhostBackendType backend_type, uint32_t busyloop_timeout)
 {
     uint64_t features;
-    int i, r;
+    int i, r, n_initialized_vqs;
 
+    n_initialized_vqs = 0;
     hdev->migration_blocker = NULL;
 
     r = vhost_set_backend_type(hdev, backend_type);