[2/3] net: Flush queues when runstate changes back to running

Message ID 1407996838-10212-3-git-send-email-zhang.zhanghailiang@huawei.com
State New

Commit Message

Zhanghailiang Aug. 14, 2014, 6:13 a.m. UTC
When the runstate changes back to running, we definitely need to flush
queues to get packets flowing again.

Here we implement this in the net layer:
(1) Add a member 'VMChangeStateEntry *vmstate' to struct NICState,
which will listen for VM runstate changes.
(2) Register a handler function for VM state changes.
When the VM changes back to running, we flush all queues in the callback function.

Signed-off-by: zhanghailiang <zhang.zhanghailiang@huawei.com>
---
 include/net/net.h |  1 +
 net/net.c         | 26 ++++++++++++++++++++++++++
 2 files changed, 27 insertions(+)
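
For context, the runstate notifier API this patch builds on looks like this
in include/sysemu/sysemu.h in QEMU of this era (shown as a reader aid, not
part of the patch):

    typedef void VMChangeStateHandler(void *opaque, int running, RunState state);

    VMChangeStateEntry *qemu_add_vm_change_state_handler(VMChangeStateHandler *cb,
                                                         void *opaque);
    void qemu_del_vm_change_state_handler(VMChangeStateEntry *e);

The handler fires on every runstate transition, with running non-zero when
the VM enters a running state; the patch flushes the NIC queues at exactly
that point.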

Comments

Gonglei (Arei) Aug. 14, 2014, 7:12 a.m. UTC | #1
Hi,

> Subject: [Qemu-devel] [PATCH 2/3] net: Flush queues when runstate changes
> back to running
> 
> When the runstate changes back to running, we definitely need to flush
> queues to get packets flowing again.
> 
> Here we implement this in the net layer:
> (1) Add a member 'VMChangeStateEntry *vmstate' to struct NICState,
> which will listen for VM runstate changes.

Will this change block migration between different QEMU versions?

> (2) Register a handler function for VM state changes.
> When the VM changes back to running, we flush all queues in the callback function.
> 
> Signed-off-by: zhanghailiang <zhang.zhanghailiang@huawei.com>
> ---
>  include/net/net.h |  1 +
>  net/net.c         | 26 ++++++++++++++++++++++++++
>  2 files changed, 27 insertions(+)
> 
> diff --git a/include/net/net.h b/include/net/net.h
> index 312f728..a294277 100644
> --- a/include/net/net.h
> +++ b/include/net/net.h
> @@ -97,6 +97,7 @@ typedef struct NICState {
>      NICConf *conf;
>      void *opaque;
>      bool peer_deleted;
> +    VMChangeStateEntry *vmstate;
>  } NICState;
> 
>  NetClientState *qemu_find_netdev(const char *id);
> diff --git a/net/net.c b/net/net.c
> index 5bb2821..506e58f 100644
> --- a/net/net.c
> +++ b/net/net.c
> @@ -242,6 +242,29 @@ NetClientState *qemu_new_net_client(NetClientInfo *info,
>      return nc;
>  }
> 
> +static void nic_vmstate_change_handler(void *opaque,
> +                                       int running,
> +                                       RunState state)
> +{
> +    NICState *nic = opaque;
> +    NetClientState *nc;
> +    int i, queues;
> +
> +    if (!running) {
> +        return;
> +    }
> +
> +    queues =  MAX(1, nic->conf->peers.queues);
              ^
A superfluous space.
 
Best regards,
-Gonglei
Zhanghailiang Aug. 14, 2014, 8:24 a.m. UTC | #2
On 2014/8/14 15:12, Gonglei (Arei) wrote:
> Hi,
>
>> Subject: [Qemu-devel] [PATCH 2/3] net: Flush queues when runstate changes
>> back to running
>>
>> When the runstate changes back to running, we definitely need to flush
>> queues to get packets flowing again.
>>
>> Here we implement this in the net layer:
>> (1) Add a member 'VMChangeStateEntry *vmstate' to struct NICState,
>> which will listen for VM runstate changes.
>
> Will this change block migration between different QEMU versions?
>

No, I have tested migration between qemu-1.5.1 and a new QEMU with these
patches merged; everything seems OK!

>> (2) Register a handler function for VM state changes.
>> When the VM changes back to running, we flush all queues in the callback function.
>>
>> Signed-off-by: zhanghailiang <zhang.zhanghailiang@huawei.com>
>> ---
>>   include/net/net.h |  1 +
>>   net/net.c         | 26 ++++++++++++++++++++++++++
>>   2 files changed, 27 insertions(+)
>>
>> diff --git a/include/net/net.h b/include/net/net.h
>> index 312f728..a294277 100644
>> --- a/include/net/net.h
>> +++ b/include/net/net.h
>> @@ -97,6 +97,7 @@ typedef struct NICState {
>>       NICConf *conf;
>>       void *opaque;
>>       bool peer_deleted;
>> +    VMChangeStateEntry *vmstate;
>>   } NICState;
>>
>>   NetClientState *qemu_find_netdev(const char *id);
>> diff --git a/net/net.c b/net/net.c
>> index 5bb2821..506e58f 100644
>> --- a/net/net.c
>> +++ b/net/net.c
>> @@ -242,6 +242,29 @@ NetClientState *qemu_new_net_client(NetClientInfo *info,
>>       return nc;
>>   }
>>
>> +static void nic_vmstate_change_handler(void *opaque,
>> +                                       int running,
>> +                                       RunState state)
>> +{
>> +    NICState *nic = opaque;
>> +    NetClientState *nc;
>> +    int i, queues;
>> +
>> +    if (!running) {
>> +        return;
>> +    }
>> +
>> +    queues =  MAX(1, nic->conf->peers.queues);
>                ^
> A superfluous space.
>

Yes, good catch, I will modify this. Thanks.
Michael S. Tsirkin Aug. 14, 2014, 10:05 a.m. UTC | #3
On Thu, Aug 14, 2014 at 02:13:57PM +0800, zhanghailiang wrote:
> When the runstate changes back to running, we definitely need to flush
> queues to get packets flowing again.
> 
> Here we implement this in the net layer:
> (1) Add a member 'VMChangeStateEntry *vmstate' to struct NICState,
> which will listen for VM runstate changes.
> (2) Register a handler function for VM state changes.
> When the VM changes back to running, we flush all queues in the callback function.

OK, but squash this together with patch 1; otherwise things are broken
after patch 1 alone, which breaks git bisect.

[...]
Michael S. Tsirkin Aug. 14, 2014, 10:09 a.m. UTC | #4
On Thu, Aug 14, 2014 at 02:13:57PM +0800, zhanghailiang wrote:
> When the runstate changes back to running, we definitely need to flush
> queues to get packets flowing again.
> 
> Here we implement this in the net layer:
> (1) Add a member 'VMChangeStateEntry *vmstate' to struct NICState,
> which will listen for VM runstate changes.
> (2) Register a handler function for VM state changes.
> When the VM changes back to running, we flush all queues in the callback function.
> 
> Signed-off-by: zhanghailiang <zhang.zhanghailiang@huawei.com>

Hmm, looks like the virtio patch will need to be squashed as well?

[...]
Zhanghailiang Aug. 18, 2014, 12:45 a.m. UTC | #5
On 2014/8/14 18:05, Michael S. Tsirkin wrote:
> On Thu, Aug 14, 2014 at 02:13:57PM +0800, zhanghailiang wrote:
>> When the runstate changes back to running, we definitely need to flush
>> queues to get packets flowing again.
>>
>> Here we implement this in the net layer:
>> (1) Add a member 'VMChangeStateEntry *vmstate' to struct NICState,
>> which will listen for VM runstate changes.
>> (2) Register a handler function for VM state changes.
>> When the VM changes back to running, we flush all queues in the callback function.
>
> OK, but squash this together with patch 1; otherwise things are broken
> after patch 1 alone, which breaks git bisect.
>

Hmm, I will put them together into one patch. Thanks for your review.

[...]
Zhanghailiang Aug. 18, 2014, 12:46 a.m. UTC | #6
On 2014/8/14 18:09, Michael S. Tsirkin wrote:
> On Thu, Aug 14, 2014 at 02:13:57PM +0800, zhanghailiang wrote:
>> When the runstate changes back to running, we definitely need to flush
>> queues to get packets flowing again.
>>
>> Here we implement this in the net layer:
>> (1) Add a member 'VMChangeStateEntry *vmstate' to struct NICState,
>> which will listen for VM runstate changes.
>> (2) Register a handler function for VM state changes.
>> When the VM changes back to running, we flush all queues in the callback function.
>>
>> Signed-off-by: zhanghailiang <zhang.zhanghailiang@huawei.com>
>
> Hmm, looks like the virtio patch will need to be squashed as well?
>

OK, thanks.

[...]

Patch

diff --git a/include/net/net.h b/include/net/net.h
index 312f728..a294277 100644
--- a/include/net/net.h
+++ b/include/net/net.h
@@ -97,6 +97,7 @@ typedef struct NICState {
     NICConf *conf;
     void *opaque;
     bool peer_deleted;
+    VMChangeStateEntry *vmstate;
 } NICState;
 
 NetClientState *qemu_find_netdev(const char *id);
diff --git a/net/net.c b/net/net.c
index 5bb2821..506e58f 100644
--- a/net/net.c
+++ b/net/net.c
@@ -242,6 +242,29 @@ NetClientState *qemu_new_net_client(NetClientInfo *info,
     return nc;
 }
 
+static void nic_vmstate_change_handler(void *opaque,
+                                       int running,
+                                       RunState state)
+{
+    NICState *nic = opaque;
+    NetClientState *nc;
+    int i, queues;
+
+    if (!running) {
+        return;
+    }
+
+    queues =  MAX(1, nic->conf->peers.queues);
+    for (i = 0; i < queues; i++) {
+        nc = &nic->ncs[i];
+        if (nc->receive_disabled
+            || (nc->info->can_receive && !nc->info->can_receive(nc))) {
+            continue;
+        }
+        qemu_flush_queued_packets(nc);
+    }
+}
+
 NICState *qemu_new_nic(NetClientInfo *info,
                        NICConf *conf,
                        const char *model,
@@ -259,6 +282,8 @@ NICState *qemu_new_nic(NetClientInfo *info,
     nic->ncs = (void *)nic + info->size;
     nic->conf = conf;
     nic->opaque = opaque;
+    nic->vmstate = qemu_add_vm_change_state_handler(nic_vmstate_change_handler,
+                                                    nic);
 
     for (i = 0; i < queues; i++) {
         qemu_net_client_setup(&nic->ncs[i], info, peers[i], model, name,
@@ -379,6 +404,7 @@ void qemu_del_nic(NICState *nic)
         qemu_free_net_client(nc);
     }
 
+    qemu_del_vm_change_state_handler(nic->vmstate);
     g_free(nic);
 }
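
To illustrate how the new handler interacts with a device model, here is a
minimal, hypothetical can_receive callback. The MyNICState type, the
my_nic_* name, and the rx_ring_free field are invented for this sketch;
real NICs (virtio-net, e1000, ...) implement their own versions of this
check:

    /* Hypothetical device state, for illustration only. */
    typedef struct MyNICState {
        NICState *nic;
        int rx_ring_free;   /* free guest RX descriptors */
    } MyNICState;

    static int my_nic_can_receive(NetClientState *nc)
    {
        MyNICState *s = qemu_get_nic_opaque(nc);

        /* Refuse packets while no RX buffers are free; the net layer then
         * queues them.  When the runstate flips back to running,
         * nic_vmstate_change_handler() calls this via nc->info->can_receive,
         * and only flushes the queued packets if it returns non-zero. */
        return s->rx_ring_free > 0;
    }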