diff mbox series

[v9,08/12] migration: Test new fd infrastructure

Message ID 20171004104636.7963-9-quintela@redhat.com
State New
Headers show
Series Multifd | expand

Commit Message

Juan Quintela Oct. 4, 2017, 10:46 a.m. UTC
We just send the page addresses through the alternate channels and test
that they are received correctly.

Signed-off-by: Juan Quintela <quintela@redhat.com>

--

Use qio_channel_*all functions
---
 migration/ram.c | 39 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 39 insertions(+)

Comments

Dr. David Alan Gilbert Oct. 17, 2017, 11:11 a.m. UTC | #1
* Juan Quintela (quintela@redhat.com) wrote:
> We just send the address through the alternate channels and test that it
> is ok.
> 
> Signed-off-by: Juan Quintela <quintela@redhat.com>

I remember questions on this patch from last time as well; this is just a
test, isn't it, and all of this gets changed in later patches.  So I'm
not too sure of the point, especially since you could use
qio_channel_writev_all here and make the changes smaller.

Dave

> --
> 
> Use qio_channel_*all functions
> ---
>  migration/ram.c | 39 +++++++++++++++++++++++++++++++++++++++
>  1 file changed, 39 insertions(+)
> 
> diff --git a/migration/ram.c b/migration/ram.c
> index 745da2971d..4c16d0775b 100644
> --- a/migration/ram.c
> +++ b/migration/ram.c
> @@ -492,8 +492,24 @@ static void *multifd_send_thread(void *opaque)
>              break;
>          }
>          if (p->pages.num) {
> +            Error *local_err = NULL;
> +            size_t ret;
> +            int i;
> +            int num;
> +
> +            num = p->pages.num;
>              p->pages.num = 0;
>              qemu_mutex_unlock(&p->mutex);
> +
> +            for (i = 0; i < num; i++) {
> +                ret = qio_channel_write_all(p->c,
> +                         (const char *)&p->pages.iov[i].iov_base,
> +                         sizeof(uint8_t *), &local_err);
> +                if (ret != 0) {
> +                    terminate_multifd_send_threads(local_err);
> +                    return NULL;
> +                }
> +            }
>              qemu_mutex_lock(&multifd_send_state->mutex);
>              p->done = true;
>              qemu_mutex_unlock(&multifd_send_state->mutex);
> @@ -675,6 +691,7 @@ int multifd_load_cleanup(Error **errp)
>  static void *multifd_recv_thread(void *opaque)
>  {
>      MultiFDRecvParams *p = opaque;
> +    uint8_t *recv_address;
>  
>      qemu_sem_post(&p->ready);
>      while (true) {
> @@ -684,7 +701,29 @@ static void *multifd_recv_thread(void *opaque)
>              break;
>          }
>          if (p->pages.num) {
> +            Error *local_err = NULL;
> +            size_t ret;
> +            int i;
> +            int num;
> +
> +            num = p->pages.num;
>              p->pages.num = 0;
> +
> +            for (i = 0; i < num; i++) {
> +                ret = qio_channel_read_all(p->c, (char *)&recv_address,
> +                                           sizeof(uint8_t *), &local_err);
> +                if (ret != 0) {
> +                    terminate_multifd_recv_threads(local_err);
> +                    return NULL;
> +                }
> +                if (recv_address != p->pages.iov[i].iov_base) {
> +                    error_setg(&local_err, "received %p and expecting %p (%d)",
> +                               recv_address, p->pages.iov[i].iov_base, i);
> +                    terminate_multifd_recv_threads(local_err);
> +                    return NULL;
> +                }
> +            }
> +
>              p->done = true;
>              qemu_mutex_unlock(&p->mutex);
>              qemu_sem_post(&p->ready);
> -- 
> 2.13.5
> 
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
diff mbox series

Patch

diff --git a/migration/ram.c b/migration/ram.c
index 745da2971d..4c16d0775b 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -492,8 +492,24 @@  static void *multifd_send_thread(void *opaque)
             break;
         }
         if (p->pages.num) {
+            Error *local_err = NULL;
+            size_t ret;
+            int i;
+            int num;
+
+            num = p->pages.num;
             p->pages.num = 0;
             qemu_mutex_unlock(&p->mutex);
+
+            for (i = 0; i < num; i++) {
+                ret = qio_channel_write_all(p->c,
+                         (const char *)&p->pages.iov[i].iov_base,
+                         sizeof(uint8_t *), &local_err);
+                if (ret != 0) {
+                    terminate_multifd_send_threads(local_err);
+                    return NULL;
+                }
+            }
             qemu_mutex_lock(&multifd_send_state->mutex);
             p->done = true;
             qemu_mutex_unlock(&multifd_send_state->mutex);
@@ -675,6 +691,7 @@  int multifd_load_cleanup(Error **errp)
 static void *multifd_recv_thread(void *opaque)
 {
     MultiFDRecvParams *p = opaque;
+    uint8_t *recv_address;
 
     qemu_sem_post(&p->ready);
     while (true) {
@@ -684,7 +701,29 @@  static void *multifd_recv_thread(void *opaque)
             break;
         }
         if (p->pages.num) {
+            Error *local_err = NULL;
+            size_t ret;
+            int i;
+            int num;
+
+            num = p->pages.num;
             p->pages.num = 0;
+
+            for (i = 0; i < num; i++) {
+                ret = qio_channel_read_all(p->c, (char *)&recv_address,
+                                           sizeof(uint8_t *), &local_err);
+                if (ret != 0) {
+                    terminate_multifd_recv_threads(local_err);
+                    return NULL;
+                }
+                if (recv_address != p->pages.iov[i].iov_base) {
+                    error_setg(&local_err, "received %p and expecting %p (%d)",
+                               recv_address, p->pages.iov[i].iov_base, i);
+                    terminate_multifd_recv_threads(local_err);
+                    return NULL;
+                }
+            }
+
             p->done = true;
             qemu_mutex_unlock(&p->mutex);
             qemu_sem_post(&p->ready);