Patch Detail
get: Show a patch.
patch: Update a patch (partial update; only the fields supplied in the request body change).
put: Update a patch (full update; the whole writable representation is replaced).
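For orientation, here is a minimal sketch of how these operations map onto HTTP calls, using Python and the requests library. The token value and the target state are placeholders, and the "Authorization: Token ..." header format assumes the Django REST Framework token authentication that Patchwork normally ships with; updating a patch additionally requires maintainer rights on the project.

# Minimal sketch of the patch-detail endpoint (GET to read, PATCH to update).
# The API token and the target state below are placeholders, not real values.
import requests

BASE = "http://patchwork.ozlabs.org/api"
PATCH_ID = 810534

# get: show a patch -- read access needs no authentication.
resp = requests.get(f"{BASE}/patches/{PATCH_ID}/")
resp.raise_for_status()
patch = resp.json()
print(patch["name"], "->", patch["state"])

# patch: partial update -- only the fields present in the body are changed.
# Assumes DRF token authentication and maintainer rights on the project.
headers = {"Authorization": "Token 0123456789abcdef0123456789abcdef"}
update = requests.patch(f"{BASE}/patches/{PATCH_ID}/",
                        headers=headers,
                        json={"state": "accepted"})
update.raise_for_status()

A put request works the same way but replaces the whole writable representation, so every writable field should be supplied rather than only the ones being changed. The captured request and response for this patch follow.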
GET /api/patches/810534/?format=api
{ "id": 810534, "url": "http://patchwork.ozlabs.org/api/patches/810534/?format=api", "web_url": "http://patchwork.ozlabs.org/project/qemu-devel/patch/20170906115143.27451-15-quintela@redhat.com/", "project": { "id": 14, "url": "http://patchwork.ozlabs.org/api/projects/14/?format=api", "name": "QEMU Development", "link_name": "qemu-devel", "list_id": "qemu-devel.nongnu.org", "list_email": "qemu-devel@nongnu.org", "web_url": "", "scm_url": "", "webscm_url": "", "list_archive_url": "", "list_archive_url_format": "", "commit_url_format": "" }, "msgid": "<20170906115143.27451-15-quintela@redhat.com>", "list_archive_url": null, "date": "2017-09-06T11:51:35", "name": "[v7,14/22] migration: Start of multiple fd work", "commit_ref": null, "pull_url": null, "state": "new", "archived": false, "hash": "c5d71464aa8faf6a19b9bdce31bd75cbccc5b0f0", "submitter": { "id": 2643, "url": "http://patchwork.ozlabs.org/api/people/2643/?format=api", "name": "Juan Quintela", "email": "quintela@redhat.com" }, "delegate": null, "mbox": "http://patchwork.ozlabs.org/project/qemu-devel/patch/20170906115143.27451-15-quintela@redhat.com/mbox/", "series": [ { "id": 1773, "url": "http://patchwork.ozlabs.org/api/series/1773/?format=api", "web_url": "http://patchwork.ozlabs.org/project/qemu-devel/list/?series=1773", "date": "2017-09-06T11:51:21", "name": "Multifd", "version": 7, "mbox": "http://patchwork.ozlabs.org/series/1773/mbox/" } ], "comments": "http://patchwork.ozlabs.org/api/patches/810534/comments/", "check": "pending", "checks": "http://patchwork.ozlabs.org/api/patches/810534/checks/", "tags": {}, "related": [], "headers": { "Return-Path": "<qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org>", "X-Original-To": "incoming@patchwork.ozlabs.org", "Delivered-To": "patchwork-incoming@bilbo.ozlabs.org", "Authentication-Results": [ "ozlabs.org;\n\tspf=pass (mailfrom) smtp.mailfrom=nongnu.org\n\t(client-ip=2001:4830:134:3::11; helo=lists.gnu.org;\n\tenvelope-from=qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org;\n\treceiver=<UNKNOWN>)", "ext-mx01.extmail.prod.ext.phx2.redhat.com;\n\tdmarc=none (p=none dis=none) header.from=redhat.com", "ext-mx01.extmail.prod.ext.phx2.redhat.com;\n\tspf=fail smtp.mailfrom=quintela@redhat.com" ], "Received": [ "from lists.gnu.org (lists.gnu.org [IPv6:2001:4830:134:3::11])\n\t(using TLSv1 with cipher AES256-SHA (256/256 bits))\n\t(No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 3xnMVM3kf2z9s8J\n\tfor <incoming@patchwork.ozlabs.org>;\n\tWed, 6 Sep 2017 21:55:55 +1000 (AEST)", "from localhost ([::1]:35588 helo=lists.gnu.org)\n\tby lists.gnu.org with esmtp (Exim 4.71) (envelope-from\n\t<qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org>)\n\tid 1dpYvx-0001Yn-Iw\n\tfor incoming@patchwork.ozlabs.org; Wed, 06 Sep 2017 07:55:53 -0400", "from eggs.gnu.org ([2001:4830:134:3::10]:60382)\n\tby lists.gnu.org with esmtp (Exim 4.71)\n\t(envelope-from <quintela@redhat.com>) id 1dpYse-0007fA-Pm\n\tfor qemu-devel@nongnu.org; Wed, 06 Sep 2017 07:52:32 -0400", "from Debian-exim by eggs.gnu.org with spam-scanned (Exim 4.71)\n\t(envelope-from <quintela@redhat.com>) id 1dpYsZ-00087x-FP\n\tfor qemu-devel@nongnu.org; Wed, 06 Sep 2017 07:52:28 -0400", "from mx1.redhat.com ([209.132.183.28]:33374)\n\tby eggs.gnu.org with esmtps (TLS1.0:DHE_RSA_AES_256_CBC_SHA1:32)\n\t(Exim 4.71) (envelope-from <quintela@redhat.com>) id 1dpYsZ-00087Z-6K\n\tfor qemu-devel@nongnu.org; Wed, 06 Sep 2017 07:52:23 -0400", "from 
smtp.corp.redhat.com\n\t(int-mx01.intmail.prod.int.phx2.redhat.com [10.5.11.11])\n\t(using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits))\n\t(No client certificate requested)\n\tby mx1.redhat.com (Postfix) with ESMTPS id 2667981DE1\n\tfor <qemu-devel@nongnu.org>; Wed, 6 Sep 2017 11:52:22 +0000 (UTC)", "from secure.mitica (ovpn-117-188.ams2.redhat.com [10.36.117.188])\n\tby smtp.corp.redhat.com (Postfix) with ESMTP id 082CC19C94;\n\tWed, 6 Sep 2017 11:52:18 +0000 (UTC)" ], "DMARC-Filter": "OpenDMARC Filter v1.3.2 mx1.redhat.com 2667981DE1", "From": "Juan Quintela <quintela@redhat.com>", "To": "qemu-devel@nongnu.org", "Date": "Wed, 6 Sep 2017 13:51:35 +0200", "Message-Id": "<20170906115143.27451-15-quintela@redhat.com>", "In-Reply-To": "<20170906115143.27451-1-quintela@redhat.com>", "References": "<20170906115143.27451-1-quintela@redhat.com>", "X-Scanned-By": "MIMEDefang 2.79 on 10.5.11.11", "X-Greylist": "Sender IP whitelisted, not delayed by milter-greylist-4.5.16\n\t(mx1.redhat.com [10.5.110.25]);\n\tWed, 06 Sep 2017 11:52:22 +0000 (UTC)", "X-detected-operating-system": "by eggs.gnu.org: GNU/Linux 2.2.x-3.x [generic]\n\t[fuzzy]", "X-Received-From": "209.132.183.28", "Subject": "[Qemu-devel] [PATCH v7 14/22] migration: Start of multiple fd work", "X-BeenThere": "qemu-devel@nongnu.org", "X-Mailman-Version": "2.1.21", "Precedence": "list", "List-Id": "<qemu-devel.nongnu.org>", "List-Unsubscribe": "<https://lists.nongnu.org/mailman/options/qemu-devel>,\n\t<mailto:qemu-devel-request@nongnu.org?subject=unsubscribe>", "List-Archive": "<http://lists.nongnu.org/archive/html/qemu-devel/>", "List-Post": "<mailto:qemu-devel@nongnu.org>", "List-Help": "<mailto:qemu-devel-request@nongnu.org?subject=help>", "List-Subscribe": "<https://lists.nongnu.org/mailman/listinfo/qemu-devel>,\n\t<mailto:qemu-devel-request@nongnu.org?subject=subscribe>", "Cc": "lvivier@redhat.com, dgilbert@redhat.com, peterx@redhat.com", "Errors-To": "qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org", "Sender": "\"Qemu-devel\"\n\t<qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org>" }, "content": "We create new channels for each new thread created. We send through\nthem a string containing <uuid> multifd <channel number> so we are\nsure that we connect the right channels in both sides.\n\nSigned-off-by: Juan Quintela <quintela@redhat.com>\n\n--\nSplit SocketArgs into incoming and outgoing args\n\nUse UUID's on the initial message, so we are sure we are connecting to\nthe right channel.\n\nRemove init semaphore. 
Now that we use uuids on the init message, we\nknow that this is our channel.\n\nFix recv socket destwroy, we were destroying send channels.\nThis was very interesting, because we were using an unreferred object\nwithout problems.\n\nMove to struct of pointers\ninit channel sooner.\nsplit recv thread creation.\nlisten on main thread\nWe count the number of created threads to know when we need to stop listening\nUse g_strdup_printf\nreport channel id on errors\nAdd name parameter\nUse local_err\nAdd Error * parameter to socket_send_channel_create()\n---\n migration/migration.c | 5 +++\n migration/ram.c | 120 ++++++++++++++++++++++++++++++++++++++++++++------\n migration/ram.h | 3 ++\n migration/socket.c | 33 +++++++++++++-\n migration/socket.h | 10 +++++\n 5 files changed, 157 insertions(+), 14 deletions(-)", "diff": "diff --git a/migration/migration.c b/migration/migration.c\nindex 18bd24a14c..b06de8b189 100644\n--- a/migration/migration.c\n+++ b/migration/migration.c\n@@ -420,6 +420,11 @@ void migration_ioc_process_incoming(QIOChannel *ioc)\n */\n bool migration_has_all_channels(void)\n {\n+ if (migrate_use_multifd()) {\n+ int thread_count = migrate_multifd_threads();\n+\n+ return thread_count == multifd_created_threads();\n+ }\n return true;\n }\n \ndiff --git a/migration/ram.c b/migration/ram.c\nindex 4e1616b953..9d45f4c7ca 100644\n--- a/migration/ram.c\n+++ b/migration/ram.c\n@@ -36,6 +36,7 @@\n #include \"xbzrle.h\"\n #include \"ram.h\"\n #include \"migration.h\"\n+#include \"socket.h\"\n #include \"migration/register.h\"\n #include \"migration/misc.h\"\n #include \"qemu-file.h\"\n@@ -46,6 +47,8 @@\n #include \"exec/ram_addr.h\"\n #include \"qemu/rcu_queue.h\"\n #include \"migration/colo.h\"\n+#include \"sysemu/sysemu.h\"\n+#include \"qemu/uuid.h\"\n \n /***********************************************************/\n /* ram save/restore */\n@@ -362,6 +365,7 @@ struct MultiFDSendParams {\n uint8_t id;\n char *name;\n QemuThread thread;\n+ QIOChannel *c;\n QemuSemaphore sem;\n QemuMutex mutex;\n bool quit;\n@@ -378,6 +382,12 @@ static void terminate_multifd_send_threads(Error *errp)\n {\n int i;\n \n+ if (errp) {\n+ MigrationState *s = migrate_get_current();\n+ migrate_set_error(s, errp);\n+ migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,\n+ MIGRATION_STATUS_FAILED);\n+ }\n for (i = 0; i < multifd_send_state->count; i++) {\n MultiFDSendParams *p = &multifd_send_state->params[i];\n \n@@ -403,6 +413,7 @@ int multifd_save_cleanup(Error **errp)\n qemu_thread_join(&p->thread);\n qemu_mutex_destroy(&p->mutex);\n qemu_sem_destroy(&p->sem);\n+ socket_send_channel_destroy(p->c);\n g_free(p->name);\n p->name = NULL;\n }\n@@ -413,9 +424,32 @@ int multifd_save_cleanup(Error **errp)\n return ret;\n }\n \n+/* Default uuid for multifd when qemu is not started with uuid */\n+static char multifd_uuid[] = \"5c49fd7e-af88-4a07-b6e8-091fd696ad40\";\n+/* strlen(multifd) + '-' + <channel id> + '-' + UUID_FMT + '\\0' */\n+#define MULTIFD_UUID_MSG (7 + 1 + 3 + 1 + UUID_FMT_LEN + 1)\n+\n static void *multifd_send_thread(void *opaque)\n {\n MultiFDSendParams *p = opaque;\n+ Error *local_err = NULL;\n+ char *string;\n+ char *string_uuid;\n+ size_t ret;\n+\n+ if (qemu_uuid_set) {\n+ string_uuid = qemu_uuid_unparse_strdup(&qemu_uuid);\n+ } else {\n+ string_uuid = g_strdup(multifd_uuid);\n+ }\n+ string = g_strdup_printf(\"%s multifd %03d\", string_uuid, p->id);\n+ g_free(string_uuid);\n+ ret = qio_channel_write(p->c, string, MULTIFD_UUID_MSG, &local_err);\n+ g_free(string);\n+ if (ret != MULTIFD_UUID_MSG) {\n+ 
terminate_multifd_send_threads(local_err);\n+ return NULL;\n+ }\n \n while (true) {\n qemu_mutex_lock(&p->mutex);\n@@ -432,6 +466,7 @@ static void *multifd_send_thread(void *opaque)\n \n int multifd_save_setup(void)\n {\n+ Error *local_err = NULL;\n int thread_count;\n uint8_t i;\n \n@@ -449,6 +484,13 @@ int multifd_save_setup(void)\n qemu_sem_init(&p->sem, 0);\n p->quit = false;\n p->id = i;\n+ p->c = socket_send_channel_create(&local_err);\n+ if (!p->c) {\n+ if (multifd_save_cleanup(&local_err) != 0) {\n+ migrate_set_error(migrate_get_current(), local_err);\n+ }\n+ return -1;\n+ }\n p->name = g_strdup_printf(\"multifdsend_%d\", i);\n qemu_thread_create(&p->thread, p->name, multifd_send_thread, p,\n QEMU_THREAD_JOINABLE);\n@@ -462,6 +504,7 @@ struct MultiFDRecvParams {\n uint8_t id;\n char *name;\n QemuThread thread;\n+ QIOChannel *c;\n QemuSemaphore sem;\n QemuMutex mutex;\n bool quit;\n@@ -472,12 +515,22 @@ struct {\n MultiFDRecvParams *params;\n /* number of created threads */\n int count;\n+ /* Should we finish */\n+ bool quit;\n } *multifd_recv_state;\n \n static void terminate_multifd_recv_threads(Error *errp)\n {\n int i;\n \n+ if (errp) {\n+ MigrationState *s = migrate_get_current();\n+ migrate_set_error(s, errp);\n+ migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,\n+ MIGRATION_STATUS_FAILED);\n+ }\n+ multifd_recv_state->quit = true;\n+\n for (i = 0; i < multifd_recv_state->count; i++) {\n MultiFDRecvParams *p = &multifd_recv_state->params[i];\n \n@@ -503,6 +556,7 @@ int multifd_load_cleanup(Error **errp)\n qemu_thread_join(&p->thread);\n qemu_mutex_destroy(&p->mutex);\n qemu_sem_destroy(&p->sem);\n+ socket_recv_channel_destroy(p->c);\n g_free(p->name);\n p->name = NULL;\n }\n@@ -531,10 +585,56 @@ static void *multifd_recv_thread(void *opaque)\n return NULL;\n }\n \n+void multifd_new_channel(QIOChannel *ioc)\n+{\n+ MultiFDRecvParams *p;\n+ char string[MULTIFD_UUID_MSG];\n+ char string_uuid[UUID_FMT_LEN];\n+ Error *local_err = NULL;\n+ char *uuid;\n+ size_t ret;\n+ int id;\n+\n+ ret = qio_channel_read(ioc, string, sizeof(string), &local_err);\n+ if (ret != sizeof(string)) {\n+ terminate_multifd_recv_threads(local_err);\n+ return;\n+ }\n+ sscanf(string, \"%s multifd %03d\", string_uuid, &id);\n+\n+ if (qemu_uuid_set) {\n+ uuid = qemu_uuid_unparse_strdup(&qemu_uuid);\n+ } else {\n+ uuid = g_strdup(multifd_uuid);\n+ }\n+ if (strcmp(string_uuid, uuid)) {\n+ error_setg(&local_err, \"multifd: received uuid '%s' and expected \"\n+ \"uuid '%s' for channel %d\", string_uuid, uuid, id);\n+ terminate_multifd_recv_threads(local_err);\n+ return;\n+ }\n+ g_free(uuid);\n+\n+ p = &multifd_recv_state->params[id];\n+ if (p->id != 0) {\n+ error_setg(&local_err, \"multifd: received id '%d' already setup'\", id);\n+ terminate_multifd_recv_threads(local_err);\n+ return;\n+ }\n+ qemu_mutex_init(&p->mutex);\n+ qemu_sem_init(&p->sem, 0);\n+ p->quit = false;\n+ p->id = id;\n+ p->c = ioc;\n+ multifd_recv_state->count++;\n+ p->name = g_strdup_printf(\"multifdrecv_%d\", id);\n+ qemu_thread_create(&p->thread, p->name, multifd_recv_thread, p,\n+ QEMU_THREAD_JOINABLE);\n+}\n+\n int multifd_load_setup(void)\n {\n int thread_count;\n- uint8_t i;\n \n if (!migrate_use_multifd()) {\n return 0;\n@@ -543,21 +643,15 @@ int multifd_load_setup(void)\n multifd_recv_state = g_malloc0(sizeof(*multifd_recv_state));\n multifd_recv_state->params = g_new0(MultiFDRecvParams, thread_count);\n multifd_recv_state->count = 0;\n- for (i = 0; i < thread_count; i++) {\n- MultiFDRecvParams *p = &multifd_recv_state->params[i];\n-\n- 
qemu_mutex_init(&p->mutex);\n- qemu_sem_init(&p->sem, 0);\n- p->quit = false;\n- p->id = i;\n- p->name = g_strdup_printf(\"multifdrecv_%d\", i);\n- qemu_thread_create(&p->thread, p->name, multifd_recv_thread, p,\n- QEMU_THREAD_JOINABLE);\n- multifd_recv_state->count++;\n- }\n+ multifd_recv_state->quit = false;\n return 0;\n }\n \n+int multifd_created_threads(void)\n+{\n+ return multifd_recv_state->count;\n+}\n+\n /**\n * save_page_header: write page header to wire\n *\ndiff --git a/migration/ram.h b/migration/ram.h\nindex 4a72d66503..5572f52f0a 100644\n--- a/migration/ram.h\n+++ b/migration/ram.h\n@@ -31,6 +31,7 @@\n \n #include \"qemu-common.h\"\n #include \"exec/cpu-common.h\"\n+#include \"io/channel.h\"\n \n extern MigrationStats ram_counters;\n extern XBZRLECacheStats xbzrle_counters;\n@@ -43,6 +44,8 @@ int multifd_save_setup(void);\n int multifd_save_cleanup(Error **errp);\n int multifd_load_setup(void);\n int multifd_load_cleanup(Error **errp);\n+void multifd_new_channel(QIOChannel *ioc);\n+int multifd_created_threads(void);\n \n uint64_t ram_pagesize_summary(void);\n int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len);\ndiff --git a/migration/socket.c b/migration/socket.c\nindex 2d70747a1a..58e81ae87b 100644\n--- a/migration/socket.c\n+++ b/migration/socket.c\n@@ -26,6 +26,36 @@\n #include \"io/channel-socket.h\"\n #include \"trace.h\"\n \n+int socket_recv_channel_destroy(QIOChannel *recv)\n+{\n+ /* Remove channel */\n+ object_unref(OBJECT(recv));\n+ return 0;\n+}\n+\n+struct SocketOutgoingArgs {\n+ SocketAddress *saddr;\n+} outgoing_args;\n+\n+QIOChannel *socket_send_channel_create(Error **errp)\n+{\n+ QIOChannelSocket *sioc = qio_channel_socket_new();\n+\n+ qio_channel_socket_connect_sync(sioc, outgoing_args.saddr, errp);\n+ qio_channel_set_delay(QIO_CHANNEL(sioc), false);\n+ return QIO_CHANNEL(sioc);\n+}\n+\n+int socket_send_channel_destroy(QIOChannel *send)\n+{\n+ /* Remove channel */\n+ object_unref(OBJECT(send));\n+ if (outgoing_args.saddr) {\n+ qapi_free_SocketAddress(outgoing_args.saddr);\n+ outgoing_args.saddr = NULL;\n+ }\n+ return 0;\n+}\n \n static SocketAddress *tcp_build_address(const char *host_port, Error **errp)\n {\n@@ -95,6 +125,8 @@ static void socket_start_outgoing_migration(MigrationState *s,\n struct SocketConnectData *data = g_new0(struct SocketConnectData, 1);\n \n data->s = s;\n+ outgoing_args.saddr = saddr;\n+\n if (saddr->type == SOCKET_ADDRESS_TYPE_INET) {\n data->hostname = g_strdup(saddr->u.inet.host);\n }\n@@ -105,7 +137,6 @@ static void socket_start_outgoing_migration(MigrationState *s,\n socket_outgoing_migration,\n data,\n socket_connect_data_free);\n- qapi_free_SocketAddress(saddr);\n }\n \n void tcp_start_outgoing_migration(MigrationState *s,\ndiff --git a/migration/socket.h b/migration/socket.h\nindex 6b91e9db38..8dd1a78d29 100644\n--- a/migration/socket.h\n+++ b/migration/socket.h\n@@ -16,6 +16,16 @@\n \n #ifndef QEMU_MIGRATION_SOCKET_H\n #define QEMU_MIGRATION_SOCKET_H\n+\n+#include \"io/channel.h\"\n+\n+QIOChannel *socket_recv_channel_create(void);\n+int socket_recv_channel_destroy(QIOChannel *recv);\n+\n+QIOChannel *socket_send_channel_create(Error **errp);\n+\n+int socket_send_channel_destroy(QIOChannel *send);\n+\n void tcp_start_incoming_migration(const char *host_port, Error **errp);\n \n void tcp_start_outgoing_migration(MigrationState *s, const char *host_port,\n", "prefixes": [ "v7", "14/22" ] }
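The JSON above is hyperlinked: the mbox, comments and checks fields, and each entry under series, are plain URLs that can be fetched in turn. The sketch below follows them with Python's requests; the output file name is arbitrary, and the shape of the comments and checks collections (plain JSON lists, possibly paginated on busy patches) is assumed from the fields visible here rather than guaranteed.

# Minimal sketch: following the links embedded in the patch JSON above.
import requests

patch = requests.get("http://patchwork.ozlabs.org/api/patches/810534/").json()

# The mbox link serves the patch in a form suitable for `git am`.
mbox = requests.get(patch["mbox"])
with open("v7-14-22-migration-multifd.mbox", "wb") as f:
    f.write(mbox.content)

# Series membership: this patch is 14/22 of series 1773 ("Multifd", v7).
for series in patch["series"]:
    print(series["id"], series["name"], "v%d" % series["version"])

# Discussion and CI results live in separate collections; "check" above is
# the aggregate state ("pending" until checks have been reported).
comments = requests.get(patch["comments"]).json()
checks = requests.get(patch["checks"]).json()
print(len(comments), "comments,", len(checks), "checks,",
      "aggregate check state:", patch["check"])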