Patch Detail

This endpoint exposes a single patch. Supported methods:

GET    Show a patch.
PATCH  Update a patch (partial update of writable fields).
PUT    Update a patch (full update of writable fields).
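
A minimal sketch of driving this endpoint from a script, assuming the third-party Python requests package. The GET call mirrors the example request below; the PATCH call is illustrative only: writing fields such as state requires a maintainer account and an API token (Patchwork accepts tokens in an Authorization: Token header), and the token and state slug shown are placeholders, not values taken from this page.

    import requests

    BASE = "http://patchwork.ozlabs.org/api"
    PATCH_ID = 810549

    # GET: show a patch. Read access needs no authentication.
    resp = requests.get(f"{BASE}/patches/{PATCH_ID}/")
    resp.raise_for_status()
    patch = resp.json()
    print(patch["name"], patch["state"])   # e.g. "[v7,12/22] migration: ..." "new"

    # PATCH: partial update of writable fields. Token and state slug are
    # placeholders for illustration only.
    headers = {"Authorization": "Token YOUR_API_TOKEN"}
    resp = requests.patch(f"{BASE}/patches/{PATCH_ID}/",
                          json={"state": "under-review"},
                          headers=headers)
    resp.raise_for_status()

Which fields are actually writable depends on the requester's role in the project; anonymous clients can only read.
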
Example request and response:

GET /api/patches/810549/?format=api
{ "id": 810549, "url": "http://patchwork.ozlabs.org/api/patches/810549/?format=api", "web_url": "http://patchwork.ozlabs.org/project/qemu-devel/patch/20170906115143.27451-13-quintela@redhat.com/", "project": { "id": 14, "url": "http://patchwork.ozlabs.org/api/projects/14/?format=api", "name": "QEMU Development", "link_name": "qemu-devel", "list_id": "qemu-devel.nongnu.org", "list_email": "qemu-devel@nongnu.org", "web_url": "", "scm_url": "", "webscm_url": "", "list_archive_url": "", "list_archive_url_format": "", "commit_url_format": "" }, "msgid": "<20170906115143.27451-13-quintela@redhat.com>", "list_archive_url": null, "date": "2017-09-06T11:51:33", "name": "[v7,12/22] migration: Create multifd migration threads", "commit_ref": null, "pull_url": null, "state": "new", "archived": false, "hash": "885fb5d37dde6fd617af69dacffe16ddd55359c9", "submitter": { "id": 2643, "url": "http://patchwork.ozlabs.org/api/people/2643/?format=api", "name": "Juan Quintela", "email": "quintela@redhat.com" }, "delegate": null, "mbox": "http://patchwork.ozlabs.org/project/qemu-devel/patch/20170906115143.27451-13-quintela@redhat.com/mbox/", "series": [ { "id": 1773, "url": "http://patchwork.ozlabs.org/api/series/1773/?format=api", "web_url": "http://patchwork.ozlabs.org/project/qemu-devel/list/?series=1773", "date": "2017-09-06T11:51:21", "name": "Multifd", "version": 7, "mbox": "http://patchwork.ozlabs.org/series/1773/mbox/" } ], "comments": "http://patchwork.ozlabs.org/api/patches/810549/comments/", "check": "pending", "checks": "http://patchwork.ozlabs.org/api/patches/810549/checks/", "tags": {}, "related": [], "headers": { "Return-Path": "<qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org>", "X-Original-To": "incoming@patchwork.ozlabs.org", "Delivered-To": "patchwork-incoming@bilbo.ozlabs.org", "Authentication-Results": [ "ozlabs.org;\n\tspf=pass (mailfrom) smtp.mailfrom=nongnu.org\n\t(client-ip=2001:4830:134:3::11; helo=lists.gnu.org;\n\tenvelope-from=qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org;\n\treceiver=<UNKNOWN>)", "ext-mx05.extmail.prod.ext.phx2.redhat.com;\n\tdmarc=none (p=none dis=none) header.from=redhat.com", "ext-mx05.extmail.prod.ext.phx2.redhat.com;\n\tspf=fail smtp.mailfrom=quintela@redhat.com" ], "Received": [ "from lists.gnu.org (lists.gnu.org [IPv6:2001:4830:134:3::11])\n\t(using TLSv1 with cipher AES256-SHA (256/256 bits))\n\t(No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 3xnMhL2cDbz9sBW\n\tfor <incoming@patchwork.ozlabs.org>;\n\tWed, 6 Sep 2017 22:04:34 +1000 (AEST)", "from localhost ([::1]:35656 helo=lists.gnu.org)\n\tby lists.gnu.org with esmtp (Exim 4.71) (envelope-from\n\t<qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org>)\n\tid 1dpZ4K-0000am-Fx\n\tfor incoming@patchwork.ozlabs.org; Wed, 06 Sep 2017 08:04:32 -0400", "from eggs.gnu.org ([2001:4830:134:3::10]:60294)\n\tby lists.gnu.org with esmtp (Exim 4.71)\n\t(envelope-from <quintela@redhat.com>) id 1dpYsV-0007XL-JU\n\tfor qemu-devel@nongnu.org; Wed, 06 Sep 2017 07:52:22 -0400", "from Debian-exim by eggs.gnu.org with spam-scanned (Exim 4.71)\n\t(envelope-from <quintela@redhat.com>) id 1dpYsU-00085s-2t\n\tfor qemu-devel@nongnu.org; Wed, 06 Sep 2017 07:52:19 -0400", "from mx1.redhat.com ([209.132.183.28]:38242)\n\tby eggs.gnu.org with esmtps (TLS1.0:DHE_RSA_AES_256_CBC_SHA1:32)\n\t(Exim 4.71) (envelope-from <quintela@redhat.com>) id 1dpYsT-00085H-R0\n\tfor qemu-devel@nongnu.org; Wed, 06 Sep 2017 07:52:18 -0400", "from 
smtp.corp.redhat.com\n\t(int-mx01.intmail.prod.int.phx2.redhat.com [10.5.11.11])\n\t(using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits))\n\t(No client certificate requested)\n\tby mx1.redhat.com (Postfix) with ESMTPS id D284AA795\n\tfor <qemu-devel@nongnu.org>; Wed, 6 Sep 2017 11:52:16 +0000 (UTC)", "from secure.mitica (ovpn-117-188.ams2.redhat.com [10.36.117.188])\n\tby smtp.corp.redhat.com (Postfix) with ESMTP id 5264619C94;\n\tWed, 6 Sep 2017 11:52:15 +0000 (UTC)" ], "DMARC-Filter": "OpenDMARC Filter v1.3.2 mx1.redhat.com D284AA795", "From": "Juan Quintela <quintela@redhat.com>", "To": "qemu-devel@nongnu.org", "Date": "Wed, 6 Sep 2017 13:51:33 +0200", "Message-Id": "<20170906115143.27451-13-quintela@redhat.com>", "In-Reply-To": "<20170906115143.27451-1-quintela@redhat.com>", "References": "<20170906115143.27451-1-quintela@redhat.com>", "X-Scanned-By": "MIMEDefang 2.79 on 10.5.11.11", "X-Greylist": "Sender IP whitelisted, not delayed by milter-greylist-4.5.16\n\t(mx1.redhat.com [10.5.110.29]);\n\tWed, 06 Sep 2017 11:52:16 +0000 (UTC)", "X-detected-operating-system": "by eggs.gnu.org: GNU/Linux 2.2.x-3.x [generic]\n\t[fuzzy]", "X-Received-From": "209.132.183.28", "Subject": "[Qemu-devel] [PATCH v7 12/22] migration: Create multifd migration\n\tthreads", "X-BeenThere": "qemu-devel@nongnu.org", "X-Mailman-Version": "2.1.21", "Precedence": "list", "List-Id": "<qemu-devel.nongnu.org>", "List-Unsubscribe": "<https://lists.nongnu.org/mailman/options/qemu-devel>,\n\t<mailto:qemu-devel-request@nongnu.org?subject=unsubscribe>", "List-Archive": "<http://lists.nongnu.org/archive/html/qemu-devel/>", "List-Post": "<mailto:qemu-devel@nongnu.org>", "List-Help": "<mailto:qemu-devel-request@nongnu.org?subject=help>", "List-Subscribe": "<https://lists.nongnu.org/mailman/listinfo/qemu-devel>,\n\t<mailto:qemu-devel-request@nongnu.org?subject=subscribe>", "Cc": "lvivier@redhat.com, dgilbert@redhat.com, peterx@redhat.com", "Errors-To": "qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org", "Sender": "\"Qemu-devel\"\n\t<qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org>" }, "content": "Creation of the threads, nothing inside yet.\n\nSigned-off-by: Juan Quintela <quintela@redhat.com>\nReviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>\n\n--\n\nUse pointers instead of long array names\nMove to use semaphores instead of conditions as paolo suggestion\n\nPut all the state inside one struct.\nUse a counter for the number of threads created. Needed during cancellation.\n\nAdd error return to thread creation\n\nAdd id field\n\nRename functions to multifd_save/load_setup/cleanup\nChange recv parameters to a pointer to struct\nChange back to a struct\nUse Error * for _cleanup\n\nSigned-off-by: Juan Quintela <quintela@redhat.com>\n---\n migration/migration.c | 26 +++++++\n migration/ram.c | 202 ++++++++++++++++++++++++++++++++++++++++++++++++++\n migration/ram.h | 5 ++\n 3 files changed, 233 insertions(+)", "diff": "diff --git a/migration/migration.c b/migration/migration.c\nindex 208554dc37..9fec880a58 100644\n--- a/migration/migration.c\n+++ b/migration/migration.c\n@@ -281,6 +281,10 @@ static void process_incoming_migration_bh(void *opaque)\n */\n qemu_announce_self();\n \n+ if (multifd_load_cleanup(&local_err) != 0) {\n+ error_report_err(local_err);\n+ autostart = false;\n+ }\n /* If global state section was not received or we are in running\n state, we need to obey autostart. Any other state is set with\n runstate_set. 
*/\n@@ -353,10 +357,15 @@ static void process_incoming_migration_co(void *opaque)\n }\n \n if (ret < 0) {\n+ Error *local_err = NULL;\n+\n migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,\n MIGRATION_STATUS_FAILED);\n error_report(\"load of migration failed: %s\", strerror(-ret));\n qemu_fclose(mis->from_src_file);\n+ if (multifd_load_cleanup(&local_err) != 0) {\n+ error_report_err(local_err);\n+ }\n exit(EXIT_FAILURE);\n }\n mis->bh = qemu_bh_new(process_incoming_migration_bh, mis);\n@@ -368,6 +377,12 @@ void migration_fd_process_incoming(QEMUFile *f)\n Coroutine *co = qemu_coroutine_create(process_incoming_migration_co, NULL);\n MigrationIncomingState *mis = migration_incoming_get_current();\n \n+ if (multifd_load_setup() != 0) {\n+ /* We haven't been able to create multifd threads\n+ nothing better to do */\n+ exit(EXIT_FAILURE);\n+ }\n+\n if (!mis->from_src_file) {\n mis->from_src_file = f;\n }\n@@ -1019,6 +1034,8 @@ static void migrate_fd_cleanup(void *opaque)\n s->cleanup_bh = NULL;\n \n if (s->to_dst_file) {\n+ Error *local_err = NULL;\n+\n trace_migrate_fd_cleanup();\n qemu_mutex_unlock_iothread();\n if (s->migration_thread_running) {\n@@ -1027,6 +1044,9 @@ static void migrate_fd_cleanup(void *opaque)\n }\n qemu_mutex_lock_iothread();\n \n+ if (multifd_save_cleanup(&local_err) != 0) {\n+ error_report_err(local_err);\n+ }\n qemu_fclose(s->to_dst_file);\n s->to_dst_file = NULL;\n }\n@@ -2225,6 +2245,12 @@ void migrate_fd_connect(MigrationState *s)\n }\n }\n \n+ if (multifd_save_setup() != 0) {\n+ migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,\n+ MIGRATION_STATUS_FAILED);\n+ migrate_fd_cleanup(s);\n+ return;\n+ }\n qemu_thread_create(&s->thread, \"live_migration\", migration_thread, s,\n QEMU_THREAD_JOINABLE);\n s->migration_thread_running = true;\ndiff --git a/migration/ram.c b/migration/ram.c\nindex e0179fc838..4e1616b953 100644\n--- a/migration/ram.c\n+++ b/migration/ram.c\n@@ -356,6 +356,208 @@ static void compress_threads_save_setup(void)\n }\n }\n \n+/* Multiple fd's */\n+\n+struct MultiFDSendParams {\n+ uint8_t id;\n+ char *name;\n+ QemuThread thread;\n+ QemuSemaphore sem;\n+ QemuMutex mutex;\n+ bool quit;\n+};\n+typedef struct MultiFDSendParams MultiFDSendParams;\n+\n+struct {\n+ MultiFDSendParams *params;\n+ /* number of created threads */\n+ int count;\n+} *multifd_send_state;\n+\n+static void terminate_multifd_send_threads(Error *errp)\n+{\n+ int i;\n+\n+ for (i = 0; i < multifd_send_state->count; i++) {\n+ MultiFDSendParams *p = &multifd_send_state->params[i];\n+\n+ qemu_mutex_lock(&p->mutex);\n+ p->quit = true;\n+ qemu_sem_post(&p->sem);\n+ qemu_mutex_unlock(&p->mutex);\n+ }\n+}\n+\n+int multifd_save_cleanup(Error **errp)\n+{\n+ int i;\n+ int ret = 0;\n+\n+ if (!migrate_use_multifd()) {\n+ return 0;\n+ }\n+ terminate_multifd_send_threads(NULL);\n+ for (i = 0; i < multifd_send_state->count; i++) {\n+ MultiFDSendParams *p = &multifd_send_state->params[i];\n+\n+ qemu_thread_join(&p->thread);\n+ qemu_mutex_destroy(&p->mutex);\n+ qemu_sem_destroy(&p->sem);\n+ g_free(p->name);\n+ p->name = NULL;\n+ }\n+ g_free(multifd_send_state->params);\n+ multifd_send_state->params = NULL;\n+ g_free(multifd_send_state);\n+ multifd_send_state = NULL;\n+ return ret;\n+}\n+\n+static void *multifd_send_thread(void *opaque)\n+{\n+ MultiFDSendParams *p = opaque;\n+\n+ while (true) {\n+ qemu_mutex_lock(&p->mutex);\n+ if (p->quit) {\n+ qemu_mutex_unlock(&p->mutex);\n+ break;\n+ }\n+ qemu_mutex_unlock(&p->mutex);\n+ qemu_sem_wait(&p->sem);\n+ }\n+\n+ return NULL;\n+}\n+\n+int 
multifd_save_setup(void)\n+{\n+ int thread_count;\n+ uint8_t i;\n+\n+ if (!migrate_use_multifd()) {\n+ return 0;\n+ }\n+ thread_count = migrate_multifd_threads();\n+ multifd_send_state = g_malloc0(sizeof(*multifd_send_state));\n+ multifd_send_state->params = g_new0(MultiFDSendParams, thread_count);\n+ multifd_send_state->count = 0;\n+ for (i = 0; i < thread_count; i++) {\n+ MultiFDSendParams *p = &multifd_send_state->params[i];\n+\n+ qemu_mutex_init(&p->mutex);\n+ qemu_sem_init(&p->sem, 0);\n+ p->quit = false;\n+ p->id = i;\n+ p->name = g_strdup_printf(\"multifdsend_%d\", i);\n+ qemu_thread_create(&p->thread, p->name, multifd_send_thread, p,\n+ QEMU_THREAD_JOINABLE);\n+\n+ multifd_send_state->count++;\n+ }\n+ return 0;\n+}\n+\n+struct MultiFDRecvParams {\n+ uint8_t id;\n+ char *name;\n+ QemuThread thread;\n+ QemuSemaphore sem;\n+ QemuMutex mutex;\n+ bool quit;\n+};\n+typedef struct MultiFDRecvParams MultiFDRecvParams;\n+\n+struct {\n+ MultiFDRecvParams *params;\n+ /* number of created threads */\n+ int count;\n+} *multifd_recv_state;\n+\n+static void terminate_multifd_recv_threads(Error *errp)\n+{\n+ int i;\n+\n+ for (i = 0; i < multifd_recv_state->count; i++) {\n+ MultiFDRecvParams *p = &multifd_recv_state->params[i];\n+\n+ qemu_mutex_lock(&p->mutex);\n+ p->quit = true;\n+ qemu_sem_post(&p->sem);\n+ qemu_mutex_unlock(&p->mutex);\n+ }\n+}\n+\n+int multifd_load_cleanup(Error **errp)\n+{\n+ int i;\n+ int ret = 0;\n+\n+ if (!migrate_use_multifd()) {\n+ return 0;\n+ }\n+ terminate_multifd_recv_threads(NULL);\n+ for (i = 0; i < multifd_recv_state->count; i++) {\n+ MultiFDRecvParams *p = &multifd_recv_state->params[i];\n+\n+ qemu_thread_join(&p->thread);\n+ qemu_mutex_destroy(&p->mutex);\n+ qemu_sem_destroy(&p->sem);\n+ g_free(p->name);\n+ p->name = NULL;\n+ }\n+ g_free(multifd_recv_state->params);\n+ multifd_recv_state->params = NULL;\n+ g_free(multifd_recv_state);\n+ multifd_recv_state = NULL;\n+\n+ return ret;\n+}\n+\n+static void *multifd_recv_thread(void *opaque)\n+{\n+ MultiFDRecvParams *p = opaque;\n+\n+ while (true) {\n+ qemu_mutex_lock(&p->mutex);\n+ if (p->quit) {\n+ qemu_mutex_unlock(&p->mutex);\n+ break;\n+ }\n+ qemu_mutex_unlock(&p->mutex);\n+ qemu_sem_wait(&p->sem);\n+ }\n+\n+ return NULL;\n+}\n+\n+int multifd_load_setup(void)\n+{\n+ int thread_count;\n+ uint8_t i;\n+\n+ if (!migrate_use_multifd()) {\n+ return 0;\n+ }\n+ thread_count = migrate_multifd_threads();\n+ multifd_recv_state = g_malloc0(sizeof(*multifd_recv_state));\n+ multifd_recv_state->params = g_new0(MultiFDRecvParams, thread_count);\n+ multifd_recv_state->count = 0;\n+ for (i = 0; i < thread_count; i++) {\n+ MultiFDRecvParams *p = &multifd_recv_state->params[i];\n+\n+ qemu_mutex_init(&p->mutex);\n+ qemu_sem_init(&p->sem, 0);\n+ p->quit = false;\n+ p->id = i;\n+ p->name = g_strdup_printf(\"multifdrecv_%d\", i);\n+ qemu_thread_create(&p->thread, p->name, multifd_recv_thread, p,\n+ QEMU_THREAD_JOINABLE);\n+ multifd_recv_state->count++;\n+ }\n+ return 0;\n+}\n+\n /**\n * save_page_header: write page header to wire\n *\ndiff --git a/migration/ram.h b/migration/ram.h\nindex c081fde86c..4a72d66503 100644\n--- a/migration/ram.h\n+++ b/migration/ram.h\n@@ -39,6 +39,11 @@ int64_t xbzrle_cache_resize(int64_t new_size);\n uint64_t ram_bytes_remaining(void);\n uint64_t ram_bytes_total(void);\n \n+int multifd_save_setup(void);\n+int multifd_save_cleanup(Error **errp);\n+int multifd_load_setup(void);\n+int multifd_load_cleanup(Error **errp);\n+\n uint64_t ram_pagesize_summary(void);\n int ram_save_queue_pages(const char *rbname, 
ram_addr_t start, ram_addr_t len);\n void acct_update_position(QEMUFile *f, size_t size, bool zero);\n", "prefixes": [ "v7", "12/22" ] }
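
A second sketch showing how the response above might be consumed: the mbox field points at the raw patch email, which git am can apply to a local checkout of the target tree, and the series list links the patch back to its cover letter and siblings. This assumes the requests package, git on PATH, and that the script runs inside the tree the patch targets; the output filename is arbitrary.

    import subprocess
    import requests

    patch = requests.get(
        "http://patchwork.ozlabs.org/api/patches/810549/").json()

    # Download the raw patch email from the "mbox" field of the response.
    mbox = requests.get(patch["mbox"])
    mbox.raise_for_status()
    with open("multifd-12.mbox", "wb") as f:
        f.write(mbox.content)

    # Apply it on top of the current branch (run inside the target tree).
    subprocess.run(["git", "am", "multifd-12.mbox"], check=True)

    # Each series entry carries a version, a name and its own mbox URL,
    # as seen in the "series" array above.
    for series in patch["series"]:
        print(series["version"], series["name"], series["mbox"])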