[04/14] ram.c: Do not call save_page_header() from compress threads

Message ID a56e373205a5a0f135b5d97c14c64b0d56fa80e9.1680457764.git.lukasstraub2@web.de
State New
Series: migration/ram.c: Refactor compress code

Commit Message

Lukas Straub April 2, 2023, 5:56 p.m. UTC
save_page_header() accesses several global variables, so calling it
from multiple compression threads is error-prone.

Instead, call save_page_header() before writing out the compressed
data from the compress buffer to the migration stream.

This also makes the core compress code more independent of ram.c.
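
For illustration, a simplified sketch of the resulting split (condensed
from the patch below, not the exact code; do_data_compress() is the
existing worker loop in ram.c, and param->originbuf is the staging
buffer, which is not visible in these hunks):

    /* Compress thread: compress into the per-thread buffer only;
     * no page header is written and no global state is touched. */
    param->result = do_compress_ram_page(param->file, &param->stream,
                                         param->block, param->offset,
                                         param->originbuf);

    /* Migration thread: write the page header, then flush the
     * queued buffer to the migration stream. */
    bytes_xmit = send_queued_data(param);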

Signed-off-by: Lukas Straub <lukasstraub2@web.de>
---
 migration/ram.c | 44 +++++++++++++++++++++++++++++++++++---------
 1 file changed, 35 insertions(+), 9 deletions(-)

--
2.30.2

Patch

diff --git a/migration/ram.c b/migration/ram.c
index bef6292ef7..7ab008145b 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -1476,17 +1476,13 @@  static CompressResult do_compress_ram_page(QEMUFile *f, z_stream *stream,
                                            RAMBlock *block, ram_addr_t offset,
                                            uint8_t *source_buf)
 {
-    RAMState *rs = ram_state;
-    PageSearchStatus *pss = &rs->pss[RAM_CHANNEL_PRECOPY];
     uint8_t *p = block->host + offset;
     int ret;

-    if (save_zero_page_to_file(pss, f, block, offset)) {
+    if (buffer_is_zero(p, TARGET_PAGE_SIZE)) {
         return RES_ZEROPAGE;
     }

-    save_page_header(pss, f, block, offset | RAM_SAVE_FLAG_COMPRESS_PAGE);
-
     /*
      * copy it to a internal buffer to avoid it being modified by VM
      * so that we can catch up the error during compression and
@@ -1526,9 +1522,40 @@  static inline void compress_reset_result(CompressParam *param)
     param->offset = 0;
 }

-static void flush_compressed_data(RAMState *rs)
+static int send_queued_data(CompressParam *param)
 {
+    PageSearchStatus *pss = &ram_state->pss[RAM_CHANNEL_PRECOPY];
     MigrationState *ms = migrate_get_current();
+    QEMUFile *file = ms->to_dst_file;
+    int len = 0;
+
+    RAMBlock *block = param->block;
+    ram_addr_t offset = param->offset;
+
+    if (param->result == RES_NONE) {
+        return 0;
+    }
+
+    assert(block == pss->last_sent_block);
+
+    if (param->result == RES_ZEROPAGE) {
+        len += save_page_header(pss, file, block, offset | RAM_SAVE_FLAG_ZERO);
+        qemu_put_byte(file, 0);
+        len += 1;
+        ram_release_page(block->idstr, offset);
+    } else if (param->result == RES_COMPRESS) {
+        len += save_page_header(pss, file, block,
+                                offset | RAM_SAVE_FLAG_COMPRESS_PAGE);
+        len += qemu_put_qemu_file(file, param->file);
+    } else {
+        abort();
+    }
+
+    return len;
+}
+
+static void flush_compressed_data(RAMState *rs)
+{
     int idx, len, thread_count;

     if (!save_page_use_compression(rs)) {
@@ -1548,7 +1575,7 @@  static void flush_compressed_data(RAMState *rs)
         qemu_mutex_lock(&comp_param[idx].mutex);
         if (!comp_param[idx].quit) {
             CompressParam *param = &comp_param[idx];
-            len = qemu_put_qemu_file(ms->to_dst_file, param->file);
+            len = send_queued_data(param);
             compress_reset_result(param);

             /*
@@ -1574,7 +1601,6 @@  static int compress_page_with_multi_thread(RAMBlock *block, ram_addr_t offset)
 {
     int idx, thread_count, bytes_xmit = -1, pages = -1;
     bool wait = migrate_compress_wait_thread();
-    MigrationState *ms = migrate_get_current();

     thread_count = migrate_compress_threads();
     qemu_mutex_lock(&comp_done_lock);
@@ -1584,7 +1610,7 @@  retry:
             CompressParam *param = &comp_param[idx];
             qemu_mutex_lock(&param->mutex);
             param->done = false;
-            bytes_xmit = qemu_put_qemu_file(ms->to_dst_file, param->file);
+            bytes_xmit = send_queued_data(param);
             compress_reset_result(param);
             set_compress_params(param, block, offset);