diff mbox series

migration/throttle: use the xfer pages as threshold

Message ID 20200401010858.799-1-wangxinxin.wang@huawei.com
State New
Headers show
Series migration/throttle: use the xfer pages as threshold | expand

Commit Message

Wang Xin April 1, 2020, 1:08 a.m. UTC
If a VM is migrated with lots of zero pages, or with data compression
enabled, the bytes transferred per period may be much less than the
available bandwidth, which triggers unnecessary guest throttle-down.
Use the raw count of transferred pages as the threshold instead.

Signed-off-by: Wang Xin <wangxinxin.wang@huawei.com>
diff mbox series

Patch

diff --git a/migration/ram.c b/migration/ram.c
index 04f13feb2e..e53333bc6a 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -323,6 +323,8 @@  struct RAMState {
     int64_t time_last_bitmap_sync;
     /* bytes transferred at start_time */
     uint64_t bytes_xfer_prev;
+    /* pages transferred at start_time */
+    uint64_t pages_xfer_prev;
     /* number of dirty pages since start_time */
     uint64_t num_dirty_pages_period;
     /* xbzrle misses since the beginning of the period */
@@ -901,9 +903,9 @@  static void migration_trigger_throttle(RAMState *rs)
     MigrationState *s = migrate_get_current();
     uint64_t threshold = s->parameters.throttle_trigger_threshold;
 
-    uint64_t bytes_xfer_period = ram_counters.transferred - rs->bytes_xfer_prev;
-    uint64_t bytes_dirty_period = rs->num_dirty_pages_period * TARGET_PAGE_SIZE;
-    uint64_t bytes_dirty_threshold = bytes_xfer_period * threshold / 100;
+    uint64_t pages_xfer_period = ram_get_total_transferred_pages() -
+                                 rs->pages_xfer_prev;
+    uint64_t pages_dirty_threshold = pages_xfer_period * threshold / 100;
 
     /* During block migration the auto-converge logic incorrectly detects
      * that ram migration makes no progress. Avoid this by disabling the
@@ -915,7 +917,7 @@  static void migration_trigger_throttle(RAMState *rs)
            we were in this routine reaches the threshold. If that happens
            twice, start or increase throttling. */
 
-        if ((bytes_dirty_period > bytes_dirty_threshold) &&
+        if ((rs->num_dirty_pages_period > pages_dirty_threshold) &&
             (++rs->dirty_rate_high_cnt >= 2)) {
             trace_migration_throttle();
             rs->dirty_rate_high_cnt = 0;
@@ -964,6 +966,7 @@  static void migration_bitmap_sync(RAMState *rs)
         rs->time_last_bitmap_sync = end_time;
         rs->num_dirty_pages_period = 0;
         rs->bytes_xfer_prev = ram_counters.transferred;
+        rs->pages_xfer_prev = ram_get_total_transferred_pages();
     }
     if (migrate_use_events()) {
         qapi_event_send_migration_pass(ram_counters.dirty_sync_count);