[v3,3/5] migration: Dynamic cpu throttling for auto-converge

Message ID: 1435254377-13322-4-git-send-email-jjherne@linux.vnet.ibm.com
State: New

Commit Message

Jason J. Herne June 25, 2015, 5:46 p.m. UTC
Remove traditional auto-converge static 30ms throttling code and replace it
with a dynamic throttling algorithm.

Additionally, be more aggressive when deciding when to start throttling.
Previously we waited for four unproductive memory passes. Now we begin
throttling after only two unproductive memory passes. Four seemed quite
arbitrary, and only waiting for two passes allows us to complete the
migration faster.

Signed-off-by: Jason J. Herne <jjherne@linux.vnet.ibm.com>
Reviewed-by: Matthew Rosato <mjrosato@linux.vnet.ibm.com>
---
 arch_init.c           | 93 +++++++++++++++++----------------------------------
 migration/migration.c |  4 +++
 2 files changed, 34 insertions(+), 63 deletions(-)
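
For illustration only, here is a minimal stand-alone sketch (not part of the
patch) of how the throttle percentage ramps up under the new scheme. The 20%
starting point and 10% step are assumptions standing in for the
x-cpu-throttle-initial and x-cpu-throttle-increment migration parameters, and
the 99% ceiling mirrors the cap that cpu_throttle_set() is said to enforce
later in this thread.

#include <stdio.h>

int main(void)
{
    unsigned pct_initial = 20;    /* assumed x-cpu-throttle-initial value   */
    unsigned pct_increment = 10;  /* assumed x-cpu-throttle-increment value */
    unsigned pct = 0;             /* 0 == throttling not yet active         */

    for (int unproductive = 1; unproductive <= 10; unproductive++) {
        if (pct == 0) {
            pct = pct_initial;          /* first trigger: start throttling */
        } else {
            pct += pct_increment;       /* already on: ramp it up          */
        }
        if (pct > 99) {
            pct = 99;                   /* assumed cap, see thread below   */
        }
        printf("unproductive sync %2d -> throttle %u%%\n", unproductive, pct);
    }
    return 0;
}

With those assumed values the throttle climbs 20%, 30%, 40%, ... and hits the
99% ceiling on the ninth unproductive sync.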

Comments

Dr. David Alan Gilbert June 26, 2015, 5:54 p.m. UTC | #1
* Jason J. Herne (jjherne@linux.vnet.ibm.com) wrote:
> Remove traditional auto-converge static 30ms throttling code and replace it
> with a dynamic throttling algorithm.
> 
> Additionally, be more aggressive when deciding when to start throttling.
> Previously we waited for four unproductive memory passes. Now we begin
> throttling after only two unproductive memory passes. Four seemed quite
> arbitrary, and only waiting for two passes allows us to complete the
> migration faster.
> 
> Signed-off-by: Jason J. Herne <jjherne@linux.vnet.ibm.com>
> Reviewed-by: Matthew Rosato <mjrosato@linux.vnet.ibm.com>
> ---
>  arch_init.c           | 93 +++++++++++++++++----------------------------------
>  migration/migration.c |  4 +++
>  2 files changed, 34 insertions(+), 63 deletions(-)
> 
> diff --git a/arch_init.c b/arch_init.c
> index 23d3feb..d456527 100644
> --- a/arch_init.c
> +++ b/arch_init.c
> @@ -111,9 +111,7 @@ int graphic_depth = 32;
>  #endif
>  
>  const uint32_t arch_type = QEMU_ARCH;
> -static bool mig_throttle_on;
>  static int dirty_rate_high_cnt;
> -static void check_guest_throttling(void);
>  
>  static uint64_t bitmap_sync_count;
>  
> @@ -487,6 +485,29 @@ static size_t save_page_header(QEMUFile *f, RAMBlock *block, ram_addr_t offset)
>      return size;
>  }
>  
> +/* Reduce amount of guest cpu execution to hopefully slow down memory writes.
> + * If guest dirty memory rate is reduced below the rate at which we can
> + * transfer pages to the destination then we should be able to complete
> + * migration. Some workloads dirty memory way too fast and will not effectively
> + * converge, even with auto-converge.
> + */
> +static void mig_throttle_guest_down(void)
> +{
> +    MigrationState *s = migrate_get_current();
> +    uint64_t pct_initial =
> +            s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INITIAL];
> +    uint64_t pct_icrement =
> +            s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INCREMENT];
> +
> +    /* We have not started throttling yet. Let's start it. */
> +    if (!cpu_throttle_active()) {
> +        cpu_throttle_set(pct_initial);
> +    } else {
> +        /* Throttling already on, just increase the rate */
> +        cpu_throttle_set(cpu_throttle_get_percentage() + pct_icrement);
> +    }

Shouldn't this cap it at 100% ?

> +}
> +
>  /* Update the xbzrle cache to reflect a page that's been sent as all 0.
>   * The important thing is that a stale (not-yet-0'd) page be replaced
>   * by the new data.
> @@ -714,21 +735,21 @@ static void migration_bitmap_sync(void)
>              /* The following detection logic can be refined later. For now:
>                 Check to see if the dirtied bytes is 50% more than the approx.
>                 amount of bytes that just got transferred since the last time we
> -               were in this routine. If that happens >N times (for now N==4)
> -               we turn on the throttle down logic */
> +               were in this routine. If that happens twice, start or increase
> +               throttling */
>              bytes_xfer_now = ram_bytes_transferred();
> +
>              if (s->dirty_pages_rate &&
>                 (num_dirty_pages_period * TARGET_PAGE_SIZE >
>                     (bytes_xfer_now - bytes_xfer_prev)/2) &&
> -               (dirty_rate_high_cnt++ > 4)) {
> +               (dirty_rate_high_cnt++ >= 2)) {
>                      trace_migration_throttle();
> -                    mig_throttle_on = true;
>                      dirty_rate_high_cnt = 0;
> +                    mig_throttle_guest_down();
>               }
>               bytes_xfer_prev = bytes_xfer_now;
> -        } else {
> -             mig_throttle_on = false;
>          }
> +
>          if (migrate_use_xbzrle()) {
>              if (iterations_prev != acct_info.iterations) {
>                  acct_info.xbzrle_cache_miss_rate =
> @@ -1197,7 +1218,6 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
>      RAMBlock *block;
>      int64_t ram_bitmap_pages; /* Size of bitmap in pages, including gaps */
>  
> -    mig_throttle_on = false;
>      dirty_rate_high_cnt = 0;
>      bitmap_sync_count = 0;
>      migration_bitmap_sync_init();
> @@ -1301,12 +1321,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
>          }
>          pages_sent += pages;
>          acct_info.iterations++;
> -        check_guest_throttling();
> -        /* we want to check in the 1st loop, just in case it was the 1st time
> -           and we had to sync the dirty bitmap.
> -           qemu_get_clock_ns() is a bit expensive, so we only check each some
> -           iterations
> -        */
> +

Those comments are related to the code below, aren't they, not to the line you removed?

>          if ((i & 63) == 0) {
>              uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
>              if (t1 > MAX_WAIT) {
> @@ -1913,51 +1928,3 @@ TargetInfo *qmp_query_target(Error **errp)
>      return info;
>  }
>  
> -/* Stub function that's gets run on the vcpu when its brought out of the
> -   VM to run inside qemu via async_run_on_cpu()*/
> -static void mig_sleep_cpu(void *opq)
> -{
> -    qemu_mutex_unlock_iothread();
> -    g_usleep(30*1000);
> -    qemu_mutex_lock_iothread();
> -}
> -
> -/* To reduce the dirty rate explicitly disallow the VCPUs from spending
> -   much time in the VM. The migration thread will try to catchup.
> -   Workload will experience a performance drop.
> -*/
> -static void mig_throttle_guest_down(void)
> -{
> -    CPUState *cpu;
> -
> -    qemu_mutex_lock_iothread();
> -    CPU_FOREACH(cpu) {
> -        async_run_on_cpu(cpu, mig_sleep_cpu, NULL);
> -    }
> -    qemu_mutex_unlock_iothread();
> -}
> -
> -static void check_guest_throttling(void)
> -{
> -    static int64_t t0;
> -    int64_t        t1;
> -
> -    if (!mig_throttle_on) {
> -        return;
> -    }
> -
> -    if (!t0)  {
> -        t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
> -        return;
> -    }
> -
> -    t1 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
> -
> -    /* If it has been more than 40 ms since the last time the guest
> -     * was throttled then do it again.
> -     */
> -    if (40 < (t1-t0)/1000000) {
> -        mig_throttle_guest_down();
> -        t0 = t1;
> -    }
> -}
> diff --git a/migration/migration.c b/migration/migration.c
> index 05790e9..7708c54 100644
> --- a/migration/migration.c
> +++ b/migration/migration.c
> @@ -25,6 +25,7 @@
>  #include "qemu/thread.h"
>  #include "qmp-commands.h"
>  #include "trace.h"
> +#include "qom/cpu.h"
>  
>  #define MAX_THROTTLE  (32 << 20)      /* Migration speed throttling */
>  
> @@ -858,6 +859,9 @@ static void *migration_thread(void *opaque)
>          }
>      }
>  
> +    /* If we enabled cpu throttling for auto-converge, turn it off. */
> +    cpu_throttle_stop();
> +
>      qemu_mutex_lock_iothread();
>      if (s->state == MIGRATION_STATUS_COMPLETED) {
>          int64_t end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

Is that cpu_throttle_stop() sufficient if I use 'migration cancel', so that
next time through it's all reset and there's no throttling at the beginning?

Dave

> -- 
> 1.9.1
> 
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
Jason J. Herne June 26, 2015, 6:42 p.m. UTC | #2
On 06/26/2015 01:54 PM, Dr. David Alan Gilbert wrote:
> * Jason J. Herne (jjherne@linux.vnet.ibm.com) wrote:
>> Remove traditional auto-converge static 30ms throttling code and replace it
>> with a dynamic throttling algorithm.
>>
>> Additionally, be more aggressive when deciding when to start throttling.
>> Previously we waited for four unproductive memory passes. Now we begin
>> throttling after only two unproductive memory passes. Four seemed quite
>> arbitrary, and only waiting for two passes allows us to complete the
>> migration faster.
>>
>> Signed-off-by: Jason J. Herne <jjherne@linux.vnet.ibm.com>
>> Reviewed-by: Matthew Rosato <mjrosato@linux.vnet.ibm.com>
>> ---
>>   arch_init.c           | 93 +++++++++++++++++----------------------------------
>>   migration/migration.c |  4 +++
>>   2 files changed, 34 insertions(+), 63 deletions(-)
>>
>> diff --git a/arch_init.c b/arch_init.c
>> index 23d3feb..d456527 100644
>> --- a/arch_init.c
>> +++ b/arch_init.c
>> @@ -111,9 +111,7 @@ int graphic_depth = 32;
>>   #endif
>>
>>   const uint32_t arch_type = QEMU_ARCH;
>> -static bool mig_throttle_on;
>>   static int dirty_rate_high_cnt;
>> -static void check_guest_throttling(void);
>>
>>   static uint64_t bitmap_sync_count;
>>
>> @@ -487,6 +485,29 @@ static size_t save_page_header(QEMUFile *f, RAMBlock *block, ram_addr_t offset)
>>       return size;
>>   }
>>
>> +/* Reduce amount of guest cpu execution to hopefully slow down memory writes.
>> + * If guest dirty memory rate is reduced below the rate at which we can
>> + * transfer pages to the destination then we should be able to complete
>> + * migration. Some workloads dirty memory way too fast and will not effectively
>> + * converge, even with auto-converge.
>> + */
>> +static void mig_throttle_guest_down(void)
>> +{
>> +    MigrationState *s = migrate_get_current();
>> +    uint64_t pct_initial =
>> +            s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INITIAL];
>> +    uint64_t pct_icrement =
>> +            s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INCREMENT];
>> +
>> +    /* We have not started throttling yet. Let's start it. */
>> +    if (!cpu_throttle_active()) {
>> +        cpu_throttle_set(pct_initial);
>> +    } else {
>> +        /* Throttling already on, just increase the rate */
>> +        cpu_throttle_set(cpu_throttle_get_percentage() + pct_icrement);
>> +    }
>
> Shouldn't this cap it at 100% ?
>

The code in cpu_throttle_set() actually caps it at 99 percent.
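
For reference, a rough sketch of that clamping (illustrative only; the names
and the lower bound here are assumptions describing the behaviour above, not a
quote of the real cpus.c code):

#include <stdio.h>

#define THROTTLE_PCT_MAX_SKETCH 99   /* cap mentioned above           */
#define THROTTLE_PCT_MIN_SKETCH 1    /* assumed lower bound           */

static int throttle_percentage_sketch;   /* assumed backing variable  */

/* Whatever percentage the caller asks for is clamped, so repeated
 * auto-converge increments can never push the throttle to 100%.
 */
static void cpu_throttle_set_sketch(int new_throttle_pct)
{
    if (new_throttle_pct > THROTTLE_PCT_MAX_SKETCH) {
        new_throttle_pct = THROTTLE_PCT_MAX_SKETCH;
    }
    if (new_throttle_pct < THROTTLE_PCT_MIN_SKETCH) {
        new_throttle_pct = THROTTLE_PCT_MIN_SKETCH;
    }
    throttle_percentage_sketch = new_throttle_pct;
    /* the real function would also (re)arm the vcpu throttle timer here */
}

int main(void)
{
    cpu_throttle_set_sketch(20 + 10 * 9);   /* 110% requested -> clamped */
    printf("requested 110%%, effective %d%%\n", throttle_percentage_sketch);
    return 0;
}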

...
>> @@ -1197,7 +1218,6 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
>>       RAMBlock *block;
>>       int64_t ram_bitmap_pages; /* Size of bitmap in pages, including gaps */
>>
>> -    mig_throttle_on = false;
>>       dirty_rate_high_cnt = 0;
>>       bitmap_sync_count = 0;
>>       migration_bitmap_sync_init();
>> @@ -1301,12 +1321,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
>>           }
>>           pages_sent += pages;
>>           acct_info.iterations++;
>> -        check_guest_throttling();
>> -        /* we want to check in the 1st loop, just in case it was the 1st time
>> -           and we had to sync the dirty bitmap.
>> -           qemu_get_clock_ns() is a bit expensive, so we only check each some
>> -           iterations
>> -        */
>> +
>
> Those comments are related to the code below, aren't they, not to the line you removed?
>

Yes, oversight on my part. I will re-add them for v4 :) Thanks!!

>>           if ((i & 63) == 0) {
>>               uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
>>               if (t1 > MAX_WAIT) {
>> @@ -1913,51 +1928,3 @@ TargetInfo *qmp_query_target(Error **errp)
>>       return info;
>>   }
>>
...
Jason J. Herne June 26, 2015, 7:07 p.m. UTC | #3
On 06/26/2015 01:54 PM, Dr. David Alan Gilbert wrote:
> * Jason J. Herne (jjherne@linux.vnet.ibm.com) wrote:
...
>> diff --git a/migration/migration.c b/migration/migration.c
>> index 05790e9..7708c54 100644
>> --- a/migration/migration.c
>> +++ b/migration/migration.c
>> @@ -25,6 +25,7 @@
>>   #include "qemu/thread.h"
>>   #include "qmp-commands.h"
>>   #include "trace.h"
>> +#include "qom/cpu.h"
>>
>>   #define MAX_THROTTLE  (32 << 20)      /* Migration speed throttling */
>>
>> @@ -858,6 +859,9 @@ static void *migration_thread(void *opaque)
>>           }
>>       }
>>
>> +    /* If we enabled cpu throttling for auto-converge, turn it off. */
>> +    cpu_throttle_stop();
>> +
>>       qemu_mutex_lock_iothread();
>>       if (s->state == MIGRATION_STATUS_COMPLETED) {
>>           int64_t end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
>
> Is that cpu_throttle_stop() sufficient if I use 'migration cancel', so that
> next time through it's all reset and there's no throttling at the beginning?
>

It will be reset when cpu_throttle_set() is called again, once auto-converge
re-enables throttling for the next migration. That happens in patch 3, in
mig_throttle_guest_down().

Basically, cpu_throttle_set() requires the caller to provide a throttling
percentage, so starting a new throttling operation simply overwrites whatever
value was in effect before.
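
To make that sequencing concrete, here is a small self-contained sketch of the
lifecycle (the stubs stand in for the real cpu_throttle_set()/cpu_throttle_stop()
from qom/cpu.h; the 20%/10% values and the ordering are assumptions drawn from
the description above):

#include <stdio.h>

static int throttle_pct;   /* 0 means throttling inactive */

/* Stand-ins for the real helpers (behaviour assumed for illustration). */
static void cpu_throttle_set(int pct)  { throttle_pct = pct > 99 ? 99 : pct; }
static void cpu_throttle_stop(void)    { throttle_pct = 0; }

int main(void)
{
    /* First migration: auto-converge starts throttling, then ramps it. */
    cpu_throttle_set(20);                 /* mig_throttle_guest_down(), initial */
    cpu_throttle_set(throttle_pct + 10);  /* later unproductive syncs increment */
    printf("during migration 1: %d%%\n", throttle_pct);

    /* 'migration cancel': migration_thread() exits its loop and stops it. */
    cpu_throttle_stop();
    printf("after cancel:       %d%%\n", throttle_pct);

    /* Next migration: the first unproductive sync simply calls
     * cpu_throttle_set() with the initial percentage again, overwriting
     * anything left over, so no stale throttle value can leak in.
     */
    cpu_throttle_set(20);
    printf("during migration 2: %d%%\n", throttle_pct);
    return 0;
}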

Patch

diff --git a/arch_init.c b/arch_init.c
index 23d3feb..d456527 100644
--- a/arch_init.c
+++ b/arch_init.c
@@ -111,9 +111,7 @@  int graphic_depth = 32;
 #endif
 
 const uint32_t arch_type = QEMU_ARCH;
-static bool mig_throttle_on;
 static int dirty_rate_high_cnt;
-static void check_guest_throttling(void);
 
 static uint64_t bitmap_sync_count;
 
@@ -487,6 +485,29 @@  static size_t save_page_header(QEMUFile *f, RAMBlock *block, ram_addr_t offset)
     return size;
 }
 
+/* Reduce amount of guest cpu execution to hopefully slow down memory writes.
+ * If guest dirty memory rate is reduced below the rate at which we can
+ * transfer pages to the destination then we should be able to complete
+ * migration. Some workloads dirty memory way too fast and will not effectively
+ * converge, even with auto-converge.
+ */
+static void mig_throttle_guest_down(void)
+{
+    MigrationState *s = migrate_get_current();
+    uint64_t pct_initial =
+            s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INITIAL];
+    uint64_t pct_icrement =
+            s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INCREMENT];
+
+    /* We have not started throttling yet. Let's start it. */
+    if (!cpu_throttle_active()) {
+        cpu_throttle_set(pct_initial);
+    } else {
+        /* Throttling already on, just increase the rate */
+        cpu_throttle_set(cpu_throttle_get_percentage() + pct_icrement);
+    }
+}
+
 /* Update the xbzrle cache to reflect a page that's been sent as all 0.
  * The important thing is that a stale (not-yet-0'd) page be replaced
  * by the new data.
@@ -714,21 +735,21 @@  static void migration_bitmap_sync(void)
             /* The following detection logic can be refined later. For now:
                Check to see if the dirtied bytes is 50% more than the approx.
                amount of bytes that just got transferred since the last time we
-               were in this routine. If that happens >N times (for now N==4)
-               we turn on the throttle down logic */
+               were in this routine. If that happens twice, start or increase
+               throttling */
             bytes_xfer_now = ram_bytes_transferred();
+
             if (s->dirty_pages_rate &&
                (num_dirty_pages_period * TARGET_PAGE_SIZE >
                    (bytes_xfer_now - bytes_xfer_prev)/2) &&
-               (dirty_rate_high_cnt++ > 4)) {
+               (dirty_rate_high_cnt++ >= 2)) {
                     trace_migration_throttle();
-                    mig_throttle_on = true;
                     dirty_rate_high_cnt = 0;
+                    mig_throttle_guest_down();
              }
              bytes_xfer_prev = bytes_xfer_now;
-        } else {
-             mig_throttle_on = false;
         }
+
         if (migrate_use_xbzrle()) {
             if (iterations_prev != acct_info.iterations) {
                 acct_info.xbzrle_cache_miss_rate =
@@ -1197,7 +1218,6 @@  static int ram_save_setup(QEMUFile *f, void *opaque)
     RAMBlock *block;
     int64_t ram_bitmap_pages; /* Size of bitmap in pages, including gaps */
 
-    mig_throttle_on = false;
     dirty_rate_high_cnt = 0;
     bitmap_sync_count = 0;
     migration_bitmap_sync_init();
@@ -1301,12 +1321,7 @@  static int ram_save_iterate(QEMUFile *f, void *opaque)
         }
         pages_sent += pages;
         acct_info.iterations++;
-        check_guest_throttling();
-        /* we want to check in the 1st loop, just in case it was the 1st time
-           and we had to sync the dirty bitmap.
-           qemu_get_clock_ns() is a bit expensive, so we only check each some
-           iterations
-        */
+
         if ((i & 63) == 0) {
             uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
             if (t1 > MAX_WAIT) {
@@ -1913,51 +1928,3 @@  TargetInfo *qmp_query_target(Error **errp)
     return info;
 }
 
-/* Stub function that's gets run on the vcpu when its brought out of the
-   VM to run inside qemu via async_run_on_cpu()*/
-static void mig_sleep_cpu(void *opq)
-{
-    qemu_mutex_unlock_iothread();
-    g_usleep(30*1000);
-    qemu_mutex_lock_iothread();
-}
-
-/* To reduce the dirty rate explicitly disallow the VCPUs from spending
-   much time in the VM. The migration thread will try to catchup.
-   Workload will experience a performance drop.
-*/
-static void mig_throttle_guest_down(void)
-{
-    CPUState *cpu;
-
-    qemu_mutex_lock_iothread();
-    CPU_FOREACH(cpu) {
-        async_run_on_cpu(cpu, mig_sleep_cpu, NULL);
-    }
-    qemu_mutex_unlock_iothread();
-}
-
-static void check_guest_throttling(void)
-{
-    static int64_t t0;
-    int64_t        t1;
-
-    if (!mig_throttle_on) {
-        return;
-    }
-
-    if (!t0)  {
-        t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
-        return;
-    }
-
-    t1 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
-
-    /* If it has been more than 40 ms since the last time the guest
-     * was throttled then do it again.
-     */
-    if (40 < (t1-t0)/1000000) {
-        mig_throttle_guest_down();
-        t0 = t1;
-    }
-}
diff --git a/migration/migration.c b/migration/migration.c
index 05790e9..7708c54 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -25,6 +25,7 @@ 
 #include "qemu/thread.h"
 #include "qmp-commands.h"
 #include "trace.h"
+#include "qom/cpu.h"
 
 #define MAX_THROTTLE  (32 << 20)      /* Migration speed throttling */
 
@@ -858,6 +859,9 @@  static void *migration_thread(void *opaque)
         }
     }
 
+    /* If we enabled cpu throttling for auto-converge, turn it off. */
+    cpu_throttle_stop();
+
     qemu_mutex_lock_iothread();
     if (s->state == MIGRATION_STATUS_COMPLETED) {
         int64_t end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);