
[3/7] blockjob: create block_job_throttle

Message ID 20171214005953.8898-4-jsnow@redhat.com
State New
Series blockjob: refactor mirror_throttle

Commit Message

John Snow Dec. 14, 2017, 12:59 a.m. UTC
This will replace mirror_throttle, for reuse in other jobs.

Signed-off-by: John Snow <jsnow@redhat.com>
---
 block/mirror.c               | 15 ++-------------
 blockjob.c                   | 11 +++++++++++
 include/block/blockjob_int.h |  9 +++++++++
 3 files changed, 22 insertions(+), 13 deletions(-)
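
For context, the sketch below shows the kind of reuse the commit message anticipates: another job's main loop calling the new helper before each chunk of work. Only block_job_throttle() and block_job_is_cancelled() come from this patch; the job type, field names, and loop bounds are hypothetical.

/* Hypothetical main loop of some other block job (illustrative only;
 * OtherBlockJob, len and cluster_size are made-up names). */
static int coroutine_fn other_job_run(OtherBlockJob *s)
{
    int64_t offset;

    for (offset = 0; offset < s->len; offset += s->cluster_size) {
        block_job_throttle(&s->common);          /* yield or poll for pause */

        if (block_job_is_cancelled(&s->common)) {
            return 0;
        }

        /* ... copy or process one chunk at @offset ... */
    }
    return 0;
}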

Comments

Paolo Bonzini Dec. 14, 2017, 8:39 a.m. UTC | #1
On 14/12/2017 01:59, John Snow wrote:
> + * Yield if it has been SLICE_TIME nanoseconds since the last yield.
> + * Otherwise, check if we need to pause (and update the yield counter).

What is the yield counter?

Thanks,

Paolo
John Snow Dec. 14, 2017, 3:57 p.m. UTC | #2
On 12/14/2017 03:39 AM, Paolo Bonzini wrote:
> On 14/12/2017 01:59, John Snow wrote:
>> + * Yield if it has been SLICE_TIME nanoseconds since the last yield.
>> + * Otherwise, check if we need to pause (and update the yield counter).
> 
> What is the yield counter?
> 
> Thanks,
> 
> Paolo
> 

Fuzzy brain talk.

I meant to refer to the last_yield_ns field, which gets updated by both
the pause and sleep paths. I'm trying to document that no matter what
happens when you call this function (a scheduled 0ns sleep, a requested
sleep, or a requested pause), it will update the last_yield_ns
variable.
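
To make that concrete, here is a minimal standalone model of the pattern (ordinary POSIX C, not QEMU code): both branches end up refreshing the timestamp, which is the invariant the comment was trying to capture. The 100 ms slice value and the helper bodies are assumptions for illustration; the helpers stand in for block_job_sleep_ns() and block_job_pause_point().

#include <stdint.h>
#include <time.h>

#define SLICE_TIME 100000000LL          /* 100 ms in ns (assumed value) */

typedef struct Job {
    int64_t last_yield_ns;
} Job;

static int64_t now_ns(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_REALTIME, &ts);
    return (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

static void job_sleep_ns(Job *job, int64_t ns)
{
    struct timespec ts = { .tv_sec = ns / 1000000000LL,
                           .tv_nsec = ns % 1000000000LL };
    nanosleep(&ts, NULL);               /* 0 ns still gives up the CPU once */
    job->last_yield_ns = now_ns();      /* sleep path refreshes the stamp */
}

static void job_pause_point(Job *job)
{
    /* ... service a pause request here, if one is pending ... */
    job->last_yield_ns = now_ns();      /* pause path refreshes it too */
}

static void job_throttle(Job *job)
{
    if (now_ns() - job->last_yield_ns > SLICE_TIME) {
        job_sleep_ns(job, 0);           /* slice expired: yield */
    } else {
        job_pause_point(job);           /* cheap check within the slice */
    }
}

int main(void)
{
    Job job = { .last_yield_ns = now_ns() };
    for (int i = 0; i < 1000000; i++) {
        job_throttle(&job);             /* busy loop, periodically yielding */
    }
    return 0;
}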
Stefan Hajnoczi Dec. 18, 2017, 2:27 p.m. UTC | #3
On Wed, Dec 13, 2017 at 07:59:49PM -0500, John Snow wrote:
> +/**
> + * block_job_throttle:
> + * @job: The job that calls the function.
> + *
> + * Yield if it has been SLICE_TIME nanoseconds since the last yield.
> + * Otherwise, check if we need to pause (and update the yield counter).
> + */
> +void block_job_throttle(BlockJob *job);

This name is easily confused with the block-job-set-speed
ratelimit/throttling feature.

I suggest block_job_cpu_relax() or just block_job_relax() to make it
clear we're giving up our CPU time voluntarily - but this isn't
"throttling".
Jeff Cody Jan. 2, 2018, 9:23 p.m. UTC | #4
On Wed, Dec 13, 2017 at 07:59:49PM -0500, John Snow wrote:
> This will replace mirror_throttle, for reuse in other jobs.
> 
> Signed-off-by: John Snow <jsnow@redhat.com>
> ---
>  block/mirror.c               | 15 ++-------------
>  blockjob.c                   | 11 +++++++++++
>  include/block/blockjob_int.h |  9 +++++++++
>  3 files changed, 22 insertions(+), 13 deletions(-)
> 
> diff --git a/block/mirror.c b/block/mirror.c
> index eef5b598f5..60b52cfb19 100644
> --- a/block/mirror.c
> +++ b/block/mirror.c
> @@ -590,17 +590,6 @@ static void mirror_exit(BlockJob *job, void *opaque)
>      bdrv_unref(src);
>  }
>  
> -static void mirror_throttle(MirrorBlockJob *s)
> -{
> -    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
> -
> -    if (now - s->common.last_yield_ns > SLICE_TIME) {
> -        block_job_sleep_ns(&s->common, 0);
> -    } else {
> -        block_job_pause_point(&s->common);
> -    }
> -}
> -
>  static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
>  {
>      int64_t offset;
> @@ -621,7 +610,7 @@ static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
>              int bytes = MIN(s->bdev_length - offset,
>                              QEMU_ALIGN_DOWN(INT_MAX, s->granularity));
>  
> -            mirror_throttle(s);
> +            block_job_throttle(&s->common);
>  
>              if (block_job_is_cancelled(&s->common)) {
>                  s->initial_zeroing_ongoing = false;
> @@ -649,7 +638,7 @@ static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
>          int bytes = MIN(s->bdev_length - offset,
>                          QEMU_ALIGN_DOWN(INT_MAX, s->granularity));
>  
> -        mirror_throttle(s);
> +        block_job_throttle(&s->common);
>  
>          if (block_job_is_cancelled(&s->common)) {
>              return 0;
> diff --git a/blockjob.c b/blockjob.c
> index 8cbc142f57..8d0c89a813 100644
> --- a/blockjob.c
> +++ b/blockjob.c
> @@ -882,6 +882,17 @@ void block_job_yield(BlockJob *job)
>      block_job_pause_point(job);
>  }
>  
> +void block_job_throttle(BlockJob *job)
> +{
> +    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
> +
> +    if (now - job->last_yield_ns > SLICE_TIME) {
> +        block_job_sleep_ns(job, 0);
> +    } else {
> +        block_job_pause_point(job);
> +    }
> +}
> +
>  void block_job_iostatus_reset(BlockJob *job)
>  {
>      if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
> diff --git a/include/block/blockjob_int.h b/include/block/blockjob_int.h
> index 209fa1bb3e..1a771b1e2e 100644
> --- a/include/block/blockjob_int.h
> +++ b/include/block/blockjob_int.h
> @@ -157,6 +157,15 @@ void block_job_sleep_ns(BlockJob *job, int64_t ns);
>   */
>  void block_job_yield(BlockJob *job);
>  
> +/**
> + * block_job_throttle:
> + * @job: The job that calls the function.
> + *
> + * Yield if it has been SLICE_TIME nanoseconds since the last yield.
> + * Otherwise, check if we need to pause (and update the yield counter).
> + */
> +void block_job_throttle(BlockJob *job);
> +
>  /**
>   * block_job_pause_all:
>   *
> -- 
> 2.14.3
> 

Reviewed-by: Jeff Cody <jcody@redhat.com>

Patch

diff --git a/block/mirror.c b/block/mirror.c
index eef5b598f5..60b52cfb19 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -590,17 +590,6 @@ static void mirror_exit(BlockJob *job, void *opaque)
     bdrv_unref(src);
 }
 
-static void mirror_throttle(MirrorBlockJob *s)
-{
-    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
-
-    if (now - s->common.last_yield_ns > SLICE_TIME) {
-        block_job_sleep_ns(&s->common, 0);
-    } else {
-        block_job_pause_point(&s->common);
-    }
-}
-
 static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
 {
     int64_t offset;
@@ -621,7 +610,7 @@ static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
             int bytes = MIN(s->bdev_length - offset,
                             QEMU_ALIGN_DOWN(INT_MAX, s->granularity));
 
-            mirror_throttle(s);
+            block_job_throttle(&s->common);
 
             if (block_job_is_cancelled(&s->common)) {
                 s->initial_zeroing_ongoing = false;
@@ -649,7 +638,7 @@ static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
         int bytes = MIN(s->bdev_length - offset,
                         QEMU_ALIGN_DOWN(INT_MAX, s->granularity));
 
-        mirror_throttle(s);
+        block_job_throttle(&s->common);
 
         if (block_job_is_cancelled(&s->common)) {
             return 0;
diff --git a/blockjob.c b/blockjob.c
index 8cbc142f57..8d0c89a813 100644
--- a/blockjob.c
+++ b/blockjob.c
@@ -882,6 +882,17 @@ void block_job_yield(BlockJob *job)
     block_job_pause_point(job);
 }
 
+void block_job_throttle(BlockJob *job)
+{
+    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
+
+    if (now - job->last_yield_ns > SLICE_TIME) {
+        block_job_sleep_ns(job, 0);
+    } else {
+        block_job_pause_point(job);
+    }
+}
+
 void block_job_iostatus_reset(BlockJob *job)
 {
     if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
diff --git a/include/block/blockjob_int.h b/include/block/blockjob_int.h
index 209fa1bb3e..1a771b1e2e 100644
--- a/include/block/blockjob_int.h
+++ b/include/block/blockjob_int.h
@@ -157,6 +157,15 @@ void block_job_sleep_ns(BlockJob *job, int64_t ns);
  */
 void block_job_yield(BlockJob *job);
 
+/**
+ * block_job_throttle:
+ * @job: The job that calls the function.
+ *
+ * Yield if it has been SLICE_TIME nanoseconds since the last yield.
+ * Otherwise, check if we need to pause (and update the yield counter).
+ */
+void block_job_throttle(BlockJob *job);
+
 /**
  * block_job_pause_all:
  *