diff mbox series

[06/14] migration: Use atomic ops properly for page accountings

Message ID 20220920225212.48785-1-peterx@redhat.com
State New
Headers show
Series migration: Postcopy Preempt-Full | expand

Commit Message

Peter Xu Sept. 20, 2022, 10:52 p.m. UTC
To prepare for thread-safety on page accountings, at least below counters
need to be accessed only atomically, they are:

        ram_counters.transferred
        ram_counters.duplicate
        ram_counters.normal
        ram_counters.postcopy_bytes

There are a lot of other counters but they won't be accessed outside
migration thread, then they're still safe to be accessed without atomic
ops.

Signed-off-by: Peter Xu <peterx@redhat.com>
---
 migration/migration.c | 10 +++++-----
 migration/multifd.c   |  2 +-
 migration/ram.c       | 29 +++++++++++++++--------------
 3 files changed, 21 insertions(+), 20 deletions(-)

Comments

Dr. David Alan Gilbert Oct. 4, 2022, 4:59 p.m. UTC | #1
* Peter Xu (peterx@redhat.com) wrote:
> To prepare for thread-safety on page accountings, at least below counters
> need to be accessed only atomically, they are:
> 
>         ram_counters.transferred
>         ram_counters.duplicate
>         ram_counters.normal
>         ram_counters.postcopy_bytes
> 
> There are a lot of other counters but they won't be accessed outside
> migration thread, then they're still safe to be accessed without atomic
> ops.
> 
> Signed-off-by: Peter Xu <peterx@redhat.com>

I think this is OK; I'm not sure whether the memset 0's of ram_counters
technically need changing.
I'd love to put a comment somewhere saying these fields need to be
atomically read, but they're qapi-defined so I don't think we can.

Finally, we probably need to check these are happy on 32 bit builds,
sometimes it's a bit funny with atomic adds.


Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>

> ---
>  migration/migration.c | 10 +++++-----
>  migration/multifd.c   |  2 +-
>  migration/ram.c       | 29 +++++++++++++++--------------
>  3 files changed, 21 insertions(+), 20 deletions(-)
> 
> diff --git a/migration/migration.c b/migration/migration.c
> index 07c74a79a2..0eacc0c99b 100644
> --- a/migration/migration.c
> +++ b/migration/migration.c
> @@ -1048,13 +1048,13 @@ static void populate_ram_info(MigrationInfo *info, MigrationState *s)
>  
>      info->has_ram = true;
>      info->ram = g_malloc0(sizeof(*info->ram));
> -    info->ram->transferred = ram_counters.transferred;
> +    info->ram->transferred = qatomic_read(&ram_counters.transferred);
>      info->ram->total = ram_bytes_total();
> -    info->ram->duplicate = ram_counters.duplicate;
> +    info->ram->duplicate = qatomic_read(&ram_counters.duplicate);
>      /* legacy value.  It is not used anymore */
>      info->ram->skipped = 0;
> -    info->ram->normal = ram_counters.normal;
> -    info->ram->normal_bytes = ram_counters.normal * page_size;
> +    info->ram->normal = qatomic_read(&ram_counters.normal);
> +    info->ram->normal_bytes = info->ram->normal * page_size;
>      info->ram->mbps = s->mbps;
>      info->ram->dirty_sync_count = ram_counters.dirty_sync_count;
>      info->ram->dirty_sync_missed_zero_copy =
> @@ -1065,7 +1065,7 @@ static void populate_ram_info(MigrationInfo *info, MigrationState *s)
>      info->ram->pages_per_second = s->pages_per_second;
>      info->ram->precopy_bytes = ram_counters.precopy_bytes;
>      info->ram->downtime_bytes = ram_counters.downtime_bytes;
> -    info->ram->postcopy_bytes = ram_counters.postcopy_bytes;
> +    info->ram->postcopy_bytes = qatomic_read(&ram_counters.postcopy_bytes);
>  
>      if (migrate_use_xbzrle()) {
>          info->has_xbzrle_cache = true;
> diff --git a/migration/multifd.c b/migration/multifd.c
> index 586ddc9d65..460326acd4 100644
> --- a/migration/multifd.c
> +++ b/migration/multifd.c
> @@ -437,7 +437,7 @@ static int multifd_send_pages(QEMUFile *f)
>                  + p->packet_len;
>      qemu_file_acct_rate_limit(f, transferred);
>      ram_counters.multifd_bytes += transferred;
> -    ram_counters.transferred += transferred;
> +    qatomic_add(&ram_counters.transferred, transferred);
>      qemu_mutex_unlock(&p->mutex);
>      qemu_sem_post(&p->sem);
>  
> diff --git a/migration/ram.c b/migration/ram.c
> index 6e7de6087a..5bd3d76bf0 100644
> --- a/migration/ram.c
> +++ b/migration/ram.c
> @@ -432,11 +432,11 @@ static void ram_transferred_add(uint64_t bytes)
>      if (runstate_is_running()) {
>          ram_counters.precopy_bytes += bytes;
>      } else if (migration_in_postcopy()) {
> -        ram_counters.postcopy_bytes += bytes;
> +        qatomic_add(&ram_counters.postcopy_bytes, bytes);
>      } else {
>          ram_counters.downtime_bytes += bytes;
>      }
> -    ram_counters.transferred += bytes;
> +    qatomic_add(&ram_counters.transferred, bytes);
>  }
>  
>  void dirty_sync_missed_zero_copy(void)
> @@ -725,7 +725,7 @@ void mig_throttle_counter_reset(void)
>  
>      rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
>      rs->num_dirty_pages_period = 0;
> -    rs->bytes_xfer_prev = ram_counters.transferred;
> +    rs->bytes_xfer_prev = qatomic_read(&ram_counters.transferred);
>  }
>  
>  /**
> @@ -1085,8 +1085,9 @@ uint64_t ram_pagesize_summary(void)
>  
>  uint64_t ram_get_total_transferred_pages(void)
>  {
> -    return  ram_counters.normal + ram_counters.duplicate +
> -                compression_counters.pages + xbzrle_counters.pages;
> +    return  qatomic_read(&ram_counters.normal) +
> +        qatomic_read(&ram_counters.duplicate) +
> +        compression_counters.pages + xbzrle_counters.pages;
>  }
>  
>  static void migration_update_rates(RAMState *rs, int64_t end_time)
> @@ -1145,8 +1146,8 @@ static void migration_trigger_throttle(RAMState *rs)
>  {
>      MigrationState *s = migrate_get_current();
>      uint64_t threshold = s->parameters.throttle_trigger_threshold;
> -
> -    uint64_t bytes_xfer_period = ram_counters.transferred - rs->bytes_xfer_prev;
> +    uint64_t bytes_xfer_period =
> +        qatomic_read(&ram_counters.transferred) - rs->bytes_xfer_prev;
>      uint64_t bytes_dirty_period = rs->num_dirty_pages_period * TARGET_PAGE_SIZE;
>      uint64_t bytes_dirty_threshold = bytes_xfer_period * threshold / 100;
>  
> @@ -1285,7 +1286,7 @@ static int save_zero_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
>      int len = save_zero_page_to_file(rs, rs->f, block, offset);
>  
>      if (len) {
> -        ram_counters.duplicate++;
> +        qatomic_inc(&ram_counters.duplicate);
>          ram_transferred_add(len);
>          return 1;
>      }
> @@ -1322,9 +1323,9 @@ static bool control_save_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
>      }
>  
>      if (bytes_xmit > 0) {
> -        ram_counters.normal++;
> +        qatomic_inc(&ram_counters.normal);
>      } else if (bytes_xmit == 0) {
> -        ram_counters.duplicate++;
> +        qatomic_inc(&ram_counters.duplicate);
>      }
>  
>      return true;
> @@ -1354,7 +1355,7 @@ static int save_normal_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
>          qemu_put_buffer(rs->f, buf, TARGET_PAGE_SIZE);
>      }
>      ram_transferred_add(TARGET_PAGE_SIZE);
> -    ram_counters.normal++;
> +    qatomic_inc(&ram_counters.normal);
>      return 1;
>  }
>  
> @@ -1448,7 +1449,7 @@ update_compress_thread_counts(const CompressParam *param, int bytes_xmit)
>      ram_transferred_add(bytes_xmit);
>  
>      if (param->zero_page) {
> -        ram_counters.duplicate++;
> +        qatomic_inc(&ram_counters.duplicate);
>          return;
>      }
>  
> @@ -2620,9 +2621,9 @@ void acct_update_position(QEMUFile *f, size_t size, bool zero)
>      uint64_t pages = size / TARGET_PAGE_SIZE;
>  
>      if (zero) {
> -        ram_counters.duplicate += pages;
> +        qatomic_add(&ram_counters.duplicate, pages);
>      } else {
> -        ram_counters.normal += pages;
> +        qatomic_add(&ram_counters.normal, pages);
>          ram_transferred_add(size);
>          qemu_file_credit_transfer(f, size);
>      }
> -- 
> 2.32.0
>
Peter Xu Oct. 4, 2022, 7:23 p.m. UTC | #2
On Tue, Oct 04, 2022 at 05:59:36PM +0100, Dr. David Alan Gilbert wrote:
> * Peter Xu (peterx@redhat.com) wrote:
> > To prepare for thread-safety on page accountings, at least below counters
> > need to be accessed only atomically, they are:
> > 
> >         ram_counters.transferred
> >         ram_counters.duplicate
> >         ram_counters.normal
> >         ram_counters.postcopy_bytes
> > 
> > There are a lot of other counters but they won't be accessed outside
> > migration thread, then they're still safe to be accessed without atomic
> > ops.
> > 
> > Signed-off-by: Peter Xu <peterx@redhat.com>
> 
> I think this is OK; I'm not sure whether the memset 0's of ram_counters
> technically need changing.

IMHO they're fine - what we need there should be something like WRITE_ONCE()
just to make sure no register caches (actually atomic_write() is normally
implemented with WRITE_ONCE afaik).  But I think that's already guaranteed
by memset() as the function call does, so we should be 100% safe.

> I'd love to put a comment somewhere saying these fields need to be
> atomically read, but their qapi defined so I don't think we can.

How about I add a comment above ram_counters declarations in ram.c?

> 
> Finally, we probably need to check these are happy on 32 bit builds,
> sometimes it's a bit funny with atomic adds.

Yeah.. I hope using qatomic_*() APIs can help me avoid any issues.  Or
anything concerning?  I'd be happy to test on specific things if there are.

> 
> 
> Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>

Thanks!
Dr. David Alan Gilbert Oct. 5, 2022, 11:38 a.m. UTC | #3
* Peter Xu (peterx@redhat.com) wrote:
> On Tue, Oct 04, 2022 at 05:59:36PM +0100, Dr. David Alan Gilbert wrote:
> > * Peter Xu (peterx@redhat.com) wrote:
> > > To prepare for thread-safety on page accountings, at least below counters
> > > need to be accessed only atomically, they are:
> > > 
> > >         ram_counters.transferred
> > >         ram_counters.duplicate
> > >         ram_counters.normal
> > >         ram_counters.postcopy_bytes
> > > 
> > > There are a lot of other counters but they won't be accessed outside
> > > migration thread, then they're still safe to be accessed without atomic
> > > ops.
> > > 
> > > Signed-off-by: Peter Xu <peterx@redhat.com>
> > 
> > I think this is OK; I'm not sure whether the memset 0's of ram_counters
> > technically need changing.
> 
> IMHO they're fine - what we need there should be thing like WRITE_ONCE()
> just to make sure no register caches (actually atomic_write() is normally
> implemented with WRITE_ONCE afaik).  But I think that's already guaranteed
> by memset() as the function call does, so we should be 100% safe.

I agree you're probably OK.

> > I'd love to put a comment somewhere saying these fields need to be
> > atomically read, but their qapi defined so I don't think we can.
> 
> How about I add a comment above ram_counters declarations in ram.c?

Yeh.

> > 
> > Finally, we probably need to check these are happy on 32 bit builds,
> > sometimes it's a bit funny with atomic adds.
> 
> Yeah.. I hope using qatomic_*() APIs can help me avoid any issues.  Or
> anything concerning?  I'd be happy to test on specific things if there are.

I just remember hitting problems in the past; especially if we end up
with trying to do a 64 bit atomic on a platform that can only do 32???

Dave

> > 
> > 
> > Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
> 
> Thanks!
> 
> -- 
> Peter Xu
>
Peter Xu Oct. 5, 2022, 1:53 p.m. UTC | #4
On Wed, Oct 05, 2022 at 12:38:05PM +0100, Dr. David Alan Gilbert wrote:
> * Peter Xu (peterx@redhat.com) wrote:
> > On Tue, Oct 04, 2022 at 05:59:36PM +0100, Dr. David Alan Gilbert wrote:
> > > * Peter Xu (peterx@redhat.com) wrote:
> > > > To prepare for thread-safety on page accountings, at least below counters
> > > > need to be accessed only atomically, they are:
> > > > 
> > > >         ram_counters.transferred
> > > >         ram_counters.duplicate
> > > >         ram_counters.normal
> > > >         ram_counters.postcopy_bytes
> > > > 
> > > > There are a lot of other counters but they won't be accessed outside
> > > > migration thread, then they're still safe to be accessed without atomic
> > > > ops.
> > > > 
> > > > Signed-off-by: Peter Xu <peterx@redhat.com>
> > > 
> > > I think this is OK; I'm not sure whether the memset 0's of ram_counters
> > > technically need changing.
> > 
> > IMHO they're fine - what we need there should be thing like WRITE_ONCE()
> > just to make sure no register caches (actually atomic_write() is normally
> > implemented with WRITE_ONCE afaik).  But I think that's already guaranteed
> > by memset() as the function call does, so we should be 100% safe.
> 
> I agree you're probably OK.
> 
> > > I'd love to put a comment somewhere saying these fields need to be
> > > atomically read, but their qapi defined so I don't think we can.
> > 
> > How about I add a comment above ram_counters declarations in ram.c?
> 
> Yeh.
> 
> > > 
> > > Finally, we probably need to check these are happy on 32 bit builds,
> > > sometimes it's a bit funny with atomic adds.
> > 
> > Yeah.. I hope using qatomic_*() APIs can help me avoid any issues.  Or
> > anything concerning?  I'd be happy to test on specific things if there are.
> 
> I just remember hitting problems in the past; especially if we end up
> with trying to do a 64 bit atomic on a platofmr that can only do 32???

I see what you meant... when I was looking in the existing callers of
qatomic_add(), I do find that we seem to have Stat64 just for that
!CONFIG_ATOMIC64 problem.

I'll dig a bit on whether and how we can do that; the thing is these
counters are in the qapi so I need to make sure it can support Stat64
somehow.  Hmm..
Peter Xu Oct. 6, 2022, 8:40 p.m. UTC | #5
On Wed, Oct 05, 2022 at 09:53:57AM -0400, Peter Xu wrote:
> On Wed, Oct 05, 2022 at 12:38:05PM +0100, Dr. David Alan Gilbert wrote:
> > * Peter Xu (peterx@redhat.com) wrote:
> > > On Tue, Oct 04, 2022 at 05:59:36PM +0100, Dr. David Alan Gilbert wrote:
> > > > * Peter Xu (peterx@redhat.com) wrote:
> > > > > To prepare for thread-safety on page accountings, at least below counters
> > > > > need to be accessed only atomically, they are:
> > > > > 
> > > > >         ram_counters.transferred
> > > > >         ram_counters.duplicate
> > > > >         ram_counters.normal
> > > > >         ram_counters.postcopy_bytes
> > > > > 
> > > > > There are a lot of other counters but they won't be accessed outside
> > > > > migration thread, then they're still safe to be accessed without atomic
> > > > > ops.
> > > > > 
> > > > > Signed-off-by: Peter Xu <peterx@redhat.com>
> > > > 
> > > > I think this is OK; I'm not sure whether the memset 0's of ram_counters
> > > > technically need changing.
> > > 
> > > IMHO they're fine - what we need there should be thing like WRITE_ONCE()
> > > just to make sure no register caches (actually atomic_write() is normally
> > > implemented with WRITE_ONCE afaik).  But I think that's already guaranteed
> > > by memset() as the function call does, so we should be 100% safe.
> > 
> > I agree you're probably OK.
> > 
> > > > I'd love to put a comment somewhere saying these fields need to be
> > > > atomically read, but their qapi defined so I don't think we can.
> > > 
> > > How about I add a comment above ram_counters declarations in ram.c?
> > 
> > Yeh.
> > 
> > > > 
> > > > Finally, we probably need to check these are happy on 32 bit builds,
> > > > sometimes it's a bit funny with atomic adds.
> > > 
> > > Yeah.. I hope using qatomic_*() APIs can help me avoid any issues.  Or
> > > anything concerning?  I'd be happy to test on specific things if there are.
> > 
> > I just remember hitting problems in the past; especially if we end up
> > with trying to do a 64 bit atomic on a platofmr that can only do 32???
> 
> I see what you meant... when I was looking in the existing callers of
> qatomic_add(), I do find that we seem to have Stat64 just for that
> !CONFIG_ATOMIC64 problem.
> 
> I'll dig a bit on whether and how we can do that; the thing is these
> counters are in the qapi so I need to make sure it can support Stat64
> somehow.  Hmm..

I think I can't directly change the qapi MigrationStats to make some of
them to Stat64 since for !ATOMIC_64 systems Stat64 actually takes more than
64 bits space (since we'll need to do the locking with Stat64.lock), so
it'll definitely break the ABI no matter what..

I don't have a better option but introduce another ram_counters_internal to
maintain the fields that need atomic access, declaring as a Stat64 array.
Then we only mirror those values to MigrationStats in QMP queries when
needed.  The mirror will not contain the lock itself so it'll keep the ABI.

Let me know if there's early comment for that, or I'll go with it.  I'll
definitely add some comment for ram_counters to explain the mirror counters
in that case.

Thanks,
diff mbox series

Patch

diff --git a/migration/migration.c b/migration/migration.c
index 07c74a79a2..0eacc0c99b 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -1048,13 +1048,13 @@  static void populate_ram_info(MigrationInfo *info, MigrationState *s)
 
     info->has_ram = true;
     info->ram = g_malloc0(sizeof(*info->ram));
-    info->ram->transferred = ram_counters.transferred;
+    info->ram->transferred = qatomic_read(&ram_counters.transferred);
     info->ram->total = ram_bytes_total();
-    info->ram->duplicate = ram_counters.duplicate;
+    info->ram->duplicate = qatomic_read(&ram_counters.duplicate);
     /* legacy value.  It is not used anymore */
     info->ram->skipped = 0;
-    info->ram->normal = ram_counters.normal;
-    info->ram->normal_bytes = ram_counters.normal * page_size;
+    info->ram->normal = qatomic_read(&ram_counters.normal);
+    info->ram->normal_bytes = info->ram->normal * page_size;
     info->ram->mbps = s->mbps;
     info->ram->dirty_sync_count = ram_counters.dirty_sync_count;
     info->ram->dirty_sync_missed_zero_copy =
@@ -1065,7 +1065,7 @@  static void populate_ram_info(MigrationInfo *info, MigrationState *s)
     info->ram->pages_per_second = s->pages_per_second;
     info->ram->precopy_bytes = ram_counters.precopy_bytes;
     info->ram->downtime_bytes = ram_counters.downtime_bytes;
-    info->ram->postcopy_bytes = ram_counters.postcopy_bytes;
+    info->ram->postcopy_bytes = qatomic_read(&ram_counters.postcopy_bytes);
 
     if (migrate_use_xbzrle()) {
         info->has_xbzrle_cache = true;
diff --git a/migration/multifd.c b/migration/multifd.c
index 586ddc9d65..460326acd4 100644
--- a/migration/multifd.c
+++ b/migration/multifd.c
@@ -437,7 +437,7 @@  static int multifd_send_pages(QEMUFile *f)
                 + p->packet_len;
     qemu_file_acct_rate_limit(f, transferred);
     ram_counters.multifd_bytes += transferred;
-    ram_counters.transferred += transferred;
+    qatomic_add(&ram_counters.transferred, transferred);
     qemu_mutex_unlock(&p->mutex);
     qemu_sem_post(&p->sem);
 
diff --git a/migration/ram.c b/migration/ram.c
index 6e7de6087a..5bd3d76bf0 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -432,11 +432,11 @@  static void ram_transferred_add(uint64_t bytes)
     if (runstate_is_running()) {
         ram_counters.precopy_bytes += bytes;
     } else if (migration_in_postcopy()) {
-        ram_counters.postcopy_bytes += bytes;
+        qatomic_add(&ram_counters.postcopy_bytes, bytes);
     } else {
         ram_counters.downtime_bytes += bytes;
     }
-    ram_counters.transferred += bytes;
+    qatomic_add(&ram_counters.transferred, bytes);
 }
 
 void dirty_sync_missed_zero_copy(void)
@@ -725,7 +725,7 @@  void mig_throttle_counter_reset(void)
 
     rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
     rs->num_dirty_pages_period = 0;
-    rs->bytes_xfer_prev = ram_counters.transferred;
+    rs->bytes_xfer_prev = qatomic_read(&ram_counters.transferred);
 }
 
 /**
@@ -1085,8 +1085,9 @@  uint64_t ram_pagesize_summary(void)
 
 uint64_t ram_get_total_transferred_pages(void)
 {
-    return  ram_counters.normal + ram_counters.duplicate +
-                compression_counters.pages + xbzrle_counters.pages;
+    return  qatomic_read(&ram_counters.normal) +
+        qatomic_read(&ram_counters.duplicate) +
+        compression_counters.pages + xbzrle_counters.pages;
 }
 
 static void migration_update_rates(RAMState *rs, int64_t end_time)
@@ -1145,8 +1146,8 @@  static void migration_trigger_throttle(RAMState *rs)
 {
     MigrationState *s = migrate_get_current();
     uint64_t threshold = s->parameters.throttle_trigger_threshold;
-
-    uint64_t bytes_xfer_period = ram_counters.transferred - rs->bytes_xfer_prev;
+    uint64_t bytes_xfer_period =
+        qatomic_read(&ram_counters.transferred) - rs->bytes_xfer_prev;
     uint64_t bytes_dirty_period = rs->num_dirty_pages_period * TARGET_PAGE_SIZE;
     uint64_t bytes_dirty_threshold = bytes_xfer_period * threshold / 100;
 
@@ -1285,7 +1286,7 @@  static int save_zero_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
     int len = save_zero_page_to_file(rs, rs->f, block, offset);
 
     if (len) {
-        ram_counters.duplicate++;
+        qatomic_inc(&ram_counters.duplicate);
         ram_transferred_add(len);
         return 1;
     }
@@ -1322,9 +1323,9 @@  static bool control_save_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
     }
 
     if (bytes_xmit > 0) {
-        ram_counters.normal++;
+        qatomic_inc(&ram_counters.normal);
     } else if (bytes_xmit == 0) {
-        ram_counters.duplicate++;
+        qatomic_inc(&ram_counters.duplicate);
     }
 
     return true;
@@ -1354,7 +1355,7 @@  static int save_normal_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
         qemu_put_buffer(rs->f, buf, TARGET_PAGE_SIZE);
     }
     ram_transferred_add(TARGET_PAGE_SIZE);
-    ram_counters.normal++;
+    qatomic_inc(&ram_counters.normal);
     return 1;
 }
 
@@ -1448,7 +1449,7 @@  update_compress_thread_counts(const CompressParam *param, int bytes_xmit)
     ram_transferred_add(bytes_xmit);
 
     if (param->zero_page) {
-        ram_counters.duplicate++;
+        qatomic_inc(&ram_counters.duplicate);
         return;
     }
 
@@ -2620,9 +2621,9 @@  void acct_update_position(QEMUFile *f, size_t size, bool zero)
     uint64_t pages = size / TARGET_PAGE_SIZE;
 
     if (zero) {
-        ram_counters.duplicate += pages;
+        qatomic_add(&ram_counters.duplicate, pages);
     } else {
-        ram_counters.normal += pages;
+        qatomic_add(&ram_counters.normal, pages);
         ram_transferred_add(size);
         qemu_file_credit_transfer(f, size);
     }