diff mbox series

[10/14] migration: Make PageSearchStatus part of RAMState

Message ID 20220920225221.48999-1-peterx@redhat.com
State New
Headers show
Series migration: Postcopy Preempt-Full | expand

Commit Message

Peter Xu Sept. 20, 2022, 10:52 p.m. UTC
We used to allocate the PSS structure on the stack for precopy when sending
pages.  Make it static, so as to describe per-channel ram migration status.

Here we declared RAM_CHANNEL_MAX instances, preparing for postcopy to use
it, even though this patch has not yet started using the 2nd instance.

This should not have any functional change per se, but it already starts to
export PSS information via the RAMState, so that e.g. one PSS channel can
start to reference the other PSS channel.

Always protect PSS access using the same RAMState.bitmap_mutex.  We already
do so, so no code change needed, just some comment update.  Maybe we should
consider renaming bitmap_mutex some day as it's going to be a more commonly
used and bigger mutex for ram states, but just leave it for later.

Signed-off-by: Peter Xu <peterx@redhat.com>
---
 migration/ram.c | 112 ++++++++++++++++++++++++++----------------------
 1 file changed, 61 insertions(+), 51 deletions(-)

Comments

Dr. David Alan Gilbert Oct. 5, 2022, 6:51 p.m. UTC | #1
* Peter Xu (peterx@redhat.com) wrote:
> We used to allocate PSS structure on the stack for precopy when sending
> pages.  Make it static, so as to describe per-channel ram migration status.
> 
> Here we declared RAM_CHANNEL_MAX instances, preparing for postcopy to use
> it, even though this patch has not yet to start using the 2nd instance.
> 
> This should not have any functional change per se, but it already starts to
> export PSS information via the RAMState, so that e.g. one PSS channel can
> start to reference the other PSS channel.
> 
> Always protect PSS access using the same RAMState.bitmap_mutex.  We already
> do so, so no code change needed, just some comment update.  Maybe we should
> consider renaming bitmap_mutex some day as it's going to be a more commonly
> and big mutex we use for ram states, but just leave it for later.
> 
> Signed-off-by: Peter Xu <peterx@redhat.com>
> ---
>  migration/ram.c | 112 ++++++++++++++++++++++++++----------------------
>  1 file changed, 61 insertions(+), 51 deletions(-)
> 
> diff --git a/migration/ram.c b/migration/ram.c
> index b4b36ca59e..dbe11e1ace 100644
> --- a/migration/ram.c
> +++ b/migration/ram.c
> @@ -85,6 +85,46 @@
>  
>  XBZRLECacheStats xbzrle_counters;
>  
> +/* used by the search for pages to send */
> +struct PageSearchStatus {
> +    /* The migration channel used for a specific host page */
> +    QEMUFile    *pss_channel;
> +    /* Current block being searched */
> +    RAMBlock    *block;
> +    /* Current page to search from */
> +    unsigned long page;
> +    /* Set once we wrap around */
> +    bool         complete_round;
> +    /*
> +     * [POSTCOPY-ONLY] Whether current page is explicitly requested by
> +     * postcopy.  When set, the request is "urgent" because the dest QEMU
> +     * threads are waiting for us.
> +     */
> +    bool         postcopy_requested;
> +    /*
> +     * [POSTCOPY-ONLY] The target channel to use to send current page.
> +     *
> +     * Note: This may _not_ match with the value in postcopy_requested
> +     * above. Let's imagine the case where the postcopy request is exactly
> +     * the page that we're sending in progress during precopy. In this case
> +     * we'll have postcopy_requested set to true but the target channel
> +     * will be the precopy channel (so that we don't split brain on that
> +     * specific page since the precopy channel already contains partial of
> +     * that page data).
> +     *
> +     * Besides that specific use case, postcopy_target_channel should
> +     * always be equal to postcopy_requested, because by default we send
> +     * postcopy pages via postcopy preempt channel.
> +     */
> +    bool         postcopy_target_channel;
> +    /* Whether we're sending a host page */
> +    bool          host_page_sending;
> +    /* The start/end of current host page.  Invalid if host_page_sending==false */
> +    unsigned long host_page_start;
> +    unsigned long host_page_end;
> +};
> +typedef struct PageSearchStatus PageSearchStatus;
> +
>  /* struct contains XBZRLE cache and a static page
>     used by the compression */
>  static struct {
> @@ -319,6 +359,11 @@ typedef struct {
>  struct RAMState {
>      /* QEMUFile used for this migration */
>      QEMUFile *f;
> +    /*
> +     * PageSearchStatus structures for the channels when send pages.
> +     * Protected by the bitmap_mutex.
> +     */
> +    PageSearchStatus pss[RAM_CHANNEL_MAX];

Why statically size this rather than allocate it in ram_state_init ?

Dave

>      /* UFFD file descriptor, used in 'write-tracking' migration */
>      int uffdio_fd;
>      /* Last block that we have visited searching for dirty pages */
> @@ -362,7 +407,12 @@ struct RAMState {
>      uint64_t target_page_count;
>      /* number of dirty bits in the bitmap */
>      uint64_t migration_dirty_pages;
> -    /* Protects modification of the bitmap and migration dirty pages */
> +    /*
> +     * Protects:
> +     * - dirty/clear bitmap
> +     * - migration_dirty_pages
> +     * - pss structures
> +     */
>      QemuMutex bitmap_mutex;
>      /* The RAMBlock used in the last src_page_requests */
>      RAMBlock *last_req_rb;
> @@ -444,46 +494,6 @@ void dirty_sync_missed_zero_copy(void)
>      ram_counters.dirty_sync_missed_zero_copy++;
>  }
>  
> -/* used by the search for pages to send */
> -struct PageSearchStatus {
> -    /* The migration channel used for a specific host page */
> -    QEMUFile    *pss_channel;
> -    /* Current block being searched */
> -    RAMBlock    *block;
> -    /* Current page to search from */
> -    unsigned long page;
> -    /* Set once we wrap around */
> -    bool         complete_round;
> -    /*
> -     * [POSTCOPY-ONLY] Whether current page is explicitly requested by
> -     * postcopy.  When set, the request is "urgent" because the dest QEMU
> -     * threads are waiting for us.
> -     */
> -    bool         postcopy_requested;
> -    /*
> -     * [POSTCOPY-ONLY] The target channel to use to send current page.
> -     *
> -     * Note: This may _not_ match with the value in postcopy_requested
> -     * above. Let's imagine the case where the postcopy request is exactly
> -     * the page that we're sending in progress during precopy. In this case
> -     * we'll have postcopy_requested set to true but the target channel
> -     * will be the precopy channel (so that we don't split brain on that
> -     * specific page since the precopy channel already contains partial of
> -     * that page data).
> -     *
> -     * Besides that specific use case, postcopy_target_channel should
> -     * always be equal to postcopy_requested, because by default we send
> -     * postcopy pages via postcopy preempt channel.
> -     */
> -    bool         postcopy_target_channel;
> -    /* Whether we're sending a host page */
> -    bool          host_page_sending;
> -    /* The start/end of current host page.  Only valid if host_page_sending==true */
> -    unsigned long host_page_start;
> -    unsigned long host_page_end;
> -};
> -typedef struct PageSearchStatus PageSearchStatus;
> -
>  CompressionStats compression_counters;
>  
>  struct CompressParam {
> @@ -2627,7 +2637,7 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss)
>   */
>  static int ram_find_and_save_block(RAMState *rs)
>  {
> -    PageSearchStatus pss;
> +    PageSearchStatus *pss = &rs->pss[RAM_CHANNEL_PRECOPY];
>      int pages = 0;
>      bool again, found;
>  
> @@ -2648,11 +2658,11 @@ static int ram_find_and_save_block(RAMState *rs)
>          rs->last_page = 0;
>      }
>  
> -    pss_init(&pss, rs->last_seen_block, rs->last_page);
> +    pss_init(pss, rs->last_seen_block, rs->last_page);
>  
>      do {
>          again = true;
> -        found = get_queued_page(rs, &pss);
> +        found = get_queued_page(rs, pss);
>  
>          if (!found) {
>              /*
> @@ -2660,27 +2670,27 @@ static int ram_find_and_save_block(RAMState *rs)
>               * preempted precopy.  Otherwise find the next dirty bit.
>               */
>              if (postcopy_preempt_triggered(rs)) {
> -                postcopy_preempt_restore(rs, &pss, false);
> +                postcopy_preempt_restore(rs, pss, false);
>                  found = true;
>              } else {
>                  /* priority queue empty, so just search for something dirty */
> -                found = find_dirty_block(rs, &pss, &again);
> +                found = find_dirty_block(rs, pss, &again);
>              }
>          }
>  
>          if (found) {
>              /* Update rs->f with correct channel */
>              if (postcopy_preempt_active()) {
> -                postcopy_preempt_choose_channel(rs, &pss);
> +                postcopy_preempt_choose_channel(rs, pss);
>              }
>              /* Cache rs->f in pss_channel (TODO: remove rs->f) */
> -            pss.pss_channel = rs->f;
> -            pages = ram_save_host_page(rs, &pss);
> +            pss->pss_channel = rs->f;
> +            pages = ram_save_host_page(rs, pss);
>          }
>      } while (!pages && again);
>  
> -    rs->last_seen_block = pss.block;
> -    rs->last_page = pss.page;
> +    rs->last_seen_block = pss->block;
> +    rs->last_page = pss->page;
>  
>      return pages;
>  }
> -- 
> 2.32.0
>
Peter Xu Oct. 5, 2022, 7:41 p.m. UTC | #2
On Wed, Oct 05, 2022 at 07:51:34PM +0100, Dr. David Alan Gilbert wrote:
> >  /* struct contains XBZRLE cache and a static page
> >     used by the compression */
> >  static struct {
> > @@ -319,6 +359,11 @@ typedef struct {
> >  struct RAMState {
> >      /* QEMUFile used for this migration */
> >      QEMUFile *f;
> > +    /*
> > +     * PageSearchStatus structures for the channels when send pages.
> > +     * Protected by the bitmap_mutex.
> > +     */
> > +    PageSearchStatus pss[RAM_CHANNEL_MAX];
> 
> Why statically size this rather than allocate it in ram_state_init ?

I don't strongly feel like it needs the complexity? As there're only at
most 2 channels anyway, so the best chance is we save ~56 bytes on src qemu
but only during migration (RAMState allocated only in ram setup).

If you think we should still do the dynamic allocation, definitely doable on
my side too.
Dr. David Alan Gilbert Oct. 6, 2022, 8:36 a.m. UTC | #3
* Peter Xu (peterx@redhat.com) wrote:
> On Wed, Oct 05, 2022 at 07:51:34PM +0100, Dr. David Alan Gilbert wrote:
> > >  /* struct contains XBZRLE cache and a static page
> > >     used by the compression */
> > >  static struct {
> > > @@ -319,6 +359,11 @@ typedef struct {
> > >  struct RAMState {
> > >      /* QEMUFile used for this migration */
> > >      QEMUFile *f;
> > > +    /*
> > > +     * PageSearchStatus structures for the channels when send pages.
> > > +     * Protected by the bitmap_mutex.
> > > +     */
> > > +    PageSearchStatus pss[RAM_CHANNEL_MAX];
> > 
> > Why statically size this rather than allocate it in ram_state_init ?
> 
> I don't strongly feel like it needs the complexity? As there're only at
> most 2 channels anyway, so the best chance is we save ~56 bytes on src qemu
> but only during migration (RAMState allocated only in ram setup).
> 
> If you think we should still do the dynamic allcation, definitely doable on
> my side too.

Oh for 2 channels, yes that's fine - what confused me here was
'RAM_CHANNEL_...' - I forgot that was PRE vs POST - I was getting
confused with multifd channel numbering; too many concepts of 'channel'.

Dave

> -- 
> Peter Xu
>
Dr. David Alan Gilbert Oct. 6, 2022, 8:37 a.m. UTC | #4
* Peter Xu (peterx@redhat.com) wrote:
> We used to allocate PSS structure on the stack for precopy when sending
> pages.  Make it static, so as to describe per-channel ram migration status.
> 
> Here we declared RAM_CHANNEL_MAX instances, preparing for postcopy to use
> it, even though this patch has not yet to start using the 2nd instance.
> 
> This should not have any functional change per se, but it already starts to
> export PSS information via the RAMState, so that e.g. one PSS channel can
> start to reference the other PSS channel.
> 
> Always protect PSS access using the same RAMState.bitmap_mutex.  We already
> do so, so no code change needed, just some comment update.  Maybe we should
> consider renaming bitmap_mutex some day as it's going to be a more commonly
> and big mutex we use for ram states, but just leave it for later.
> 
> Signed-off-by: Peter Xu <peterx@redhat.com>

Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>

> ---
>  migration/ram.c | 112 ++++++++++++++++++++++++++----------------------
>  1 file changed, 61 insertions(+), 51 deletions(-)
> 
> diff --git a/migration/ram.c b/migration/ram.c
> index b4b36ca59e..dbe11e1ace 100644
> --- a/migration/ram.c
> +++ b/migration/ram.c
> @@ -85,6 +85,46 @@
>  
>  XBZRLECacheStats xbzrle_counters;
>  
> +/* used by the search for pages to send */
> +struct PageSearchStatus {
> +    /* The migration channel used for a specific host page */
> +    QEMUFile    *pss_channel;
> +    /* Current block being searched */
> +    RAMBlock    *block;
> +    /* Current page to search from */
> +    unsigned long page;
> +    /* Set once we wrap around */
> +    bool         complete_round;
> +    /*
> +     * [POSTCOPY-ONLY] Whether current page is explicitly requested by
> +     * postcopy.  When set, the request is "urgent" because the dest QEMU
> +     * threads are waiting for us.
> +     */
> +    bool         postcopy_requested;
> +    /*
> +     * [POSTCOPY-ONLY] The target channel to use to send current page.
> +     *
> +     * Note: This may _not_ match with the value in postcopy_requested
> +     * above. Let's imagine the case where the postcopy request is exactly
> +     * the page that we're sending in progress during precopy. In this case
> +     * we'll have postcopy_requested set to true but the target channel
> +     * will be the precopy channel (so that we don't split brain on that
> +     * specific page since the precopy channel already contains partial of
> +     * that page data).
> +     *
> +     * Besides that specific use case, postcopy_target_channel should
> +     * always be equal to postcopy_requested, because by default we send
> +     * postcopy pages via postcopy preempt channel.
> +     */
> +    bool         postcopy_target_channel;
> +    /* Whether we're sending a host page */
> +    bool          host_page_sending;
> +    /* The start/end of current host page.  Invalid if host_page_sending==false */
> +    unsigned long host_page_start;
> +    unsigned long host_page_end;
> +};
> +typedef struct PageSearchStatus PageSearchStatus;
> +
>  /* struct contains XBZRLE cache and a static page
>     used by the compression */
>  static struct {
> @@ -319,6 +359,11 @@ typedef struct {
>  struct RAMState {
>      /* QEMUFile used for this migration */
>      QEMUFile *f;
> +    /*
> +     * PageSearchStatus structures for the channels when send pages.
> +     * Protected by the bitmap_mutex.
> +     */
> +    PageSearchStatus pss[RAM_CHANNEL_MAX];
>      /* UFFD file descriptor, used in 'write-tracking' migration */
>      int uffdio_fd;
>      /* Last block that we have visited searching for dirty pages */
> @@ -362,7 +407,12 @@ struct RAMState {
>      uint64_t target_page_count;
>      /* number of dirty bits in the bitmap */
>      uint64_t migration_dirty_pages;
> -    /* Protects modification of the bitmap and migration dirty pages */
> +    /*
> +     * Protects:
> +     * - dirty/clear bitmap
> +     * - migration_dirty_pages
> +     * - pss structures
> +     */
>      QemuMutex bitmap_mutex;
>      /* The RAMBlock used in the last src_page_requests */
>      RAMBlock *last_req_rb;
> @@ -444,46 +494,6 @@ void dirty_sync_missed_zero_copy(void)
>      ram_counters.dirty_sync_missed_zero_copy++;
>  }
>  
> -/* used by the search for pages to send */
> -struct PageSearchStatus {
> -    /* The migration channel used for a specific host page */
> -    QEMUFile    *pss_channel;
> -    /* Current block being searched */
> -    RAMBlock    *block;
> -    /* Current page to search from */
> -    unsigned long page;
> -    /* Set once we wrap around */
> -    bool         complete_round;
> -    /*
> -     * [POSTCOPY-ONLY] Whether current page is explicitly requested by
> -     * postcopy.  When set, the request is "urgent" because the dest QEMU
> -     * threads are waiting for us.
> -     */
> -    bool         postcopy_requested;
> -    /*
> -     * [POSTCOPY-ONLY] The target channel to use to send current page.
> -     *
> -     * Note: This may _not_ match with the value in postcopy_requested
> -     * above. Let's imagine the case where the postcopy request is exactly
> -     * the page that we're sending in progress during precopy. In this case
> -     * we'll have postcopy_requested set to true but the target channel
> -     * will be the precopy channel (so that we don't split brain on that
> -     * specific page since the precopy channel already contains partial of
> -     * that page data).
> -     *
> -     * Besides that specific use case, postcopy_target_channel should
> -     * always be equal to postcopy_requested, because by default we send
> -     * postcopy pages via postcopy preempt channel.
> -     */
> -    bool         postcopy_target_channel;
> -    /* Whether we're sending a host page */
> -    bool          host_page_sending;
> -    /* The start/end of current host page.  Only valid if host_page_sending==true */
> -    unsigned long host_page_start;
> -    unsigned long host_page_end;
> -};
> -typedef struct PageSearchStatus PageSearchStatus;
> -
>  CompressionStats compression_counters;
>  
>  struct CompressParam {
> @@ -2627,7 +2637,7 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss)
>   */
>  static int ram_find_and_save_block(RAMState *rs)
>  {
> -    PageSearchStatus pss;
> +    PageSearchStatus *pss = &rs->pss[RAM_CHANNEL_PRECOPY];
>      int pages = 0;
>      bool again, found;
>  
> @@ -2648,11 +2658,11 @@ static int ram_find_and_save_block(RAMState *rs)
>          rs->last_page = 0;
>      }
>  
> -    pss_init(&pss, rs->last_seen_block, rs->last_page);
> +    pss_init(pss, rs->last_seen_block, rs->last_page);
>  
>      do {
>          again = true;
> -        found = get_queued_page(rs, &pss);
> +        found = get_queued_page(rs, pss);
>  
>          if (!found) {
>              /*
> @@ -2660,27 +2670,27 @@ static int ram_find_and_save_block(RAMState *rs)
>               * preempted precopy.  Otherwise find the next dirty bit.
>               */
>              if (postcopy_preempt_triggered(rs)) {
> -                postcopy_preempt_restore(rs, &pss, false);
> +                postcopy_preempt_restore(rs, pss, false);
>                  found = true;
>              } else {
>                  /* priority queue empty, so just search for something dirty */
> -                found = find_dirty_block(rs, &pss, &again);
> +                found = find_dirty_block(rs, pss, &again);
>              }
>          }
>  
>          if (found) {
>              /* Update rs->f with correct channel */
>              if (postcopy_preempt_active()) {
> -                postcopy_preempt_choose_channel(rs, &pss);
> +                postcopy_preempt_choose_channel(rs, pss);
>              }
>              /* Cache rs->f in pss_channel (TODO: remove rs->f) */
> -            pss.pss_channel = rs->f;
> -            pages = ram_save_host_page(rs, &pss);
> +            pss->pss_channel = rs->f;
> +            pages = ram_save_host_page(rs, pss);
>          }
>      } while (!pages && again);
>  
> -    rs->last_seen_block = pss.block;
> -    rs->last_page = pss.page;
> +    rs->last_seen_block = pss->block;
> +    rs->last_page = pss->page;
>  
>      return pages;
>  }
> -- 
> 2.32.0
>
diff mbox series

Patch

diff --git a/migration/ram.c b/migration/ram.c
index b4b36ca59e..dbe11e1ace 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -85,6 +85,46 @@ 
 
 XBZRLECacheStats xbzrle_counters;
 
+/* used by the search for pages to send */
+struct PageSearchStatus {
+    /* The migration channel used for a specific host page */
+    QEMUFile    *pss_channel;
+    /* Current block being searched */
+    RAMBlock    *block;
+    /* Current page to search from */
+    unsigned long page;
+    /* Set once we wrap around */
+    bool         complete_round;
+    /*
+     * [POSTCOPY-ONLY] Whether current page is explicitly requested by
+     * postcopy.  When set, the request is "urgent" because the dest QEMU
+     * threads are waiting for us.
+     */
+    bool         postcopy_requested;
+    /*
+     * [POSTCOPY-ONLY] The target channel to use to send current page.
+     *
+     * Note: This may _not_ match with the value in postcopy_requested
+     * above. Let's imagine the case where the postcopy request is exactly
+     * the page that we're sending in progress during precopy. In this case
+     * we'll have postcopy_requested set to true but the target channel
+     * will be the precopy channel (so that we don't split brain on that
+     * specific page since the precopy channel already contains partial of
+     * that page data).
+     *
+     * Besides that specific use case, postcopy_target_channel should
+     * always be equal to postcopy_requested, because by default we send
+     * postcopy pages via postcopy preempt channel.
+     */
+    bool         postcopy_target_channel;
+    /* Whether we're sending a host page */
+    bool          host_page_sending;
+    /* The start/end of current host page.  Invalid if host_page_sending==false */
+    unsigned long host_page_start;
+    unsigned long host_page_end;
+};
+typedef struct PageSearchStatus PageSearchStatus;
+
 /* struct contains XBZRLE cache and a static page
    used by the compression */
 static struct {
@@ -319,6 +359,11 @@  typedef struct {
 struct RAMState {
     /* QEMUFile used for this migration */
     QEMUFile *f;
+    /*
+     * PageSearchStatus structures for the channels when send pages.
+     * Protected by the bitmap_mutex.
+     */
+    PageSearchStatus pss[RAM_CHANNEL_MAX];
     /* UFFD file descriptor, used in 'write-tracking' migration */
     int uffdio_fd;
     /* Last block that we have visited searching for dirty pages */
@@ -362,7 +407,12 @@  struct RAMState {
     uint64_t target_page_count;
     /* number of dirty bits in the bitmap */
     uint64_t migration_dirty_pages;
-    /* Protects modification of the bitmap and migration dirty pages */
+    /*
+     * Protects:
+     * - dirty/clear bitmap
+     * - migration_dirty_pages
+     * - pss structures
+     */
     QemuMutex bitmap_mutex;
     /* The RAMBlock used in the last src_page_requests */
     RAMBlock *last_req_rb;
@@ -444,46 +494,6 @@  void dirty_sync_missed_zero_copy(void)
     ram_counters.dirty_sync_missed_zero_copy++;
 }
 
-/* used by the search for pages to send */
-struct PageSearchStatus {
-    /* The migration channel used for a specific host page */
-    QEMUFile    *pss_channel;
-    /* Current block being searched */
-    RAMBlock    *block;
-    /* Current page to search from */
-    unsigned long page;
-    /* Set once we wrap around */
-    bool         complete_round;
-    /*
-     * [POSTCOPY-ONLY] Whether current page is explicitly requested by
-     * postcopy.  When set, the request is "urgent" because the dest QEMU
-     * threads are waiting for us.
-     */
-    bool         postcopy_requested;
-    /*
-     * [POSTCOPY-ONLY] The target channel to use to send current page.
-     *
-     * Note: This may _not_ match with the value in postcopy_requested
-     * above. Let's imagine the case where the postcopy request is exactly
-     * the page that we're sending in progress during precopy. In this case
-     * we'll have postcopy_requested set to true but the target channel
-     * will be the precopy channel (so that we don't split brain on that
-     * specific page since the precopy channel already contains partial of
-     * that page data).
-     *
-     * Besides that specific use case, postcopy_target_channel should
-     * always be equal to postcopy_requested, because by default we send
-     * postcopy pages via postcopy preempt channel.
-     */
-    bool         postcopy_target_channel;
-    /* Whether we're sending a host page */
-    bool          host_page_sending;
-    /* The start/end of current host page.  Only valid if host_page_sending==true */
-    unsigned long host_page_start;
-    unsigned long host_page_end;
-};
-typedef struct PageSearchStatus PageSearchStatus;
-
 CompressionStats compression_counters;
 
 struct CompressParam {
@@ -2627,7 +2637,7 @@  static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss)
  */
 static int ram_find_and_save_block(RAMState *rs)
 {
-    PageSearchStatus pss;
+    PageSearchStatus *pss = &rs->pss[RAM_CHANNEL_PRECOPY];
     int pages = 0;
     bool again, found;
 
@@ -2648,11 +2658,11 @@  static int ram_find_and_save_block(RAMState *rs)
         rs->last_page = 0;
     }
 
-    pss_init(&pss, rs->last_seen_block, rs->last_page);
+    pss_init(pss, rs->last_seen_block, rs->last_page);
 
     do {
         again = true;
-        found = get_queued_page(rs, &pss);
+        found = get_queued_page(rs, pss);
 
         if (!found) {
             /*
@@ -2660,27 +2670,27 @@  static int ram_find_and_save_block(RAMState *rs)
              * preempted precopy.  Otherwise find the next dirty bit.
              */
             if (postcopy_preempt_triggered(rs)) {
-                postcopy_preempt_restore(rs, &pss, false);
+                postcopy_preempt_restore(rs, pss, false);
                 found = true;
             } else {
                 /* priority queue empty, so just search for something dirty */
-                found = find_dirty_block(rs, &pss, &again);
+                found = find_dirty_block(rs, pss, &again);
             }
         }
 
         if (found) {
             /* Update rs->f with correct channel */
             if (postcopy_preempt_active()) {
-                postcopy_preempt_choose_channel(rs, &pss);
+                postcopy_preempt_choose_channel(rs, pss);
             }
             /* Cache rs->f in pss_channel (TODO: remove rs->f) */
-            pss.pss_channel = rs->f;
-            pages = ram_save_host_page(rs, &pss);
+            pss->pss_channel = rs->f;
+            pages = ram_save_host_page(rs, pss);
         }
     } while (!pages && again);
 
-    rs->last_seen_block = pss.block;
-    rs->last_page = pss.page;
+    rs->last_seen_block = pss->block;
+    rs->last_page = pss->page;
 
     return pages;
 }