Message ID | 20190430034412.12935-3-richardw.yang@linux.intel.com |
---|---|
State | New |
Headers | show |
Series | Cleanup migration/ram.c | expand |
* Wei Yang (richardw.yang@linux.intel.com) wrote: > Since start of cpu_physical_memory_sync_dirty_bitmap is always 0, we can > remove this parameter and simplify the calculation a bit. > > Signed-off-by: Wei Yang <richardw.yang@linux.intel.com> So I think you're right it's currently unused; however, lets ask Paolo: Do we need to keep this parameter for flexiblity? Dave > --- > include/exec/ram_addr.h | 15 ++++++--------- > migration/ram.c | 2 +- > 2 files changed, 7 insertions(+), 10 deletions(-) > > diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h > index 9ecd911c3e..3dfb2d52fb 100644 > --- a/include/exec/ram_addr.h > +++ b/include/exec/ram_addr.h > @@ -409,18 +409,16 @@ static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start, > > static inline > uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb, > - ram_addr_t start, > ram_addr_t length, > uint64_t *real_dirty_pages) > { > ram_addr_t addr; > - unsigned long word = BIT_WORD((start + rb->offset) >> TARGET_PAGE_BITS); > + unsigned long word = BIT_WORD(rb->offset >> TARGET_PAGE_BITS); > uint64_t num_dirty = 0; > unsigned long *dest = rb->bmap; > > - /* start address and length is aligned at the start of a word? */ > - if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) == > - (start + rb->offset) && > + /* offset and length is aligned at the start of a word? 
*/ > + if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) == (rb->offset) && > !(length & ((BITS_PER_LONG << TARGET_PAGE_BITS) - 1))) { > int k; > int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS); > @@ -428,14 +426,13 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb, > unsigned long idx = (word * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE; > unsigned long offset = BIT_WORD((word * BITS_PER_LONG) % > DIRTY_MEMORY_BLOCK_SIZE); > - unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS); > > rcu_read_lock(); > > src = atomic_rcu_read( > &ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks; > > - for (k = page; k < page + nr; k++) { > + for (k = 0; k < nr; k++) { > if (src[idx][offset]) { > unsigned long bits = atomic_xchg(&src[idx][offset], 0); > unsigned long new_dirty; > @@ -458,11 +455,11 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb, > > for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) { > if (cpu_physical_memory_test_and_clear_dirty( > - start + addr + offset, > + addr + offset, > TARGET_PAGE_SIZE, > DIRTY_MEMORY_MIGRATION)) { > *real_dirty_pages += 1; > - long k = (start + addr) >> TARGET_PAGE_BITS; > + long k = addr >> TARGET_PAGE_BITS; > if (!test_and_set_bit(k, dest)) { > num_dirty++; > } > diff --git a/migration/ram.c b/migration/ram.c > index 9948b2d021..1def8122e9 100644 > --- a/migration/ram.c > +++ b/migration/ram.c > @@ -1646,7 +1646,7 @@ static void migration_bitmap_sync_range(RAMState *rs, RAMBlock *rb, > ram_addr_t length) > { > rs->migration_dirty_pages += > - cpu_physical_memory_sync_dirty_bitmap(rb, 0, length, > + cpu_physical_memory_sync_dirty_bitmap(rb, length, > &rs->num_dirty_pages_period); > } > > -- > 2.19.1 > -- Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
On Tue, May 14, 2019 at 03:21:08PM +0100, Dr. David Alan Gilbert wrote: >* Wei Yang (richardw.yang@linux.intel.com) wrote: >> Since start of cpu_physical_memory_sync_dirty_bitmap is always 0, we can >> remove this parameter and simplify the calculation a bit. >> >> Signed-off-by: Wei Yang <richardw.yang@linux.intel.com> > >So I think you're right it's currently unused; however, lets ask >Paolo: Do we need to keep this parameter for flexiblity? > Hi Paolo Do you like this one? >Dave > >> --- >> include/exec/ram_addr.h | 15 ++++++--------- >> migration/ram.c | 2 +- >> 2 files changed, 7 insertions(+), 10 deletions(-) >> >> diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h >> index 9ecd911c3e..3dfb2d52fb 100644 >> --- a/include/exec/ram_addr.h >> +++ b/include/exec/ram_addr.h >> @@ -409,18 +409,16 @@ static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start, >> >> static inline >> uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb, >> - ram_addr_t start, >> ram_addr_t length, >> uint64_t *real_dirty_pages) >> { >> ram_addr_t addr; >> - unsigned long word = BIT_WORD((start + rb->offset) >> TARGET_PAGE_BITS); >> + unsigned long word = BIT_WORD(rb->offset >> TARGET_PAGE_BITS); >> uint64_t num_dirty = 0; >> unsigned long *dest = rb->bmap; >> >> - /* start address and length is aligned at the start of a word? */ >> - if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) == >> - (start + rb->offset) && >> + /* offset and length is aligned at the start of a word? 
*/ >> + if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) == (rb->offset) && >> !(length & ((BITS_PER_LONG << TARGET_PAGE_BITS) - 1))) { >> int k; >> int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS); >> @@ -428,14 +426,13 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb, >> unsigned long idx = (word * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE; >> unsigned long offset = BIT_WORD((word * BITS_PER_LONG) % >> DIRTY_MEMORY_BLOCK_SIZE); >> - unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS); >> >> rcu_read_lock(); >> >> src = atomic_rcu_read( >> &ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks; >> >> - for (k = page; k < page + nr; k++) { >> + for (k = 0; k < nr; k++) { >> if (src[idx][offset]) { >> unsigned long bits = atomic_xchg(&src[idx][offset], 0); >> unsigned long new_dirty; >> @@ -458,11 +455,11 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb, >> >> for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) { >> if (cpu_physical_memory_test_and_clear_dirty( >> - start + addr + offset, >> + addr + offset, >> TARGET_PAGE_SIZE, >> DIRTY_MEMORY_MIGRATION)) { >> *real_dirty_pages += 1; >> - long k = (start + addr) >> TARGET_PAGE_BITS; >> + long k = addr >> TARGET_PAGE_BITS; >> if (!test_and_set_bit(k, dest)) { >> num_dirty++; >> } >> diff --git a/migration/ram.c b/migration/ram.c >> index 9948b2d021..1def8122e9 100644 >> --- a/migration/ram.c >> +++ b/migration/ram.c >> @@ -1646,7 +1646,7 @@ static void migration_bitmap_sync_range(RAMState *rs, RAMBlock *rb, >> ram_addr_t length) >> { >> rs->migration_dirty_pages += >> - cpu_physical_memory_sync_dirty_bitmap(rb, 0, length, >> + cpu_physical_memory_sync_dirty_bitmap(rb, length, >> &rs->num_dirty_pages_period); >> } >> >> -- >> 2.19.1 >> >-- >Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
On Tue, May 14, 2019 at 03:21:08PM +0100, Dr. David Alan Gilbert wrote: >* Wei Yang (richardw.yang@linux.intel.com) wrote: >> Since start of cpu_physical_memory_sync_dirty_bitmap is always 0, we can >> remove this parameter and simplify the calculation a bit. >> >> Signed-off-by: Wei Yang <richardw.yang@linux.intel.com> > >So I think you're right it's currently unused; however, lets ask >Paolo: Do we need to keep this parameter for flexiblity? > Hi, Paolo, Have time to take a look? >Dave > >> --- >> include/exec/ram_addr.h | 15 ++++++--------- >> migration/ram.c | 2 +- >> 2 files changed, 7 insertions(+), 10 deletions(-) >> >> diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h >> index 9ecd911c3e..3dfb2d52fb 100644 >> --- a/include/exec/ram_addr.h >> +++ b/include/exec/ram_addr.h >> @@ -409,18 +409,16 @@ static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start, >> >> static inline >> uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb, >> - ram_addr_t start, >> ram_addr_t length, >> uint64_t *real_dirty_pages) >> { >> ram_addr_t addr; >> - unsigned long word = BIT_WORD((start + rb->offset) >> TARGET_PAGE_BITS); >> + unsigned long word = BIT_WORD(rb->offset >> TARGET_PAGE_BITS); >> uint64_t num_dirty = 0; >> unsigned long *dest = rb->bmap; >> >> - /* start address and length is aligned at the start of a word? */ >> - if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) == >> - (start + rb->offset) && >> + /* offset and length is aligned at the start of a word? 
*/ >> + if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) == (rb->offset) && >> !(length & ((BITS_PER_LONG << TARGET_PAGE_BITS) - 1))) { >> int k; >> int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS); >> @@ -428,14 +426,13 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb, >> unsigned long idx = (word * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE; >> unsigned long offset = BIT_WORD((word * BITS_PER_LONG) % >> DIRTY_MEMORY_BLOCK_SIZE); >> - unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS); >> >> rcu_read_lock(); >> >> src = atomic_rcu_read( >> &ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks; >> >> - for (k = page; k < page + nr; k++) { >> + for (k = 0; k < nr; k++) { >> if (src[idx][offset]) { >> unsigned long bits = atomic_xchg(&src[idx][offset], 0); >> unsigned long new_dirty; >> @@ -458,11 +455,11 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb, >> >> for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) { >> if (cpu_physical_memory_test_and_clear_dirty( >> - start + addr + offset, >> + addr + offset, >> TARGET_PAGE_SIZE, >> DIRTY_MEMORY_MIGRATION)) { >> *real_dirty_pages += 1; >> - long k = (start + addr) >> TARGET_PAGE_BITS; >> + long k = addr >> TARGET_PAGE_BITS; >> if (!test_and_set_bit(k, dest)) { >> num_dirty++; >> } >> diff --git a/migration/ram.c b/migration/ram.c >> index 9948b2d021..1def8122e9 100644 >> --- a/migration/ram.c >> +++ b/migration/ram.c >> @@ -1646,7 +1646,7 @@ static void migration_bitmap_sync_range(RAMState *rs, RAMBlock *rb, >> ram_addr_t length) >> { >> rs->migration_dirty_pages += >> - cpu_physical_memory_sync_dirty_bitmap(rb, 0, length, >> + cpu_physical_memory_sync_dirty_bitmap(rb, length, >> &rs->num_dirty_pages_period); >> } >> >> -- >> 2.19.1 >> >-- >Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
On Tue, May 14, 2019 at 03:21:08PM +0100, Dr. David Alan Gilbert wrote: >* Wei Yang (richardw.yang@linux.intel.com) wrote: >> Since start of cpu_physical_memory_sync_dirty_bitmap is always 0, we can >> remove this parameter and simplify the calculation a bit. >> >> Signed-off-by: Wei Yang <richardw.yang@linux.intel.com> > >So I think you're right it's currently unused; however, lets ask >Paolo: Do we need to keep this parameter for flexiblity? > Hey, Paolo Do you have some spare time to give some insight? >Dave > >> --- >> include/exec/ram_addr.h | 15 ++++++--------- >> migration/ram.c | 2 +- >> 2 files changed, 7 insertions(+), 10 deletions(-) >> >> diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h >> index 9ecd911c3e..3dfb2d52fb 100644 >> --- a/include/exec/ram_addr.h >> +++ b/include/exec/ram_addr.h >> @@ -409,18 +409,16 @@ static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start, >> >> static inline >> uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb, >> - ram_addr_t start, >> ram_addr_t length, >> uint64_t *real_dirty_pages) >> { >> ram_addr_t addr; >> - unsigned long word = BIT_WORD((start + rb->offset) >> TARGET_PAGE_BITS); >> + unsigned long word = BIT_WORD(rb->offset >> TARGET_PAGE_BITS); >> uint64_t num_dirty = 0; >> unsigned long *dest = rb->bmap; >> >> - /* start address and length is aligned at the start of a word? */ >> - if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) == >> - (start + rb->offset) && >> + /* offset and length is aligned at the start of a word? 
*/ >> + if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) == (rb->offset) && >> !(length & ((BITS_PER_LONG << TARGET_PAGE_BITS) - 1))) { >> int k; >> int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS); >> @@ -428,14 +426,13 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb, >> unsigned long idx = (word * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE; >> unsigned long offset = BIT_WORD((word * BITS_PER_LONG) % >> DIRTY_MEMORY_BLOCK_SIZE); >> - unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS); >> >> rcu_read_lock(); >> >> src = atomic_rcu_read( >> &ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks; >> >> - for (k = page; k < page + nr; k++) { >> + for (k = 0; k < nr; k++) { >> if (src[idx][offset]) { >> unsigned long bits = atomic_xchg(&src[idx][offset], 0); >> unsigned long new_dirty; >> @@ -458,11 +455,11 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb, >> >> for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) { >> if (cpu_physical_memory_test_and_clear_dirty( >> - start + addr + offset, >> + addr + offset, >> TARGET_PAGE_SIZE, >> DIRTY_MEMORY_MIGRATION)) { >> *real_dirty_pages += 1; >> - long k = (start + addr) >> TARGET_PAGE_BITS; >> + long k = addr >> TARGET_PAGE_BITS; >> if (!test_and_set_bit(k, dest)) { >> num_dirty++; >> } >> diff --git a/migration/ram.c b/migration/ram.c >> index 9948b2d021..1def8122e9 100644 >> --- a/migration/ram.c >> +++ b/migration/ram.c >> @@ -1646,7 +1646,7 @@ static void migration_bitmap_sync_range(RAMState *rs, RAMBlock *rb, >> ram_addr_t length) >> { >> rs->migration_dirty_pages += >> - cpu_physical_memory_sync_dirty_bitmap(rb, 0, length, >> + cpu_physical_memory_sync_dirty_bitmap(rb, length, >> &rs->num_dirty_pages_period); >> } >> >> -- >> 2.19.1 >> >-- >Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
On 17/07/19 03:13, Wei Yang wrote: > On Tue, May 14, 2019 at 03:21:08PM +0100, Dr. David Alan Gilbert wrote: >> * Wei Yang (richardw.yang@linux.intel.com) wrote: >>> Since start of cpu_physical_memory_sync_dirty_bitmap is always 0, we can >>> remove this parameter and simplify the calculation a bit. >>> >>> Signed-off-by: Wei Yang <richardw.yang@linux.intel.com> >> >> So I think you're right it's currently unused; however, lets ask >> Paolo: Do we need to keep this parameter for flexiblity? >> > > Hey, Paolo > > Do you have some spare time to give some insight? I think it's cleaner to leave the start argument in place. However, I'll note that in migration_bitmap_sync_range the length argument is always block->used_length so that's also unnecessary (you already have block). Paolo >> Dave >> >>> --- >>> include/exec/ram_addr.h | 15 ++++++--------- >>> migration/ram.c | 2 +- >>> 2 files changed, 7 insertions(+), 10 deletions(-) >>> >>> diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h >>> index 9ecd911c3e..3dfb2d52fb 100644 >>> --- a/include/exec/ram_addr.h >>> +++ b/include/exec/ram_addr.h >>> @@ -409,18 +409,16 @@ static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start, >>> >>> static inline >>> uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb, >>> - ram_addr_t start, >>> ram_addr_t length, >>> uint64_t *real_dirty_pages) >>> { >>> ram_addr_t addr; >>> - unsigned long word = BIT_WORD((start + rb->offset) >> TARGET_PAGE_BITS); >>> + unsigned long word = BIT_WORD(rb->offset >> TARGET_PAGE_BITS); >>> uint64_t num_dirty = 0; >>> unsigned long *dest = rb->bmap; >>> >>> - /* start address and length is aligned at the start of a word? */ >>> - if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) == >>> - (start + rb->offset) && >>> + /* offset and length is aligned at the start of a word? 
*/ >>> + if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) == (rb->offset) && >>> !(length & ((BITS_PER_LONG << TARGET_PAGE_BITS) - 1))) { >>> int k; >>> int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS); >>> @@ -428,14 +426,13 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb, >>> unsigned long idx = (word * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE; >>> unsigned long offset = BIT_WORD((word * BITS_PER_LONG) % >>> DIRTY_MEMORY_BLOCK_SIZE); >>> - unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS); >>> >>> rcu_read_lock(); >>> >>> src = atomic_rcu_read( >>> &ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks; >>> >>> - for (k = page; k < page + nr; k++) { >>> + for (k = 0; k < nr; k++) { >>> if (src[idx][offset]) { >>> unsigned long bits = atomic_xchg(&src[idx][offset], 0); >>> unsigned long new_dirty; >>> @@ -458,11 +455,11 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb, >>> >>> for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) { >>> if (cpu_physical_memory_test_and_clear_dirty( >>> - start + addr + offset, >>> + addr + offset, >>> TARGET_PAGE_SIZE, >>> DIRTY_MEMORY_MIGRATION)) { >>> *real_dirty_pages += 1; >>> - long k = (start + addr) >> TARGET_PAGE_BITS; >>> + long k = addr >> TARGET_PAGE_BITS; >>> if (!test_and_set_bit(k, dest)) { >>> num_dirty++; >>> } >>> diff --git a/migration/ram.c b/migration/ram.c >>> index 9948b2d021..1def8122e9 100644 >>> --- a/migration/ram.c >>> +++ b/migration/ram.c >>> @@ -1646,7 +1646,7 @@ static void migration_bitmap_sync_range(RAMState *rs, RAMBlock *rb, >>> ram_addr_t length) >>> { >>> rs->migration_dirty_pages += >>> - cpu_physical_memory_sync_dirty_bitmap(rb, 0, length, >>> + cpu_physical_memory_sync_dirty_bitmap(rb, length, >>> &rs->num_dirty_pages_period); >>> } >>> >>> -- >>> 2.19.1 >>> >> -- >> Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK >
On Wed, Jul 17, 2019 at 11:16:19AM +0200, Paolo Bonzini wrote: >On 17/07/19 03:13, Wei Yang wrote: >> On Tue, May 14, 2019 at 03:21:08PM +0100, Dr. David Alan Gilbert wrote: >>> * Wei Yang (richardw.yang@linux.intel.com) wrote: >>>> Since start of cpu_physical_memory_sync_dirty_bitmap is always 0, we can >>>> remove this parameter and simplify the calculation a bit. >>>> >>>> Signed-off-by: Wei Yang <richardw.yang@linux.intel.com> >>> >>> So I think you're right it's currently unused; however, let's ask >>> Paolo: Do we need to keep this parameter for flexibility? >>> >> >> Hey, Paolo >> >> Do you have some spare time to give some insight? > >I think it's cleaner to leave the start argument in place. However, >I'll note that in migration_bitmap_sync_range the length argument is >always block->used_length so that's also unnecessary (you already have >block). > That's reasonable. >Paolo >
diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h index 9ecd911c3e..3dfb2d52fb 100644 --- a/include/exec/ram_addr.h +++ b/include/exec/ram_addr.h @@ -409,18 +409,16 @@ static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start, static inline uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb, - ram_addr_t start, ram_addr_t length, uint64_t *real_dirty_pages) { ram_addr_t addr; - unsigned long word = BIT_WORD((start + rb->offset) >> TARGET_PAGE_BITS); + unsigned long word = BIT_WORD(rb->offset >> TARGET_PAGE_BITS); uint64_t num_dirty = 0; unsigned long *dest = rb->bmap; - /* start address and length is aligned at the start of a word? */ - if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) == - (start + rb->offset) && + /* offset and length is aligned at the start of a word? */ + if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) == (rb->offset) && !(length & ((BITS_PER_LONG << TARGET_PAGE_BITS) - 1))) { int k; int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS); @@ -428,14 +426,13 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb, unsigned long idx = (word * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE; unsigned long offset = BIT_WORD((word * BITS_PER_LONG) % DIRTY_MEMORY_BLOCK_SIZE); - unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS); rcu_read_lock(); src = atomic_rcu_read( &ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks; - for (k = page; k < page + nr; k++) { + for (k = 0; k < nr; k++) { if (src[idx][offset]) { unsigned long bits = atomic_xchg(&src[idx][offset], 0); unsigned long new_dirty; @@ -458,11 +455,11 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb, for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) { if (cpu_physical_memory_test_and_clear_dirty( - start + addr + offset, + addr + offset, TARGET_PAGE_SIZE, DIRTY_MEMORY_MIGRATION)) { *real_dirty_pages += 1; - long k = (start + addr) >> TARGET_PAGE_BITS; + long k = addr >> TARGET_PAGE_BITS; if (!test_and_set_bit(k, dest)) { 
num_dirty++; } diff --git a/migration/ram.c b/migration/ram.c index 9948b2d021..1def8122e9 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -1646,7 +1646,7 @@ static void migration_bitmap_sync_range(RAMState *rs, RAMBlock *rb, ram_addr_t length) { rs->migration_dirty_pages += - cpu_physical_memory_sync_dirty_bitmap(rb, 0, length, + cpu_physical_memory_sync_dirty_bitmap(rb, length, &rs->num_dirty_pages_period); }
Since the start parameter of cpu_physical_memory_sync_dirty_bitmap is always 0, we can remove this parameter and simplify the calculation a bit. Signed-off-by: Wei Yang <richardw.yang@linux.intel.com> --- include/exec/ram_addr.h | 15 ++++++--------- migration/ram.c | 2 +- 2 files changed, 7 insertions(+), 10 deletions(-)