@@ -44,6 +44,7 @@ struct RAMBlock {
uint64_t fd_offset;
int guest_memfd;
size_t page_size;
+ uint64_t align;
/* dirty bitmap used during migration */
unsigned long *bmap;
@@ -91,5 +92,10 @@ struct RAMBlock {
*/
ram_addr_t postcopy_length;
};
+
+#define RAM_BLOCK "RAMBlock"
+
+extern const VMStateDescription vmstate_ram_block;
+
#endif
#endif
@@ -1398,6 +1398,7 @@ static void *file_ram_alloc(RAMBlock *block,
block->mr->align = MAX(block->mr->align, QEMU_VMALLOC_ALIGN);
}
#endif
+ block->align = block->mr->align;
if (memory < block->page_size) {
error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
@@ -1848,6 +1849,7 @@ static void *ram_block_alloc_host(RAMBlock *rb, Error **errp)
rb->idstr);
}
}
+ rb->align = mr->align;
if (host) {
memory_try_enable_merging(host, rb->max_length);
@@ -1934,6 +1936,7 @@ static RAMBlock *ram_block_create(MemoryRegion *mr, ram_addr_t size,
rb->flags = ram_flags;
rb->page_size = qemu_real_host_page_size();
rb->mr = mr;
+ rb->align = mr->align;
if (ram_flags & RAM_GUEST_MEMFD) {
rb->guest_memfd = ram_block_create_guest_memfd(rb, errp);
@@ -2060,6 +2063,26 @@ RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
}
#endif
+const VMStateDescription vmstate_ram_block = {
+ .name = RAM_BLOCK,
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .precreate = true,
+ .factory = true,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(align, RAMBlock),
+ VMSTATE_VOID_PTR(host, RAMBlock),
+ VMSTATE_INT32(fd, RAMBlock),
+ VMSTATE_INT32(guest_memfd, RAMBlock),
+ VMSTATE_UINT32(flags, RAMBlock),
+ VMSTATE_UINT64(used_length, RAMBlock),
+ VMSTATE_UINT64(max_length, RAMBlock),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+vmstate_register_init_factory(vmstate_ram_block, RAMBlock);
+
static
RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
void (*resized)(const char*,
@@ -2070,6 +2093,7 @@ RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
{
RAMBlock *new_block;
int align;
+ g_autofree RAMBlock *preserved = NULL;
assert((ram_flags & ~(RAM_SHARED | RAM_RESIZEABLE | RAM_PREALLOC |
RAM_NORESERVE | RAM_GUEST_MEMFD)) == 0);
@@ -2086,6 +2110,17 @@ RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
}
new_block->resized = resized;
+ preserved = vmstate_claim_factory_object(RAM_BLOCK, new_block->idstr, 0);
+ if (preserved) {
+ assert(mr->align <= preserved->align);
+ mr->align = mr->align ?: preserved->align;
+ new_block->align = preserved->align;
+ new_block->fd = preserved->fd;
+ new_block->flags = preserved->flags;
+ new_block->used_length = preserved->used_length;
+ new_block->max_length = preserved->max_length;
+ }
+
if (!host) {
host = ram_block_alloc_host(new_block, errp);
if (!host) {
@@ -2093,6 +2128,10 @@ RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
g_free(new_block);
return NULL;
}
+ if (!(ram_flags & RAM_GUEST_MEMFD)) {
+ vmstate_register_named(new_block->idstr, 0, &vmstate_ram_block,
+ new_block);
+ }
}
new_block->host = host;
@@ -2157,6 +2196,7 @@ void qemu_ram_free(RAMBlock *block)
}
qemu_mutex_lock_ramlist();
+ vmstate_unregister_named(RAM_BLOCK, block->idstr, 0);
qemu_ram_unset_idstr(block);
QLIST_REMOVE_RCU(block, next);
ram_list.mru_block = NULL;
Preserve fields of RAMBlocks that allocate their host memory during CPR so
the RAM allocation can be recovered.  Mirror the mr->align field in the
RAMBlock to simplify the vmstate.

Preserve the old host address, even though it is immediately discarded, as
it will be needed in the future for CPR with iommufd.  Preserve guest_memfd,
even though CPR does not yet support it, to maintain vmstate compatibility
when it becomes supported.

Signed-off-by: Steve Sistare <steven.sistare@oracle.com>
---
 include/exec/ramblock.h |  6 ++++++
 system/physmem.c        | 40 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 46 insertions(+)