
[2/8] qmp: create qmp_savevm command

Message ID 1452770941-21582-3-git-send-email-den@openvz.org
State New

Commit Message

Denis V. Lunev Jan. 14, 2016, 11:28 a.m. UTC
The 'name' attribute is made mandatory, in contrast to the HMP command.

The patch also moves the hmp_savevm implementation into hmp.c. This
function is now just a simple wrapper and has no knowledge of migration
internals.

Signed-off-by: Denis V. Lunev <den@openvz.org>
CC: Juan Quintela <quintela@redhat.com>
CC: Amit Shah <amit.shah@redhat.com>
CC: Markus Armbruster <armbru@redhat.com>
CC: Eric Blake <eblake@redhat.com>
---
 hmp.c                         |  3 +--
 include/migration/migration.h |  2 --
 migration/savevm.c            |  2 +-
 qapi-schema.json              | 13 +++++++++++++
 qmp-commands.hx               | 25 +++++++++++++++++++++++++
 5 files changed, 40 insertions(+), 5 deletions(-)

Comments

Markus Armbruster Jan. 18, 2016, 3:58 p.m. UTC | #1
"Denis V. Lunev" <den@openvz.org> writes:

> 'name' attribute is made mandatory in distinction with HMP command.
>
> The patch also moves hmp_savevm implementation into hmp.c. This function
> is just a simple wrapper now and does not have knowledge about
> migration internals.
>
> Signed-off-by: Denis V. Lunev <den@openvz.org>
> CC: Juan Quintela <quintela@redhat.com>
> CC: Amit Shah <amit.shah@redhat.com>
> CC: Markus Armbruster <armbru@redhat.com>
> CC: Eric Blake <eblake@redhat.com>
> ---
>  hmp.c                         |  3 +--
>  include/migration/migration.h |  2 --
>  migration/savevm.c            |  2 +-
>  qapi-schema.json              | 13 +++++++++++++
>  qmp-commands.hx               | 25 +++++++++++++++++++++++++
>  5 files changed, 40 insertions(+), 5 deletions(-)
>
> diff --git a/hmp.c b/hmp.c
> index e7bab75..8d6eda6 100644
> --- a/hmp.c
> +++ b/hmp.c
> @@ -33,7 +33,6 @@
>  #include "ui/console.h"
>  #include "block/qapi.h"
>  #include "qemu-io.h"
> -#include "migration/migration.h"
>  
>  #ifdef CONFIG_SPICE
>  #include <spice/enums.h>
> @@ -2406,7 +2405,7 @@ void hmp_savevm(Monitor *mon, const QDict *qdict)
>          name = name_buf;
>      }
>  
> -    migrate_savevm(name, &local_err);
> +    qmp_savevm(name, &local_err);
>  
>      if (local_err != NULL) {
>          error_report_err(local_err);
> diff --git a/include/migration/migration.h b/include/migration/migration.h
> index 73c8bb1..58c04a9 100644
> --- a/include/migration/migration.h
> +++ b/include/migration/migration.h
> @@ -277,8 +277,6 @@ int migrate_compress_threads(void);
>  int migrate_decompress_threads(void);
>  bool migrate_use_events(void);
>  
> -void migrate_savevm(const char *name, Error **errp);
> -
>  /* Sending on the return path - generic and then for each message type */
>  void migrate_send_rp_message(MigrationIncomingState *mis,
>                               enum mig_rp_message_type message_type,
> diff --git a/migration/savevm.c b/migration/savevm.c
> index 308302a..0dbb15f 100644
> --- a/migration/savevm.c
> +++ b/migration/savevm.c
> @@ -1905,7 +1905,7 @@ int qemu_loadvm_state(QEMUFile *f)
>      return ret;
>  }
>  
> -void migrate_savevm(const char *name, Error **errp)
> +void qmp_savevm(const char *name, Error **errp)
>  {
>      BlockDriverState *bs, *bs1;
>      QEMUSnapshotInfo sn1, *sn = &sn1, old_sn1, *old_sn = &old_sn1;
> diff --git a/qapi-schema.json b/qapi-schema.json
> index 2e31733..09d1a1a 100644
> --- a/qapi-schema.json
> +++ b/qapi-schema.json
> @@ -4054,3 +4054,16 @@
>  ##
>  { 'enum': 'ReplayMode',
>    'data': [ 'none', 'record', 'play' ] }
> +
> +##
> +# @savevm
> +#
> +# Save a VM snapshot. Old snapshot with the same name will be deleted if exists.
> +#
> +# @name: identifier of a snapshot to be created
> +#
> +# Returns: Nothing on success
> +#
> +# Since 2.6
> +##
> +{ 'command': 'savevm', 'data': {'name': 'str'} }
> diff --git a/qmp-commands.hx b/qmp-commands.hx
> index db072a6..b7851e1 100644
> --- a/qmp-commands.hx
> +++ b/qmp-commands.hx
> @@ -4795,3 +4795,28 @@ Example:
>                   {"type": 0, "out-pport": 0, "pport": 0, "vlan-id": 3840,
>                    "pop-vlan": 1, "id": 251658240}
>     ]}
> +
> +EQMP
> +
> +SQMP
> +savevm
> +------
> +
> +Save a VM snapshot. Old snapshot with the same name will be deleted if exists.
> +
> +Arguments:
> +
> +- "name": snapshot name
> +
> +Example:
> +
> +-> { "execute": "savevm", "arguments": { "name": "snapshot1" } }
> +<- { "return": {} }
> +
> +EQMP
> +
> +    {
> +        .name       = "savevm",
> +        .args_type  = "name:s",
> +        .mhandler.cmd_new = qmp_marshal_savevm,
> +    },

A snapshot has a tag (QEMUSnapshotInfo member name) and an ID
(QEMUSnapshotInfo member id_str).

HMP's name arguments are overloaded: they're matched both against tag
and ID.  Unwisely chosen tags can create ambiguity.  Example:

    (qemu) savevm 2
    (qemu) savevm 
    (qemu) info snapshots
    ID        TAG                 VM SIZE                DATE       VM CLOCK
    1         2                      1.7M 2016-01-18 16:56:31   00:00:00.000
    2         vm-20160118165641      1.7M 2016-01-18 16:56:41   00:00:00.000

Care to guess which one we get when we ask for "2"?

I think we want separate, unoverloaded arguments for QMP.
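
For illustration, a QAPI schema sketch of what separate, unoverloaded
arguments could look like (purely hypothetical, not part of this patch):

    { 'command': 'savevm', 'data': { '*tag': 'str', '*id': 'str' } }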
Denis V. Lunev Jan. 18, 2016, 4:10 p.m. UTC | #2
On 01/18/2016 06:58 PM, Markus Armbruster wrote:
> "Denis V. Lunev" <den@openvz.org> writes:
>
>> 'name' attribute is made mandatory in distinction with HMP command.
>>
>> The patch also moves hmp_savevm implementation into hmp.c. This function
>> is just a simple wrapper now and does not have knowledge about
>> migration internals.
>>
>> Signed-off-by: Denis V. Lunev <den@openvz.org>
>> CC: Juan Quintela <quintela@redhat.com>
>> CC: Amit Shah <amit.shah@redhat.com>
>> CC: Markus Armbruster <armbru@redhat.com>
>> CC: Eric Blake <eblake@redhat.com>
>> ---
>>   hmp.c                         |  3 +--
>>   include/migration/migration.h |  2 --
>>   migration/savevm.c            |  2 +-
>>   qapi-schema.json              | 13 +++++++++++++
>>   qmp-commands.hx               | 25 +++++++++++++++++++++++++
>>   5 files changed, 40 insertions(+), 5 deletions(-)
>>
>> diff --git a/hmp.c b/hmp.c
>> index e7bab75..8d6eda6 100644
>> --- a/hmp.c
>> +++ b/hmp.c
>> @@ -33,7 +33,6 @@
>>   #include "ui/console.h"
>>   #include "block/qapi.h"
>>   #include "qemu-io.h"
>> -#include "migration/migration.h"
>>   
>>   #ifdef CONFIG_SPICE
>>   #include <spice/enums.h>
>> @@ -2406,7 +2405,7 @@ void hmp_savevm(Monitor *mon, const QDict *qdict)
>>           name = name_buf;
>>       }
>>   
>> -    migrate_savevm(name, &local_err);
>> +    qmp_savevm(name, &local_err);
>>   
>>       if (local_err != NULL) {
>>           error_report_err(local_err);
>> diff --git a/include/migration/migration.h b/include/migration/migration.h
>> index 73c8bb1..58c04a9 100644
>> --- a/include/migration/migration.h
>> +++ b/include/migration/migration.h
>> @@ -277,8 +277,6 @@ int migrate_compress_threads(void);
>>   int migrate_decompress_threads(void);
>>   bool migrate_use_events(void);
>>   
>> -void migrate_savevm(const char *name, Error **errp);
>> -
>>   /* Sending on the return path - generic and then for each message type */
>>   void migrate_send_rp_message(MigrationIncomingState *mis,
>>                                enum mig_rp_message_type message_type,
>> diff --git a/migration/savevm.c b/migration/savevm.c
>> index 308302a..0dbb15f 100644
>> --- a/migration/savevm.c
>> +++ b/migration/savevm.c
>> @@ -1905,7 +1905,7 @@ int qemu_loadvm_state(QEMUFile *f)
>>       return ret;
>>   }
>>   
>> -void migrate_savevm(const char *name, Error **errp)
>> +void qmp_savevm(const char *name, Error **errp)
>>   {
>>       BlockDriverState *bs, *bs1;
>>       QEMUSnapshotInfo sn1, *sn = &sn1, old_sn1, *old_sn = &old_sn1;
>> diff --git a/qapi-schema.json b/qapi-schema.json
>> index 2e31733..09d1a1a 100644
>> --- a/qapi-schema.json
>> +++ b/qapi-schema.json
>> @@ -4054,3 +4054,16 @@
>>   ##
>>   { 'enum': 'ReplayMode',
>>     'data': [ 'none', 'record', 'play' ] }
>> +
>> +##
>> +# @savevm
>> +#
>> +# Save a VM snapshot. Old snapshot with the same name will be deleted if exists.
>> +#
>> +# @name: identifier of a snapshot to be created
>> +#
>> +# Returns: Nothing on success
>> +#
>> +# Since 2.6
>> +##
>> +{ 'command': 'savevm', 'data': {'name': 'str'} }
>> diff --git a/qmp-commands.hx b/qmp-commands.hx
>> index db072a6..b7851e1 100644
>> --- a/qmp-commands.hx
>> +++ b/qmp-commands.hx
>> @@ -4795,3 +4795,28 @@ Example:
>>                    {"type": 0, "out-pport": 0, "pport": 0, "vlan-id": 3840,
>>                     "pop-vlan": 1, "id": 251658240}
>>      ]}
>> +
>> +EQMP
>> +
>> +SQMP
>> +savevm
>> +------
>> +
>> +Save a VM snapshot. Old snapshot with the same name will be deleted if exists.
>> +
>> +Arguments:
>> +
>> +- "name": snapshot name
>> +
>> +Example:
>> +
>> +-> { "execute": "savevm", "arguments": { "name": "snapshot1" } }
>> +<- { "return": {} }
>> +
>> +EQMP
>> +
>> +    {
>> +        .name       = "savevm",
>> +        .args_type  = "name:s",
>> +        .mhandler.cmd_new = qmp_marshal_savevm,
>> +    },
> A snapshot has a tag (QEMUSnapshotInfo member name) and an ID
> (QEMUSnapshotInfo member id_str).
>
> HMP's name arguments are overloaded: they're matched both against tag
> and ID.  Unwisely chosen tags can create ambiguity.  Example:
>
>      (qemu) savevm 2
>      (qemu) savevm
>      (qemu) info snapshots
>      ID        TAG                 VM SIZE                DATE       VM CLOCK
>      1         2                      1.7M 2016-01-18 16:56:31   00:00:00.000
>      2         vm-20160118165641      1.7M 2016-01-18 16:56:41   00:00:00.000
>
> Care to guess which one we get when we ask for "2"?
>
> I think we want separate, unoverloaded arguments for QMP.
I think there is no need to. The name is now absolutely mandatory.
Thus new snapshots will always have a 'name' specified, and we
will match against the name only.

'id' will be used for old VMs; it is a convenience
layer to make old, 'id'-only snapshots accessible
through the new interface in the same way as before.
Markus Armbruster Jan. 19, 2016, 6:11 p.m. UTC | #3
"Denis V. Lunev" <den@openvz.org> writes:

> On 01/18/2016 06:58 PM, Markus Armbruster wrote:
>> "Denis V. Lunev" <den@openvz.org> writes:
>>
>>> 'name' attribute is made mandatory in distinction with HMP command.
>>>
>>> The patch also moves hmp_savevm implementation into hmp.c. This function
>>> is just a simple wrapper now and does not have knowledge about
>>> migration internals.
[...]
>>> diff --git a/qapi-schema.json b/qapi-schema.json
>>> index 2e31733..09d1a1a 100644
>>> --- a/qapi-schema.json
>>> +++ b/qapi-schema.json
>>> @@ -4054,3 +4054,16 @@
>>>   ##
>>>   { 'enum': 'ReplayMode',
>>>     'data': [ 'none', 'record', 'play' ] }
>>> +
>>> +##
>>> +# @savevm
>>> +#
>>> +# Save a VM snapshot. Old snapshot with the same name will be deleted if exists.
>>> +#
>>> +# @name: identifier of a snapshot to be created
>>> +#
>>> +# Returns: Nothing on success
>>> +#
>>> +# Since 2.6
>>> +##
>>> +{ 'command': 'savevm', 'data': {'name': 'str'} }
>>> diff --git a/qmp-commands.hx b/qmp-commands.hx
>>> index db072a6..b7851e1 100644
>>> --- a/qmp-commands.hx
>>> +++ b/qmp-commands.hx
>>> @@ -4795,3 +4795,28 @@ Example:
>>>                    {"type": 0, "out-pport": 0, "pport": 0, "vlan-id": 3840,
>>>                     "pop-vlan": 1, "id": 251658240}
>>>      ]}
>>> +
>>> +EQMP
>>> +
>>> +SQMP
>>> +savevm
>>> +------
>>> +
>>> +Save a VM snapshot. Old snapshot with the same name will be deleted if exists.
>>> +
>>> +Arguments:
>>> +
>>> +- "name": snapshot name
>>> +
>>> +Example:
>>> +
>>> +-> { "execute": "savevm", "arguments": { "name": "snapshot1" } }
>>> +<- { "return": {} }
>>> +
>>> +EQMP
>>> +
>>> +    {
>>> +        .name       = "savevm",
>>> +        .args_type  = "name:s",
>>> +        .mhandler.cmd_new = qmp_marshal_savevm,
>>> +    },
>> A snapshot has a tag (QEMUSnapshotInfo member name) and an ID
>> (QEMUSnapshotInfo member id_str).
>>
>> HMP's name arguments are overloaded: they're matched both against tag
>> and ID.  Unwisely chosen tags can create ambiguity.  Example:
>>
>>      (qemu) savevm 2
>>      (qemu) savevm
>>      (qemu) info snapshots
>>      ID        TAG                 VM SIZE                DATE       VM CLOCK
>>      1         2                      1.7M 2016-01-18 16:56:31   00:00:00.000
>>      2         vm-20160118165641      1.7M 2016-01-18 16:56:41   00:00:00.000
>>
>> Care to guess which one we get when we ask for "2"?
>>
>> I think we want separate, unoverloaded arguments for QMP.
> I think there is no need to. Name is now absolutely mandatory.
> Thus for new snapshots we will have 'name' specified and we
> will be bound to name only.
>
> 'id' will be used for old VMs and this is convenience
> layer to make old 'id' only snaphosts accessible
> through new interface in the same way as old.

The overloaded interface you propose is more complex than it seems.  You
hide the complexity by not documenting its workings.  Not even to the
(insufficient!) degree the HMP interface documents how its overloaded
name parameters work.

Merely copying over the HMP documentation won't cut it.  The bar for new
QMP interfaces is a fair bit higher than "no worse than HMP".  The new
interface must be reasonably sane for *QMP*, and sufficiently documented.

If we can't make a sane QMP interface, I'd rather have no QMP interface.
However, I believe we *can* make a sane QMP interface if we put in the
design work.

The design work must start with a review of what we're trying to
accomplish, and how to fit it into the rest of the system.  Here's my
attempt.  Since my knowledge on snapshots is rather superficial, I'm
cc'ing Kevin for additional snapshot expertise.  Kevin, please correct
me when I talk nonsense.  I'm further cc'ing Eric and Peter for the
management layer perspective.

A point-in-time snapshot of a system consists of a snapshot of its
machine state and snapshots of its storage.  All the snapshots need to
be made at the same point in time for the result to be consistent.

Snapshots of read-only storage carry no information and are commonly
omitted.

Isolated storage snapshots can make sense, but snapshotting the machine
state without also snapshotting the machine's storage doesn't sound
useful to me.

Both storage and machine state snapshots come in two flavours: internal
and external.

External ones can be made with any block backend, but internal storage
snapshots work only with certain formats, notably qcow2.  QMP supports
both kinds of storage snapshots.
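
For reference, minimal QMP examples of the two kinds of storage
snapshot (device names and paths are illustrative):

    -> { "execute": "blockdev-snapshot-sync",
         "arguments": { "device": "drive0",
                        "snapshot-file": "/tmp/drive0-overlay.qcow2",
                        "format": "qcow2" } }
    <- { "return": {} }

    -> { "execute": "blockdev-snapshot-internal-sync",
         "arguments": { "device": "drive0", "name": "snap1" } }
    <- { "return": {} }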

Both kinds of storage snapshots need exclusive access while they work.
They're relatively quick, but the delay could be noticeable for large
internal snapshots, and perhaps for external snapshots on really slow
storage.

Internal machine state snapshots are currently only available via HMP's
savevm, which integrates internal machine state and storage snapshots.
This is non-live, i.e. the guest is stopped while the snapshot gets
saved.  I figure we could make it live if we really wanted to.  Another
instance of the emerging background job concept.

On the implementation level, QCOW2 can't currently store a machine state
snapshot without also storing a storage snapshot.  I guess we could
change this if we really wanted to.

External machine state snapshots are basically migrate to file.
Supported by QMP.
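
For example, a non-live external machine state snapshot via QMP
(destination path illustrative):

    -> { "execute": "stop" }
    <- { "return": {} }
    -> { "execute": "migrate",
         "arguments": { "uri": "exec:cat > /tmp/vmstate" } }
    <- { "return": {} }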

Live migration to file is possible, but currently wastes space, because
memory dirtied during migration gets saved multiple times.  Could be
fixed either by making migration update previously saved memory instead
of appending (beware, random I/O), or by compacting the file afterwards.

Non-live migration to file doesn't waste space that way.

To take multiple *consistent* snapshots, you have to bundle them up in a
transaction.  Transactions currently support only *storage* snapshots,
though.

Work-around for external machine state snapshot: migrate to file
(possibly live), leaving the guest stopped on completion, take storage
snapshots, resume guest.
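
Sketched as a QMP sequence, using the commands shown above: migrate to
file, wait for completion (the guest is then stopped), then snapshot
the storage in one transaction and resume (device names and paths
illustrative):

    -> { "execute": "transaction",
         "arguments": { "actions": [
              { "type": "blockdev-snapshot-internal-sync",
                "data": { "device": "drive0", "name": "snap1" } },
              { "type": "blockdev-snapshot-sync",
                "data": { "device": "drive1",
                          "snapshot-file": "/tmp/drive1-snap1.qcow2",
                          "format": "qcow2" } } ] } }
    <- { "return": {} }
    -> { "execute": "cont" }
    <- { "return": {} }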

You can combine internal and external storage snapshots with an external
machine state snapshot to get a mixed system snapshot.

You currently can't do that with an internal machine state snapshot: the
only way to take one is HMP savevm, which insists on internally
snapshotting all writable storage, and doesn't transact together with
external storage snapshots.

Except for the case "purely internal snapshot with just one writable
storage device", a system snapshot consists of multiple parts stored in
separate files.  Tying the parts together is a management problem.  QEMU
provides rudimentary management for purely internal snapshots, but it's
flawed: missing storage isn't detected, and additional storage can creep
in if snapshot tags or IDs happen to match.  I guess managing the parts
is better left to the management layer.

I figure a fully general QMP interface would let you perform a system
snapshot by combining storage snapshots of either kind with either kind
of machine state snapshot.

We already have most of the building blocks: we can take external and
internal storage snapshots, and combine them in transactions.

What's missing is transactionable machine state snapshots.

We know how to work around it for external machine state snapshots (see
above), but a transaction-based solution might be nicer.

Any solution for internal machine state snapshots in QMP should at least
try to fit into this.  Some restrictions are okay.  For instance, we
probably need to restrict internal machine state snapshots to piggyback
on an internal storage snapshot for now, so we don't have to dig up
QCOW2 just to get QMP support.

We can talk about more convenient interfaces for common special cases,
but I feel we need to design for the general case.  We don't have to
implement the completely general case right away, though.  As long as we
know where we want to go, incremental steps towards that goal are fine.

Can we create a transactionable QMP command to take an internal machine
state snapshot?

This would be like HMP savevm with the following differences:

* Separate parameters for tag and ID.  I'll have none of this
  overloading nonsense in QMP.

* Specify the destination block device.  I'll have none of this "pick a
  device in some magic, undocumented way" in QMP.

* Leave alone the other block devices.  Adding transaction actions to
  snapshot all the writable block devices to get a full system snapshot
  is the user's responsibility.
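
A purely hypothetical sketch of how such a command could appear as a
transaction action (the action name "vmstate-snapshot-internal-sync"
and its parameters are invented here for illustration; they do not
exist in QEMU):

    -> { "execute": "transaction",
         "arguments": { "actions": [
              { "type": "vmstate-snapshot-internal-sync",
                "data": { "device": "drive0", "tag": "snap1" } },
              { "type": "blockdev-snapshot-internal-sync",
                "data": { "device": "drive1", "name": "snap1" } } ] } }
    <- { "return": {} }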

Limitations:

* No live internal machine snapshot, yet.

* The storage device taking the internal snapshot must also be
  internally snapshot for now.  In fact, the command does both
  (tolerable wart).

Open questions:

* Do we want the QMP command to delete existing snapshots with
  conflicting tag / ID, like HMP savevm does?  Or do we want it to fail
  the transaction?

* Do we need transactions for switching to a system snapshot, too?

Opinions?

Denis, I understand doing this right will be more work than simply
rebadging HMP's savevm etc for QMP, and probably a good deal more than
you anticipated.  Sorry about that.  I'm trying to accommodate you as far
as possible without screwing up QMP.  Not screwing up QMP too much is my
job as maintainer.
Denis V. Lunev Jan. 22, 2016, 8:01 a.m. UTC | #4
On 01/19/2016 09:11 PM, Markus Armbruster wrote:
> "Denis V. Lunev" <den@openvz.org> writes:
>
>> On 01/18/2016 06:58 PM, Markus Armbruster wrote:
>>> "Denis V. Lunev" <den@openvz.org> writes:
>>>
>>>> 'name' attribute is made mandatory in distinction with HMP command.
>>>>
>>>> The patch also moves hmp_savevm implementation into hmp.c. This function
>>>> is just a simple wrapper now and does not have knowledge about
>>>> migration internals.
> [...]
>>>> diff --git a/qapi-schema.json b/qapi-schema.json
>>>> index 2e31733..09d1a1a 100644
>>>> --- a/qapi-schema.json
>>>> +++ b/qapi-schema.json
>>>> @@ -4054,3 +4054,16 @@
>>>>    ##
>>>>    { 'enum': 'ReplayMode',
>>>>      'data': [ 'none', 'record', 'play' ] }
>>>> +
>>>> +##
>>>> +# @savevm
>>>> +#
>>>> +# Save a VM snapshot. Old snapshot with the same name will be deleted if exists.
>>>> +#
>>>> +# @name: identifier of a snapshot to be created
>>>> +#
>>>> +# Returns: Nothing on success
>>>> +#
>>>> +# Since 2.6
>>>> +##
>>>> +{ 'command': 'savevm', 'data': {'name': 'str'} }
>>>> diff --git a/qmp-commands.hx b/qmp-commands.hx
>>>> index db072a6..b7851e1 100644
>>>> --- a/qmp-commands.hx
>>>> +++ b/qmp-commands.hx
>>>> @@ -4795,3 +4795,28 @@ Example:
>>>>                     {"type": 0, "out-pport": 0, "pport": 0, "vlan-id": 3840,
>>>>                      "pop-vlan": 1, "id": 251658240}
>>>>       ]}
>>>> +
>>>> +EQMP
>>>> +
>>>> +SQMP
>>>> +savevm
>>>> +------
>>>> +
>>>> +Save a VM snapshot. Old snapshot with the same name will be deleted if exists.
>>>> +
>>>> +Arguments:
>>>> +
>>>> +- "name": snapshot name
>>>> +
>>>> +Example:
>>>> +
>>>> +-> { "execute": "savevm", "arguments": { "name": "snapshot1" } }
>>>> +<- { "return": {} }
>>>> +
>>>> +EQMP
>>>> +
>>>> +    {
>>>> +        .name       = "savevm",
>>>> +        .args_type  = "name:s",
>>>> +        .mhandler.cmd_new = qmp_marshal_savevm,
>>>> +    },
>>> A snapshot has a tag (QEMUSnapshotInfo member name) and an ID
>>> (QEMUSnapshotInfo member id_str).
>>>
>>> HMP's name arguments are overloaded: they're matched both against tag
>>> and ID.  Unwisely chosen tags can create ambiguity.  Example:
>>>
>>>       (qemu) savevm 2
>>>       (qemu) savevm
>>>       (qemu) info snapshots
>>>       ID        TAG                 VM SIZE                DATE       VM CLOCK
>>>       1         2                      1.7M 2016-01-18 16:56:31   00:00:00.000
>>>       2         vm-20160118165641      1.7M 2016-01-18 16:56:41   00:00:00.000
>>>
>>> Care to guess which one we get when we ask for "2"?
>>>
>>> I think we want separate, unoverloaded arguments for QMP.
>> I think there is no need to. Name is now absolutely mandatory.
>> Thus for new snapshots we will have 'name' specified and we
>> will be bound to name only.
>>
>> 'id' will be used for old VMs and this is convenience
>> layer to make old 'id' only snaphosts accessible
>> through new interface in the same way as old.
> The overloaded interface you propose is more complex than it seems.  You
> hide the complexity by not documenting its workings.  Not even to the
> (insufficient!) degree the HMP interface documents how its overloaded
> name parameters work.
>
> Merely copying over the HMP documentation won't cut it.  The bar for new
> QMP interfaces is a fair bit higher than "no worse than HMP".  The new
> interface must reasonably sane for *QMP*, and sufficiently documented.
>
> If we can't make a sane QMP interface, I'd rather have no QMP interface.
> However, I believe we *can* make a sane QMP interface if we put in the
> design work.
>
> The design work must start with a review of what we're trying to
> accomplish, and how to fit it into the rest of the system.  Here's my
> attempt.  Since my knowledge on snapshots is rather superficial, I'm
> cc'ing Kevin for additional snapshot expertise.  Kevin, please correct
> me when I talk nonsense.  I'm further cc'ing Eric and Peter for the
> management layer perspective.
>
> A point-in-time snapshot of a system consists of a snapshot of its
> machine state and snapshots of its storage.  All the snapshots need to
> be made at the same point in time for the result to be consistent.
>
> Snapshots of read-only storage carry no information and are commonly
> omitted.
>
> Isolated storage snapshots can make sense, but snapshotting the machine
> state without also snapshotting the machine's storage doesn't sound
> useful to me.
>
> Both storage and machine state snapshots come in two flavours: internal
> and external.
>
> External ones can be made with any block backend, but internal storage
> snapshots work only with certain formats, notably qcow2.  QMP supports
> both kinds of storage snapshots.
>
> Both kinds of storage snapshots need exclusive access while they work.
> They're relatively quick, but the delay could be noticable for large
> internal snapshots, and perhaps for external snapshots on really slow
> storage.
>
> Internal machine state snapshots are currently only available via HMP's
> savevm, which integrates internal machine state and storage snapshots.
> This is non-live, i.e. the guest is stopped while the snapshot gets
> saved.  I figure we could make it live if we really wanted to.  Another
> instance of the emerging background job concept.
>
> On the implementation level, QCOW2 can't currently store a machine state
> snapshot without also storing a storage snapshot.  I guess we could
> change this if we really wanted to.
>
> External machine state snapshots are basically migrate to file.
> Supported by QMP.
>
> Live migration to file is possible, but currently wastes space, because
> memory dirtied during migration gets saved multiple times.  Could be
> fixed either by making migration update previously saved memory instead
> of appending (beware, random I/O), or by compacting the file afterwards.
>
> Non-live migration to file doesn't waste space that way.
>
> To take multiple *consistent* snapshots, you have to bundle them up in a
> transaction.  Transactions currently support only *storage* snapshots,
> though.
>
> Work-around for external machine state snapshot: migrate to file
> (possibly live), leaving the guest sopped on completion, take storage
> snapshots, resume guest.
>
> You can combine internal and external storage snapshots with an external
> machine state snapshot to get a mixed system snapshot.
>
> You currently can't do that with an internal machine state snapshot: the
> only way to take one is HMP savevm, which insists on internally
> snapshotting all writable storage, and doesn't transact together with
> external storage snapshots.
>
> Except for the case "purely internal snapshot with just one writable
> storage device", a system snapshot consists of multiple parts stored in
> separate files.  Tying the parts together is a management problem.  QEMU
> provides rudimentary management for purely internal snapshots, but it's
> flawed: missing storage isn't detected, and additional storage can creep
> in if snapshot tags or IDs happen to match.  I guess managing the parts
> is better left to the management layer.
>
> I figure a fully general QMP interface would let you perform a system
> snapshot by combining storage snapshots of either kind with either kind
> of machine state snapshot.
>
> We already have most of the building blocks: we can take external and
> internal storage snapshots, and combine them in transactions.
>
> What's missing is transactionable machine state snapshots.
>
> We know how to work around it for external machine state snapshots (see
> above), but a transaction-based solution might be nicer.
>
> Any solution for internal machine state snapshots in QMP should at least
> try to fit into this.  Some restrictions are okay.  For instance, we
> probably need to restrict internal machine state snapshots to piggyback
> on an internal storage snapshot for now, so we don't have to dig up
> QCOW2 just to get QMP support.
>
> We can talk about more convenient interfaces for common special cases,
> but I feel we need to design for the general case.  We don't have to
> implement the completely general case right away, though.  As long as we
> know where we want to go, incremental steps towards that goal are fine.
>
> Can we create a transactionable QMP command to take an internal machine
> state snapshot?
>
> This would be like HMP savevm with the following differences:
>
> * Separate parameters for tag and ID.  I'll have none of this
>    overloading nonsense in QMP.
Why do we need both 'name' and 'id' values in the future at all?
Why is a single descriptive parameter not enough? From my point
of view all this overloading is nonsense, and in the future a single
name parameter should be kept.

> * Specify the destination block device.  I'll have none of this "pick a
>    device in some magic, undocumented way" in QMP.
agreed

> * Leave alone the other block devices.  Adding transaction actions to
>    snapshot all the writable block devices to get a full system snapshot
>    is the user's responsibility.
This requires clarification:
- do you mean that the list of devices should come from the
management layer?
If so, this is error-prone at the restoration stage. From my point of
view, restoration must check that all RW devices are restored properly
and that none are missing. Do you propose to put this into libvirt?
From my point of view, this flexibility is not that necessary.

There is one important thing missing from your description:
a machine snapshot MUST handle configuration changes. This means
that at the moment of the snapshot the VM can have a different set of
disks and other peripherals, and this should be properly handled.


> Limitations:
>
> * No live internal machine snapshot, yet.
>
> * The storage device taking the internal snapshot must also be
>    internally snapshot for now.  In fact, the command does both
>    (tolerable wart).
>
> Open questions:
>
> * Do we want the QMP command to delete existing snapshots with
>    conflicting tag / ID, like HMP savevm does?  Or do we want it to fail
>    the transaction?
>
> * Do we need transactions for switching to a system snapshot, too?
>
> Opinions?
>
> Denis, I understand doing this right will be more work than simply
> rebadging HMP's savevm etc for QMP, and probably a good deal more than
> you anticipated.  Sorry about that.  I'm trying to accomodate you as far
> as possible without screwing up QMP.  Not screwing up QMP too much is my
> job as maintainer.
This is not a problem if we will be able to do things with less
latency. On my side, I am a bit frustrated that this discussion
comes with v5 of the patches, after two months of waiting for them
to be merged, given that most of the previous discussion was about
mistakes in the code.
Markus Armbruster Jan. 22, 2016, 3:31 p.m. UTC | #5
"Denis V. Lunev" <den@openvz.org> writes:

> On 01/19/2016 09:11 PM, Markus Armbruster wrote:
>> "Denis V. Lunev" <den@openvz.org> writes:
>>
>>> On 01/18/2016 06:58 PM, Markus Armbruster wrote:
>>>> "Denis V. Lunev" <den@openvz.org> writes:
>>>>
>>>>> 'name' attribute is made mandatory in distinction with HMP command.
>>>>>
>>>>> The patch also moves hmp_savevm implementation into hmp.c. This function
>>>>> is just a simple wrapper now and does not have knowledge about
>>>>> migration internals.
>> [...]
>>>>> diff --git a/qapi-schema.json b/qapi-schema.json
>>>>> index 2e31733..09d1a1a 100644
>>>>> --- a/qapi-schema.json
>>>>> +++ b/qapi-schema.json
>>>>> @@ -4054,3 +4054,16 @@
>>>>>    ##
>>>>>    { 'enum': 'ReplayMode',
>>>>>      'data': [ 'none', 'record', 'play' ] }
>>>>> +
>>>>> +##
>>>>> +# @savevm
>>>>> +#
>>>>> +# Save a VM snapshot. Old snapshot with the same name will be deleted if exists.
>>>>> +#
>>>>> +# @name: identifier of a snapshot to be created
>>>>> +#
>>>>> +# Returns: Nothing on success
>>>>> +#
>>>>> +# Since 2.6
>>>>> +##
>>>>> +{ 'command': 'savevm', 'data': {'name': 'str'} }
>>>>> diff --git a/qmp-commands.hx b/qmp-commands.hx
>>>>> index db072a6..b7851e1 100644
>>>>> --- a/qmp-commands.hx
>>>>> +++ b/qmp-commands.hx
>>>>> @@ -4795,3 +4795,28 @@ Example:
>>>>>                     {"type": 0, "out-pport": 0, "pport": 0, "vlan-id": 3840,
>>>>>                      "pop-vlan": 1, "id": 251658240}
>>>>>       ]}
>>>>> +
>>>>> +EQMP
>>>>> +
>>>>> +SQMP
>>>>> +savevm
>>>>> +------
>>>>> +
>>>>> +Save a VM snapshot. Old snapshot with the same name will be deleted if exists.
>>>>> +
>>>>> +Arguments:
>>>>> +
>>>>> +- "name": snapshot name
>>>>> +
>>>>> +Example:
>>>>> +
>>>>> +-> { "execute": "savevm", "arguments": { "name": "snapshot1" } }
>>>>> +<- { "return": {} }
>>>>> +
>>>>> +EQMP
>>>>> +
>>>>> +    {
>>>>> +        .name       = "savevm",
>>>>> +        .args_type  = "name:s",
>>>>> +        .mhandler.cmd_new = qmp_marshal_savevm,
>>>>> +    },
>>>> A snapshot has a tag (QEMUSnapshotInfo member name) and an ID
>>>> (QEMUSnapshotInfo member id_str).
>>>>
>>>> HMP's name arguments are overloaded: they're matched both against tag
>>>> and ID.  Unwisely chosen tags can create ambiguity.  Example:
>>>>
>>>>       (qemu) savevm 2
>>>>       (qemu) savevm
>>>>       (qemu) info snapshots
>>>>       ID        TAG                 VM SIZE                DATE       VM CLOCK
>>>>       1         2                      1.7M 2016-01-18 16:56:31   00:00:00.000
>>>>       2         vm-20160118165641      1.7M 2016-01-18 16:56:41   00:00:00.000
>>>>
>>>> Care to guess which one we get when we ask for "2"?
>>>>
>>>> I think we want separate, unoverloaded arguments for QMP.
>>> I think there is no need to. Name is now absolutely mandatory.
>>> Thus for new snapshots we will have 'name' specified and we
>>> will be bound to name only.
>>>
>>> 'id' will be used for old VMs and this is convenience
>>> layer to make old 'id' only snaphosts accessible
>>> through new interface in the same way as old.
>> The overloaded interface you propose is more complex than it seems.  You
>> hide the complexity by not documenting its workings.  Not even to the
>> (insufficient!) degree the HMP interface documents how its overloaded
>> name parameters work.
>>
>> Merely copying over the HMP documentation won't cut it.  The bar for new
>> QMP interfaces is a fair bit higher than "no worse than HMP".  The new
>> interface must reasonably sane for *QMP*, and sufficiently documented.
>>
>> If we can't make a sane QMP interface, I'd rather have no QMP interface.
>> However, I believe we *can* make a sane QMP interface if we put in the
>> design work.
>>
>> The design work must start with a review of what we're trying to
>> accomplish, and how to fit it into the rest of the system.  Here's my
>> attempt.  Since my knowledge on snapshots is rather superficial, I'm
>> cc'ing Kevin for additional snapshot expertise.  Kevin, please correct
>> me when I talk nonsense.  I'm further cc'ing Eric and Peter for the
>> management layer perspective.
>>
>> A point-in-time snapshot of a system consists of a snapshot of its
>> machine state and snapshots of its storage.  All the snapshots need to
>> be made at the same point in time for the result to be consistent.
>>
>> Snapshots of read-only storage carry no information and are commonly
>> omitted.
>>
>> Isolated storage snapshots can make sense, but snapshotting the machine
>> state without also snapshotting the machine's storage doesn't sound
>> useful to me.
>>
>> Both storage and machine state snapshots come in two flavours: internal
>> and external.
>>
>> External ones can be made with any block backend, but internal storage
>> snapshots work only with certain formats, notably qcow2.  QMP supports
>> both kinds of storage snapshots.
>>
>> Both kinds of storage snapshots need exclusive access while they work.
>> They're relatively quick, but the delay could be noticable for large
>> internal snapshots, and perhaps for external snapshots on really slow
>> storage.
>>
>> Internal machine state snapshots are currently only available via HMP's
>> savevm, which integrates internal machine state and storage snapshots.
>> This is non-live, i.e. the guest is stopped while the snapshot gets
>> saved.  I figure we could make it live if we really wanted to.  Another
>> instance of the emerging background job concept.
>>
>> On the implementation level, QCOW2 can't currently store a machine state
>> snapshot without also storing a storage snapshot.  I guess we could
>> change this if we really wanted to.
>>
>> External machine state snapshots are basically migrate to file.
>> Supported by QMP.
>>
>> Live migration to file is possible, but currently wastes space, because
>> memory dirtied during migration gets saved multiple times.  Could be
>> fixed either by making migration update previously saved memory instead
>> of appending (beware, random I/O), or by compacting the file afterwards.
>>
>> Non-live migration to file doesn't waste space that way.
>>
>> To take multiple *consistent* snapshots, you have to bundle them up in a
>> transaction.  Transactions currently support only *storage* snapshots,
>> though.
>>
>> Work-around for external machine state snapshot: migrate to file
>> (possibly live), leaving the guest sopped on completion, take storage
>> snapshots, resume guest.
>>
>> You can combine internal and external storage snapshots with an external
>> machine state snapshot to get a mixed system snapshot.
>>
>> You currently can't do that with an internal machine state snapshot: the
>> only way to take one is HMP savevm, which insists on internally
>> snapshotting all writable storage, and doesn't transact together with
>> external storage snapshots.
>>
>> Except for the case "purely internal snapshot with just one writable
>> storage device", a system snapshot consists of multiple parts stored in
>> separate files.  Tying the parts together is a management problem.  QEMU
>> provides rudimentary management for purely internal snapshots, but it's
>> flawed: missing storage isn't detected, and additional storage can creep
>> in if snapshot tags or IDs happen to match.  I guess managing the parts
>> is better left to the management layer.
>>
>> I figure a fully general QMP interface would let you perform a system
>> snapshot by combining storage snapshots of either kind with either kind
>> of machine state snapshot.
>>
>> We already have most of the building blocks: we can take external and
>> internal storage snapshots, and combine them in transactions.
>>
>> What's missing is transactionable machine state snapshots.
>>
>> We know how to work around it for external machine state snapshots (see
>> above), but a transaction-based solution might be nicer.
>>
>> Any solution for internal machine state snapshots in QMP should at least
>> try to fit into this.  Some restrictions are okay.  For instance, we
>> probably need to restrict internal machine state snapshots to piggyback
>> on an internal storage snapshot for now, so we don't have to dig up
>> QCOW2 just to get QMP support.
>>
>> We can talk about more convenient interfaces for common special cases,
>> but I feel we need to design for the general case.  We don't have to
>> implement the completely general case right away, though.  As long as we
>> know where we want to go, incremental steps towards that goal are fine.
>>
>> Can we create a transactionable QMP command to take an internal machine
>> state snapshot?
>>
>> This would be like HMP savevm with the following differences:
>>
>> * Separate parameters for tag and ID.  I'll have none of this
>>    overloading nonsense in QMP.
> why do we need both 'name' and 'id' values in the future at all?
> Why single descriptive parameter is not enough? From my point
> of view all this overloading is non-sense and in future one
> name parameter should be kept.

Ever since we started supporting multiple snapshots (commit faea38e, Aug 2006), a
snapshot has both an ID and a tag.  We can debate whether that's a good
idea, but we can't go back in time and change the image formats.

Monitor commands and qemu-img let you identify a snapshot by ID or by
tag.  They generally use a single, overloaded parameter, and match it
against both.  Exception: QMP blockdev-snapshot-delete-internal-sync has
two unoverloaded parameters, to avoid ambiguity.  Quoting commit
44e3e05:

    This interface use id and name as optional parameters, to handle the
    case that one image contain multiple snapshots with same name which
    may be '', but with different id.
    
    Adding parameter id is for historical compatiability reason, and
    that case is not possible in qemu's new interface for internal
    snapshot at block device level, but still possible in qemu-img.
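
For reference, that unoverloaded interface looks roughly like this on
the wire (along the lines of the existing QMP documentation; the device
name is illustrative):

    -> { "execute": "blockdev-snapshot-delete-internal-sync",
         "arguments": { "device": "ide-hd0", "id": "1", "name": "snapshot0" } }
    <- { "return": { "id": "1", "name": "snapshot0", "vm-state-size": 0,
                     "date-sec": 1000012, "date-nsec": 10,
                     "vm-clock-sec": 100, "vm-clock-nsec": 20 } }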

Are you proposing to ditch the ability to identify snapshots by ID?  I
doubt we can.

Since HMP commands should be built on top of QMP commands, HMP savevm's
underlying QMP commands cannot be less general than HMP savevm.  HMP
savevm can do both ID and tag.  Therefore, the underlying QMP commands
must be able to do that, too.

Once we accept that, the only question is whether to continue to do that
with a single overloaded parameter, or two unambiguous ones.  For QMP, my
answer is unambiguous.

>> * Specify the destination block device.  I'll have none of this "pick a
>>    device in some magic, undocumented way" in QMP.
> agreed
>
>> * Leave alone the other block devices.  Adding transaction actions to
>>    snapshot all the writable block devices to get a full system snapshot
>>    is the user's responsibility.
> this requires clarification:
> - do you mean that the list of the devices should come from the
> management layer?

See below.

> If so this is error prone at the restoration stage. From my point of
> view restoration
> must check that all RW devices are restored properly and there are no missed
> ones.

Agreed.

>       Do you propose to put this into libvirt? From my point of view
> this flexibility
> is not that necessary.

VM snapshot / restore has much in common with migration: both save and
restore machine state.  Another similarity is that you have to ensure
identical block device contents.  For migration, this is the management
layer's job.  My point is: the management layer is already prepared to
track block device contents.

For the special case all-internal snapshot / restore, QEMU provides
convenience features:

* You can take a consistent, complete, all-internal snapshot with
  savevm.

* You can restore all parts of an all-internal snapshot with loadvm.

* You can delete all parts of an all-internal snapshot with delvm.

Except "all parts" is a lie: if you manage to lose a block device since
savevm, QEMU won't notice.  Ensuring you still have them all is left to
the management layer even there.

Once external snapshots come into play, QEMU doesn't even try.  It has
no idea where to find external snapshots.  It's completely left to the
management layer.

I'm not fundamentally opposed to convenience features in HMP or even
QMP.  But I don't want to start with them.  I want us to get the core
functionality right before we add convenience.

I wrote:

    A point-in-time snapshot of a system consists of a snapshot of its
    machine state and snapshots of its storage.  All the snapshots need
    to be made at the same point in time for the result to be
    consistent.

and

    I figure a fully general QMP interface would let you perform a
    system snapshot by combining storage snapshots of either kind with
    either kind of machine state snapshot.

where "either kind" is external or internal.

If we decide that we'll never need that much flexibility, we can discuss
restrictions, and how to design a restricted interface.

For instance, if we decide that we'll never need to mix external and
internal in the same snapshot, we can discuss what the all-internal
and all-external interfaces could look like.

I'm not the person to decide how much flexibility we'll need.  I'm happy
to listen to advice.  I'm hoping the management layer guys I cc'ed can
provide some.

> There is one important thing missed in your description.
> Machine snapshot MUST handle configuration change. This means
> that at the moment of snapshot VM can have different amount of
> disks and other peripherals and this should be properly handled.

I'm not sure I understand.

Configuration changes between which two points of time?  savevm and
loadvm?

How should configuration change be handled?

>> Limitations:
>>
>> * No live internal machine snapshot, yet.
>>
>> * The storage device taking the internal snapshot must also be
>>    internally snapshot for now.  In fact, the command does both
>>    (tolerable wart).
>>
>> Open questions:
>>
>> * Do we want the QMP command to delete existing snapshots with
>>    conflicting tag / ID, like HMP savevm does?  Or do we want it to fail
>>    the transaction?
>>
>> * Do we need transactions for switching to a system snapshot, too?
>>
>> Opinions?
>>
>> Denis, I understand doing this right will be more work than simply
>> rebadging HMP's savevm etc for QMP, and probably a good deal more than
>> you anticipated.  Sorry about that.  I'm trying to accomodate you as far
>> as possible without screwing up QMP.  Not screwing up QMP too much is my
>> job as maintainer.
> this is not a problem if we will be able to do things with less
> latency. From my side I am a bit frustrated that this discussion
> comes with v5 of patches after two months of waiting to be
> merged assuming that most of previous discussions were about
> some mistakes in the code.

You're absolutely right to complain.  We *failed* to review your patches
in a timely manner.  And "we" very much includes "me".

Reasons include:

* I'm the QMP maintainer, but I'm chronically overloaded with review.
  It's been particularly bad since summer, because I'm also the QAPI
  maintainer, and Eric has been keeping me very busy there.

* I did a quick review of v1 (posted Nov 16, reviewed Nov 17), but
  utterly failed to see the bigger picture: external snapshots,
  migration and transactions.  Bad job on my part.

* Unlucky timing on your part: you posted v2 on Dec 4 during the
  pre-release crunch and general end-of-year crunch, and v3+v4 on Jan 8
  right into my after-Christmas mail deluge.  When I found some time for
  your series again, it was at v5 already.

* v1 and v2 were reviewed by Eric and Juan, but they also failed to
  see the bigger picture.  I don't blame them.  You can competently
  review code with just local knowledge, but proper interface review
  often requires a more global view, which few people have.
Denis V. Lunev Jan. 22, 2016, 4:20 p.m. UTC | #6
On 01/22/2016 06:31 PM, Markus Armbruster wrote:
> "Denis V. Lunev" <den@openvz.org> writes:
>
>> On 01/19/2016 09:11 PM, Markus Armbruster wrote:
>>> "Denis V. Lunev" <den@openvz.org> writes:
>>>
>>>> On 01/18/2016 06:58 PM, Markus Armbruster wrote:
>>>>> "Denis V. Lunev" <den@openvz.org> writes:
>>>>>
>>>>>> 'name' attribute is made mandatory in distinction with HMP command.
>>>>>>
>>>>>> The patch also moves hmp_savevm implementation into hmp.c. This function
>>>>>> is just a simple wrapper now and does not have knowledge about
>>>>>> migration internals.
>>> [...]
>>>>>> diff --git a/qapi-schema.json b/qapi-schema.json
>>>>>> index 2e31733..09d1a1a 100644
>>>>>> --- a/qapi-schema.json
>>>>>> +++ b/qapi-schema.json
>>>>>> @@ -4054,3 +4054,16 @@
>>>>>>     ##
>>>>>>     { 'enum': 'ReplayMode',
>>>>>>       'data': [ 'none', 'record', 'play' ] }
>>>>>> +
>>>>>> +##
>>>>>> +# @savevm
>>>>>> +#
>>>>>> +# Save a VM snapshot. Old snapshot with the same name will be deleted if exists.
>>>>>> +#
>>>>>> +# @name: identifier of a snapshot to be created
>>>>>> +#
>>>>>> +# Returns: Nothing on success
>>>>>> +#
>>>>>> +# Since 2.6
>>>>>> +##
>>>>>> +{ 'command': 'savevm', 'data': {'name': 'str'} }
>>>>>> diff --git a/qmp-commands.hx b/qmp-commands.hx
>>>>>> index db072a6..b7851e1 100644
>>>>>> --- a/qmp-commands.hx
>>>>>> +++ b/qmp-commands.hx
>>>>>> @@ -4795,3 +4795,28 @@ Example:
>>>>>>                      {"type": 0, "out-pport": 0, "pport": 0, "vlan-id": 3840,
>>>>>>                       "pop-vlan": 1, "id": 251658240}
>>>>>>        ]}
>>>>>> +
>>>>>> +EQMP
>>>>>> +
>>>>>> +SQMP
>>>>>> +savevm
>>>>>> +------
>>>>>> +
>>>>>> +Save a VM snapshot. Old snapshot with the same name will be deleted if exists.
>>>>>> +
>>>>>> +Arguments:
>>>>>> +
>>>>>> +- "name": snapshot name
>>>>>> +
>>>>>> +Example:
>>>>>> +
>>>>>> +-> { "execute": "savevm", "arguments": { "name": "snapshot1" } }
>>>>>> +<- { "return": {} }
>>>>>> +
>>>>>> +EQMP
>>>>>> +
>>>>>> +    {
>>>>>> +        .name       = "savevm",
>>>>>> +        .args_type  = "name:s",
>>>>>> +        .mhandler.cmd_new = qmp_marshal_savevm,
>>>>>> +    },
>>>>> A snapshot has a tag (QEMUSnapshotInfo member name) and an ID
>>>>> (QEMUSnapshotInfo member id_str).
>>>>>
>>>>> HMP's name arguments are overloaded: they're matched both against tag
>>>>> and ID.  Unwisely chosen tags can create ambiguity.  Example:
>>>>>
>>>>>        (qemu) savevm 2
>>>>>        (qemu) savevm
>>>>>        (qemu) info snapshots
>>>>>        ID        TAG                 VM SIZE                DATE       VM CLOCK
>>>>>        1         2                      1.7M 2016-01-18 16:56:31   00:00:00.000
>>>>>        2         vm-20160118165641      1.7M 2016-01-18 16:56:41   00:00:00.000
>>>>>
>>>>> Care to guess which one we get when we ask for "2"?
>>>>>
>>>>> I think we want separate, unoverloaded arguments for QMP.
>>>> I think there is no need to. Name is now absolutely mandatory.
>>>> Thus for new snapshots we will have 'name' specified and we
>>>> will be bound to name only.
>>>>
>>>> 'id' will be used for old VMs and this is convenience
>>>> layer to make old 'id' only snaphosts accessible
>>>> through new interface in the same way as old.
>>> The overloaded interface you propose is more complex than it seems.  You
>>> hide the complexity by not documenting its workings.  Not even to the
>>> (insufficient!) degree the HMP interface documents how its overloaded
>>> name parameters work.
>>>
>>> Merely copying over the HMP documentation won't cut it.  The bar for new
>>> QMP interfaces is a fair bit higher than "no worse than HMP".  The new
>>> interface must reasonably sane for *QMP*, and sufficiently documented.
>>>
>>> If we can't make a sane QMP interface, I'd rather have no QMP interface.
>>> However, I believe we *can* make a sane QMP interface if we put in the
>>> design work.
>>>
>>> The design work must start with a review of what we're trying to
>>> accomplish, and how to fit it into the rest of the system.  Here's my
>>> attempt.  Since my knowledge on snapshots is rather superficial, I'm
>>> cc'ing Kevin for additional snapshot expertise.  Kevin, please correct
>>> me when I talk nonsense.  I'm further cc'ing Eric and Peter for the
>>> management layer perspective.
>>>
>>> A point-in-time snapshot of a system consists of a snapshot of its
>>> machine state and snapshots of its storage.  All the snapshots need to
>>> be made at the same point in time for the result to be consistent.
>>>
>>> Snapshots of read-only storage carry no information and are commonly
>>> omitted.
>>>
>>> Isolated storage snapshots can make sense, but snapshotting the machine
>>> state without also snapshotting the machine's storage doesn't sound
>>> useful to me.
>>>
>>> Both storage and machine state snapshots come in two flavours: internal
>>> and external.
>>>
>>> External ones can be made with any block backend, but internal storage
>>> snapshots work only with certain formats, notably qcow2.  QMP supports
>>> both kinds of storage snapshots.
>>>
>>> Both kinds of storage snapshots need exclusive access while they work.
>>> They're relatively quick, but the delay could be noticable for large
>>> internal snapshots, and perhaps for external snapshots on really slow
>>> storage.
>>>
>>> Internal machine state snapshots are currently only available via HMP's
>>> savevm, which integrates internal machine state and storage snapshots.
>>> This is non-live, i.e. the guest is stopped while the snapshot gets
>>> saved.  I figure we could make it live if we really wanted to.  Another
>>> instance of the emerging background job concept.
>>>
>>> On the implementation level, QCOW2 can't currently store a machine state
>>> snapshot without also storing a storage snapshot.  I guess we could
>>> change this if we really wanted to.
>>>
>>> External machine state snapshots are basically migrate to file.
>>> Supported by QMP.
>>>
>>> Live migration to file is possible, but currently wastes space, because
>>> memory dirtied during migration gets saved multiple times.  Could be
>>> fixed either by making migration update previously saved memory instead
>>> of appending (beware, random I/O), or by compacting the file afterwards.
>>>
>>> Non-live migration to file doesn't waste space that way.
>>>
>>> To take multiple *consistent* snapshots, you have to bundle them up in a
>>> transaction.  Transactions currently support only *storage* snapshots,
>>> though.
>>>
>>> Work-around for external machine state snapshot: migrate to file
>>> (possibly live), leaving the guest sopped on completion, take storage
>>> snapshots, resume guest.
>>>
>>> You can combine internal and external storage snapshots with an external
>>> machine state snapshot to get a mixed system snapshot.
>>>
>>> You currently can't do that with an internal machine state snapshot: the
>>> only way to take one is HMP savevm, which insists on internally
>>> snapshotting all writable storage, and doesn't transact together with
>>> external storage snapshots.
>>>
>>> Except for the case "purely internal snapshot with just one writable
>>> storage device", a system snapshot consists of multiple parts stored in
>>> separate files.  Tying the parts together is a management problem.  QEMU
>>> provides rudimentary management for purely internal snapshots, but it's
>>> flawed: missing storage isn't detected, and additional storage can creep
>>> in if snapshot tags or IDs happen to match.  I guess managing the parts
>>> is better left to the management layer.
>>>
>>> I figure a fully general QMP interface would let you perform a system
>>> snapshot by combining storage snapshots of either kind with either kind
>>> of machine state snapshot.
>>>
>>> We already have most of the building blocks: we can take external and
>>> internal storage snapshots, and combine them in transactions.
>>>
>>> What's missing is transactionable machine state snapshots.
>>>
>>> We know how to work around it for external machine state snapshots (see
>>> above), but a transaction-based solution might be nicer.
>>>
>>> Any solution for internal machine state snapshots in QMP should at least
>>> try to fit into this.  Some restrictions are okay.  For instance, we
>>> probably need to restrict internal machine state snapshots to piggyback
>>> on an internal storage snapshot for now, so we don't have to dig up
>>> QCOW2 just to get QMP support.
>>>
>>> We can talk about more convenient interfaces for common special cases,
>>> but I feel we need to design for the general case.  We don't have to
>>> implement the completely general case right away, though.  As long as we
>>> know where we want to go, incremental steps towards that goal are fine.
>>>
>>> Can we create a transactionable QMP command to take an internal machine
>>> state snapshot?
>>>
>>> This would be like HMP savevm with the following differences:
>>>
>>> * Separate parameters for tag and ID.  I'll have none of this
>>>     overloading nonsense in QMP.
>> why do we need both 'name' and 'id' values in the future at all?
>> Why single descriptive parameter is not enough? From my point
>> of view all this overloading is non-sense and in future one
>> name parameter should be kept.
> Ever since we support multiple snapshots (commit faea38e, Aug 2006), a
> snapshot has both an ID and a tag.  We can debate whether that's a good
> idea, but we can't go back in time and change the image formats.
>
> Monitor commands and qemu-img let you identify a snapshot by ID or by
> tag.  They generally use a single, overloaded parameter, and match it
> against both.  Exception: QMP blockdev-snapshot-delete-internal-sync has
> two unoverloaded parameters, to avoid ambiguity.  Quoting commit
> 44e3e05:
>
>      This interface use id and name as optional parameters, to handle the
>      case that one image contain multiple snapshots with same name which
>      may be '', but with different id.
>      
>      Adding parameter id is for historical compatiability reason, and
>      that case is not possible in qemu's new interface for internal
>      snapshot at block device level, but still possible in qemu-img.
>
> Are you proposing to ditch the ability to identify snapshots by ID?  I
> doubt we can.
>
> Since HMP commands should be build on top of QMP commands, HMP savevm's
> underlying QMP commands cannot be less general than HMP savevm.  HMP
> savevm can do both ID and tag.  Therefore, the underlying QMP commands
> must be able to do that, too.
>
> Once we accept that, the only question is whether to continue to do that
> with a single overloaded parameter, or two unambigous ones.  For QMP, my
> answer is unambigous.
Thank you for the extensive explanation. This sounds fair to me.


>>> * Specify the destination block device.  I'll have none of this "pick a
>>>     device in some magic, undocumented way" in QMP.
>> agreed
>>
>>> * Leave alone the other block devices.  Adding transaction actions to
>>>     snapshot all the writable block devices to get a full system snapshot
>>>     is the user's responsibility.
>> this requires clarification:
>> - do you mean that the list of the devices should come from the
>> management layer?
> See below.
>
>> If so this is error prone at the restoration stage. From my point of
>> view restoration
>> must check that all RW devices are restored properly and there are no missed
>> ones.
> Agreed.
>
>>        Do you propose to put this into libvirt? From my point of view
>> this flexibility
>> is not that necessary.
> VM snapshot / restore has much in common with migration: both save and
> restore machine state.  Another similarity is that you have to ensure
> identical block device contents.  For migration, this is the management
> layer's job.  My point is: the management layer is already prepared to
> track block device contents.
No, I am speaking about a slightly different thing.

At the moment (1) the VM is configured with 2 disks, a floppy and 2 network
cards. An internal snapshot is taken with this configuration while QEMU is
running. After that the user stops QEMU, removes 1 disk, the floppy and 1
network card, and starts QEMU again. Now we are in bad shape with loadvm:
the device configuration is completely different. This should be handled
gracefully on both layers: either QEMU should be stopped and started again,
or the configuration change should be applied in the middle.

> For the special case all-internal snapshot / restore, QEMU provides
> convenience features:
>
> * You can take a consistent, complete, all-internal snapshot with
>    savevm.
>
> * You can restore all parts of an all-internal snapshot with loadvm.
>
> * You can delete all parts of an all-internal snapshot with delvm.
>
> Except "all parts" is a lie: if you manage to lose a block device since
> savevm, QEMU won't notice.  Ensuring you still have them all is left to
> the management layer even there.
>
> Once external snapshots come into play, QEMU doesn't even try.  It has
> no idea where to find external snapshots.  It's completely left to the
> management layer.
>
> I'm not fundamentally opposed to convenience features in HMP or even
> QMP.  But I don't want to start with them.  I want us to get the core
> functionality right before we add convenience.
>
> I wrote:
>
>      A point-in-time snapshot of a system consists of a snapshot of its
>      machine state and snapshots of its storage.  All the snapshots need
>      to be made at the same point in time for the result to be
>      consistent.
>
> and
>
>      I figure a fully general QMP interface would let you perform a
>      system snapshot by combining storage snapshots of either kind with
>      either kind of machine state snapshot.
>
> where "either kind" is external or internal.
Fair enough, with the note about configuration. Though we could add a check
and report a proper error code to the management layer if the configuration
at the current moment is inconsistent with the one at the previous moment,
to force the management layer to restart.

> If we decide that we'll never need that much flexibility, we can discuss
> restrictions, and how to design a restricted interface.
>
> For instance, if we decide that we'll never need to mix external and
> internal in the same snapshot, we can discuss how an the all-internal
> and all-external interfaces could look like.
>
> I'm not the person to decide how much flexibility we'll need.  I'm happy
> to listen to advice.  I'm hoping the management layer guys I cc'ed can
> provide some.
>
>> There is one important thing missing from your description.
>> A machine snapshot MUST handle configuration changes. This means
>> that at the moment of the snapshot the VM can have a different number of
>> disks and other peripherals, and this should be properly handled.
> I'm not sure I understand.
>
> Configuration changes between which two points of time?  savevm and
> loadvm?
>
> How should configuration change be handled?
I have written a bit about this above.

>>> Limitations:
>>>
>>> * No live internal machine snapshot, yet.
>>>
>>> * The storage device taking the internal snapshot must also be
>>>     internally snapshot for now.  In fact, the command does both
>>>     (tolerable wart).
>>>
>>> Open questions:
>>>
>>> * Do we want the QMP command to delete existing snapshots with
>>>     conflicting tag / ID, like HMP savevm does?  Or do we want it to fail
>>>     the transaction?
>>>
>>> * Do we need transactions for switching to a system snapshot, too?
>>>
>>> Opinions?
>>>
>>> Denis, I understand doing this right will be more work than simply
>>> rebadging HMP's savevm etc for QMP, and probably a good deal more than
>>> you anticipated.  Sorry about that.  I'm trying to accommodate you as far
>>> as possible without screwing up QMP.  Not screwing up QMP too much is my
>>> job as maintainer.
>> This is not a problem if we will be able to do things with less
>> latency. From my side I am a bit frustrated that this discussion
>> comes with v5 of the patches, after two months of waiting to be
>> merged, given that most of the previous discussions were about
>> mistakes in the code.
> You're absolutely right to complain.  We *failed* to review your patches
> in a timely manner.  And "we" very much includes "me".
>
> Reasons include:
>
> * I'm the QMP maintainer, but I'm chronically overloaded with review.
>    It's been particularly bad since summer, because I'm also the QAPI
>    maintainer, and Eric has been keeping me very busy there.
>
> * I did a quick review of v1 (posted Nov 16, reviewed Nov 17), but
>    utterly failed to see the bigger picture: external snapshots,
>    migration and transactions.  Bad job on my part.
>
> * Unlucky timing on your part: you posted v2 on Dec 4 during the
>    pre-release crunch and general end-of-year crunch, and v3+v4 on Jan 8
>    right into my after-Christmas mail deluge.  When I found some time for
>    your series again, it was at v5 already.
>
> * v1 and v2 were reviewed by Eric and Juan, but they also failed to
>    see the bigger picture.  I don't blame them.  You can competently
>    review code with just local knowledge, but proper interface review
>    often requires a more global view, which few people have.
No problem, actually. Thank you for the prompt response.

I think that we should start the next iteration with an API review only
(no code changes), and after that I'll start working on the code, to
simplify your work and reduce mine.

Is that fair enough?
Peter Krempa Jan. 26, 2016, 12:39 p.m. UTC | #7
On Tue, Jan 19, 2016 at 19:11:05 +0100, Markus Armbruster wrote:
> "Denis V. Lunev" <den@openvz.org> writes:
> 
> > On 01/18/2016 06:58 PM, Markus Armbruster wrote:
> >> "Denis V. Lunev" <den@openvz.org> writes:

...

> 
> The overloaded interface you propose is more complex than it seems.  You
> hide the complexity by not documenting its workings.  Not even to the
> (insufficient!) degree the HMP interface documents how its overloaded
> name parameters work.
> 
> Merely copying over the HMP documentation won't cut it.  The bar for new
> QMP interfaces is a fair bit higher than "no worse than HMP".  The new
> interface must be reasonably sane for *QMP*, and sufficiently documented.
> 
> If we can't make a sane QMP interface, I'd rather have no QMP interface.
> However, I believe we *can* make a sane QMP interface if we put in the
> design work.

I have to agree on this one. A broken interface will just introduce more
pain in the long run. Management tools can still fall back to the old
command if they need to (granted, error handling sucks there, though
... [1]) and use it as before.

[1]
http://libvirt.org/git/?p=libvirt.git;a=commitdiff;h=4ac14cde9ae925515009400c2186d7eec5478b05

> 
> The design work must start with a review of what we're trying to
> accomplish, and how to fit it into the rest of the system.  Here's my
> attempt.  Since my knowledge on snapshots is rather superficial, I'm
> cc'ing Kevin for additional snapshot expertise.  Kevin, please correct
> me when I talk nonsense.  I'm further cc'ing Eric and Peter for the
> management layer perspective.
> 
> A point-in-time snapshot of a system consists of a snapshot of its
> machine state and snapshots of its storage.  All the snapshots need to
> be made at the same point in time for the result to be consistent.
> 
> Snapshots of read-only storage carry no information and are commonly
> omitted.
> 
> Isolated storage snapshots can make sense, but snapshotting the machine
> state without also snapshotting the machine's storage doesn't sound
> useful to me.

Well, technically if all your storage is read-only then a full snapshot
is reduced to a machine state snapshot. Libvirt actually allows this for
external snapshots. For internal ones, it obviously doesn't work.

> 
> Both storage and machine state snapshots come in two flavours: internal
> and external.
> 
> External ones can be made with any block backend, but internal storage
> snapshots work only with certain formats, notably qcow2.  QMP supports
> both kinds of storage snapshots.
> 
> Both kinds of storage snapshots need exclusive access while they work.
> They're relatively quick, but the delay could be noticable for large
> internal snapshots, and perhaps for external snapshots on really slow
> storage.
> 
> Internal machine state snapshots are currently only available via HMP's
> savevm, which integrates internal machine state and storage snapshots.
> This is non-live, i.e. the guest is stopped while the snapshot gets
> saved.  I figure we could make it live if we really wanted to.  Another
> instance of the emerging background job concept.
> 
> On the implementation level, QCOW2 can't currently store a machine state
> snapshot without also storing a storage snapshot.  I guess we could
> change this if we really wanted to.
> 
> External machine state snapshots are basically migrate to file.
> Supported by QMP.
> 
> Live migration to file is possible, but currently wastes space, because
> memory dirtied during migration gets saved multiple times.  Could be
> fixed either by making migration update previously saved memory instead
> of appending (beware, random I/O), or by compacting the file afterwards.
> 
> Non-live migration to file doesn't waste space that way.
> 
> To take multiple *consistent* snapshots, you have to bundle them up in a
> transaction.  Transactions currently support only *storage* snapshots,
> though.
> 
> Work-around for external machine state snapshot: migrate to file
> (possibly live), leaving the guest stopped on completion, take storage
> snapshots, resume guest.

This is exactly what I've implemented in libvirt.
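
A rough sketch of that sequence on the QMP wire, assuming an exec: migration
target; the file paths and device name are invented, and waiting for
completion (via query-migrate or the MIGRATION event) as well as error
handling are omitted:

    -> { "execute": "migrate",
         "arguments": { "uri": "exec:cat > /var/lib/vm/state.save" } }
    <- { "return": {} }
    (poll query-migrate until "completed"; the guest is now stopped)
    -> { "execute": "transaction",
         "arguments": { "actions": [
           { "type": "blockdev-snapshot-sync",
             "data": { "device": "drive-virtio-disk0",
                       "snapshot-file": "/var/lib/vm/overlay0.qcow2",
                       "format": "qcow2" } } ] } }
    <- { "return": {} }
    -> { "execute": "cont" }
    <- { "return": {} }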

> 
> You can combine internal and external storage snapshots with an external
> machine state snapshot to get a mixed system snapshot.

Libvirt allows this for two separate snapshots of a single VM, but it's
really buggy. You can take such a snapshot, but reverting or deleting it
is where the fun begins. In a mixed snapshot you need to properly track the
backing files along with the internal snapshots in qcow, and
libvirt currently doesn't really allow that.

> 
> You currently can't do that with an internal machine state snapshot: the
> only way to take one is HMP savevm, which insists on internally
> snapshotting all writable storage, and doesn't transact together with
> external storage snapshots.
> 
> Except for the case "purely internal snapshot with just one writable
> storage device", a system snapshot consists of multiple parts stored in
> separate files.  Tying the parts together is a management problem.  QEMU
> provides rudimentary management for purely internal snapshots, but it's
> flawed: missing storage isn't detected, and additional storage can creep
> in if snapshot tags or IDs happen to match.  I guess managing the parts
> is better left to the management layer.
> 
> I figure a fully general QMP interface would let you perform a system
> snapshot by combining storage snapshots of either kind with either kind
> of machine state snapshot.

That'd be the ideal state. It's questionable though whether combining
internal and external snapshot targets for a single snapshot is useful
or desirable, but having a general interface is probably the best
solution.

> 
> We already have most of the building blocks: we can take external and
> internal storage snapshots, and combine them in transactions.
> 
> What's missing is transactionable machine state snapshots.

That will be totally great. Especially in combination with the ability
to do live snapshot and discard updated storage pages.
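
Those building blocks already combine today; for illustration, a single
transaction mixing an internal and an external storage snapshot (the device
names and file path are invented):

    -> { "execute": "transaction",
         "arguments": { "actions": [
           { "type": "blockdev-snapshot-internal-sync",
             "data": { "device": "drive-virtio-disk0",
                       "name": "pre-upgrade" } },
           { "type": "blockdev-snapshot-sync",
             "data": { "device": "drive-virtio-disk1",
                       "snapshot-file": "/var/lib/vm/disk1-pre-upgrade.qcow2",
                       "format": "qcow2" } } ] } }
    <- { "return": {} }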

> 
> We know how to work around it for external machine state snapshots (see
> above), but a transaction-based solution might be nicer.
> 
> Any solution for internal machine state snapshots in QMP should at least
> try to fit into this.  Some restrictions are okay.  For instance, we
> probably need to restrict internal machine state snapshots to piggyback
> on an internal storage snapshot for now, so we don't have to dig up
> QCOW2 just to get QMP support.

That's fine. Libvirt already mandates at least one QCOW2 storage file
for a VM for an internal snapshot. Allowing us to specify which one will
be used would be great though.

> 
> We can talk about more convenient interfaces for common special cases,
> but I feel we need to design for the general case.  We don't have to
> implement the completely general case right away, though.  As long as we
> know where we want to go, incremental steps towards that goal are fine.
> 
> Can we create a transactionable QMP command to take an internal machine
> state snapshot?
> 
> This would be like HMP savevm with the following differences:
> 
> * Separate parameters for tag and ID.  I'll have none of this
>   overloading nonsense in QMP.
> 
> * Specify the destination block device.  I'll have none of this "pick a
>   device in some magic, undocumented way" in QMP.
> 
> * Leave alone the other block devices.  Adding transaction actions to
>   snapshot all the writable block devices to get a full system snapshot
>   is the user's responsibility.
> 
> Limitations:
> 
> * No live internal machine snapshot, yet.
> 
> * The storage device taking the internal snapshot must also be
>   internally snapshot for now.  In fact, the command does both
>   (tolerable wart).

Having a boolean to switch to live, even though it will not be implemented
yet, would be great. That
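
To make the proposal concrete: one way a transactionable machine state
snapshot could look on the wire. Everything here is an assumption for
discussion, not an existing or agreed interface -- the action name
"vmstate-snapshot-internal-sync", the separate "tag"/"id" fields, and the
"live" flag mentioned above are all made up. The first action would save the
machine state into the given device (and, per the limitation above, also
internally snapshot that device); the second action covers another writable
disk, which per the proposal is the user's responsibility:

    -> { "execute": "transaction",
         "arguments": { "actions": [
           { "type": "vmstate-snapshot-internal-sync",
             "data": { "device": "drive-virtio-disk0",
                       "tag": "pre-upgrade", "live": false } },
           { "type": "blockdev-snapshot-internal-sync",
             "data": { "device": "drive-virtio-disk1",
                       "name": "pre-upgrade" } } ] } }
    <- { "return": {} }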

> 
> Open questions:
> 
> * Do we want the QMP command to delete existing snapshots with
>   conflicting tag / ID, like HMP savevm does?  Or do we want it to fail
>   the transaction?

For libvirt this doesn't matter that much. When you create all snapshots
using the APIs we make sure that the IDs don't collide. If you tamper
with it externally though, you are on your own.

> 
> * Do we need transactions for switching to a system snapshot, too?
> 
> Opinions?
> 
> Denis, I understand doing this right will be more work than simply
> rebadging HMP's savevm etc for QMP, and probably a good deal more than
> you anticipated.  Sorry about that.  I'm trying to accommodate you as far
> as possible without screwing up QMP.  Not screwing up QMP too much is my
> job as maintainer.

+1

Peter
Peter Krempa Jan. 26, 2016, 12:56 p.m. UTC | #8
On Fri, Jan 22, 2016 at 16:31:04 +0100, Markus Armbruster wrote:
> "Denis V. Lunev" <den@openvz.org> writes:
> > On 01/19/2016 09:11 PM, Markus Armbruster wrote:
> >> "Denis V. Lunev" <den@openvz.org> writes:

[...]

> >> This would be like HMP savevm with the following differences:
> >>
> >> * Separate parameters for tag and ID.  I'll have none of this
> >>    overloading nonsense in QMP.
> > Why do we need both 'name' and 'id' values in the future at all?
> > Why is a single descriptive parameter not enough? From my point
> > of view all this overloading is nonsense, and in the future a single
> > name parameter should be kept.
> 
> Ever since we support multiple snapshots (commit faea38e, Aug 2006), a
> snapshot has both an ID and a tag.  We can debate whether that's a good
> idea, but we can't go back in time and change the image formats.
> 
> Monitor commands and qemu-img let you identify a snapshot by ID or by
> tag.  They generally use a single, overloaded parameter, and match it
> against both.  Exception: QMP blockdev-snapshot-delete-internal-sync has
> two unoverloaded parameters, to avoid ambiguity.  Quoting commit
> 44e3e05:
> 
>     This interface use id and name as optional parameters, to handle the
>     case that one image contain multiple snapshots with same name which
>     may be '', but with different id.
>     
>     Adding parameter id is for historical compatiability reason, and
>     that case is not possible in qemu's new interface for internal
>     snapshot at block device level, but still possible in qemu-img.
> 
> Are you proposing to ditch the ability to identify snapshots by ID?  I
> doubt we can.

Speaking for libvirt: We don't care, we use the snapshot name as the
identifier, so ID can be killed from our PoV. Backwards compatibility
can be hard though ...

> 
> Since HMP commands should be built on top of QMP commands, HMP savevm's
> underlying QMP commands cannot be less general than HMP savevm.  HMP
> savevm can do both ID and tag.  Therefore, the underlying QMP commands
> must be able to do that, too.
> 
> Once we accept that, the only question is whether to continue to do that
> with a single overloaded parameter, or two unambiguous ones.  For QMP, my
> answer is unambiguous.
> 
> >> * Specify the destination block device.  I'll have none of this "pick a
> >>    device in some magic, undocumented way" in QMP.
> > agreed
> >
> >> * Leave alone the other block devices.  Adding transaction actions to
> >>    snapshot all the writable block devices to get a full system snapshot
> >>    is the user's responsibility.
> > this requires clarification:
> > - do you mean that the list of the devices should come from the
> > management layer?
> 
> See below.
> 
> > If so this is error prone at the restoration stage. From my point of
> > view restoration
> > must check that all RW devices are restored properly and there are no missed
> > ones.
> 
> Agreed.
> 
> >       Do you propose to put this into libvirt? From my point of view
> > this flexibility
> > is not that necessary.
> 
> VM snapshot / restore has much in common with migration: both save and
> restore machine state.  Another similarity is that you have to ensure
> identical block device contents.  For migration, this is the management
> layer's job.  My point is: the management layer is already prepared to
> track block device contents.
> 
> For the special case all-internal snapshot / restore, QEMU provides
> convenience features:
> 
> * You can take a consistent, complete, all-internal snapshot with
>   savevm.
> 
> * You can restore all parts of an all-internal snapshot with loadvm.
> 
> * You can delete all parts of an all-internal snapshot with delvm.
> 
> Except "all parts" is a lie: if you manage to lose a block device since
> savevm, QEMU won't notice.  Ensuring you still have them all is left to
> the management layer even there.

If you work only on internal snapshots with libvirt we always make sure
that all parts of the 

> 
> Once external snapshots come into play, QEMU doesn't even try.  It has
> no idea where to find external snapshots.  It's completely left to the
> management layer.

This is still mostly buggy/unimplemented in libvirt although the plan is
to do it properly.

> 
> I'm not fundamentally opposed to convenience features in HMP or even
> QMP.  But I don't want to start with them.  I want us to get the core
> functionality right before we add convenience.

This depends on whether anybody would want to use that interface
manually. As we are speaking about QMP it's not really likely that
anyone would need convenience wrappers, since you need a lot of data and
decisions to do such an operation properly anyway.

> 
> I wrote:
> 
>     A point-in-time snapshot of a system consists of a snapshot of its
>     machine state and snapshots of its storage.  All the snapshots need
>     to be made at the same point in time for the result to be
>     consistent.
> 
> and
> 
>     I figure a fully general QMP interface would let you perform a
>     system snapshot by combining storage snapshots of either kind with
>     either kind of machine state snapshot.
> 
> where "either kind" is external or internal.
> 
> If we decide that we'll never need that much flexibility, we can discuss
> restrictions, and how to design a restricted interface.
> 
> For instance, if we decide that we'll never need to mix external and
> internal in the same snapshot, we can discuss how the all-internal
> and all-external interfaces could look like.
> 
> I'm not the person to decide how much flexibility we'll need.  I'm happy
> to listen to advice.  I'm hoping the management layer guys I cc'ed can
> provide some.

This is really hard to decide. Libvirt currently only allows
full-internal or full-external snapshots. Doing a mixed one is not
really the focus. We need to cover mixed trees of snapshots and their
children first. If we do it properly, doing a combined snapshot should
then be trivial.

I can't imagine how that would be useful though.

> 
> > There is one important thing missing from your description.
> > A machine snapshot MUST handle configuration changes. This means
> > that at the moment of the snapshot the VM can have a different number of
> > disks and other peripherals, and this should be properly handled.
> 
> I'm not sure I understand.
> 
> Configuration changes between which two points of time?  savevm and
> loadvm?
> 
> How should configuration change be handled?

This is handled in libvirt. If you 'savevm' then do some stuff (e.g.
hot-unplug a disk) and then attempt to loadvm, libvirt detects that the
guest ABI would change and kills qemu and restarts it with the config at
the time of 'savevm', along with loading the snapshot. Since we added
external snapshots the approach may be buggy (we don't yet detect that the
backing file of a disk has changed), but the idea of the
infrastructure already exists.

Peter
diff mbox

Patch

diff --git a/hmp.c b/hmp.c
index e7bab75..8d6eda6 100644
--- a/hmp.c
+++ b/hmp.c
@@ -33,7 +33,6 @@ 
 #include "ui/console.h"
 #include "block/qapi.h"
 #include "qemu-io.h"
-#include "migration/migration.h"
 
 #ifdef CONFIG_SPICE
 #include <spice/enums.h>
@@ -2406,7 +2405,7 @@  void hmp_savevm(Monitor *mon, const QDict *qdict)
         name = name_buf;
     }
 
-    migrate_savevm(name, &local_err);
+    qmp_savevm(name, &local_err);
 
     if (local_err != NULL) {
         error_report_err(local_err);
diff --git a/include/migration/migration.h b/include/migration/migration.h
index 73c8bb1..58c04a9 100644
--- a/include/migration/migration.h
+++ b/include/migration/migration.h
@@ -277,8 +277,6 @@  int migrate_compress_threads(void);
 int migrate_decompress_threads(void);
 bool migrate_use_events(void);
 
-void migrate_savevm(const char *name, Error **errp);
-
 /* Sending on the return path - generic and then for each message type */
 void migrate_send_rp_message(MigrationIncomingState *mis,
                              enum mig_rp_message_type message_type,
diff --git a/migration/savevm.c b/migration/savevm.c
index 308302a..0dbb15f 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -1905,7 +1905,7 @@  int qemu_loadvm_state(QEMUFile *f)
     return ret;
 }
 
-void migrate_savevm(const char *name, Error **errp)
+void qmp_savevm(const char *name, Error **errp)
 {
     BlockDriverState *bs, *bs1;
     QEMUSnapshotInfo sn1, *sn = &sn1, old_sn1, *old_sn = &old_sn1;
diff --git a/qapi-schema.json b/qapi-schema.json
index 2e31733..09d1a1a 100644
--- a/qapi-schema.json
+++ b/qapi-schema.json
@@ -4054,3 +4054,16 @@ 
 ##
 { 'enum': 'ReplayMode',
   'data': [ 'none', 'record', 'play' ] }
+
+##
+# @savevm
+#
+# Save a VM snapshot. Old snapshot with the same name will be deleted if exists.
+#
+# @name: identifier of a snapshot to be created
+#
+# Returns: Nothing on success
+#
+# Since 2.6
+##
+{ 'command': 'savevm', 'data': {'name': 'str'} }
diff --git a/qmp-commands.hx b/qmp-commands.hx
index db072a6..b7851e1 100644
--- a/qmp-commands.hx
+++ b/qmp-commands.hx
@@ -4795,3 +4795,28 @@  Example:
                  {"type": 0, "out-pport": 0, "pport": 0, "vlan-id": 3840,
                   "pop-vlan": 1, "id": 251658240}
    ]}
+
+EQMP
+
+SQMP
+savevm
+------
+
+Save a VM snapshot. Old snapshot with the same name will be deleted if exists.
+
+Arguments:
+
+- "name": snapshot name
+
+Example:
+
+-> { "execute": "savevm", "arguments": { "name": "snapshot1" } }
+<- { "return": {} }
+
+EQMP
+
+    {
+        .name       = "savevm",
+        .args_type  = "name:s",
+        .mhandler.cmd_new = qmp_marshal_savevm,
+    },