Patchwork [v5,3/6] add backup related monitor commands

login
register
mail settings
Submitter Dietmar Maurer
Date Feb. 21, 2013, 11:27 a.m.
Message ID <1361446072-601980-4-git-send-email-dietmar@proxmox.com>
Download mbox | patch
Permalink /patch/222271/
State New
Headers show

Comments

Dietmar Maurer - Feb. 21, 2013, 11:27 a.m.
We use a generic BackupDriver struct to encapsulate all archive-format
related functions.

Another option would be to simply dump <devid,cluster_num,cluster_data> to
the output fh (pipe), and an external binary saves the data. That way we
could move the whole archive format related code out of qemu.

Signed-off-by: Dietmar Maurer <dietmar@proxmox.com>
---
 backup.h         |   15 ++
 blockdev.c       |  423 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 hmp-commands.hx  |   31 ++++
 hmp.c            |   63 ++++++++
 hmp.h            |    3 +
 monitor.c        |    7 +
 qapi-schema.json |   95 ++++++++++++
 qmp-commands.hx  |   27 ++++
 8 files changed, 664 insertions(+), 0 deletions(-)
Markus Armbruster - Feb. 27, 2013, 10:31 a.m.
First pass, concentrating on interfaces, implementation mostly ignored.

Dietmar Maurer <dietmar@proxmox.com> writes:

> We use a generic BackupDriver struct to encapsulate all archive format
> related function.
>
> Another option would be to simply dump <devid,cluster_num,cluster_data> to
> the output fh (pipe), and an external binary saves the data. That way we
> could move the whole archive format related code out of qemu.
>
> Signed-off-by: Dietmar Maurer <dietmar@proxmox.com>

Sounds like this patch is much more than just monitor commands.

> ---
>  backup.h         |   15 ++
>  blockdev.c       |  423 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
>  hmp-commands.hx  |   31 ++++
>  hmp.c            |   63 ++++++++
>  hmp.h            |    3 +
>  monitor.c        |    7 +
>  qapi-schema.json |   95 ++++++++++++
>  qmp-commands.hx  |   27 ++++
>  8 files changed, 664 insertions(+), 0 deletions(-)
>
> diff --git a/backup.h b/backup.h
> index 9b1ea1c..22598a6 100644
> --- a/backup.h
> +++ b/backup.h
> @@ -14,6 +14,9 @@
>  #ifndef QEMU_BACKUP_H
>  #define QEMU_BACKUP_H
>  
> +#include <uuid/uuid.h>
> +#include "block/block.h"
> +

What do you need block.h for?

>  #define BACKUP_CLUSTER_BITS 16
>  #define BACKUP_CLUSTER_SIZE (1<<BACKUP_CLUSTER_BITS)
>  #define BACKUP_BLOCKS_PER_CLUSTER (BACKUP_CLUSTER_SIZE/BDRV_SECTOR_SIZE)
> @@ -27,4 +30,16 @@ int backup_job_create(BlockDriverState *bs, BackupDumpFunc *backup_dump_cb,
>                        BlockDriverCompletionFunc *backup_complete_cb,
>                        void *opaque, int64_t speed);
>  
> +typedef struct BackupDriver {
> +    const char *format;
> +    void *(*open)(const char *filename, uuid_t uuid, Error **errp);
> +    int (*close)(void *opaque, Error **errp);
> +    int (*register_config)(void *opaque, const char *name, gpointer data,

void * rather than gpointer, please!

> +                              size_t data_len);
> +    int (*register_stream)(void *opaque, const char *devname, size_t size);
> +    int (*dump)(void *opaque, uint8_t dev_id, int64_t cluster_num,
> +                   unsigned char *buf, size_t *zero_bytes);
> +    int (*complete)(void *opaque, uint8_t dev_id, int ret);
> +} BackupDriver;
> +
>  #endif /* QEMU_BACKUP_H */
[...]
> diff --git a/hmp-commands.hx b/hmp-commands.hx
> index 64008a9..0f178d8 100644
> --- a/hmp-commands.hx
> +++ b/hmp-commands.hx
> @@ -83,6 +83,35 @@ STEXI
>  Copy data from a backing file into a block device.
>  ETEXI
>  
> +   {
> +        .name       = "backup",
> +        .args_type  = "backupfile:s,speed:o?,devlist:s?",
> +        .params     = "backupfile [speed [devlist]]",
> +        .help       = "create a VM Backup.",
> +        .mhandler.cmd = hmp_backup,
> +    },
> +
> +STEXI
> +@item backup
> +@findex backup
> +Create a VM backup.

-v please.

> +ETEXI
> +
> +    {
> +        .name       = "backup_cancel",
> +        .args_type  = "",
> +        .params     = "",
> +        .help       = "cancel the current VM backup",
> +        .mhandler.cmd = hmp_backup_cancel,
> +    },
> +
> +STEXI
> +@item backup_cancel
> +@findex backup_cancel
> +Cancel the current VM backup.
> +
> +ETEXI
> +
>      {
>          .name       = "block_job_set_speed",
>          .args_type  = "device:B,speed:o",

Recommend to do HMP in a separate patch, for easier review.

> @@ -1630,6 +1659,8 @@ show CPU statistics
>  show user network stack connection states
>  @item info migrate
>  show migration status
> +@item info backup
> +show backup status
>  @item info migrate_capabilities
>  show current migration capabilities
>  @item info migrate_cache_size
> diff --git a/hmp.c b/hmp.c
> index 2f47a8a..b2c1f23 100644
> --- a/hmp.c
> +++ b/hmp.c
> @@ -131,6 +131,38 @@ void hmp_info_mice(Monitor *mon, const QDict *qdict)
>      qapi_free_MouseInfoList(mice_list);
>  }
>  
> +void hmp_info_backup(Monitor *mon, const QDict *qdict)
> +{
> +    BackupStatus *info;
> +
> +    info = qmp_query_backup(NULL);
> +    if (info->has_status) {
> +        if (info->has_errmsg) {
> +            monitor_printf(mon, "Backup status: %s - %s\n",
> +                           info->status, info->errmsg);
> +        } else {
> +            monitor_printf(mon, "Backup status: %s\n", info->status);
> +        }
> +    }
> +    if (info->has_backup_file) {
> +        int per = (info->has_total && info->total &&
> +            info->has_transferred && info->transferred) ?
> +            (info->transferred * 100)/info->total : 0;
> +        int zero_per = (info->has_total && info->total &&
> +                        info->has_zero_bytes && info->zero_bytes) ?
> +            (info->zero_bytes * 100)/info->total : 0;
> +        monitor_printf(mon, "Backup file: %s\n", info->backup_file);
> +        monitor_printf(mon, "Backup uuid: %s\n", info->uuid);
> +        monitor_printf(mon, "Total size: %zd\n", info->total);
> +        monitor_printf(mon, "Transferred bytes: %zd (%d%%)\n",
> +                       info->transferred, per);
> +        monitor_printf(mon, "Zero bytes: %zd (%d%%)\n",
> +                       info->zero_bytes, zero_per);
> +    }
> +
> +    qapi_free_BackupStatus(info);
> +}
> +
>  void hmp_info_migrate(Monitor *mon, const QDict *qdict)
>  {
>      MigrationInfo *info;
> @@ -998,6 +1030,37 @@ void hmp_block_stream(Monitor *mon, const QDict *qdict)
>      hmp_handle_error(mon, &error);
>  }
>  
> +void hmp_backup_cancel(Monitor *mon, const QDict *qdict)
> +{
> +    Error *errp = NULL;
> +
> +    qmp_backup_cancel(&errp);
> +
> +    if (error_is_set(&errp)) {
> +        monitor_printf(mon, "%s\n", error_get_pretty(errp));
> +        error_free(errp);
> +        return;
> +    }
> +}
> +
> +void hmp_backup(Monitor *mon, const QDict *qdict)
> +{
> +    const char *backup_file = qdict_get_str(qdict, "backup-file");
> +    const char *devlist = qdict_get_try_str(qdict, "devlist");
> +    int64_t speed = qdict_get_try_int(qdict, "speed", 0);
> +
> +    Error *errp = NULL;
> +
> +    qmp_backup(backup_file, true, BACKUP_FORMAT_VMA, false, NULL, !!devlist,
> +               devlist, qdict_haskey(qdict, "speed"), speed, &errp);
> +
> +    if (error_is_set(&errp)) {
> +        monitor_printf(mon, "%s\n", error_get_pretty(errp));
> +        error_free(errp);
> +        return;
> +    }
> +}
> +
>  void hmp_block_job_set_speed(Monitor *mon, const QDict *qdict)
>  {
>      Error *error = NULL;
> diff --git a/hmp.h b/hmp.h
> index 30b3c20..ad4cf80 100644
> --- a/hmp.h
> +++ b/hmp.h
> @@ -28,6 +28,7 @@ void hmp_info_mice(Monitor *mon, const QDict *qdict);
>  void hmp_info_migrate(Monitor *mon, const QDict *qdict);
>  void hmp_info_migrate_capabilities(Monitor *mon, const QDict *qdict);
>  void hmp_info_migrate_cache_size(Monitor *mon, const QDict *qdict);
> +void hmp_info_backup(Monitor *mon, const QDict *qdict);
>  void hmp_info_cpus(Monitor *mon, const QDict *qdict);
>  void hmp_info_block(Monitor *mon, const QDict *qdict);
>  void hmp_info_blockstats(Monitor *mon, const QDict *qdict);
> @@ -65,6 +66,8 @@ void hmp_eject(Monitor *mon, const QDict *qdict);
>  void hmp_change(Monitor *mon, const QDict *qdict);
>  void hmp_block_set_io_throttle(Monitor *mon, const QDict *qdict);
>  void hmp_block_stream(Monitor *mon, const QDict *qdict);
> +void hmp_backup(Monitor *mon, const QDict *qdict);
> +void hmp_backup_cancel(Monitor *mon, const QDict *qdict);
>  void hmp_block_job_set_speed(Monitor *mon, const QDict *qdict);
>  void hmp_block_job_cancel(Monitor *mon, const QDict *qdict);
>  void hmp_block_job_pause(Monitor *mon, const QDict *qdict);
> diff --git a/monitor.c b/monitor.c
> index 6a0f257..e4a810c 100644
> --- a/monitor.c
> +++ b/monitor.c
> @@ -2666,6 +2666,13 @@ static mon_cmd_t info_cmds[] = {
>      },
>  #endif
>      {
> +        .name       = "backup",
> +        .args_type  = "",
> +        .params     = "",
> +        .help       = "show backup status",
> +        .mhandler.cmd = hmp_info_backup,
> +    },
> +    {
>          .name       = "migrate",
>          .args_type  = "",
>          .params     = "",
> diff --git a/qapi-schema.json b/qapi-schema.json
> index 7275b5d..09ca8ef 100644
> --- a/qapi-schema.json
> +++ b/qapi-schema.json
> @@ -425,6 +425,39 @@
>  { 'type': 'EventInfo', 'data': {'name': 'str'} }
>  
>  ##
> +# @BackupStatus:
> +#
> +# Detailed backup status.

Is there any non-detailed status?

> +#
> +# @status: #optional string describing the current backup status.
> +#          This can be 'active', 'done', 'error'. If this field is not
> +#          returned, no backup process has been initiated

Enum rather than string, please.

> +#
> +# @errmsg: #optional error message (only returned if status is 'error')
> +#
> +# @total: #optional total amount of bytes involved in the backup process

You mean total size of data to be backed up?

> +#
> +# @transferred: #optional amount of bytes already backed up.
> +#
> +# @zero-bytes: #optional amount of 'zero' bytes detected.

These are backed up more efficiently, right?

Why does the user need to know?

> +#
> +# @start-time: #optional time (epoch) when backup job started.
> +#
> +# @end-time: #optional time (epoch) when backup job finished.
> +#
> +# @backupfile: #optional backup file name
> +#
> +# @uuid: #optional uuid for this backup job

For all optional members: how is their presence related to @status?
Already answered for @errmsg, want to know for the others.

> +#
> +# Since: 1.5.0
> +##
> +{ 'type': 'BackupStatus',
> +  'data': {'*status': 'str', '*errmsg': 'str', '*total': 'int',
> +           '*transferred': 'int', '*zero-bytes': 'int',
> +           '*start-time': 'int', '*end-time': 'int',
> +           '*backup-file': 'str', '*uuid': 'str' } }
> +
> +##
>  # @query-events:
>  #
>  # Return a list of supported QMP events by this server
> @@ -1824,6 +1857,68 @@
>    'data': { 'path': 'str' },
>    'returns': [ 'ObjectPropertyInfo' ] }
>  
> +
> +##
> +# @BackupFormat
> +#
> +# An enumeration of supported backup formats.
> +#
> +# @vma: Proxmox vma backup format
> +##
> +{ 'enum': 'BackupFormat',
> +  'data': [ 'vma' ] }
> +
> +##
> +# @backup:
> +#
> +# Starts a VM backup.
> +#
> +# @backup-file: the backup file name

We'll need support for backing up to an fd passed with SCM rights.

> +#
> +# @format: format of the backup file
> +#
> +# @config-filename: #optional name of a configuration file to include into
> +# the backup archive.

This lets you include a single host file in the backup.  Can be
anything, not just configuration.

Like previous reviewers, I think backing up some other piece of
software's configuration file is none of QEMU's business.  If you want
to include it in the backup, better do it at the appropriate level of
the stack.  If you think that's not practical, please explain why.

> +#
> +# @speed: #optional the maximum speed, in bytes per second
> +#
> +# @devlist: #optional list of block device names (separated by ',', ';'
> +# or ':'). By default the backup includes all writable block devices.

Make this a proper list, please.

> +#
> +# Returns: the uuid of the backup job
> +#
> +# Since: 1.5.0
> +##
> +{ 'command': 'backup', 'data': { 'backup-file': 'str',
> +                                 '*format': 'BackupFormat',
> +                                 '*config-file': 'str',
> +                                 '*devlist': 'str', '*speed': 'int' },
> +  'returns': 'str' }
> +
> +##
> +# @query-backup
> +#
> +# Returns information about current/last backup task.
> +#
> +# Returns: @BackupStatus
> +#
> +# Since: 1.5.0
> +##
> +{ 'command': 'query-backup', 'returns': 'BackupStatus' }
> +
> +##
> +# @backup-cancel
> +#
> +# Cancel the current executing backup process.
> +#
> +# Returns: nothing on success
> +#
> +# Notes: This command succeeds even if there is no backup process running.
> +#
> +# Since: 1.5.0
> +##
> +{ 'command': 'backup-cancel' }
> +
>  ##
>  # @qom-get:
>  #

You bake the "only one backup at a time" restriction right into the API.
That's fine if and only if multiple backups cannot possibly make sense.

Unless we agree that's the case, the API needs to be designed so it can
grow: backup needs to return an ID (it already does), backup-cancel
needs to take it as argument (it doesn't), and query-backup either
returns a list, or takes an ID argument.

The implementation may still restrict to a single backup, of course.

> diff --git a/qmp-commands.hx b/qmp-commands.hx
> index 799adea..17e381b 100644
> --- a/qmp-commands.hx
> +++ b/qmp-commands.hx
> @@ -889,6 +889,18 @@ EQMP
>      },
>  
>      {
> +        .name       = "backup",
> +        .args_type  = "backup-file:s,format:s?,config-file:F?,speed:o?,devlist:s?",
> +        .mhandler.cmd_new = qmp_marshal_input_backup,
> +    },
> +
> +    {
> +        .name       = "backup-cancel",
> +        .args_type  = "",
> +        .mhandler.cmd_new = qmp_marshal_input_backup_cancel,
> +    },

Documentation missing.

> +
> +    {
>          .name       = "block-job-set-speed",
>          .args_type  = "device:B,speed:o",
>          .mhandler.cmd_new = qmp_marshal_input_block_job_set_speed,
> @@ -2566,6 +2578,21 @@ EQMP
>      },
>  
>  SQMP
> +
> +query-backup
> +-------------
> +
> +Backup status.
> +
> +EQMP
> +
> +    {
> +        .name       = "query-backup",
> +        .args_type  = "",
> +        .mhandler.cmd_new = qmp_marshal_input_query_backup,
> +    },
> +
> +SQMP
>  migrate-set-capabilities
>  -------
Eric Blake - Feb. 27, 2013, 3:35 p.m.
On 02/27/2013 03:31 AM, Markus Armbruster wrote:
> First pass, concentrating on interfaces, implementation mostly ignored.
> 
> Dietmar Maurer <dietmar@proxmox.com> writes:
> 
>> We use a generic BackupDriver struct to encapsulate all archive format
>> related function.
>>

>> +# @backup:
>> +#
>> +# Starts a VM backup.
>> +#
>> +# @backup-file: the backup file name
> 
> We'll need support for backup up to a fd passed with SCM rights.

This already works with /dev/fdset/nnn for fds passed with 'add-fd', if
you used qemu_open() on the passed file name.

>> +# @devlist: #optional list of block device names (separated by ',', ';'
>> +# or ':'). By default the backup includes all writable block devices.
> 
> Make this a proper list, please.

That is, make it a JSON array: '*devlist' : [ 'str' ]
Any time that you pass a string through JSON that then requires further
ad-hoc parsing (such as splitting on ',' or ':'), it is a sign that your
JSON representation was incorrect.

> 
> You bake the "only one backup at a time" restriction right into the API.
> That's fine if and only if multiple backups cannot possibly make sense.
> 
> Unless we agree that's the case, the API needs to be designed so it can
> grow: backup needs to return an ID (it already does), backup-cancel
> needs to take it as argument (it doesn't), and query-backup either
> returns a list, or takes an ID argument.

Agreed.  In the case of backup-cancel, if you want to make the argument
optional, and have it do the right thing when only one job is active
(and only fail if multiple jobs are running), that would also work.

> 
> The implementation may still restrict to a single backup, of course.

The initial implementation, obviously.  The point is that the API should
allow for growth, even if the initial implementation doesn't support
everything that the API allows.
Dietmar Maurer - Feb. 27, 2013, 5:28 p.m.
> >> +# @devlist: #optional list of block device names (separated by ',', ';'

> >> +# or ':'). By default the backup includes all writable block devices.

> >

> > Make this a proper list, please.

> 

> That is, make it a JSON array: '*devlist' : [ 'str' ] Any time that you pass a string

> through JSON that then requires further ad-hoc parsing (such as splitting on ',' or

> ':'), it is a sign that your JSON representation was incorrect.


I want to use that on HMP, so I need to parse ad-hoc anyways?

> > You bake the "only one backup at a time" restriction right into the API.

> > That's fine if and only if multiple backups cannot possibly make sense.

> >

> > Unless we agree that's the case, the API needs to be designed so it

> > can

> > grow: backup needs to return an ID (it already does), backup-cancel

> > needs to take it as argument (it doesn't), and query-backup either

> > returns a list, or takes an ID argument.

> 

> Agreed.  In the case of backup-cancel, if you want to make the argument

> optional, and have it do the right thing when only one job is active (and only fail

> if multiple jobs are running), that would also work.

> 

> >

> > The implementation may still restrict to a single backup, of course.

> 

> The initial implementation, obviously.  The point is that the API should allow for

> growth, even if the initial implementation doesn't support everything that the

> API allows.


You can easily extend the API to allow multiple backups (In a fully backwards
compatible way). So there is no need to change that now.
Markus Armbruster - Feb. 28, 2013, 9:42 a.m.
Dietmar Maurer <dietmar@proxmox.com> writes:

>> >> +# @devlist: #optional list of block device names (separated by ',', ';'
>> >> +# or ':'). By default the backup includes all writable block devices.
>> >
>> > Make this a proper list, please.
>> 
>> That is, make it a JSON array: '*devlist' : [ 'str' ] Any time that
>> you pass a string
>> through JSON that then requires further ad-hoc parsing (such as
>> splitting on ',' or
>> ':'), it is a sign that your JSON representation was incorrect.
>
> I want to use that on HMP, so I need to parse ad-hoc anyways?

HMP syntax should be designed for humans, QMP syntax for machines.

Example: HMP size and time parameters accept numbers with unit suffixes.
QMP accept only plain numbers.

Example: HMP's netdev_add takes a single argument of the form
KEY=VALUE,...  QMP takes a JSON object { "KEY": "VALUE", ... }.

Your QMP syntax should make use of JSON as much as possible.  In
particular, use JSON numbers for numeric quantities, JSON arrays for
lists, JSON objects for dictionaries.

You may have to do some ad hoc parsing to go from HMP syntax to
QAPI/QMP.  In your particular case: you want a list of device names.
Device names are of the form /[a-z][-._a-z0-9]/i (see id_wellformed()).
Thus, having them in a single argument separated by /[,;:]/ is
syntactically unambiguous.

Three alternative separators are too baroque for my taste.  Even if you
drop all but one, it's still ad hoc syntax.  We've been there, done
that, it's bound to degenerate into inconsistent mess.

Therefore, I'd very much prefer to use generic syntax.  Options I can
see:

1. Define generic syntax for lists of identifiers, in monitor.c.

2. Why new syntax when existing syntax works?  Instead of

        backup foo.vma dev1:dev2:dev3

   have

        backup foo.vma dev1 dev2 dev3

   Requires support for arg_type B* in monitor.c.

>> > You bake the "only one backup at a time" restriction right into the API.
>> > That's fine if and only if multiple backups cannot possibly make sense.
>> >
>> > Unless we agree that's the case, the API needs to be designed so it
>> > can
>> > grow: backup needs to return an ID (it already does), backup-cancel
>> > needs to take it as argument (it doesn't), and query-backup either
>> > returns a list, or takes an ID argument.
>> 
>> Agreed.  In the case of backup-cancel, if you want to make the argument
>> optional, and have it do the right thing when only one job is active
>> (and only fail
>> if multiple jobs are running), that would also work.
>> 
>> >
>> > The implementation may still restrict to a single backup, of course.
>> 
>> The initial implementation, obviously.  The point is that the API
>> should allow for
>> growth, even if the initial implementation doesn't support everything that the
>> API allows.
>
> You can easily extend the API to allow multiple backups (In a fully backwards
> compatible way). So there is no need to change that now.

I disagree.

You propose something like:

1.  -> { "execute": "backup",
         "arguments": { "backup-file": "foo.vma" } }
    <- { "return": "deb8e5da-5b69-46d1-86c6-1b715a22809f" }

    This *deletes* any prior BackupStatus.  Clearly inappropriate with
    multiple backups.  How exactly do you propose to extend it in a
    backwards compatible way?

2.  -> { "execute": "query-backup" }

    This is specified to return exactly one BackupStatus.

    How exactly do you propose to extend it for multiple backups?

3.  -> { "execute": "backup-cancel" }

    This is specified to cancel "the currently executing backup process".

    This becomes *ill-defined* when multiple backup processes are
    executing.

    With multiple backup support, we still need to keep it working
    unchanged when no or just one backup is executing.  When more are
    executing, we have to make it fail, unless an optional argument is
    given.

I don't like the resulting complexity at all.

I'd rather do:

1.  -> { "execute": "backup",
         "arguments": { "id": "client-chosen-id",
                        "backup-file": "foo.vma" } }
    <- { "return": { } }

    Restriction: fails if a backup already exists, regardless of its
    state.  This is intended to be the only bit that needs to be changed
    when we go to multiple backups.

    Note that I have the client pass an id, for consistency with other
    QMP commands.

    I could be persuaded to return an UUID, if you can demonstrate a
    need for it.

2.  -> { "execute": "query-backup" }

    Returns status of all backups, as a list of BackupStatus.  Backups
    stay around until the client gets rid of them (see 4.).

    If you like, you can add suitable optional arguments to get only
    selected status.

3.  -> { "execute": "backup-cancel",
         "arguments": { "id": "client-chosen-id" } }

    If backup is in state "active", make it transition to state "error",
    and succeed.  Else fail.

4.  -> { "execute": "backup-forget",
         "arguments": { "id": "client-chosen-id" } }

    If backup is in state "done" or "error", free remaining resources
    associated with it (id becomes invalid) and succeed.  Else fail.
Dietmar Maurer - Feb. 28, 2013, 10:01 a.m.
> > You can easily extend the API to allow multiple backups (In a fully
> > backwards compatible way). So there is no need to change that now.
> 
> I disagree.
> 
> You propose something like:
> 
> 1.  -> { "execute": "backup",
>          "arguments": { "backup-file": "foo.vma" } }
>     <- { "return": { "deb8e5da-5b69-46d1-86c6-1b715a22809f" } }
> 
>     This *deletes* any prior BackupStatus.  Clearly inappropriate with
>     multiple backups.  How exactly do you propose to extend it in a
>     backwards compatible way?

It currently deletes the BackupStatus. But this is related to the current implementation, not the API definition.

> 2.  -> { "execute": "query-backup" }
> 
>     This is specified to return exactly one BackupStatus.
> 
>     How exactly do you propose to extend it for multiple backups?

Add an optional 'uuid' parameter. We can auto-select the job if there is only one job running.
 
> 3.  -> { "execute": "backup-cancel" }
> 
>     This is specified to cancel "the current execute backup process".
> 
>     This becomes *ill-defined* when multiple backup processes are
>     executing.

Again, we can add an optional 'uuid' parameter.
Markus Armbruster - Feb. 28, 2013, 12:27 p.m.
Dietmar Maurer <dietmar@proxmox.com> writes:

>> > You can easily extend the API to allow multiple backups (In a fully
>> > backwards compatible way). So there is no need to change that now.
>> 
>> I disagree.
>> 
>> You propose something like:
>> 
>> 1.  -> { "execute": "backup",
>>          "arguments": { "backup-file": "foo.vma" } }
>>     <- { "return": { "deb8e5da-5b69-46d1-86c6-1b715a22809f" } }
>> 
>>     This *deletes* any prior BackupStatus.  Clearly inappropriate with
>>     multiple backups.  How exactly do you propose to extend it in a
>>     backwards compatible way?
>
> It currently deletes the BackupStatus. But this is related to the
> current implementation, not the API definition.

What exactly makes BackupStatus get deleted is as API as it gets :)

>> 2.  -> { "execute": "query-backup" }
>> 
>>     This is specified to return exactly one BackupStatus.
>> 
>>     How exactly do you propose to extend it for multiple backups?
>
> Add an optional 'uuid' parameter. We can auto-select the job if there
> is only one job running

Adds a bit of complexity for no discernible gain.

>> 3.  -> { "execute": "backup-cancel" }
>> 
>>     This is specified to cancel "the current execute backup process".
>> 
>>     This becomes *ill-defined* when multiple backup processes are
>>     executing.
>
> Again, we can add an optional 'uuid' parameter.

Yes, but:

>>     With multiple backup support, we still need to keep it working
>>     unchanged when no or just one backup is executing.  When more are
>>     executing, we have to make it fail, unless an optional argument is
>>     given.

Again, unnecessary complexity.

Moreover, omitting the "optional" argument will break hard unless you
control all monitors.  Because if you don't, you never know whether
there's another backup in flight.

Evolution of APIs occasionally gets us "optional" parameters that are
really mandatory.  Tolerable.  But when a little foresight lets us avoid
such warts, we should.
Dietmar Maurer - Feb. 28, 2013, 12:43 p.m.
> >>     With multiple backup support, we still need to keep it working
> >>     unchanged when no or just one backup is executing.  When more are
> >>     executing, we have to make it fail, unless an optional argument is
> >>     given.
> 
> Again, unnecessary complexity.
> 
> Moreover, omitting the "optional" argument will break hard unless you control
> all monitors.  Because if you don't, you never know whether there's another
> backup in flight.
> 
> Evolution of APIs occasionally gets us "optional" parameters that are really
> mandatory.  Tolerable.  But when a little foresight lets us avoid such warts, we
> should.

OK, will try to fix that.

Patch

diff --git a/backup.h b/backup.h
index 9b1ea1c..22598a6 100644
--- a/backup.h
+++ b/backup.h
@@ -14,6 +14,9 @@ 
 #ifndef QEMU_BACKUP_H
 #define QEMU_BACKUP_H
 
+#include <uuid/uuid.h>
+#include "block/block.h"
+
 #define BACKUP_CLUSTER_BITS 16
 #define BACKUP_CLUSTER_SIZE (1<<BACKUP_CLUSTER_BITS)
 #define BACKUP_BLOCKS_PER_CLUSTER (BACKUP_CLUSTER_SIZE/BDRV_SECTOR_SIZE)
@@ -27,4 +30,16 @@  int backup_job_create(BlockDriverState *bs, BackupDumpFunc *backup_dump_cb,
                       BlockDriverCompletionFunc *backup_complete_cb,
                       void *opaque, int64_t speed);
 
+typedef struct BackupDriver {
+    const char *format;
+    void *(*open)(const char *filename, uuid_t uuid, Error **errp);
+    int (*close)(void *opaque, Error **errp);
+    int (*register_config)(void *opaque, const char *name, gpointer data,
+                              size_t data_len);
+    int (*register_stream)(void *opaque, const char *devname, size_t size);
+    int (*dump)(void *opaque, uint8_t dev_id, int64_t cluster_num,
+                   unsigned char *buf, size_t *zero_bytes);
+    int (*complete)(void *opaque, uint8_t dev_id, int ret);
+} BackupDriver;
+
 #endif /* QEMU_BACKUP_H */
diff --git a/blockdev.c b/blockdev.c
index 63e6f1e..84f598d 100644
--- a/blockdev.c
+++ b/blockdev.c
@@ -20,6 +20,7 @@ 
 #include "qmp-commands.h"
 #include "trace.h"
 #include "sysemu/arch_init.h"
+#include "backup.h"
 
 static QTAILQ_HEAD(drivelist, DriveInfo) drives = QTAILQ_HEAD_INITIALIZER(drives);
 
@@ -1334,6 +1335,428 @@  void qmp_drive_mirror(const char *device, const char *target,
     drive_get_ref(drive_get_by_blockdev(bs));
 }
 
+/* Backup related functions */
+
+static void backup_run_next_job(void);
+
+static struct GenericBackupState {
+    Error *error;
+    bool cancel;
+    uuid_t uuid;
+    char uuid_str[37];
+    int64_t speed;
+    time_t start_time;
+    time_t end_time;
+    char *backup_file;
+    const BackupDriver *driver;
+    void *writer;
+    GList *bcb_list;
+    size_t total;
+    size_t transferred;
+    size_t zero_bytes;
+} backup_state;
+
+typedef struct BackupCB {
+    BlockDriverState *bs;
+    uint8_t dev_id;
+    bool started;
+    bool completed;
+    size_t size;
+    size_t transferred;
+    size_t zero_bytes;
+} BackupCB;
+
+static int backup_dump_cb(void *opaque, BlockDriverState *bs,
+                          int64_t cluster_num, unsigned char *buf)
+{
+    BackupCB *bcb = opaque;
+
+    assert(backup_state.driver);
+    assert(backup_state.writer);
+    assert(backup_state.driver->dump);
+
+    size_t zero_bytes = 0;
+    int bytes = backup_state.driver->dump(backup_state.writer,
+                                          bcb->dev_id, cluster_num,
+                                          buf, &zero_bytes);
+
+    if (bytes > 0) {
+        bcb->transferred += bytes;
+        backup_state.transferred += bytes;
+        if (zero_bytes) {
+            bcb->zero_bytes += bytes;
+            backup_state.zero_bytes += zero_bytes;
+        }
+    }
+
+    return bytes;
+}
+
+static void backup_cleanup(void)
+{
+    if (backup_state.writer && backup_state.driver) {
+        backup_state.end_time = time(NULL);
+        Error *local_err = NULL;
+        backup_state.driver->close(backup_state.writer, &local_err);
+        error_propagate(&backup_state.error, local_err);
+        backup_state.writer = NULL;
+    }
+
+    if (backup_state.bcb_list) {
+        GList *l = backup_state.bcb_list;
+        while (l) {
+            BackupCB *bcb = l->data;
+            l = g_list_next(l);
+            drive_put_ref_bh_schedule(drive_get_by_blockdev(bcb->bs));
+            g_free(bcb);
+        }
+        g_list_free(backup_state.bcb_list);
+        backup_state.bcb_list = NULL;
+    }
+}
+
+static void backup_complete_cb(void *opaque, int ret)
+{
+    BackupCB *bcb = opaque;
+
+    assert(backup_state.driver);
+    assert(backup_state.writer);
+    assert(backup_state.driver->complete);
+    assert(backup_state.driver->close);
+
+    bcb->completed = true;
+
+    backup_state.driver->complete(backup_state.writer, bcb->dev_id, ret);
+
+    if (!backup_state.cancel) {
+        backup_run_next_job();
+    }
+}
+
+static void backup_cancel(void)
+{
+    backup_state.cancel = true;
+
+    if (!backup_state.error) {
+        error_setg(&backup_state.error, "backup cancelled");
+    }
+
+    /* drain all i/o (awake jobs waiting for aio) */
+    bdrv_drain_all();
+
+    int job_count = 0;
+    GList *l = backup_state.bcb_list;
+    while (l) {
+        BackupCB *bcb = l->data;
+        l = g_list_next(l);
+        BlockJob *job = bcb->bs->job;
+        if (job) {
+            job_count++;
+            if (!bcb->started) {
+                bcb->started = true;
+                backup_job_start(bcb->bs, true);
+            }
+            if (!bcb->completed) {
+                block_job_cancel_sync(job);
+            }
+        }
+    }
+
+    backup_cleanup();
+}
+
+void qmp_backup_cancel(Error **errp)
+{
+    backup_cancel();
+}
+
+static void backup_run_next_job(void)
+{
+    GList *l = backup_state.bcb_list;
+    while (l) {
+        BackupCB *bcb = l->data;
+        l = g_list_next(l);
+
+        if (bcb->bs && bcb->bs->job && !bcb->completed) {
+            if (!bcb->started) {
+                bcb->started = true;
+                bool cancel = backup_state.error || backup_state.cancel;
+                backup_job_start(bcb->bs, cancel);
+            }
+            return;
+        }
+    }
+
+    backup_cleanup();
+}
+
+static void backup_start_jobs(void)
+{
+    /* create all jobs (one for each device), start first one */
+    GList *l = backup_state.bcb_list;
+    while (l) {
+        BackupCB *bcb = l->data;
+        l = g_list_next(l);
+
+        if (backup_job_create(bcb->bs, backup_dump_cb, backup_complete_cb,
+                              bcb, backup_state.speed) != 0) {
+            error_setg(&backup_state.error, "backup_job_create failed");
+            backup_cancel();
+            return;
+        }
+    }
+
+    backup_run_next_job();
+}
+
+char *qmp_backup(const char *backup_file, bool has_format, BackupFormat format,
+                 bool has_config_file, const char *config_file,
+                 bool has_devlist, const char *devlist,
+                 bool has_speed, int64_t speed, Error **errp)
+{
+    BlockDriverState *bs;
+    Error *local_err = NULL;
+    uuid_t uuid;
+    void *writer = NULL;
+    gchar **devs = NULL;
+    GList *bcblist = NULL;
+
+    if (backup_state.bcb_list) {
+        error_set(errp, ERROR_CLASS_GENERIC_ERROR,
+                  "previous backup not finished");
+        return NULL;
+    }
+
+    /* Todo: try to auto-detect format based on file name */
+    format = has_format ? format : BACKUP_FORMAT_VMA;
+
+    /* fixme: find driver for specified format */
+    const BackupDriver *driver = NULL;
+
+    if (!driver) {
+        error_set(errp, ERROR_CLASS_GENERIC_ERROR, "unknown backup format");
+        return NULL;
+    }
+
+    if (has_devlist) {
+        devs = g_strsplit_set(devlist, ",;:", -1);
+
+        gchar **d = devs;
+        while (d && *d) {
+            bs = bdrv_find(*d);
+            if (bs) {
+                if (bdrv_is_read_only(bs)) {
+                    error_set(errp, QERR_DEVICE_IS_READ_ONLY, *d);
+                    goto err;
+                }
+                if (!bdrv_is_inserted(bs)) {
+                    error_set(errp, QERR_DEVICE_HAS_NO_MEDIUM, *d);
+                    goto err;
+                }
+                BackupCB *bcb = g_new0(BackupCB, 1);
+                bcb->bs = bs;
+                bcblist = g_list_append(bcblist, bcb);
+            } else {
+                error_set(errp, QERR_DEVICE_NOT_FOUND, *d);
+                goto err;
+            }
+            d++;
+        }
+
+    } else {
+
+        bs = NULL;
+        while ((bs = bdrv_next(bs))) {
+
+            if (!bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
+                continue;
+            }
+
+            BackupCB *bcb = g_new0(BackupCB, 1);
+            bcb->bs = bs;
+            bcblist = g_list_append(bcblist, bcb);
+        }
+    }
+
+    if (!bcblist) {
+        error_set(errp, ERROR_CLASS_GENERIC_ERROR, "empty device list");
+        goto err;
+    }
+
+    GList *l = bcblist;
+    while (l) {
+        BackupCB *bcb = l->data;
+        l = g_list_next(l);
+        if (bcb->bs->job) {
+            error_set(errp, QERR_DEVICE_IN_USE, bdrv_get_device_name(bcb->bs));
+            goto err;
+        }
+    }
+
+    uuid_generate(uuid);
+
+    writer = driver->open(backup_file, uuid, &local_err);
+    if (!writer) {
+        if (error_is_set(&local_err)) {
+            error_propagate(errp, local_err);
+        }
+        goto err;
+    }
+
+    size_t total = 0;
+
+    /* register all devices for vma writer */
+    l = bcblist;
+    while (l) {
+        BackupCB *bcb = l->data;
+        l = g_list_next(l);
+
+        int64_t size = bdrv_getlength(bcb->bs);
+        const char *devname = bdrv_get_device_name(bcb->bs);
+        bcb->dev_id = driver->register_stream(writer, devname, size);
+        if (bcb->dev_id <= 0) {
+            error_set(errp, ERROR_CLASS_GENERIC_ERROR,
+                      "register_stream failed");
+            goto err;
+        }
+        bcb->size = size;
+        total += size;
+    }
+
+    /* add configuration file to archive */
+    if (has_config_file) {
+        char *cdata = NULL;
+        gsize clen = 0;
+        GError *err = NULL;
+        if (!g_file_get_contents(config_file, &cdata, &clen, &err)) {
+            error_setg(errp, "unable to read file '%s'", config_file);
+            goto err;
+        }
+
+        const char *basename = g_path_get_basename(config_file);
+        if (driver->register_config(writer, basename, cdata, clen) < 0) {
+            error_setg(errp, "register_config failed");
+            g_free(cdata);
+            goto err;
+        }
+        g_free(cdata);
+    }
+
+    /* initialize global backup_state now */
+
+    backup_state.cancel = false;
+
+    if (backup_state.error) {
+        error_free(backup_state.error);
+        backup_state.error = NULL;
+    }
+
+    backup_state.driver = driver;
+
+    backup_state.speed = (has_speed && speed > 0) ? speed : 0;
+
+    backup_state.start_time = time(NULL);
+    backup_state.end_time = 0;
+
+    if (backup_state.backup_file) {
+        g_free(backup_state.backup_file);
+    }
+    backup_state.backup_file = g_strdup(backup_file);
+
+    backup_state.writer = writer;
+
+    uuid_copy(backup_state.uuid, uuid);
+    uuid_unparse_lower(uuid, backup_state.uuid_str);
+
+    backup_state.bcb_list = bcblist;
+
+    backup_state.total = total;
+    backup_state.transferred = 0;
+    backup_state.zero_bytes = 0;
+
+    /* Grab a reference so hotplug does not delete the
+     * BlockDriverState from underneath us.
+     */
+    l = bcblist;
+    while (l) {
+        BackupCB *bcb = l->data;
+        l = g_list_next(l);
+        drive_get_ref(drive_get_by_blockdev(bcb->bs));
+    }
+
+    backup_start_jobs();
+
+    return g_strdup(backup_state.uuid_str);
+
+err:
+
+    l = bcblist;
+    while (l) {
+        g_free(l->data);
+        l = g_list_next(l);
+    }
+    g_list_free(bcblist);
+
+    if (devs) {
+        g_strfreev(devs);
+    }
+
+    if (writer) {
+        unlink(backup_file);
+        if (driver) {
+            Error *err = NULL;
+            driver->close(writer, &err);
+        }
+    }
+
+    return NULL;
+}
+
+BackupStatus *qmp_query_backup(Error **errp)
+{
+    BackupStatus *info = g_malloc0(sizeof(*info));
+
+    if (!backup_state.start_time) {
+        /* not started, return {} */
+        return info;
+    }
+
+    info->has_status = true;
+    info->has_start_time = true;
+    info->start_time = backup_state.start_time;
+
+    if (backup_state.backup_file) {
+        info->has_backup_file = true;
+        info->backup_file = g_strdup(backup_state.backup_file);
+    }
+
+    info->has_uuid = true;
+    info->uuid = g_strdup(backup_state.uuid_str);
+
+    if (backup_state.end_time) {
+        if (backup_state.error) {
+            info->status = g_strdup("error");
+            info->has_errmsg = true;
+            info->errmsg = g_strdup(error_get_pretty(backup_state.error));
+        } else {
+            info->status = g_strdup("done");
+        }
+        info->has_end_time = true;
+        info->end_time = backup_state.end_time;
+    } else {
+        info->status = g_strdup("active");
+    }
+
+    info->has_total = true;
+    info->total = backup_state.total;
+    info->has_zero_bytes = true;
+    info->zero_bytes = backup_state.zero_bytes;
+    info->has_transferred = true;
+    info->transferred = backup_state.transferred;
+
+    return info;
+}
+
 static BlockJob *find_block_job(const char *device)
 {
     BlockDriverState *bs;
diff --git a/hmp-commands.hx b/hmp-commands.hx
index 64008a9..0f178d8 100644
--- a/hmp-commands.hx
+++ b/hmp-commands.hx
@@ -83,6 +83,35 @@  STEXI
 Copy data from a backing file into a block device.
 ETEXI
 
+   {
+        .name       = "backup",
+        .args_type  = "backupfile:s,speed:o?,devlist:s?",
+        .params     = "backupfile [speed [devlist]]",
+        .help       = "create a VM Backup.",
+        .mhandler.cmd = hmp_backup,
+    },
+
+STEXI
+@item backup
+@findex backup
+Create a VM backup.
+ETEXI
+
+    {
+        .name       = "backup_cancel",
+        .args_type  = "",
+        .params     = "",
+        .help       = "cancel the current VM backup",
+        .mhandler.cmd = hmp_backup_cancel,
+    },
+
+STEXI
+@item backup_cancel
+@findex backup_cancel
+Cancel the current VM backup.
+
+ETEXI
+
     {
         .name       = "block_job_set_speed",
         .args_type  = "device:B,speed:o",
@@ -1630,6 +1659,8 @@  show CPU statistics
 show user network stack connection states
 @item info migrate
 show migration status
+@item info backup
+show backup status
 @item info migrate_capabilities
 show current migration capabilities
 @item info migrate_cache_size
diff --git a/hmp.c b/hmp.c
index 2f47a8a..b2c1f23 100644
--- a/hmp.c
+++ b/hmp.c
@@ -131,6 +131,38 @@  void hmp_info_mice(Monitor *mon, const QDict *qdict)
     qapi_free_MouseInfoList(mice_list);
 }
 
+void hmp_info_backup(Monitor *mon, const QDict *qdict)
+{
+    BackupStatus *info;
+
+    info = qmp_query_backup(NULL);
+    if (info->has_status) {
+        if (info->has_errmsg) {
+            monitor_printf(mon, "Backup status: %s - %s\n",
+                           info->status, info->errmsg);
+        } else {
+            monitor_printf(mon, "Backup status: %s\n", info->status);
+        }
+    }
+    if (info->has_backup_file) {
+        int per = (info->has_total && info->total &&
+            info->has_transferred && info->transferred) ?
+            (info->transferred * 100)/info->total : 0;
+        int zero_per = (info->has_total && info->total &&
+                        info->has_zero_bytes && info->zero_bytes) ?
+            (info->zero_bytes * 100)/info->total : 0;
+        monitor_printf(mon, "Backup file: %s\n", info->backup_file);
+        monitor_printf(mon, "Backup uuid: %s\n", info->uuid);
+        monitor_printf(mon, "Total size: %zd\n", info->total);
+        monitor_printf(mon, "Transferred bytes: %zd (%d%%)\n",
+                       info->transferred, per);
+        monitor_printf(mon, "Zero bytes: %zd (%d%%)\n",
+                       info->zero_bytes, zero_per);
+    }
+
+    qapi_free_BackupStatus(info);
+}
+
 void hmp_info_migrate(Monitor *mon, const QDict *qdict)
 {
     MigrationInfo *info;
@@ -998,6 +1030,37 @@  void hmp_block_stream(Monitor *mon, const QDict *qdict)
     hmp_handle_error(mon, &error);
 }
 
+void hmp_backup_cancel(Monitor *mon, const QDict *qdict)
+{
+    Error *errp = NULL;
+
+    qmp_backup_cancel(&errp);
+
+    if (error_is_set(&errp)) {
+        monitor_printf(mon, "%s\n", error_get_pretty(errp));
+        error_free(errp);
+        return;
+    }
+}
+
+void hmp_backup(Monitor *mon, const QDict *qdict)
+{
+    const char *backup_file = qdict_get_str(qdict, "backup-file");
+    const char *devlist = qdict_get_try_str(qdict, "devlist");
+    int64_t speed = qdict_get_try_int(qdict, "speed", 0);
+
+    Error *errp = NULL;
+
+    qmp_backup(backup_file, true, BACKUP_FORMAT_VMA, false, NULL, !!devlist,
+               devlist, qdict_haskey(qdict, "speed"), speed, &errp);
+
+    if (error_is_set(&errp)) {
+        monitor_printf(mon, "%s\n", error_get_pretty(errp));
+        error_free(errp);
+        return;
+    }
+}
+
 void hmp_block_job_set_speed(Monitor *mon, const QDict *qdict)
 {
     Error *error = NULL;
diff --git a/hmp.h b/hmp.h
index 30b3c20..ad4cf80 100644
--- a/hmp.h
+++ b/hmp.h
@@ -28,6 +28,7 @@  void hmp_info_mice(Monitor *mon, const QDict *qdict);
 void hmp_info_migrate(Monitor *mon, const QDict *qdict);
 void hmp_info_migrate_capabilities(Monitor *mon, const QDict *qdict);
 void hmp_info_migrate_cache_size(Monitor *mon, const QDict *qdict);
+void hmp_info_backup(Monitor *mon, const QDict *qdict);
 void hmp_info_cpus(Monitor *mon, const QDict *qdict);
 void hmp_info_block(Monitor *mon, const QDict *qdict);
 void hmp_info_blockstats(Monitor *mon, const QDict *qdict);
@@ -65,6 +66,8 @@  void hmp_eject(Monitor *mon, const QDict *qdict);
 void hmp_change(Monitor *mon, const QDict *qdict);
 void hmp_block_set_io_throttle(Monitor *mon, const QDict *qdict);
 void hmp_block_stream(Monitor *mon, const QDict *qdict);
+void hmp_backup(Monitor *mon, const QDict *qdict);
+void hmp_backup_cancel(Monitor *mon, const QDict *qdict);
 void hmp_block_job_set_speed(Monitor *mon, const QDict *qdict);
 void hmp_block_job_cancel(Monitor *mon, const QDict *qdict);
 void hmp_block_job_pause(Monitor *mon, const QDict *qdict);
diff --git a/monitor.c b/monitor.c
index 6a0f257..e4a810c 100644
--- a/monitor.c
+++ b/monitor.c
@@ -2666,6 +2666,13 @@  static mon_cmd_t info_cmds[] = {
     },
 #endif
     {
+        .name       = "backup",
+        .args_type  = "",
+        .params     = "",
+        .help       = "show backup status",
+        .mhandler.cmd = hmp_info_backup,
+    },
+    {
         .name       = "migrate",
         .args_type  = "",
         .params     = "",
diff --git a/qapi-schema.json b/qapi-schema.json
index 7275b5d..09ca8ef 100644
--- a/qapi-schema.json
+++ b/qapi-schema.json
@@ -425,6 +425,39 @@ 
 { 'type': 'EventInfo', 'data': {'name': 'str'} }
 
 ##
+# @BackupStatus:
+#
+# Detailed backup status.
+#
+# @status: #optional string describing the current backup status.
+#          This can be 'active', 'done', 'error'. If this field is not
+#          returned, no backup process has been initiated
+#
+# @errmsg: #optional error message (only returned if status is 'error')
+#
+# @total: #optional total amount of bytes involved in the backup process
+#
+# @transferred: #optional amount of bytes already backed up.
+#
+# @zero-bytes: #optional amount of 'zero' bytes detected.
+#
+# @start-time: #optional time (epoch) when backup job started.
+#
+# @end-time: #optional time (epoch) when backup job finished.
+#
+# @backup-file: #optional backup file name
+#
+# @uuid: #optional uuid for this backup job
+#
+# Since: 1.5.0
+##
+{ 'type': 'BackupStatus',
+  'data': {'*status': 'str', '*errmsg': 'str', '*total': 'int',
+           '*transferred': 'int', '*zero-bytes': 'int',
+           '*start-time': 'int', '*end-time': 'int',
+           '*backup-file': 'str', '*uuid': 'str' } }
+
+##
 # @query-events:
 #
 # Return a list of supported QMP events by this server
@@ -1824,6 +1857,68 @@ 
   'data': { 'path': 'str' },
   'returns': [ 'ObjectPropertyInfo' ] }
 
+
+##
+# @BackupFormat
+#
+# An enumeration of supported backup formats.
+#
+# @vma: Proxmox vma backup format
+##
+{ 'enum': 'BackupFormat',
+  'data': [ 'vma' ] }
+
+##
+# @backup:
+#
+# Starts a VM backup.
+#
+# @backup-file: the backup file name
+#
+# @format: format of the backup file
+#
+# @config-file: #optional name of a configuration file to include into
+# the backup archive.
+#
+# @speed: #optional the maximum speed, in bytes per second
+#
+# @devlist: #optional list of block device names (separated by ',', ';'
+# or ':'). By default the backup includes all writable block devices.
+#
+# Returns: the uuid of the backup job
+#
+# Since: 1.5.0
+##
+{ 'command': 'backup', 'data': { 'backup-file': 'str',
+                                 '*format': 'BackupFormat',
+                                 '*config-file': 'str',
+                                 '*devlist': 'str', '*speed': 'int' },
+  'returns': 'str' }
+
+##
+# @query-backup
+#
+# Returns information about current/last backup task.
+#
+# Returns: @BackupStatus
+#
+# Since: 1.5.0
+##
+{ 'command': 'query-backup', 'returns': 'BackupStatus' }
+
+##
+# @backup-cancel
+#
+# Cancel the current executing backup process.
+#
+# Returns: nothing on success
+#
+# Notes: This command succeeds even if there is no backup process running.
+#
+# Since: 1.5.0
+##
+{ 'command': 'backup-cancel' }
+
 ##
 # @qom-get:
 #
diff --git a/qmp-commands.hx b/qmp-commands.hx
index 799adea..17e381b 100644
--- a/qmp-commands.hx
+++ b/qmp-commands.hx
@@ -889,6 +889,18 @@  EQMP
     },
 
     {
+        .name       = "backup",
+        .args_type  = "backup-file:s,format:s?,config-file:F?,speed:o?,devlist:s?",
+        .mhandler.cmd_new = qmp_marshal_input_backup,
+    },
+
+    {
+        .name       = "backup-cancel",
+        .args_type  = "",
+        .mhandler.cmd_new = qmp_marshal_input_backup_cancel,
+    },
+
+    {
         .name       = "block-job-set-speed",
         .args_type  = "device:B,speed:o",
         .mhandler.cmd_new = qmp_marshal_input_block_job_set_speed,
@@ -2566,6 +2578,21 @@  EQMP
     },
 
 SQMP
+
+query-backup
+-------------
+
+Backup status.
+
+EQMP
+
+    {
+        .name       = "query-backup",
+        .args_type  = "",
+        .mhandler.cmd_new = qmp_marshal_input_query_backup,
+    },
+
+SQMP
 migrate-set-capabilities
 -------