
[1/2] Add virtagent file system freeze/thaw

Message ID 1296557928-30019-2-git-send-email-Jes.Sorensen@redhat.com
State New

Commit Message

Jes Sorensen Feb. 1, 2011, 10:58 a.m. UTC
From: Jes Sorensen <Jes.Sorensen@redhat.com>

Implement freeze/thaw support in the guest, allowing the host to
request the guest freezes all its file systems before a live snapshot
is performed.
 - fsfreeze(): Walk the list of mounted local real file systems,
               and freeze them.
 - fsthaw():   Walk the list of previously frozen file systems and
               thaw them.
 - fsstatus(): Return the current status of freeze/thaw. The host must
               poll this function, in case fsfreeze() returned with a
	       timeout, to wait for the operation to finish.

Signed-off-by: Jes Sorensen <Jes.Sorensen@redhat.com>
---
 virtagent-common.h |    8 ++
 virtagent-server.c |  196 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 204 insertions(+), 0 deletions(-)

Comments

Stefan Hajnoczi Feb. 1, 2011, 2:12 p.m. UTC | #1
On Tue, Feb 1, 2011 at 10:58 AM,  <Jes.Sorensen@redhat.com> wrote:
> From: Jes Sorensen <Jes.Sorensen@redhat.com>
>
> Implement freeze/thaw support in the guest, allowing the host to
> request the guest freezes all it's file systems before a live snapshot
> is performed.
>  - fsfreeze(): Walk the list of mounted local real file systems,
>               and freeze them.
>  - fsthaw():   Walk the list of previously frozen file systems and
>               thaw them.
>  - fsstatus(): Return the current status of freeze/thaw. The host must
>               poll this function, in case fsfreeze() returned with a
>               timeout, to wait for the operation to finish.

It is desirable to minimize the freeze time, which may interrupt or
degrade the service that applications inside the VM can provide.
Polling means we have to choose a fixed value (500 ms?) at which to
check for freeze completion.  In this example we could have up to 500
ms extra time spent in freeze because it completed right after we
polled.  Any thoughts on this?
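
For illustration only, a hypothetical host-side polling loop; wait_for_freeze(), va_do_fsstatus() and the interval are invented names and numbers, not part of the patch:

    #include <unistd.h>

    #define VA_POLL_INTERVAL_MS 500         /* made-up figure, per the discussion */

    static int wait_for_freeze(void)
    {
        for (;;) {
            int status = va_do_fsstatus();  /* stand-in for the va.fsstatus RPC */
            if (status != FREEZE_INPROGRESS) {
                return status;              /* FROZEN, THAWED or ERROR */
            }
            /* worst case: the guest finished just after the previous poll and
             * stays frozen for almost a full extra interval */
            usleep(VA_POLL_INTERVAL_MS * 1000);
        }
    }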

In terms of the fsfreeze(), fsthaw(), fsstatus() API, are you looking
at Windows Volume Shadow Copy Services and does this API fit that
model (I haven't looked at it in detail yet)?
http://msdn.microsoft.com/en-us/library/bb968832(v=vs.85).aspx

> Signed-off-by: Jes Sorensen <Jes.Sorensen@redhat.com>
> ---
>  virtagent-common.h |    8 ++
>  virtagent-server.c |  196 ++++++++++++++++++++++++++++++++++++++++++++++++++++
>  2 files changed, 204 insertions(+), 0 deletions(-)
>
> diff --git a/virtagent-common.h b/virtagent-common.h
> index 5d8f5c1..220a4b6 100644
> --- a/virtagent-common.h
> +++ b/virtagent-common.h
> @@ -61,6 +61,14 @@ typedef struct VAContext {
>     const char *channel_path;
>  } VAContext;
>
> +enum vs_fsfreeze_status {
> +    FREEZE_ERROR = -1,
> +    FREEZE_THAWED = 0,
> +    FREEZE_INPROGRESS = 1,
> +    FREEZE_FROZEN = 2,
> +    FREEZE_THAWINPROGRESS = 3,
> +};
> +
>  enum va_job_status {
>     VA_JOB_STATUS_PENDING = 0,
>     VA_JOB_STATUS_OK,
> diff --git a/virtagent-server.c b/virtagent-server.c
> index 7bb35b2..cf2a3f0 100644
> --- a/virtagent-server.c
> +++ b/virtagent-server.c
> @@ -14,6 +14,13 @@
>  #include <syslog.h>
>  #include "qemu_socket.h"
>  #include "virtagent-common.h"
> +#include <mntent.h>
> +#include <sys/types.h>
> +#include <sys/stat.h>
> +#include <sys/errno.h>
> +#include <sys/ioctl.h>
> +#include <fcntl.h>
> +#include <linux/fs.h>
>
>  static VAServerData *va_server_data;
>  static bool va_enable_syslog = false; /* enable syslog'ing of RPCs */
> @@ -217,6 +224,189 @@ static xmlrpc_value *va_hello(xmlrpc_env *env,
>     return result;
>  }
>
> +
> +/*
> + * Walk the mount table and build a list of local file systems
> + */
> +
> +struct direntry {
> +    char *dirname;
> +    char *devtype;
> +    struct direntry *next;
> +};
> +
> +static struct direntry *mount_list;
> +static int fsfreeze_status;
> +
> +static int build_mount_list(void)
> +{
> +    struct mntent *mnt;
> +    struct direntry *entry;
> +    struct direntry *next;
> +    char const *mtab = MOUNTED;
> +    FILE *fp;
> +
> +    fp = setmntent(mtab, "r");
> +    if (!fp) {
> +       fprintf(stderr, "unable to read mtab\n");
> +       goto fail;
> +    }
> +
> +    while ((mnt = getmntent(fp))) {
> +       /*
> +        * An entry which device name doesn't start with a '/' is
> +        * either a dummy file system or a network file system.
> +        * Add special handling for smbfs and cifs as is done by
> +        * coreutils as well.
> +        */
> +       if ((mnt->mnt_fsname[0] != '/') ||
> +           (strcmp(mnt->mnt_type, "smbfs") == 0) ||
> +           (strcmp(mnt->mnt_type, "cifs") == 0)) {
> +           continue;
> +       }
> +
> +       entry = qemu_malloc(sizeof(struct direntry));
> +       if (!entry) {
> +           goto fail;
> +       }

qemu_malloc() never fails.
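
A minimal sketch of the simplification being suggested, relying on qemu_malloc() aborting on failure instead of returning NULL:

        entry = qemu_malloc(sizeof(struct direntry));  /* aborts on OOM, no NULL check needed */
        entry->dirname = qemu_strdup(mnt->mnt_dir);
        entry->devtype = qemu_strdup(mnt->mnt_type);
        entry->next = mount_list;
        mount_list = entry;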

> +       entry->dirname = qemu_strdup(mnt->mnt_dir);
> +       entry->devtype = qemu_strdup(mnt->mnt_type);
> +       entry->next = mount_list;
> +
> +       mount_list = entry;
> +    }
> +
> +    endmntent(fp);
> +
> +    return 0;
> +
> +fail:
> +    while(mount_list) {
> +       next = mount_list->next;
> +       qemu_free(mount_list->dirname);
> +       qemu_free(mount_list->devtype);
> +       qemu_free(mount_list);
> +       mount_list = next;
> +    }
> +
> +    return -1;
> +}
> +
> +/*
> + * va_fsfreeze(): Walk list of mounted file systems in the guest, and
> + *   freeze the ones which are real local file systems.
> + * rpc return values: Number of file systems frozen, -1 on error.
> + */
> +static xmlrpc_value *va_fsfreeze(xmlrpc_env *env,
> +                                 xmlrpc_value *params,
> +                                 void *user_data)
> +{
> +    xmlrpc_int32 ret = 0, i = 0;
> +    xmlrpc_value *result;
> +    struct direntry *entry;
> +    int fd;
> +    SLOG("va_fsfreeze()");
> +
> +    if (fsfreeze_status == FREEZE_FROZEN) {
> +        ret = 0;
> +        goto out;
> +    }

The only valid status here is FREEZE_THAWED?  Perhaps we should test
for that specifically.
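
A possible form of the stricter check; whether an unexpected state should report 0 or an error is a judgment call the thread leaves open:

        if (fsfreeze_status != FREEZE_THAWED) {
            ret = -1;   /* only freeze from a fully thawed state */
            goto out;
        }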

> +
> +    ret = build_mount_list();
> +    if (ret < 0) {
> +        goto out;
> +    }
> +
> +    fsfreeze_status = FREEZE_INPROGRESS;
> +
> +    entry = mount_list;
> +    while(entry) {
> +        fd = qemu_open(entry->dirname, O_RDONLY);
> +        if (fd == -1) {
> +            ret = errno;
> +            goto error;
> +        }
> +        ret = ioctl(fd, FIFREEZE);

If you close(fd) here then it won't leak or need extra code in the error path.
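
A sketch of the suggested rearrangement; note it tests errno rather than the ioctl() return value, since ioctl() returns -1 on failure:

        ret = ioctl(fd, FIFREEZE);
        close(fd);   /* close immediately so the error path needs no cleanup */
        if (ret < 0 && errno != EOPNOTSUPP) {
            goto error;
        }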

> +        if (ret < 0 && ret != EOPNOTSUPP) {
> +            goto error;
> +        }
> +
> +        close(fd);
> +        entry = entry->next;
> +        i++;
> +    }
> +
> +    fsfreeze_status = FREEZE_FROZEN;
> +    ret = i;
> +out:
> +    result = xmlrpc_build_value(env, "i", ret);
> +    return result;
> +error:
> +    if (i > 0) {
> +        fsfreeze_status = FREEZE_ERROR;
> +    }
> +    goto out;
> +}
> +
> +/*
> + * va_fsthaw(): Walk list of frozen file systems in the guest, and
> + *   thaw them.
> + * rpc return values: Number of file systems thawed on success, -1 on error.
> + */
> +static xmlrpc_value *va_fsthaw(xmlrpc_env *env,
> +                               xmlrpc_value *params,
> +                               void *user_data)
> +{
> +    xmlrpc_int32 ret;
> +    xmlrpc_value *result;
> +    struct direntry *entry;
> +    int fd, i = 0;
> +    SLOG("va_fsthaw()");
> +
> +    if (fsfreeze_status == FREEZE_THAWED) {
> +        ret = 0;
> +        goto out;
> +    }

A stricter check would be status FREEZE_FROZEN.
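
The analogous stricter check on the thaw side might look like this:

        if (fsfreeze_status != FREEZE_FROZEN) {
            ret = -1;   /* nothing frozen, or a freeze/thaw still in progress */
            goto out;
        }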

> +
> +    while((entry = mount_list)) {
> +        fd = qemu_open(entry->dirname, O_RDONLY);
> +        if (fd == -1) {
> +            ret = -1;
> +            goto out;
> +        }
> +        ret = ioctl(fd, FITHAW);

Same thing about close(fd) here.

Stefan
Jes Sorensen Feb. 1, 2011, 2:26 p.m. UTC | #2
On 02/01/11 15:12, Stefan Hajnoczi wrote:
> On Tue, Feb 1, 2011 at 10:58 AM,  <Jes.Sorensen@redhat.com> wrote:
>> From: Jes Sorensen <Jes.Sorensen@redhat.com>
>>
>> Implement freeze/thaw support in the guest, allowing the host to
>> request the guest freezes all it's file systems before a live snapshot
>> is performed.
>>  - fsfreeze(): Walk the list of mounted local real file systems,
>>               and freeze them.
>>  - fsthaw():   Walk the list of previously frozen file systems and
>>               thaw them.
>>  - fsstatus(): Return the current status of freeze/thaw. The host must
>>               poll this function, in case fsfreeze() returned with a
>>               timeout, to wait for the operation to finish.
> 
> It is desirable to minimize the freeze time, which may interrupt or
> degrade the service that applications inside the VM can provide.
> Polling means we have to choose a fixed value (500 ms?) at which to
> check for freeze completion.  In this example we could have up to 500
> ms extra time spent in freeze because it completed right after we
> polled.  Any thoughts on this?

I have to admit you lost me here, where do you get that 500ms time from?
Is that the XMLRPC polling time or? I just used the example code from
other agent calls.

> In terms of the fsfreeze(), fsthaw(), fsstatus() API, are you looking
> at Windows Volume Shadow Copy Services and does this API fit that
> model (I haven't looked at it in detail yet)?
> http://msdn.microsoft.com/en-us/library/bb968832(v=vs.85).aspx

I haven't looked at it, I designed the calls based on how they fit with
the Linux ioctls.

>> +       entry = qemu_malloc(sizeof(struct direntry));
>> +       if (!entry) {
>> +           goto fail;
>> +       }
> 
> qemu_malloc() never fails.

Good point, we have ugly malloc in qemu :( I wrote the code to handle
this outside QEMU first, to make sure it worked correctly and to see
how many times I could crash my laptop in the process. I'll fix it.

>> +static xmlrpc_value *va_fsfreeze(xmlrpc_env *env,
>> +                                 xmlrpc_value *params,
>> +                                 void *user_data)
>> +{
>> +    xmlrpc_int32 ret = 0, i = 0;
>> +    xmlrpc_value *result;
>> +    struct direntry *entry;
>> +    int fd;
>> +    SLOG("va_fsfreeze()");
>> +
>> +    if (fsfreeze_status == FREEZE_FROZEN) {
>> +        ret = 0;
>> +        goto out;
>> +    }
> 
> The only valid status here is FREEZE_THAWED?  Perhaps we should test
> for that specifically.

Good point, I'll fix this.

>> +
>> +    ret = build_mount_list();
>> +    if (ret < 0) {
>> +        goto out;
>> +    }
>> +
>> +    fsfreeze_status = FREEZE_INPROGRESS;
>> +
>> +    entry = mount_list;
>> +    while(entry) {
>> +        fd = qemu_open(entry->dirname, O_RDONLY);
>> +        if (fd == -1) {
>> +            ret = errno;
>> +            goto error;
>> +        }
>> +        ret = ioctl(fd, FIFREEZE);
> 
> If you close(fd) here then it won't leak or need extra code in the error path.

Good point, will fix.

>> +static xmlrpc_value *va_fsthaw(xmlrpc_env *env,
>> +                               xmlrpc_value *params,
>> +                               void *user_data)
>> +{
>> +    xmlrpc_int32 ret;
>> +    xmlrpc_value *result;
>> +    struct direntry *entry;
>> +    int fd, i = 0;
>> +    SLOG("va_fsthaw()");
>> +
>> +    if (fsfreeze_status == FREEZE_THAWED) {
>> +        ret = 0;
>> +        goto out;
>> +    }
> 
> A stricter check would be status FREEZE_FROZEN.

Yep, will fix

>> +
>> +    while((entry = mount_list)) {
>> +        fd = qemu_open(entry->dirname, O_RDONLY);
>> +        if (fd == -1) {
>> +            ret = -1;
>> +            goto out;
>> +        }
>> +        ret = ioctl(fd, FITHAW);
> 
> Same thing about close(fd) here.

Thanks for the review, all valid points!

Cheers,
Jes
Stefan Hajnoczi Feb. 1, 2011, 2:34 p.m. UTC | #3
On Tue, Feb 1, 2011 at 2:26 PM, Jes Sorensen <Jes.Sorensen@redhat.com> wrote:
> On 02/01/11 15:12, Stefan Hajnoczi wrote:
>> On Tue, Feb 1, 2011 at 10:58 AM,  <Jes.Sorensen@redhat.com> wrote:
>>> From: Jes Sorensen <Jes.Sorensen@redhat.com>
>>>
>>> Implement freeze/thaw support in the guest, allowing the host to
>>> request the guest freezes all it's file systems before a live snapshot
>>> is performed.
>>>  - fsfreeze(): Walk the list of mounted local real file systems,
>>>               and freeze them.
>>>  - fsthaw():   Walk the list of previously frozen file systems and
>>>               thaw them.
>>>  - fsstatus(): Return the current status of freeze/thaw. The host must
>>>               poll this function, in case fsfreeze() returned with a
>>>               timeout, to wait for the operation to finish.
>>
>> It is desirable to minimize the freeze time, which may interrupt or
>> degrade the service that applications inside the VM can provide.
>> Polling means we have to choose a fixed value (500 ms?) at which to
>> check for freeze completion.  In this example we could have up to 500
>> ms extra time spent in freeze because it completed right after we
>> polled.  Any thoughts on this?
>
> I have to admit you lost me here, where do you get that 500ms time from?
> Is that the XMLRPC polling time or? I just used the example code from
> other agent calls.

500 ms is made up.  I was thinking, "what would a reasonable polling
interval be?" and picked a sub-second number.

Can you explain how the timeout in fsfreeze can happen?  It's probably
because I don't know the virtagent details.

Stefan
Jes Sorensen Feb. 1, 2011, 2:36 p.m. UTC | #4
On 02/01/11 15:34, Stefan Hajnoczi wrote:
> On Tue, Feb 1, 2011 at 2:26 PM, Jes Sorensen <Jes.Sorensen@redhat.com> wrote:
>> I have to admit you lost me here, where do you get that 500ms time from?
>> Is that the XMLRPC polling time or? I just used the example code from
>> other agent calls.
> 
> 500 ms is made up.  I was thinking, "what would a reasonable polling
> interval be?" and picked a sub-second number.
> 
> Can you explain how the timeout in fsfreeze can happen?  It's probably
> because I don't know the virtagent details.

Ah ok.

From what I understand, the XMLRPC code is set up to time out if the guest
doesn't reply within a certain amount of time. In that case, the caller
needs to poll to wait for the guest to complete the freeze. This really
should only happen if you have a guest with a large number of very large
file systems. I don't know how likely it is to happen in real life.

Cheers,
Jes
Stefan Hajnoczi Feb. 1, 2011, 2:41 p.m. UTC | #5
On Tue, Feb 1, 2011 at 2:36 PM, Jes Sorensen <Jes.Sorensen@redhat.com> wrote:
> On 02/01/11 15:34, Stefan Hajnoczi wrote:
>> On Tue, Feb 1, 2011 at 2:26 PM, Jes Sorensen <Jes.Sorensen@redhat.com> wrote:
>>> I have to admit you lost me here, where do you get that 500ms time from?
>>> Is that the XMLRPC polling time or? I just used the example code from
>>> other agent calls.
>>
>> 500 ms is made up.  I was thinking, "what would a reasonable polling
>> interval be?" and picked a sub-second number.
>>
>> Can you explain how the timeout in fsfreeze can happen?  It's probably
>> because I don't know the virtagent details.
>
> Ah ok.
>
> From what I understand, the XMLRPC code is setup to timeout if the guest
> doesn't reply within a certain amount of time. In that case, the caller
> needs to poll to wait for the guest to complete the freeze. This really
> should only happen if you have a guest with a large number of very large
> file systems. I don't know how likely it is to happen in real life.

Perhaps Michael can confirm that the freeze function continues to
execute after timeout but the client is able to send fsstatus()
requests?

Stefan
Adam Litke Feb. 1, 2011, 2:48 p.m. UTC | #6
On Tue, 2011-02-01 at 11:58 +0100, Jes.Sorensen@redhat.com wrote:
> +/*
> + * va_fsfreeze(): Walk list of mounted file systems in the guest, and
> + *   freeze the ones which are real local file systems.
> + * rpc return values: Number of file systems frozen, -1 on error.
> + */
> +static xmlrpc_value *va_fsfreeze(xmlrpc_env *env,
> +                                 xmlrpc_value *params,
> +                                 void *user_data)
> +{
> +    xmlrpc_int32 ret = 0, i = 0;
> +    xmlrpc_value *result;
> +    struct direntry *entry;
> +    int fd;
> +    SLOG("va_fsfreeze()");
> +
> +    if (fsfreeze_status == FREEZE_FROZEN) {
> +        ret = 0;
> +        goto out;
> +    }
> +
> +    ret = build_mount_list();
> +    if (ret < 0) {
> +        goto out;
> +    }
> +
> +    fsfreeze_status = FREEZE_INPROGRESS;
> +
> +    entry = mount_list;
> +    while(entry) {
> +        fd = qemu_open(entry->dirname, O_RDONLY);
> +        if (fd == -1) {
> +            ret = errno;
> +            goto error;
> +        }
> +        ret = ioctl(fd, FIFREEZE);
> +        if (ret < 0 && ret != EOPNOTSUPP) {
> +            goto error;
> +        }

Here we silently ignore filesystems that do not support the FIFREEZE
ioctl.  Do we need to have a more complex return value so that we can
communicate which mount points could not be frozen?  Otherwise, an
unsuspecting host could retrieve a corrupted snapshot of that
filesystem, right?
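
One possible shape for this, sketched with a hypothetical skipped counter that a richer return value could report back to the host:

        ret = ioctl(fd, FIFREEZE);
        close(fd);
        if (ret < 0) {
            if (errno == EOPNOTSUPP) {
                skipped++;   /* hypothetical count of mounts that could not be
                                frozen, for the host to judge the snapshot by */
            } else {
                goto error;
            }
        }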

> +
> +        close(fd);
> +        entry = entry->next;
> +        i++;
> +    }
> +
> +    fsfreeze_status = FREEZE_FROZEN;
> +    ret = i;
> +out:
> +    result = xmlrpc_build_value(env, "i", ret);
> +    return result;
> +error:
> +    if (i > 0) {
> +        fsfreeze_status = FREEZE_ERROR;
> +    }
> +    goto out;
> +}
Jes Sorensen Feb. 1, 2011, 3:02 p.m. UTC | #7
On 02/01/11 15:48, Adam Litke wrote:
> On Tue, 2011-02-01 at 11:58 +0100, Jes.Sorensen@redhat.com wrote:
>> +/*
>> + * va_fsfreeze(): Walk list of mounted file systems in the guest, and
>> + *   freeze the ones which are real local file systems.
>> + * rpc return values: Number of file systems frozen, -1 on error.
>> + */
>> +static xmlrpc_value *va_fsfreeze(xmlrpc_env *env,
>> +                                 xmlrpc_value *params,
>> +                                 void *user_data)
>> +{
>> +    xmlrpc_int32 ret = 0, i = 0;
>> +    xmlrpc_value *result;
>> +    struct direntry *entry;
>> +    int fd;
>> +    SLOG("va_fsfreeze()");
>> +
>> +    if (fsfreeze_status == FREEZE_FROZEN) {
>> +        ret = 0;
>> +        goto out;
>> +    }
>> +
>> +    ret = build_mount_list();
>> +    if (ret < 0) {
>> +        goto out;
>> +    }
>> +
>> +    fsfreeze_status = FREEZE_INPROGRESS;
>> +
>> +    entry = mount_list;
>> +    while(entry) {
>> +        fd = qemu_open(entry->dirname, O_RDONLY);
>> +        if (fd == -1) {
>> +            ret = errno;
>> +            goto error;
>> +        }
>> +        ret = ioctl(fd, FIFREEZE);
>> +        if (ret < 0 && ret != EOPNOTSUPP) {
>> +            goto error;
>> +        }
> 
> Here we silently ignore filesystems that do not support the FIFREEZE
> ioctl.  Do we need to have a more complex return value so that we can
> communicate which mount points could not be frozen?  Otherwise, an
> unsuspecting host could retrieve a corrupted snapshot of that
> filesystem, right?

That is correct; however, most Linux file systems do support it, and for
the ones that don't, there really isn't anything we can do.

Cheers,
Jes
Michael Roth Feb. 1, 2011, 4:50 p.m. UTC | #8
On 02/01/2011 04:58 AM, Jes.Sorensen@redhat.com wrote:
> From: Jes Sorensen<Jes.Sorensen@redhat.com>
>
> Implement freeze/thaw support in the guest, allowing the host to
> request the guest freezes all it's file systems before a live snapshot
> is performed.
>   - fsfreeze(): Walk the list of mounted local real file systems,
>                 and freeze them.
>   - fsthaw():   Walk the list of previously frozen file systems and
>                 thaw them.
>   - fsstatus(): Return the current status of freeze/thaw. The host must
>                 poll this function, in case fsfreeze() returned with a
> 	       timeout, to wait for the operation to finish.
>
> Signed-off-by: Jes Sorensen<Jes.Sorensen@redhat.com>
> ---
>   virtagent-common.h |    8 ++
>   virtagent-server.c |  196 ++++++++++++++++++++++++++++++++++++++++++++++++++++
>   2 files changed, 204 insertions(+), 0 deletions(-)
>
> diff --git a/virtagent-common.h b/virtagent-common.h
> index 5d8f5c1..220a4b6 100644
> --- a/virtagent-common.h
> +++ b/virtagent-common.h
> @@ -61,6 +61,14 @@ typedef struct VAContext {
>       const char *channel_path;
>   } VAContext;
>
> +enum vs_fsfreeze_status {
> +    FREEZE_ERROR = -1,
> +    FREEZE_THAWED = 0,
> +    FREEZE_INPROGRESS = 1,
> +    FREEZE_FROZEN = 2,
> +    FREEZE_THAWINPROGRESS = 3,
> +};

Any reason for vs_* vs. va_*?

> +
>   enum va_job_status {
>       VA_JOB_STATUS_PENDING = 0,
>       VA_JOB_STATUS_OK,
> diff --git a/virtagent-server.c b/virtagent-server.c
> index 7bb35b2..cf2a3f0 100644
> --- a/virtagent-server.c
> +++ b/virtagent-server.c
> @@ -14,6 +14,13 @@
>   #include<syslog.h>
>   #include "qemu_socket.h"
>   #include "virtagent-common.h"
> +#include<mntent.h>
> +#include<sys/types.h>
> +#include<sys/stat.h>
> +#include<sys/errno.h>
> +#include<sys/ioctl.h>
> +#include<fcntl.h>
> +#include<linux/fs.h>

Can probably clean these up a bit, I believe fcntl.h/errno.h/stat.h are 
already available at least.

>
>   static VAServerData *va_server_data;
>   static bool va_enable_syslog = false; /* enable syslog'ing of RPCs */
> @@ -217,6 +224,189 @@ static xmlrpc_value *va_hello(xmlrpc_env *env,
>       return result;
>   }
>
> +
> +/*
> + * Walk the mount table and build a list of local file systems
> + */
> +
> +struct direntry {
> +    char *dirname;
> +    char *devtype;
> +    struct direntry *next;
> +};
> +
> +static struct direntry *mount_list;
> +static int fsfreeze_status;
> +
> +static int build_mount_list(void)
> +{
> +    struct mntent *mnt;
> +    struct direntry *entry;
> +    struct direntry *next;
> +    char const *mtab = MOUNTED;
> +    FILE *fp;
> +
> +    fp = setmntent(mtab, "r");
> +    if (!fp) {
> +	fprintf(stderr, "unable to read mtab\n");
> +	goto fail;
> +    }
> +
> +    while ((mnt = getmntent(fp))) {
> +	/*
> +	 * An entry which device name doesn't start with a '/' is
> +	 * either a dummy file system or a network file system.
> +	 * Add special handling for smbfs and cifs as is done by
> +	 * coreutils as well.
> +	 */
> +	if ((mnt->mnt_fsname[0] != '/') ||
> +	    (strcmp(mnt->mnt_type, "smbfs") == 0) ||
> +	    (strcmp(mnt->mnt_type, "cifs") == 0)) {
> +	    continue;
> +	}
> +
> +	entry = qemu_malloc(sizeof(struct direntry));
> +	if (!entry) {
> +	    goto fail;
> +	}
> +	entry->dirname = qemu_strdup(mnt->mnt_dir);
> +	entry->devtype = qemu_strdup(mnt->mnt_type);
> +	entry->next = mount_list;
> +
> +	mount_list = entry;
> +    }
> +
> +    endmntent(fp);
> +
> +    return 0;
> +
> +fail:
> +    while(mount_list) {
> +	next = mount_list->next;
> +	qemu_free(mount_list->dirname);
> +	qemu_free(mount_list->devtype);
> +	qemu_free(mount_list);
> +	mount_list = next;
> +    }

should be spaces instead of tabs

> +
> +    return -1;
> +}
> +
> +/*
> + * va_fsfreeze(): Walk list of mounted file systems in the guest, and
> + *   freeze the ones which are real local file systems.
> + * rpc return values: Number of file systems frozen, -1 on error.
> + */
> +static xmlrpc_value *va_fsfreeze(xmlrpc_env *env,
> +                                 xmlrpc_value *params,
> +                                 void *user_data)
> +{
> +    xmlrpc_int32 ret = 0, i = 0;
> +    xmlrpc_value *result;
> +    struct direntry *entry;
> +    int fd;
> +    SLOG("va_fsfreeze()");
> +
> +    if (fsfreeze_status == FREEZE_FROZEN) {
> +        ret = 0;
> +        goto out;
> +    }
> +
> +    ret = build_mount_list();
> +    if (ret<  0) {
> +        goto out;
> +    }
> +
> +    fsfreeze_status = FREEZE_INPROGRESS;
> +
> +    entry = mount_list;

I think as we start adding more and more stateful RPCs, free-floating 
state variables can start getting a bit hairy to keep track of. 
Eventually I'd like to have state information that only applies to a 
subset of RPCs consolidated into a single object. I wouldn't focus on 
this too much because I'd like to have an interface to do this in the 
future (mainly so that state objects can register themselves and
provide a reset() function that can be called when, for instance, an 
agent disconnects/reconnects), but in the meantime I think it would be 
more readable to have a global va_fsfreeze_state object to track freeze 
status and mount points.
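
A rough sketch of the consolidated state object described here; the type and field names are invented:

    typedef struct VAFSFreezeState {
        enum vs_fsfreeze_status status;
        struct direntry *mount_list;
    } VAFSFreezeState;

    static VAFSFreezeState va_fsfreeze_state = {
        .status = FREEZE_THAWED,
    };

    /* a future reset() hook, run on agent disconnect/reconnect, could thaw
     * anything still frozen and free the mount list */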

> +    while(entry) {
> +        fd = qemu_open(entry->dirname, O_RDONLY);
> +        if (fd == -1) {
> +            ret = errno;
> +            goto error;
> +        }
> +        ret = ioctl(fd, FIFREEZE);
> +        if (ret<  0&&  ret != EOPNOTSUPP) {
> +            goto error;
> +        }
> +
> +        close(fd);
> +        entry = entry->next;
> +        i++;
> +    }
> +
> +    fsfreeze_status = FREEZE_FROZEN;
> +    ret = i;
> +out:
> +    result = xmlrpc_build_value(env, "i", ret);
> +    return result;
> +error:
> +    if (i>  0) {
> +        fsfreeze_status = FREEZE_ERROR;
> +    }
> +    goto out;
> +}
> +
> +/*
> + * va_fsthaw(): Walk list of frozen file systems in the guest, and
> + *   thaw them.
> + * rpc return values: Number of file systems thawed on success, -1 on error.
> + */
> +static xmlrpc_value *va_fsthaw(xmlrpc_env *env,
> +                               xmlrpc_value *params,
> +                               void *user_data)
> +{
> +    xmlrpc_int32 ret;
> +    xmlrpc_value *result;
> +    struct direntry *entry;
> +    int fd, i = 0;
> +    SLOG("va_fsthaw()");
> +
> +    if (fsfreeze_status == FREEZE_THAWED) {
> +        ret = 0;
> +        goto out;
> +    }
> +
> +    while((entry = mount_list)) {
> +        fd = qemu_open(entry->dirname, O_RDONLY);
> +        if (fd == -1) {
> +            ret = -1;
> +            goto out;
> +        }
> +        ret = ioctl(fd, FITHAW);
> +        if (ret<  0&&  ret != EOPNOTSUPP) {
> +            ret = -1;
> +            goto out;
> +	}

whitespace issues

> +        close(fd);
> +
> +        mount_list = entry->next;
> +        qemu_free(entry->dirname);
> +        qemu_free(entry->devtype);
> +        qemu_free(entry);
> +        i++;
> +    }
> +
> +    fsfreeze_status = FREEZE_THAWED;
> +    ret = i;
> +out:
> +    result = xmlrpc_build_value(env, "i", ret);
> +    return result;
> +}
> +
> +/* va_fsstatus(): Return status of freeze/thaw
> + * rpc return values: fsfreeze_status
> + */
> +static xmlrpc_value *va_fsstatus(xmlrpc_env *env,
> +                                 xmlrpc_value *params,
> +                                 void *user_data)
> +{
> +    xmlrpc_value *result = xmlrpc_build_value(env, "i", fsfreeze_status);
> +    SLOG("va_fsstatus()");
> +    return result;
> +}

Hmm, you mentioned before that these freezes may be long-running 
jobs...do the ioctl()s not return until completion? There is a global
timeout in virtagent, currently under a minute, to prevent a virtagent 
monitor command from hanging the monitor session, so if it's unlikely 
you'll fit in this window we'll need to work on something to better 
support these kinds of situations.

The 3 main approaches would be:

1) allow command-specific timeouts with values that are sane for the 
command in question, and potentially allow timeouts to be disabled
2) fork() long running jobs and provide a mechanism for them to provide 
asynchronous updates to us so we can query status
3) fork() long running jobs, have them provide status information 
elsewhere, and provide a polling function to check that status

3) would likely require something like writing status to a file and then 
providing a polling function to check it, which doesn't work here, so
that's probably out.

I'd initially planned on doing 2) at some point, but I'm beginning to 
think 1) is the better approach, since qemu "opts in" on how long it's 
willing to hang for a particular command, so there aren't really any
surprises. At least not to qemu...users might get worried after a while, 
so there is a bit of a trade-off. But it's also more user-friendly....no 
need for polling or dealing with asynchronous updates to figure out when 
an RPC has actually finished. Seem reasonable?

> +
>   typedef struct RPCFunction {
>       xmlrpc_value *(*func)(xmlrpc_env *env, xmlrpc_value *param, void *unused);
>       const char *func_name;
> @@ -237,6 +427,12 @@ static RPCFunction guest_functions[] = {
>         .func_name = "va.ping" },
>       { .func = va_capabilities,
>         .func_name = "va.capabilities" },
> +    { .func = va_fsfreeze,
> +      .func_name = "va.fsfreeze" },
> +    { .func = va_fsthaw,
> +      .func_name = "va.fsthaw" },
> +    { .func = va_fsstatus,
> +      .func_name = "va.fsstatus" },
>       { NULL, NULL }
>   };
>   static RPCFunction host_functions[] = {
Michael Roth Feb. 1, 2011, 5:22 p.m. UTC | #9
On 02/01/2011 08:41 AM, Stefan Hajnoczi wrote:
> On Tue, Feb 1, 2011 at 2:36 PM, Jes Sorensen<Jes.Sorensen@redhat.com>  wrote:
>> On 02/01/11 15:34, Stefan Hajnoczi wrote:
>>> On Tue, Feb 1, 2011 at 2:26 PM, Jes Sorensen<Jes.Sorensen@redhat.com>  wrote:
>>>> I have to admit you lost me here, where do you get that 500ms time from?
>>>> Is that the XMLRPC polling time or? I just used the example code from
>>>> other agent calls.
>>>
>>> 500 ms is made up.  I was thinking, "what would a reasonable polling
>>> interval be?" and picked a sub-second number.
>>>
>>> Can you explain how the timeout in fsfreeze can happen?  It's probably
>>> because I don't know the virtagent details.
>>
>> Ah ok.
>>
>>  From what I understand, the XMLRPC code is setup to timeout if the guest
>> doesn't reply within a certain amount of time. In that case, the caller
>> needs to poll to wait for the guest to complete the freeze. This really
>> should only happen if you have a guest with a large number of very large
>> file systems. I don't know how likely it is to happen in real life.
>
> Perhaps Michael can confirm that the freeze function continues to
> execute after timeout but the client is able to send fsstatus()
> requests?

Ahh, yeah there's the confusion: we only execute one RPC at a time, so a 
polling function for a previous RPC won't work unless that RPC is being 
done concurrently, via fork()ing or something and communicating status 
via some method of IPC.

I touched on possible approaches to dealing with this in the response I 
just sent to this patch.

>
> Stefan
Stefan Hajnoczi Feb. 2, 2011, 7:57 a.m. UTC | #10
On Tue, Feb 1, 2011 at 10:58 AM,  <Jes.Sorensen@redhat.com> wrote:
> From: Jes Sorensen <Jes.Sorensen@redhat.com>
>
> Implement freeze/thaw support in the guest, allowing the host to
> request the guest freezes all it's file systems before a live snapshot
> is performed.
>  - fsfreeze(): Walk the list of mounted local real file systems,
>               and freeze them.

Does this add a requirement that guest agent code issues no disk I/O
in its main loop (e.g. logging)?  Otherwise we might deadlock
ourselves waiting for I/O which is never issued.

Stefan
Jes Sorensen Feb. 2, 2011, 8:38 a.m. UTC | #11
On 02/01/11 17:50, Michael Roth wrote:
> On 02/01/2011 04:58 AM, Jes.Sorensen@redhat.com wrote:
>> +enum vs_fsfreeze_status {
>> +    FREEZE_ERROR = -1,
>> +    FREEZE_THAWED = 0,
>> +    FREEZE_INPROGRESS = 1,
>> +    FREEZE_FROZEN = 2,
>> +    FREEZE_THAWINPROGRESS = 3,
>> +};
> 
> Any reason for vs_* vs. va_*?

Hmmmm let me see if I can find a good excuse for that typo :)

>> diff --git a/virtagent-server.c b/virtagent-server.c
>> index 7bb35b2..cf2a3f0 100644
>> --- a/virtagent-server.c
>> +++ b/virtagent-server.c
>> @@ -14,6 +14,13 @@
>>   #include<syslog.h>
>>   #include "qemu_socket.h"
>>   #include "virtagent-common.h"
>> +#include<mntent.h>
>> +#include<sys/types.h>
>> +#include<sys/stat.h>
>> +#include<sys/errno.h>
>> +#include<sys/ioctl.h>
>> +#include<fcntl.h>
>> +#include<linux/fs.h>
> 
> Can probably clean these up a bit, I believe fcntl.h/errno.h/stat.h are
> already available at least.

Carry-over from writing the code outside of qemu. It would be much
cleaner than relying on the include-everything-and-the-kitchen-sink
global header file, but that's how it is :(

>> +
>> +    fsfreeze_status = FREEZE_INPROGRESS;
>> +
>> +    entry = mount_list;
> 
> I think as we start adding more and more stateful RPCs, free-floating
> state variables can start getting a bit hairy to keep track of.
> Eventually I'd like to have state information that only applies to a
> subset of RPCs consolidated into a single object. I wouldn't focus on
> this too much because I'd like to have an interface to do this in the
> future (mainly so they can state objects can register themselves and
> provide a reset() function that can be called when, for instance, an
> agent disconnects/reconnects), but in the meantime I think it would be
> more readable to have a global va_fsfreeze_state object to track freeze
> status and mount points.

Urgh, what do you mean by object here? I have to admit the word object
always makes me cringe.... I changed the variables to have the va_ prefix.

>> +static xmlrpc_value *va_fsstatus(xmlrpc_env *env,
>> +                                 xmlrpc_value *params,
>> +                                 void *user_data)
>> +{
>> +    xmlrpc_value *result = xmlrpc_build_value(env, "i",
>> fsfreeze_status);
>> +    SLOG("va_fsstatus()");
>> +    return result;
>> +}
> 
> Hmm, you mentioned before that these freezes may be long-running
> jobs...do the ioctl()'s not return until completion? There is global
> timeout in virtagent, currently under a minute, to prevent a virtagent
> monitor command from hanging the monitor session, so if it's unlikely
> you'll fit in this window we'll need to work on something to better
> support these this kinds of situations.

I think 1 minute is fine, but we should probably look at something a
little more flexible for handling commands over the longer term. Maybe
have virtagent spawn threads for executing some commands?

> The 3 main approaches would be:
> 
> 1) allow command-specific timeouts with values that are sane for the
> command in question, and potentially allow timeouts to be disabled
> 2) fork() long running jobs and provide a mechanism for them to provide
> asynchronous updates to us to we can query status
> 3) fork() long running jobs, have them provide status information
> elsewhere, and provide a polling function to check that status
> 
> 3) would likely require something like writing status to a file and then
> provide a polling function to check it, which doesn't work here so
> that's probably out.
> 
> I'd initially planned on doing 2) at some point, but I'm beginning to
> think 1) is the better approach, since qemu "opts in" on how long it's
> willing to hang for a particular command, so there's not really any
> surprises. At least not to qemu...users might get worried after a while,
> so there is a bit of a trade-off. But it's also more user-friendly....no
> need for polling or dealing with asynchronous updates to figure out when
> an RPC has actually finished. Seem reasonable?

I am not sure which is really the best solution. Basically we will need
to classify commands into two categories, so if you issue a certain type
of command, like agent_fsfreeze() (basically when the agent is in
FREEZE_FROZEN state) only status commands are allowed to execute in
parallel. Anything that tries to issue a write to the file system will
hang until agent_fsthaw is called. However it would be useful to be able
to call in for non-blocking status commands etc.

I'll post a v2 in a minute that addresses the issues pointed out by
Stefan and you. I think the threading/timeout aspect is something we
need to look at for the longer term.

Cheers,
Jes
Jes Sorensen Feb. 2, 2011, 8:48 a.m. UTC | #12
On 02/02/11 08:57, Stefan Hajnoczi wrote:
> On Tue, Feb 1, 2011 at 10:58 AM,  <Jes.Sorensen@redhat.com> wrote:
>> From: Jes Sorensen <Jes.Sorensen@redhat.com>
>>
>> Implement freeze/thaw support in the guest, allowing the host to
>> request the guest freezes all it's file systems before a live snapshot
>> is performed.
>>  - fsfreeze(): Walk the list of mounted local real file systems,
>>               and freeze them.
> 
> Does this add a requirement that guest agent code issues no disk I/O
> in its main loop (e.g. logging)?  Otherwise we might deadlock
> ourselves waiting for I/O which is never issued.

Yes very much so[1] - one reason why it would be nice to have virtagent
use threads to execute the actual commands. We should probably add a
flag to agent commands indicating whether they issue disk I/O or not, so
we can block attempts to execute commands that do so, while the guest is
frozen.

Cheers,
Jes

[1] speaking from experience ... a Linux desktop gets really upset if
you freeze the file systems from a command in an xterm.... ho hum
Michael Roth Feb. 3, 2011, 5:41 p.m. UTC | #13
On 02/02/2011 02:48 AM, Jes Sorensen wrote:
> On 02/02/11 08:57, Stefan Hajnoczi wrote:
>> On Tue, Feb 1, 2011 at 10:58 AM,<Jes.Sorensen@redhat.com>  wrote:
>>> From: Jes Sorensen<Jes.Sorensen@redhat.com>
>>>
>>> Implement freeze/thaw support in the guest, allowing the host to
>>> request the guest freezes all it's file systems before a live snapshot
>>> is performed.
>>>   - fsfreeze(): Walk the list of mounted local real file systems,
>>>                and freeze them.
>>
>> Does this add a requirement that guest agent code issues no disk I/O
>> in its main loop (e.g. logging)?  Otherwise we might deadlock
>> ourselves waiting for I/O which is never issued.
>
> Yes very much so[1] - one reason why it would be nice to have virtagent
> use threads to execute the actual commands. We should probably add a
> flag to agent commands indicating whether they issue disk I/O or not, so
> we can block attempts to execute commands that do so, while the guest is
> frozen.

**Warning, epic response**

For things like logging and i/o on a frozen system...I agree we'd need 
some flag for these kinds of situations. Maybe a disable_logging() 
flag....I really don't like this though... I'd imagine even syslogd() 
could block virtagent in this type of situation, so that would need to 
be disabled as well.

But doing so completely subverts our attempts at providing proper 
accounting of what the agent is doing to the user. A user can freeze the 
filesystem, knowing that logging would be disabled, then prod at 
whatever he wants. So the handling should be something specific to 
fsfreeze, with stricter requirements:

If a user calls fsfreeze(), we disable logging, but also disable the 
ability to do anything other than fsthaw() or fsstatus(). This actually 
solves the potential deadlocking problem for other RPCs as well...since 
they can't be executed in the first place.
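
As a sketch, that restriction could live in the dispatcher; va_rpc_is_allowed() is an invented name:

    static bool va_rpc_is_allowed(const char *func_name)
    {
        if (fsfreeze_status != FREEZE_FROZEN) {
            return true;   /* no restriction while thawed */
        }
        /* while frozen, only allow the calls needed to get unfrozen again */
        return strcmp(func_name, "va.fsthaw") == 0 ||
               strcmp(func_name, "va.fsstatus") == 0;
    }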

So I think that addresses the agent deadlocking itself, post-freeze.

However, fsfreeze() itself might lock up the agent as well...I'm not 
confident we can really put any kind of bound on how long it'll take to 
execute, and if we time out on the client side the agent can still block 
here.

Plus there are any number of other situations where an RPC can still 
hang things...in the future when we potentially allow things like script 
execution, they might do something like attempt to connect to a socket 
that's already in use and wait on the server for an arbitrary amount of 
time, or open a file on an NFS share that is currently unresponsive.

So a solution for these situations is still needed, and I'm starting to 
agree that threads are needed, but I don't think we should do RPCs 
concurrently (not sure if that's what is being suggested or not). At 
least, there's no pressing reason for it as things currently stand 
(there aren't currently any RPCs where fast response times are all that 
important, so it's okay to serialize them behind previous RPCs, and 
HMP/QMP are one command at a time), and it's something that I'm fairly 
confident can be added if the need arises in the future.

But for dealing with a situation where an RPC can hang the agent, I 
think one thread should do it. Basically:

We associate each RPC with a time limit. Some RPCs, very special ones 
that we'd trust with our kids, could potentially specify an unlimited 
timeout. The client side should use this same timeout on its end. In 
the future we might allow the user to explicitly disable the timeout for 
a certain RPC. The logic would then be:

- read in a client RPC request
- start a thread to do RPC
- if there's a timeout, register an alarm(<timeout>), with a handler 
that will call something like pthread_kill(current_worker_thread). On 
the thread side, this signal will induce a pthread_exit()
- wait for the thread to return (pthread_join(current_worker_thread))
- return its response back to the caller if it finished, return a 
timeout indication otherwise (a rough sketch of this flow follows)
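
A rough sketch of that flow, with names invented here. pthread_exit() from a signal handler is not async-signal-safe, so a real implementation would more likely use pthread_cancel() or a shared flag:

    #include <pthread.h>
    #include <signal.h>
    #include <unistd.h>

    static pthread_t current_worker_thread;

    static void worker_exit_handler(int sig)
    {
        pthread_exit(NULL);                 /* abandon the stuck RPC */
    }

    static void rpc_timeout_handler(int sig)
    {
        pthread_kill(current_worker_thread, SIGUSR1);
    }

    static void *rpc_worker(void *request)
    {
        signal(SIGUSR1, worker_exit_handler);
        /* ... execute the RPC handler, stash the response for the caller ... */
        return NULL;
    }

    static int va_dispatch_rpc(void *request, unsigned int timeout_secs)
    {
        signal(SIGALRM, rpc_timeout_handler);
        pthread_create(&current_worker_thread, NULL, rpc_worker, request);
        if (timeout_secs) {
            alarm(timeout_secs);            /* 0 means "trusted, no timeout" */
        }
        pthread_join(current_worker_thread, NULL);
        alarm(0);
        /* return the response if the worker finished, or a timeout
         * indication if the alarm fired (bookkeeping omitted) */
        return 0;
    }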

>
> Cheers,
> Jes
>
> [1] speaking from experience ... a Linux desktop gets really upset if
> you freeze the file systems from a command in an xterm.... ho hum
Stefan Hajnoczi Feb. 4, 2011, 6:13 a.m. UTC | #14
On Thu, Feb 3, 2011 at 5:41 PM, Michael Roth <mdroth@linux.vnet.ibm.com> wrote:
> For things like logging and i/o on a frozen system...I agree we'd need some
> flag for these kinds of situations. Maybe a disable_logging() flag....i
> really don't like this though... I'd imagine even syslogd() could block
> virtagent in this type of situation, so that would need to be disabled as
> well.
>
> But doing so completely subverts our attempts and providing proper
> accounting of what the agent is doing to the user. A user can freeze the
> filesystem, knowing that logging would be disabled, then prod at whatever he
> wants. So the handling should be something specific to fsfreeze, with
> stricter requirements:
>
> If a user calls fsfreeze(), we disable logging, but also disable the ability
> to do anything other than fsthaw() or fsstatus(). This actually solves the
> potential deadlocking problem for other RPCs as well...since they cant be
> executed in the first place.
>
> So I think that addresses the agent deadlocking itself, post-freeze.
>
> However, fsfreeze() itself might lock-up the agent as well...I'm not
> confident we can really put any kind of bound on how long it'll take to
> execute, and if we timeout on the client-side the agent can still block
> here.
>
> Plus there are any number of other situations where an RPC can still hang
> things...in the future when we potentially allow things like script
> execution, they might do something like attempt to connect to a socket
> that's already in use and wait on the server for an arbitrary amount of
> time, or open a file on an nfs share that in currently unresponsive.
>
> So a solution for these situations is still needed, and I'm starting to
> agree that threads are needed, but I don't think we should do RPCs
> concurrently (not sure if that's what is being suggested or not). At least,
> there's no pressing reason for it as things currently stand (there aren't
> currently any RPCs where fast response times are all that important, so it's
> okay to serialize them behind previous RPCs, and HMP/QMP are command at a
> time), and it's something that Im fairly confident can be added if the need
> arises in the future.
>
> But for dealing with a situation where an RPC can hang the agent, I think
> one thread should do it. Basically:
>
> We associate each RPC with a time limit. Some RPCs, very special ones that
> we'd trust with our kids, could potentially specify an unlimited timeout.
> The client side should use this same timeout on it's end. In the future we
> might allow the user to explicitly disable the timeout for a certain RPC.
> The logic would then be:
>
> - read in a client RPC request
> - start a thread to do RPC
> - if there's a timeout, register an alarm(<timeout>), with a handler that
> will call something like pthread_kill(current_worker_thread). On the thread
> side, this signal will induce a pthread_exit()
> - wait for the thread to return (pthread_join(current_worker_thread))
> - return it's response back to the caller if it finished, return a timeout
> indication otherwise

I'm not sure about a timeout inside virtagent.  A client needs to
protect itself with its own timeout and shouldn't rely on the server
to prevent it from locking up - especially since the server is a guest
which we have no control over.  So the timeout does not help the
guest.

Aborting an RPC handler could leave the system in an inconsistent
state unless we are careful.  For example, aborting freeze requires
thawing those file systems that have been successfully frozen so far.
For other handlers it might leave temporary files around, or if they
are not carefully written may partially update files in-place and
leave them corrupted.

So instead of a blanket timeout, I think handlers that perform
operations that may block for unknown periods of time could
specifically use timeouts.  That gives the handler control to perform
cleanup.

Stefan
Jes Sorensen Feb. 4, 2011, 11:03 a.m. UTC | #15
On 02/03/11 18:41, Michael Roth wrote:
> On 02/02/2011 02:48 AM, Jes Sorensen wrote:
>> Yes very much so[1] - one reason why it would be nice to have virtagent
>> use threads to execute the actual commands. We should probably add a
>> flag to agent commands indicating whether they issue disk I/O or not, so
>> we can block attempts to execute commands that do so, while the guest is
>> frozen.
> 
> **Warning, epic response**
> 
> For things like logging and i/o on a frozen system...I agree we'd need
> some flag for these kinds of situations. Maybe a disable_logging()
> flag....i really don't like this though... I'd imagine even syslogd()
> could block virtagent in this type of situation, so that would need to
> be disabled as well.

One way to resolve this would be to have the logging handled in its own
thread, which uses non-blocking calls to do the actual logging.
Obviously we'd have to use a non-file-system-based communication method
between the main thread and the logging thread :)

> But doing so completely subverts our attempts and providing proper
> accounting of what the agent is doing to the user. A user can freeze the
> filesystem, knowing that logging would be disabled, then prod at
> whatever he wants. So the handling should be something specific to
> fsfreeze, with stricter requirements:
> 
> If a user calls fsfreeze(), we disable logging, but also disable the
> ability to do anything other than fsthaw() or fsstatus(). This actually
> solves the potential deadlocking problem for other RPCs as well...since
> they cant be executed in the first place.

I disagree that we should block all calls, except for fsfreeze/fsstatus/
fsthaw in this case. There are other calls that could be valid in this
situation, so I think it needs to be evaluated on a case-by-case basis.

> So a solution for these situations is still needed, and I'm starting to
> agree that threads are needed, but I don't think we should do RPCs
> concurrently (not sure if that's what is being suggested or not). At
> least, there's no pressing reason for it as things currently stand
> (there aren't currently any RPCs where fast response times are all that
> important, so it's okay to serialize them behind previous RPCs, and
> HMP/QMP are command at a time), and it's something that Im fairly
> confident can be added if the need arises in the future.

Eventually I think we will need to be able to support concurrent RPC
calls. There can be situations where an operation takes a long time
while it is valid to be able to ping the guest agent to verify that it
is still alive etc.

Cheers,
Jes
Michael Roth Feb. 4, 2011, 4:27 p.m. UTC | #16
On 02/04/2011 12:13 AM, Stefan Hajnoczi wrote:
> On Thu, Feb 3, 2011 at 5:41 PM, Michael Roth<mdroth@linux.vnet.ibm.com>  wrote:
>> For things like logging and i/o on a frozen system...I agree we'd need some
>> flag for these kinds of situations. Maybe a disable_logging() flag....i
>> really don't like this though... I'd imagine even syslogd() could block
>> virtagent in this type of situation, so that would need to be disabled as
>> well.
>>
>> But doing so completely subverts our attempts and providing proper
>> accounting of what the agent is doing to the user. A user can freeze the
>> filesystem, knowing that logging would be disabled, then prod at whatever he
>> wants. So the handling should be something specific to fsfreeze, with
>> stricter requirements:
>>
>> If a user calls fsfreeze(), we disable logging, but also disable the ability
>> to do anything other than fsthaw() or fsstatus(). This actually solves the
>> potential deadlocking problem for other RPCs as well...since they cant be
>> executed in the first place.
>>
>> So I think that addresses the agent deadlocking itself, post-freeze.
>>
>> However, fsfreeze() itself might lock-up the agent as well...I'm not
>> confident we can really put any kind of bound on how long it'll take to
>> execute, and if we timeout on the client-side the agent can still block
>> here.
>>
>> Plus there are any number of other situations where an RPC can still hang
>> things...in the future when we potentially allow things like script
>> execution, they might do something like attempt to connect to a socket
>> that's already in use and wait on the server for an arbitrary amount of
>> time, or open a file on an nfs share that in currently unresponsive.
>>
>> So a solution for these situations is still needed, and I'm starting to
>> agree that threads are needed, but I don't think we should do RPCs
>> concurrently (not sure if that's what is being suggested or not). At least,
>> there's no pressing reason for it as things currently stand (there aren't
>> currently any RPCs where fast response times are all that important, so it's
>> okay to serialize them behind previous RPCs, and HMP/QMP are command at a
>> time), and it's something that Im fairly confident can be added if the need
>> arises in the future.
>>
>> But for dealing with a situation where an RPC can hang the agent, I think
>> one thread should do it. Basically:
>>
>> We associate each RPC with a time limit. Some RPCs, very special ones that
>> we'd trust with our kids, could potentially specify an unlimited timeout.
>> The client side should use this same timeout on it's end. In the future we
>> might allow the user to explicitly disable the timeout for a certain RPC.
>> The logic would then be:
>>
>> - read in a client RPC request
>> - start a thread to do RPC
>> - if there's a timeout, register an alarm(<timeout>), with a handler that
>> will call something like pthread_kill(current_worker_thread). On the thread
>> side, this signal will induce a pthread_exit()
>> - wait for the thread to return (pthread_join(current_worker_thread))
>> - return it's response back to the caller if it finished, return a timeout
>> indication otherwise
>
> I'm not sure about a timeout inside virtagent.  A client needs to
> protect itself with its own timeout and shouldn't rely on the server
> to prevent it from locking up - especially since the server is a guest
> which we have no control over.  So the timeout does not help the
> guest.

We actually have timeouts for the client already (though they'll need to 
be reworked a bit to handle the proposed solutions); what I'm proposing 
is an additional timeout on the guest/server side for the actual RPCs, 
since a blocking RPC can still hang the guest agent.

>
> Aborting an RPC handler could leave the system in an inconsistent
> state unless we are careful.  For example, aborting freeze requires
> thawing those file systems that have been successfully frozen so far.
> For other handlers it might leave temporary files around, or if they
> are not carefully written may partially update files in-place and
> leave them corrupted.
>
> So instead of a blanket timeout, I think handlers that perform
> operations that may block for unknown periods of time could
> specifically use timeouts.  That gives the handler control to perform
> cleanup.

Good point. I'm not sure I want to push timeout handling to the 
actual RPCs, though....something as simple as open()/read() can block 
indefinitely in certain situations, and it'll be difficult to account 
for every situation, and the resulting code will be tedious as well. I'd 
really like to make the actual RPC as simple as possible, since it's 
something that may be extended heavily over time.

So what if we simply allow an RPC to register a timeout handler at the 
beginning of the RPC call? So when the thread doing the RPC exits we:

- check to see if thread exited as a result of timeout
- check to see if a timeout handler was registered; if so, call it, 
reset the handler, then return a timeout indication
- if it didn't time out, return the response

The only burden this puts on the RPC author is that information they 
need to recover state would need to be accessible outside the thread, 
which is easily done by encapsulating state in static/global structs. So 
the timeout handler for fsfreeze, as it is currently written, would be 
something like:

va_fsfreeze_timeout_handler():
     foreach mnt in fsfreeze.mount_list:
         unfreeze(mnt)
     fsfreeze.mount_list = NULL

We'll need to be careful about lists/objects being in weird states due 
to the forced exit, but I think it's doable.
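
In C, reusing the invented va_fsfreeze_state sketch from earlier in the thread, the handler might look roughly like this:

    static void va_fsfreeze_timeout_handler(void)
    {
        struct direntry *entry, *next;
        int fd;

        for (entry = va_fsfreeze_state.mount_list; entry; entry = next) {
            next = entry->next;
            fd = qemu_open(entry->dirname, O_RDONLY);
            if (fd != -1) {
                ioctl(fd, FITHAW);          /* best-effort unfreeze */
                close(fd);
            }
            qemu_free(entry->dirname);
            qemu_free(entry->devtype);
            qemu_free(entry);
        }
        va_fsfreeze_state.mount_list = NULL;
        va_fsfreeze_state.status = FREEZE_THAWED;
    }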

>
> Stefan
Michael Roth Feb. 4, 2011, 4:51 p.m. UTC | #17
On 02/04/2011 05:03 AM, Jes Sorensen wrote:
> On 02/03/11 18:41, Michael Roth wrote:
>> On 02/02/2011 02:48 AM, Jes Sorensen wrote:
>>> Yes very much so[1] - one reason why it would be nice to have virtagent
>>> use threads to execute the actual commands. We should probably add a
>>> flag to agent commands indicating whether they issue disk I/O or not, so
>>> we can block attempts to execute commands that do so, while the guest is
>>> frozen.
>>
>> **Warning, epic response**
>>
>> For things like logging and i/o on a frozen system...I agree we'd need
>> some flag for these kinds of situations. Maybe a disable_logging()
>> flag....i really don't like this though... I'd imagine even syslogd()
>> could block virtagent in this type of situation, so that would need to
>> be disabled as well.
>
> One way to resolve this would be to have the logging handled in it's own
> thread, which uses non blocking calls to do the actual logging.
> Obviously we'd have to use a non file system based communication method
> between the main thread and the logging thread :)
>

I suspect syslogd() already buffers to some extent. But the problem 
there, as well as the problem with having our own buffered logging 
implementation, is that we can't rely on being able to log an arbitrary 
number of messages. At some point that interface would need to either 
block or start dropping log messages.

If it blocks, we're deadlocked again. If it drops messages, it's trivial 
for someone to flood it with messages after the fsfreeze() to get back 
into a state where they can execute RPCs without any accounting.

So a separate logging thread doesn't buy us much, and what we come up 
with is really going to come down to syslogd:

1) if syslogd blocks, we must disable logging after fsfreeze(). 
Buffering, on syslogd's side or our own, only buys us time. If we 
disable logging, we must only allow absolutely required/benign RPCs. 
fsstatus/fsthaw are required, things like va.ping are benign, and 
potentially useful, in this particular situation. 
copyfile/shutdown/exec/etc should be considered "high-risk"...we want to 
make sure these are logged.

2) if syslogd doesn't block, it either drops messages at some point with 
no indication to us, or it drops them and provides some indication. If 
there's no indication, we must treat this the same way we treat 1), 
since we must assume that logging is effectively disabled. So only 
required or benign RPCs.

if we get some indication when a call to syslog() fails to log/buffer, 
we can allow all RPCs, but treat failures to log as a cue to immediately 
clean up and exit the RPC. fsfreeze() under this circumstance will need 
to make sure it only logs after it does the unfreeze, else we may never 
be able to unfreeze from that point forward.

So the solution is dependent on syslogd's behavior. I'll have to do some 
testing to confirm...but I think the above covers the possibilities.

>> But doing so completely subverts our attempt at providing the user with
>> proper accounting of what the agent is doing. A user can freeze the
>> filesystem, knowing that logging would be disabled, then prod at
>> whatever he wants. So the handling should be something specific to
>> fsfreeze, with stricter requirements:
>>
>> If a user calls fsfreeze(), we disable logging, but also disable the
>> ability to do anything other than fsthaw() or fsstatus(). This actually
>> solves the potential deadlocking problem for other RPCs as well...since
>> they can't be executed in the first place.
>
> I disagree that we should block all calls except for fsfreeze/fsstatus/
> fsthaw in this case. There are other calls that could be valid in this
> situation, so I think it needs to be evaluated on a case-by-case basis.
>
>> So a solution for these situations is still needed, and I'm starting to
>> agree that threads are needed, but I don't think we should do RPCs
>> concurrently (not sure if that's what is being suggested or not). At
>> least, there's no pressing reason for it as things currently stand
>> (there aren't currently any RPCs where fast response times are all that
>> important, so it's okay to serialize them behind previous RPCs, and
>> HMP/QMP are one command at a time), and it's something that I'm fairly
>> confident can be added if the need arises in the future.
>
> Eventually I think we will need to be able to support concurrent RPC
> calls. There can be situations where an operation takes a long time,
> during which it is still valid to ping the guest agent to verify that it
> is alive, etc.

Currently we're limited to one RPC at a time by the monitor (HMP/QMP)
(except for some niche cases where virtagent has multiple requests in 
flight). Ideally this will remain the interface we expose to users, but 
there could be situations where this isn't the case...for instance, 
other bits of qemu making direct calls into virtagent. I think we can 
add support for concurrent RPCs incrementally though...I've thought over 
the implementation details a bit and it seems to be fairly 
straightforward. I think we need to get the basic use cases down first 
though.

>
> Cheers,
> Jes
>
Stefan Hajnoczi Feb. 4, 2011, 4:52 p.m. UTC | #18
On Fri, Feb 4, 2011 at 4:27 PM, Michael Roth <mdroth@linux.vnet.ibm.com> wrote:
>> Aborting an RPC handler could leave the system in an inconsistent
>> state unless we are careful.  For example, aborting freeze requires
>> thawing those file systems that have been successfully frozen so far.
>> For other handlers it might leave temporary files around, or if they
>> are not carefully written may partially update files in-place and
>> leave them corrupted.
>>
>> So instead of a blanket timeout, I think handlers that perform
>> operations that may block for unknown periods of time could
>> specifically use timeouts.  That gives the handler control to perform
>> cleanup.
>
> Good point. I'm not sure I want to push timeout handling into the actual
> RPCs, though....something as simple as open()/read() can block
> indefinitely in certain situations, it'll be difficult to account for
> every situation, and the resulting code will be tedious as well. I'd really
> like to make the actual RPCs as simple as possible, since they're something
> that may be extended heavily over time.
>
> So what if we simply allow an RPC to register a timeout handler at the
> beginning of the RPC call? So when the thread doing the RPC exits we:
>
> - check to see if the thread exited as a result of a timeout
> - check to see if a timeout handler was registered; if so, call it, reset
> the handler, then return a timeout indication
> - if it didn't time out, return the response
>
> The only burden this puts on the RPC author is that any information needed
> to recover state must be accessible outside the thread, which is
> easily done by encapsulating state in static/global structs. So the timeout
> handler for fsfreeze, as it is currently written, would be something like:
>
> va_fsfreeze_timeout_handler():
>    foreach mnt in fsfreeze.mount_list:
>        unfreeze(mnt)
>    fsfreeze.mount_list = NULL
>
> We'll need to be careful about lists/objects being in weird states due to
> the forced exit, but I think it's doable.

Yeah, still requires discipline but it could work.
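
For the record, roughly how I picture the registration side looking
(hypothetical helper names, untested; assumes it lives next to the
mount_list/fsfreeze_status state in virtagent-server.c):

typedef void (*VATimeoutHandler)(void);

static VATimeoutHandler va_timeout_handler;

/* called by an RPC handler at the start of its execution */
static void va_register_timeout_handler(VATimeoutHandler h)
{
    va_timeout_handler = h;
}

/* called by whoever reaps the worker thread, only if the job timed out */
static void va_run_timeout_handler(void)
{
    if (va_timeout_handler) {
        va_timeout_handler();
        va_timeout_handler = NULL;
    }
}

/* fsfreeze's cleanup: thaw whatever got frozen before the forced exit */
static void va_fsfreeze_timeout_handler(void)
{
    struct direntry *entry;

    while ((entry = mount_list)) {
        int fd = qemu_open(entry->dirname, O_RDONLY);
        if (fd != -1) {
            ioctl(fd, FITHAW);
            close(fd);
        }
        mount_list = entry->next;
        qemu_free(entry->dirname);
        qemu_free(entry->devtype);
        qemu_free(entry);
    }
    fsfreeze_status = FREEZE_THAWED;
}

va_fsfreeze() would then call
va_register_timeout_handler(va_fsfreeze_timeout_handler) before it
starts freezing anything.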

Stefan
diff mbox

Patch

diff --git a/virtagent-common.h b/virtagent-common.h
index 5d8f5c1..220a4b6 100644
--- a/virtagent-common.h
+++ b/virtagent-common.h
@@ -61,6 +61,14 @@  typedef struct VAContext {
     const char *channel_path;
 } VAContext;
 
+enum vs_fsfreeze_status {
+    FREEZE_ERROR = -1,
+    FREEZE_THAWED = 0,
+    FREEZE_INPROGRESS = 1,
+    FREEZE_FROZEN = 2,
+    FREEZE_THAWINPROGRESS = 3,
+};
+
 enum va_job_status {
     VA_JOB_STATUS_PENDING = 0,
     VA_JOB_STATUS_OK,
diff --git a/virtagent-server.c b/virtagent-server.c
index 7bb35b2..cf2a3f0 100644
--- a/virtagent-server.c
+++ b/virtagent-server.c
@@ -14,6 +14,13 @@ 
 #include <syslog.h>
 #include "qemu_socket.h"
 #include "virtagent-common.h"
+#include <mntent.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/errno.h>
+#include <sys/ioctl.h>
+#include <fcntl.h>
+#include <linux/fs.h>
 
 static VAServerData *va_server_data;
 static bool va_enable_syslog = false; /* enable syslog'ing of RPCs */
@@ -217,6 +224,189 @@  static xmlrpc_value *va_hello(xmlrpc_env *env,
     return result;
 }
 
+
+/*
+ * Walk the mount table and build a list of local file systems
+ */
+
+struct direntry {
+    char *dirname;
+    char *devtype;
+    struct direntry *next;
+};
+
+static struct direntry *mount_list;
+static int fsfreeze_status;
+
+static int build_mount_list(void)
+{
+    struct mntent *mnt;
+    struct direntry *entry;
+    struct direntry *next;
+    char const *mtab = MOUNTED;
+    FILE *fp;
+
+    fp = setmntent(mtab, "r");
+    if (!fp) {
+	fprintf(stderr, "unable to read mtab\n");
+	goto fail;
+    }
+
+    while ((mnt = getmntent(fp))) {
+	/*
+	 * An entry which device name doesn't start with a '/' is
+	 * either a dummy file system or a network file system.
+	 * Add special handling for smbfs and cifs as is done by
+	 * coreutils as well.
+	 */
+	if ((mnt->mnt_fsname[0] != '/') ||
+	    (strcmp(mnt->mnt_type, "smbfs") == 0) ||
+	    (strcmp(mnt->mnt_type, "cifs") == 0)) {
+	    continue;
+	}
+
+	entry = qemu_malloc(sizeof(struct direntry));
+	if (!entry) {
+	    goto fail;
+	}
+	entry->dirname = qemu_strdup(mnt->mnt_dir);
+	entry->devtype = qemu_strdup(mnt->mnt_type);
+	entry->next = mount_list;
+
+	mount_list = entry;
+    }
+
+    endmntent(fp);
+
+    return 0;
+ 
+fail:
+    while(mount_list) {
+	next = mount_list->next;
+	qemu_free(mount_list->dirname);
+	qemu_free(mount_list->devtype);
+	qemu_free(mount_list);
+	mount_list = next;
+    }
+    
+    return -1;
+}
+
+/*
+ * va_fsfreeze(): Walk list of mounted file systems in the guest, and
+ *   freeze the ones which are real local file systems.
+ * rpc return values: Number of file systems frozen, -1 on error.
+ */
+static xmlrpc_value *va_fsfreeze(xmlrpc_env *env,
+                                 xmlrpc_value *params,
+                                 void *user_data)
+{
+    xmlrpc_int32 ret = 0, i = 0;
+    xmlrpc_value *result;
+    struct direntry *entry;
+    int fd;
+    SLOG("va_fsfreeze()");
+
+    if (fsfreeze_status == FREEZE_FROZEN) {
+        ret = 0;
+        goto out;
+    }
+
+    ret = build_mount_list();
+    if (ret < 0) {
+        goto out;
+    }
+
+    fsfreeze_status = FREEZE_INPROGRESS;
+
+    entry = mount_list;
+    while(entry) {
+        fd = qemu_open(entry->dirname, O_RDONLY);
+        if (fd == -1) {
+            ret = errno;
+            goto error;
+        }
+        ret = ioctl(fd, FIFREEZE);
+        if (ret < 0 && ret != EOPNOTSUPP) {
+            goto error;
+        }
+
+        close(fd);
+        entry = entry->next;
+        i++;
+    }
+
+    fsfreeze_status = FREEZE_FROZEN;
+    ret = i;
+out:
+    result = xmlrpc_build_value(env, "i", ret);
+    return result;
+error:
+    if (i > 0) {
+        fsfreeze_status = FREEZE_ERROR;
+    }
+    goto out;
+}
+
+/*
+ * va_fsthaw(): Walk list of frozen file systems in the guest, and
+ *   thaw them.
+ * rpc return values: Number of file systems thawed on success, -1 on error.
+ */
+static xmlrpc_value *va_fsthaw(xmlrpc_env *env,
+                               xmlrpc_value *params,
+                               void *user_data)
+{
+    xmlrpc_int32 ret;
+    xmlrpc_value *result;
+    struct direntry *entry;
+    int fd, i = 0;
+    SLOG("va_fsthaw()");
+
+    if (fsfreeze_status == FREEZE_THAWED) {
+        ret = 0;
+        goto out;
+    }
+
+    while((entry = mount_list)) {
+        fd = qemu_open(entry->dirname, O_RDONLY);
+        if (fd == -1) {
+            ret = -1;
+            goto out;
+        }
+        ret = ioctl(fd, FITHAW);
+        if (ret < 0 && ret != EOPNOTSUPP) {
+            ret = -1;
+            goto out;
+	}
+        close(fd);
+
+        mount_list = entry->next;
+        qemu_free(entry->dirname);
+        qemu_free(entry->devtype);
+        qemu_free(entry);
+        i++;
+    }
+
+    fsfreeze_status = FREEZE_THAWED;
+    ret = i;
+out:
+    result = xmlrpc_build_value(env, "i", ret);
+    return result;
+}
+
+/* va_fsstatus(): Return status of freeze/thaw
+ * rpc return values: fsfreeze_status
+ */
+static xmlrpc_value *va_fsstatus(xmlrpc_env *env,
+                                 xmlrpc_value *params,
+                                 void *user_data)
+{
+    xmlrpc_value *result = xmlrpc_build_value(env, "i", fsfreeze_status);
+    SLOG("va_fsstatus()");
+    return result;
+}
+
 typedef struct RPCFunction {
     xmlrpc_value *(*func)(xmlrpc_env *env, xmlrpc_value *param, void *unused);
     const char *func_name;
@@ -237,6 +427,12 @@  static RPCFunction guest_functions[] = {
       .func_name = "va.ping" },
     { .func = va_capabilities,
       .func_name = "va.capabilities" },
+    { .func = va_fsfreeze,
+      .func_name = "va.fsfreeze" },
+    { .func = va_fsthaw,
+      .func_name = "va.fsthaw" },
+    { .func = va_fsstatus,
+      .func_name = "va.fsstatus" },
     { NULL, NULL }
 };
 static RPCFunction host_functions[] = {