
[-V2,4/6] hw/9pfs: Implement syncfs

Message ID 1298961534-8099-4-git-send-email-aneesh.kumar@linux.vnet.ibm.com
State New

Commit Message

Aneesh Kumar K.V March 1, 2011, 6:38 a.m. UTC
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
---
 hw/9pfs/virtio-9p.c |   31 +++++++++++++++++++++++++++++++
 hw/9pfs/virtio-9p.h |    2 ++
 2 files changed, 33 insertions(+), 0 deletions(-)

Comments

Stefan Hajnoczi March 1, 2011, 10:22 a.m. UTC | #1
On Tue, Mar 1, 2011 at 6:38 AM, Aneesh Kumar K.V
<aneesh.kumar@linux.vnet.ibm.com> wrote:
> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
> ---
>  hw/9pfs/virtio-9p.c |   31 +++++++++++++++++++++++++++++++
>  hw/9pfs/virtio-9p.h |    2 ++
>  2 files changed, 33 insertions(+), 0 deletions(-)
>
> diff --git a/hw/9pfs/virtio-9p.c b/hw/9pfs/virtio-9p.c
> index c4b0198..882f4f3 100644
> --- a/hw/9pfs/virtio-9p.c
> +++ b/hw/9pfs/virtio-9p.c
> @@ -1978,6 +1978,36 @@ static void v9fs_fsync(V9fsState *s, V9fsPDU *pdu)
>     v9fs_post_do_fsync(s, pdu, err);
>  }
>
> +static void v9fs_post_do_syncfs(V9fsState *s, V9fsPDU *pdu, int err)
> +{
> +    if (err == -1) {
> +        err = -errno;
> +    }
> +    complete_pdu(s, pdu, err);
> +}
> +
> +static void v9fs_syncfs(V9fsState *s, V9fsPDU *pdu)
> +{
> +    int err;
> +    int32_t fid;
> +    size_t offset = 7;
> +    V9fsFidState *fidp;
> +
> +    pdu_unmarshal(pdu, offset, "d", &fid);
> +    fidp = lookup_fid(s, fid);
> +    if (fidp == NULL) {
> +        err = -ENOENT;
> +        v9fs_post_do_syncfs(s, pdu, err);
> +        return;
> +    }
> +    /*
> +     * We don't have per file system syncfs
> +     * So just return success
> +     */
> +    err = 0;
> +    v9fs_post_do_syncfs(s, pdu, err);
> +}

Please explain the semantics of P9_TSYNCFS.  Won't returning success
without doing anything lead to data integrity issues?

It seems unnecessary to split v9fs_post_do_syncfs() into its own
function since there is no blocking point here and a callback will not
be needed.

Stefan
Aneesh Kumar K.V March 1, 2011, 3:02 p.m. UTC | #2
On Tue, 1 Mar 2011 10:22:07 +0000, Stefan Hajnoczi <stefanha@gmail.com> wrote:
> On Tue, Mar 1, 2011 at 6:38 AM, Aneesh Kumar K.V
> <aneesh.kumar@linux.vnet.ibm.com> wrote:
> > Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
> > ---
> >  hw/9pfs/virtio-9p.c |   31 +++++++++++++++++++++++++++++++
> >  hw/9pfs/virtio-9p.h |    2 ++
> >  2 files changed, 33 insertions(+), 0 deletions(-)
> >
> > diff --git a/hw/9pfs/virtio-9p.c b/hw/9pfs/virtio-9p.c
> > index c4b0198..882f4f3 100644
> > --- a/hw/9pfs/virtio-9p.c
> > +++ b/hw/9pfs/virtio-9p.c
> > @@ -1978,6 +1978,36 @@ static void v9fs_fsync(V9fsState *s, V9fsPDU *pdu)
> >     v9fs_post_do_fsync(s, pdu, err);
> >  }
> >
> > +static void v9fs_post_do_syncfs(V9fsState *s, V9fsPDU *pdu, int err)
> > +{
> > +    if (err == -1) {
> > +        err = -errno;
> > +    }
> > +    complete_pdu(s, pdu, err);
> > +}
> > +
> > +static void v9fs_syncfs(V9fsState *s, V9fsPDU *pdu)
> > +{
> > +    int err;
> > +    int32_t fid;
> > +    size_t offset = 7;
> > +    V9fsFidState *fidp;
> > +
> > +    pdu_unmarshal(pdu, offset, "d", &fid);
> > +    fidp = lookup_fid(s, fid);
> > +    if (fidp == NULL) {
> > +        err = -ENOENT;
> > +        v9fs_post_do_syncfs(s, pdu, err);
> > +        return;
> > +    }
> > +    /*
> > +     * We don't have per file system syncfs
> > +     * So just return success
> > +     */
> > +    err = 0;
> > +    v9fs_post_do_syncfs(s, pdu, err);
> > +}
> 
> Please explain the semantics of P9_TSYNCFS.  Won't returning success
> without doing anything lead to data integrity issues?

I should actually include the 9P operation format in the commit message;
will add that in the next update. Whether returning here would cause a
data integrity issue depends on what sort of guarantee we want to
provide. Calling sync in the guest will cause all the dirty pages in
the guest to be flushed to the host. Now all those changes are in the
host page cache, and it would be nice to flush them as part of sync,
but since we don't have a per-file-system sync, the above would imply
flushing all dirty pages on the host, which can result in a large
performance impact.

> 
> It seems unnecessary to split v9fs_post_do_syncfs() into its own
> function since there is no blocking point here and a callback will not
> be needed.
> 

That is done as a placeholder for adding the per-file-system sync call
once we get the support:
http://thread.gmane.org/gmane.linux.file-systems/44628
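
For illustration, once that interface is merged the placeholder could
issue the proposed per-file-system sync on a fid's cached descriptor.
The following is a rough sketch only; FS_IOC_SYNCFS is the ioctl name
proposed in that thread and is not in mainline at the time of writing.

    /*
     * Sketch only: per-file-system sync on one open descriptor using
     * the FS_IOC_SYNCFS ioctl proposed in the thread above.  When the
     * ioctl is unavailable the fallback simply reports success, which
     * matches what the current patch does.
     */
    #include <sys/ioctl.h>
    #include <errno.h>

    static int sketch_syncfs_fd(int fd)
    {
    #ifdef FS_IOC_SYNCFS
        if (ioctl(fd, FS_IOC_SYNCFS) == -1) {
            return -errno;
        }
        return 0;
    #else
        (void)fd;   /* no per-file-system sync available yet */
        return 0;
    #endif
    }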


-aneesh
Stefan Hajnoczi March 1, 2011, 3:59 p.m. UTC | #3
On Tue, Mar 1, 2011 at 3:02 PM, Aneesh Kumar K. V
<aneesh.kumar@linux.vnet.ibm.com> wrote:
> On Tue, 1 Mar 2011 10:22:07 +0000, Stefan Hajnoczi <stefanha@gmail.com> wrote:
>> On Tue, Mar 1, 2011 at 6:38 AM, Aneesh Kumar K.V
>> <aneesh.kumar@linux.vnet.ibm.com> wrote:
>> > Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
>> > ---
>> >  hw/9pfs/virtio-9p.c |   31 +++++++++++++++++++++++++++++++
>> >  hw/9pfs/virtio-9p.h |    2 ++
>> >  2 files changed, 33 insertions(+), 0 deletions(-)
>> >
>> > diff --git a/hw/9pfs/virtio-9p.c b/hw/9pfs/virtio-9p.c
>> > index c4b0198..882f4f3 100644
>> > --- a/hw/9pfs/virtio-9p.c
>> > +++ b/hw/9pfs/virtio-9p.c
>> > @@ -1978,6 +1978,36 @@ static void v9fs_fsync(V9fsState *s, V9fsPDU *pdu)
>> >     v9fs_post_do_fsync(s, pdu, err);
>> >  }
>> >
>> > +static void v9fs_post_do_syncfs(V9fsState *s, V9fsPDU *pdu, int err)
>> > +{
>> > +    if (err == -1) {
>> > +        err = -errno;
>> > +    }
>> > +    complete_pdu(s, pdu, err);
>> > +}
>> > +
>> > +static void v9fs_syncfs(V9fsState *s, V9fsPDU *pdu)
>> > +{
>> > +    int err;
>> > +    int32_t fid;
>> > +    size_t offset = 7;
>> > +    V9fsFidState *fidp;
>> > +
>> > +    pdu_unmarshal(pdu, offset, "d", &fid);
>> > +    fidp = lookup_fid(s, fid);
>> > +    if (fidp == NULL) {
>> > +        err = -ENOENT;
>> > +        v9fs_post_do_syncfs(s, pdu, err);
>> > +        return;
>> > +    }
>> > +    /*
>> > +     * We don't have per file system syncfs
>> > +     * So just return success
>> > +     */
>> > +    err = 0;
>> > +    v9fs_post_do_syncfs(s, pdu, err);
>> > +}
>>
>> Please explain the semantics of P9_TSYNCFS.  Won't returning success
>> without doing anything lead to data integrity issues?
>
> I should actually do the 9P Operation format as commit message. Will
> add in the next update. Whether returning here would cause a data
> integrity issue, it depends what sort of guarantee we want to
> provide. So calling sync on the guest will cause all the dirty pages in
> the guest to be flushed to host. Now all those changes are in the host
> page cache and it would be nice to flush them  as a part of sync but
> then since we don't have a per file system sync, the above would imply
> we flush all dirty pages on the host which can result in large
> performance impact.

You get to define the semantics of P9_TSYNCFS?  I thought this was
part of a well-defined protocol :).  If this is a .L extension then
it's probably a bad design and shouldn't be added to the protocol if
we can't implement it.

Is this operation supposed to flush the disk write cache too?

I think virtio-9p has a file descriptor cache.  Would it be possible
to fsync() those file descriptors?

Stefan
Aneesh Kumar K.V March 1, 2011, 6:02 p.m. UTC | #4
On Tue, 1 Mar 2011 15:59:19 +0000, Stefan Hajnoczi <stefanha@gmail.com> wrote:
> On Tue, Mar 1, 2011 at 3:02 PM, Aneesh Kumar K. V
> <aneesh.kumar@linux.vnet.ibm.com> wrote:
> > On Tue, 1 Mar 2011 10:22:07 +0000, Stefan Hajnoczi <stefanha@gmail.com> wrote:
> >> On Tue, Mar 1, 2011 at 6:38 AM, Aneesh Kumar K.V
> >> <aneesh.kumar@linux.vnet.ibm.com> wrote:
> >> > Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
> >> > ---
> >> >  hw/9pfs/virtio-9p.c |   31 +++++++++++++++++++++++++++++++
> >> >  hw/9pfs/virtio-9p.h |    2 ++
> >> >  2 files changed, 33 insertions(+), 0 deletions(-)
> >> >
> >> > diff --git a/hw/9pfs/virtio-9p.c b/hw/9pfs/virtio-9p.c
> >> > index c4b0198..882f4f3 100644
> >> > --- a/hw/9pfs/virtio-9p.c
> >> > +++ b/hw/9pfs/virtio-9p.c
> >> > @@ -1978,6 +1978,36 @@ static void v9fs_fsync(V9fsState *s, V9fsPDU *pdu)
> >> >     v9fs_post_do_fsync(s, pdu, err);
> >> >  }
> >> >
> >> > +static void v9fs_post_do_syncfs(V9fsState *s, V9fsPDU *pdu, int err)
> >> > +{
> >> > +    if (err == -1) {
> >> > +        err = -errno;
> >> > +    }
> >> > +    complete_pdu(s, pdu, err);
> >> > +}
> >> > +
> >> > +static void v9fs_syncfs(V9fsState *s, V9fsPDU *pdu)
> >> > +{
> >> > +    int err;
> >> > +    int32_t fid;
> >> > +    size_t offset = 7;
> >> > +    V9fsFidState *fidp;
> >> > +
> >> > +    pdu_unmarshal(pdu, offset, "d", &fid);
> >> > +    fidp = lookup_fid(s, fid);
> >> > +    if (fidp == NULL) {
> >> > +        err = -ENOENT;
> >> > +        v9fs_post_do_syncfs(s, pdu, err);
> >> > +        return;
> >> > +    }
> >> > +    /*
> >> > +     * We don't have per file system syncfs
> >> > +     * So just return success
> >> > +     */
> >> > +    err = 0;
> >> > +    v9fs_post_do_syncfs(s, pdu, err);
> >> > +}
> >>
> >> Please explain the semantics of P9_TSYNCFS.  Won't returning success
> >> without doing anything lead to data integrity issues?
> >
> > I should actually do the 9P Operation format as commit message. Will
> > add in the next update. Whether returning here would cause a data
> > integrity issue, it depends what sort of guarantee we want to
> > provide. So calling sync on the guest will cause all the dirty pages in
> > the guest to be flushed to host. Now all those changes are in the host
> > page cache and it would be nice to flush them  as a part of sync but
> > then since we don't have a per file system sync, the above would imply
> > we flush all dirty pages on the host which can result in large
> > performance impact.
> 
> You get the define the semantics of P9_TSYNCFS?  I thought this is
> part of a well-defined protocol :).  If this is a .L extension then
> it's probably a bad design and shouldn't be added to the protocol if
> we can't implement it.

It is part of the .L extension and we can definitely implement it. There
is a patch out there which is yet to be merged:

http://thread.gmane.org/gmane.linux.file-systems/44628

> 
> Is this operation supposed to flush the disk write cache too?

I am not sure we need to specify that as part of the 9P operation. I guess
we can only promise the maximum possible data integrity. Whether a sync
will cause a disk write cache flush depends on the file system; for ext*
that can be controlled by the barrier mount option.

> 
> I think virtio-9p has a file descriptor cache.  Would it be possible
> to fsync() those file descriptors?
> 

Ideally we should. But that would involve a large number of fsync calls.
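
For illustration, such a loop over the cached descriptors might look
roughly like the sketch below; the fid_list, next and fd member names
are assumptions about the fid cache layout, not code from this patch.

    /*
     * Sketch only: walk the server's fid cache and fsync() every open
     * file.  Member names (fid_list, next, fd) are assumptions and do
     * not come from the posted patch.
     */
    #include <errno.h>
    #include <unistd.h>

    static int sketch_fsync_all_fids(V9fsState *s)
    {
        V9fsFidState *fidp;
        int err = 0;

        for (fidp = s->fid_list; fidp != NULL; fidp = fidp->next) {
            if (fidp->fd >= 0 && fsync(fidp->fd) == -1) {
                err = -errno;   /* remember the failure, keep flushing */
            }
        }
        return err;
    }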

-aneesh
Stefan Hajnoczi March 1, 2011, 8:27 p.m. UTC | #5
On Tue, Mar 1, 2011 at 6:02 PM, Aneesh Kumar K. V
<aneesh.kumar@linux.vnet.ibm.com> wrote:
> On Tue, 1 Mar 2011 15:59:19 +0000, Stefan Hajnoczi <stefanha@gmail.com> wrote:
>> >> Please explain the semantics of P9_TSYNCFS.  Won't returning success
>> >> without doing anything lead to data integrity issues?
>> >
>> > I should actually do the 9P Operation format as commit message. Will
>> > add in the next update. Whether returning here would cause a data
>> > integrity issue, it depends what sort of guarantee we want to
>> > provide. So calling sync on the guest will cause all the dirty pages in
>> > the guest to be flushed to host. Now all those changes are in the host
>> > page cache and it would be nice to flush them  as a part of sync but
>> > then since we don't have a per file system sync, the above would imply
>> > we flush all dirty pages on the host which can result in large
>> > performance impact.
>>
>> You get the define the semantics of P9_TSYNCFS?  I thought this is
>> part of a well-defined protocol :).  If this is a .L extension then
>> it's probably a bad design and shouldn't be added to the protocol if
>> we can't implement it.
>
> It is a part of .L extension and we can definitely implement it. There
> is patch out there which is yet to be merged
>
> http://thread.gmane.org/gmane.linux.file-systems/44628

A future Linux-only ioctl :/.

>> Is this operation supposed to flush the disk write cache too?
>
> I am not sure we need to specify that as a part of 9p operation. I guess
> we can only say maximum possible data integrity. Whether a sync will
> cause disk write cache flush depends on the file system. For ext* that
> can be controlled by mount option barrier.

So on a host with a safe configuration this operation should put data
on stable storage?

>>
>> I think virtio-9p has a file descriptor cache.  Would it be possible
>> to fsync() those file descriptors?
>>
>
> Ideally we should. But that would involve a large number of fsync calls.

Yep, that's why this is a weird operation to support, especially since
it's a .L add-on and not original 9P.  What's the use case, since
today's Linux userland cannot directly make use of this operation?  I
guess it has been added in order to pass through a Linux-internal VFS
superblock sync function?

Stefan
Aneesh Kumar K.V March 2, 2011, 5:05 a.m. UTC | #6
On Tue, 1 Mar 2011 20:27:19 +0000, Stefan Hajnoczi <stefanha@gmail.com> wrote:
> On Tue, Mar 1, 2011 at 6:02 PM, Aneesh Kumar K. V
> <aneesh.kumar@linux.vnet.ibm.com> wrote:
> > On Tue, 1 Mar 2011 15:59:19 +0000, Stefan Hajnoczi <stefanha@gmail.com> wrote:
> >> >> Please explain the semantics of P9_TSYNCFS.  Won't returning success
> >> >> without doing anything lead to data integrity issues?
> >> >
> >> > I should actually do the 9P Operation format as commit message. Will
> >> > add in the next update. Whether returning here would cause a data
> >> > integrity issue, it depends what sort of guarantee we want to
> >> > provide. So calling sync on the guest will cause all the dirty pages in
> >> > the guest to be flushed to host. Now all those changes are in the host
> >> > page cache and it would be nice to flush them  as a part of sync but
> >> > then since we don't have a per file system sync, the above would imply
> >> > we flush all dirty pages on the host which can result in large
> >> > performance impact.
> >>
> >> You get the define the semantics of P9_TSYNCFS?  I thought this is
> >> part of a well-defined protocol :).  If this is a .L extension then
> >> it's probably a bad design and shouldn't be added to the protocol if
> >> we can't implement it.
> >
> > It is a part of .L extension and we can definitely implement it. There
> > is patch out there which is yet to be merged
> >
> > http://thread.gmane.org/gmane.linux.file-systems/44628
> 
> A future Linux-only ioctl :/.
> 
> >> Is this operation supposed to flush the disk write cache too?
> >
> > I am not sure we need to specify that as a part of 9p operation. I guess
> > we can only say maximum possible data integrity. Whether a sync will
> > cause disk write cache flush depends on the file system. For ext* that
> > can be controlled by mount option barrier.
> 
> So on a host with a safe configuration this operation should put data
> on stable storage?
> 
> >>
> >> I think virtio-9p has a file descriptor cache.  Would it be possible
> >> to fsync() those file descriptors?
> >>
> >
> > Ideally we should. But that would involve a large number of fsync calls.
> 
> Yep, that's why this is a weird operation to support, especially since
> it's a .L add-on and not original 9P.  What's the use-case since
> today's Linux userland cannot directly make use of this operation?  I
> guess it has been added in order to pass-through a Linux internal vfs
> super block sync function?

IMHO it would be nice to have a syncfs 9P operation, because it enables
the client to ask the server to flush the dirty data "if possible". I
guess we should consider this something the server can choose to
ignore. In a cloud setup even doing a per-file-system sync can imply a
performance impact, because a VirtFS export may not map 1:1 to a mount
point on the host. There is also a plan to add a new option to the
VirtFS export point which makes writes to exported files either O_SYNC
or O_DIRECT, similar to the way it is done for image files. That would
imply Tsyncfs doesn't have much to do, because we no longer have dirty
data in the host page cache.

So from the 9P .L protocol point of view, it is a valid operation which
enables the client to request a flush of the server cache if possible,
and the QEMU 9P server chooses to ignore it because of the performance
impact. If you are not comfortable with doing nothing specific in the
Tsyncfs operation, we can add a sync(2) call as part of this 9P
operation and later switch to FS_IOC_SYNCFS when it becomes available.
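
A minimal sketch of that interim approach, reusing the helpers already
present in virtio-9p.c (pdu_unmarshal, lookup_fid, complete_pdu); it is
illustrative only and not part of the posted patch.

    /*
     * Sketch only: flush everything with sync(2) for now and switch
     * to a per-file-system call once one exists.
     */
    #include <errno.h>
    #include <unistd.h>

    static void v9fs_syncfs_with_sync2(V9fsState *s, V9fsPDU *pdu)
    {
        int32_t fid;
        size_t offset = 7;

        pdu_unmarshal(pdu, offset, "d", &fid);
        if (lookup_fid(s, fid) == NULL) {
            complete_pdu(s, pdu, -ENOENT);
            return;
        }
        sync();                     /* flushes all host file systems */
        complete_pdu(s, pdu, 0);    /* sync(2) itself cannot fail */
    }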

-aneesh
Stefan Hajnoczi March 2, 2011, 10:20 a.m. UTC | #7
On Wed, Mar 2, 2011 at 5:05 AM, Aneesh Kumar K. V
<aneesh.kumar@linux.vnet.ibm.com> wrote:
> On Tue, 1 Mar 2011 20:27:19 +0000, Stefan Hajnoczi <stefanha@gmail.com> wrote:
>> On Tue, Mar 1, 2011 at 6:02 PM, Aneesh Kumar K. V
>> <aneesh.kumar@linux.vnet.ibm.com> wrote:
>> > On Tue, 1 Mar 2011 15:59:19 +0000, Stefan Hajnoczi <stefanha@gmail.com> wrote:
>> >> >> Please explain the semantics of P9_TSYNCFS.  Won't returning success
>> >> >> without doing anything lead to data integrity issues?
>> >> >
>> >> > I should actually do the 9P Operation format as commit message. Will
>> >> > add in the next update. Whether returning here would cause a data
>> >> > integrity issue, it depends what sort of guarantee we want to
>> >> > provide. So calling sync on the guest will cause all the dirty pages in
>> >> > the guest to be flushed to host. Now all those changes are in the host
>> >> > page cache and it would be nice to flush them  as a part of sync but
>> >> > then since we don't have a per file system sync, the above would imply
>> >> > we flush all dirty pages on the host which can result in large
>> >> > performance impact.
>> >>
>> >> You get the define the semantics of P9_TSYNCFS?  I thought this is
>> >> part of a well-defined protocol :).  If this is a .L extension then
>> >> it's probably a bad design and shouldn't be added to the protocol if
>> >> we can't implement it.
>> >
>> > It is a part of .L extension and we can definitely implement it. There
>> > is patch out there which is yet to be merged
>> >
>> > http://thread.gmane.org/gmane.linux.file-systems/44628
>>
>> A future Linux-only ioctl :/.
>>
>> >> Is this operation supposed to flush the disk write cache too?
>> >
>> > I am not sure we need to specify that as a part of 9p operation. I guess
>> > we can only say maximum possible data integrity. Whether a sync will
>> > cause disk write cache flush depends on the file system. For ext* that
>> > can be controlled by mount option barrier.
>>
>> So on a host with a safe configuration this operation should put data
>> on stable storage?
>>
>> >>
>> >> I think virtio-9p has a file descriptor cache.  Would it be possible
>> >> to fsync() those file descriptors?
>> >>
>> >
>> > Ideally we should. But that would involve a large number of fsync calls.
>>
>> Yep, that's why this is a weird operation to support, especially since
>> it's a .L add-on and not original 9P.  What's the use-case since
>> today's Linux userland cannot directly make use of this operation?  I
>> guess it has been added in order to pass-through a Linux internal vfs
>> super block sync function?
>
> IMHO it would be nice to have a syncfs 9p operation because that enables
> the client to say "if possible" flush the dirty data on the server. I
> guess we should consider this as something server can choose to
> ignore. In a cloud setup even doing a per file system sync can imply
> performance impact because VirtFS export may not 1:1 map to mount point
> on host. There is also plan to add a new option to the VirtFs export point
> which enable write to exported files to be either O_SYNC or
> O_DIRECT, similar to the way done for image files. That would imply
> Tsyncfs doesn't have much to do because we don't have dirty data on host
> pagecache anymore.
>
> So from 9p .L protocol point of view, it is a valid operation which
> enables client to request a flush of server cache if possible. And qemu
> 9p server choose to ignore because of the performance impact. If you are
> not comfortable with not doing anything specific in Tsyncfs
> operation, we can add sync(2) call as part of this 9p operation and
> later switch to  FS_IOC_SYNCFS when it become available.

The case we need to prevent is where applications running on virtfs
think they are getting guarantees that the implementation does not
provide.  Silently no-op'ing a sync operation is a step in that
direction, so I agree that sync(2) would be safer.

I'm not sure I understand your 1:1 mount point mapping argument.  The
FS_IOC_SYNCFS ioctl does not help us there since it syncs the entire
filesystem, not the directory tree that virtfs is mapped to.  It will
do a bunch of extra I/O similar to how sync(2) does this across all
filesystems today.  This suggests again that P9_TSYNCFS is hard to
implement because FS_IOC_SYNCFS ends up not being useful.  Did I miss
something?

I'm looking for a use case where guests need a P9_TSYNCFS operation.
P9_TSYNCFS is not in linux-2.6 yet so I don't have any example code
that exploits it.  Can you point me to something that shows off why
this operation is necessary?  It must be an optimization if 9P and NFS
make do without an equivalent?

Stefan
Aneesh Kumar K.V March 2, 2011, 11:28 a.m. UTC | #8
On Wed, 2 Mar 2011 10:20:41 +0000, Stefan Hajnoczi <stefanha@gmail.com> wrote:
> On Wed, Mar 2, 2011 at 5:05 AM, Aneesh Kumar K. V
> <aneesh.kumar@linux.vnet.ibm.com> wrote:
> > On Tue, 1 Mar 2011 20:27:19 +0000, Stefan Hajnoczi <stefanha@gmail.com> wrote:
> >> On Tue, Mar 1, 2011 at 6:02 PM, Aneesh Kumar K. V
> >> <aneesh.kumar@linux.vnet.ibm.com> wrote:
> >> > On Tue, 1 Mar 2011 15:59:19 +0000, Stefan Hajnoczi <stefanha@gmail.com> wrote:
> >> >> >> Please explain the semantics of P9_TSYNCFS.  Won't returning success
> >> >> >> without doing anything lead to data integrity issues?
> >> >> >
> >> >> > I should actually do the 9P Operation format as commit message. Will
> >> >> > add in the next update. Whether returning here would cause a data
> >> >> > integrity issue, it depends what sort of guarantee we want to
> >> >> > provide. So calling sync on the guest will cause all the dirty pages in
> >> >> > the guest to be flushed to host. Now all those changes are in the host
> >> >> > page cache and it would be nice to flush them  as a part of sync but
> >> >> > then since we don't have a per file system sync, the above would imply
> >> >> > we flush all dirty pages on the host which can result in large
> >> >> > performance impact.
> >> >>
> >> >> You get the define the semantics of P9_TSYNCFS?  I thought this is
> >> >> part of a well-defined protocol :).  If this is a .L extension then
> >> >> it's probably a bad design and shouldn't be added to the protocol if
> >> >> we can't implement it.
> >> >
> >> > It is a part of .L extension and we can definitely implement it. There
> >> > is patch out there which is yet to be merged
> >> >
> >> > http://thread.gmane.org/gmane.linux.file-systems/44628
> >>
> >> A future Linux-only ioctl :/.
> >>
> >> >> Is this operation supposed to flush the disk write cache too?
> >> >
> >> > I am not sure we need to specify that as a part of 9p operation. I guess
> >> > we can only say maximum possible data integrity. Whether a sync will
> >> > cause disk write cache flush depends on the file system. For ext* that
> >> > can be controlled by mount option barrier.
> >>
> >> So on a host with a safe configuration this operation should put data
> >> on stable storage?
> >>
> >> >>
> >> >> I think virtio-9p has a file descriptor cache.  Would it be possible
> >> >> to fsync() those file descriptors?
> >> >>
> >> >
> >> > Ideally we should. But that would involve a large number of fsync calls.
> >>
> >> Yep, that's why this is a weird operation to support, especially since
> >> it's a .L add-on and not original 9P.  What's the use-case since
> >> today's Linux userland cannot directly make use of this operation?  I
> >> guess it has been added in order to pass-through a Linux internal vfs
> >> super block sync function?
> >
> > IMHO it would be nice to have a syncfs 9p operation because that enables
> > the client to say "if possible" flush the dirty data on the server. I
> > guess we should consider this as something server can choose to
> > ignore. In a cloud setup even doing a per file system sync can imply
> > performance impact because VirtFS export may not 1:1 map to mount point
> > on host. There is also plan to add a new option to the VirtFs export point
> > which enable write to exported files to be either O_SYNC or
> > O_DIRECT, similar to the way done for image files. That would imply
> > Tsyncfs doesn't have much to do because we don't have dirty data on host
> > pagecache anymore.
> >
> > So from 9p .L protocol point of view, it is a valid operation which
> > enables client to request a flush of server cache if possible. And qemu
> > 9p server choose to ignore because of the performance impact. If you are
> > not comfortable with not doing anything specific in Tsyncfs
> > operation, we can add sync(2) call as part of this 9p operation and
> > later switch to  FS_IOC_SYNCFS when it become available.
> 
> The case we need to prevent is where applications running on virtfs
> think they are getting guarantees that the implementation does not
> provide.  Silently noping a sync operation is a step in that direction
> so I agree that sync(2) would be safer.

I should capture, as part of the commit message, that some
servers can choose to ignore the request and return success.

> 
> I'm not sure I understand your 1:1 mount point mapping argument.  The
> FS_IOC_SYNCFS ioctl does not help us there since it syncs the entire
> filesystem, not the directory tree that virtfs is mapped to.  It will
> do a bunch of extra I/O similar to how sync(2) does this across all
> filesystems today.  This suggests again that P9_TSYNCFS is hard to
> implement because FS_IOC_SYNCFS ends up not being useful.  Did I miss
> something?

What I meant was that even with a 1:1 mount point mapping, some
servers can choose not to implement the cache flush because of the
performance implications of TSYNCFS. (That argument also justifies the
QEMU 9P server ignoring the TSYNCFS request.) So from the client's
perspective it is just a hint to the server to flush the server cache
"if possible".

> 
> I'm looking for a use case where guests need a P9_TSYNCFS operation.
> P9_TSYNCFS is not in linux-2.6 yet so I don't have any example code
> that exploits it.  Can you point me to something that shows off why
> this operation is necessary?  It must an optimization if 9P and NFS
> make do without an equivalent?
> 

The kernel-side implementation is posted here:

http://thread.gmane.org/gmane.linux.kernel/1096376/focus=1096382

It is part of the 9p kernel tree, which is ready to be merged in the next merge window.

-aneesh

Patch

diff --git a/hw/9pfs/virtio-9p.c b/hw/9pfs/virtio-9p.c
index c4b0198..882f4f3 100644
--- a/hw/9pfs/virtio-9p.c
+++ b/hw/9pfs/virtio-9p.c
@@ -1978,6 +1978,36 @@  static void v9fs_fsync(V9fsState *s, V9fsPDU *pdu)
     v9fs_post_do_fsync(s, pdu, err);
 }
 
+static void v9fs_post_do_syncfs(V9fsState *s, V9fsPDU *pdu, int err)
+{
+    if (err == -1) {
+        err = -errno;
+    }
+    complete_pdu(s, pdu, err);
+}
+
+static void v9fs_syncfs(V9fsState *s, V9fsPDU *pdu)
+{
+    int err;
+    int32_t fid;
+    size_t offset = 7;
+    V9fsFidState *fidp;
+
+    pdu_unmarshal(pdu, offset, "d", &fid);
+    fidp = lookup_fid(s, fid);
+    if (fidp == NULL) {
+        err = -ENOENT;
+        v9fs_post_do_syncfs(s, pdu, err);
+        return;
+    }
+    /*
+     * We don't have per file system syncfs
+     * So just return success
+     */
+    err = 0;
+    v9fs_post_do_syncfs(s, pdu, err);
+}
+
 static void v9fs_clunk(V9fsState *s, V9fsPDU *pdu)
 {
     int32_t fid;
@@ -3676,6 +3706,7 @@  static pdu_handler_t *pdu_handlers[] = {
     [P9_TWALK] = v9fs_walk,
     [P9_TCLUNK] = v9fs_clunk,
     [P9_TFSYNC] = v9fs_fsync,
+    [P9_TSYNCFS] = v9fs_syncfs,
     [P9_TOPEN] = v9fs_open,
     [P9_TREAD] = v9fs_read,
 #if 0
diff --git a/hw/9pfs/virtio-9p.h b/hw/9pfs/virtio-9p.h
index 2f49641..23c14d8 100644
--- a/hw/9pfs/virtio-9p.h
+++ b/hw/9pfs/virtio-9p.h
@@ -13,6 +13,8 @@ 
 #define VIRTIO_9P_MOUNT_TAG 0
 
 enum {
+    P9_TSYNCFS = 0,
+    P9_RSYNCFS,
     P9_TLERROR = 6,
     P9_RLERROR,
     P9_TSTATFS = 8,