[RFC,v6,08/23] virtagent: add va.getfile RPC

Message ID 1295270117-24760-9-git-send-email-mdroth@linux.vnet.ibm.com
State New

Commit Message

Michael Roth Jan. 17, 2011, 1:15 p.m. UTC
Add RPC to retrieve a guest file. This interface is intended
for smaller reads like peeking at logs and /proc and such.

Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
---
 virtagent-server.c |   59 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 59 insertions(+), 0 deletions(-)
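
For orientation, a host-side caller of this RPC might look roughly like
the following sketch, using xmlrpc-c's synchronous client API over a
plain URL transport for simplicity; virtagent's actual transport and
client setup differ, and error handling is trimmed:

#include <stdio.h>
#include <stdlib.h>
#include <xmlrpc-c/base.h>
#include <xmlrpc-c/client.h>

/* Sketch of a va.getfile caller. The server URL is a stand-in for
 * virtagent's actual channel. */
static void fetch_file(xmlrpc_env *env, const char *url, const char *path)
{
    xmlrpc_value *resp;
    const unsigned char *data;
    size_t len;

    resp = xmlrpc_client_call(env, url, "va.getfile", "(s)", path);
    if (env->fault_occurred) {
        return;
    }
    /* The RPC returns the file contents base64-encoded ("6" in
     * xmlrpc-c's format language); decomposing yields the raw bytes. */
    xmlrpc_decompose_value(env, resp, "6", &data, &len);
    xmlrpc_DECREF(resp);
    if (!env->fault_occurred) {
        fwrite(data, 1, len, stdout);
        free((void *)data);  /* decompose allocates the buffer */
    }
}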

Comments

Jes Sorensen Jan. 21, 2011, 4:40 p.m. UTC | #1
On 01/17/11 14:15, Michael Roth wrote:
> Add RPC to retrieve a guest file. This interface is intended
> for smaller reads like peeking at logs and /proc and such.
> 
> Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
> ---
>  virtagent-server.c |   59 ++++++++++++++++++++++++++++++++++++++++++++++++++++
>  1 files changed, 59 insertions(+), 0 deletions(-)
> 
> diff --git a/virtagent-server.c b/virtagent-server.c
> index c38a9e0..af4b940 100644
> --- a/virtagent-server.c
> +++ b/virtagent-server.c
> @@ -62,12 +62,71 @@ out:
>      return ret;
>  }
>  
> +/* RPC functions common to guest/host daemons */
> +
> +/* va_getfile(): return file contents
> + * rpc return values:
> + *   - base64-encoded file contents
> + */
> +static xmlrpc_value *va_getfile(xmlrpc_env *env,
> +                                xmlrpc_value *params,
> +                                void *user_data)
> +{
> +    const char *path;
> +    char *file_contents = NULL;
> +    char buf[VA_FILEBUF_LEN];

malloc()!

> +    int fd, ret, count = 0;
> +    xmlrpc_value *result = NULL;
> +
> +    /* parse argument array */
> +    xmlrpc_decompose_value(env, params, "(s)", &path);
> +    if (env->fault_occurred) {
> +        return NULL;
> +    }
> +
> +    SLOG("va_getfile(), path:%s", path);
> +
> +    fd = open(path, O_RDONLY);
> +    if (fd == -1) {
> +        LOG("open failed: %s", strerror(errno));
> +        xmlrpc_faultf(env, "open failed: %s", strerror(errno));
> +        return NULL;
> +    }
> +
> +    while ((ret = read(fd, buf, VA_FILEBUF_LEN)) > 0) {
> +        file_contents = qemu_realloc(file_contents, count + VA_FILEBUF_LEN);
> +        memcpy(file_contents + count, buf, ret);

Sorry, I brought this up before. This realloc() stuff is a disaster
waiting to happen. Please remove it from the patch series until you
have an implementation that copies over a page at a time.

> +        count += ret;

Cheers,
Jes
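
To make the concern concrete, a bounded alternative along the lines Jes
is suggesting could allocate the (already known) maximum once and fail
before overrunning it, rather than calling realloc() on every chunk. A
minimal sketch, with placeholder values standing in for the series'
VA_FILEBUF_LEN and VA_GETFILE_MAX constants:

#include <stdlib.h>
#include <unistd.h>

#define VA_FILEBUF_LEN 4096        /* placeholder; defined by the series */
#define VA_GETFILE_MAX (1 << 20)   /* placeholder; defined by the series */

/* Read at most VA_GETFILE_MAX bytes from fd into a single up-front
 * allocation. Returns NULL on read error, allocation failure, or if
 * the file exceeds the cap; *len_out receives the byte count. */
static char *read_file_bounded(int fd, size_t *len_out)
{
    char *contents = malloc(VA_GETFILE_MAX);
    size_t count = 0;
    ssize_t ret;

    if (contents == NULL) {
        return NULL;
    }
    for (;;) {
        if (count + VA_FILEBUF_LEN > VA_GETFILE_MAX) {
            free(contents);        /* next read could exceed the cap */
            return NULL;
        }
        ret = read(fd, contents + count, VA_FILEBUF_LEN);
        if (ret == 0) {
            break;                 /* EOF */
        }
        if (ret == -1) {
            free(contents);
            return NULL;
        }
        count += ret;
    }
    *len_out = count;
    return contents;
}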
Daniel P. Berrangé Jan. 21, 2011, 5:20 p.m. UTC | #2
On Fri, Jan 21, 2011 at 05:40:54PM +0100, Jes Sorensen wrote:
> On 01/17/11 14:15, Michael Roth wrote:
> > Add RPC to retrieve a guest file. This interface is intended
> > for smaller reads like peeking at logs and /proc and such.
> > 
> > Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
> > ---
> >  virtagent-server.c |   59 ++++++++++++++++++++++++++++++++++++++++++++++++++++
> >  1 files changed, 59 insertions(+), 0 deletions(-)
> > 
> > diff --git a/virtagent-server.c b/virtagent-server.c
> > index c38a9e0..af4b940 100644
> > --- a/virtagent-server.c
> > +++ b/virtagent-server.c
> > @@ -62,12 +62,71 @@ out:
> >      return ret;
> >  }
> >  
> > +/* RPC functions common to guest/host daemons */
> > +
> > +/* va_getfile(): return file contents
> > + * rpc return values:
> > + *   - base64-encoded file contents
> > + */
> > +static xmlrpc_value *va_getfile(xmlrpc_env *env,
> > +                                xmlrpc_value *params,
> > +                                void *user_data)
> > +{
> > +    const char *path;
> > +    char *file_contents = NULL;
> > +    char buf[VA_FILEBUF_LEN];
> 
> malloc()!
> 
> > +    int fd, ret, count = 0;
> > +    xmlrpc_value *result = NULL;
> > +
> > +    /* parse argument array */
> > +    xmlrpc_decompose_value(env, params, "(s)", &path);
> > +    if (env->fault_occurred) {
> > +        return NULL;
> > +    }
> > +
> > +    SLOG("va_getfile(), path:%s", path);
> > +
> > +    fd = open(path, O_RDONLY);
> > +    if (fd == -1) {
> > +        LOG("open failed: %s", strerror(errno));
> > +        xmlrpc_faultf(env, "open failed: %s", strerror(errno));
> > +        return NULL;
> > +    }
> > +
> > +    while ((ret = read(fd, buf, VA_FILEBUF_LEN)) > 0) {
> > +        file_contents = qemu_realloc(file_contents, count + VA_FILEBUF_LEN);
> > +        memcpy(file_contents + count, buf, ret);
> 
> Sorry, I brought this up before. This realloc() stuff is a disaster
> waiting to happen. Please remove it from the patch series until you
> have an implementation that copies over a page at a time.

I can understand the need of virtagent for lifecycle control/interactions
with the guest OS (reboot, shutdown, ping, screen lock/unlock, etc), but
do we really want to reinvent libguestfs for file access? A little dev
work could enable users to install the libguestfs agent into a guest OS,
and access it from the host over virtio-serial + the libguestfs API.

This would be quite a compelling usage model for app developers, because
it would mean that whether the guest OS was running or shut off, they
could use the same libguestfs API for processing guest filesystem images.
The level of functionality provided by libguestfs is really quite
considerable now, letting you do pretty much any operation against
files that you could do via local POSIX for non-virt access, as
well as providing many useful higher-level constructs.

Regards,
Daniel
Michael Roth Jan. 21, 2011, 6:23 p.m. UTC | #3
On 01/21/2011 11:20 AM, Daniel P. Berrange wrote:
> On Fri, Jan 21, 2011 at 05:40:54PM +0100, Jes Sorensen wrote:
>> On 01/17/11 14:15, Michael Roth wrote:
>>> Add RPC to retrieve a guest file. This interface is intended
>>> for smaller reads like peeking at logs and /proc and such.
>>>
>>> Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
>>> ---
>>>   virtagent-server.c |   59 ++++++++++++++++++++++++++++++++++++++++++++++++++++
>>>   1 files changed, 59 insertions(+), 0 deletions(-)
>>>
>>> diff --git a/virtagent-server.c b/virtagent-server.c
>>> index c38a9e0..af4b940 100644
>>> --- a/virtagent-server.c
>>> +++ b/virtagent-server.c
>>> @@ -62,12 +62,71 @@ out:
>>>       return ret;
>>>   }
>>>
>>> +/* RPC functions common to guest/host daemons */
>>> +
>>> +/* va_getfile(): return file contents
>>> + * rpc return values:
>>> + *   - base64-encoded file contents
>>> + */
>>> +static xmlrpc_value *va_getfile(xmlrpc_env *env,
>>> +                                xmlrpc_value *params,
>>> +                                void *user_data)
>>> +{
>>> +    const char *path;
>>> +    char *file_contents = NULL;
>>> +    char buf[VA_FILEBUF_LEN];
>>
>> malloc()!
>>
>>> +    int fd, ret, count = 0;
>>> +    xmlrpc_value *result = NULL;
>>> +
>>> +    /* parse argument array */
>>> +    xmlrpc_decompose_value(env, params, "(s)", &path);
>>> +    if (env->fault_occurred) {
>>> +        return NULL;
>>> +    }
>>> +
>>> +    SLOG("va_getfile(), path:%s", path);
>>> +
>>> +    fd = open(path, O_RDONLY);
>>> +    if (fd == -1) {
>>> +        LOG("open failed: %s", strerror(errno));
>>> +        xmlrpc_faultf(env, "open failed: %s", strerror(errno));
>>> +        return NULL;
>>> +    }
>>> +
>>> +    while ((ret = read(fd, buf, VA_FILEBUF_LEN)) > 0) {
>>> +        file_contents = qemu_realloc(file_contents, count + VA_FILEBUF_LEN);
>>> +        memcpy(file_contents + count, buf, ret);
>>
>> Sorry, I brought this up before. This realloc() stuff is a disaster
>> waiting to happen. Please remove it from the patch series until you
>> have an implementation that copies over a page at a time.
>
> I can understand the need of virtagent for lifecycle control/interactions
> with the guest OS (reboot, shutdown, ping, screen lock/unlock, etc), but
> do we really want to reinvent libguestfs for file access? A little dev
> work could enable users to install the libguestfs agent into a guest OS,
> and access it from the host over virtio-serial + the libguestfs API.

File/dmesg/etc. access is a bit of a grey area. Technically it's not
lifecycle-specific, but it tends to become a requirement for
higher-level management policies, and relying on external tools to
provide what, at least in our case, has been an extremely common
request/requirement greatly reduces the usefulness of such an agent.

Ultimately, however, these interfaces would be exposed via libvirt, which
libguestfs already makes use of, so it'd be a logical way to extend it
for disk access to live guests.

getfile() is confusingly named, however; it's really just a means to peek
at a text file like /proc/meminfo. General file access will be done via
a stateful interface that implements semantics similar to
open()/read()/write()/close().

>
> This would be quite a compelling usage model for app developers, because
> it would mean that whether the guest OS was running or shut off, they
> could use the same libguestfs API for processing guest filesystem images.
> The level of functionality provided by libguestfs is really quite
> considerable now, letting you do pretty much any operation against
> files that you could do via local POSIX for non-virt access, as
> well as providing many useful higher-level constructs.
>
> Regards,
> Daniel
Richard W.M. Jones Jan. 24, 2011, 10:08 p.m. UTC | #4
On Fri, Jan 21, 2011 at 12:23:40PM -0600, Michael Roth wrote:
> getfile() is confusingly named, however; it's really just a means to
> peek at a text file like /proc/meminfo.

You might as well reuse the libguestfs API here because you get the
benefit of all the code that's been written, all the tools on top, and
a far more comprehensive API that would take you another 2 years to
implement.

There are various ways to do it: encapsulate the libguestfs API messages
to and from guestfsd into the virtagent protocol.  Or give us another
8-bit-clean channel.  Or write a libguestfs generator component that
generates virtagent messages.

We got a long way through porting guestfsd to Windows last year when
we thought we needed a Windows-native daemon (since abandoned for
other reasons).  It works already for many of the API calls.

> general file access will be done via a stateful interface that
> implements similar semantics to open()/read()/write()/close().

This will be very slow.

Rich.
Richard W.M. Jones Jan. 24, 2011, 10:20 p.m. UTC | #5
On Mon, Jan 24, 2011 at 10:08:09PM +0000, Richard W.M. Jones wrote:
> You might as well reuse the libguestfs API here because you get the
> benefit of all the code that's been written, all the tools on top, and
> a far more comprehensive API that would take you another 2 years to
> implement.

To put it in some perspective, libguestfs is just shy of 500K lines of
code now, not including the tools built on top, plus a 150-page manual just
for the core API.

Rich.
Anthony Liguori Jan. 24, 2011, 10:26 p.m. UTC | #6
On 01/24/2011 04:20 PM, Richard W.M. Jones wrote:
> On Mon, Jan 24, 2011 at 10:08:09PM +0000, Richard W.M. Jones wrote:
>    
>> You might as well reuse the libguestfs API here because you get the
>> benefit of all the code that's been written, all the tools on top, and
>> a far more comprehensive API that would take you another 2 years to
>> implement.
>>      
> To put it in some perspective, libguestfs is just shy of 500K lines of
> code now, not including the tools built on top, plus a 150-page manual just
> for the core API.
>    

Yeah, but I think that's the reason that it might not be a good 
candidate for this use-case.

We need a *simple* interface that we can convince everyone to install by 
default and run in their guests.  It needs to be flexible enough that we 
can do lots of fun things but simple enough that a reasonable person can 
audit the code in a short period of time.

It will never replace something as sophisticated as guestfs but that's 
not its point.  Its point is to let you do simple things like execute 
a command in the guest or peek at /proc/meminfo.  You don't need 500k 
LOCs for that.

Regards,

Anthony Liguori

> Rich.
>
>
Richard W.M. Jones Jan. 24, 2011, 10:48 p.m. UTC | #7
On Mon, Jan 24, 2011 at 04:26:09PM -0600, Anthony Liguori wrote:
> On 01/24/2011 04:20 PM, Richard W.M. Jones wrote:
> >On Mon, Jan 24, 2011 at 10:08:09PM +0000, Richard W.M. Jones wrote:
> >>You might as well reuse the libguestfs API here because you get the
> >>benefit of all the code that's been written, all the tools on top, and
> >>a far more comprehensive API that would take you another 2 years to
> >>implement.
> >To put it in some perspective, libguestfs is just shy of 500K lines of
> >code now, not including the tools built on top, plus a 150-page manual just
> >for the core API.
> 
> Yeah, but I think that's the reason that it might not be a good
> candidate for this use-case.
> 
> We need a *simple* interface that we can convince everyone to
> install by default and run in their guests.  It needs to be flexible
> enough that we can do lots of fun things but simple enough that a
> reasonable person can audit the code in a short period of time.
> 
> It will never replace something as sophisticated as guestfs but
> that's not its point.  Its point is to let you do simple things
> like execute a command in the guest or peek at /proc/meminfo.  You
> don't need 500k LOCs for that.

I don't really want to argue over this, since I think accessing live
VMs like this is a really useful feature, and it complements
libguestfs (image editing) very nicely.

I'll just say that you might not think you need it to start off with
(and we didn't either), but when you notice that "simple"
open/read/write/close in fact has terrible performance, so you need to
specialize many operations, and then someone wants to create a
filesystem, and someone else wants a FUSE interface, and suddenly you'll
be reimplementing large parts of libguestfs.

The daemon (guestfsd) is 36106 LoC.

Rich.
Anthony Liguori Jan. 24, 2011, 11:40 p.m. UTC | #8
On 01/24/2011 04:48 PM, Richard W.M. Jones wrote:
> On Mon, Jan 24, 2011 at 04:26:09PM -0600, Anthony Liguori wrote:
>    
>> On 01/24/2011 04:20 PM, Richard W.M. Jones wrote:
>>      
>>> On Mon, Jan 24, 2011 at 10:08:09PM +0000, Richard W.M. Jones wrote:
>>>        
>>>> You might as well reuse the libguestfs API here because you get the
>>>> benefit of all the code that's been written, all the tools on top, and
>>>> a far more comprehensive API that would take you another 2 years to
>>>> implement.
>>>>          
>>> To put it in some perspective, libguestfs is just shy of 500K lines of
>>> code now, not including the tools built on top.  150 page manual just
>>> for the core API.
>>>        
>> Yeah, but I think that's the reason that it might not be a good
>> candidate for this use-case.
>>
>> We need a *simple* interface that we can convince everyone to
>> install by default and run in their guests.  It needs to be flexible
>> enough that we can do lots of fun things but simple enough that a
>> reasonable person can audit the code in a short period of time.
>>
>> It will never replace something as sophisticated as guestfs but
>> that's not it's point.  It's point is to let you do simple things
>> like execute a command in the guest or peek at /proc/meminfo.  You
>> don't need 500k LOCs for that.
>>      
> I don't really want to argue over this, since I think accessing live
> VMs like this is a really useful feature, and it complements
> libguestfs (image editing) very nicely.
>
> I'll just say that you might not think you need it to start off with
> (and we didn't either), but when you notice that "simple"
> open/read/write/close

Oh I don't think there should be an open/read/write/close interface.  
I'm quite happy with the current copyfile interface.

>   in fact has terrible performance, so you need to
> specialize many operations, and then someone wants to create a
> filesystem, and someone else wants a FUSE interface, and suddenly you'll
> be reimplementing large parts of libguestfs.
>    

Nope.  If you want to do fancy things, use libguestfs :-)

BTW, how dependent is guestfsd on the guest that libguestfs uses?  I 
wasn't even aware that it could be used outside of that context.

Regards,

Anthony Liguori

> The daemon (guestfsd) is 36106 LoC.
>
> Rich.
>
>
Michael Roth Jan. 25, 2011, 12:22 a.m. UTC | #9
On 01/24/2011 05:40 PM, Anthony Liguori wrote:
> On 01/24/2011 04:48 PM, Richard W.M. Jones wrote:
>> On Mon, Jan 24, 2011 at 04:26:09PM -0600, Anthony Liguori wrote:
>>> On 01/24/2011 04:20 PM, Richard W.M. Jones wrote:
>>>> On Mon, Jan 24, 2011 at 10:08:09PM +0000, Richard W.M. Jones wrote:
>>>>> You might as well reuse the libguestfs API here because you get the
>>>>> benefit of all the code that's been written, all the tools on top, and
>>>>> a far more comprehensive API that would take you another 2 years to
>>>>> implement.
>>>> To put it in some perspective, libguestfs is just shy of 500K lines of
>>>> code now, not including the tools built on top, plus a 150-page manual just
>>>> for the core API.
>>> Yeah, but I think that's the reason that it might not be a good
>>> candidate for this use-case.
>>>
>>> We need a *simple* interface that we can convince everyone to
>>> install by default and run in their guests. It needs to be flexible
>>> enough that we can do lots of fun things but simple enough that a
>>> reasonable person can audit the code in a short period of time.
>>>
>>> It will never replace something as sophisticated as guestfs but
>>> that's not its point. Its point is to let you do simple things
>>> like execute a command in the guest or peek at /proc/meminfo. You
>>> don't need 500k LOCs for that.
>> I don't really want to argue over this, since I think accessing live
>> VMs like this is a really useful feature, and it complements
>> libguestfs (image editing) very nicely.
>>
>> I'll just say that you might not think you need it to start off with
>> (and we didn't either), but when you notice that "simple"
>> open/read/write/close
>
> Oh I don't think there should be an open/read/write/close interface. I'm
> quite happy with the current copyfile interface.

Actually, copyfile is the proposed open/read/write/close interface. 
getfile is the current interface, and it seems to be a contentious one. 
I've discussed it quite a bit with Jes here and in the last couple RFCs. 
I think the current course is that we'll end up ditching 
viewfile/viewdmesg in favor of copyfile, and that we should do it now 
rather than later.

The upshot is that "viewfile <remote>" is basically equivalent to:
copyfile_open <remote> /dev/stdout -> fd_handle;
copyfile_read fd_handle <offset=0> <count=MAX_CHUNK_SIZE>;
copyfile_close fd_handle

Or we can output to a file and potentially introduce a monitor command 
that wraps these to provide a simple one-liner like we have now, though 
there may be some reluctance there as well. But at least it'll be 
possible either way.
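
For concreteness, that sequence might look like the sketch below from a
client, using xmlrpc-c's synchronous API over a plain URL transport; the
va.copyfile_* method names, their signatures, and MAX_CHUNK_SIZE are
assumptions for illustration, since the interface is only proposed at
this point:

#include <xmlrpc-c/base.h>
#include <xmlrpc-c/client.h>

#define MAX_CHUNK_SIZE 4096   /* assumed read size */

/* Hypothetical client-side equivalent of "viewfile <remote>"; checks
 * of env->fault_occurred are omitted for brevity. */
static void viewfile_via_copyfile(xmlrpc_env *env, const char *url)
{
    xmlrpc_value *resp;
    xmlrpc_int32 handle;

    /* copyfile_open <remote> /dev/stdout -> fd_handle */
    resp = xmlrpc_client_call(env, url, "va.copyfile_open", "(ss)",
                              "/proc/meminfo", "/dev/stdout");
    xmlrpc_decompose_value(env, resp, "i", &handle);
    xmlrpc_DECREF(resp);

    /* copyfile_read fd_handle <offset=0> <count=MAX_CHUNK_SIZE> */
    resp = xmlrpc_client_call(env, url, "va.copyfile_read", "(iii)",
                              handle, 0, MAX_CHUNK_SIZE);
    xmlrpc_DECREF(resp);

    /* copyfile_close fd_handle */
    resp = xmlrpc_client_call(env, url, "va.copyfile_close", "(i)",
                              handle);
    xmlrpc_DECREF(resp);
}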

>
>> in fact has terrible performance, so you need to
>> specialize many operations, and then someone wants to create a
>> filesystem, and someone else wants a FUSE interface, and suddenly you'll
>> be reimplementing large parts of libguestfs.
>
> Nope. If you want to do fancy things, use libguestfs :-)
>
> BTW, how dependent is guestfsd on the guest that libguestfs uses? I
> wasn't even aware that it could be used outside of that context.
>
> Regards,
>
> Anthony Liguori
>
>> The daemon (guestfsd) is 36106 LoC.
>>
>> Rich.
>>
>
Anthony Liguori Jan. 25, 2011, 12:25 a.m. UTC | #10
On 01/24/2011 06:22 PM, Michael Roth wrote:
>
> Actually, copyfile is the proposed open/read/write/close interface. 
> getfile is the current interface, and it seems to be a contentious 
> one. I've discussed it quite a bit with Jes here and in the last 
> couple RFCs. I think the current course is that we'll end up ditching 
> viewfile/viewdmesg in favor of copyfile, and that we should do it now 
> rather than later.
>
> The upshot is that "viewfile <remote>" is basically equivalent to:
> copyfile_open <remote> /dev/stdout -> fd_handle;
> copyfile_read fd_handle <offset=0> <count=MAX_CHUNK_SIZE>;
> copyfile_close fd_handle

I really just want getfile.

I think designing a partial read API at this stage isn't a good idea.  
Wait until there's a concrete use case before adding an interface.

Regards,

Anthony Liguori
Richard W.M. Jones Jan. 25, 2011, 9:21 a.m. UTC | #11
On Mon, Jan 24, 2011 at 05:40:05PM -0600, Anthony Liguori wrote:
> BTW, how dependent is guestfsd on the guest that libguestfs uses?  I
> wasn't even aware that it could be used outside of that context.

The daemon is compiled separately -- separate ./configure, make, etc.
You can run it on its own.

On the other hand, it does need to talk to something on the other end
of the virtio-serial guestfsd socket, and that other thing would
usually be the libguestfs library ...

One thing that Dan Berrange did was to patch[1] libguestfs so it could
talk to any existing guestfsd (you pointed it at a Unix domain
socket).  He was using this to write test regression tests for
'virt-install': ie. install a guest, put guestfsd inside it, then boot
up the guest and check that everything was installed correctly by
querying it from an external libguestfs.

For various unrelated reasons these patches weren't quite ready to go
upstream, but it's on our ROADMAP[2] to add something like this.

In which case what you would do would be:

(a) put guestfsd into existing guests

(b) add a nice option to guestfish to attach to existing VMs, eg:

  guestfish --attach Fedora14
  [guestfish live attached to Fedora 14's virtio-serial guestfsd socket]
  ><fs> copy-in ./dirs /tmp/

  "copy-in" would be dangerous currently if used on a live VM, but
  in this case it would be quite safe

(c) do the work of porting guestfsd to Windows, FreeBSD etc

Rich.

[1] https://www.redhat.com/archives/libguestfs/2010-July/msg00010.html
    refined a bit more later on.

[2] http://libguestfs.org/ROADMAP.txt
  "* Allow alternate methods to start the appliance, including through
     libvirt and by connecting to an existing appliance.  This was
     originally planned for 1.8 but we didn't get patches in time."
Anthony Liguori Jan. 25, 2011, 3:12 p.m. UTC | #12
On 01/25/2011 03:21 AM, Richard W.M. Jones wrote:
> On Mon, Jan 24, 2011 at 05:40:05PM -0600, Anthony Liguori wrote:
>    
>> BTW, how dependent is guestfsd on the guest that libguestfs uses?  I
>> wasn't even aware that it could be used outside of that context.
>>      
> The daemon is compiled separately -- separate ./configure, make, etc.
> You can run it on its own.
>
> On the other hand, it does need to talk to something on the other end
> of the virtio-serial guestfsd socket, and that other thing would
> usually be the libguestfs library ...
>
> One thing that Dan Berrange did was to patch[1] libguestfs so it could
> talk to any existing guestfsd (you pointed it at a Unix domain
> socket).  He was using this to write test regression tests for
> 'virt-install': ie. install a guest, put guestfsd inside it, then boot
> up the guest and check that everything was installed correctly by
> querying it from an external libguestfs.
>
> For various unrelated reasons these patches weren't quite ready to go
> upstream, but it's on our ROADMAP[2] to add something like this.
>
> In which case what you would do would be:
>
> (a) put guestfsd into existing guests
>    

How much infrastructure does guestfsd depend on within the guest?  Do you 
need a full install with all of the bells and whistles or does it 
degrade cleanly when certain tools aren't available?

Regards,

Anthony Liguori

> (b) add a nice option to guestfish to attach to existing VMs, eg:
>
>    guestfish --attach Fedora14
>    [guestfish live attached to Fedora 14's virtio-serial guestfsd socket]
>    ><fs>  copy-in ./dirs /tmp/
>
>    "copy-in" would be dangerous currently if used on a live VM, but
>    in this case it would be quite safe
>
> (c) do the work of porting guestfsd to Windows, FreeBSD etc
>
> Rich.
>
> [1] https://www.redhat.com/archives/libguestfs/2010-July/msg00010.html
>      refined a bit more later on.
>
> [2] http://libguestfs.org/ROADMAP.txt
>    "* Allow alternate methods to start the appliance, including through
>       libvirt and by connecting to an existing appliance.  This was
>       originally planned for 1.8 but we didn't get patches in time."
>
>
Richard W.M. Jones Jan. 25, 2011, 3:43 p.m. UTC | #13
On Tue, Jan 25, 2011 at 09:12:15AM -0600, Anthony Liguori wrote:
> How much infrastructure does guestfsd depend on within the guest?  Do
> you need a full install with all of the bells and whistles or does
> it degrade cleanly when certain tools aren't available?

On Linux these are the libraries, both *optional*:

- libselinux
- augeas

It also uses the following external programs if available, but will
degrade gracefully if they are not:

- blkid
- blockdev
- cmp
- cp
- cpio
- df
- du
- various programs from e2fsprogs if you want to support ext2/3/4
- grep
- grub-install
- hexdump
- ls
- various programs from lvm2 if you want to support LVM ops
- /sbin/mkfs.* depending on what filesystems you want to be able to create
- mount
- mv
- ntfs-3g.probe
- ntfsresize
- mkswap
- parted
- printenv
- rm
- scrub
- sfdisk
- strings
- tar
- wc
- zerofree

As you can probably tell, in many cases the job of guestfsd is to
unpack the structured C remote procedure call arguments, pass these to
an external program, then parse the result and pass it back as a
structured C return value.
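
Schematically, that shape is something like the sketch below; the
handler name and command are made up for illustration, and real guestfsd
code quotes its arguments and runs commands without a shell rather than
using popen() on a formatted string:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative only: take an already-unpacked C argument, delegate to
 * an external program, and parse its output back into a structured
 * return value. */
static int do_blockdev_getsize64(const char *device, int64_t *size_out)
{
    char cmd[256], line[64];
    FILE *fp;

    /* The RPC layer has already unpacked "device" from the wire. */
    snprintf(cmd, sizeof(cmd), "blockdev --getsize64 %s", device);

    fp = popen(cmd, "r");   /* hand the work to the external program */
    if (fp == NULL) {
        return -1;
    }
    if (fgets(line, sizeof(line), fp) == NULL) {
        pclose(fp);
        return -1;
    }
    pclose(fp);

    *size_out = strtoll(line, NULL, 10);  /* parse back into C */
    return 0;
}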

There are other guestfsd features which are implemented using POSIX
functions and syscalls directly.

The port to Windows involved rewriting the POSIX bits and bundling
some of the commands above from mingw where that made sense (and where
it didn't, making those calls return sensible and discoverable error
values).  This port has likely bitrotted and really needs to be picked
up again.

HTH,

Rich.
Richard W.M. Jones Jan. 26, 2011, 1:01 p.m. UTC | #14
I posted my thoughts about how this could work here:

https://www.redhat.com/archives/libguestfs/2011-January/msg00066.html

Rich.

PS. You don't need to be a subscriber to post to that list -- I
manually triage any messages sent by non-subscribers.

Patch

diff --git a/virtagent-server.c b/virtagent-server.c
index c38a9e0..af4b940 100644
--- a/virtagent-server.c
+++ b/virtagent-server.c
@@ -62,12 +62,71 @@  out:
     return ret;
 }
 
+/* RPC functions common to guest/host daemons */
+
+/* va_getfile(): return file contents
+ * rpc return values:
+ *   - base64-encoded file contents
+ */
+static xmlrpc_value *va_getfile(xmlrpc_env *env,
+                                xmlrpc_value *params,
+                                void *user_data)
+{
+    const char *path;
+    char *file_contents = NULL;
+    char buf[VA_FILEBUF_LEN];
+    int fd, ret, count = 0;
+    xmlrpc_value *result = NULL;
+
+    /* parse argument array */
+    xmlrpc_decompose_value(env, params, "(s)", &path);
+    if (env->fault_occurred) {
+        return NULL;
+    }
+
+    SLOG("va_getfile(), path:%s", path);
+
+    fd = open(path, O_RDONLY);
+    if (fd == -1) {
+        LOG("open failed: %s", strerror(errno));
+        xmlrpc_faultf(env, "open failed: %s", strerror(errno));
+        return NULL;
+    }
+
+    while ((ret = read(fd, buf, VA_FILEBUF_LEN)) > 0) {
+        file_contents = qemu_realloc(file_contents, count + VA_FILEBUF_LEN);
+        memcpy(file_contents + count, buf, ret);
+        count += ret;
+        if (count > VA_GETFILE_MAX) {
+            xmlrpc_faultf(env, "max file size (%d bytes) exceeded",
+                          VA_GETFILE_MAX);
+            goto EXIT_CLOSE_BAD;
+        }
+    }
+    if (ret == -1) {
+        LOG("read failed: %s", strerror(errno));
+        xmlrpc_faultf(env, "read failed: %s", strerror(errno));
+        goto EXIT_CLOSE_BAD;
+    }
+
+    result = xmlrpc_build_value(env, "6", file_contents, count);
+
+EXIT_CLOSE_BAD:
+    if (file_contents) {
+        qemu_free(file_contents);
+    }
+    close(fd);
+    return result;
+}
+
 typedef struct RPCFunction {
     xmlrpc_value *(*func)(xmlrpc_env *env, xmlrpc_value *param, void *unused);
     const char *func_name;
 } RPCFunction;
 
 static RPCFunction guest_functions[] = {
+    { .func = va_getfile,
+      .func_name = "va.getfile" },
     { NULL, NULL }
 };
 static RPCFunction host_functions[] = {