
[V12,05/17] xen: Add xenfv machine

Message ID 1301423290-12443-6-git-send-email-anthony.perard@citrix.com
State New

Commit Message

Anthony PERARD March 29, 2011, 6:27 p.m. UTC
From: Anthony PERARD <anthony.perard@citrix.com>

Introduce the Xen FV (Fully Virtualized) machine to QEMU; more Xen-specific
calls will be added in later patches.

Signed-off-by: Anthony PERARD <anthony.perard@citrix.com>
---
 hw/pc.c      |   19 +++++++++++++++++--
 hw/pc_piix.c |   17 +++++++++++++++++
 hw/xen.h     |    4 ++++
 3 files changed, 38 insertions(+), 2 deletions(-)

Comments

Jan Kiszka April 8, 2011, 1:48 p.m. UTC | #1
[ Late comments, I know, sorry. I just happened to come across this. ]

On 2011-03-29 20:27, anthony.perard@citrix.com wrote:
> From: Anthony PERARD <anthony.perard@citrix.com>
> 
> Introduce the Xen FV (Fully Virtualized) machine to Qemu, some more Xen
> specific call will be added in further patches.
> 
> Signed-off-by: Anthony PERARD <anthony.perard@citrix.com>
> ---
>  hw/pc.c      |   19 +++++++++++++++++--
>  hw/pc_piix.c |   17 +++++++++++++++++
>  hw/xen.h     |    4 ++++
>  3 files changed, 38 insertions(+), 2 deletions(-)
> 
> diff --git a/hw/pc.c b/hw/pc.c
> index 6939c04..d7732d4 100644
> --- a/hw/pc.c
> +++ b/hw/pc.c
> @@ -41,6 +41,7 @@
>  #include "sysemu.h"
>  #include "blockdev.h"
>  #include "ui/qemu-spice.h"
> +#include "xen.h"
>  
>  /* output Bochs bios info messages */
>  //#define DEBUG_BIOS
> @@ -918,7 +919,11 @@ static void pc_cpu_reset(void *opaque)
>      CPUState *env = opaque;
>  
>      cpu_reset(env);
> -    env->halted = !cpu_is_bsp(env);
> +    if (!xen_enabled()) {
> +        env->halted = !cpu_is_bsp(env);
> +    } else {
> +        env->halted = 1;
> +    }

Not a fault of your patch, but pc_cpu_reset should not exist in the
first place. Setting env->halted should be done in i386's cpu_reset.

I think Xen would be better off installing a custom VCPU reset
handler and overwriting halted according to its own needs. KVM does
the same. Then we could clean up pc_cpu_reset without bothering Xen.
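
For illustration, what is being suggested here could look roughly like this
(a sketch only; xen_vcpu_init() is a name made up for this example, while
qemu_register_reset() and cpu_reset() are the existing APIs in this QEMU tree):

    static void xen_cpu_reset_handler(void *opaque)
    {
        CPUState *env = opaque;

        cpu_reset(env);
        /* Under Xen the guest VCPUs run in the hypervisor, so the VCPU
         * emulated by QEMU stays halted. */
        env->halted = 1;
    }

    /* Hypothetical hook, called once per created VCPU when xen_enabled(): */
    void xen_vcpu_init(CPUState *env)
    {
        qemu_register_reset(xen_cpu_reset_handler, env);
    }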

>  }
>  
>  static CPUState *pc_new_cpu(const char *cpu_model)
> @@ -952,7 +957,12 @@ void pc_cpus_init(const char *cpu_model)
>  #endif
>      }
>  
> -    for(i = 0; i < smp_cpus; i++) {
> +    if (!xen_enabled()) {
> +        for(i = 0; i < smp_cpus; i++) {
> +            pc_new_cpu(cpu_model);
> +        }
> +    } else {
> +        /* Xen require only one Qemu VCPU */
>          pc_new_cpu(cpu_model);

This looks a bit fishy. What are the semantics of -smp 2 or more in Xen
mode? If that is an invalid/unused configuration option, catch it and
reject it instead of installing this workaround. If it has valid
semantics, please elaborate on why you need to restrict the number of
instantiated CPUs. Just to optimize memory usage?
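
For illustration, the "reject it" alternative could look roughly like this
(a sketch only, not part of the patch; the error message wording is made up
here):

    /* In pc_cpus_init(), before creating any CPU: refuse a configuration
     * the device model cannot honour instead of silently ignoring it. */
    if (xen_enabled() && smp_cpus > 1) {
        fprintf(stderr, "xenfv: -smp > 1 is not supported by this device model\n");
        exit(1);
    }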

>      }
>  }
> @@ -980,6 +990,11 @@ void pc_memory_init(ram_addr_t ram_size,
>      *above_4g_mem_size_p = above_4g_mem_size;
>      *below_4g_mem_size_p = below_4g_mem_size;
>  
> +    if (xen_enabled()) {
> +        /* Nothing to do for Xen */
> +        return;
> +    }
> +

This looks fragile with respect to potential future changes of pc_memory_init.
Can't the bits Xen is interested in, i.e. the above/below_4g_mem_size
calculation, be moved into a separate function or even to the caller
(should be trivial enough; the interface of pc_memory_init is clumsy in
this regard anyway), so that you can simply skip pc_memory_init when in
Xen mode?
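
For illustration, the calculation referred to above could be factored out
along these lines (a sketch only; the helper name is made up here, and the
0xe0000000 boundary is the one pc_memory_init uses in this tree):

    /* Compute the RAM split around the PCI hole; callers can share this,
     * including a Xen machine init that skips pc_memory_init() entirely. */
    static void pc_get_4g_split(ram_addr_t ram_size,
                                ram_addr_t *below_4g_mem_size_p,
                                ram_addr_t *above_4g_mem_size_p)
    {
        if (ram_size >= 0xe0000000) {
            *above_4g_mem_size_p = ram_size - 0xe0000000;
            *below_4g_mem_size_p = 0xe0000000;
        } else {
            *above_4g_mem_size_p = 0;
            *below_4g_mem_size_p = ram_size;
        }
    }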

Jan
Anthony PERARD April 11, 2011, 6:10 p.m. UTC | #2
On Fri, 8 Apr 2011, Jan Kiszka wrote:

> [ Late comments, I know, sorry. Just happen to came across this. ]
>
> On 2011-03-29 20:27, anthony.perard@citrix.com wrote:
> > From: Anthony PERARD <anthony.perard@citrix.com>
> >
> > Introduce the Xen FV (Fully Virtualized) machine to Qemu, some more Xen
> > specific call will be added in further patches.
> >
> > Signed-off-by: Anthony PERARD <anthony.perard@citrix.com>
> > ---
> >  hw/pc.c      |   19 +++++++++++++++++--
> >  hw/pc_piix.c |   17 +++++++++++++++++
> >  hw/xen.h     |    4 ++++
> >  3 files changed, 38 insertions(+), 2 deletions(-)
> >
> > diff --git a/hw/pc.c b/hw/pc.c
> > index 6939c04..d7732d4 100644
> > --- a/hw/pc.c
> > +++ b/hw/pc.c
> > @@ -41,6 +41,7 @@
> >  #include "sysemu.h"
> >  #include "blockdev.h"
> >  #include "ui/qemu-spice.h"
> > +#include "xen.h"
> >
> >  /* output Bochs bios info messages */
> >  //#define DEBUG_BIOS
> > @@ -918,7 +919,11 @@ static void pc_cpu_reset(void *opaque)
> >      CPUState *env = opaque;
> >
> >      cpu_reset(env);
> > -    env->halted = !cpu_is_bsp(env);
> > +    if (!xen_enabled()) {
> > +        env->halted = !cpu_is_bsp(env);
> > +    } else {
> > +        env->halted = 1;
> > +    }
>
> Not a fault of your patch, but pc_cpu_reset should not exist in the
> first place. Setting env->halted should be done in i386's cpu_reset.
>
> I think Xen would be better off with installing a custom VCPU reset
> handler and overwrite halted according to its own needs. KVM is doing
> the same. Then we could clean up pc_cpu_reset without bothering Xen.

I will do that.

> >  }
> >
> >  static CPUState *pc_new_cpu(const char *cpu_model)
> > @@ -952,7 +957,12 @@ void pc_cpus_init(const char *cpu_model)
> >  #endif
> >      }
> >
> > -    for(i = 0; i < smp_cpus; i++) {
> > +    if (!xen_enabled()) {
> > +        for(i = 0; i < smp_cpus; i++) {
> > +            pc_new_cpu(cpu_model);
> > +        }
> > +    } else {
> > +        /* Xen require only one Qemu VCPU */
> >          pc_new_cpu(cpu_model);
>
> This looks a bit fishy. What is the semantic of -smp 2 or more in Xen
> mode? If that is an invalid/unused configuration option, catch that and
> reject it instead of installing this workaround. If it has a valid
> semantic, please elaborate why you need to restrict the number of
> instantiated cpus. Just to optimize memory usage?

I thought at first that this was needed to avoid errors, but it also works
when we initialise the other CPUs. I prefer to keep it this way to save
memory, and in configurations where there is a thread for each CPU it also
avoids creating many useless threads.

Also, I use the -smp value to initialise Xen's structures related to the
VCPUs.

> >      }
> >  }
> > @@ -980,6 +990,11 @@ void pc_memory_init(ram_addr_t ram_size,
> >      *above_4g_mem_size_p = above_4g_mem_size;
> >      *below_4g_mem_size_p = below_4g_mem_size;
> >
> > +    if (xen_enabled()) {
> > +        /* Nothing to do for Xen */
> > +        return;
> > +    }
> > +
>
> This looks fragile /wrt potential future changes of pc_memory_init.
> Can't those bits Xen is interested in, ie. the above/below_4g_mem_size
> calculation, be moved into a separate function or even to the caller
> (should be trivial enough, the interface of pc_memory_init is clumsy in
> this regard anyway) so that you can simply skip pc_memory_init when in
> Xen mode?

I'll do that: put the calculation in the caller and change the
pc_memory_init prototype.


Thanks for your review,
Regards,
Jan Kiszka April 11, 2011, 7:55 p.m. UTC | #3
On 2011-04-11 20:10, Anthony PERARD wrote:
>>>  }
>>>
>>>  static CPUState *pc_new_cpu(const char *cpu_model)
>>> @@ -952,7 +957,12 @@ void pc_cpus_init(const char *cpu_model)
>>>  #endif
>>>      }
>>>
>>> -    for(i = 0; i < smp_cpus; i++) {
>>> +    if (!xen_enabled()) {
>>> +        for(i = 0; i < smp_cpus; i++) {
>>> +            pc_new_cpu(cpu_model);
>>> +        }
>>> +    } else {
>>> +        /* Xen require only one Qemu VCPU */
>>>          pc_new_cpu(cpu_model);
>>
>> This looks a bit fishy. What is the semantic of -smp 2 or more in Xen
>> mode? If that is an invalid/unused configuration option, catch that and
>> reject it instead of installing this workaround. If it has a valid
>> semantic, please elaborate why you need to restrict the number of
>> instantiated cpus. Just to optimize memory usage?
> 
> I thought in a first place that was needed to avoid errors. But it works
> also when we initialise other CPUs. But I prefere to keep it that way to
> save memory and in the case where there is a thread for each cpu that
> will also avoid to have many useless threads.

How much memory does this save? More than a few KB per VCPU? That should
be negligible compared to the normal size of VMs. And as long as we do
not support multi-threaded TCG VCPUs, Xen will only create one thread for
all VCPUs (once that changes, Xen could control the "execution" model
via qemu_init_vcpu).

So I would prefer to avoid this additional Xen-specific branch in
generic code.

Thanks,
Jan
Anthony PERARD April 12, 2011, 2:57 p.m. UTC | #4
On Mon, Apr 11, 2011 at 20:55, Jan Kiszka <jan.kiszka@web.de> wrote:
>
> On 2011-04-11 20:10, Anthony PERARD wrote:
> >>>  }
> >>>
> >>>  static CPUState *pc_new_cpu(const char *cpu_model)
> >>> @@ -952,7 +957,12 @@ void pc_cpus_init(const char *cpu_model)
> >>>  #endif
> >>>      }
> >>>
> >>> -    for(i = 0; i < smp_cpus; i++) {
> >>> +    if (!xen_enabled()) {
> >>> +        for(i = 0; i < smp_cpus; i++) {
> >>> +            pc_new_cpu(cpu_model);
> >>> +        }
> >>> +    } else {
> >>> +        /* Xen require only one Qemu VCPU */
> >>>          pc_new_cpu(cpu_model);
> >>
> >> This looks a bit fishy. What is the semantic of -smp 2 or more in Xen
> >> mode? If that is an invalid/unused configuration option, catch that and
> >> reject it instead of installing this workaround. If it has a valid
> >> semantic, please elaborate why you need to restrict the number of
> >> instantiated cpus. Just to optimize memory usage?
> >
> > I thought in a first place that was needed to avoid errors. But it works
> > also when we initialise other CPUs. But I prefere to keep it that way to
> > save memory and in the case where there is a thread for each cpu that
> > will also avoid to have many useless threads.
>
> How much memory does this save? More than a few KB per VCPU? That should
> be negligible compared to the normal size of VMs. And as long as we do
> not support multi-threaded TCG VCPUs, Xen will only create on thread for
> all VCPUs (once that may change, Xen could control the "execution" model
> via qemu_init_vcpu).
>
> So I would prefer to avoid this additional Xen-specific branch in
> generic code.

For this patch series, I will remove this Xen specific branch.

For your information: we want to run QEMU in a tiny domain (Xen guest) of
32MB, so every 30KB per VCPU counts; also, in a Xen environment the VM
memory is allocated outside of QEMU, by the hypervisor.
So we will deal with these extra bytes later, and maybe find a
better way to do it :).

Thanks,

--
Anthony PERARD
Jan Kiszka April 12, 2011, 3:52 p.m. UTC | #5
On 2011-04-12 16:57, Anthony PERARD wrote:
> On Mon, Apr 11, 2011 at 20:55, Jan Kiszka <jan.kiszka@web.de> wrote:
>>
>> On 2011-04-11 20:10, Anthony PERARD wrote:
>>>>>  }
>>>>>
>>>>>  static CPUState *pc_new_cpu(const char *cpu_model)
>>>>> @@ -952,7 +957,12 @@ void pc_cpus_init(const char *cpu_model)
>>>>>  #endif
>>>>>      }
>>>>>
>>>>> -    for(i = 0; i < smp_cpus; i++) {
>>>>> +    if (!xen_enabled()) {
>>>>> +        for(i = 0; i < smp_cpus; i++) {
>>>>> +            pc_new_cpu(cpu_model);
>>>>> +        }
>>>>> +    } else {
>>>>> +        /* Xen require only one Qemu VCPU */
>>>>>          pc_new_cpu(cpu_model);
>>>>
>>>> This looks a bit fishy. What is the semantic of -smp 2 or more in Xen
>>>> mode? If that is an invalid/unused configuration option, catch that and
>>>> reject it instead of installing this workaround. If it has a valid
>>>> semantic, please elaborate why you need to restrict the number of
>>>> instantiated cpus. Just to optimize memory usage?
>>>
>>> I thought in a first place that was needed to avoid errors. But it works
>>> also when we initialise other CPUs. But I prefere to keep it that way to
>>> save memory and in the case where there is a thread for each cpu that
>>> will also avoid to have many useless threads.
>>
>> How much memory does this save? More than a few KB per VCPU? That should
>> be negligible compared to the normal size of VMs. And as long as we do
>> not support multi-threaded TCG VCPUs, Xen will only create on thread for
>> all VCPUs (once that may change, Xen could control the "execution" model
>> via qemu_init_vcpu).
>>
>> So I would prefer to avoid this additional Xen-specific branch in
>> generic code.
> 
> For this patch series, I will remove this Xen specific branch.
> 
> For information, we want to run qemu in a tiny domain (Xen guest) of
> 32MB, so each 30KB per VCPU can count 

I even count 56 KB here (on a 64-bit host).

> and in a Xen environment, the VM
> memory is allocated outside of QEMU, by the hypervisor.
> So, we will deal with these extra bytes later, and maybe found a
> better way to do it :).

Well, either you have a use for the VCPU state (how do you do migration
in Xen without it?), or you should probably teach QEMU in a careful &
clean way to run its device model without VCPUs - and without any
TCG-related memory consumption. For the latter, you would likely receive
kudos from KVM people as well.

BTW, if you happen to support that crazy vmport under Xen, not updating
the VCPU state will break your neck. Also, lacking VCPUs prevents the
use of QEMU's analysis and debugging features (monitor, gdbstub).

Jan
Stefano Stabellini April 13, 2011, 10:56 a.m. UTC | #6
On Tue, 12 Apr 2011, Jan Kiszka wrote:
> Well, either you have a use for the VCPU state (how do you do migration
> in Xen without it?), or you should probably teach QEMU in a careful &
> clean way to run its device model without VCPUs - and without any
> TCG-related memory consumption. For the latter, you would likely receive
> kudos from KVM people as well.
>
> BTW, if you happen to support that crazy vmport under Xen, not updating
> the VCPU state will break your neck. Also, lacking VCPUs prevent the
> usage of analysis and debugging features of QEMU (monitor, gdbstub).

We don't use the vcpu state in qemu because qemu takes care of device
emulation only; under xen the vcpu state is saved and restored by the
hypervisor.
We are currently using the number of vcpus just to know how many event
channels we have to bind to receive and send io notifications.
Thus your suggestion of teaching qemu to run without vcpus is probably
the right thing to do here.
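
A rough sketch of the per-VCPU binding described above, under these
assumptions (not taken from the series itself): xce is an already-opened
libxenctrl event-channel handle in the pre-4.1 int-handle style, xen_domid
is the guest domain, and ioreq_port[] holds the per-VCPU ports advertised by
the hypervisor:

    #include <errno.h>
    #include <xenctrl.h>

    static int bind_ioreq_ports(int xce, int xen_domid, int nr_vcpus,
                                const evtchn_port_t *ioreq_port,
                                evtchn_port_t *local_port)
    {
        int i, rc;

        for (i = 0; i < nr_vcpus; i++) {
            /* One event channel per guest VCPU: the hypervisor notifies
             * QEMU on it when that VCPU issues an I/O request. */
            rc = xc_evtchn_bind_interdomain(xce, xen_domid, ioreq_port[i]);
            if (rc == -1) {
                return -errno;
            }
            local_port[i] = rc;
        }
        return 0;
    }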
Jan Kiszka April 13, 2011, 11:28 a.m. UTC | #7
On 2011-04-13 12:56, Stefano Stabellini wrote:
> On Tue, 12 Apr 2011, Jan Kiszka wrote:
>> Well, either you have a use for the VCPU state (how do you do migration
>> in Xen without it?), or you should probably teach QEMU in a careful &
>> clean way to run its device model without VCPUs - and without any
>> TCG-related memory consumption. For the latter, you would likely receive
>> kudos from KVM people as well.
>>
>> BTW, if you happen to support that crazy vmport under Xen, not updating
>> the VCPU state will break your neck. Also, lacking VCPUs prevent the
>> usage of analysis and debugging features of QEMU (monitor, gdbstub).
> 
> We don't use the vcpu state in qemu because qemu takes care of device
> emulation only; under xen the vcpu state is saved and restored by the
> hypervisor.

Just out of curiosity: so you are extracting the device states out of
QEMU on migration, doing the same with the VCPU states from the hypervisor
(which wouldn't be that different from KVM, in fact), and then transferring
that to the destination node? Is there a technical or historical reason
for this split-up? I mean, you still need some managing instance that
does the state transportation and VM control on both sides, i.e. someone
for the job that QEMU is doing for TCG or KVM migrations.

Jan
Stefano Stabellini April 13, 2011, 11:49 a.m. UTC | #8
On Wed, 13 Apr 2011, Jan Kiszka wrote:
> On 2011-04-13 12:56, Stefano Stabellini wrote:
> > On Tue, 12 Apr 2011, Jan Kiszka wrote:
> >> Well, either you have a use for the VCPU state (how do you do migration
> >> in Xen without it?), or you should probably teach QEMU in a careful &
> >> clean way to run its device model without VCPUs - and without any
> >> TCG-related memory consumption. For the latter, you would likely receive
> >> kudos from KVM people as well.
> >>
> >> BTW, if you happen to support that crazy vmport under Xen, not updating
> >> the VCPU state will break your neck. Also, lacking VCPUs prevent the
> >> usage of analysis and debugging features of QEMU (monitor, gdbstub).
> > 
> > We don't use the vcpu state in qemu because qemu takes care of device
> > emulation only; under xen the vcpu state is saved and restored by the
> > hypervisor.
> 
> Just out of curiosity: So you are extracting the device states out of
> QEMU on migration, do the same with the VCPU states from the hypervisor
> (which wouldn't be that different from KVM in fact), and then transfer
> that to the destination node? Is there a technical or historical reason
> for this split-up? I mean, you still need some managing instance that
> does the state transportation and VM control on both sides, i.e. someone
> for the job that QEMU is doing for TCG or KVM migrations.

That someone is the "toolstack"; I guess libvirt would be the closest
thing to our toolstack in the KVM world.
The reason why we have a toolstack performing this task rather than QEMU
is that pure PV guests don't need device emulation, so we don't even
have QEMU running most of the time if there are only Linux guests
installed in the system.
Jan Kiszka April 13, 2011, 1:05 p.m. UTC | #9
On 2011-04-13 13:49, Stefano Stabellini wrote:
> On Wed, 13 Apr 2011, Jan Kiszka wrote:
>> On 2011-04-13 12:56, Stefano Stabellini wrote:
>>> On Tue, 12 Apr 2011, Jan Kiszka wrote:
>>>> Well, either you have a use for the VCPU state (how do you do migration
>>>> in Xen without it?), or you should probably teach QEMU in a careful &
>>>> clean way to run its device model without VCPUs - and without any
>>>> TCG-related memory consumption. For the latter, you would likely receive
>>>> kudos from KVM people as well.
>>>>
>>>> BTW, if you happen to support that crazy vmport under Xen, not updating
>>>> the VCPU state will break your neck. Also, lacking VCPUs prevent the
>>>> usage of analysis and debugging features of QEMU (monitor, gdbstub).
>>>
>>> We don't use the vcpu state in qemu because qemu takes care of device
>>> emulation only; under xen the vcpu state is saved and restored by the
>>> hypervisor.
>>
>> Just out of curiosity: So you are extracting the device states out of
>> QEMU on migration, do the same with the VCPU states from the hypervisor
>> (which wouldn't be that different from KVM in fact), and then transfer
>> that to the destination node? Is there a technical or historical reason
>> for this split-up? I mean, you still need some managing instance that
>> does the state transportation and VM control on both sides, i.e. someone
>> for the job that QEMU is doing for TCG or KVM migrations.
> 
> That someone is the "toolstack", I guess libvirt would be the closest
> thing to our toolstack in the kvm world.
> The reason why we have a toolstack performing this task rather than qemu
> is that pure PV guests don't need device emulation, so we don't even
> have qemu running most of the times if there are only linux guests
> installed in the system.

Ah, for that use case it makes some sense to me.

I bet there would also be some value in consolidating the "toolstack"
functionality over bare qemu/libvirt infrastructure (if we ignored all
existing interfaces and dependencies for a moment).

Thanks,
Jan
Stefano Stabellini April 13, 2011, 3:22 p.m. UTC | #10
On Wed, 13 Apr 2011, Jan Kiszka wrote:
> On 2011-04-13 13:49, Stefano Stabellini wrote:
> > On Wed, 13 Apr 2011, Jan Kiszka wrote:
> >> On 2011-04-13 12:56, Stefano Stabellini wrote:
> >>> On Tue, 12 Apr 2011, Jan Kiszka wrote:
> >>>> Well, either you have a use for the VCPU state (how do you do migration
> >>>> in Xen without it?), or you should probably teach QEMU in a careful &
> >>>> clean way to run its device model without VCPUs - and without any
> >>>> TCG-related memory consumption. For the latter, you would likely receive
> >>>> kudos from KVM people as well.
> >>>>
> >>>> BTW, if you happen to support that crazy vmport under Xen, not updating
> >>>> the VCPU state will break your neck. Also, lacking VCPUs prevent the
> >>>> usage of analysis and debugging features of QEMU (monitor, gdbstub).
> >>>
> >>> We don't use the vcpu state in qemu because qemu takes care of device
> >>> emulation only; under xen the vcpu state is saved and restored by the
> >>> hypervisor.
> >>
> >> Just out of curiosity: So you are extracting the device states out of
> >> QEMU on migration, do the same with the VCPU states from the hypervisor
> >> (which wouldn't be that different from KVM in fact), and then transfer
> >> that to the destination node? Is there a technical or historical reason
> >> for this split-up? I mean, you still need some managing instance that
> >> does the state transportation and VM control on both sides, i.e. someone
> >> for the job that QEMU is doing for TCG or KVM migrations.
> > 
> > That someone is the "toolstack", I guess libvirt would be the closest
> > thing to our toolstack in the kvm world.
> > The reason why we have a toolstack performing this task rather than qemu
> > is that pure PV guests don't need device emulation, so we don't even
> > have qemu running most of the times if there are only linux guests
> > installed in the system.
> 
> Ah, for that use case it makes some sense to me.
> 
> I bet there would also be some value in consolidating the "toolstack"
> functionality over bare qemu/libvirt infrastructure (if we ignored all
> existing interfaces and dependencies for a moment).

We have a libxenlight driver for libvirt already: it doesn't support
migration yet, but when it does it will probably reuse the libvirt
infrastructure for that.
However, it is probably going to be libvirt that makes the libxenlight calls
to perform the VCPU save/restore, so that we don't add a QEMU dependency
for traditional PV guests...

Patch

diff --git a/hw/pc.c b/hw/pc.c
index 6939c04..d7732d4 100644
--- a/hw/pc.c
+++ b/hw/pc.c
@@ -41,6 +41,7 @@ 
 #include "sysemu.h"
 #include "blockdev.h"
 #include "ui/qemu-spice.h"
+#include "xen.h"
 
 /* output Bochs bios info messages */
 //#define DEBUG_BIOS
@@ -918,7 +919,11 @@  static void pc_cpu_reset(void *opaque)
     CPUState *env = opaque;
 
     cpu_reset(env);
-    env->halted = !cpu_is_bsp(env);
+    if (!xen_enabled()) {
+        env->halted = !cpu_is_bsp(env);
+    } else {
+        env->halted = 1;
+    }
 }
 
 static CPUState *pc_new_cpu(const char *cpu_model)
@@ -952,7 +957,12 @@  void pc_cpus_init(const char *cpu_model)
 #endif
     }
 
-    for(i = 0; i < smp_cpus; i++) {
+    if (!xen_enabled()) {
+        for(i = 0; i < smp_cpus; i++) {
+            pc_new_cpu(cpu_model);
+        }
+    } else {
+        /* Xen require only one Qemu VCPU */
         pc_new_cpu(cpu_model);
     }
 }
@@ -980,6 +990,11 @@  void pc_memory_init(ram_addr_t ram_size,
     *above_4g_mem_size_p = above_4g_mem_size;
     *below_4g_mem_size_p = below_4g_mem_size;
 
+    if (xen_enabled()) {
+        /* Nothing to do for Xen */
+        return;
+    }
+
     linux_boot = (kernel_filename != NULL);
 
     /* allocate RAM */
diff --git a/hw/pc_piix.c b/hw/pc_piix.c
index b3ede89..6eff06e 100644
--- a/hw/pc_piix.c
+++ b/hw/pc_piix.c
@@ -37,6 +37,10 @@ 
 #include "sysbus.h"
 #include "arch_init.h"
 #include "blockdev.h"
+#include "xen.h"
+#ifdef CONFIG_XEN
+#  include <xen/hvm/hvm_info_table.h>
+#endif
 
 #define MAX_IDE_BUS 2
 
@@ -391,6 +395,16 @@  static QEMUMachine isapc_machine = {
     .max_cpus = 1,
 };
 
+#ifdef CONFIG_XEN
+static QEMUMachine xenfv_machine = {
+    .name = "xenfv",
+    .desc = "Xen Fully-virtualized PC",
+    .init = pc_init_pci,
+    .max_cpus = HVM_MAX_VCPUS,
+    .default_machine_opts = "accel=xen",
+};
+#endif
+
 static void pc_machine_init(void)
 {
     qemu_register_machine(&pc_machine);
@@ -399,6 +413,9 @@  static void pc_machine_init(void)
     qemu_register_machine(&pc_machine_v0_11);
     qemu_register_machine(&pc_machine_v0_10);
     qemu_register_machine(&isapc_machine);
+#ifdef CONFIG_XEN
+    qemu_register_machine(&xenfv_machine);
+#endif
 }
 
 machine_init(pc_machine_init);
diff --git a/hw/xen.h b/hw/xen.h
index 1fefe3a..726360a 100644
--- a/hw/xen.h
+++ b/hw/xen.h
@@ -31,4 +31,8 @@  static inline int xen_enabled(void)
 
 int xen_init(void);
 
+#if defined(CONFIG_XEN) && CONFIG_XEN_CTRL_INTERFACE_VERSION < 400
+#  define HVM_MAX_VCPUS 32
+#endif
+
 #endif /* QEMU_HW_XEN_H */
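
For reference, once the machine above is registered the device model is
selected like any other machine type. An illustrative invocation (the exact
options passed by the Xen toolstack differ, and the domain id used here is
made up):

    qemu-system-x86_64 -M xenfv -xen-domid 1 -m 512 -vnc :0

The .default_machine_opts = "accel=xen" line in the patch is what selects
the Xen accelerator for this machine by default.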