
[RFC] kvm: Enable -cpu option to hide KVM

Message ID 20140601162414.28708.22775.stgit@bling.home
State New

Commit Message

Alex Williamson June 1, 2014, 4:25 p.m. UTC
The latest Nvidia driver (337.88) specifically checks for KVM as the
hypervisor and reports Code 43 for the driver in a Windows guest when
found.  Removing or changing the KVM signature is sufficient to allow
the driver to load.  This patch adds an option to easily allow the KVM
hypervisor signature to be hidden using '-cpu no-kvm'.

Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
---
 target-i386/cpu-qom.h |    1 +
 target-i386/cpu.c     |    1 +
 target-i386/kvm.c     |   28 +++++++++++++++-------------
 3 files changed, 17 insertions(+), 13 deletions(-)
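
For illustration, a guest using the proposed option might be started with
something like the following (the CPU model, memory size, and any other
arguments are placeholders, not part of this patch; 'no-kvm=on' should be
equivalent under the usual x86 '-cpu' feature parsing):

    qemu-system-x86_64 -enable-kvm -m 4096 -cpu host,no-kvm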

Comments

Paolo Bonzini June 1, 2014, 6:29 p.m. UTC | #1
On 01/06/2014 18:25, Alex Williamson wrote:
> The latest Nvidia driver (337.88) specifically checks for KVM as the
> hypervisor and reports Code 43 for the driver in a Windows guest when
> found.  Removing or changing the KVM signature is sufficient to allow
> the driver to load.  This patch adds an option to easily allow the KVM
> hypervisor signature to be hidden using '-cpu no-kvm'.
>
> Signed-off-by: Alex Williamson <alex.williamson@redhat.com>

It's really a nit, but I think "kvm=no" is preferable (more consistent
with how hyper-v leaves are enabled).

Paolo
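
For comparison, the Hyper-V enlightenments referred to here are opt-in CPU
properties on the command line, e.g.:

    -cpu host,hv_relaxed,hv_vapic,hv_time

so the suggestion is to expose the KVM leaves the same way, as a property that
defaults to on and is switched off explicitly, e.g. '-cpu host,kvm=no' (shown
only to illustrate the naming; the exact spelling is still open at this point
in the thread).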

Alex Williamson June 1, 2014, 9:11 p.m. UTC | #2
On Sun, 2014-06-01 at 20:29 +0200, Paolo Bonzini wrote:
> On 01/06/2014 18:25, Alex Williamson wrote:
> > The latest Nvidia driver (337.88) specifically checks for KVM as the
> > hypervisor and reports Code 43 for the driver in a Windows guest when
> > found.  Removing or changing the KVM signature is sufficient to allow
> > the driver to load.  This patch adds an option to easily allow the KVM
> > hypervisor signature to be hidden using '-cpu no-kvm'.
> >
> > Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
> 
> It's really a nit, but I think "kvm=no" is preferable (more consistent
> with how hyper-v leaves are enabled).

Happy to oblige, but I'm not sure what I'm looking for.  We enable
hyper-v leaves if hyperv_enabled(), which seems to boil down to the KVM
kernel module supporting KVM_CAP_HYPERV and one or more cpu->hyperv_foo
features being enabled.  What's the command-line option I'm looking for
that has some sort of hyper-v=on|off?  Thanks,

Alex

Paolo Bonzini June 2, 2014, 7:09 a.m. UTC | #3
On 01/06/2014 23:11, Alex Williamson wrote:
>> >
>> > It's really a nit, but I think "kvm=no" is preferable (more consistent
>> > with how hyper-v leaves are enabled).
> Happy to oblige, but I'm not sure what I'm looking for.  We enable
> hyper-v leaves if hyperv_enabled(), which seems to boil down to the KVM
> kernel module supporting KVM_CAP_HYPERV and one or more cpu->hyperv_foo
> features being enabled.  What's the command-line option I'm looking for
> that has some sort of hyper-v=on|off?  Thanks,

Same as your "no-kvm", just with the default flipped from false to true.

Paolo
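
In other words, with the flipped default the KVM signature stays visible
unless the user masks it explicitly, along the lines of:

    -cpu host,kvm=off

(illustrative only; the final property name is what this sub-thread is
settling).
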
Michael Tokarev June 2, 2014, 10:32 a.m. UTC | #4
01.06.2014 20:25, Alex Williamson wrote:
> The latest Nvidia driver (337.88) specifically checks for KVM as the
> hypervisor and reports Code 43 for the driver in a Windows guest when
> found.  Removing or changing the KVM signature is sufficient to allow
> the driver to load.

Hmm.. Why does it do such a thing?  Is it in order to prevent the driver
from working in a virtualized Windows, i.e. to prevent VGA passthrough
from working?

If that's the case, I think it is a losing game, because they'll be adding
more, and cleverer, checks in the next version.

Thanks,

/mjt
Alex Williamson June 2, 2014, 1:30 p.m. UTC | #5
On Mon, 2014-06-02 at 14:32 +0400, Michael Tokarev wrote:
> 01.06.2014 20:25, Alex Williamson wrote:
> > The latest Nvidia driver (337.88) specifically checks for KVM as the
> > hypervisor and reports Code 43 for the driver in a Windows guest when
> > found.  Removing or changing the KVM signature is sufficient to allow
> > the driver to load.
> 
> Hmm.. Why does it do such a thing?  Is it in order to prevent the driver
> from working in a virtualized Windows, i.e. to prevent VGA passthrough
> from working?
> 
> If that's the case, I think it is a losing game, because they'll be adding
> more, and cleverer, checks in the next version.

Then they'll be pissing off more users and driving them to AMD by doing
so.  In any case, having the ability to hide the hypervisor seems to
stand on its own.  What if we want to test whether a guest behavior is
the result of a paravirtual interface?  What if a user wants to hide the
hypervisor in order to further reduce the exposure surface to the VM?
There are reasons beyond an arms race with Nvidia to want a feature like
this.  Thanks,

Alex
Alex Williamson June 2, 2014, 2:42 p.m. UTC | #6
On Mon, 2014-06-02 at 09:09 +0200, Paolo Bonzini wrote:
> On 01/06/2014 23:11, Alex Williamson wrote:
> >> >
> >> > It's really a nit, but I think "kvm=no" is preferable (more consistent
> >> > with how hyper-v leaves are enabled).
> > Happy to oblige, but I'm not sure what I'm looking for.  We enable
> > hyper-v leaves if hyperv_enabled(), which seems to boil down to the KVM
> > kernel module supporting KVM_CAP_HYPERV and one or more cpu->hyperv_foo
> > features being enabled.  What's the command-line option I'm looking for
> > that has some sort of hyper-v=on|off?  Thanks,
> 
> Same as your "no-kvm", just with the default flipped from false to true.

Ah, easy enough.  Do we want to limit the scope a bit by indicating
exactly what is getting disabled, perhaps kvm-msr=on|off?  Thanks,

Alex
Paolo Bonzini June 2, 2014, 3:55 p.m. UTC | #7
On 02/06/2014 16:42, Alex Williamson wrote:
>> > Same as your "no-kvm", just with the default flipped from false to true.
> Ah, easy enough.  Do we want to limit the scope a bit by indicating
> exactly what is getting disabled, perhaps kvm-msr=on|off?  Thanks,

The capabilities are actually already available for selective disabling 
via CPUID features (search for kvm_feature_name).  What's missing is a 
master property to disable the CPUID leaves themselves, so your patch 
provides exactly what's needed.

Paolo
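
For reference, the individual KVM paravirtual features can already be masked
with the usual '-' feature syntax (names as defined in kvm_feature_name; the
exact spellings here are illustrative):

    -cpu host,-kvm_asyncpf,-kvm_steal_time,-kvm_pv_eoi

whereas the master property discussed here removes the KVM CPUID leaves
(0x40000000 and 0x40000001) altogether.
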
Paolo Bonzini June 2, 2014, 3:55 p.m. UTC | #8
On 02/06/2014 15:30, Alex Williamson wrote:
> Then they'll be pissing off more users and driving them to AMD by doing
> so.  In any case, having the ability to hide the hypervisor seems to
> stand on it's own.  What if we want to test whether a guest behavior is
> the result of a paravirtual interface?  What if a user wants to hide the
> hypervisor in order to further reduce the exposure surface to the VM?
> There are reasons beyond an arms race with Nvidia to want a feature like
> this.  Thanks,

I totally agree with you.

This doesn't mean that nVidia doesn't deserve some bad press for 
starting this kind of arms race, of course. :)

Paolo
Michael Tokarev June 2, 2014, 6:01 p.m. UTC | #9
02.06.2014 17:30, Alex Williamson wrote:
> On Mon, 2014-06-02 at 14:32 +0400, Michael Tokarev wrote:
>> 01.06.2014 20:25, Alex Williamson wrote:
>>> The latest Nvidia driver (337.88) specifically checks for KVM as the
>>> hypervisor and reports Code 43 for the driver in a Windows guest when
>>> found.  Removing or changing the KVM signature is sufficient to allow
>>> the driver to load.
>>
>> Hmm.. Why does it do such a thing?  Is it in order to prevent the driver
>> from working in a virtualized Windows, i.e. to prevent VGA passthrough
>> from working?
>>
>> If that's the case, I think it is a losing game, because they'll be adding
>> more, and cleverer, checks in the next version.
> 
> Then they'll be pissing off more users and driving them to AMD by doing
> so.  In any case, having the ability to hide the hypervisor seems to
> stand on it's own.  What if we want to test whether a guest behavior is
> the result of a paravirtual interface?  What if a user wants to hide the
> hypervisor in order to further reduce the exposure surface to the VM?
> There are reasons beyond an arms race with Nvidia to want a feature like
> this.  Thanks,

You answer as if I were strongly against the change.  I'm not.
What I'm against is the reasoning.  This way you're just
accepting the arms race.

Thanks,

/mjt
Alex Williamson June 2, 2014, 6:37 p.m. UTC | #10
On Mon, 2014-06-02 at 22:01 +0400, Michael Tokarev wrote:
> 02.06.2014 17:30, Alex Williamson wrote:
> > On Mon, 2014-06-02 at 14:32 +0400, Michael Tokarev wrote:
> >> 01.06.2014 20:25, Alex Williamson wrote:
> >>> The latest Nvidia driver (337.88) specifically checks for KVM as the
> >>> hypervisor and reports Code 43 for the driver in a Windows guest when
> >>> found.  Removing or changing the KVM signature is sufficient to allow
> >>> the driver to load.
> >>
> >> Hmm.. Why does it do such a thing?  Is it in order to prevent the driver
> >> from working in a virtualized Windows, i.e. to prevent VGA passthrough
> >> from working?
> >>
> >> If that's the case, I think it is a losing game, because they'll be adding
> >> more, and cleverer, checks in the next version.
> > 
> > Then they'll be pissing off more users and driving them to AMD by doing
> > so.  In any case, having the ability to hide the hypervisor seems to
> > stand on its own.  What if we want to test whether a guest behavior is
> > the result of a paravirtual interface?  What if a user wants to hide the
> > hypervisor in order to further reduce the exposure surface to the VM?
> > There are reasons beyond an arms race with Nvidia to want a feature like
> > this.  Thanks,
> 
> You answer as if I were strongly against the change.  I'm not.
> What I'm against is the reasoning.  This way you're just
> accepting the arms race.

I'm not sure what you're looking for.  Would it be more acceptable if I
didn't mention the motivation for adding this feature in the commit log?
Does it make the feature less worthwhile because it has an immediate
practical application?  If we agree that this feature is worthwhile
regardless of the Nvidia situation, how do we add it without
theoretically accepting an arms race?  Thanks,

Alex
Bandan Das June 2, 2014, 7:03 p.m. UTC | #11
Michael Tokarev <mjt@tls.msk.ru> writes:

> 02.06.2014 17:30, Alex Williamson wrote:
>> On Mon, 2014-06-02 at 14:32 +0400, Michael Tokarev wrote:
>>> 01.06.2014 20:25, Alex Williamson wrote:
>>>> The latest Nvidia driver (337.88) specifically checks for KVM as the
>>>> hypervisor and reports Code 43 for the driver in a Windows guest when
>>>> found.  Removing or changing the KVM signature is sufficient to allow
>>>> the driver to load.
>>>
>>> Hmm.. Why does it do such a thing?  Is it in order to prevent the driver
>>> from working in a virtualized Windows, i.e. to prevent VGA passthrough
>>> from working?
>>>
>>> If that's the case, I think it is a losing game, because they'll be adding
>>> more, and cleverer, checks in the next version.
>> 
>> Then they'll be pissing off more users and driving them to AMD by doing
>> so.  In any case, having the ability to hide the hypervisor seems to
>> stand on it's own.  What if we want to test whether a guest behavior is
>> the result of a paravirtual interface?  What if a user wants to hide the
>> hypervisor in order to further reduce the exposure surface to the VM?
>> There are reasons beyond an arms race with Nvidia to want a feature like
>> this.  Thanks,
>
> You answer as if I were strongly against the change.  I'm not.
> What I'm against is the reasoning.  This way you're just
> accepting the arms race.

Couldn't the arms race be a little less explicit if the commit message
is changed? :)  Why mention Nvidia at all?  Just state that the intended
application is for cases where the user might still want to run a piece
of software that bails out when KVM is detected.

> Thanks,
>
> /mjt
Alex Williamson June 2, 2014, 7:18 p.m. UTC | #12
On Mon, 2014-06-02 at 15:03 -0400, Bandan Das wrote:
> Michael Tokarev <mjt@tls.msk.ru> writes:
> 
> > 02.06.2014 17:30, Alex Williamson wrote:
> >> On Mon, 2014-06-02 at 14:32 +0400, Michael Tokarev wrote:
> >>> 01.06.2014 20:25, Alex Williamson wrote:
> >>>> The latest Nvidia driver (337.88) specifically checks for KVM as the
> >>>> hypervisor and reports Code 43 for the driver in a Windows guest when
> >>>> found.  Removing or changing the KVM signature is sufficient to allow
> >>>> the driver to load.
> >>>
> >>> Hmm.. Why does it do such a thing?  Is it in order to prevent the driver
> >>> from working in a virtualized Windows, i.e. to prevent VGA passthrough
> >>> from working?
> >>>
> >>> If that's the case, I think it is a losing game, because they'll be adding
> >>> more, and cleverer, checks in the next version.
> >> 
> >> Then they'll be pissing off more users and driving them to AMD by doing
> >> so.  In any case, having the ability to hide the hypervisor seems to
> >> stand on it's own.  What if we want to test whether a guest behavior is
> >> the result of a paravirtual interface?  What if a user wants to hide the
> >> hypervisor in order to further reduce the exposure surface to the VM?
> >> There are reasons beyond an arms race with Nvidia to want a feature like
> >> this.  Thanks,
> >
> > You answer as if I were strongly against the change.  I'm not.
> > What I'm against is the reasoning.  This way you're just
> > accepting the arms race.
> 
> Couldn't the arms race be a little less explicit if the commit message
> is changed :) ? Why mention Nvidia at all ? Just state that the intended 
> application is for cases where the user might still want to run a piece
> of software that bails out when KVM is detected.

Would we be helping our users by omitting that from the commit log
though?  Thanks,

Alex

Patch

diff --git a/target-i386/cpu-qom.h b/target-i386/cpu-qom.h
index e9b3d57..99bb059 100644
--- a/target-i386/cpu-qom.h
+++ b/target-i386/cpu-qom.h
@@ -87,6 +87,7 @@  typedef struct X86CPU {
     bool hyperv_time;
     bool check_cpuid;
     bool enforce_cpuid;
+    bool no_kvm;
 
     /* if true the CPUID code directly forward host cache leaves to the guest */
     bool cache_info_passthrough;
diff --git a/target-i386/cpu.c b/target-i386/cpu.c
index 042a48d..8e6ce9c 100644
--- a/target-i386/cpu.c
+++ b/target-i386/cpu.c
@@ -2792,6 +2792,7 @@  static Property x86_cpu_properties[] = {
     DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
     DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, false),
     DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
+    DEFINE_PROP_BOOL("no-kvm", X86CPU, no_kvm, false),
     DEFINE_PROP_END_OF_LIST()
 };
 
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index 0d894ef..920898e 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -528,23 +528,25 @@  int kvm_arch_init_vcpu(CPUState *cs)
         has_msr_hv_hypercall = true;
     }
 
-    memcpy(signature, "KVMKVMKVM\0\0\0", 12);
-    c = &cpuid_data.entries[cpuid_i++];
-    c->function = KVM_CPUID_SIGNATURE | kvm_base;
-    c->eax = 0;
-    c->ebx = signature[0];
-    c->ecx = signature[1];
-    c->edx = signature[2];
+    if (!cpu->no_kvm) {
+        memcpy(signature, "KVMKVMKVM\0\0\0", 12);
+        c = &cpuid_data.entries[cpuid_i++];
+        c->function = KVM_CPUID_SIGNATURE | kvm_base;
+        c->eax = 0;
+        c->ebx = signature[0];
+        c->ecx = signature[1];
+        c->edx = signature[2];
 
-    c = &cpuid_data.entries[cpuid_i++];
-    c->function = KVM_CPUID_FEATURES | kvm_base;
-    c->eax = env->features[FEAT_KVM];
+        c = &cpuid_data.entries[cpuid_i++];
+        c->function = KVM_CPUID_FEATURES | kvm_base;
+        c->eax = env->features[FEAT_KVM];
 
-    has_msr_async_pf_en = c->eax & (1 << KVM_FEATURE_ASYNC_PF);
+        has_msr_async_pf_en = c->eax & (1 << KVM_FEATURE_ASYNC_PF);
 
-    has_msr_pv_eoi_en = c->eax & (1 << KVM_FEATURE_PV_EOI);
+        has_msr_pv_eoi_en = c->eax & (1 << KVM_FEATURE_PV_EOI);
 
-    has_msr_kvm_steal_time = c->eax & (1 << KVM_FEATURE_STEAL_TIME);
+        has_msr_kvm_steal_time = c->eax & (1 << KVM_FEATURE_STEAL_TIME);
+    }
 
     cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);