
[v5] KVM: VMX: Enable XSAVE/XRSTOR for guest

Message ID 1274865557-31137-1-git-send-email-sheng@linux.intel.com
State New

Commit Message

Sheng Yang May 26, 2010, 9:19 a.m. UTC
From: Dexuan Cui <dexuan.cui@intel.com>

This patch enables the guest to use the XSAVE/XRSTOR instructions.

We assume that host_xcr0 contains all the bits the host OS supports.

We load xcr0 the same way we handle the FPU: as late as we can.

Signed-off-by: Dexuan Cui <dexuan.cui@intel.com>
Signed-off-by: Sheng Yang <sheng@linux.intel.com>
---

I've done a prototype of LM support and will send it out tomorrow. But the
test case on the QEMU side seems to have a problem: I always get a segfault
at:
qemu-kvm/hw/fw_cfg.c:223
223         s->entries[arch][key].data = data;

I haven't looked into it yet, but maybe someone knows the reason...

 arch/x86/include/asm/kvm_host.h |    3 +
 arch/x86/include/asm/vmx.h      |    1 +
 arch/x86/kvm/kvm_cache_regs.h   |    6 ++
 arch/x86/kvm/vmx.c              |   26 ++++++++++
 arch/x86/kvm/x86.c              |  101 ++++++++++++++++++++++++++++++++++++---
 include/linux/kvm_host.h        |    2 +-
 6 files changed, 131 insertions(+), 8 deletions(-)

Comments

Avi Kivity May 26, 2010, 9:54 a.m. UTC | #1
On 05/26/2010 12:19 PM, Sheng Yang wrote:
> From: Dexuan Cui <dexuan.cui@intel.com>
>
> This patch enables the guest to use the XSAVE/XRSTOR instructions.
>
> We assume that host_xcr0 contains all the bits the host OS supports.
>
> We load xcr0 the same way we handle the FPU: as late as we can.
>

Looks really good now, only a couple of minor comments and I think we're 
done.

> I've done a prototype of LM support and will send it out tomorrow. But the
> test case on the QEMU side seems to have a problem: I always get a
> segfault at:
> qemu-kvm/hw/fw_cfg.c:223
> 223         s->entries[arch][key].data = data;
>
> I haven't looked into it yet, but maybe someone knows the reason...
>

I saw this too, then it disappeared, can't remember why.  Perhaps a 
clean build is needed?

>
> +static int handle_xsetbv(struct kvm_vcpu *vcpu)
> +{
> +	u64 new_bv = kvm_read_edx_eax(vcpu);
> +
> +	if (kvm_register_read(vcpu, VCPU_REGS_RCX) != 0)
> +		goto err;
> +	if (vmx_get_cpl(vcpu) != 0)
> +		goto err;
> +	if (!(new_bv & XSTATE_FP))
> +		goto err;
> +	if ((new_bv & XSTATE_YMM) && !(new_bv & XSTATE_SSE))
> +		goto err;
> +	if (new_bv & ~host_xcr0)
> +		goto err;
> +	vcpu->arch.xcr0 = new_bv;
> +	xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
>

Please move all the code above to kvm_set_xcr0() in x86.c, since it's 
not vendor specific.  This would also allow you to make host_xcr0 local 
to x86.c.
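
Something like this, perhaps -- just a sketch, not tested; I'm assuming
kvm_x86_ops->get_cpl() as the vendor-neutral stand-in for vmx_get_cpl(),
and x86.c would need <asm/xcr.h> as well:

	/* x86.c -- host_xcr0 can then become static to this file */
	int kvm_set_xcr0(struct kvm_vcpu *vcpu, u64 new_bv)
	{
		/* XSETBV is only legal at CPL 0, and only for XCR0 (ECX == 0) */
		if (kvm_x86_ops->get_cpl(vcpu) != 0)
			return 1;
		if (kvm_register_read(vcpu, VCPU_REGS_RCX) != 0)
			return 1;
		/* x87 state must always stay enabled */
		if (!(new_bv & XSTATE_FP))
			return 1;
		/* YMM state depends on SSE state */
		if ((new_bv & XSTATE_YMM) && !(new_bv & XSTATE_SSE))
			return 1;
		/* no bits beyond what the host itself enabled */
		if (new_bv & ~host_xcr0)
			return 1;
		vcpu->arch.xcr0 = new_bv;
		xsetbv(XCR_XFEATURE_ENABLED_MASK, new_bv);
		return 0;
	}
	EXPORT_SYMBOL_GPL(kvm_set_xcr0);

	/* vmx.c -- the exit handler shrinks to plumbing */
	static int handle_xsetbv(struct kvm_vcpu *vcpu)
	{
		if (kvm_set_xcr0(vcpu, kvm_read_edx_eax(vcpu))) {
			kvm_inject_gp(vcpu, 0);
			return 1;
		}
		skip_emulated_instruction(vcpu);
		return 1;
	}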

> +	skip_emulated_instruction(vcpu);
> +	return 1;
> +err:
> +	kvm_inject_gp(vcpu, 0);
> +	return 1;
> +}
>
>   /*
>    * List of msr numbers which we expose to userspace through KVM_GET_MSRS
>    * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
> @@ -1813,6 +1847,14 @@ static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
>   	r = 0;
>   	kvm_apic_set_version(vcpu);
>   	kvm_x86_ops->cpuid_update(vcpu);
> +	update_cpuid(vcpu);
> +
> +	/*
> +	 * Ensure guest xcr0 is valid for loading, also using as
> +	 * the indicator of if guest cpuid has XSAVE
> +	 */
> +	if (guest_cpuid_has_xsave(vcpu))
> +		vcpu->arch.xcr0 = XSTATE_FP;
>

This is problematic because it enforces an ordering between KVM_SET_XCR 
and KVM_SET_CPUID.  So I think you can use kvm_read_cr4_bits(OSXSAVE) 
instead of checking vcpu->arch.xcr0.  Sorry for the bad advice earlier.
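
Concretely, something like this (a sketch -- initializing xcr0 from
kvm_arch_vcpu_init() is just my suggestion, any spot that runs once per
vcpu would do):

	/* kvm_arch_vcpu_init(): legal xcr0 default, set once,
	 * with no dependency on ioctl ordering */
	vcpu->arch.xcr0 = XSTATE_FP;

	/* and the fpu-load path keys off the architectural enable bit
	 * instead of vcpu->arch.xcr0: */
	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) {
		xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
		vcpu->guest_xcr0_loaded = 0;
	}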

> @@ -5134,12 +5207,26 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
>
>   	vcpu->guest_fpu_loaded = 1;
>   	unlazy_fpu(current);
> +	/*
> +	 * Restore all possible states in the guest,
> +	 * and assume host would use all available bits.
> +	 * Guest xcr0 would be loaded later.
> +	 */
> +	if (cpu_has_xsave && vcpu->arch.xcr0) {
> +		xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
> +		vcpu->guest_xcr0_loaded = 0;
> +	}
>

Has to be before unlazy_fpu(), so host fpu uses host xcr0.

It's sufficient to check for guest cr4.osxsave, no need to check for 
cpu_has_xsave.  But you need to check for guest_xcr0_loaded!

>   	fpu_restore_checking(&vcpu->arch.guest_fpu);
>   	trace_kvm_fpu(1);
>   }
>
>   void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
>   {
> +	if (vcpu->guest_xcr0_loaded) {
> +		vcpu->guest_xcr0_loaded = 0;
> +		xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
> +	}
> +
>

This duplicates the above.  So better to have 
kvm_load_guest_xcr0()/kvm_put_guest_xcr0().
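
Something like this (sketch only; it also folds in the ordering and
cr4.osxsave/guest_xcr0_loaded points from above):

	static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
	{
		if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
				!vcpu->guest_xcr0_loaded) {
			xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
			vcpu->guest_xcr0_loaded = 1;
		}
	}

	static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
	{
		if (vcpu->guest_xcr0_loaded) {
			xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
			vcpu->guest_xcr0_loaded = 0;
		}
	}

kvm_load_guest_fpu() would then call kvm_put_guest_xcr0() before
unlazy_fpu() (so the host fpu is saved under the host xcr0),
kvm_put_guest_fpu() would call it unconditionally at entry, and
vcpu_enter_guest() would call kvm_load_guest_xcr0() after
kvm_load_guest_fpu().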
Jan Kiszka May 26, 2010, 10:52 a.m. UTC | #2
Avi Kivity wrote:
> On 05/26/2010 12:19 PM, Sheng Yang wrote:
>> I've done a prototype of LM support and will send it out tomorrow. But
>> the test case on the QEMU side seems to have a problem: I always get a
>> segfault at:
>> qemu-kvm/hw/fw_cfg.c:223
>> 223         s->entries[arch][key].data = data;
>>
>> I haven't looked into it yet, but maybe someone knows the reason...
>>
> 
> I saw this too, then it disappeared, can't remember why.  Perhaps a
> clean build is needed?

qemu-kvm apparently lacks upstream commit
a71cd2a523f9b35ffeba8de3c536e494e255e6ea, which should resolve at least
some of those mysterious crashes.

Jan
Avi Kivity May 26, 2010, 11:24 a.m. UTC | #3
On 05/26/2010 01:52 PM, Jan Kiszka wrote:
> Avi Kivity wrote:
>
>> On 05/26/2010 12:19 PM, Sheng Yang wrote:
>>
>>> I've done a prototype of LM support and will send it out tomorrow. But
>>> the test case on the QEMU side seems to have a problem: I always get a
>>> segfault at:
>>> qemu-kvm/hw/fw_cfg.c:223
>>> 223         s->entries[arch][key].data = data;
>>>
>>> I haven't looked into it yet, but maybe someone knows the reason...
>>>
>> I saw this too, then it disappeared, can't remember why.  Perhaps a
>> clean build is needed?
>>
> qemu-kvm apparently lacks upstream commit
> a71cd2a523f9b35ffeba8de3c536e494e255e6ea, which should resolve at least
> some of those mysterious crashes.
>

That explains why a rebuild fixed it.  I'll merge qemu.git shortly.

Patch

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index d08bb4a..532b220 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -302,6 +302,7 @@  struct kvm_vcpu_arch {
 	} update_pte;
 
 	struct fpu guest_fpu;
+	u64 xcr0;
 
 	gva_t mmio_fault_cr2;
 	struct kvm_pio_request pio;
@@ -564,6 +565,8 @@  u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
 
 extern bool tdp_enabled;
 
+extern u64 host_xcr0;
+
 enum emulation_result {
 	EMULATE_DONE,       /* no further processing */
 	EMULATE_DO_MMIO,      /* kvm_run filled with mmio request */
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 9e6779f..346ea66 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -266,6 +266,7 @@  enum vmcs_field {
 #define EXIT_REASON_EPT_VIOLATION       48
 #define EXIT_REASON_EPT_MISCONFIG       49
 #define EXIT_REASON_WBINVD		54
+#define EXIT_REASON_XSETBV		55
 
 /*
  * Interruption-information format
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index d2a98f8..6491ac8 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -71,4 +71,10 @@  static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
 	return kvm_read_cr4_bits(vcpu, ~0UL);
 }
 
+static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
+{
+	return (kvm_register_read(vcpu, VCPU_REGS_RAX) & -1u)
+		| ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32);
+}
+
 #endif
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 99ae513..a946841 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -36,6 +36,8 @@ 
 #include <asm/vmx.h>
 #include <asm/virtext.h>
 #include <asm/mce.h>
+#include <asm/i387.h>
+#include <asm/xcr.h>
 
 #include "trace.h"
 
@@ -3354,6 +3356,29 @@  static int handle_wbinvd(struct kvm_vcpu *vcpu)
 	return 1;
 }
 
+static int handle_xsetbv(struct kvm_vcpu *vcpu)
+{
+	u64 new_bv = kvm_read_edx_eax(vcpu);
+
+	if (kvm_register_read(vcpu, VCPU_REGS_RCX) != 0)
+		goto err;
+	if (vmx_get_cpl(vcpu) != 0)
+		goto err;
+	if (!(new_bv & XSTATE_FP))
+		goto err;
+	if ((new_bv & XSTATE_YMM) && !(new_bv & XSTATE_SSE))
+		goto err;
+	if (new_bv & ~host_xcr0)
+		goto err;
+	vcpu->arch.xcr0 = new_bv;
+	xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
+	skip_emulated_instruction(vcpu);
+	return 1;
+err:
+	kvm_inject_gp(vcpu, 0);
+	return 1;
+}
+
 static int handle_apic_access(struct kvm_vcpu *vcpu)
 {
 	return emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DONE;
@@ -3632,6 +3657,7 @@  static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
 	[EXIT_REASON_TPR_BELOW_THRESHOLD]     = handle_tpr_below_threshold,
 	[EXIT_REASON_APIC_ACCESS]             = handle_apic_access,
 	[EXIT_REASON_WBINVD]                  = handle_wbinvd,
+	[EXIT_REASON_XSETBV]                  = handle_xsetbv,
 	[EXIT_REASON_TASK_SWITCH]             = handle_task_switch,
 	[EXIT_REASON_MCE_DURING_VMENTRY]      = handle_machine_check,
 	[EXIT_REASON_EPT_VIOLATION]	      = handle_ept_violation,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 7be1d36..c04d3cb 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -64,6 +64,7 @@ 
 	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
 			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE	\
 			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR	\
+			  | X86_CR4_OSXSAVE \
 			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
 
 #define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
@@ -149,6 +150,14 @@  struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ NULL }
 };
 
+u64 __read_mostly host_xcr0;
+EXPORT_SYMBOL_GPL(host_xcr0);
+
+static inline u32 bit(int bitno)
+{
+	return 1 << (bitno & 31);
+}
+
 static void kvm_on_user_return(struct user_return_notifier *urn)
 {
 	unsigned slot;
@@ -473,6 +482,30 @@  void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
 }
 EXPORT_SYMBOL_GPL(kvm_lmsw);
 
+static bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpuid_entry2 *best;
+
+	best = kvm_find_cpuid_entry(vcpu, 1, 0);
+	return best && (best->ecx & bit(X86_FEATURE_XSAVE));
+}
+
+static void update_cpuid(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpuid_entry2 *best;
+
+	best = kvm_find_cpuid_entry(vcpu, 1, 0);
+	if (!best)
+		return;
+
+	/* Update OSXSAVE bit */
+	if (cpu_has_xsave && best->function == 0x1) {
+		best->ecx &= ~(bit(X86_FEATURE_OSXSAVE));
+		if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
+			best->ecx |= bit(X86_FEATURE_OSXSAVE);
+	}
+}
+
 int __kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
 	unsigned long old_cr4 = kvm_read_cr4(vcpu);
@@ -481,6 +514,9 @@  int __kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	if (cr4 & CR4_RESERVED_BITS)
 		return 1;
 
+	if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE))
+		return 1;
+
 	if (is_long_mode(vcpu)) {
 		if (!(cr4 & X86_CR4_PAE))
 			return 1;
@@ -497,6 +533,9 @@  int __kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	if ((cr4 ^ old_cr4) & pdptr_bits)
 		kvm_mmu_reset_context(vcpu);
 
+	if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
+		update_cpuid(vcpu);
+
 	return 0;
 }
 
@@ -665,11 +704,6 @@  int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
 }
 EXPORT_SYMBOL_GPL(kvm_get_dr);
 
-static inline u32 bit(int bitno)
-{
-	return 1 << (bitno & 31);
-}
-
 /*
  * List of msr numbers which we expose to userspace through KVM_GET_MSRS
  * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
@@ -1813,6 +1847,14 @@  static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
 	r = 0;
 	kvm_apic_set_version(vcpu);
 	kvm_x86_ops->cpuid_update(vcpu);
+	update_cpuid(vcpu);
+
+	/*
+	 * Ensure guest xcr0 is valid for loading, also using as
+	 * the indicator of if guest cpuid has XSAVE
+	 */
+	if (guest_cpuid_has_xsave(vcpu))
+		vcpu->arch.xcr0 = XSTATE_FP;
 
 out_free:
 	vfree(cpuid_entries);
@@ -1836,6 +1878,15 @@  static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
 	vcpu->arch.cpuid_nent = cpuid->nent;
 	kvm_apic_set_version(vcpu);
 	kvm_x86_ops->cpuid_update(vcpu);
+	update_cpuid(vcpu);
+
+	/*
+	 * Ensure guest xcr0 is valid for loading, also using as
+	 * the indicator of if guest cpuid has XSAVE
+	 */
+	if (guest_cpuid_has_xsave(vcpu))
+		vcpu->arch.xcr0 = XSTATE_FP;
+
 	return 0;
 
 out:
@@ -1916,7 +1967,7 @@  static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 		0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ |
 		0 /* Reserved, DCA */ | F(XMM4_1) |
 		F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
-		0 /* Reserved, XSAVE, OSXSAVE */;
+		0 /* Reserved, AES */ | F(XSAVE) | 0 /* OSXSAVE */;
 	/* cpuid 0x80000001.ecx */
 	const u32 kvm_supported_word6_x86_features =
 		F(LAHF_LM) | F(CMP_LEGACY) | F(SVM) | 0 /* ExtApicSpace */ |
@@ -1931,7 +1982,7 @@  static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 
 	switch (function) {
 	case 0:
-		entry->eax = min(entry->eax, (u32)0xb);
+		entry->eax = min(entry->eax, (u32)0xd);
 		break;
 	case 1:
 		entry->edx &= kvm_supported_word0_x86_features;
@@ -1989,6 +2040,20 @@  static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 		}
 		break;
 	}
+	case 0xd: {
+		int i;
+
+		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+		for (i = 1; *nent < maxnent; ++i) {
+			if (entry[i - 1].eax == 0 && i != 2)
+				break;
+			do_cpuid_1_ent(&entry[i], function, i);
+			entry[i].flags |=
+			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+			++*nent;
+		}
+		break;
+	}
 	case KVM_CPUID_SIGNATURE: {
 		char signature[12] = "KVMKVMKVM\0\0";
 		u32 *sigptr = (u32 *)signature;
@@ -4124,6 +4189,9 @@  int kvm_arch_init(void *opaque)
 
 	perf_register_guest_info_callbacks(&kvm_guest_cbs);
 
+	if (cpu_has_xsave)
+		host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
+
 	return 0;
 
 out:
@@ -4567,6 +4635,11 @@  static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	kvm_x86_ops->prepare_guest_switch(vcpu);
 	if (vcpu->fpu_active)
 		kvm_load_guest_fpu(vcpu);
+	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
+			!vcpu->guest_xcr0_loaded) {
+		xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
+		vcpu->guest_xcr0_loaded = 1;
+	}
 
 	atomic_set(&vcpu->guest_mode, 1);
 	smp_wmb();
@@ -5134,12 +5207,26 @@  void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
 
 	vcpu->guest_fpu_loaded = 1;
 	unlazy_fpu(current);
+	/*
+	 * Restore all possible states in the guest,
+	 * and assume host would use all available bits.
+	 * Guest xcr0 would be loaded later.
+	 */
+	if (cpu_has_xsave && vcpu->arch.xcr0) {
+		xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
+		vcpu->guest_xcr0_loaded = 0;
+	}
 	fpu_restore_checking(&vcpu->arch.guest_fpu);
 	trace_kvm_fpu(1);
 }
 
 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 {
+	if (vcpu->guest_xcr0_loaded) {
+		vcpu->guest_xcr0_loaded = 0;
+		xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
+	}
+
 	if (!vcpu->guest_fpu_loaded)
 		return;
 
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 4e8fdbf..3784d58 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -88,7 +88,7 @@  struct kvm_vcpu {
 	int srcu_idx;
 
 	int fpu_active;
-	int guest_fpu_loaded;
+	int guest_fpu_loaded, guest_xcr0_loaded;
 	wait_queue_head_t wq;
 	int sigset_active;
 	sigset_t sigset;