[SRU,Cosmic,1/4] kvm: svm: Ensure an IBPB on all affected CPUs when freeing a vmcb

Message ID 20190325135555.23768-2-juergh@canonical.com
State New
Series Spectre v2 updates

Commit Message

Juerg Haefliger March 25, 2019, 1:55 p.m. UTC
From: Jim Mattson <jmattson@google.com>

Previously, we only called indirect_branch_prediction_barrier on the
logical CPU that freed a vmcb. This function should be called on all
logical CPUs that last loaded the vmcb in question.

Fixes: 15d45071523d ("KVM/x86: Add IBPB support")
Reported-by: Neel Natu <neelnatu@google.com>
Signed-off-by: Jim Mattson <jmattson@google.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

CVE-2017-5715

(cherry picked from commit fd65d3142f734bc4376053c8d75670041903134d)
Signed-off-by: Juerg Haefliger <juergh@canonical.com>
---
 arch/x86/kvm/svm.c | 20 +++++++++++++++-----
 1 file changed, 15 insertions(+), 5 deletions(-)
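
For context, the barrier this patch relocates backs up the per-CPU check in
svm_vcpu_load(), which issues an IBPB only when the CPU is about to run a vmcb
it has not seen before. Roughly (an abridged sketch of the surrounding upstream
code, not part of this diff):

    static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
    {
    	struct vcpu_svm *svm = to_svm(vcpu);
    	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
    	...
    	/* Only flush branch predictions when switching to a different vmcb. */
    	if (sd->current_vmcb != svm->vmcb) {
    		sd->current_vmcb = svm->vmcb;
    		indirect_branch_prediction_barrier();
    	}
    	...
    }

If a freed vmcb page is recycled for a new guest, a stale current_vmcb pointer
on some CPU can match the new vmcb's address, so the check wrongly concludes no
barrier is needed - the "false negative" the patch refers to. Clearing the
pointer on every CPU that may still record it closes that window.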

Comments

Tyler Hicks March 25, 2019, 6:18 p.m. UTC | #1
On 2019-03-25 14:55:52, Juerg Haefliger wrote:
> From: Jim Mattson <jmattson@google.com>
> 
> Previously, we only called indirect_branch_prediction_barrier on the
> logical CPU that freed a vmcb. This function should be called on all
> logical CPUs that last loaded the vmcb in question.
> 
> Fixes: 15d45071523d ("KVM/x86: Add IBPB support")
> Reported-by: Neel Natu <neelnatu@google.com>
> Signed-off-by: Jim Mattson <jmattson@google.com>
> Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
> Cc: stable@vger.kernel.org
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> 
> CVE-2017-5715
> 
> (cherry picked from commit fd65d3142f734bc4376053c8d75670041903134d)
> Signed-off-by: Juerg Haefliger <juergh@canonical.com>

Acked-by: Tyler Hicks <tyhicks@canonical.com>

Tyler

> ---
>  arch/x86/kvm/svm.c | 20 +++++++++++++++-----
>  1 file changed, 15 insertions(+), 5 deletions(-)
> 
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index 3e59a187fe30..75d5f180ffa5 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -2188,21 +2188,31 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
>  	return ERR_PTR(err);
>  }
>  
> +static void svm_clear_current_vmcb(struct vmcb *vmcb)
> +{
> +	int i;
> +
> +	for_each_online_cpu(i)
> +		cmpxchg(&per_cpu(svm_data, i)->current_vmcb, vmcb, NULL);
> +}
> +
>  static void svm_free_vcpu(struct kvm_vcpu *vcpu)
>  {
>  	struct vcpu_svm *svm = to_svm(vcpu);
>  
> +	/*
> +	 * The vmcb page can be recycled, causing a false negative in
> +	 * svm_vcpu_load(). So, ensure that no logical CPU has this
> +	 * vmcb page recorded as its current vmcb.
> +	 */
> +	svm_clear_current_vmcb(svm->vmcb);
> +
>  	__free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT));
>  	__free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
>  	__free_page(virt_to_page(svm->nested.hsave));
>  	__free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
>  	kvm_vcpu_uninit(vcpu);
>  	kmem_cache_free(kvm_vcpu_cache, svm);
> -	/*
> -	 * The vmcb page can be recycled, causing a false negative in
> -	 * svm_vcpu_load(). So do a full IBPB now.
> -	 */
> -	indirect_branch_prediction_barrier();
>  }
>  
>  static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
> -- 
> 2.19.1
> 
> 
> -- 
> kernel-team mailing list
> kernel-team@lists.ubuntu.com
> https://lists.ubuntu.com/mailman/listinfo/kernel-team

Patch

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 3e59a187fe30..75d5f180ffa5 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2188,21 +2188,31 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 	return ERR_PTR(err);
 }
 
+static void svm_clear_current_vmcb(struct vmcb *vmcb)
+{
+	int i;
+
+	for_each_online_cpu(i)
+		cmpxchg(&per_cpu(svm_data, i)->current_vmcb, vmcb, NULL);
+}
+
 static void svm_free_vcpu(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
+	/*
+	 * The vmcb page can be recycled, causing a false negative in
+	 * svm_vcpu_load(). So, ensure that no logical CPU has this
+	 * vmcb page recorded as its current vmcb.
+	 */
+	svm_clear_current_vmcb(svm->vmcb);
+
 	__free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT));
 	__free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
 	__free_page(virt_to_page(svm->nested.hsave));
 	__free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
 	kvm_vcpu_uninit(vcpu);
 	kmem_cache_free(kvm_vcpu_cache, svm);
-	/*
-	 * The vmcb page can be recycled, causing a false negative in
-	 * svm_vcpu_load(). So do a full IBPB now.
-	 */
-	indirect_branch_prediction_barrier();
 }
 
 static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
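
A note on the cmpxchg() in svm_clear_current_vmcb(): it clears a CPU's cached
current_vmcb only if that pointer still refers to the vmcb being freed, and it
does so atomically, presumably so that a concurrent svm_vcpu_load() installing
a different vmcb on that CPU is not clobbered. The non-atomic equivalent of the
intent (illustration only, not the patch):

    /* Clear the cached pointer only where it still names the vmcb being freed. */
    if (per_cpu(svm_data, i)->current_vmcb == vmcb)
    	per_cpu(svm_data, i)->current_vmcb = NULL;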