KVM: SVM: load control fields from VMCB12 before checking them

Message ID: 20210408193649.25649-2-tim.gardner@canonical.com
State: New
Series: KVM: SVM: load control fields from VMCB12 before checking them

Commit Message

Tim Gardner April 8, 2021, 7:36 p.m. UTC
From: Paolo Bonzini <pbonzini@redhat.com>

CVE-2021-29657

Avoid races between check and use of the nested VMCB controls.  This
ensures, for example, that the VMRUN intercept is always reflected to the
nested hypervisor, instead of being processed by the host.  Without this
patch, it is possible to end up with svm->nested.hsave pointing to
the MSR permission bitmap for nested guests.

This bug is CVE-2021-29657.

Reported-by: Felix Wilhelm <fwilhelm@google.com>
Cc: stable@vger.kernel.org
Fixes: 2fcf4876ada ("KVM: nSVM: implement on demand allocation of the nested state")
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
(cherry picked from commit a58d9166a756a0f4a6618e4f593232593d6df134)
Signed-off-by: Tim Gardner <tim.gardner@canonical.com>
---
 arch/x86/kvm/svm/nested.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)
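
The race being closed here is a classic double fetch (TOCTOU) on guest
memory: vmcb12 points at a guest page that stays mapped and writable
while VMRUN is emulated, so every dereference re-reads memory that
another vCPU of a malicious L1 can rewrite between the check and the
use.  A minimal sketch of the broken and fixed patterns follows; the
struct and function names are illustrative stand-ins, not the real KVM
ones.

	#include <stdbool.h>

	struct ctl { unsigned long intercept_vmrun; };
	struct vmcb { struct ctl control; };	/* lives in guest memory */
	struct nested { struct ctl ctl; };	/* host-private copy */

	static bool check_controls(const struct ctl *c)
	{
		return c->intercept_vmrun != 0;	/* VMRUN must stay intercepted */
	}

	/* Broken: fetch #1 for the check, fetch #2 for the use. */
	static int vmrun_broken(struct nested *n, struct vmcb *vmcb12)
	{
		if (!check_controls(&vmcb12->control))	/* fetch #1 */
			return -1;
		/* another vCPU can rewrite vmcb12->control here */
		n->ctl = vmcb12->control;		/* fetch #2: may differ */
		return 0;
	}

	/* Fixed: copy once, then validate the host-private copy. */
	static int vmrun_fixed(struct nested *n, struct vmcb *vmcb12)
	{
		n->ctl = vmcb12->control;		/* single fetch */
		if (!check_controls(&n->ctl))
			return -1;
		return 0;
	}

Because every later use goes through the host-private copy (in KVM,
svm->nested.ctl), the values that were validated are exactly the values
that get used, no matter what the guest writes to the mapped VMCB page
in the meantime.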

Comments

Stefan Bader April 9, 2021, 12:18 p.m. UTC | #1
On 08.04.21 21:36, Tim Gardner wrote:
> From: Paolo Bonzini <pbonzini@redhat.com>
> [...]
> Signed-off-by: Tim Gardner <tim.gardner@canonical.com>
Acked-by: Stefan Bader <stefan.bader@canonical.com>
Kleber Sacilotto de Souza April 9, 2021, 4:23 p.m. UTC | #2
On 08.04.21 21:36, Tim Gardner wrote:
> From: Paolo Bonzini <pbonzini@redhat.com>
> [...]
> Signed-off-by: Tim Gardner <tim.gardner@canonical.com>

Acked-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>

Thanks


Patch

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 1008cc6cb66c..dd318ca6c722 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -246,7 +246,7 @@ static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
 	return true;
 }
 
-static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
+static bool nested_vmcb_check_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
 {
 	struct kvm_vcpu *vcpu = &svm->vcpu;
 	bool vmcb12_lma;
@@ -271,7 +271,7 @@ static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
 	if (kvm_valid_cr4(&svm->vcpu, vmcb12->save.cr4))
 		return false;
 
-	return nested_vmcb_check_controls(&vmcb12->control);
+	return true;
 }
 
 static void load_nested_vmcb_control(struct vcpu_svm *svm,
@@ -454,7 +454,6 @@ int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb12_gpa,
 	int ret;
 
 	svm->nested.vmcb12_gpa = vmcb12_gpa;
-	load_nested_vmcb_control(svm, &vmcb12->control);
 	nested_prepare_vmcb_save(svm, vmcb12);
 	nested_prepare_vmcb_control(svm);
 
@@ -501,7 +500,10 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
 	if (WARN_ON_ONCE(!svm->nested.initialized))
 		return -EINVAL;
 
-	if (!nested_vmcb_checks(svm, vmcb12)) {
+	load_nested_vmcb_control(svm, &vmcb12->control);
+
+	if (!nested_vmcb_check_save(svm, vmcb12) ||
+	    !nested_vmcb_check_controls(&svm->nested.ctl)) {
 		vmcb12->control.exit_code    = SVM_EXIT_ERR;
 		vmcb12->control.exit_code_hi = 0;
 		vmcb12->control.exit_info_1  = 0;