| Field | Value |
| --- | --- |
| Message ID | 20210408193649.25649-2-tim.gardner@canonical.com |
| State | New |
| Headers | show |
| Series | KVM: SVM: load control fields from VMCB12 before checking them (expand) |
On 08.04.21 21:36, Tim Gardner wrote: > From: Paolo Bonzini <pbonzini@redhat.com> > > CVE-2021-29657 > > Avoid races between check and use of the nested VMCB controls. This > for example ensures that the VMRUN intercept is always reflected to the > nested hypervisor, instead of being processed by the host. Without this > patch, it is possible to end up with svm->nested.hsave pointing to > the MSR permission bitmap for nested guests. > > This bug is CVE-2021-29657. > > Reported-by: Felix Wilhelm <fwilhelm@google.com> > Cc: stable@vger.kernel.org > Fixes: 2fcf4876ada ("KVM: nSVM: implement on demand allocation of the nested state") > Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> > (cherry picked from commit a58d9166a756a0f4a6618e4f593232593d6df134) > Signed-off-by: Tim Gardner <tim.gardner@canonical.com> Acked-by: Stefan Bader <stefan.bader@canonical.com> > --- > arch/x86/kvm/svm/nested.c | 10 ++++++---- > 1 file changed, 6 insertions(+), 4 deletions(-) > > diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c > index 1008cc6cb66c..dd318ca6c722 100644 > --- a/arch/x86/kvm/svm/nested.c > +++ b/arch/x86/kvm/svm/nested.c > @@ -246,7 +246,7 @@ static bool nested_vmcb_check_controls(struct vmcb_control_area *control) > return true; > } > > -static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12) > +static bool nested_vmcb_check_save(struct vcpu_svm *svm, struct vmcb *vmcb12) > { > struct kvm_vcpu *vcpu = &svm->vcpu; > bool vmcb12_lma; > @@ -271,7 +271,7 @@ static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12) > if (kvm_valid_cr4(&svm->vcpu, vmcb12->save.cr4)) > return false; > > - return nested_vmcb_check_controls(&vmcb12->control); > + return true; > } > > static void load_nested_vmcb_control(struct vcpu_svm *svm, > @@ -454,7 +454,6 @@ int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb12_gpa, > int ret; > > svm->nested.vmcb12_gpa = vmcb12_gpa; > - load_nested_vmcb_control(svm, &vmcb12->control); > 
nested_prepare_vmcb_save(svm, vmcb12); > nested_prepare_vmcb_control(svm); > > @@ -501,7 +500,10 @@ int nested_svm_vmrun(struct vcpu_svm *svm) > if (WARN_ON_ONCE(!svm->nested.initialized)) > return -EINVAL; > > - if (!nested_vmcb_checks(svm, vmcb12)) { > + load_nested_vmcb_control(svm, &vmcb12->control); > + > + if (!nested_vmcb_check_save(svm, vmcb12) || > + !nested_vmcb_check_controls(&svm->nested.ctl)) { > vmcb12->control.exit_code = SVM_EXIT_ERR; > vmcb12->control.exit_code_hi = 0; > vmcb12->control.exit_info_1 = 0; >
On 08.04.21 21:36, Tim Gardner wrote: > From: Paolo Bonzini <pbonzini@redhat.com> > > CVE-2021-29657 > > Avoid races between check and use of the nested VMCB controls. This > for example ensures that the VMRUN intercept is always reflected to the > nested hypervisor, instead of being processed by the host. Without this > patch, it is possible to end up with svm->nested.hsave pointing to > the MSR permission bitmap for nested guests. > > This bug is CVE-2021-29657. > > Reported-by: Felix Wilhelm <fwilhelm@google.com> > Cc: stable@vger.kernel.org > Fixes: 2fcf4876ada ("KVM: nSVM: implement on demand allocation of the nested state") > Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> > (cherry picked from commit a58d9166a756a0f4a6618e4f593232593d6df134) > Signed-off-by: Tim Gardner <tim.gardner@canonical.com> Acked-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com> Thanks > --- > arch/x86/kvm/svm/nested.c | 10 ++++++---- > 1 file changed, 6 insertions(+), 4 deletions(-) > > diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c > index 1008cc6cb66c..dd318ca6c722 100644 > --- a/arch/x86/kvm/svm/nested.c > +++ b/arch/x86/kvm/svm/nested.c > @@ -246,7 +246,7 @@ static bool nested_vmcb_check_controls(struct vmcb_control_area *control) > return true; > } > > -static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12) > +static bool nested_vmcb_check_save(struct vcpu_svm *svm, struct vmcb *vmcb12) > { > struct kvm_vcpu *vcpu = &svm->vcpu; > bool vmcb12_lma; > @@ -271,7 +271,7 @@ static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12) > if (kvm_valid_cr4(&svm->vcpu, vmcb12->save.cr4)) > return false; > > - return nested_vmcb_check_controls(&vmcb12->control); > + return true; > } > > static void load_nested_vmcb_control(struct vcpu_svm *svm, > @@ -454,7 +454,6 @@ int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb12_gpa, > int ret; > > svm->nested.vmcb12_gpa = vmcb12_gpa; > - load_nested_vmcb_control(svm, 
&vmcb12->control); > nested_prepare_vmcb_save(svm, vmcb12); > nested_prepare_vmcb_control(svm); > > @@ -501,7 +500,10 @@ int nested_svm_vmrun(struct vcpu_svm *svm) > if (WARN_ON_ONCE(!svm->nested.initialized)) > return -EINVAL; > > - if (!nested_vmcb_checks(svm, vmcb12)) { > + load_nested_vmcb_control(svm, &vmcb12->control); > + > + if (!nested_vmcb_check_save(svm, vmcb12) || > + !nested_vmcb_check_controls(&svm->nested.ctl)) { > vmcb12->control.exit_code = SVM_EXIT_ERR; > vmcb12->control.exit_code_hi = 0; > vmcb12->control.exit_info_1 = 0; >
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index 1008cc6cb66c..dd318ca6c722 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -246,7 +246,7 @@ static bool nested_vmcb_check_controls(struct vmcb_control_area *control) return true; } -static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12) +static bool nested_vmcb_check_save(struct vcpu_svm *svm, struct vmcb *vmcb12) { struct kvm_vcpu *vcpu = &svm->vcpu; bool vmcb12_lma; @@ -271,7 +271,7 @@ static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12) if (kvm_valid_cr4(&svm->vcpu, vmcb12->save.cr4)) return false; - return nested_vmcb_check_controls(&vmcb12->control); + return true; } static void load_nested_vmcb_control(struct vcpu_svm *svm, @@ -454,7 +454,6 @@ int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb12_gpa, int ret; svm->nested.vmcb12_gpa = vmcb12_gpa; - load_nested_vmcb_control(svm, &vmcb12->control); nested_prepare_vmcb_save(svm, vmcb12); nested_prepare_vmcb_control(svm); @@ -501,7 +500,10 @@ int nested_svm_vmrun(struct vcpu_svm *svm) if (WARN_ON_ONCE(!svm->nested.initialized)) return -EINVAL; - if (!nested_vmcb_checks(svm, vmcb12)) { + load_nested_vmcb_control(svm, &vmcb12->control); + + if (!nested_vmcb_check_save(svm, vmcb12) || + !nested_vmcb_check_controls(&svm->nested.ctl)) { vmcb12->control.exit_code = SVM_EXIT_ERR; vmcb12->control.exit_code_hi = 0; vmcb12->control.exit_info_1 = 0;