
[07/11] kvm/x86: added hyper-v crash data and ctl msr's get/set'ers

Message ID 1434989108-20924-8-git-send-email-den@openvz.org
State New

Commit Message

Denis V. Lunev June 22, 2015, 4:05 p.m. UTC
From: Andrey Smetanin <asmetanin@virtuozzo.com>

Added getters and setters for the Hyper-V crash data and control
MSRs (HV_X64_MSR_CRASH*).

Signed-off-by: Andrey Smetanin <asmetanin@virtuozzo.com>
Signed-off-by: Denis V. Lunev <den@openvz.org>
CC: Paolo Bonzini <pbonzini@redhat.com>
CC: Gleb Natapov <gleb@kernel.org>
---
 arch/x86/kvm/hyperv.c | 66 +++++++++++++++++++++++++++++++++++++++++++++++++++
 arch/x86/kvm/x86.c    |  4 ++++
 2 files changed, 70 insertions(+)

Comments

Peter Hornyack June 22, 2015, 11:52 p.m. UTC | #1
On Mon, Jun 22, 2015 at 9:05 AM, Denis V. Lunev <den@openvz.org> wrote:
> From: Andrey Smetanin <asmetanin@virtuozzo.com>
>
> Added getters and setters for the Hyper-V crash data and control
> MSRs (HV_X64_MSR_CRASH*).
>
> Signed-off-by: Andrey Smetanin <asmetanin@virtuozzo.com>
> Signed-off-by: Denis V. Lunev <den@openvz.org>
> CC: Paolo Bonzini <pbonzini@redhat.com>
> CC: Gleb Natapov <gleb@kernel.org>
> ---
>  arch/x86/kvm/hyperv.c | 66 +++++++++++++++++++++++++++++++++++++++++++++++++++
>  arch/x86/kvm/x86.c    |  4 ++++
>  2 files changed, 70 insertions(+)
>
> diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
> index f65fb622..0a7d373 100644
> --- a/arch/x86/kvm/hyperv.c
> +++ b/arch/x86/kvm/hyperv.c
> @@ -46,6 +46,59 @@ static bool kvm_hv_msr_partition_wide(u32 msr)
>         return r;
>  }
>
> +static int kvm_hv_msr_get_crash_ctl(struct kvm_vcpu *vcpu, u64 *pdata)
> +{
> +       struct kvm_arch_hyperv *hv = &vcpu->kvm->arch.hyperv;
> +
> +       *pdata = hv->hv_crash_ctl;

I see that this is implemented so that userspace controls the Hyper-V
crash capabilities that are enabled - userspace must set
HV_X64_MSR_CRASH_CTL_NOTIFY before the guest reads
HV_X64_MSR_CRASH_CTL. I just want to check that you considered an
alternative: a simpler implementation would have the crash
capabilities statically defined by kvm, and
HV_X64_MSR_CRASH_CTL_CONTENTS could just be returned here (and
hv_crash_ctl could be removed from struct kvm_arch_hyperv).

The current implementation is potentially more flexible but makes the
MSR handling a little more awkward since the host_initiated bool needs
to be passed around (patch 09). I guess either approach seems ok to
me.

Also, if this patchset is used then it looks like
HV_X64_MSR_CRASH_CTL_CONTENTS can be removed.
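
To make the alternative concrete, a minimal sketch (not code from the
patchset; it assumes HV_X64_MSR_CRASH_CTL_CONTENTS is the statically
defined capability mask already present in the Hyper-V headers):

static int kvm_hv_msr_get_crash_ctl(struct kvm_vcpu *vcpu, u64 *pdata)
{
        /* Statically advertise the supported crash capabilities, so
         * hv_crash_ctl (and the host_initiated plumbing in patch 09)
         * would not be needed.
         */
        *pdata = HV_X64_MSR_CRASH_CTL_CONTENTS;
        return 0;
}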

> +       return 0;
> +}
> +
> +static int kvm_hv_msr_set_crash_ctl(struct kvm_vcpu *vcpu, u64 data)
> +{
> +       struct kvm_arch_hyperv *hv = &vcpu->kvm->arch.hyperv;
> +
> +       hv->hv_crash_ctl = data;
> +       if ((data & HV_X64_MSR_CRASH_CTL_NOTIFY)) {
> +               vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx "
> +                         "0x%llx)\n", hv->hv_crash_param[0],
> +                         hv->hv_crash_param[1],
> +                         hv->hv_crash_param[2],
> +                         hv->hv_crash_param[3],
> +                         hv->hv_crash_param[4]);
> +
> +               /* Send notification about crash to user space */
> +               kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
> +               return 0;

Returning from here seems unnecessary - if more crash capabilities are
added in the future, the guest might want to invoke multiple
capabilities at once, so we'd want to check for those here too.
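
Something like this rough sketch, for instance (the vcpu_debug() call
is dropped here only for brevity):

static int kvm_hv_msr_set_crash_ctl(struct kvm_vcpu *vcpu, u64 data)
{
        struct kvm_arch_hyperv *hv = &vcpu->kvm->arch.hyperv;

        hv->hv_crash_ctl = data;
        if (data & HV_X64_MSR_CRASH_CTL_NOTIFY) {
                /* Send notification about crash to user space */
                kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
                /* no early return: future capability bits can be
                 * checked and handled here as well
                 */
        }

        return 0;
}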

> +       }
> +
> +       return 0;
> +}
> +
> +static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu,
> +                                    u32 index, u64 data)
> +{
> +       struct kvm_arch_hyperv *hv = &vcpu->kvm->arch.hyperv;
> +
> +       if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
> +               return -EINVAL;
> +
> +       hv->hv_crash_param[index] = data;
> +       return 0;
> +}
> +
> +static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu,
> +                                    u32 index, u64 *pdata)
> +{
> +       struct kvm_arch_hyperv *hv = &vcpu->kvm->arch.hyperv;
> +
> +       if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
> +               return -EINVAL;
> +
> +       *pdata = hv->hv_crash_param[index];
> +       return 0;
> +}
> +
>  static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
>  {
>         struct kvm *kvm = vcpu->kvm;
> @@ -98,6 +152,12 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
>                 mark_page_dirty(kvm, gfn);
>                 break;
>         }
> +       case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
> +               return kvm_hv_msr_set_crash_data(vcpu,
> +                                                msr - HV_X64_MSR_CRASH_P0,
> +                                                data);
> +       case HV_X64_MSR_CRASH_CTL:
> +               return kvm_hv_msr_set_crash_ctl(vcpu, data);
>         default:
>                 vcpu_unimpl(vcpu, "Hyper-V unimpl wrmsr: 0x%x data 0x%llx\n",
>                                   msr, data);
> @@ -170,6 +230,12 @@ static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
>         case HV_X64_MSR_REFERENCE_TSC:
>                 data = hv->hv_tsc_page;
>                 break;
> +       case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
> +               return kvm_hv_msr_get_crash_data(vcpu,
> +                                                msr - HV_X64_MSR_CRASH_P0,
> +                                                pdata);
> +       case HV_X64_MSR_CRASH_CTL:
> +               return kvm_hv_msr_get_crash_ctl(vcpu, pdata);
>         default:
>                 vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
>                 return 1;
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 2755c37..2046b78 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -2208,6 +2208,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>                  */
>                 break;
>         case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
> +       case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
> +       case HV_X64_MSR_CRASH_CTL:
>                 return kvm_hv_set_msr_common(vcpu, msr, data);
>                 break;
>         case MSR_IA32_BBL_CR_CTL3:
> @@ -2451,6 +2453,8 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
>                 data = 0x20000000;
>                 break;
>         case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
> +       case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
> +       case HV_X64_MSR_CRASH_CTL:
>                 return kvm_hv_get_msr_common(vcpu, msr, pdata);
>                 break;
>         case MSR_IA32_BBL_CR_CTL3:
> --
> 1.9.1
>
Denis V. Lunev June 23, 2015, 9:47 a.m. UTC | #2
On 23/06/15 02:52, Peter Hornyack wrote:
> On Mon, Jun 22, 2015 at 9:05 AM, Denis V. Lunev <den@openvz.org> wrote:
>> From: Andrey Smetanin <asmetanin@virtuozzo.com>
>>
>> Added getters and setters for the Hyper-V crash data and control
>> MSRs (HV_X64_MSR_CRASH*).
>>
>> Signed-off-by: Andrey Smetanin <asmetanin@virtuozzo.com>
>> Signed-off-by: Denis V. Lunev <den@openvz.org>
>> CC: Paolo Bonzini <pbonzini@redhat.com>
>> CC: Gleb Natapov <gleb@kernel.org>
>> ---
>>   arch/x86/kvm/hyperv.c | 66 +++++++++++++++++++++++++++++++++++++++++++++++++++
>>   arch/x86/kvm/x86.c    |  4 ++++
>>   2 files changed, 70 insertions(+)
>>
>> diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
>> index f65fb622..0a7d373 100644
>> --- a/arch/x86/kvm/hyperv.c
>> +++ b/arch/x86/kvm/hyperv.c
>> @@ -46,6 +46,59 @@ static bool kvm_hv_msr_partition_wide(u32 msr)
>>          return r;
>>   }
>>
>> +static int kvm_hv_msr_get_crash_ctl(struct kvm_vcpu *vcpu, u64 *pdata)
>> +{
>> +       struct kvm_arch_hyperv *hv = &vcpu->kvm->arch.hyperv;
>> +
>> +       *pdata = hv->hv_crash_ctl;
> I see that this is implemented so that userspace controls the Hyper-V
> crash capabilities that are enabled - userspace must set
> HV_X64_MSR_CRASH_CTL_NOTIFY before the guest reads
> HV_X64_MSR_CRASH_CTL. I just want to check that you considered an
> alternative: a simpler implementation would have the crash
> capabilities statically defined by kvm, and
> HV_X64_MSR_CRASH_CTL_CONTENTS could just be returned here (and
> hv_crash_ctl could be removed from struct kvm_arch_hyperv).
>
> The current implementation is potentially more flexible but makes the
> MSR handling a little more awkward since the host_initiated bool needs
> to be passed around (patch 09). I guess either approach seems ok to
> me.
>
> Also, if this patchset is used then it looks like
> HV_X64_MSR_CRASH_CTL_CONTENTS can be removed.

Paolo,

do you have any opinion on this? I prefer
the configurable way, but there is no problem
with enabling it by default and dropping the
'hv_panic' property.

Regards,
     Den

>> +       return 0;
>> +}
>> +
>> +static int kvm_hv_msr_set_crash_ctl(struct kvm_vcpu *vcpu, u64 data)
>> +{
>> +       struct kvm_arch_hyperv *hv = &vcpu->kvm->arch.hyperv;
>> +
>> +       hv->hv_crash_ctl = data;
>> +       if ((data & HV_X64_MSR_CRASH_CTL_NOTIFY)) {
>> +               vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx "
>> +                         "0x%llx)\n", hv->hv_crash_param[0],
>> +                         hv->hv_crash_param[1],
>> +                         hv->hv_crash_param[2],
>> +                         hv->hv_crash_param[3],
>> +                         hv->hv_crash_param[4]);
>> +
>> +               /* Send notification about crash to user space */
>> +               kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
>> +               return 0;
> Returning from here seems unnecessary - if more crash capabilities are
> added in the future, the guest might want to invoke multiple
> capabilities at once, so we'd want to check for those here too.
>
>> +       }
>> +
>> +       return 0;
>> +}
>> +
>> +static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu,
>> +                                    u32 index, u64 data)
>> +{
>> +       struct kvm_arch_hyperv *hv = &vcpu->kvm->arch.hyperv;
>> +
>> +       if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
>> +               return -EINVAL;
>> +
>> +       hv->hv_crash_param[index] = data;
>> +       return 0;
>> +}
>> +
>> +static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu,
>> +                                    u32 index, u64 *pdata)
>> +{
>> +       struct kvm_arch_hyperv *hv = &vcpu->kvm->arch.hyperv;
>> +
>> +       if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
>> +               return -EINVAL;
>> +
>> +       *pdata = hv->hv_crash_param[index];
>> +       return 0;
>> +}
>> +
>>   static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
>>   {
>>          struct kvm *kvm = vcpu->kvm;
>> @@ -98,6 +152,12 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
>>                  mark_page_dirty(kvm, gfn);
>>                  break;
>>          }
>> +       case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
>> +               return kvm_hv_msr_set_crash_data(vcpu,
>> +                                                msr - HV_X64_MSR_CRASH_P0,
>> +                                                data);
>> +       case HV_X64_MSR_CRASH_CTL:
>> +               return kvm_hv_msr_set_crash_ctl(vcpu, data);
>>          default:
>>                  vcpu_unimpl(vcpu, "Hyper-V unimpl wrmsr: 0x%x data 0x%llx\n",
>>                                    msr, data);
>> @@ -170,6 +230,12 @@ static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
>>          case HV_X64_MSR_REFERENCE_TSC:
>>                  data = hv->hv_tsc_page;
>>                  break;
>> +       case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
>> +               return kvm_hv_msr_get_crash_data(vcpu,
>> +                                                msr - HV_X64_MSR_CRASH_P0,
>> +                                                pdata);
>> +       case HV_X64_MSR_CRASH_CTL:
>> +               return kvm_hv_msr_get_crash_ctl(vcpu, pdata);
>>          default:
>>                  vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
>>                  return 1;
>> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
>> index 2755c37..2046b78 100644
>> --- a/arch/x86/kvm/x86.c
>> +++ b/arch/x86/kvm/x86.c
>> @@ -2208,6 +2208,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>>                   */
>>                  break;
>>          case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
>> +       case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
>> +       case HV_X64_MSR_CRASH_CTL:
>>                  return kvm_hv_set_msr_common(vcpu, msr, data);
>>                  break;
>>          case MSR_IA32_BBL_CR_CTL3:
>> @@ -2451,6 +2453,8 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
>>                  data = 0x20000000;
>>                  break;
>>          case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
>> +       case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
>> +       case HV_X64_MSR_CRASH_CTL:
>>                  return kvm_hv_get_msr_common(vcpu, msr, pdata);
>>                  break;
>>          case MSR_IA32_BBL_CR_CTL3:
>> --
>> 1.9.1
>>
Paolo Bonzini June 23, 2015, 9:51 a.m. UTC | #3
On 23/06/2015 11:47, Denis V. Lunev wrote:
>>
>> The current implementation is potentially more flexible but makes the
>> MSR handling a little more awkward since the host_initiated bool needs
>> to be passed around (patch 09). I guess either approach seems ok to
>> me.
>>
>> Also, if this patchset is used then it looks like
>> HV_X64_MSR_CRASH_CTL_CONTENTS can be removed.
> 
> Paolo,
> 
> do you have any opinion on this? I prefer
> the configurable way, but there is no problem
> with enabling it by default and dropping the
> 'hv_panic' property.

I also prefer the configurable way.  Would hv_panic prevent
reboot-on-BSOD from rebooting the VM?

Paolo
Denis V. Lunev June 23, 2015, 10:08 a.m. UTC | #4
On 23/06/15 12:51, Paolo Bonzini wrote:
>
> On 23/06/2015 11:47, Denis V. Lunev wrote:
>>> The current implementation is potentially more flexible but makes the
>>> MSR handling a little more awkward since the host_initiated bool needs
>>> to be passed around (patch 09). I guess either approach seems ok to
>>> me.
>>>
>>> Also, if this patchset is used then it looks like
>>> HV_X64_MSR_CRASH_CTL_CONTENTS can be removed.
>> Paolo,
>>
>> do you have any opinion on this? I prefer
>> the configurable way, but there is no problem
>> with enabling it by default and dropping the
>> 'hv_panic' property.
> I also prefer the configurable way.  Would hv_panic prevent
> reboot-on-BSOD from rebooting the VM?
>
> Paolo
the behavior is then controlled by libvirt, which has
the appropriate event configuration, specified here
   https://libvirt.org/formatdomain.html
namely
   <on_crash>restart</on_crash>

Thus it looks like, without libvirt's help, the guest will get stuck
in the RUN_STATE_GUEST_PANICKED state.

Den
Paolo Bonzini June 23, 2015, 10:30 a.m. UTC | #5
On 23/06/2015 12:08, Denis V. Lunev wrote:
> the behavior is then controlled by libvirt, which has
> the appropriate event configuration, specified here
>   https://libvirt.org/formatdomain.html
> namely
>   <on_crash>restart</on_crash>
> 
> Thus it looks like, without libvirt's help, the guest will get stuck
> in the RUN_STATE_GUEST_PANICKED state.

Yes, exactly.  So it's better to keep it user-configurable.  We already
made the same mistake with pvpanic.

Paolo

Patch

diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index f65fb622..0a7d373 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -46,6 +46,59 @@  static bool kvm_hv_msr_partition_wide(u32 msr)
 	return r;
 }
 
+static int kvm_hv_msr_get_crash_ctl(struct kvm_vcpu *vcpu, u64 *pdata)
+{
+	struct kvm_arch_hyperv *hv = &vcpu->kvm->arch.hyperv;
+
+	*pdata = hv->hv_crash_ctl;
+	return 0;
+}
+
+static int kvm_hv_msr_set_crash_ctl(struct kvm_vcpu *vcpu, u64 data)
+{
+	struct kvm_arch_hyperv *hv = &vcpu->kvm->arch.hyperv;
+
+	hv->hv_crash_ctl = data;
+	if ((data & HV_X64_MSR_CRASH_CTL_NOTIFY)) {
+		vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx "
+			  "0x%llx)\n", hv->hv_crash_param[0],
+			  hv->hv_crash_param[1],
+			  hv->hv_crash_param[2],
+			  hv->hv_crash_param[3],
+			  hv->hv_crash_param[4]);
+
+		/* Send notification about crash to user space */
+		kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
+		return 0;
+	}
+
+	return 0;
+}
+
+static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu,
+				     u32 index, u64 data)
+{
+	struct kvm_arch_hyperv *hv = &vcpu->kvm->arch.hyperv;
+
+	if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
+		return -EINVAL;
+
+	hv->hv_crash_param[index] = data;
+	return 0;
+}
+
+static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu,
+				     u32 index, u64 *pdata)
+{
+	struct kvm_arch_hyperv *hv = &vcpu->kvm->arch.hyperv;
+
+	if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
+		return -EINVAL;
+
+	*pdata = hv->hv_crash_param[index];
+	return 0;
+}
+
 static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 {
 	struct kvm *kvm = vcpu->kvm;
@@ -98,6 +152,12 @@  static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 		mark_page_dirty(kvm, gfn);
 		break;
 	}
+	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
+		return kvm_hv_msr_set_crash_data(vcpu,
+						 msr - HV_X64_MSR_CRASH_P0,
+						 data);
+	case HV_X64_MSR_CRASH_CTL:
+		return kvm_hv_msr_set_crash_ctl(vcpu, data);
 	default:
 		vcpu_unimpl(vcpu, "Hyper-V unimpl wrmsr: 0x%x data 0x%llx\n",
 				  msr, data);
@@ -170,6 +230,12 @@  static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 	case HV_X64_MSR_REFERENCE_TSC:
 		data = hv->hv_tsc_page;
 		break;
+	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
+		return kvm_hv_msr_get_crash_data(vcpu,
+						 msr - HV_X64_MSR_CRASH_P0,
+						 pdata);
+	case HV_X64_MSR_CRASH_CTL:
+		return kvm_hv_msr_get_crash_ctl(vcpu, pdata);
 	default:
 		vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
 		return 1;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2755c37..2046b78 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2208,6 +2208,8 @@  int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		 */
 		break;
 	case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
+	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
+	case HV_X64_MSR_CRASH_CTL:
 		return kvm_hv_set_msr_common(vcpu, msr, data);
 		break;
 	case MSR_IA32_BBL_CR_CTL3:
@@ -2451,6 +2453,8 @@  int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 		data = 0x20000000;
 		break;
 	case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
+	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
+	case HV_X64_MSR_CRASH_CTL:
 		return kvm_hv_get_msr_common(vcpu, msr, pdata);
 		break;
 	case MSR_IA32_BBL_CR_CTL3: