
[v2,2/5] target-i386/kvm: Hyper-V SynIC MSR's support

Message ID 1447159964-22987-3-git-send-email-asmetanin@virtuozzo.com
State New

Commit Message

Andrey Smetanin Nov. 10, 2015, 12:52 p.m. UTC
This patch adds support for the Hyper-V Synthetic
Interrupt Controller (Hyper-V SynIC) MSRs and their
migration. Hyper-V SynIC is enabled by the CPU's
'hv-synic' option.

This patch does not allow CPU creation if the
'hv-synic' option is specified but the kernel
does not support Hyper-V SynIC.

Changes v2:
* activate Hyper-V SynIC by enabling corresponding vcpu cap
* reject cpu initialization if user requested Hyper-V SynIC
  but kernel does not support Hyper-V SynIC

Signed-off-by: Andrey Smetanin <asmetanin@virtuozzo.com>
Reviewed-by: Roman Kagan <rkagan@virtuozzo.com>
Signed-off-by: Denis V. Lunev <den@openvz.org>
CC: Paolo Bonzini <pbonzini@redhat.com>
CC: Richard Henderson <rth@twiddle.net>
CC: Eduardo Habkost <ehabkost@redhat.com>
CC: "Andreas Färber" <afaerber@suse.de>
CC: Marcelo Tosatti <mtosatti@redhat.com>
CC: Roman Kagan <rkagan@virtuozzo.com>
CC: Denis V. Lunev <den@openvz.org>
CC: kvm@vger.kernel.org

---
 target-i386/cpu-qom.h |  1 +
 target-i386/cpu.c     |  1 +
 target-i386/cpu.h     |  5 ++++
 target-i386/kvm.c     | 67 ++++++++++++++++++++++++++++++++++++++++++++++++++-
 target-i386/machine.c | 39 ++++++++++++++++++++++++++++++
 5 files changed, 112 insertions(+), 1 deletion(-)

Comments

Andrey Smetanin Nov. 11, 2015, 9:09 a.m. UTC | #1
On 11/10/2015 04:14 PM, Paolo Bonzini wrote:
>
>
> On 10/11/2015 13:52, Andrey Smetanin wrote:
>> This patch does Hyper-V Synthetic interrupt
>> controller(Hyper-V SynIC) MSR's support and
>> migration. Hyper-V SynIC is enabled by cpu's
>> 'hv-synic' option.
>>
>> This patch does not allow cpu creation if
>> 'hv-synic' option specified but kernel
>> doesn't support Hyper-V SynIC.
>>
>> Changes v2:
>> * activate Hyper-V SynIC by enabling corresponding vcpu cap
>> * reject cpu initialization if user requested Hyper-V SynIC
>>    but kernel does not support Hyper-V SynIC
>>
>> Signed-off-by: Andrey Smetanin <asmetanin@virtuozzo.com>
>> Reviewed-by: Roman Kagan <rkagan@virtuozzo.com>
>> Signed-off-by: Denis V. Lunev <den@openvz.org>
>> CC: Paolo Bonzini <pbonzini@redhat.com>
>> CC: Richard Henderson <rth@twiddle.net>
>> CC: Eduardo Habkost <ehabkost@redhat.com>
>> CC: "Andreas Färber" <afaerber@suse.de>
>> CC: Marcelo Tosatti <mtosatti@redhat.com>
>> CC: Roman Kagan <rkagan@virtuozzo.com>
>> CC: Denis V. Lunev <den@openvz.org>
>> CC: kvm@vger.kernel.org
>>
>> ---
>>   target-i386/cpu-qom.h |  1 +
>>   target-i386/cpu.c     |  1 +
>>   target-i386/cpu.h     |  5 ++++
>>   target-i386/kvm.c     | 67 ++++++++++++++++++++++++++++++++++++++++++++++++++-
>>   target-i386/machine.c | 39 ++++++++++++++++++++++++++++++
>>   5 files changed, 112 insertions(+), 1 deletion(-)
>>
>> diff --git a/target-i386/cpu-qom.h b/target-i386/cpu-qom.h
>> index e3bfe9d..7ea5b34 100644
>> --- a/target-i386/cpu-qom.h
>> +++ b/target-i386/cpu-qom.h
>> @@ -94,6 +94,7 @@ typedef struct X86CPU {
>>       bool hyperv_reset;
>>       bool hyperv_vpindex;
>>       bool hyperv_runtime;
>> +    bool hyperv_synic;
>>       bool check_cpuid;
>>       bool enforce_cpuid;
>>       bool expose_kvm;
>> diff --git a/target-i386/cpu.c b/target-i386/cpu.c
>> index e5f1c5b..1462e19 100644
>> --- a/target-i386/cpu.c
>> +++ b/target-i386/cpu.c
>> @@ -3142,6 +3142,7 @@ static Property x86_cpu_properties[] = {
>>       DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
>>       DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
>>       DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
>> +    DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
>>       DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
>>       DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
>>       DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
>> diff --git a/target-i386/cpu.h b/target-i386/cpu.h
>> index fc4a605..8cf33df 100644
>> --- a/target-i386/cpu.h
>> +++ b/target-i386/cpu.h
>> @@ -918,6 +918,11 @@ typedef struct CPUX86State {
>>       uint64_t msr_hv_tsc;
>>       uint64_t msr_hv_crash_params[HV_X64_MSR_CRASH_PARAMS];
>>       uint64_t msr_hv_runtime;
>> +    uint64_t msr_hv_synic_control;
>> +    uint64_t msr_hv_synic_version;
>> +    uint64_t msr_hv_synic_evt_page;
>> +    uint64_t msr_hv_synic_msg_page;
>> +    uint64_t msr_hv_synic_sint[HV_SYNIC_SINT_COUNT];
>>
>>       /* exception/interrupt handling */
>>       int error_code;
>> diff --git a/target-i386/kvm.c b/target-i386/kvm.c
>> index 2a9953b..cfcd01d 100644
>> --- a/target-i386/kvm.c
>> +++ b/target-i386/kvm.c
>> @@ -86,6 +86,7 @@ static bool has_msr_hv_crash;
>>   static bool has_msr_hv_reset;
>>   static bool has_msr_hv_vpindex;
>>   static bool has_msr_hv_runtime;
>> +static bool has_msr_hv_synic;
>>   static bool has_msr_mtrr;
>>   static bool has_msr_xss;
>>
>> @@ -521,7 +522,8 @@ static bool hyperv_enabled(X86CPU *cpu)
>>               cpu->hyperv_crash ||
>>               cpu->hyperv_reset ||
>>               cpu->hyperv_vpindex ||
>> -            cpu->hyperv_runtime);
>> +            cpu->hyperv_runtime ||
>> +            cpu->hyperv_synic);
>>   }
>>
>>   static Error *invtsc_mig_blocker;
>> @@ -610,6 +612,14 @@ int kvm_arch_init_vcpu(CPUState *cs)
>>           if (cpu->hyperv_runtime && has_msr_hv_runtime) {
>>               c->eax |= HV_X64_MSR_VP_RUNTIME_AVAILABLE;
>>           }
>> +        if (cpu->hyperv_synic) {
>> +            if (!has_msr_hv_synic ||
>> +                kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_SYNIC, 0)) {
>> +                fprintf(stderr, "Hyper-V SynIC is not supported by kernel\n");
>> +                return -ENOSYS;
>> +            }
>> +            c->eax |= HV_X64_MSR_SYNIC_AVAILABLE;
>> +        }
>>           c = &cpuid_data.entries[cpuid_i++];
>>           c->function = HYPERV_CPUID_ENLIGHTMENT_INFO;
>>           if (cpu->hyperv_relaxed_timing) {
>> @@ -950,6 +960,10 @@ static int kvm_get_supported_msrs(KVMState *s)
>>                       has_msr_hv_runtime = true;
>>                       continue;
>>                   }
>> +                if (kvm_msr_list->indices[i] == HV_X64_MSR_SCONTROL) {
>> +                    has_msr_hv_synic = true;
>> +                    continue;
>> +                }
>>               }
>>           }
>>
>> @@ -1511,6 +1525,31 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
>>               kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_VP_RUNTIME,
>>                                 env->msr_hv_runtime);
>>           }
>> +        if (cpu->hyperv_synic) {
>> +            int j;
>> +
>> +            if (!env->msr_hv_synic_version) {
>> +                /* First time initialization */
>> +                env->msr_hv_synic_version = HV_SYNIC_VERSION_1;
>> +                for (j = 0; j < ARRAY_SIZE(env->msr_hv_synic_sint); j++) {
>> +                    env->msr_hv_synic_sint[j] = HV_SYNIC_SINT_MASKED;
>> +                }
>> +            }
>
> I would prefer to put this in kvm_arch_init_vcpu, if possible.
>
Ok. I think kvm_arch_init_vcpu() is called after migration restores
the cpu->env->msr_hv_synic_* values, so unconditional initialization
of those values could overwrite the migrated ones. The check
"if (!env->msr_hv_synic_version) {" is necessary so that the
first-time initialization does not overwrite them. This is why this
code migrates the 'msr_hv_synic_version' value.

>> +            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_SCONTROL,
>> +                              env->msr_hv_synic_control);
>> +            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_SVERSION,
>> +                              env->msr_hv_synic_version);
>> +            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_SIEFP,
>> +                              env->msr_hv_synic_evt_page);
>> +            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_SIMP,
>> +                              env->msr_hv_synic_msg_page);
>> +
>> +            for (j = 0; j < ARRAY_SIZE(env->msr_hv_synic_sint); j++) {
>> +                kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_SINT0 + j,
>> +                                  env->msr_hv_synic_sint[j]);
>> +            }
>> +        }
>>           if (has_msr_mtrr) {
>>               kvm_msr_entry_set(&msrs[n++], MSR_MTRRdefType, env->mtrr_deftype);
>>               kvm_msr_entry_set(&msrs[n++],
>> @@ -1879,6 +1918,17 @@ static int kvm_get_msrs(X86CPU *cpu)
>>       if (has_msr_hv_runtime) {
>>           msrs[n++].index = HV_X64_MSR_VP_RUNTIME;
>>       }
>> +    if (cpu->hyperv_synic) {
>> +        uint32_t msr;
>> +
>> +        msrs[n++].index = HV_X64_MSR_SCONTROL;
>> +        msrs[n++].index = HV_X64_MSR_SVERSION;
>> +        msrs[n++].index = HV_X64_MSR_SIEFP;
>> +        msrs[n++].index = HV_X64_MSR_SIMP;
>> +        for (msr = HV_X64_MSR_SINT0; msr <= HV_X64_MSR_SINT15; msr++) {
>> +            msrs[n++].index = msr;
>> +        }
>> +    }
>>       if (has_msr_mtrr) {
>>           msrs[n++].index = MSR_MTRRdefType;
>>           msrs[n++].index = MSR_MTRRfix64K_00000;
>> @@ -2035,6 +2085,21 @@ static int kvm_get_msrs(X86CPU *cpu)
>>           case HV_X64_MSR_VP_RUNTIME:
>>               env->msr_hv_runtime = msrs[i].data;
>>               break;
>> +        case HV_X64_MSR_SCONTROL:
>> +            env->msr_hv_synic_control = msrs[i].data;
>> +            break;
>> +        case HV_X64_MSR_SVERSION:
>> +            env->msr_hv_synic_version = msrs[i].data;
>> +            break;
>> +        case HV_X64_MSR_SIEFP:
>> +            env->msr_hv_synic_evt_page = msrs[i].data;
>> +            break;
>> +        case HV_X64_MSR_SIMP:
>> +            env->msr_hv_synic_msg_page = msrs[i].data;
>> +            break;
>> +        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
>> +            env->msr_hv_synic_sint[index - HV_X64_MSR_SINT0] = msrs[i].data;
>> +            break;
>>           case MSR_MTRRdefType:
>>               env->mtrr_deftype = msrs[i].data;
>>               break;
>> diff --git a/target-i386/machine.c b/target-i386/machine.c
>> index a18e16e..bdb2997 100644
>> --- a/target-i386/machine.c
>> +++ b/target-i386/machine.c
>> @@ -710,6 +710,44 @@ static const VMStateDescription vmstate_msr_hyperv_runtime = {
>>       }
>>   };
>>
>> +static bool hyperv_synic_enable_needed(void *opaque)
>> +{
>> +    X86CPU *cpu = opaque;
>> +    CPUX86State *env = &cpu->env;
>> +    int i;
>> +
>> +    if (env->msr_hv_synic_control != 0 ||
>> +        env->msr_hv_synic_version != 0 ||
>> +        env->msr_hv_synic_evt_page != 0 ||
>> +        env->msr_hv_synic_msg_page != 0) {
>> +        return true;
>> +    }
>> +
>> +    for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
>> +        if (env->msr_hv_synic_sint[i] != 0) {
>> +            return true;
>> +        }
>> +    }
>> +
>> +    return false;
>> +}
>> +
>> +static const VMStateDescription vmstate_msr_hyperv_synic = {
>> +    .name = "cpu/msr_hyperv_synic",
>> +    .version_id = 1,
>> +    .minimum_version_id = 1,
>> +    .needed = hyperv_synic_enable_needed,
>> +    .fields = (VMStateField[]) {
>> +        VMSTATE_UINT64(env.msr_hv_synic_control, X86CPU),
>> +        VMSTATE_UINT64(env.msr_hv_synic_version, X86CPU),
>
> This one need not be migrated, since it's always the same value.
>
see my comment above
> Paolo
>
>> +        VMSTATE_UINT64(env.msr_hv_synic_evt_page, X86CPU),
>> +        VMSTATE_UINT64(env.msr_hv_synic_msg_page, X86CPU),
>> +        VMSTATE_UINT64_ARRAY(env.msr_hv_synic_sint, X86CPU,
>> +                             HV_SYNIC_SINT_COUNT),
>> +        VMSTATE_END_OF_LIST()
>> +    }
>> +};
>> +
>>   static bool avx512_needed(void *opaque)
>>   {
>>       X86CPU *cpu = opaque;
>> @@ -893,6 +931,7 @@ VMStateDescription vmstate_x86_cpu = {
>>           &vmstate_msr_hyperv_time,
>>           &vmstate_msr_hyperv_crash,
>>           &vmstate_msr_hyperv_runtime,
>> +        &vmstate_msr_hyperv_synic,
>>           &vmstate_avx512,
>>           &vmstate_xss,
>>           NULL
>>
Paolo Bonzini Nov. 11, 2015, 9:17 a.m. UTC | #2
On 11/11/2015 10:09, Andrey Smetanin wrote:
>>
>> I would prefer to put this in kvm_arch_init_vcpu, if possible.
>>
> Ok. I think the kvm_arch_init_vcpu() is called after migration restores
> cpu->env->msr_hv_synic_* values, so unconditional initialization of
> cpu->env->msr_hv_synic_* values can overwrite migrated values. The check
> "if (!env->msr_hv_synic_version) {" is neccessary for first time
> initialization to protect against such overwriting. This is why this
> code migrates 'msr_hv_synic_version' value.

No, kvm_arch_init_vcpu is called at the very beginning, when the VCPU
thread is created.

main
-> machine_class->init
-> pc_init1
-> pc_cpus_init
-> pc_new_cpu
-> cpu_x86_create
-> object_property_set_bool
-> x86_cpu_realizefn
-> qemu_init_vcpu
-> qemu_kvm_start_vcpu
-> qemu_kvm_cpu_thread_fn (in new thread)
-> kvm_init_vcpu
-> kvm_arch_init_vcpu

This is long before qemu_start_incoming_migration, which is among the
last things done before calling main_loop.

Paolo
Andrey Smetanin Nov. 11, 2015, 9:25 a.m. UTC | #3
On 11/11/2015 12:17 PM, Paolo Bonzini wrote:
>
>
> On 11/11/2015 10:09, Andrey Smetanin wrote:
>>>
>>> I would prefer to put this in kvm_arch_init_vcpu, if possible.
>>>
>> Ok. I think the kvm_arch_init_vcpu() is called after migration restores
>> cpu->env->msr_hv_synic_* values, so unconditional initialization of
>> cpu->env->msr_hv_synic_* values can overwrite migrated values. The check
>> "if (!env->msr_hv_synic_version) {" is neccessary for first time
>> initialization to protect against such overwriting. This is why this
>> code migrates 'msr_hv_synic_version' value.
>
> No, kvm_arch_init_vcpu is called at the very beginning, when the VCPU
> thread is created.
>
> main
> -> machine_class->init
> -> pc_init1
> -> pc_cpus_init
> -> pc_new_cpu
> -> cpu_x86_create
> -> object_property_set_bool
> -> x86_cpu_realizefn
> -> qemu_init_vcpu
> -> qemu_kvm_start_vcpu
> -> qemu_kvm_cpu_thread_fn (in new thread)
> -> kvm_init_vcpu
> -> kvm_arch_init_vcpu
>
> This is long before qemu_start_incoming_migration, which is among the
> last things done before calling main_loop
In this case I'll remove the migration of msr_hv_synic_version and do
the first-time initialization inside kvm_arch_init_vcpu(), in the
section where the SynIC availability CPUID bit is set.

Thank you for clarification.
>
> Paolo
>
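
For reference, a minimal sketch of what that follow-up could look like,
reusing the names from the v2 patch above; this is only an illustration
of the plan discussed here, not the actual v3 code:

    /* Inside kvm_arch_init_vcpu(), where env == &cpu->env and c points
     * at the HYPERV_CPUID_FEATURES leaf being filled in. */
    if (cpu->hyperv_synic) {
        int sint;

        if (!has_msr_hv_synic ||
            kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_SYNIC, 0)) {
            fprintf(stderr, "Hyper-V SynIC is not supported by kernel\n");
            return -ENOSYS;
        }
        c->eax |= HV_X64_MSR_SYNIC_AVAILABLE;

        /* One-time initialization, moved here from kvm_put_msrs(); with
         * this, msr_hv_synic_version no longer needs to be migrated. */
        env->msr_hv_synic_version = HV_SYNIC_VERSION_1;
        for (sint = 0; sint < ARRAY_SIZE(env->msr_hv_synic_sint); sint++) {
            env->msr_hv_synic_sint[sint] = HV_SYNIC_SINT_MASKED;
        }
    }
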
Paolo Bonzini Nov. 11, 2015, 10:05 a.m. UTC | #4
On 11/11/2015 10:25, Andrey Smetanin wrote:
> In this case I'll remove migration of msr_hv_synic_version and make
> first time initialization inside  kvm_arch_init_vcpu() - inside section
> where SynIC availability cpuid bit is set.

Thanks!

Paolo

Patch

diff --git a/target-i386/cpu-qom.h b/target-i386/cpu-qom.h
index e3bfe9d..7ea5b34 100644
--- a/target-i386/cpu-qom.h
+++ b/target-i386/cpu-qom.h
@@ -94,6 +94,7 @@  typedef struct X86CPU {
     bool hyperv_reset;
     bool hyperv_vpindex;
     bool hyperv_runtime;
+    bool hyperv_synic;
     bool check_cpuid;
     bool enforce_cpuid;
     bool expose_kvm;
diff --git a/target-i386/cpu.c b/target-i386/cpu.c
index e5f1c5b..1462e19 100644
--- a/target-i386/cpu.c
+++ b/target-i386/cpu.c
@@ -3142,6 +3142,7 @@  static Property x86_cpu_properties[] = {
     DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
     DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
     DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
+    DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
     DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
     DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
     DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
diff --git a/target-i386/cpu.h b/target-i386/cpu.h
index fc4a605..8cf33df 100644
--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -918,6 +918,11 @@  typedef struct CPUX86State {
     uint64_t msr_hv_tsc;
     uint64_t msr_hv_crash_params[HV_X64_MSR_CRASH_PARAMS];
     uint64_t msr_hv_runtime;
+    uint64_t msr_hv_synic_control;
+    uint64_t msr_hv_synic_version;
+    uint64_t msr_hv_synic_evt_page;
+    uint64_t msr_hv_synic_msg_page;
+    uint64_t msr_hv_synic_sint[HV_SYNIC_SINT_COUNT];
 
     /* exception/interrupt handling */
     int error_code;
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index 2a9953b..cfcd01d 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -86,6 +86,7 @@  static bool has_msr_hv_crash;
 static bool has_msr_hv_reset;
 static bool has_msr_hv_vpindex;
 static bool has_msr_hv_runtime;
+static bool has_msr_hv_synic;
 static bool has_msr_mtrr;
 static bool has_msr_xss;
 
@@ -521,7 +522,8 @@  static bool hyperv_enabled(X86CPU *cpu)
             cpu->hyperv_crash ||
             cpu->hyperv_reset ||
             cpu->hyperv_vpindex ||
-            cpu->hyperv_runtime);
+            cpu->hyperv_runtime ||
+            cpu->hyperv_synic);
 }
 
 static Error *invtsc_mig_blocker;
@@ -610,6 +612,14 @@  int kvm_arch_init_vcpu(CPUState *cs)
         if (cpu->hyperv_runtime && has_msr_hv_runtime) {
             c->eax |= HV_X64_MSR_VP_RUNTIME_AVAILABLE;
         }
+        if (cpu->hyperv_synic) {
+            if (!has_msr_hv_synic ||
+                kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_SYNIC, 0)) {
+                fprintf(stderr, "Hyper-V SynIC is not supported by kernel\n");
+                return -ENOSYS;
+            }
+            c->eax |= HV_X64_MSR_SYNIC_AVAILABLE;
+        }
         c = &cpuid_data.entries[cpuid_i++];
         c->function = HYPERV_CPUID_ENLIGHTMENT_INFO;
         if (cpu->hyperv_relaxed_timing) {
@@ -950,6 +960,10 @@  static int kvm_get_supported_msrs(KVMState *s)
                     has_msr_hv_runtime = true;
                     continue;
                 }
+                if (kvm_msr_list->indices[i] == HV_X64_MSR_SCONTROL) {
+                    has_msr_hv_synic = true;
+                    continue;
+                }
             }
         }
 
@@ -1511,6 +1525,31 @@  static int kvm_put_msrs(X86CPU *cpu, int level)
             kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_VP_RUNTIME,
                               env->msr_hv_runtime);
         }
+        if (cpu->hyperv_synic) {
+            int j;
+
+            if (!env->msr_hv_synic_version) {
+                /* First time initialization */
+                env->msr_hv_synic_version = HV_SYNIC_VERSION_1;
+                for (j = 0; j < ARRAY_SIZE(env->msr_hv_synic_sint); j++) {
+                    env->msr_hv_synic_sint[j] = HV_SYNIC_SINT_MASKED;
+                }
+            }
+
+            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_SCONTROL,
+                              env->msr_hv_synic_control);
+            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_SVERSION,
+                              env->msr_hv_synic_version);
+            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_SIEFP,
+                              env->msr_hv_synic_evt_page);
+            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_SIMP,
+                              env->msr_hv_synic_msg_page);
+
+            for (j = 0; j < ARRAY_SIZE(env->msr_hv_synic_sint); j++) {
+                kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_SINT0 + j,
+                                  env->msr_hv_synic_sint[j]);
+            }
+        }
         if (has_msr_mtrr) {
             kvm_msr_entry_set(&msrs[n++], MSR_MTRRdefType, env->mtrr_deftype);
             kvm_msr_entry_set(&msrs[n++],
@@ -1879,6 +1918,17 @@  static int kvm_get_msrs(X86CPU *cpu)
     if (has_msr_hv_runtime) {
         msrs[n++].index = HV_X64_MSR_VP_RUNTIME;
     }
+    if (cpu->hyperv_synic) {
+        uint32_t msr;
+
+        msrs[n++].index = HV_X64_MSR_SCONTROL;
+        msrs[n++].index = HV_X64_MSR_SVERSION;
+        msrs[n++].index = HV_X64_MSR_SIEFP;
+        msrs[n++].index = HV_X64_MSR_SIMP;
+        for (msr = HV_X64_MSR_SINT0; msr <= HV_X64_MSR_SINT15; msr++) {
+            msrs[n++].index = msr;
+        }
+    }
     if (has_msr_mtrr) {
         msrs[n++].index = MSR_MTRRdefType;
         msrs[n++].index = MSR_MTRRfix64K_00000;
@@ -2035,6 +2085,21 @@  static int kvm_get_msrs(X86CPU *cpu)
         case HV_X64_MSR_VP_RUNTIME:
             env->msr_hv_runtime = msrs[i].data;
             break;
+        case HV_X64_MSR_SCONTROL:
+            env->msr_hv_synic_control = msrs[i].data;
+            break;
+        case HV_X64_MSR_SVERSION:
+            env->msr_hv_synic_version = msrs[i].data;
+            break;
+        case HV_X64_MSR_SIEFP:
+            env->msr_hv_synic_evt_page = msrs[i].data;
+            break;
+        case HV_X64_MSR_SIMP:
+            env->msr_hv_synic_msg_page = msrs[i].data;
+            break;
+        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
+            env->msr_hv_synic_sint[index - HV_X64_MSR_SINT0] = msrs[i].data;
+            break;
         case MSR_MTRRdefType:
             env->mtrr_deftype = msrs[i].data;
             break;
diff --git a/target-i386/machine.c b/target-i386/machine.c
index a18e16e..bdb2997 100644
--- a/target-i386/machine.c
+++ b/target-i386/machine.c
@@ -710,6 +710,44 @@  static const VMStateDescription vmstate_msr_hyperv_runtime = {
     }
 };
 
+static bool hyperv_synic_enable_needed(void *opaque)
+{
+    X86CPU *cpu = opaque;
+    CPUX86State *env = &cpu->env;
+    int i;
+
+    if (env->msr_hv_synic_control != 0 ||
+        env->msr_hv_synic_version != 0 ||
+        env->msr_hv_synic_evt_page != 0 ||
+        env->msr_hv_synic_msg_page != 0) {
+        return true;
+    }
+
+    for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
+        if (env->msr_hv_synic_sint[i] != 0) {
+            return true;
+        }
+    }
+
+    return false;
+}
+
+static const VMStateDescription vmstate_msr_hyperv_synic = {
+    .name = "cpu/msr_hyperv_synic",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = hyperv_synic_enable_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64(env.msr_hv_synic_control, X86CPU),
+        VMSTATE_UINT64(env.msr_hv_synic_version, X86CPU),
+        VMSTATE_UINT64(env.msr_hv_synic_evt_page, X86CPU),
+        VMSTATE_UINT64(env.msr_hv_synic_msg_page, X86CPU),
+        VMSTATE_UINT64_ARRAY(env.msr_hv_synic_sint, X86CPU,
+                             HV_SYNIC_SINT_COUNT),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
 static bool avx512_needed(void *opaque)
 {
     X86CPU *cpu = opaque;
@@ -893,6 +931,7 @@  VMStateDescription vmstate_x86_cpu = {
         &vmstate_msr_hyperv_time,
         &vmstate_msr_hyperv_crash,
         &vmstate_msr_hyperv_runtime,
+        &vmstate_msr_hyperv_synic,
         &vmstate_avx512,
         &vmstate_xss,
         NULL