@@ -693,6 +693,11 @@ typedef struct CPUX86State {
/* For KVM */
uint32_t mp_state;
int32_t interrupt_injected;
+ uint8_t soft_interrupt;
+ uint8_t nmi_injected;
+ uint8_t nmi_pending;
+ uint8_t has_error_code;
+ uint32_t sipi_vector;
/* in order to simplify APIC support, we leave this pointer to the
user */
@@ -225,6 +225,8 @@ int kvm_arch_init_vcpu(CPUState *env)
void kvm_arch_reset_vcpu(CPUState *env)
{
env->interrupt_injected = -1;
+ env->nmi_injected = 0;
+ env->nmi_pending = 0;
}
static int kvm_has_msr_star(CPUState *env)
@@ -630,6 +632,48 @@ static void kvm_get_msrs(CPUState *env, struct kvm_msrs *msr_list)
}
}
+#ifdef KVM_CAP_VCPU_STATE
+static void kvm_put_events(CPUState *env, struct kvm_x86_event_state *events)
+{
+ events->exception.injected = (env->exception_index >= 0);
+ events->exception.nr = env->exception_index;
+ events->exception.has_error_code = env->has_error_code;
+ events->exception.error_code = env->error_code;
+
+ events->interrupt.injected = (env->interrupt_injected >= 0);
+ events->interrupt.nr = env->interrupt_injected;
+ events->interrupt.soft = env->soft_interrupt;
+
+ events->nmi.injected = env->nmi_injected;
+ events->nmi.pending = env->nmi_pending;
+ events->nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);
+
+ events->sipi_vector = env->sipi_vector;
+}
+
+static void kvm_get_events(CPUState *env, struct kvm_x86_event_state *events)
+{
+ env->exception_index =
+ events->exception.injected ? events->exception.nr : -1;
+ env->has_error_code = events->exception.has_error_code;
+ env->error_code = events->exception.error_code;
+
+ env->interrupt_injected =
+ events->interrupt.injected ? events->interrupt.nr : -1;
+ env->soft_interrupt = events->interrupt.soft;
+
+ env->nmi_injected = events->nmi.injected;
+ env->nmi_pending = events->nmi.pending;
+ if (events->nmi.masked) {
+ env->hflags2 |= HF2_NMI_MASK;
+ } else {
+ env->hflags2 &= ~HF2_NMI_MASK;
+ }
+
+ env->sipi_vector = events->sipi_vector;
+}
+#endif
+
int kvm_arch_put_registers(CPUState *env)
{
struct kvm_regs regs;
@@ -642,9 +686,10 @@ int kvm_arch_put_registers(CPUState *env)
int ret;
#ifdef KVM_CAP_VCPU_STATE
struct kvm_mp_state mp_state;
+ struct kvm_x86_event_state events;
struct {
struct kvm_vcpu_state header;
- struct kvm_vcpu_substate substates[5];
+ struct kvm_vcpu_substate substates[6];
} request;
#endif
@@ -654,6 +699,7 @@ int kvm_arch_put_registers(CPUState *env)
kvm_put_msrs(env, &msrs.info);
#ifdef KVM_CAP_VCPU_STATE
mp_state.mp_state = env->mp_state;
+ kvm_put_events(env, &events);
if (kvm_has_vcpu_state()) {
request.header.nsubstates = ARRAY_SIZE(request.header.substates);
@@ -667,6 +713,8 @@ int kvm_arch_put_registers(CPUState *env)
request.header.substates[3].offset = (size_t)&msrs - (size_t)&request;
request.header.substates[4].type = KVM_VCPU_STATE_MP;
request.header.substates[4].offset = (size_t)&mp_state - (size_t)&request;
+ request.header.substates[5].type = KVM_X86_VCPU_STATE_EVENTS;
+ request.header.substates[5].offset = (size_t)&events - (size_t)&request;
ret = kvm_vcpu_ioctl(env, KVM_SET_VCPU_STATE, &request);
if (ret < 0) {
@@ -712,9 +760,10 @@ int kvm_arch_get_registers(CPUState *env)
int ret = -1;
#ifdef KVM_CAP_VCPU_STATE
struct kvm_mp_state mp_state;
+ struct kvm_x86_event_state events;
struct {
struct kvm_vcpu_state header;
- struct kvm_vcpu_substate substates[5];
+ struct kvm_vcpu_substate substates[6];
} request;
#endif
@@ -733,6 +782,8 @@ int kvm_arch_get_registers(CPUState *env)
request.header.substates[3].offset = (size_t)&msrs - (size_t)&request;
request.header.substates[4].type = KVM_VCPU_STATE_MP;
request.header.substates[4].offset = (size_t)&mp_state - (size_t)&request;
+ request.header.substates[5].type = KVM_X86_VCPU_STATE_EVENTS;
+ request.header.substates[5].offset = (size_t)&events - (size_t)&request;
ret = kvm_vcpu_ioctl(env, KVM_GET_VCPU_STATE, &request);
if (ret < 0) {
@@ -740,6 +791,7 @@ int kvm_arch_get_registers(CPUState *env)
}
msrs.info.nmsrs = msrs.info.nprocessed;
env->mp_state = mp_state.mp_state;
+ kvm_get_events(env, &events);
} else
#endif
{
@@ -448,6 +448,11 @@ static const VMStateDescription vmstate_cpu = {
VMSTATE_INT32_V(interrupt_injected, CPUState, 9),
VMSTATE_UINT32_V(mp_state, CPUState, 9),
VMSTATE_UINT64_V(tsc, CPUState, 9),
+ VMSTATE_UINT8_V(soft_interrupt, CPUState, 11),
+ VMSTATE_UINT8_V(nmi_injected, CPUState, 11),
+ VMSTATE_UINT8_V(nmi_pending, CPUState, 11),
+ VMSTATE_UINT8_V(has_error_code, CPUState, 11),
+ VMSTATE_UINT32_V(sipi_vector, CPUState, 11),
/* MCE */
VMSTATE_UINT64_V(mcg_cap, CPUState, 10),
VMSTATE_UINT64_V(mcg_status, CPUState, 10),
This patch extends the KVM state sync logic with the event substate from
the new VCPU state interface, giving access to the still-missing
exception, interrupt and NMI states.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
---
 target-i386/cpu.h     |    5 ++++
 target-i386/kvm.c     |   56 +++++++++++++++++++++++++++++++++++++++++++++++--
 target-i386/machine.c |    5 ++++
 3 files changed, 64 insertions(+), 2 deletions(-)