
[4/4] qemu-kvm: Enable XSAVE live migration support

Message ID 1276759096-29104-5-git-send-email-sheng@linux.intel.com
State New

Commit Message

Sheng Yang June 17, 2010, 7:18 a.m. UTC
Based on upstream xsave related fields.

Signed-off-by: Sheng Yang <sheng@linux.intel.com>
---
 qemu-kvm-x86.c |   95 +++++++++++++++++++++++++++++++++++++++++++++++++++++++-
 qemu-kvm.c     |   24 ++++++++++++++
 qemu-kvm.h     |   28 ++++++++++++++++
 3 files changed, 146 insertions(+), 1 deletions(-)

Comments

Jan Kiszka June 17, 2010, 7:41 a.m. UTC | #1
Sheng Yang wrote:
> Based on upstream xsave related fields.
> 
> Signed-off-by: Sheng Yang <sheng@linux.intel.com>
> ---
>  qemu-kvm-x86.c |   95 +++++++++++++++++++++++++++++++++++++++++++++++++++++++-
>  qemu-kvm.c     |   24 ++++++++++++++
>  qemu-kvm.h     |   28 ++++++++++++++++
>  3 files changed, 146 insertions(+), 1 deletions(-)
> 
> diff --git a/qemu-kvm-x86.c b/qemu-kvm-x86.c
> index 3c33e64..dcef8b5 100644
> --- a/qemu-kvm-x86.c
> +++ b/qemu-kvm-x86.c
> @@ -772,10 +772,26 @@ static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
>  	| (rhs->avl * DESC_AVL_MASK);
>  }
>  
> +#ifdef KVM_CAP_XSAVE
> +#define XSAVE_CWD_RIP     2
> +#define XSAVE_CWD_RDP     4
> +#define XSAVE_MXCSR       6
> +#define XSAVE_ST_SPACE    8
> +#define XSAVE_XMM_SPACE   40
> +#define XSAVE_XSTATE_BV   128
> +#define XSAVE_YMMH_SPACE  144
> +#endif
> +
>  void kvm_arch_load_regs(CPUState *env, int level)
>  {
>      struct kvm_regs regs;
>      struct kvm_fpu fpu;
> +#ifdef KVM_CAP_XSAVE
> +    struct kvm_xsave* xsave;
> +#endif
> +#ifdef KVM_CAP_XCRS
> +    struct kvm_xcrs xcrs;
> +#endif
>      struct kvm_sregs sregs;
>      struct kvm_msr_entry msrs[100];
>      int rc, n, i;
> @@ -806,16 +822,53 @@ void kvm_arch_load_regs(CPUState *env, int level)
>  
>      kvm_set_regs(env, &regs);
>  
> +#ifdef KVM_CAP_XSAVE
> +    if (kvm_check_extension(kvm_state, KVM_CAP_XSAVE)) {
> +        uint16_t cwd, swd, twd, fop;
> +
> +        xsave = qemu_memalign(4096, sizeof(struct kvm_xsave));
> +        memset(xsave, 0, sizeof(struct kvm_xsave));
> +        cwd = swd = twd = fop = 0;
> +        swd = env->fpus & ~(7 << 11);
> +        swd |= (env->fpstt & 7) << 11;
> +        cwd = env->fpuc;
> +        for (i = 0; i < 8; ++i)
> +            twd |= (!env->fptags[i]) << i;
> +        xsave->region[0] = (uint32_t)(swd << 16) + cwd;
> +        xsave->region[1] = (uint32_t)(fop << 16) + twd;
> +        memcpy(&xsave->region[XSAVE_ST_SPACE], env->fpregs,
> +                sizeof env->fpregs);
> +        memcpy(&xsave->region[XSAVE_XMM_SPACE], env->xmm_regs,
> +                sizeof env->xmm_regs);
> +        xsave->region[XSAVE_MXCSR] = env->mxcsr;
> +        *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV] = env->xstate_bv;
> +        memcpy(&xsave->region[XSAVE_YMMH_SPACE], env->ymmh_regs,
> +                sizeof env->ymmh_regs);
> +        kvm_set_xsave(env, xsave);
> +#ifdef KVM_CAP_XCRS
> +        if (kvm_check_extension(kvm_state, KVM_CAP_XCRS)) {
> +            xcrs.nr_xcrs = 1;
> +            xcrs.flags = 0;
> +            xcrs.xcrs[0].xcr = 0;
> +            xcrs.xcrs[0].value = env->xcr0;
> +            kvm_set_xcrs(env, &xcrs);
> +        }
> +#endif /* KVM_CAP_XCRS */
> +    } else {
> +#endif /* KVM_CAP_XSAVE */

Why not reuse kvm_put/get_xsave as defined for upstream? There should
be enough examples of that pattern. The result will be a tiny qemu-kvm
patch.
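
Something along these lines would do (untested sketch; it assumes the
upstream kvm_put_xsave()/kvm_put_xcrs() helpers from target-i386/kvm.c are
made visible to qemu-kvm-x86.c; upstream keeps them static, so that export
is part of the assumption):

#ifdef KVM_CAP_XSAVE
    if (kvm_check_extension(kvm_state, KVM_CAP_XSAVE)) {
        /* let the shared upstream code marshal x87/SSE/YMM state from env */
        kvm_put_xsave(env);
#ifdef KVM_CAP_XCRS
        if (kvm_check_extension(kvm_state, KVM_CAP_XCRS)) {
            kvm_put_xcrs(env);          /* writes XCR0 from env->xcr0 */
        }
#endif
    } else
#endif
    {
        /* existing KVM_SET_FPU path, unchanged */
    }

The save side would mirror this with kvm_get_xsave()/kvm_get_xcrs().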

Jan

>      memset(&fpu, 0, sizeof fpu);
>      fpu.fsw = env->fpus & ~(7 << 11);
>      fpu.fsw |= (env->fpstt & 7) << 11;
>      fpu.fcw = env->fpuc;
>      for (i = 0; i < 8; ++i)
> -	fpu.ftwx |= (!env->fptags[i]) << i;
> +        fpu.ftwx |= (!env->fptags[i]) << i;
>      memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
>      memcpy(fpu.xmm, env->xmm_regs, sizeof env->xmm_regs);
>      fpu.mxcsr = env->mxcsr;
>      kvm_set_fpu(env, &fpu);
> +#ifdef KVM_CAP_XSAVE
> +    }
> +#endif
>  
>      memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
>      if (env->interrupt_injected >= 0) {
> @@ -934,6 +987,12 @@ void kvm_arch_save_regs(CPUState *env)
>  {
>      struct kvm_regs regs;
>      struct kvm_fpu fpu;
> +#ifdef KVM_CAP_XSAVE
> +    struct kvm_xsave* xsave;
> +#endif
> +#ifdef KVM_CAP_XCRS
> +    struct kvm_xcrs xcrs;
> +#endif
>      struct kvm_sregs sregs;
>      struct kvm_msr_entry msrs[100];
>      uint32_t hflags;
> @@ -965,6 +1024,37 @@ void kvm_arch_save_regs(CPUState *env)
>      env->eflags = regs.rflags;
>      env->eip = regs.rip;
>  
> +#ifdef KVM_CAP_XSAVE
> +    if (kvm_check_extension(kvm_state, KVM_CAP_XSAVE)) {
> +        uint16_t cwd, swd, twd, fop;
> +        xsave = qemu_memalign(4096, sizeof(struct kvm_xsave));
> +        kvm_get_xsave(env, xsave);
> +        cwd = (uint16_t)xsave->region[0];
> +        swd = (uint16_t)(xsave->region[0] >> 16);
> +        twd = (uint16_t)xsave->region[1];
> +        fop = (uint16_t)(xsave->region[1] >> 16);
> +        env->fpstt = (swd >> 11) & 7;
> +        env->fpus = swd;
> +        env->fpuc = cwd;
> +        for (i = 0; i < 8; ++i)
> +            env->fptags[i] = !((twd >> i) & 1);
> +        env->mxcsr = xsave->region[XSAVE_MXCSR];
> +        memcpy(env->fpregs, &xsave->region[XSAVE_ST_SPACE],
> +                sizeof env->fpregs);
> +        memcpy(env->xmm_regs, &xsave->region[XSAVE_XMM_SPACE],
> +                sizeof env->xmm_regs);
> +        env->xstate_bv = *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV];
> +        memcpy(env->ymmh_regs, &xsave->region[XSAVE_YMMH_SPACE],
> +                sizeof env->ymmh_regs);
> +#ifdef KVM_CAP_XCRS
> +        if (kvm_check_extension(kvm_state, KVM_CAP_XCRS)) {
> +            kvm_get_xcrs(env, &xcrs);
> +            if (xcrs.xcrs[0].xcr == 0)
> +                env->xcr0 = xcrs.xcrs[0].value;
> +        }
> +#endif
> +    } else {
> +#endif
>      kvm_get_fpu(env, &fpu);
>      env->fpstt = (fpu.fsw >> 11) & 7;
>      env->fpus = fpu.fsw;
> @@ -974,6 +1064,9 @@ void kvm_arch_save_regs(CPUState *env)
>      memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
>      memcpy(env->xmm_regs, fpu.xmm, sizeof env->xmm_regs);
>      env->mxcsr = fpu.mxcsr;
> +#ifdef KVM_CAP_XSAVE
> +    }
> +#endif
>  
>      kvm_get_sregs(env, &sregs);
>  
> diff --git a/qemu-kvm.c b/qemu-kvm.c
> index 96d458c..be1dac2 100644
> --- a/qemu-kvm.c
> +++ b/qemu-kvm.c
> @@ -503,6 +503,30 @@ int kvm_set_mpstate(CPUState *env, struct kvm_mp_state *mp_state)
>  }
>  #endif
>  
> +#ifdef KVM_CAP_XSAVE
> +int kvm_get_xsave(CPUState *env, struct kvm_xsave *xsave)
> +{
> +    return kvm_vcpu_ioctl(env, KVM_GET_XSAVE, xsave);
> +}
> +
> +int kvm_set_xsave(CPUState *env, struct kvm_xsave *xsave)
> +{
> +    return kvm_vcpu_ioctl(env, KVM_SET_XSAVE, xsave);
> +}
> +#endif
> +
> +#ifdef KVM_CAP_XCRS
> +int kvm_get_xcrs(CPUState *env, struct kvm_xcrs *xcrs)
> +{
> +    return kvm_vcpu_ioctl(env, KVM_GET_XCRS, xcrs);
> +}
> +
> +int kvm_set_xcrs(CPUState *env, struct kvm_xcrs *xcrs)
> +{
> +    return kvm_vcpu_ioctl(env, KVM_SET_XCRS, xcrs);
> +}
> +#endif
> +
>  static int handle_mmio(CPUState *env)
>  {
>      unsigned long addr = env->kvm_run->mmio.phys_addr;
> diff --git a/qemu-kvm.h b/qemu-kvm.h
> index 6f6c6d8..3ace503 100644
> --- a/qemu-kvm.h
> +++ b/qemu-kvm.h
> @@ -300,6 +300,34 @@ int kvm_get_mpstate(CPUState *env, struct kvm_mp_state *mp_state);
>  int kvm_set_mpstate(CPUState *env, struct kvm_mp_state *mp_state);
>  #endif
>  
> +#ifdef KVM_CAP_XSAVE
> +/*!
> + * \brief Read VCPU xsave state
> + *
> + */
> +int kvm_get_xsave(CPUState *env, struct kvm_xsave *xsave);
> +
> +/*!
> + * \brief Write VCPU xsave state
> + *
> + */
> +int kvm_set_xsave(CPUState *env, struct kvm_xsave *xsave);
> +#endif
> +
> +#ifdef KVM_CAP_XCRS
> +/*!
> + * \brief Read VCPU XCRs
> + *
> + */
> +int kvm_get_xcrs(CPUState *env, struct kvm_xcrs *xcrs);
> +
> +/*!
> + * \brief Write VCPU XCRs
> + *
> + */
> +int kvm_set_xcrs(CPUState *env, struct kvm_xcrs *xcrs);
> +#endif
> +
>  /*!
>   * \brief Simulate an external vectored interrupt
>   *
Sheng Yang June 17, 2010, 8:32 a.m. UTC | #2
On Thursday 17 June 2010 15:41:43 Jan Kiszka wrote:
> Sheng Yang wrote:
> > Based on upstream xsave related fields.
> > 
> > Signed-off-by: Sheng Yang <sheng@linux.intel.com>
> > ---
> > 
> >  qemu-kvm-x86.c |   95 +++++++++++++++++++++++++++++++++++++++++++++++++++++++-
> >  qemu-kvm.c     |   24 ++++++++++++++
> >  qemu-kvm.h     |   28 ++++++++++++++++
> >  3 files changed, 146 insertions(+), 1 deletions(-)
> > 
> > diff --git a/qemu-kvm-x86.c b/qemu-kvm-x86.c
> > index 3c33e64..dcef8b5 100644
> > --- a/qemu-kvm-x86.c
> > +++ b/qemu-kvm-x86.c
> > @@ -772,10 +772,26 @@ static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
> > 
> >  	| (rhs->avl * DESC_AVL_MASK);
> >  
> >  }
> > 
> > +#ifdef KVM_CAP_XSAVE
> > +#define XSAVE_CWD_RIP     2
> > +#define XSAVE_CWD_RDP     4
> > +#define XSAVE_MXCSR       6
> > +#define XSAVE_ST_SPACE    8
> > +#define XSAVE_XMM_SPACE   40
> > +#define XSAVE_XSTATE_BV   128
> > +#define XSAVE_YMMH_SPACE  144
> > +#endif
> > +
> > 
> >  void kvm_arch_load_regs(CPUState *env, int level)
> >  {
> >  
> >      struct kvm_regs regs;
> >      struct kvm_fpu fpu;
> > 
> > +#ifdef KVM_CAP_XSAVE
> > +    struct kvm_xsave* xsave;
> > +#endif
> > +#ifdef KVM_CAP_XCRS
> > +    struct kvm_xcrs xcrs;
> > +#endif
> > 
> >      struct kvm_sregs sregs;
> >      struct kvm_msr_entry msrs[100];
> >      int rc, n, i;
> > 
> > @@ -806,16 +822,53 @@ void kvm_arch_load_regs(CPUState *env, int level)
> > 
> >      kvm_set_regs(env, &regs);
> > 
> > +#ifdef KVM_CAP_XSAVE
> > +    if (kvm_check_extension(kvm_state, KVM_CAP_XSAVE)) {
> > +        uint16_t cwd, swd, twd, fop;
> > +
> > +        xsave = qemu_memalign(4096, sizeof(struct kvm_xsave));
> > +        memset(xsave, 0, sizeof(struct kvm_xsave));
> > +        cwd = swd = twd = fop = 0;
> > +        swd = env->fpus & ~(7 << 11);
> > +        swd |= (env->fpstt & 7) << 11;
> > +        cwd = env->fpuc;
> > +        for (i = 0; i < 8; ++i)
> > +            twd |= (!env->fptags[i]) << i;
> > +        xsave->region[0] = (uint32_t)(swd << 16) + cwd;
> > +        xsave->region[1] = (uint32_t)(fop << 16) + twd;
> > +        memcpy(&xsave->region[XSAVE_ST_SPACE], env->fpregs,
> > +                sizeof env->fpregs);
> > +        memcpy(&xsave->region[XSAVE_XMM_SPACE], env->xmm_regs,
> > +                sizeof env->xmm_regs);
> > +        xsave->region[XSAVE_MXCSR] = env->mxcsr;
> > +        *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV] = env->xstate_bv;
> > +        memcpy(&xsave->region[XSAVE_YMMH_SPACE], env->ymmh_regs,
> > +                sizeof env->ymmh_regs);
> > +        kvm_set_xsave(env, xsave);
> > +#ifdef KVM_CAP_XCRS
> > +        if (kvm_check_extension(kvm_state, KVM_CAP_XCRS)) {
> > +            xcrs.nr_xcrs = 1;
> > +            xcrs.flags = 0;
> > +            xcrs.xcrs[0].xcr = 0;
> > +            xcrs.xcrs[0].value = env->xcr0;
> > +            kvm_set_xcrs(env, &xcrs);
> > +        }
> > +#endif /* KVM_CAP_XCRS */
> > +    } else {
> > +#endif /* KVM_CAP_XSAVE */
> 
> Why not reuse kvm_put/get_xsave as defined for upstream? There should
> be enough examples of that pattern. The result will be a tiny qemu-kvm
> patch.

Lots of code in kvm_arch_load/save_regs() still duplicates what is in kvm.c,
e.g. kvm_get/put_sregs and kvm_get/put_msrs, so I would like to wait for the merge.

--
regards
Yang, Sheng

> 
> Jan
> 
> >      memset(&fpu, 0, sizeof fpu);
> >      fpu.fsw = env->fpus & ~(7 << 11);
> >      fpu.fsw |= (env->fpstt & 7) << 11;
> >      fpu.fcw = env->fpuc;
> >      for (i = 0; i < 8; ++i)
> > 
> > -	fpu.ftwx |= (!env->fptags[i]) << i;
> > +        fpu.ftwx |= (!env->fptags[i]) << i;
> > 
> >      memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
> >      memcpy(fpu.xmm, env->xmm_regs, sizeof env->xmm_regs);
> >      fpu.mxcsr = env->mxcsr;
> >      kvm_set_fpu(env, &fpu);
> > 
> > +#ifdef KVM_CAP_XSAVE
> > +    }
> > +#endif
> > 
> >      memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
> >      if (env->interrupt_injected >= 0) {
> > 
> > @@ -934,6 +987,12 @@ void kvm_arch_save_regs(CPUState *env)
> > 
> >  {
> >  
> >      struct kvm_regs regs;
> >      struct kvm_fpu fpu;
> > 
> > +#ifdef KVM_CAP_XSAVE
> > +    struct kvm_xsave* xsave;
> > +#endif
> > +#ifdef KVM_CAP_XCRS
> > +    struct kvm_xcrs xcrs;
> > +#endif
> > 
> >      struct kvm_sregs sregs;
> >      struct kvm_msr_entry msrs[100];
> >      uint32_t hflags;
> > 
> > @@ -965,6 +1024,37 @@ void kvm_arch_save_regs(CPUState *env)
> > 
> >      env->eflags = regs.rflags;
> >      env->eip = regs.rip;
> > 
> > +#ifdef KVM_CAP_XSAVE
> > +    if (kvm_check_extension(kvm_state, KVM_CAP_XSAVE)) {
> > +        uint16_t cwd, swd, twd, fop;
> > +        xsave = qemu_memalign(4096, sizeof(struct kvm_xsave));
> > +        kvm_get_xsave(env, xsave);
> > +        cwd = (uint16_t)xsave->region[0];
> > +        swd = (uint16_t)(xsave->region[0] >> 16);
> > +        twd = (uint16_t)xsave->region[1];
> > +        fop = (uint16_t)(xsave->region[1] >> 16);
> > +        env->fpstt = (swd >> 11) & 7;
> > +        env->fpus = swd;
> > +        env->fpuc = cwd;
> > +        for (i = 0; i < 8; ++i)
> > +            env->fptags[i] = !((twd >> i) & 1);
> > +        env->mxcsr = xsave->region[XSAVE_MXCSR];
> > +        memcpy(env->fpregs, &xsave->region[XSAVE_ST_SPACE],
> > +                sizeof env->fpregs);
> > +        memcpy(env->xmm_regs, &xsave->region[XSAVE_XMM_SPACE],
> > +                sizeof env->xmm_regs);
> > +        env->xstate_bv = *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV];
> > +        memcpy(env->ymmh_regs, &xsave->region[XSAVE_YMMH_SPACE],
> > +                sizeof env->ymmh_regs);
> > +#ifdef KVM_CAP_XCRS
> > +        if (kvm_check_extension(kvm_state, KVM_CAP_XCRS)) {
> > +            kvm_get_xcrs(env, &xcrs);
> > +            if (xcrs.xcrs[0].xcr == 0)
> > +                env->xcr0 = xcrs.xcrs[0].value;
> > +        }
> > +#endif
> > +    } else {
> > +#endif
> > 
> >      kvm_get_fpu(env, &fpu);
> >      env->fpstt = (fpu.fsw >> 11) & 7;
> >      env->fpus = fpu.fsw;
> > 
> > @@ -974,6 +1064,9 @@ void kvm_arch_save_regs(CPUState *env)
> > 
> >      memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
> >      memcpy(env->xmm_regs, fpu.xmm, sizeof env->xmm_regs);
> >      env->mxcsr = fpu.mxcsr;
> > 
> > +#ifdef KVM_CAP_XSAVE
> > +    }
> > +#endif
> > 
> >      kvm_get_sregs(env, &sregs);
> > 
> > diff --git a/qemu-kvm.c b/qemu-kvm.c
> > index 96d458c..be1dac2 100644
> > --- a/qemu-kvm.c
> > +++ b/qemu-kvm.c
> > @@ -503,6 +503,30 @@ int kvm_set_mpstate(CPUState *env, struct kvm_mp_state *mp_state)
> > 
> >  }
> >  #endif
> > 
> > +#ifdef KVM_CAP_XSAVE
> > +int kvm_get_xsave(CPUState *env, struct kvm_xsave *xsave)
> > +{
> > +    return kvm_vcpu_ioctl(env, KVM_GET_XSAVE, xsave);
> > +}
> > +
> > +int kvm_set_xsave(CPUState *env, struct kvm_xsave *xsave)
> > +{
> > +    return kvm_vcpu_ioctl(env, KVM_SET_XSAVE, xsave);
> > +}
> > +#endif
> > +
> > +#ifdef KVM_CAP_XCRS
> > +int kvm_get_xcrs(CPUState *env, struct kvm_xcrs *xcrs)
> > +{
> > +    return kvm_vcpu_ioctl(env, KVM_GET_XCRS, xcrs);
> > +}
> > +
> > +int kvm_set_xcrs(CPUState *env, struct kvm_xcrs *xcrs)
> > +{
> > +    return kvm_vcpu_ioctl(env, KVM_SET_XCRS, xcrs);
> > +}
> > +#endif
> > +
> > 
> >  static int handle_mmio(CPUState *env)
> >  {
> >  
> >      unsigned long addr = env->kvm_run->mmio.phys_addr;
> > 
> > diff --git a/qemu-kvm.h b/qemu-kvm.h
> > index 6f6c6d8..3ace503 100644
> > --- a/qemu-kvm.h
> > +++ b/qemu-kvm.h
> > @@ -300,6 +300,34 @@ int kvm_get_mpstate(CPUState *env, struct kvm_mp_state *mp_state);
> > 
> >  int kvm_set_mpstate(CPUState *env, struct kvm_mp_state *mp_state);
> >  #endif
> > 
> > +#ifdef KVM_CAP_XSAVE
> > +/*!
> > + * \brief Read VCPU xsave state
> > + *
> > + */
> > +int kvm_get_xsave(CPUState *env, struct kvm_xsave *xsave);
> > +
> > +/*!
> > + * \brief Write VCPU xsave state
> > + *
> > + */
> > +int kvm_set_xsave(CPUState *env, struct kvm_xsave *xsave);
> > +#endif
> > +
> > +#ifdef KVM_CAP_XCRS
> > +/*!
> > + * \brief Read VCPU XCRs
> > + *
> > + */
> > +int kvm_get_xcrs(CPUState *env, struct kvm_xcrs *xcrs);
> > +
> > +/*!
> > + * \brief Write VCPU XCRs
> > + *
> > + */
> > +int kvm_set_xcrs(CPUState *env, struct kvm_xcrs *xcrs);
> > +#endif
> > +
> > 
> >  /*!
> >  
> >   * \brief Simulate an external vectored interrupt
> >   *
Jan Kiszka June 17, 2010, 8:44 a.m. UTC | #3
Sheng Yang wrote:
> On Thursday 17 June 2010 15:41:43 Jan Kiszka wrote:
>> Sheng Yang wrote:
>>> Based on upstream xsave related fields.
>>>
>>> Signed-off-by: Sheng Yang <sheng@linux.intel.com>
>>> ---
>>>
>>>  qemu-kvm-x86.c |   95 +++++++++++++++++++++++++++++++++++++++++++++++++++++++-
>>>  qemu-kvm.c     |   24 ++++++++++++++
>>>  qemu-kvm.h     |   28 ++++++++++++++++
>>>  3 files changed, 146 insertions(+), 1 deletions(-)
>>>
>>> diff --git a/qemu-kvm-x86.c b/qemu-kvm-x86.c
>>> index 3c33e64..dcef8b5 100644
>>> --- a/qemu-kvm-x86.c
>>> +++ b/qemu-kvm-x86.c
>>> @@ -772,10 +772,26 @@ static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
>>>
>>>  	| (rhs->avl * DESC_AVL_MASK);
>>>  
>>>  }
>>>
>>> +#ifdef KVM_CAP_XSAVE
>>> +#define XSAVE_CWD_RIP     2
>>> +#define XSAVE_CWD_RDP     4
>>> +#define XSAVE_MXCSR       6
>>> +#define XSAVE_ST_SPACE    8
>>> +#define XSAVE_XMM_SPACE   40
>>> +#define XSAVE_XSTATE_BV   128
>>> +#define XSAVE_YMMH_SPACE  144
>>> +#endif
>>> +
>>>
>>>  void kvm_arch_load_regs(CPUState *env, int level)
>>>  {
>>>  
>>>      struct kvm_regs regs;
>>>      struct kvm_fpu fpu;
>>>
>>> +#ifdef KVM_CAP_XSAVE
>>> +    struct kvm_xsave* xsave;
>>> +#endif
>>> +#ifdef KVM_CAP_XCRS
>>> +    struct kvm_xcrs xcrs;
>>> +#endif
>>>
>>>      struct kvm_sregs sregs;
>>>      struct kvm_msr_entry msrs[100];
>>>      int rc, n, i;
>>>
>>> @@ -806,16 +822,53 @@ void kvm_arch_load_regs(CPUState *env, int level)
>>>
>>>      kvm_set_regs(env, &regs);
>>>
>>> +#ifdef KVM_CAP_XSAVE
>>> +    if (kvm_check_extension(kvm_state, KVM_CAP_XSAVE)) {
>>> +        uint16_t cwd, swd, twd, fop;
>>> +
>>> +        xsave = qemu_memalign(4096, sizeof(struct kvm_xsave));
>>> +        memset(xsave, 0, sizeof(struct kvm_xsave));
>>> +        cwd = swd = twd = fop = 0;
>>> +        swd = env->fpus & ~(7 << 11);
>>> +        swd |= (env->fpstt & 7) << 11;
>>> +        cwd = env->fpuc;
>>> +        for (i = 0; i < 8; ++i)
>>> +            twd |= (!env->fptags[i]) << i;
>>> +        xsave->region[0] = (uint32_t)(swd << 16) + cwd;
>>> +        xsave->region[1] = (uint32_t)(fop << 16) + twd;
>>> +        memcpy(&xsave->region[XSAVE_ST_SPACE], env->fpregs,
>>> +                sizeof env->fpregs);
>>> +        memcpy(&xsave->region[XSAVE_XMM_SPACE], env->xmm_regs,
>>> +                sizeof env->xmm_regs);
>>> +        xsave->region[XSAVE_MXCSR] = env->mxcsr;
>>> +        *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV] = env->xstate_bv;
>>> +        memcpy(&xsave->region[XSAVE_YMMH_SPACE], env->ymmh_regs,
>>> +                sizeof env->ymmh_regs);
>>> +        kvm_set_xsave(env, xsave);
>>> +#ifdef KVM_CAP_XCRS
>>> +        if (kvm_check_extension(kvm_state, KVM_CAP_XCRS)) {
>>> +            xcrs.nr_xcrs = 1;
>>> +            xcrs.flags = 0;
>>> +            xcrs.xcrs[0].xcr = 0;
>>> +            xcrs.xcrs[0].value = env->xcr0;
>>> +            kvm_set_xcrs(env, &xcrs);
>>> +        }
>>> +#endif /* KVM_CAP_XCRS */
>>> +    } else {
>>> +#endif /* KVM_CAP_XSAVE */
>> Why not reuse kvm_put/get_xsave as defined for upstream? There should
>> be enough examples of that pattern. The result will be a tiny qemu-kvm
>> patch.
> 
> Lots of code in kvm_arch_load/save_regs() still duplicates what is in kvm.c,
> e.g. kvm_get/put_sregs and kvm_get/put_msrs, so I would like to wait for the merge.

That we still have some legacy here is no good reason to increase it.
Just check how debugregs were introduced.
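
Roughly this shape (sketch from memory, helper names as in upstream
target-i386/kvm.c; treat the exact qemu-kvm wiring as illustrative):

#ifdef KVM_CAP_DEBUGREGS
    /* the capability check lives in qemu-kvm, the actual DR0-DR3/DR6/DR7
     * marshalling lives once in the shared upstream helper */
    if (kvm_check_extension(kvm_state, KVM_CAP_DEBUGREGS)) {
        kvm_put_debugregs(env);
    }
#endif

xsave/xcrs can follow the same pattern instead of open-coding the
region[] layout in qemu-kvm-x86.c.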

Jan

Patch

diff --git a/qemu-kvm-x86.c b/qemu-kvm-x86.c
index 3c33e64..dcef8b5 100644
--- a/qemu-kvm-x86.c
+++ b/qemu-kvm-x86.c
@@ -772,10 +772,26 @@  static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
 	| (rhs->avl * DESC_AVL_MASK);
 }
 
+#ifdef KVM_CAP_XSAVE
+#define XSAVE_CWD_RIP     2
+#define XSAVE_CWD_RDP     4
+#define XSAVE_MXCSR       6
+#define XSAVE_ST_SPACE    8
+#define XSAVE_XMM_SPACE   40
+#define XSAVE_XSTATE_BV   128
+#define XSAVE_YMMH_SPACE  144
+#endif
+
 void kvm_arch_load_regs(CPUState *env, int level)
 {
     struct kvm_regs regs;
     struct kvm_fpu fpu;
+#ifdef KVM_CAP_XSAVE
+    struct kvm_xsave* xsave;
+#endif
+#ifdef KVM_CAP_XCRS
+    struct kvm_xcrs xcrs;
+#endif
     struct kvm_sregs sregs;
     struct kvm_msr_entry msrs[100];
     int rc, n, i;
@@ -806,16 +822,53 @@  void kvm_arch_load_regs(CPUState *env, int level)
 
     kvm_set_regs(env, &regs);
 
+#ifdef KVM_CAP_XSAVE
+    if (kvm_check_extension(kvm_state, KVM_CAP_XSAVE)) {
+        uint16_t cwd, swd, twd, fop;
+
+        xsave = qemu_memalign(4096, sizeof(struct kvm_xsave));
+        memset(xsave, 0, sizeof(struct kvm_xsave));
+        cwd = swd = twd = fop = 0;
+        swd = env->fpus & ~(7 << 11);
+        swd |= (env->fpstt & 7) << 11;
+        cwd = env->fpuc;
+        for (i = 0; i < 8; ++i)
+            twd |= (!env->fptags[i]) << i;
+        xsave->region[0] = (uint32_t)(swd << 16) + cwd;
+        xsave->region[1] = (uint32_t)(fop << 16) + twd;
+        memcpy(&xsave->region[XSAVE_ST_SPACE], env->fpregs,
+                sizeof env->fpregs);
+        memcpy(&xsave->region[XSAVE_XMM_SPACE], env->xmm_regs,
+                sizeof env->xmm_regs);
+        xsave->region[XSAVE_MXCSR] = env->mxcsr;
+        *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV] = env->xstate_bv;
+        memcpy(&xsave->region[XSAVE_YMMH_SPACE], env->ymmh_regs,
+                sizeof env->ymmh_regs);
+        kvm_set_xsave(env, xsave);
+#ifdef KVM_CAP_XCRS
+        if (kvm_check_extension(kvm_state, KVM_CAP_XCRS)) {
+            xcrs.nr_xcrs = 1;
+            xcrs.flags = 0;
+            xcrs.xcrs[0].xcr = 0;
+            xcrs.xcrs[0].value = env->xcr0;
+            kvm_set_xcrs(env, &xcrs);
+        }
+#endif /* KVM_CAP_XCRS */
+    } else {
+#endif /* KVM_CAP_XSAVE */
     memset(&fpu, 0, sizeof fpu);
     fpu.fsw = env->fpus & ~(7 << 11);
     fpu.fsw |= (env->fpstt & 7) << 11;
     fpu.fcw = env->fpuc;
     for (i = 0; i < 8; ++i)
-	fpu.ftwx |= (!env->fptags[i]) << i;
+        fpu.ftwx |= (!env->fptags[i]) << i;
     memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
     memcpy(fpu.xmm, env->xmm_regs, sizeof env->xmm_regs);
     fpu.mxcsr = env->mxcsr;
     kvm_set_fpu(env, &fpu);
+#ifdef KVM_CAP_XSAVE
+    }
+#endif
 
     memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
     if (env->interrupt_injected >= 0) {
@@ -934,6 +987,12 @@  void kvm_arch_save_regs(CPUState *env)
 {
     struct kvm_regs regs;
     struct kvm_fpu fpu;
+#ifdef KVM_CAP_XSAVE
+    struct kvm_xsave* xsave;
+#endif
+#ifdef KVM_CAP_XCRS
+    struct kvm_xcrs xcrs;
+#endif
     struct kvm_sregs sregs;
     struct kvm_msr_entry msrs[100];
     uint32_t hflags;
@@ -965,6 +1024,37 @@  void kvm_arch_save_regs(CPUState *env)
     env->eflags = regs.rflags;
     env->eip = regs.rip;
 
+#ifdef KVM_CAP_XSAVE
+    if (kvm_check_extension(kvm_state, KVM_CAP_XSAVE)) {
+        uint16_t cwd, swd, twd, fop;
+        xsave = qemu_memalign(4096, sizeof(struct kvm_xsave));
+        kvm_get_xsave(env, xsave);
+        cwd = (uint16_t)xsave->region[0];
+        swd = (uint16_t)(xsave->region[0] >> 16);
+        twd = (uint16_t)xsave->region[1];
+        fop = (uint16_t)(xsave->region[1] >> 16);
+        env->fpstt = (swd >> 11) & 7;
+        env->fpus = swd;
+        env->fpuc = cwd;
+        for (i = 0; i < 8; ++i)
+            env->fptags[i] = !((twd >> i) & 1);
+        env->mxcsr = xsave->region[XSAVE_MXCSR];
+        memcpy(env->fpregs, &xsave->region[XSAVE_ST_SPACE],
+                sizeof env->fpregs);
+        memcpy(env->xmm_regs, &xsave->region[XSAVE_XMM_SPACE],
+                sizeof env->xmm_regs);
+        env->xstate_bv = *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV];
+        memcpy(env->ymmh_regs, &xsave->region[XSAVE_YMMH_SPACE],
+                sizeof env->ymmh_regs);
+#ifdef KVM_CAP_XCRS
+        if (kvm_check_extension(kvm_state, KVM_CAP_XCRS)) {
+            kvm_get_xcrs(env, &xcrs);
+            if (xcrs.xcrs[0].xcr == 0)
+                env->xcr0 = xcrs.xcrs[0].value;
+        }
+#endif
+    } else {
+#endif
     kvm_get_fpu(env, &fpu);
     env->fpstt = (fpu.fsw >> 11) & 7;
     env->fpus = fpu.fsw;
@@ -974,6 +1064,9 @@  void kvm_arch_save_regs(CPUState *env)
     memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
     memcpy(env->xmm_regs, fpu.xmm, sizeof env->xmm_regs);
     env->mxcsr = fpu.mxcsr;
+#ifdef KVM_CAP_XSAVE
+    }
+#endif
 
     kvm_get_sregs(env, &sregs);
 
diff --git a/qemu-kvm.c b/qemu-kvm.c
index 96d458c..be1dac2 100644
--- a/qemu-kvm.c
+++ b/qemu-kvm.c
@@ -503,6 +503,30 @@  int kvm_set_mpstate(CPUState *env, struct kvm_mp_state *mp_state)
 }
 #endif
 
+#ifdef KVM_CAP_XSAVE
+int kvm_get_xsave(CPUState *env, struct kvm_xsave *xsave)
+{
+    return kvm_vcpu_ioctl(env, KVM_GET_XSAVE, xsave);
+}
+
+int kvm_set_xsave(CPUState *env, struct kvm_xsave *xsave)
+{
+    return kvm_vcpu_ioctl(env, KVM_SET_XSAVE, xsave);
+}
+#endif
+
+#ifdef KVM_CAP_XCRS
+int kvm_get_xcrs(CPUState *env, struct kvm_xcrs *xcrs)
+{
+    return kvm_vcpu_ioctl(env, KVM_GET_XCRS, xcrs);
+}
+
+int kvm_set_xcrs(CPUState *env, struct kvm_xcrs *xcrs)
+{
+    return kvm_vcpu_ioctl(env, KVM_SET_XCRS, xcrs);
+}
+#endif
+
 static int handle_mmio(CPUState *env)
 {
     unsigned long addr = env->kvm_run->mmio.phys_addr;
diff --git a/qemu-kvm.h b/qemu-kvm.h
index 6f6c6d8..3ace503 100644
--- a/qemu-kvm.h
+++ b/qemu-kvm.h
@@ -300,6 +300,34 @@  int kvm_get_mpstate(CPUState *env, struct kvm_mp_state *mp_state);
 int kvm_set_mpstate(CPUState *env, struct kvm_mp_state *mp_state);
 #endif
 
+#ifdef KVM_CAP_XSAVE
+/*!
+ * \brief Read VCPU xsave state
+ *
+ */
+int kvm_get_xsave(CPUState *env, struct kvm_xsave *xsave);
+
+/*!
+ * \brief Write VCPU xsave state
+ *
+ */
+int kvm_set_xsave(CPUState *env, struct kvm_xsave *xsave);
+#endif
+
+#ifdef KVM_CAP_XCRS
+/*!
+ * \brief Read VCPU XCRs
+ *
+ */
+int kvm_get_xcrs(CPUState *env, struct kvm_xcrs *xcrs);
+
+/*!
+ * \brief Write VCPU XCRs
+ *
+ */
+int kvm_set_xcrs(CPUState *env, struct kvm_xcrs *xcrs);
+#endif
+
 /*!
  * \brief Simulate an external vectored interrupt
  *
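
The XSAVE_* constants in qemu-kvm-x86.c index kvm_xsave.region[] in 32-bit
words and mirror the hardware XSAVE area layout. For orientation, the
corresponding byte offsets are sketched below (illustrative enum, not part
of the patch):

/* byte offsets into the 4 KiB XSAVE area backing kvm_xsave.region[] */
enum {
    XSAVE_OFF_FCW_FSW   = 0 * 4,    /* region[0]: FCW | FSW << 16          */
    XSAVE_OFF_FTW_FOP   = 1 * 4,    /* region[1]: FTW | FOP << 16          */
    XSAVE_OFF_MXCSR     = 6 * 4,    /* byte 24                             */
    XSAVE_OFF_ST_SPACE  = 8 * 4,    /* byte 32:  ST0..ST7, 16 bytes each   */
    XSAVE_OFF_XMM_SPACE = 40 * 4,   /* byte 160: XMM0..XMM15               */
    XSAVE_OFF_XSTATE_BV = 128 * 4,  /* byte 512: XSAVE header, XSTATE_BV   */
    XSAVE_OFF_YMMH      = 144 * 4,  /* byte 576: upper halves of YMM0..15  */
};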