target/i386: VMRUN and VMLOAD canonicalizations

Message ID 20210804113058.45186-1-laramglazier@gmail.com
State New
Series target/i386: VMRUN and VMLOAD canonicalizations

Commit Message

Lara Lazier Aug. 4, 2021, 11:30 a.m. UTC
APM2 requires that VMRUN and VMLOAD canonicalize (i.e. sign-extend from
the 48-bit or 57-bit virtual address width up to bit 63) all base
addresses in the segment registers that each instruction loads.
helper_vmrun() therefore loads GDTR and IDTR through svm_load_seg() as
well, so that their bases are canonicalized too.

Signed-off-by: Lara Lazier <laramglazier@gmail.com>
---
 target/i386/cpu.c                   | 19 +++++++++++--------
 target/i386/cpu.h                   |  2 ++
 target/i386/tcg/sysemu/svm_helper.c | 27 +++++++++++++++++----------
 3 files changed, 30 insertions(+), 18 deletions(-)
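
In long mode an address is canonical when all bits above the implemented
virtual address width are copies of the topmost implemented bit (bit 47
with 4-level paging, bit 56 with LA57). As a minimal standalone sketch of
the sign-extension arithmetic the patch applies (illustrative only, not
QEMU code):

#include <inttypes.h>
#include <stdio.h>

/* Sign-extend addr from va_bits implemented bits up to bit 63. */
static uint64_t canonicalize(uint64_t addr, unsigned va_bits)
{
    unsigned shift = 64 - va_bits;   /* 16 for 48-bit VA, 7 for 57-bit VA */
    return (uint64_t)((int64_t)(addr << shift) >> shift);
}

int main(void)
{
    /* Bit 47 set: bits 63:48 become ones under 48-bit addressing... */
    printf("%016" PRIx64 "\n", canonicalize(0x0000800000000000ULL, 48));
    /* ...while the same value is already canonical under LA57. */
    printf("%016" PRIx64 "\n", canonicalize(0x0000800000000000ULL, 57));
    return 0;
}

This prints ffff800000000000 and 0000800000000000 respectively.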

Comments

Paolo Bonzini Aug. 6, 2021, 2:06 p.m. UTC | #1
On 04/08/21 13:30, Lara Lazier wrote:
> APM2 requires that VMRUN and VMLOAD canonicalize (i.e. sign-extend from
> the 48-bit or 57-bit virtual address width up to bit 63) all base
> addresses in the segment registers that each instruction loads.
> helper_vmrun() therefore loads GDTR and IDTR through svm_load_seg() as
> well, so that their bases are canonicalized too.
> 
> Signed-off-by: Lara Lazier <laramglazier@gmail.com>
> [...]

Queued, thanks.

Paolo
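
Note that the cpu.c hunk is purely a refactor of how CPUID leaf 0x80000008
composes EAX: bits 7:0 report the physical address width and bits 15:8 the
linear (virtual) address width. A short guest-side sketch of decoding that
leaf (using the GCC/Clang cpuid.h wrapper; not part of this patch):

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
    unsigned int eax, ebx, ecx, edx;

    if (__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx)) {
        printf("physical address bits: %u\n", eax & 0xff);        /* EAX[7:0] */
        printf("linear address bits:   %u\n", (eax >> 8) & 0xff); /* EAX[15:8] */
    }
    return 0;
}

As before the patch, EAX[15:8] stays zero on CPU models without long mode.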

Patch

diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 71d26cf1bd..de4c8316c9 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -5108,6 +5108,15 @@ static void x86_register_cpudef_types(const X86CPUDefinition *def)
 
 }
 
+uint32_t cpu_x86_virtual_addr_width(CPUX86State *env)
+{
+    if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
+        return 57; /* 57 bits virtual */
+    } else {
+        return 48; /* 48 bits virtual */
+    }
+}
+
 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                    uint32_t *eax, uint32_t *ebx,
                    uint32_t *ecx, uint32_t *edx)
@@ -5510,16 +5519,10 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
         break;
     case 0x80000008:
         /* virtual & phys address size in low 2 bytes. */
+        *eax = cpu->phys_bits;
         if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
             /* 64 bit processor */
-            *eax = cpu->phys_bits; /* configurable physical bits */
-            if  (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
-                *eax |= 0x00003900; /* 57 bits virtual */
-            } else {
-                *eax |= 0x00003000; /* 48 bits virtual */
-            }
-        } else {
-            *eax = cpu->phys_bits;
+            *eax |= (cpu_x86_virtual_addr_width(env) << 8);
         }
         *ebx = env->features[FEAT_8000_0008_EBX];
         if (cs->nr_cores * cs->nr_threads > 1) {
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 6c50d3ab4f..c9c7350c76 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -1954,6 +1954,8 @@ typedef struct PropValue {
 } PropValue;
 void x86_cpu_apply_props(X86CPU *cpu, PropValue *props);
 
+uint32_t cpu_x86_virtual_addr_width(CPUX86State *env);
+
 /* cpu.c other functions (cpuid) */
 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                    uint32_t *eax, uint32_t *ebx,
diff --git a/target/i386/tcg/sysemu/svm_helper.c b/target/i386/tcg/sysemu/svm_helper.c
index 6c29a6a778..032561ef8c 100644
--- a/target/i386/tcg/sysemu/svm_helper.c
+++ b/target/i386/tcg/sysemu/svm_helper.c
@@ -41,6 +41,16 @@ static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
              ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
 }
 
+/*
+ * VMRUN and VMLOAD canonicalize (i.e., sign-extend to bit 63) all base
+ * addresses in the segment registers that have been loaded.
+ */
+static inline void svm_canonicalization(CPUX86State *env, target_ulong *seg_base)
+{
+    uint16_t shift_amt = 64 - cpu_x86_virtual_addr_width(env);
+    *seg_base = ((int64_t)(*seg_base << shift_amt)) >> shift_amt;
+}
+
 static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                 SegmentCache *sc)
 {
@@ -53,6 +63,7 @@ static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
     sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit));
     flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib));
     sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
+    svm_canonicalization(env, &sc->base);
 }
 
 static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
@@ -256,16 +267,6 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
     env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
                                offsetof(struct vmcb, control.tsc_offset));
 
-    env->gdt.base  = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
-                                                      save.gdtr.base));
-    env->gdt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
-                                                      save.gdtr.limit));
-
-    env->idt.base  = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
-                                                      save.idtr.base));
-    env->idt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
-                                                      save.idtr.limit));
-
     new_cr0 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr0));
     if (new_cr0 & SVM_CR0_RESERVED_MASK) {
         cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
@@ -319,6 +320,10 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
                        R_SS);
     svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                        R_DS);
+    svm_load_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.idtr),
+                       &env->idt);
+    svm_load_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.gdtr),
+                       &env->gdt);
 
     env->eip = x86_ldq_phys(cs,
                         env->vm_vmcb + offsetof(struct vmcb, save.rip));
@@ -456,6 +461,7 @@ void helper_vmload(CPUX86State *env, int aflag)
     env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar));
     env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar));
     env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask));
+    svm_canonicalization(env, &env->kernelgsbase);
 #endif
     env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star));
     env->sysenter_cs = x86_ldq_phys(cs,
@@ -464,6 +470,7 @@ void helper_vmload(CPUX86State *env, int aflag)
                                                  save.sysenter_esp));
     env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                  save.sysenter_eip));
+
 }
 
 void helper_vmsave(CPUX86State *env, int aflag)
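
As an aside, QEMU's include/qemu/bitops.h already provides sextract64(),
which performs exactly this kind of sign extension. An equivalent
formulation of svm_canonicalization() (a sketch assuming the same
surrounding declarations, not what was queued) would be:

#include "qemu/bitops.h"

/* Sign-extend the low cpu_x86_virtual_addr_width(env) bits of the
 * segment base up to bit 63. */
static inline void svm_canonicalization(CPUX86State *env, target_ulong *seg_base)
{
    *seg_base = sextract64(*seg_base, 0, cpu_x86_virtual_addr_width(env));
}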