[03/31] target-i386: Use correct memory attributes for memory accesses

Message ID 1431352157-40283-4-git-send-email-pbonzini@redhat.com
State New

Commit Message

Paolo Bonzini May 11, 2015, 1:48 p.m. UTC
These memory accesses include page table walks, SVM accesses, and SMM state save accesses.

The bulk of the patch is obtained with

   sed -i 's/\(\<[a-z_]*_phys\(_notdirty\)\?\>(cs\)->as,/x86_\1,/'
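As an illustrative example (not itself part of the patch), the substitution rewrites call sites that pass cs->as explicitly into calls to the new x86_* wrappers, which look up the current memory attributes from the CPU state; for instance, the page-table walk in helper.c changes like this:

   /* before: address space passed explicitly, default attributes */
   pml4e = ldq_phys(cs->as, pml4e_addr);

   /* after: wrapper derives MemTxAttrs via cpu_get_mem_attrs(env) */
   pml4e = x86_ldq_phys(cs, pml4e_addr);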

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 target-i386/cpu.h        |  12 ++
 target-i386/helper.c     | 132 +++++++++++++++++----
 target-i386/seg_helper.c |  12 +-
 target-i386/smm_helper.c | 298 +++++++++++++++++++++++------------------------
 target-i386/svm_helper.c | 230 ++++++++++++++++++------------------
 5 files changed, 394 insertions(+), 290 deletions(-)

Patch

diff --git a/target-i386/cpu.h b/target-i386/cpu.h
index 64c2783..9f57fe9 100644
--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -1105,6 +1105,18 @@  int x86_cpu_handle_mmu_fault(CPUState *cpu, vaddr addr,
                              int is_write, int mmu_idx);
 void x86_cpu_set_a20(X86CPU *cpu, int a20_state);
 
+#ifndef CONFIG_USER_ONLY
+uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr);
+uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr);
+uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr);
+uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr);
+void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val);
+void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val);
+void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val);
+void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val);
+void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val);
+#endif
+
 static inline bool hw_local_breakpoint_enabled(unsigned long dr7, int index)
 {
     return (dr7 >> (index * 2)) & 1;
diff --git a/target-i386/helper.c b/target-i386/helper.c
index 62e801b..5480a96 100644
--- a/target-i386/helper.c
+++ b/target-i386/helper.c
@@ -565,7 +565,7 @@  int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
 
             pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                 env->a20_mask;
-            pml4e = ldq_phys(cs->as, pml4e_addr);
+            pml4e = x86_ldq_phys(cs, pml4e_addr);
             if (!(pml4e & PG_PRESENT_MASK)) {
                 goto do_fault;
             }
@@ -574,12 +574,12 @@  int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
             }
             if (!(pml4e & PG_ACCESSED_MASK)) {
                 pml4e |= PG_ACCESSED_MASK;
-                stl_phys_notdirty(cs->as, pml4e_addr, pml4e);
+                x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
             }
             ptep = pml4e ^ PG_NX_MASK;
             pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                 env->a20_mask;
-            pdpe = ldq_phys(cs->as, pdpe_addr);
+            pdpe = x86_ldq_phys(cs, pdpe_addr);
             if (!(pdpe & PG_PRESENT_MASK)) {
                 goto do_fault;
             }
@@ -589,7 +589,7 @@  int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
             ptep &= pdpe ^ PG_NX_MASK;
             if (!(pdpe & PG_ACCESSED_MASK)) {
                 pdpe |= PG_ACCESSED_MASK;
-                stl_phys_notdirty(cs->as, pdpe_addr, pdpe);
+                x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
             }
             if (pdpe & PG_PSE_MASK) {
                 /* 1 GB page */
@@ -604,7 +604,7 @@  int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
             /* XXX: load them when cr3 is loaded ? */
             pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                 env->a20_mask;
-            pdpe = ldq_phys(cs->as, pdpe_addr);
+            pdpe = x86_ldq_phys(cs, pdpe_addr);
             if (!(pdpe & PG_PRESENT_MASK)) {
                 goto do_fault;
             }
@@ -617,7 +617,7 @@  int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
 
         pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
             env->a20_mask;
-        pde = ldq_phys(cs->as, pde_addr);
+        pde = x86_ldq_phys(cs, pde_addr);
         if (!(pde & PG_PRESENT_MASK)) {
             goto do_fault;
         }
@@ -635,11 +635,11 @@  int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
         /* 4 KB page */
         if (!(pde & PG_ACCESSED_MASK)) {
             pde |= PG_ACCESSED_MASK;
-            stl_phys_notdirty(cs->as, pde_addr, pde);
+            x86_stl_phys_notdirty(cs, pde_addr, pde);
         }
         pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
             env->a20_mask;
-        pte = ldq_phys(cs->as, pte_addr);
+        pte = x86_ldq_phys(cs, pte_addr);
         if (!(pte & PG_PRESENT_MASK)) {
             goto do_fault;
         }
@@ -655,7 +655,7 @@  int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
         /* page directory entry */
         pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
             env->a20_mask;
-        pde = ldl_phys(cs->as, pde_addr);
+        pde = x86_ldl_phys(cs, pde_addr);
         if (!(pde & PG_PRESENT_MASK)) {
             goto do_fault;
         }
@@ -676,13 +676,13 @@  int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
 
         if (!(pde & PG_ACCESSED_MASK)) {
             pde |= PG_ACCESSED_MASK;
-            stl_phys_notdirty(cs->as, pde_addr, pde);
+            x86_stl_phys_notdirty(cs, pde_addr, pde);
         }
 
         /* page directory entry */
         pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
             env->a20_mask;
-        pte = ldl_phys(cs->as, pte_addr);
+        pte = x86_ldl_phys(cs, pte_addr);
         if (!(pte & PG_PRESENT_MASK)) {
             goto do_fault;
         }
@@ -737,7 +737,7 @@  do_check_protect_pse36:
         if (is_dirty) {
             pte |= PG_DIRTY_MASK;
         }
-        stl_phys_notdirty(cs->as, pte_addr, pte);
+        x86_stl_phys_notdirty(cs, pte_addr, pte);
     }
 
     /* the page can be put in the TLB */
@@ -789,7 +789,7 @@  do_check_protect_pse36:
         error_code |= PG_ERROR_I_D_MASK;
     if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
         /* cr2 is not modified in case of exceptions */
-        stq_phys(cs->as,
+        x86_stq_phys(cs,
                  env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                  addr);
     } else {
@@ -828,13 +828,13 @@  hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
             }
             pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                 env->a20_mask;
-            pml4e = ldq_phys(cs->as, pml4e_addr);
+            pml4e = x86_ldq_phys(cs, pml4e_addr);
             if (!(pml4e & PG_PRESENT_MASK)) {
                 return -1;
             }
             pdpe_addr = ((pml4e & PG_ADDRESS_MASK) +
                          (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
-            pdpe = ldq_phys(cs->as, pdpe_addr);
+            pdpe = x86_ldq_phys(cs, pdpe_addr);
             if (!(pdpe & PG_PRESENT_MASK)) {
                 return -1;
             }
@@ -849,14 +849,14 @@  hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
         {
             pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                 env->a20_mask;
-            pdpe = ldq_phys(cs->as, pdpe_addr);
+            pdpe = x86_ldq_phys(cs, pdpe_addr);
             if (!(pdpe & PG_PRESENT_MASK))
                 return -1;
         }
 
         pde_addr = ((pdpe & PG_ADDRESS_MASK) +
                     (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask;
-        pde = ldq_phys(cs->as, pde_addr);
+        pde = x86_ldq_phys(cs, pde_addr);
         if (!(pde & PG_PRESENT_MASK)) {
             return -1;
         }
@@ -869,7 +869,7 @@  hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
             pte_addr = ((pde & PG_ADDRESS_MASK) +
                         (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask;
             page_size = 4096;
-            pte = ldq_phys(cs->as, pte_addr);
+            pte = x86_ldq_phys(cs, pte_addr);
         }
         if (!(pte & PG_PRESENT_MASK)) {
             return -1;
@@ -879,7 +879,7 @@  hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
 
         /* page directory entry */
         pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
-        pde = ldl_phys(cs->as, pde_addr);
+        pde = x86_ldl_phys(cs, pde_addr);
         if (!(pde & PG_PRESENT_MASK))
             return -1;
         if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
@@ -888,7 +888,7 @@  hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
         } else {
             /* page directory entry */
             pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
-            pte = ldl_phys(cs->as, pte_addr);
+            pte = x86_ldl_phys(cs, pte_addr);
             if (!(pte & PG_PRESENT_MASK)) {
                 return -1;
             }
@@ -1277,3 +1277,95 @@  void x86_cpu_exec_exit(CPUState *cs)
 
     env->eflags = cpu_compute_eflags(env);
 }
+
+#ifndef CONFIG_USER_ONLY
+uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    return address_space_ldub(cs->as, addr,
+                              cpu_get_mem_attrs(env),
+                              NULL);
+}
+
+uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    return address_space_lduw(cs->as, addr,
+                              cpu_get_mem_attrs(env),
+                              NULL);
+}
+
+uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    return address_space_ldl(cs->as, addr,
+                             cpu_get_mem_attrs(env),
+                             NULL);
+}
+
+uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    return address_space_ldq(cs->as, addr,
+                             cpu_get_mem_attrs(env),
+                             NULL);
+}
+
+void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    address_space_stb(cs->as, addr, val,
+                      cpu_get_mem_attrs(env),
+                      NULL);
+}
+
+void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    address_space_stl_notdirty(cs->as, addr, val,
+                               cpu_get_mem_attrs(env),
+                               NULL);
+}
+
+void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    address_space_stw(cs->as, addr, val,
+                      cpu_get_mem_attrs(env),
+                      NULL);
+}
+
+void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    address_space_stl(cs->as, addr, val,
+                      cpu_get_mem_attrs(env),
+                      NULL);
+}
+
+void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    address_space_stq(cs->as, addr, val,
+                      cpu_get_mem_attrs(env),
+                      NULL);
+}
+#endif
diff --git a/target-i386/seg_helper.c b/target-i386/seg_helper.c
index 2bc757a..8a4271e 100644
--- a/target-i386/seg_helper.c
+++ b/target-i386/seg_helper.c
@@ -1144,7 +1144,7 @@  static void handle_even_inj(CPUX86State *env, int intno, int is_int,
                             int error_code, int is_hw, int rm)
 {
     CPUState *cs = CPU(x86_env_get_cpu(env));
-    uint32_t event_inj = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
+    uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                           control.event_inj));
 
     if (!(event_inj & SVM_EVTINJ_VALID)) {
@@ -1158,11 +1158,11 @@  static void handle_even_inj(CPUX86State *env, int intno, int is_int,
         event_inj = intno | type | SVM_EVTINJ_VALID;
         if (!rm && exception_has_error_code(intno)) {
             event_inj |= SVM_EVTINJ_VALID_ERR;
-            stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
+            x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj_err),
                      error_code);
         }
-        stl_phys(cs->as,
+        x86_stl_phys(cs,
                  env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                  event_inj);
     }
@@ -1240,11 +1240,11 @@  static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
 #if !defined(CONFIG_USER_ONLY)
     if (env->hflags & HF_SVMI_MASK) {
         CPUState *cs = CPU(cpu);
-        uint32_t event_inj = ldl_phys(cs->as, env->vm_vmcb +
+        uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.event_inj));
 
-        stl_phys(cs->as,
+        x86_stl_phys(cs,
                  env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                  event_inj & ~SVM_EVTINJ_VALID);
     }
@@ -1339,7 +1339,7 @@  bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
             int intno;
             /* FIXME: this should respect TPR */
             cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0);
-            intno = ldl_phys(cs->as, env->vm_vmcb
+            intno = x86_ldl_phys(cs, env->vm_vmcb
                              + offsetof(struct vmcb, control.int_vector));
             qemu_log_mask(CPU_LOG_TB_IN_ASM,
                           "Servicing virtual hardware INT=0x%02x\n", intno);
diff --git a/target-i386/smm_helper.c b/target-i386/smm_helper.c
index c62f468..b9971b6 100644
--- a/target-i386/smm_helper.c
+++ b/target-i386/smm_helper.c
@@ -60,83 +60,83 @@  void do_smm_enter(X86CPU *cpu)
     for (i = 0; i < 6; i++) {
         dt = &env->segs[i];
         offset = 0x7e00 + i * 16;
-        stw_phys(cs->as, sm_state + offset, dt->selector);
-        stw_phys(cs->as, sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
-        stl_phys(cs->as, sm_state + offset + 4, dt->limit);
-        stq_phys(cs->as, sm_state + offset + 8, dt->base);
+        x86_stw_phys(cs, sm_state + offset, dt->selector);
+        x86_stw_phys(cs, sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
+        x86_stl_phys(cs, sm_state + offset + 4, dt->limit);
+        x86_stq_phys(cs, sm_state + offset + 8, dt->base);
     }
 
-    stq_phys(cs->as, sm_state + 0x7e68, env->gdt.base);
-    stl_phys(cs->as, sm_state + 0x7e64, env->gdt.limit);
+    x86_stq_phys(cs, sm_state + 0x7e68, env->gdt.base);
+    x86_stl_phys(cs, sm_state + 0x7e64, env->gdt.limit);
 
-    stw_phys(cs->as, sm_state + 0x7e70, env->ldt.selector);
-    stq_phys(cs->as, sm_state + 0x7e78, env->ldt.base);
-    stl_phys(cs->as, sm_state + 0x7e74, env->ldt.limit);
-    stw_phys(cs->as, sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
+    x86_stw_phys(cs, sm_state + 0x7e70, env->ldt.selector);
+    x86_stq_phys(cs, sm_state + 0x7e78, env->ldt.base);
+    x86_stl_phys(cs, sm_state + 0x7e74, env->ldt.limit);
+    x86_stw_phys(cs, sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
 
-    stq_phys(cs->as, sm_state + 0x7e88, env->idt.base);
-    stl_phys(cs->as, sm_state + 0x7e84, env->idt.limit);
+    x86_stq_phys(cs, sm_state + 0x7e88, env->idt.base);
+    x86_stl_phys(cs, sm_state + 0x7e84, env->idt.limit);
 
-    stw_phys(cs->as, sm_state + 0x7e90, env->tr.selector);
-    stq_phys(cs->as, sm_state + 0x7e98, env->tr.base);
-    stl_phys(cs->as, sm_state + 0x7e94, env->tr.limit);
-    stw_phys(cs->as, sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
+    x86_stw_phys(cs, sm_state + 0x7e90, env->tr.selector);
+    x86_stq_phys(cs, sm_state + 0x7e98, env->tr.base);
+    x86_stl_phys(cs, sm_state + 0x7e94, env->tr.limit);
+    x86_stw_phys(cs, sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
 
-    stq_phys(cs->as, sm_state + 0x7ed0, env->efer);
+    x86_stq_phys(cs, sm_state + 0x7ed0, env->efer);
 
-    stq_phys(cs->as, sm_state + 0x7ff8, env->regs[R_EAX]);
-    stq_phys(cs->as, sm_state + 0x7ff0, env->regs[R_ECX]);
-    stq_phys(cs->as, sm_state + 0x7fe8, env->regs[R_EDX]);
-    stq_phys(cs->as, sm_state + 0x7fe0, env->regs[R_EBX]);
-    stq_phys(cs->as, sm_state + 0x7fd8, env->regs[R_ESP]);
-    stq_phys(cs->as, sm_state + 0x7fd0, env->regs[R_EBP]);
-    stq_phys(cs->as, sm_state + 0x7fc8, env->regs[R_ESI]);
-    stq_phys(cs->as, sm_state + 0x7fc0, env->regs[R_EDI]);
+    x86_stq_phys(cs, sm_state + 0x7ff8, env->regs[R_EAX]);
+    x86_stq_phys(cs, sm_state + 0x7ff0, env->regs[R_ECX]);
+    x86_stq_phys(cs, sm_state + 0x7fe8, env->regs[R_EDX]);
+    x86_stq_phys(cs, sm_state + 0x7fe0, env->regs[R_EBX]);
+    x86_stq_phys(cs, sm_state + 0x7fd8, env->regs[R_ESP]);
+    x86_stq_phys(cs, sm_state + 0x7fd0, env->regs[R_EBP]);
+    x86_stq_phys(cs, sm_state + 0x7fc8, env->regs[R_ESI]);
+    x86_stq_phys(cs, sm_state + 0x7fc0, env->regs[R_EDI]);
     for (i = 8; i < 16; i++) {
-        stq_phys(cs->as, sm_state + 0x7ff8 - i * 8, env->regs[i]);
+        x86_stq_phys(cs, sm_state + 0x7ff8 - i * 8, env->regs[i]);
     }
-    stq_phys(cs->as, sm_state + 0x7f78, env->eip);
-    stl_phys(cs->as, sm_state + 0x7f70, cpu_compute_eflags(env));
-    stl_phys(cs->as, sm_state + 0x7f68, env->dr[6]);
-    stl_phys(cs->as, sm_state + 0x7f60, env->dr[7]);
+    x86_stq_phys(cs, sm_state + 0x7f78, env->eip);
+    x86_stl_phys(cs, sm_state + 0x7f70, cpu_compute_eflags(env));
+    x86_stl_phys(cs, sm_state + 0x7f68, env->dr[6]);
+    x86_stl_phys(cs, sm_state + 0x7f60, env->dr[7]);
 
-    stl_phys(cs->as, sm_state + 0x7f48, env->cr[4]);
-    stq_phys(cs->as, sm_state + 0x7f50, env->cr[3]);
-    stl_phys(cs->as, sm_state + 0x7f58, env->cr[0]);
+    x86_stl_phys(cs, sm_state + 0x7f48, env->cr[4]);
+    x86_stq_phys(cs, sm_state + 0x7f50, env->cr[3]);
+    x86_stl_phys(cs, sm_state + 0x7f58, env->cr[0]);
 
-    stl_phys(cs->as, sm_state + 0x7efc, SMM_REVISION_ID);
-    stl_phys(cs->as, sm_state + 0x7f00, env->smbase);
+    x86_stl_phys(cs, sm_state + 0x7efc, SMM_REVISION_ID);
+    x86_stl_phys(cs, sm_state + 0x7f00, env->smbase);
 #else
-    stl_phys(cs->as, sm_state + 0x7ffc, env->cr[0]);
-    stl_phys(cs->as, sm_state + 0x7ff8, env->cr[3]);
-    stl_phys(cs->as, sm_state + 0x7ff4, cpu_compute_eflags(env));
-    stl_phys(cs->as, sm_state + 0x7ff0, env->eip);
-    stl_phys(cs->as, sm_state + 0x7fec, env->regs[R_EDI]);
-    stl_phys(cs->as, sm_state + 0x7fe8, env->regs[R_ESI]);
-    stl_phys(cs->as, sm_state + 0x7fe4, env->regs[R_EBP]);
-    stl_phys(cs->as, sm_state + 0x7fe0, env->regs[R_ESP]);
-    stl_phys(cs->as, sm_state + 0x7fdc, env->regs[R_EBX]);
-    stl_phys(cs->as, sm_state + 0x7fd8, env->regs[R_EDX]);
-    stl_phys(cs->as, sm_state + 0x7fd4, env->regs[R_ECX]);
-    stl_phys(cs->as, sm_state + 0x7fd0, env->regs[R_EAX]);
-    stl_phys(cs->as, sm_state + 0x7fcc, env->dr[6]);
-    stl_phys(cs->as, sm_state + 0x7fc8, env->dr[7]);
-
-    stl_phys(cs->as, sm_state + 0x7fc4, env->tr.selector);
-    stl_phys(cs->as, sm_state + 0x7f64, env->tr.base);
-    stl_phys(cs->as, sm_state + 0x7f60, env->tr.limit);
-    stl_phys(cs->as, sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
-
-    stl_phys(cs->as, sm_state + 0x7fc0, env->ldt.selector);
-    stl_phys(cs->as, sm_state + 0x7f80, env->ldt.base);
-    stl_phys(cs->as, sm_state + 0x7f7c, env->ldt.limit);
-    stl_phys(cs->as, sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
-
-    stl_phys(cs->as, sm_state + 0x7f74, env->gdt.base);
-    stl_phys(cs->as, sm_state + 0x7f70, env->gdt.limit);
-
-    stl_phys(cs->as, sm_state + 0x7f58, env->idt.base);
-    stl_phys(cs->as, sm_state + 0x7f54, env->idt.limit);
+    x86_stl_phys(cs, sm_state + 0x7ffc, env->cr[0]);
+    x86_stl_phys(cs, sm_state + 0x7ff8, env->cr[3]);
+    x86_stl_phys(cs, sm_state + 0x7ff4, cpu_compute_eflags(env));
+    x86_stl_phys(cs, sm_state + 0x7ff0, env->eip);
+    x86_stl_phys(cs, sm_state + 0x7fec, env->regs[R_EDI]);
+    x86_stl_phys(cs, sm_state + 0x7fe8, env->regs[R_ESI]);
+    x86_stl_phys(cs, sm_state + 0x7fe4, env->regs[R_EBP]);
+    x86_stl_phys(cs, sm_state + 0x7fe0, env->regs[R_ESP]);
+    x86_stl_phys(cs, sm_state + 0x7fdc, env->regs[R_EBX]);
+    x86_stl_phys(cs, sm_state + 0x7fd8, env->regs[R_EDX]);
+    x86_stl_phys(cs, sm_state + 0x7fd4, env->regs[R_ECX]);
+    x86_stl_phys(cs, sm_state + 0x7fd0, env->regs[R_EAX]);
+    x86_stl_phys(cs, sm_state + 0x7fcc, env->dr[6]);
+    x86_stl_phys(cs, sm_state + 0x7fc8, env->dr[7]);
+
+    x86_stl_phys(cs, sm_state + 0x7fc4, env->tr.selector);
+    x86_stl_phys(cs, sm_state + 0x7f64, env->tr.base);
+    x86_stl_phys(cs, sm_state + 0x7f60, env->tr.limit);
+    x86_stl_phys(cs, sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
+
+    x86_stl_phys(cs, sm_state + 0x7fc0, env->ldt.selector);
+    x86_stl_phys(cs, sm_state + 0x7f80, env->ldt.base);
+    x86_stl_phys(cs, sm_state + 0x7f7c, env->ldt.limit);
+    x86_stl_phys(cs, sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
+
+    x86_stl_phys(cs, sm_state + 0x7f74, env->gdt.base);
+    x86_stl_phys(cs, sm_state + 0x7f70, env->gdt.limit);
+
+    x86_stl_phys(cs, sm_state + 0x7f58, env->idt.base);
+    x86_stl_phys(cs, sm_state + 0x7f54, env->idt.limit);
 
     for (i = 0; i < 6; i++) {
         dt = &env->segs[i];
@@ -145,15 +145,15 @@  void do_smm_enter(X86CPU *cpu)
         } else {
             offset = 0x7f2c + (i - 3) * 12;
         }
-        stl_phys(cs->as, sm_state + 0x7fa8 + i * 4, dt->selector);
-        stl_phys(cs->as, sm_state + offset + 8, dt->base);
-        stl_phys(cs->as, sm_state + offset + 4, dt->limit);
-        stl_phys(cs->as, sm_state + offset, (dt->flags >> 8) & 0xf0ff);
+        x86_stl_phys(cs, sm_state + 0x7fa8 + i * 4, dt->selector);
+        x86_stl_phys(cs, sm_state + offset + 8, dt->base);
+        x86_stl_phys(cs, sm_state + offset + 4, dt->limit);
+        x86_stl_phys(cs, sm_state + offset, (dt->flags >> 8) & 0xf0ff);
     }
-    stl_phys(cs->as, sm_state + 0x7f14, env->cr[4]);
+    x86_stl_phys(cs, sm_state + 0x7f14, env->cr[4]);
 
-    stl_phys(cs->as, sm_state + 0x7efc, SMM_REVISION_ID);
-    stl_phys(cs->as, sm_state + 0x7ef8, env->smbase);
+    x86_stl_phys(cs, sm_state + 0x7efc, SMM_REVISION_ID);
+    x86_stl_phys(cs, sm_state + 0x7ef8, env->smbase);
 #endif
     /* init SMM cpu state */
 
@@ -200,91 +200,91 @@  void helper_rsm(CPUX86State *env)
 
     sm_state = env->smbase + 0x8000;
 #ifdef TARGET_X86_64
-    cpu_load_efer(env, ldq_phys(cs->as, sm_state + 0x7ed0));
-
-    env->gdt.base = ldq_phys(cs->as, sm_state + 0x7e68);
-    env->gdt.limit = ldl_phys(cs->as, sm_state + 0x7e64);
-
-    env->ldt.selector = lduw_phys(cs->as, sm_state + 0x7e70);
-    env->ldt.base = ldq_phys(cs->as, sm_state + 0x7e78);
-    env->ldt.limit = ldl_phys(cs->as, sm_state + 0x7e74);
-    env->ldt.flags = (lduw_phys(cs->as, sm_state + 0x7e72) & 0xf0ff) << 8;
-
-    env->idt.base = ldq_phys(cs->as, sm_state + 0x7e88);
-    env->idt.limit = ldl_phys(cs->as, sm_state + 0x7e84);
-
-    env->tr.selector = lduw_phys(cs->as, sm_state + 0x7e90);
-    env->tr.base = ldq_phys(cs->as, sm_state + 0x7e98);
-    env->tr.limit = ldl_phys(cs->as, sm_state + 0x7e94);
-    env->tr.flags = (lduw_phys(cs->as, sm_state + 0x7e92) & 0xf0ff) << 8;
-
-    env->regs[R_EAX] = ldq_phys(cs->as, sm_state + 0x7ff8);
-    env->regs[R_ECX] = ldq_phys(cs->as, sm_state + 0x7ff0);
-    env->regs[R_EDX] = ldq_phys(cs->as, sm_state + 0x7fe8);
-    env->regs[R_EBX] = ldq_phys(cs->as, sm_state + 0x7fe0);
-    env->regs[R_ESP] = ldq_phys(cs->as, sm_state + 0x7fd8);
-    env->regs[R_EBP] = ldq_phys(cs->as, sm_state + 0x7fd0);
-    env->regs[R_ESI] = ldq_phys(cs->as, sm_state + 0x7fc8);
-    env->regs[R_EDI] = ldq_phys(cs->as, sm_state + 0x7fc0);
+    cpu_load_efer(env, x86_ldq_phys(cs, sm_state + 0x7ed0));
+
+    env->gdt.base = x86_ldq_phys(cs, sm_state + 0x7e68);
+    env->gdt.limit = x86_ldl_phys(cs, sm_state + 0x7e64);
+
+    env->ldt.selector = x86_lduw_phys(cs, sm_state + 0x7e70);
+    env->ldt.base = x86_ldq_phys(cs, sm_state + 0x7e78);
+    env->ldt.limit = x86_ldl_phys(cs, sm_state + 0x7e74);
+    env->ldt.flags = (x86_lduw_phys(cs, sm_state + 0x7e72) & 0xf0ff) << 8;
+
+    env->idt.base = x86_ldq_phys(cs, sm_state + 0x7e88);
+    env->idt.limit = x86_ldl_phys(cs, sm_state + 0x7e84);
+
+    env->tr.selector = x86_lduw_phys(cs, sm_state + 0x7e90);
+    env->tr.base = x86_ldq_phys(cs, sm_state + 0x7e98);
+    env->tr.limit = x86_ldl_phys(cs, sm_state + 0x7e94);
+    env->tr.flags = (x86_lduw_phys(cs, sm_state + 0x7e92) & 0xf0ff) << 8;
+
+    env->regs[R_EAX] = x86_ldq_phys(cs, sm_state + 0x7ff8);
+    env->regs[R_ECX] = x86_ldq_phys(cs, sm_state + 0x7ff0);
+    env->regs[R_EDX] = x86_ldq_phys(cs, sm_state + 0x7fe8);
+    env->regs[R_EBX] = x86_ldq_phys(cs, sm_state + 0x7fe0);
+    env->regs[R_ESP] = x86_ldq_phys(cs, sm_state + 0x7fd8);
+    env->regs[R_EBP] = x86_ldq_phys(cs, sm_state + 0x7fd0);
+    env->regs[R_ESI] = x86_ldq_phys(cs, sm_state + 0x7fc8);
+    env->regs[R_EDI] = x86_ldq_phys(cs, sm_state + 0x7fc0);
     for (i = 8; i < 16; i++) {
-        env->regs[i] = ldq_phys(cs->as, sm_state + 0x7ff8 - i * 8);
+        env->regs[i] = x86_ldq_phys(cs, sm_state + 0x7ff8 - i * 8);
     }
-    env->eip = ldq_phys(cs->as, sm_state + 0x7f78);
-    cpu_load_eflags(env, ldl_phys(cs->as, sm_state + 0x7f70),
+    env->eip = x86_ldq_phys(cs, sm_state + 0x7f78);
+    cpu_load_eflags(env, x86_ldl_phys(cs, sm_state + 0x7f70),
                     ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
-    env->dr[6] = ldl_phys(cs->as, sm_state + 0x7f68);
-    env->dr[7] = ldl_phys(cs->as, sm_state + 0x7f60);
+    env->dr[6] = x86_ldl_phys(cs, sm_state + 0x7f68);
+    env->dr[7] = x86_ldl_phys(cs, sm_state + 0x7f60);
 
-    cpu_x86_update_cr4(env, ldl_phys(cs->as, sm_state + 0x7f48));
-    cpu_x86_update_cr3(env, ldq_phys(cs->as, sm_state + 0x7f50));
-    cpu_x86_update_cr0(env, ldl_phys(cs->as, sm_state + 0x7f58));
+    cpu_x86_update_cr4(env, x86_ldl_phys(cs, sm_state + 0x7f48));
+    cpu_x86_update_cr3(env, x86_ldq_phys(cs, sm_state + 0x7f50));
+    cpu_x86_update_cr0(env, x86_ldl_phys(cs, sm_state + 0x7f58));
 
     for (i = 0; i < 6; i++) {
         offset = 0x7e00 + i * 16;
         cpu_x86_load_seg_cache(env, i,
-                               lduw_phys(cs->as, sm_state + offset),
-                               ldq_phys(cs->as, sm_state + offset + 8),
-                               ldl_phys(cs->as, sm_state + offset + 4),
-                               (lduw_phys(cs->as, sm_state + offset + 2) &
+                               x86_lduw_phys(cs, sm_state + offset),
+                               x86_ldq_phys(cs, sm_state + offset + 8),
+                               x86_ldl_phys(cs, sm_state + offset + 4),
+                               (x86_lduw_phys(cs, sm_state + offset + 2) &
                                 0xf0ff) << 8);
     }
 
-    val = ldl_phys(cs->as, sm_state + 0x7efc); /* revision ID */
+    val = x86_ldl_phys(cs, sm_state + 0x7efc); /* revision ID */
     if (val & 0x20000) {
-        env->smbase = ldl_phys(cs->as, sm_state + 0x7f00) & ~0x7fff;
+        env->smbase = x86_ldl_phys(cs, sm_state + 0x7f00) & ~0x7fff;
     }
 #else
-    cpu_x86_update_cr0(env, ldl_phys(cs->as, sm_state + 0x7ffc));
-    cpu_x86_update_cr3(env, ldl_phys(cs->as, sm_state + 0x7ff8));
-    cpu_load_eflags(env, ldl_phys(cs->as, sm_state + 0x7ff4),
+    cpu_x86_update_cr0(env, x86_ldl_phys(cs, sm_state + 0x7ffc));
+    cpu_x86_update_cr3(env, x86_ldl_phys(cs, sm_state + 0x7ff8));
+    cpu_load_eflags(env, x86_ldl_phys(cs, sm_state + 0x7ff4),
                     ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
-    env->eip = ldl_phys(cs->as, sm_state + 0x7ff0);
-    env->regs[R_EDI] = ldl_phys(cs->as, sm_state + 0x7fec);
-    env->regs[R_ESI] = ldl_phys(cs->as, sm_state + 0x7fe8);
-    env->regs[R_EBP] = ldl_phys(cs->as, sm_state + 0x7fe4);
-    env->regs[R_ESP] = ldl_phys(cs->as, sm_state + 0x7fe0);
-    env->regs[R_EBX] = ldl_phys(cs->as, sm_state + 0x7fdc);
-    env->regs[R_EDX] = ldl_phys(cs->as, sm_state + 0x7fd8);
-    env->regs[R_ECX] = ldl_phys(cs->as, sm_state + 0x7fd4);
-    env->regs[R_EAX] = ldl_phys(cs->as, sm_state + 0x7fd0);
-    env->dr[6] = ldl_phys(cs->as, sm_state + 0x7fcc);
-    env->dr[7] = ldl_phys(cs->as, sm_state + 0x7fc8);
-
-    env->tr.selector = ldl_phys(cs->as, sm_state + 0x7fc4) & 0xffff;
-    env->tr.base = ldl_phys(cs->as, sm_state + 0x7f64);
-    env->tr.limit = ldl_phys(cs->as, sm_state + 0x7f60);
-    env->tr.flags = (ldl_phys(cs->as, sm_state + 0x7f5c) & 0xf0ff) << 8;
-
-    env->ldt.selector = ldl_phys(cs->as, sm_state + 0x7fc0) & 0xffff;
-    env->ldt.base = ldl_phys(cs->as, sm_state + 0x7f80);
-    env->ldt.limit = ldl_phys(cs->as, sm_state + 0x7f7c);
-    env->ldt.flags = (ldl_phys(cs->as, sm_state + 0x7f78) & 0xf0ff) << 8;
-
-    env->gdt.base = ldl_phys(cs->as, sm_state + 0x7f74);
-    env->gdt.limit = ldl_phys(cs->as, sm_state + 0x7f70);
-
-    env->idt.base = ldl_phys(cs->as, sm_state + 0x7f58);
-    env->idt.limit = ldl_phys(cs->as, sm_state + 0x7f54);
+    env->eip = x86_ldl_phys(cs, sm_state + 0x7ff0);
+    env->regs[R_EDI] = x86_ldl_phys(cs, sm_state + 0x7fec);
+    env->regs[R_ESI] = x86_ldl_phys(cs, sm_state + 0x7fe8);
+    env->regs[R_EBP] = x86_ldl_phys(cs, sm_state + 0x7fe4);
+    env->regs[R_ESP] = x86_ldl_phys(cs, sm_state + 0x7fe0);
+    env->regs[R_EBX] = x86_ldl_phys(cs, sm_state + 0x7fdc);
+    env->regs[R_EDX] = x86_ldl_phys(cs, sm_state + 0x7fd8);
+    env->regs[R_ECX] = x86_ldl_phys(cs, sm_state + 0x7fd4);
+    env->regs[R_EAX] = x86_ldl_phys(cs, sm_state + 0x7fd0);
+    env->dr[6] = x86_ldl_phys(cs, sm_state + 0x7fcc);
+    env->dr[7] = x86_ldl_phys(cs, sm_state + 0x7fc8);
+
+    env->tr.selector = x86_ldl_phys(cs, sm_state + 0x7fc4) & 0xffff;
+    env->tr.base = x86_ldl_phys(cs, sm_state + 0x7f64);
+    env->tr.limit = x86_ldl_phys(cs, sm_state + 0x7f60);
+    env->tr.flags = (x86_ldl_phys(cs, sm_state + 0x7f5c) & 0xf0ff) << 8;
+
+    env->ldt.selector = x86_ldl_phys(cs, sm_state + 0x7fc0) & 0xffff;
+    env->ldt.base = x86_ldl_phys(cs, sm_state + 0x7f80);
+    env->ldt.limit = x86_ldl_phys(cs, sm_state + 0x7f7c);
+    env->ldt.flags = (x86_ldl_phys(cs, sm_state + 0x7f78) & 0xf0ff) << 8;
+
+    env->gdt.base = x86_ldl_phys(cs, sm_state + 0x7f74);
+    env->gdt.limit = x86_ldl_phys(cs, sm_state + 0x7f70);
+
+    env->idt.base = x86_ldl_phys(cs, sm_state + 0x7f58);
+    env->idt.limit = x86_ldl_phys(cs, sm_state + 0x7f54);
 
     for (i = 0; i < 6; i++) {
         if (i < 3) {
@@ -293,18 +293,18 @@  void helper_rsm(CPUX86State *env)
             offset = 0x7f2c + (i - 3) * 12;
         }
         cpu_x86_load_seg_cache(env, i,
-                               ldl_phys(cs->as,
+                               x86_ldl_phys(cs,
                                         sm_state + 0x7fa8 + i * 4) & 0xffff,
-                               ldl_phys(cs->as, sm_state + offset + 8),
-                               ldl_phys(cs->as, sm_state + offset + 4),
-                               (ldl_phys(cs->as,
+                               x86_ldl_phys(cs, sm_state + offset + 8),
+                               x86_ldl_phys(cs, sm_state + offset + 4),
+                               (x86_ldl_phys(cs,
                                          sm_state + offset) & 0xf0ff) << 8);
     }
-    cpu_x86_update_cr4(env, ldl_phys(cs->as, sm_state + 0x7f14));
+    cpu_x86_update_cr4(env, x86_ldl_phys(cs, sm_state + 0x7f14));
 
-    val = ldl_phys(cs->as, sm_state + 0x7efc); /* revision ID */
+    val = x86_ldl_phys(cs, sm_state + 0x7efc); /* revision ID */
     if (val & 0x20000) {
-        env->smbase = ldl_phys(cs->as, sm_state + 0x7ef8) & ~0x7fff;
+        env->smbase = x86_ldl_phys(cs, sm_state + 0x7ef8) & ~0x7fff;
     }
 #endif
     env->hflags &= ~HF_SMM_MASK;
diff --git a/target-i386/svm_helper.c b/target-i386/svm_helper.c
index 429d029..f1fabf5 100644
--- a/target-i386/svm_helper.c
+++ b/target-i386/svm_helper.c
@@ -87,13 +87,13 @@  static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
 {
     CPUState *cs = CPU(x86_env_get_cpu(env));
 
-    stw_phys(cs->as, addr + offsetof(struct vmcb_seg, selector),
+    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, selector),
              sc->selector);
-    stq_phys(cs->as, addr + offsetof(struct vmcb_seg, base),
+    x86_stq_phys(cs, addr + offsetof(struct vmcb_seg, base),
              sc->base);
-    stl_phys(cs->as, addr + offsetof(struct vmcb_seg, limit),
+    x86_stl_phys(cs, addr + offsetof(struct vmcb_seg, limit),
              sc->limit);
-    stw_phys(cs->as, addr + offsetof(struct vmcb_seg, attrib),
+    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, attrib),
              ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
 }
 
@@ -103,11 +103,11 @@  static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
     CPUState *cs = CPU(x86_env_get_cpu(env));
     unsigned int flags;
 
-    sc->selector = lduw_phys(cs->as,
+    sc->selector = x86_lduw_phys(cs,
                              addr + offsetof(struct vmcb_seg, selector));
-    sc->base = ldq_phys(cs->as, addr + offsetof(struct vmcb_seg, base));
-    sc->limit = ldl_phys(cs->as, addr + offsetof(struct vmcb_seg, limit));
-    flags = lduw_phys(cs->as, addr + offsetof(struct vmcb_seg, attrib));
+    sc->base = x86_ldq_phys(cs, addr + offsetof(struct vmcb_seg, base));
+    sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit));
+    flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib));
     sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
 }
 
@@ -141,32 +141,32 @@  void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
     env->vm_vmcb = addr;
 
     /* save the current CPU state in the hsave page */
-    stq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
+    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
              env->gdt.base);
-    stl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
+    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
              env->gdt.limit);
 
-    stq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
+    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
              env->idt.base);
-    stl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
+    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
              env->idt.limit);
 
-    stq_phys(cs->as,
+    x86_stq_phys(cs,
              env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
-    stq_phys(cs->as,
+    x86_stq_phys(cs,
              env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
-    stq_phys(cs->as,
+    x86_stq_phys(cs,
              env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
-    stq_phys(cs->as,
+    x86_stq_phys(cs,
              env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
-    stq_phys(cs->as,
+    x86_stq_phys(cs,
              env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
-    stq_phys(cs->as,
+    x86_stq_phys(cs,
              env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
 
-    stq_phys(cs->as,
+    x86_stq_phys(cs,
              env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
-    stq_phys(cs->as,
+    x86_stq_phys(cs,
              env->vm_hsave + offsetof(struct vmcb, save.rflags),
              cpu_compute_eflags(env));
 
@@ -179,30 +179,30 @@  void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
     svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                  &env->segs[R_DS]);
 
-    stq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.rip),
+    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
              env->eip + next_eip_addend);
-    stq_phys(cs->as,
+    x86_stq_phys(cs,
              env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
-    stq_phys(cs->as,
+    x86_stq_phys(cs,
              env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
 
     /* load the interception bitmaps so we do not need to access the
        vmcb in svm mode */
-    env->intercept = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
+    env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                       control.intercept));
-    env->intercept_cr_read = lduw_phys(cs->as, env->vm_vmcb +
+    env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_cr_read));
-    env->intercept_cr_write = lduw_phys(cs->as, env->vm_vmcb +
+    env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.intercept_cr_write));
-    env->intercept_dr_read = lduw_phys(cs->as, env->vm_vmcb +
+    env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_dr_read));
-    env->intercept_dr_write = lduw_phys(cs->as, env->vm_vmcb +
+    env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.intercept_dr_write));
-    env->intercept_exceptions = ldl_phys(cs->as, env->vm_vmcb +
+    env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.intercept_exceptions
                                                   ));
@@ -210,35 +210,35 @@  void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
     /* enable intercepts */
     env->hflags |= HF_SVMI_MASK;
 
-    env->tsc_offset = ldq_phys(cs->as, env->vm_vmcb +
+    env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
                                offsetof(struct vmcb, control.tsc_offset));
 
-    env->gdt.base  = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
+    env->gdt.base  = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                       save.gdtr.base));
-    env->gdt.limit = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
+    env->gdt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                       save.gdtr.limit));
 
-    env->idt.base  = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
+    env->idt.base  = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                       save.idtr.base));
-    env->idt.limit = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
+    env->idt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                       save.idtr.limit));
 
     /* clear exit_info_2 so we behave like the real hardware */
-    stq_phys(cs->as,
+    x86_stq_phys(cs,
              env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
 
-    cpu_x86_update_cr0(env, ldq_phys(cs->as,
+    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                      env->vm_vmcb + offsetof(struct vmcb,
                                                              save.cr0)));
-    cpu_x86_update_cr4(env, ldq_phys(cs->as,
+    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                      env->vm_vmcb + offsetof(struct vmcb,
                                                              save.cr4)));
-    cpu_x86_update_cr3(env, ldq_phys(cs->as,
+    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                      env->vm_vmcb + offsetof(struct vmcb,
                                                              save.cr3)));
-    env->cr[2] = ldq_phys(cs->as,
+    env->cr[2] = x86_ldq_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, save.cr2));
-    int_ctl = ldl_phys(cs->as,
+    int_ctl = x86_ldl_phys(cs,
                        env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
     env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
     if (int_ctl & V_INTR_MASKING_MASK) {
@@ -250,10 +250,10 @@  void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
     }
 
     cpu_load_efer(env,
-                  ldq_phys(cs->as,
+                  x86_ldq_phys(cs,
                            env->vm_vmcb + offsetof(struct vmcb, save.efer)));
     env->eflags = 0;
-    cpu_load_eflags(env, ldq_phys(cs->as,
+    cpu_load_eflags(env, x86_ldq_phys(cs,
                                   env->vm_vmcb + offsetof(struct vmcb,
                                                           save.rflags)),
                     ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
@@ -267,21 +267,21 @@  void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
     svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                        R_DS);
 
-    env->eip = ldq_phys(cs->as,
+    env->eip = x86_ldq_phys(cs,
                         env->vm_vmcb + offsetof(struct vmcb, save.rip));
 
-    env->regs[R_ESP] = ldq_phys(cs->as,
+    env->regs[R_ESP] = x86_ldq_phys(cs,
                                 env->vm_vmcb + offsetof(struct vmcb, save.rsp));
-    env->regs[R_EAX] = ldq_phys(cs->as,
+    env->regs[R_EAX] = x86_ldq_phys(cs,
                                 env->vm_vmcb + offsetof(struct vmcb, save.rax));
-    env->dr[7] = ldq_phys(cs->as,
+    env->dr[7] = x86_ldq_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, save.dr7));
-    env->dr[6] = ldq_phys(cs->as,
+    env->dr[6] = x86_ldq_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, save.dr6));
 
     /* FIXME: guest state consistency checks */
 
-    switch (ldub_phys(cs->as,
+    switch (x86_ldub_phys(cs,
                       env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
     case TLB_CONTROL_DO_NOTHING:
         break;
@@ -300,12 +300,12 @@  void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
     }
 
     /* maybe we need to inject an event */
-    event_inj = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
+    event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                  control.event_inj));
     if (event_inj & SVM_EVTINJ_VALID) {
         uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
         uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
-        uint32_t event_inj_err = ldl_phys(cs->as, env->vm_vmcb +
+        uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
                                           offsetof(struct vmcb,
                                                    control.event_inj_err));
 
@@ -372,7 +372,7 @@  void helper_vmload(CPUX86State *env, int aflag)
 
     qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                   "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
-                  addr, ldq_phys(cs->as, addr + offsetof(struct vmcb,
+                  addr, x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                           save.fs.base)),
                   env->segs[R_FS].base);
 
@@ -382,18 +382,18 @@  void helper_vmload(CPUX86State *env, int aflag)
     svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);
 
 #ifdef TARGET_X86_64
-    env->kernelgsbase = ldq_phys(cs->as, addr + offsetof(struct vmcb,
+    env->kernelgsbase = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                  save.kernel_gs_base));
-    env->lstar = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.lstar));
-    env->cstar = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.cstar));
-    env->fmask = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.sfmask));
+    env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar));
+    env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar));
+    env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask));
 #endif
-    env->star = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.star));
-    env->sysenter_cs = ldq_phys(cs->as,
+    env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star));
+    env->sysenter_cs = x86_ldq_phys(cs,
                                 addr + offsetof(struct vmcb, save.sysenter_cs));
-    env->sysenter_esp = ldq_phys(cs->as, addr + offsetof(struct vmcb,
+    env->sysenter_esp = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                  save.sysenter_esp));
-    env->sysenter_eip = ldq_phys(cs->as, addr + offsetof(struct vmcb,
+    env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                  save.sysenter_eip));
 }
 
@@ -412,7 +412,7 @@  void helper_vmsave(CPUX86State *env, int aflag)
 
     qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                   "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
-                  addr, ldq_phys(cs->as,
+                  addr, x86_ldq_phys(cs,
                                  addr + offsetof(struct vmcb, save.fs.base)),
                   env->segs[R_FS].base);
 
@@ -426,18 +426,18 @@  void helper_vmsave(CPUX86State *env, int aflag)
                  &env->ldt);
 
 #ifdef TARGET_X86_64
-    stq_phys(cs->as, addr + offsetof(struct vmcb, save.kernel_gs_base),
+    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.kernel_gs_base),
              env->kernelgsbase);
-    stq_phys(cs->as, addr + offsetof(struct vmcb, save.lstar), env->lstar);
-    stq_phys(cs->as, addr + offsetof(struct vmcb, save.cstar), env->cstar);
-    stq_phys(cs->as, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
+    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.lstar), env->lstar);
+    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.cstar), env->cstar);
+    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
 #endif
-    stq_phys(cs->as, addr + offsetof(struct vmcb, save.star), env->star);
-    stq_phys(cs->as,
+    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.star), env->star);
+    x86_stq_phys(cs,
              addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
-    stq_phys(cs->as, addr + offsetof(struct vmcb, save.sysenter_esp),
+    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_esp),
              env->sysenter_esp);
-    stq_phys(cs->as, addr + offsetof(struct vmcb, save.sysenter_eip),
+    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_eip),
              env->sysenter_eip);
 }
 
@@ -515,7 +515,7 @@  void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
     case SVM_EXIT_MSR:
         if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
             /* FIXME: this should be read in at vmrun (faster this way?) */
-            uint64_t addr = ldq_phys(cs->as, env->vm_vmcb +
+            uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                      offsetof(struct vmcb,
                                               control.msrpm_base_pa));
             uint32_t t0, t1;
@@ -541,7 +541,7 @@  void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                 t1 = 0;
                 break;
             }
-            if (ldub_phys(cs->as, addr + t1) & ((1 << param) << t0)) {
+            if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
                 helper_vmexit(env, type, param);
             }
         }
@@ -567,13 +567,13 @@  void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
 
     if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
         /* FIXME: this should be read in at vmrun (faster this way?) */
-        uint64_t addr = ldq_phys(cs->as, env->vm_vmcb +
+        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                  offsetof(struct vmcb, control.iopm_base_pa));
         uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
 
-        if (lduw_phys(cs->as, addr + port / 8) & (mask << (port & 7))) {
+        if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
             /* next env->eip */
-            stq_phys(cs->as,
+            x86_stq_phys(cs,
                      env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                      env->eip + next_eip_addend);
             helper_vmexit(env, SVM_EXIT_IOIO, param | (port << 16));
@@ -590,17 +590,17 @@  void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
     qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                   PRIx64 ", " TARGET_FMT_lx ")!\n",
                   exit_code, exit_info_1,
-                  ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
+                  x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                    control.exit_info_2)),
                   env->eip);
 
     if (env->hflags & HF_INHIBIT_IRQ_MASK) {
-        stl_phys(cs->as,
+        x86_stl_phys(cs,
                  env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                  SVM_INTERRUPT_SHADOW_MASK);
         env->hflags &= ~HF_INHIBIT_IRQ_MASK;
     } else {
-        stl_phys(cs->as,
+        x86_stl_phys(cs,
                  env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
     }
 
@@ -614,50 +614,50 @@  void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
     svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                  &env->segs[R_DS]);
 
-    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
+    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
              env->gdt.base);
-    stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
+    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
              env->gdt.limit);
 
-    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
+    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
              env->idt.base);
-    stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
+    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
              env->idt.limit);
 
-    stq_phys(cs->as,
+    x86_stq_phys(cs,
              env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
-    stq_phys(cs->as,
+    x86_stq_phys(cs,
              env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
-    stq_phys(cs->as,
+    x86_stq_phys(cs,
              env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
-    stq_phys(cs->as,
+    x86_stq_phys(cs,
              env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
-    stq_phys(cs->as,
+    x86_stq_phys(cs,
              env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
 
-    int_ctl = ldl_phys(cs->as,
+    int_ctl = x86_ldl_phys(cs,
                        env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
     int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
     int_ctl |= env->v_tpr & V_TPR_MASK;
     if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
         int_ctl |= V_IRQ_MASK;
     }
-    stl_phys(cs->as,
+    x86_stl_phys(cs,
              env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
 
-    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
+    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
              cpu_compute_eflags(env));
-    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.rip),
+    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
              env->eip);
-    stq_phys(cs->as,
+    x86_stq_phys(cs,
              env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
-    stq_phys(cs->as,
+    x86_stq_phys(cs,
              env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
-    stq_phys(cs->as,
+    x86_stq_phys(cs,
              env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
-    stq_phys(cs->as,
+    x86_stq_phys(cs,
              env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
-    stb_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
+    x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
              env->hflags & HF_CPL_MASK);
 
     /* Reload the host state from vm_hsave */
@@ -668,32 +668,32 @@  void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
     cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
     env->tsc_offset = 0;
 
-    env->gdt.base  = ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
+    env->gdt.base  = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                        save.gdtr.base));
-    env->gdt.limit = ldl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
+    env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                        save.gdtr.limit));
 
-    env->idt.base  = ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
+    env->idt.base  = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                        save.idtr.base));
-    env->idt.limit = ldl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
+    env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                        save.idtr.limit));
 
-    cpu_x86_update_cr0(env, ldq_phys(cs->as,
+    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                      env->vm_hsave + offsetof(struct vmcb,
                                                               save.cr0)) |
                        CR0_PE_MASK);
-    cpu_x86_update_cr4(env, ldq_phys(cs->as,
+    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                      env->vm_hsave + offsetof(struct vmcb,
                                                               save.cr4)));
-    cpu_x86_update_cr3(env, ldq_phys(cs->as,
+    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                      env->vm_hsave + offsetof(struct vmcb,
                                                               save.cr3)));
     /* we need to set the efer after the crs so the hidden flags get
        set properly */
-    cpu_load_efer(env, ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
+    cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                          save.efer)));
     env->eflags = 0;
-    cpu_load_eflags(env, ldq_phys(cs->as,
+    cpu_load_eflags(env, x86_ldq_phys(cs,
                                   env->vm_hsave + offsetof(struct vmcb,
                                                            save.rflags)),
                     ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
@@ -708,33 +708,33 @@  void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
     svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                        R_DS);
 
-    env->eip = ldq_phys(cs->as,
+    env->eip = x86_ldq_phys(cs,
                         env->vm_hsave + offsetof(struct vmcb, save.rip));
-    env->regs[R_ESP] = ldq_phys(cs->as, env->vm_hsave +
+    env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
                                 offsetof(struct vmcb, save.rsp));
-    env->regs[R_EAX] = ldq_phys(cs->as, env->vm_hsave +
+    env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
                                 offsetof(struct vmcb, save.rax));
 
-    env->dr[6] = ldq_phys(cs->as,
+    env->dr[6] = x86_ldq_phys(cs,
                           env->vm_hsave + offsetof(struct vmcb, save.dr6));
-    env->dr[7] = ldq_phys(cs->as,
+    env->dr[7] = x86_ldq_phys(cs,
                           env->vm_hsave + offsetof(struct vmcb, save.dr7));
 
     /* other setups */
-    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
+    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
              exit_code);
-    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
+    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
              exit_info_1);
 
-    stl_phys(cs->as,
+    x86_stl_phys(cs,
              env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
-             ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
+             x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                               control.event_inj)));
-    stl_phys(cs->as,
+    x86_stl_phys(cs,
              env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
-             ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
+             x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                               control.event_inj_err)));
-    stl_phys(cs->as,
+    x86_stl_phys(cs,
              env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);
 
     env->hflags2 &= ~HF2_GIF_MASK;