Patch Detail

GET: Show a patch.
PATCH: Update a patch.
PUT: Update a patch.
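
For illustration, here is a minimal Python sketch of driving this endpoint. The URL and field names are taken from the raw response reproduced below; the use of the third-party requests library, and the assumption that a read-only GET needs no authentication, are mine, so treat this as a sketch rather than a canonical client.

    import requests  # assumption: the requests library is installed

    # URL taken from the "url" field of the response shown below. Dropping the
    # "?format=api" suffix (which selects the browsable HTML view) leaves the
    # plain JSON endpoint; Patchwork's API is built on Django REST Framework,
    # which serves JSON to non-browser clients by default.
    url = "http://patchwork.ozlabs.org/api/patches/806347/"

    resp = requests.get(url)   # GET: show a patch
    resp.raise_for_status()
    patch = resp.json()

    # A few of the fields visible in the raw response below.
    print(patch["name"])   # "[12/14] hvf: move fields from CPUState to CPUX86State"
    print(patch["state"])  # "new"
    print(patch["mbox"])   # mbox download URL, suitable for feeding to `git am`

The PATCH and PUT methods listed above modify patch metadata (for example its state or delegate) and, unlike GET, normally require an authenticated account with maintainer rights on the project; they are not sketched here.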
GET /api/patches/806347/?format=api
{ "id": 806347, "url": "http://patchwork.ozlabs.org/api/patches/806347/?format=api", "web_url": "http://patchwork.ozlabs.org/project/qemu-devel/patch/20170828015654.2530-13-Sergio.G.DelReal@gmail.com/", "project": { "id": 14, "url": "http://patchwork.ozlabs.org/api/projects/14/?format=api", "name": "QEMU Development", "link_name": "qemu-devel", "list_id": "qemu-devel.nongnu.org", "list_email": "qemu-devel@nongnu.org", "web_url": "", "scm_url": "", "webscm_url": "", "list_archive_url": "", "list_archive_url_format": "", "commit_url_format": "" }, "msgid": "<20170828015654.2530-13-Sergio.G.DelReal@gmail.com>", "list_archive_url": null, "date": "2017-08-28T01:56:52", "name": "[12/14] hvf: move fields from CPUState to CPUX86State", "commit_ref": null, "pull_url": null, "state": "new", "archived": false, "hash": "109eff71286a693b7f1b3c7a63befeca0259d5ad", "submitter": { "id": 70675, "url": "http://patchwork.ozlabs.org/api/people/70675/?format=api", "name": "Sergio Andres Gomez Del Real", "email": "sergio.g.delreal@gmail.com" }, "delegate": null, "mbox": "http://patchwork.ozlabs.org/project/qemu-devel/patch/20170828015654.2530-13-Sergio.G.DelReal@gmail.com/mbox/", "series": [ { "id": 56, "url": "http://patchwork.ozlabs.org/api/series/56/?format=api", "web_url": "http://patchwork.ozlabs.org/project/qemu-devel/list/?series=56", "date": "2017-08-28T01:56:40", "name": "add support for Hypervisor.framework in QEMU", "version": 1, "mbox": "http://patchwork.ozlabs.org/series/56/mbox/" } ], "comments": "http://patchwork.ozlabs.org/api/patches/806347/comments/", "check": "pending", "checks": "http://patchwork.ozlabs.org/api/patches/806347/checks/", "tags": {}, "related": [], "headers": { "Return-Path": "<qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org>", "X-Original-To": "incoming@patchwork.ozlabs.org", "Delivered-To": "patchwork-incoming@bilbo.ozlabs.org", "Authentication-Results": [ "ozlabs.org;\n\tspf=pass (mailfrom) smtp.mailfrom=nongnu.org\n\t(client-ip=2001:4830:134:3::11; helo=lists.gnu.org;\n\tenvelope-from=qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org;\n\treceiver=<UNKNOWN>)", "ozlabs.org;\n\tdkim=fail reason=\"signature verification failed\" (2048-bit key;\n\tunprotected) header.d=gmail.com header.i=@gmail.com\n\theader.b=\"O7c9Qhey\"; dkim-atps=neutral" ], "Received": [ "from lists.gnu.org (lists.gnu.org [IPv6:2001:4830:134:3::11])\n\t(using TLSv1 with cipher AES256-SHA (256/256 bits))\n\t(No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 3xgZtn4s7wz9s78\n\tfor <incoming@patchwork.ozlabs.org>;\n\tMon, 28 Aug 2017 12:08:33 +1000 (AEST)", "from localhost ([::1]:36539 helo=lists.gnu.org)\n\tby lists.gnu.org with esmtp (Exim 4.71) (envelope-from\n\t<qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org>)\n\tid 1dm9Tb-00046E-HT\n\tfor incoming@patchwork.ozlabs.org; Sun, 27 Aug 2017 22:08:31 -0400", "from eggs.gnu.org ([2001:4830:134:3::10]:42205)\n\tby lists.gnu.org with esmtp (Exim 4.71)\n\t(envelope-from <sergio.g.delreal@gmail.com>) id 1dm9JU-0005JZ-Nh\n\tfor qemu-devel@nongnu.org; Sun, 27 Aug 2017 21:58:16 -0400", "from Debian-exim by eggs.gnu.org with spam-scanned (Exim 4.71)\n\t(envelope-from <sergio.g.delreal@gmail.com>) id 1dm9JI-0001La-C6\n\tfor qemu-devel@nongnu.org; Sun, 27 Aug 2017 21:58:04 -0400", "from mail-ua0-x22b.google.com ([2607:f8b0:400c:c08::22b]:36779)\n\tby eggs.gnu.org with esmtps (TLS1.0:RSA_AES_128_CBC_SHA1:16)\n\t(Exim 4.71) (envelope-from <sergio.g.delreal@gmail.com>)\n\tid 1dm9JI-0001Kf-1G\n\tfor 
qemu-devel@nongnu.org; Sun, 27 Aug 2017 21:57:52 -0400", "by mail-ua0-x22b.google.com with SMTP id 105so5396680uad.3\n\tfor <qemu-devel@nongnu.org>; Sun, 27 Aug 2017 18:57:51 -0700 (PDT)", "from localhost.localdomain ([191.109.6.191])\n\tby smtp.gmail.com with ESMTPSA id\n\ty12sm2696824uad.22.2017.08.27.18.57.46\n\t(version=TLS1_2 cipher=ECDHE-RSA-AES128-GCM-SHA256 bits=128/128);\n\tSun, 27 Aug 2017 18:57:49 -0700 (PDT)" ], "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20161025;\n\th=from:to:cc:subject:date:message-id:in-reply-to:references;\n\tbh=yoGw9BkZB91o6BA3Kt2eSIRTEVF7b0cFlEwwESelyuI=;\n\tb=O7c9QheySHFHBVi15WBG5k8C69rPzMgxVj9L1Vxyc6+HRzRIbyZtxpoADaSEsya08m\n\tXmcQ0hVB6BpL42fz9u9HEVY7e6V+CYvjbVd63yMhQoB8MkJ/79Rm/ldkaFVk+JMusNm3\n\td2sLFyEcqeZ2J5Ep0eEgBHfH73XEVUy7sn9TCm4SbSM09Dqqm90d7jqSaAbQUY1/mQCu\n\ta6gAbhi0Z9lcEcTBkKwN8S8qjXVx5aW/8ZVvhd6/dtSnH7s1BZDxxykbsvPgx+5hZcpP\n\tX1sWpGfrceSOvhQ+VwYRuc2c5IgDp0++iIAieCR0YPta9cCAlZmYkrRsw/+f130ureyi\n\tYUjg==", "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=1e100.net; s=20161025;\n\th=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to\n\t:references;\n\tbh=yoGw9BkZB91o6BA3Kt2eSIRTEVF7b0cFlEwwESelyuI=;\n\tb=VMUTatYb4JPsbRjCoQDeOjLHiHuhgxs3j7bkJY/9TNhyn+T8wYD8B743lijkiYzMni\n\tR4xXhuvJe50xy5PqGJZSLxMpvCUZ6Ceaj9a1drQs9JsrnEbqdFUHRal2GIQEyK+ihf4U\n\tI/qh4/88WYSnG/8/LfWkd7z0nv21TERWcvlW6OiebJth173n4UrlESLD68VbuqT8o3l8\n\tHSU9yXWyW2BovhX7oI3aF3rw60nX6k/ugD2tGSQ5UgeZwIa1fLhdQYJbhgm/JTDd27EB\n\tm59TqitLEnpGFHtbkgnjpoPJB3petwB2xxgjxrzJPkzvBKJAKfBut1ccyM5N6FDCoIhS\n\tLxdQ==", "X-Gm-Message-State": "AHYfb5iilqepKOd2pRb6dAeoCJgix9QLNRNdXJI+TcLzba8qk9jayjYq\n\t6X5BKdtrNY/KC9eS", "X-Received": "by 10.159.55.108 with SMTP id a41mr4013062uae.84.1503885470008; \n\tSun, 27 Aug 2017 18:57:50 -0700 (PDT)", "From": "Sergio Andres Gomez Del Real <sergio.g.delreal@gmail.com>", "X-Google-Original-From": "Sergio Andres Gomez Del Real\n\t<Sergio.G.DelReal@gmail.com>", "To": "qemu-devel@nongnu.org", "Date": "Sun, 27 Aug 2017 20:56:52 -0500", "Message-Id": "<20170828015654.2530-13-Sergio.G.DelReal@gmail.com>", "X-Mailer": "git-send-email 2.11.0", "In-Reply-To": "<20170828015654.2530-1-Sergio.G.DelReal@gmail.com>", "References": "<20170828015654.2530-1-Sergio.G.DelReal@gmail.com>", "X-detected-operating-system": "by eggs.gnu.org: Genre and OS details not\n\trecognized.", "X-Received-From": "2607:f8b0:400c:c08::22b", "Subject": "[Qemu-devel] [PATCH 12/14] hvf: move fields from CPUState to\n\tCPUX86State", "X-BeenThere": "qemu-devel@nongnu.org", "X-Mailman-Version": "2.1.21", "Precedence": "list", "List-Id": "<qemu-devel.nongnu.org>", "List-Unsubscribe": "<https://lists.nongnu.org/mailman/options/qemu-devel>,\n\t<mailto:qemu-devel-request@nongnu.org?subject=unsubscribe>", "List-Archive": "<http://lists.nongnu.org/archive/html/qemu-devel/>", "List-Post": "<mailto:qemu-devel@nongnu.org>", "List-Help": "<mailto:qemu-devel-request@nongnu.org?subject=help>", "List-Subscribe": "<https://lists.nongnu.org/mailman/listinfo/qemu-devel>,\n\t<mailto:qemu-devel-request@nongnu.org?subject=subscribe>", "Cc": "Sergio Andres Gomez Del Real <Sergio.G.DelReal@gmail.com>", "Errors-To": "qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org", "Sender": "\"Qemu-devel\"\n\t<qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org>" }, "content": "This commit is a small refactoring of hvf's emulation code: it moves the\nHVFX86EmulatorState field to CPUX86State, and in general changes, for\nthe emulation functions, the 
parameter with signature 'CPUState *' for\n'CPUX86State *' so we don't have to get the 'env' (which is what we\nreally need) through the 'cpu' everytime.\n\nSigned-off-by: Sergio Andres Gomez Del Real <Sergio.G.DelReal@gmail.com>\n---\n include/qom/cpu.h | 2 -\n target/i386/cpu.h | 7 +\n target/i386/hvf-all.c | 125 +++----\n target/i386/hvf-utils/x86.c | 4 +-\n target/i386/hvf-utils/x86.h | 34 +-\n target/i386/hvf-utils/x86_decode.c | 357 ++++++++++----------\n target/i386/hvf-utils/x86_decode.h | 23 +-\n target/i386/hvf-utils/x86_emu.c | 673 +++++++++++++++++++------------------\n target/i386/hvf-utils/x86_emu.h | 29 +-\n target/i386/hvf-utils/x86_flags.c | 194 +++++------\n target/i386/hvf-utils/x86_flags.h | 106 +++---\n target/i386/hvf-utils/x86hvf.c | 14 +-\n 12 files changed, 794 insertions(+), 774 deletions(-)", "diff": "diff --git a/include/qom/cpu.h b/include/qom/cpu.h\nindex ef74c2ce3c..abe82a5b7c 100644\n--- a/include/qom/cpu.h\n+++ b/include/qom/cpu.h\n@@ -408,9 +408,7 @@ struct CPUState {\n */\n uint16_t pending_tlb_flush;\n \n- bool hvf_vcpu_dirty;\n uint64_t hvf_fd; // fd of vcpu created by HVF\n- struct hvf_x86_state *hvf_x86;\n };\n \n QTAILQ_HEAD(CPUTailQ, CPUState);\ndiff --git a/target/i386/cpu.h b/target/i386/cpu.h\nindex 7d90f08b98..1d056ee343 100644\n--- a/target/i386/cpu.h\n+++ b/target/i386/cpu.h\n@@ -23,6 +23,9 @@\n #include \"qemu-common.h\"\n #include \"cpu-qom.h\"\n #include \"standard-headers/asm-x86/hyperv.h\"\n+#if defined(CONFIG_HVF)\n+#include \"target/i386/hvf-utils/x86.h\"\n+#endif\n \n #ifdef TARGET_X86_64\n #define TARGET_LONG_BITS 64\n@@ -1187,11 +1190,15 @@ typedef struct CPUX86State {\n int32_t interrupt_injected;\n uint8_t soft_interrupt;\n uint8_t has_error_code;\n+ uint32_t ins_len;\n uint32_t sipi_vector;\n bool tsc_valid;\n int64_t tsc_khz;\n int64_t user_tsc_khz; /* for sanity check only */\n void *kvm_xsave_buf;\n+#if defined(CONFIG_HVF)\n+ HVFX86EmulatorState *hvf_emul;\n+#endif\n \n uint64_t mcg_cap;\n uint64_t mcg_ctl;\ndiff --git a/target/i386/hvf-all.c b/target/i386/hvf-all.c\nindex 20c796089d..8a75723dcf 100644\n--- a/target/i386/hvf-all.c\n+++ b/target/i386/hvf-all.c\n@@ -208,17 +208,20 @@ void update_apic_tpr(CPUState *cpu)\n /* TODO: taskswitch handling */\n static void save_state_to_tss32(CPUState *cpu, struct x86_tss_segment32 *tss)\n {\n+ X86CPU *x86_cpu = X86_CPU(cpu);\n+ CPUX86State *env = &x86_cpu->env;\n+\n /* CR3 and ldt selector are not saved intentionally */\n- tss->eip = EIP(cpu);\n- tss->eflags = EFLAGS(cpu);\n- tss->eax = EAX(cpu);\n- tss->ecx = ECX(cpu);\n- tss->edx = EDX(cpu);\n- tss->ebx = EBX(cpu);\n- tss->esp = ESP(cpu);\n- tss->ebp = EBP(cpu);\n- tss->esi = ESI(cpu);\n- tss->edi = EDI(cpu);\n+ tss->eip = EIP(env);\n+ tss->eflags = EFLAGS(env);\n+ tss->eax = EAX(env);\n+ tss->ecx = ECX(env);\n+ tss->edx = EDX(env);\n+ tss->ebx = EBX(env);\n+ tss->esp = ESP(env);\n+ tss->ebp = EBP(env);\n+ tss->esi = ESI(env);\n+ tss->edi = EDI(env);\n \n tss->es = vmx_read_segment_selector(cpu, REG_SEG_ES).sel;\n tss->cs = vmx_read_segment_selector(cpu, REG_SEG_CS).sel;\n@@ -230,20 +233,23 @@ static void save_state_to_tss32(CPUState *cpu, struct x86_tss_segment32 *tss)\n \n static void load_state_from_tss32(CPUState *cpu, struct x86_tss_segment32 *tss)\n {\n+ X86CPU *x86_cpu = X86_CPU(cpu);\n+ CPUX86State *env = &x86_cpu->env;\n+\n wvmcs(cpu->hvf_fd, VMCS_GUEST_CR3, tss->cr3);\n \n- RIP(cpu) = tss->eip;\n- EFLAGS(cpu) = tss->eflags | 2;\n+ RIP(env) = tss->eip;\n+ EFLAGS(env) = tss->eflags | 2;\n \n /* General purpose registers */\n- 
RAX(cpu) = tss->eax;\n- RCX(cpu) = tss->ecx;\n- RDX(cpu) = tss->edx;\n- RBX(cpu) = tss->ebx;\n- RSP(cpu) = tss->esp;\n- RBP(cpu) = tss->ebp;\n- RSI(cpu) = tss->esi;\n- RDI(cpu) = tss->edi;\n+ RAX(env) = tss->eax;\n+ RCX(env) = tss->ecx;\n+ RDX(env) = tss->edx;\n+ RBX(env) = tss->ebx;\n+ RSP(env) = tss->esp;\n+ RBP(env) = tss->ebp;\n+ RSI(env) = tss->esi;\n+ RDI(env) = tss->edi;\n \n vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ldt}},\n REG_SEG_LDTR);\n@@ -319,6 +325,8 @@ static void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel,\n uint32_t desc_limit;\n struct x86_call_gate task_gate_desc;\n struct vmx_segment vmx_seg;\n+ X86CPU *x86_cpu = X86_CPU(cpu);\n+ CPUX86State *env = &x86_cpu->env;\n \n x86_read_segment_descriptor(cpu, &next_tss_desc, tss_sel);\n x86_read_segment_descriptor(cpu, &curr_tss_desc, old_tss_sel);\n@@ -347,7 +355,7 @@ static void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel,\n }\n \n if (reason == TSR_IRET) {\n- EFLAGS(cpu) &= ~RFLAGS_NT;\n+ EFLAGS(env) &= ~RFLAGS_NT;\n }\n \n if (reason != TSR_CALL && reason != TSR_IDT_GATE) {\n@@ -404,16 +412,16 @@ void hvf_handle_io(CPUArchState *env, uint16_t port, void *buffer,\n static void do_hvf_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)\n {\n CPUState *cpu_state = cpu;\n- if (cpu_state->hvf_vcpu_dirty == 0) {\n+ if (cpu_state->vcpu_dirty == 0) {\n hvf_get_registers(cpu_state);\n }\n \n- cpu_state->hvf_vcpu_dirty = 1;\n+ cpu_state->vcpu_dirty = 1;\n }\n \n void hvf_cpu_synchronize_state(CPUState *cpu_state)\n {\n- if (cpu_state->hvf_vcpu_dirty == 0) {\n+ if (cpu_state->vcpu_dirty == 0) {\n run_on_cpu(cpu_state, do_hvf_cpu_synchronize_state, RUN_ON_CPU_NULL);\n }\n }\n@@ -422,7 +430,7 @@ static void do_hvf_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg\n {\n CPUState *cpu_state = cpu;\n hvf_put_registers(cpu_state);\n- cpu_state->hvf_vcpu_dirty = false;\n+ cpu_state->vcpu_dirty = false;\n }\n \n void hvf_cpu_synchronize_post_reset(CPUState *cpu_state)\n@@ -434,7 +442,7 @@ void _hvf_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)\n {\n CPUState *cpu_state = cpu;\n hvf_put_registers(cpu_state);\n- cpu_state->hvf_vcpu_dirty = false;\n+ cpu_state->vcpu_dirty = false;\n }\n \n void hvf_cpu_synchronize_post_init(CPUState *cpu_state)\n@@ -647,7 +655,8 @@ static void dummy_signal(int sig)\n int hvf_init_vcpu(CPUState *cpu)\n {\n \n- X86CPU *x86cpu;\n+ X86CPU *x86cpu = X86_CPU(cpu);\n+ CPUX86State *env = &x86cpu->env;\n \n /* init cpu signals */\n sigset_t set;\n@@ -661,15 +670,15 @@ int hvf_init_vcpu(CPUState *cpu)\n sigdelset(&set, SIG_IPI);\n \n int r;\n- init_emu(cpu);\n- init_decoder(cpu);\n+ init_emu();\n+ init_decoder();\n init_cpuid(cpu);\n \n hvf_state->hvf_caps = (struct hvf_vcpu_caps *)g_malloc0(sizeof(struct hvf_vcpu_caps));\n- cpu->hvf_x86 = (struct hvf_x86_state *)g_malloc0(sizeof(struct hvf_x86_state));\n+ env->hvf_emul = (HVFX86EmulatorState *)g_malloc0(sizeof(HVFX86EmulatorState));\n \n r = hv_vcpu_create((hv_vcpuid_t *)&cpu->hvf_fd, HV_VCPU_DEFAULT);\n- cpu->hvf_vcpu_dirty = 1;\n+ cpu->vcpu_dirty = 1;\n assert_hvf_ok(r);\n \n if (hv_vmx_read_capability(HV_VMX_CAP_PINBASED,\n@@ -755,12 +764,12 @@ int hvf_vcpu_exec(CPUState *cpu)\n }\n \n do {\n- if (cpu->hvf_vcpu_dirty) {\n+ if (cpu->vcpu_dirty) {\n hvf_put_registers(cpu);\n- cpu->hvf_vcpu_dirty = false;\n+ cpu->vcpu_dirty = false;\n }\n \n- cpu->hvf_x86->interruptable =\n+ env->hvf_emul->interruptable =\n !(rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY) &\n 
(VMCS_INTERRUPTIBILITY_STI_BLOCKING |\n VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING));\n@@ -785,8 +794,8 @@ int hvf_vcpu_exec(CPUState *cpu)\n VMCS_EXIT_INSTRUCTION_LENGTH);\n uint64_t idtvec_info = rvmcs(cpu->hvf_fd, VMCS_IDT_VECTORING_INFO);\n rip = rreg(cpu->hvf_fd, HV_X86_RIP);\n- RFLAGS(cpu) = rreg(cpu->hvf_fd, HV_X86_RFLAGS);\n- env->eflags = RFLAGS(cpu);\n+ RFLAGS(env) = rreg(cpu->hvf_fd, HV_X86_RFLAGS);\n+ env->eflags = RFLAGS(env);\n \n qemu_mutex_lock_iothread();\n \n@@ -798,7 +807,7 @@ int hvf_vcpu_exec(CPUState *cpu)\n case EXIT_REASON_HLT: {\n macvm_set_rip(cpu, rip + ins_len);\n if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&\n- (EFLAGS(cpu) & IF_MASK))\n+ (EFLAGS(env) & IF_MASK))\n && !(cpu->interrupt_request & CPU_INTERRUPT_NMI) &&\n !(idtvec_info & VMCS_IDT_VEC_VALID)) {\n cpu->halted = 1;\n@@ -828,10 +837,10 @@ int hvf_vcpu_exec(CPUState *cpu)\n struct x86_decode decode;\n \n load_regs(cpu);\n- cpu->hvf_x86->fetch_rip = rip;\n+ env->hvf_emul->fetch_rip = rip;\n \n- decode_instruction(cpu, &decode);\n- exec_instruction(cpu, &decode);\n+ decode_instruction(env, &decode);\n+ exec_instruction(env, &decode);\n store_regs(cpu);\n break;\n }\n@@ -851,20 +860,20 @@ int hvf_vcpu_exec(CPUState *cpu)\n load_regs(cpu);\n hvf_handle_io(env, port, &val, 0, size, 1);\n if (size == 1) {\n- AL(cpu) = val;\n+ AL(env) = val;\n } else if (size == 2) {\n- AX(cpu) = val;\n+ AX(env) = val;\n } else if (size == 4) {\n- RAX(cpu) = (uint32_t)val;\n+ RAX(env) = (uint32_t)val;\n } else {\n VM_PANIC(\"size\");\n }\n- RIP(cpu) += ins_len;\n+ RIP(env) += ins_len;\n store_regs(cpu);\n break;\n } else if (!string && !in) {\n- RAX(cpu) = rreg(cpu->hvf_fd, HV_X86_RAX);\n- hvf_handle_io(env, port, &RAX(cpu), 1, size, 1);\n+ RAX(env) = rreg(cpu->hvf_fd, HV_X86_RAX);\n+ hvf_handle_io(env, port, &RAX(env), 1, size, 1);\n macvm_set_rip(cpu, rip + ins_len);\n break;\n }\n@@ -872,11 +881,11 @@ int hvf_vcpu_exec(CPUState *cpu)\n struct x86_decode decode;\n \n load_regs(cpu);\n- cpu->hvf_x86->fetch_rip = rip;\n+ env->hvf_emul->fetch_rip = rip;\n \n- decode_instruction(cpu, &decode);\n+ decode_instruction(env, &decode);\n VM_PANIC_ON(ins_len != decode.len);\n- exec_instruction(cpu, &decode);\n+ exec_instruction(env, &decode);\n store_regs(cpu);\n \n break;\n@@ -934,7 +943,7 @@ int hvf_vcpu_exec(CPUState *cpu)\n } else {\n simulate_wrmsr(cpu);\n }\n- RIP(cpu) += rvmcs(cpu->hvf_fd, VMCS_EXIT_INSTRUCTION_LENGTH);\n+ RIP(env) += rvmcs(cpu->hvf_fd, VMCS_EXIT_INSTRUCTION_LENGTH);\n store_regs(cpu);\n break;\n }\n@@ -948,19 +957,19 @@ int hvf_vcpu_exec(CPUState *cpu)\n \n switch (cr) {\n case 0x0: {\n- macvm_set_cr0(cpu->hvf_fd, RRX(cpu, reg));\n+ macvm_set_cr0(cpu->hvf_fd, RRX(env, reg));\n break;\n }\n case 4: {\n- macvm_set_cr4(cpu->hvf_fd, RRX(cpu, reg));\n+ macvm_set_cr4(cpu->hvf_fd, RRX(env, reg));\n break;\n }\n case 8: {\n X86CPU *x86_cpu = X86_CPU(cpu);\n if (exit_qual & 0x10) {\n- RRX(cpu, reg) = cpu_get_apic_tpr(x86_cpu->apic_state);\n+ RRX(env, reg) = cpu_get_apic_tpr(x86_cpu->apic_state);\n } else {\n- int tpr = RRX(cpu, reg);\n+ int tpr = RRX(env, reg);\n cpu_set_apic_tpr(x86_cpu->apic_state, tpr);\n ret = EXCP_INTERRUPT;\n }\n@@ -970,7 +979,7 @@ int hvf_vcpu_exec(CPUState *cpu)\n fprintf(stderr, \"Unrecognized CR %d\\n\", cr);\n abort();\n }\n- RIP(cpu) += ins_len;\n+ RIP(env) += ins_len;\n store_regs(cpu);\n break;\n }\n@@ -978,10 +987,10 @@ int hvf_vcpu_exec(CPUState *cpu)\n struct x86_decode decode;\n \n load_regs(cpu);\n- cpu->hvf_x86->fetch_rip = rip;\n+ env->hvf_emul->fetch_rip = rip;\n \n- 
decode_instruction(cpu, &decode);\n- exec_instruction(cpu, &decode);\n+ decode_instruction(env, &decode);\n+ exec_instruction(env, &decode);\n store_regs(cpu);\n break;\n }\ndiff --git a/target/i386/hvf-utils/x86.c b/target/i386/hvf-utils/x86.c\nindex 07eb5a8586..0df3d23ec9 100644\n--- a/target/i386/hvf-utils/x86.c\n+++ b/target/i386/hvf-utils/x86.c\n@@ -127,7 +127,9 @@ bool x86_is_real(struct CPUState *cpu)\n \n bool x86_is_v8086(struct CPUState *cpu)\n {\n- return x86_is_protected(cpu) && (RFLAGS(cpu) & RFLAGS_VM);\n+ X86CPU *x86_cpu = X86_CPU(cpu);\n+ CPUX86State *env = &x86_cpu->env;\n+ return x86_is_protected(cpu) && (RFLAGS(env) & RFLAGS_VM);\n }\n \n bool x86_is_long_mode(struct CPUState *cpu)\ndiff --git a/target/i386/hvf-utils/x86.h b/target/i386/hvf-utils/x86.h\nindex 435b49ae04..59a21b6e41 100644\n--- a/target/i386/hvf-utils/x86.h\n+++ b/target/i386/hvf-utils/x86.h\n@@ -23,7 +23,7 @@\n #include <sys/mman.h>\n #include <stdarg.h>\n #include \"qemu-common.h\"\n-#include \"x86_flags.h\"\n+#include \"x86_gen.h\"\n \n /* exceptions */\n typedef enum x86_exception {\n@@ -356,13 +356,14 @@ typedef struct x68_segment_selector {\n };\n } __attribute__ ((__packed__)) x68_segment_selector;\n \n-/* Definition of hvf_x86_state is here */\n-struct hvf_x86_state {\n- int hlt;\n- uint64_t init_tsc;\n+typedef struct lazy_flags {\n+ addr_t result;\n+ addr_t auxbits;\n+} lazy_flags;\n \n+/* Definition of hvf_x86_state is here */\n+typedef struct HVFX86EmulatorState {\n int interruptable;\n- uint64_t exp_rip;\n uint64_t fetch_rip;\n uint64_t rip;\n struct x86_register regs[16];\n@@ -370,8 +371,7 @@ struct hvf_x86_state {\n struct lazy_flags lflags;\n struct x86_efer efer;\n uint8_t mmio_buf[4096];\n- uint8_t *apic_page;\n-};\n+} HVFX86EmulatorState;\n \n /*\n * hvf xsave area\n@@ -381,12 +381,12 @@ struct hvf_xsave_buf {\n };\n \n /* useful register access macros */\n-#define RIP(cpu) (cpu->hvf_x86->rip)\n-#define EIP(cpu) ((uint32_t)cpu->hvf_x86->rip)\n-#define RFLAGS(cpu) (cpu->hvf_x86->rflags.rflags)\n-#define EFLAGS(cpu) (cpu->hvf_x86->rflags.eflags)\n+#define RIP(cpu) (cpu->hvf_emul->rip)\n+#define EIP(cpu) ((uint32_t)cpu->hvf_emul->rip)\n+#define RFLAGS(cpu) (cpu->hvf_emul->rflags.rflags)\n+#define EFLAGS(cpu) (cpu->hvf_emul->rflags.eflags)\n \n-#define RRX(cpu, reg) (cpu->hvf_x86->regs[reg].rrx)\n+#define RRX(cpu, reg) (cpu->hvf_emul->regs[reg].rrx)\n #define RAX(cpu) RRX(cpu, REG_RAX)\n #define RCX(cpu) RRX(cpu, REG_RCX)\n #define RDX(cpu) RRX(cpu, REG_RDX)\n@@ -404,7 +404,7 @@ struct hvf_xsave_buf {\n #define R14(cpu) RRX(cpu, REG_R14)\n #define R15(cpu) RRX(cpu, REG_R15)\n \n-#define ERX(cpu, reg) (cpu->hvf_x86->regs[reg].erx)\n+#define ERX(cpu, reg) (cpu->hvf_emul->regs[reg].erx)\n #define EAX(cpu) ERX(cpu, REG_RAX)\n #define ECX(cpu) ERX(cpu, REG_RCX)\n #define EDX(cpu) ERX(cpu, REG_RDX)\n@@ -414,7 +414,7 @@ struct hvf_xsave_buf {\n #define ESI(cpu) ERX(cpu, REG_RSI)\n #define EDI(cpu) ERX(cpu, REG_RDI)\n \n-#define RX(cpu, reg) (cpu->hvf_x86->regs[reg].rx)\n+#define RX(cpu, reg) (cpu->hvf_emul->regs[reg].rx)\n #define AX(cpu) RX(cpu, REG_RAX)\n #define CX(cpu) RX(cpu, REG_RCX)\n #define DX(cpu) RX(cpu, REG_RDX)\n@@ -424,13 +424,13 @@ struct hvf_xsave_buf {\n #define SI(cpu) RX(cpu, REG_RSI)\n #define DI(cpu) RX(cpu, REG_RDI)\n \n-#define RL(cpu, reg) (cpu->hvf_x86->regs[reg].lx)\n+#define RL(cpu, reg) (cpu->hvf_emul->regs[reg].lx)\n #define AL(cpu) RL(cpu, REG_RAX)\n #define CL(cpu) RL(cpu, REG_RCX)\n #define DL(cpu) RL(cpu, REG_RDX)\n #define BL(cpu) RL(cpu, REG_RBX)\n \n-#define 
RH(cpu, reg) (cpu->hvf_x86->regs[reg].hx)\n+#define RH(cpu, reg) (cpu->hvf_emul->regs[reg].hx)\n #define AH(cpu) RH(cpu, REG_RAX)\n #define CH(cpu) RH(cpu, REG_RCX)\n #define DH(cpu) RH(cpu, REG_RDX)\ndiff --git a/target/i386/hvf-utils/x86_decode.c b/target/i386/hvf-utils/x86_decode.c\nindex 4faf82f721..e28b1ddade 100644\n--- a/target/i386/hvf-utils/x86_decode.c\n+++ b/target/i386/hvf-utils/x86_decode.c\n@@ -27,9 +27,9 @@\n \n #define OPCODE_ESCAPE 0xf\n \n-static void decode_invalid(CPUState *cpu, struct x86_decode *decode)\n+static void decode_invalid(CPUX86State *env, struct x86_decode *decode)\n {\n- printf(\"%llx: failed to decode instruction \", cpu->hvf_x86->fetch_rip -\n+ printf(\"%llx: failed to decode instruction \", env->hvf_emul->fetch_rip -\n decode->len);\n for (int i = 0; i < decode->opcode_len; i++) {\n printf(\"%x \", decode->opcode[i]);\n@@ -60,7 +60,7 @@ uint64_t sign(uint64_t val, int size)\n return val;\n }\n \n-static inline uint64_t decode_bytes(CPUState *cpu, struct x86_decode *decode,\n+static inline uint64_t decode_bytes(CPUX86State *env, struct x86_decode *decode,\n int size)\n {\n addr_t val = 0;\n@@ -75,129 +75,129 @@ static inline uint64_t decode_bytes(CPUState *cpu, struct x86_decode *decode,\n VM_PANIC_EX(\"%s invalid size %d\\n\", __func__, size);\n break;\n }\n- addr_t va = linear_rip(cpu, RIP(cpu)) + decode->len;\n- vmx_read_mem(cpu, &val, va, size);\n+ addr_t va = linear_rip(ENV_GET_CPU(env), RIP(env)) + decode->len;\n+ vmx_read_mem(ENV_GET_CPU(env), &val, va, size);\n decode->len += size;\n \n return val;\n }\n \n-static inline uint8_t decode_byte(CPUState *cpu, struct x86_decode *decode)\n+static inline uint8_t decode_byte(CPUX86State *env, struct x86_decode *decode)\n {\n- return (uint8_t)decode_bytes(cpu, decode, 1);\n+ return (uint8_t)decode_bytes(env, decode, 1);\n }\n \n-static inline uint16_t decode_word(CPUState *cpu, struct x86_decode *decode)\n+static inline uint16_t decode_word(CPUX86State *env, struct x86_decode *decode)\n {\n- return (uint16_t)decode_bytes(cpu, decode, 2);\n+ return (uint16_t)decode_bytes(env, decode, 2);\n }\n \n-static inline uint32_t decode_dword(CPUState *cpu, struct x86_decode *decode)\n+static inline uint32_t decode_dword(CPUX86State *env, struct x86_decode *decode)\n {\n- return (uint32_t)decode_bytes(cpu, decode, 4);\n+ return (uint32_t)decode_bytes(env, decode, 4);\n }\n \n-static inline uint64_t decode_qword(CPUState *cpu, struct x86_decode *decode)\n+static inline uint64_t decode_qword(CPUX86State *env, struct x86_decode *decode)\n {\n- return decode_bytes(cpu, decode, 8);\n+ return decode_bytes(env, decode, 8);\n }\n \n-static void decode_modrm_rm(CPUState *cpu, struct x86_decode *decode,\n+static void decode_modrm_rm(CPUX86State *env, struct x86_decode *decode,\n struct x86_decode_op *op)\n {\n op->type = X86_VAR_RM;\n }\n \n-static void decode_modrm_reg(CPUState *cpu, struct x86_decode *decode,\n+static void decode_modrm_reg(CPUX86State *env, struct x86_decode *decode,\n struct x86_decode_op *op)\n {\n op->type = X86_VAR_REG;\n op->reg = decode->modrm.reg;\n- op->ptr = get_reg_ref(cpu, op->reg, decode->rex.r, decode->operand_size);\n+ op->ptr = get_reg_ref(env, op->reg, decode->rex.r, decode->operand_size);\n }\n \n-static void decode_rax(CPUState *cpu, struct x86_decode *decode,\n+static void decode_rax(CPUX86State *env, struct x86_decode *decode,\n struct x86_decode_op *op)\n {\n op->type = X86_VAR_REG;\n op->reg = REG_RAX;\n- op->ptr = get_reg_ref(cpu, op->reg, 0, decode->operand_size);\n+ op->ptr = 
get_reg_ref(env, op->reg, 0, decode->operand_size);\n }\n \n-static inline void decode_immediate(CPUState *cpu, struct x86_decode *decode,\n+static inline void decode_immediate(CPUX86State *env, struct x86_decode *decode,\n struct x86_decode_op *var, int size)\n {\n var->type = X86_VAR_IMMEDIATE;\n var->size = size;\n switch (size) {\n case 1:\n- var->val = decode_byte(cpu, decode);\n+ var->val = decode_byte(env, decode);\n break;\n case 2:\n- var->val = decode_word(cpu, decode);\n+ var->val = decode_word(env, decode);\n break;\n case 4:\n- var->val = decode_dword(cpu, decode);\n+ var->val = decode_dword(env, decode);\n break;\n case 8:\n- var->val = decode_qword(cpu, decode);\n+ var->val = decode_qword(env, decode);\n break;\n default:\n VM_PANIC_EX(\"bad size %d\\n\", size);\n }\n }\n \n-static void decode_imm8(CPUState *cpu, struct x86_decode *decode,\n+static void decode_imm8(CPUX86State *env, struct x86_decode *decode,\n struct x86_decode_op *op)\n {\n- decode_immediate(cpu, decode, op, 1);\n+ decode_immediate(env, decode, op, 1);\n op->type = X86_VAR_IMMEDIATE;\n }\n \n-static void decode_imm8_signed(CPUState *cpu, struct x86_decode *decode,\n+static void decode_imm8_signed(CPUX86State *env, struct x86_decode *decode,\n struct x86_decode_op *op)\n {\n- decode_immediate(cpu, decode, op, 1);\n+ decode_immediate(env, decode, op, 1);\n op->val = sign(op->val, 1);\n op->type = X86_VAR_IMMEDIATE;\n }\n \n-static void decode_imm16(CPUState *cpu, struct x86_decode *decode,\n+static void decode_imm16(CPUX86State *env, struct x86_decode *decode,\n struct x86_decode_op *op)\n {\n- decode_immediate(cpu, decode, op, 2);\n+ decode_immediate(env, decode, op, 2);\n op->type = X86_VAR_IMMEDIATE;\n }\n \n \n-static void decode_imm(CPUState *cpu, struct x86_decode *decode,\n+static void decode_imm(CPUX86State *env, struct x86_decode *decode,\n struct x86_decode_op *op)\n {\n if (8 == decode->operand_size) {\n- decode_immediate(cpu, decode, op, 4);\n+ decode_immediate(env, decode, op, 4);\n op->val = sign(op->val, decode->operand_size);\n } else {\n- decode_immediate(cpu, decode, op, decode->operand_size);\n+ decode_immediate(env, decode, op, decode->operand_size);\n }\n op->type = X86_VAR_IMMEDIATE;\n }\n \n-static void decode_imm_signed(CPUState *cpu, struct x86_decode *decode,\n+static void decode_imm_signed(CPUX86State *env, struct x86_decode *decode,\n struct x86_decode_op *op)\n {\n- decode_immediate(cpu, decode, op, decode->operand_size);\n+ decode_immediate(env, decode, op, decode->operand_size);\n op->val = sign(op->val, decode->operand_size);\n op->type = X86_VAR_IMMEDIATE;\n }\n \n-static void decode_imm_1(CPUState *cpu, struct x86_decode *decode,\n+static void decode_imm_1(CPUX86State *env, struct x86_decode *decode,\n struct x86_decode_op *op)\n {\n op->type = X86_VAR_IMMEDIATE;\n op->val = 1;\n }\n \n-static void decode_imm_0(CPUState *cpu, struct x86_decode *decode,\n+static void decode_imm_0(CPUX86State *env, struct x86_decode *decode,\n struct x86_decode_op *op)\n {\n op->type = X86_VAR_IMMEDIATE;\n@@ -205,7 +205,7 @@ static void decode_imm_0(CPUState *cpu, struct x86_decode *decode,\n }\n \n \n-static void decode_pushseg(CPUState *cpu, struct x86_decode *decode)\n+static void decode_pushseg(CPUX86State *env, struct x86_decode *decode)\n {\n uint8_t op = (decode->opcode_len > 1) ? 
decode->opcode[1] : decode->opcode[0];\n \n@@ -232,7 +232,7 @@ static void decode_pushseg(CPUState *cpu, struct x86_decode *decode)\n }\n }\n \n-static void decode_popseg(CPUState *cpu, struct x86_decode *decode)\n+static void decode_popseg(CPUX86State *env, struct x86_decode *decode)\n {\n uint8_t op = (decode->opcode_len > 1) ? decode->opcode[1] : decode->opcode[0];\n \n@@ -259,23 +259,23 @@ static void decode_popseg(CPUState *cpu, struct x86_decode *decode)\n }\n }\n \n-static void decode_incgroup(CPUState *cpu, struct x86_decode *decode)\n+static void decode_incgroup(CPUX86State *env, struct x86_decode *decode)\n {\n decode->op[0].type = X86_VAR_REG;\n decode->op[0].reg = decode->opcode[0] - 0x40;\n- decode->op[0].ptr = get_reg_ref(cpu, decode->op[0].reg, decode->rex.b,\n+ decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.b,\n decode->operand_size);\n }\n \n-static void decode_decgroup(CPUState *cpu, struct x86_decode *decode)\n+static void decode_decgroup(CPUX86State *env, struct x86_decode *decode)\n {\n decode->op[0].type = X86_VAR_REG;\n decode->op[0].reg = decode->opcode[0] - 0x48;\n- decode->op[0].ptr = get_reg_ref(cpu, decode->op[0].reg, decode->rex.b,\n+ decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.b,\n decode->operand_size);\n }\n \n-static void decode_incgroup2(CPUState *cpu, struct x86_decode *decode)\n+static void decode_incgroup2(CPUX86State *env, struct x86_decode *decode)\n {\n if (!decode->modrm.reg) {\n decode->cmd = X86_DECODE_CMD_INC;\n@@ -284,36 +284,36 @@ static void decode_incgroup2(CPUState *cpu, struct x86_decode *decode)\n }\n }\n \n-static void decode_pushgroup(CPUState *cpu, struct x86_decode *decode)\n+static void decode_pushgroup(CPUX86State *env, struct x86_decode *decode)\n {\n decode->op[0].type = X86_VAR_REG;\n decode->op[0].reg = decode->opcode[0] - 0x50;\n- decode->op[0].ptr = get_reg_ref(cpu, decode->op[0].reg, decode->rex.b,\n+ decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.b,\n decode->operand_size);\n }\n \n-static void decode_popgroup(CPUState *cpu, struct x86_decode *decode)\n+static void decode_popgroup(CPUX86State *env, struct x86_decode *decode)\n {\n decode->op[0].type = X86_VAR_REG;\n decode->op[0].reg = decode->opcode[0] - 0x58;\n- decode->op[0].ptr = get_reg_ref(cpu, decode->op[0].reg, decode->rex.b,\n+ decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.b,\n decode->operand_size);\n }\n \n-static void decode_jxx(CPUState *cpu, struct x86_decode *decode)\n+static void decode_jxx(CPUX86State *env, struct x86_decode *decode)\n {\n- decode->displacement = decode_bytes(cpu, decode, decode->operand_size);\n+ decode->displacement = decode_bytes(env, decode, decode->operand_size);\n decode->displacement_size = decode->operand_size;\n }\n \n-static void decode_farjmp(CPUState *cpu, struct x86_decode *decode)\n+static void decode_farjmp(CPUX86State *env, struct x86_decode *decode)\n {\n decode->op[0].type = X86_VAR_IMMEDIATE;\n- decode->op[0].val = decode_bytes(cpu, decode, decode->operand_size);\n- decode->displacement = decode_word(cpu, decode);\n+ decode->op[0].val = decode_bytes(env, decode, decode->operand_size);\n+ decode->displacement = decode_word(env, decode);\n }\n \n-static void decode_addgroup(CPUState *cpu, struct x86_decode *decode)\n+static void decode_addgroup(CPUX86State *env, struct x86_decode *decode)\n {\n enum x86_decode_cmd group[] = {\n X86_DECODE_CMD_ADD,\n@@ -328,7 +328,7 @@ static void decode_addgroup(CPUState *cpu, struct x86_decode *decode)\n 
decode->cmd = group[decode->modrm.reg];\n }\n \n-static void decode_rotgroup(CPUState *cpu, struct x86_decode *decode)\n+static void decode_rotgroup(CPUX86State *env, struct x86_decode *decode)\n {\n enum x86_decode_cmd group[] = {\n X86_DECODE_CMD_ROL,\n@@ -343,7 +343,7 @@ static void decode_rotgroup(CPUState *cpu, struct x86_decode *decode)\n decode->cmd = group[decode->modrm.reg];\n }\n \n-static void decode_f7group(CPUState *cpu, struct x86_decode *decode)\n+static void decode_f7group(CPUX86State *env, struct x86_decode *decode)\n {\n enum x86_decode_cmd group[] = {\n X86_DECODE_CMD_TST,\n@@ -356,12 +356,12 @@ static void decode_f7group(CPUState *cpu, struct x86_decode *decode)\n X86_DECODE_CMD_IDIV\n };\n decode->cmd = group[decode->modrm.reg];\n- decode_modrm_rm(cpu, decode, &decode->op[0]);\n+ decode_modrm_rm(env, decode, &decode->op[0]);\n \n switch (decode->modrm.reg) {\n case 0:\n case 1:\n- decode_imm(cpu, decode, &decode->op[1]);\n+ decode_imm(env, decode, &decode->op[1]);\n break;\n case 2:\n break;\n@@ -374,45 +374,45 @@ static void decode_f7group(CPUState *cpu, struct x86_decode *decode)\n }\n }\n \n-static void decode_xchgroup(CPUState *cpu, struct x86_decode *decode)\n+static void decode_xchgroup(CPUX86State *env, struct x86_decode *decode)\n {\n decode->op[0].type = X86_VAR_REG;\n decode->op[0].reg = decode->opcode[0] - 0x90;\n- decode->op[0].ptr = get_reg_ref(cpu, decode->op[0].reg, decode->rex.b,\n+ decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.b,\n decode->operand_size);\n }\n \n-static void decode_movgroup(CPUState *cpu, struct x86_decode *decode)\n+static void decode_movgroup(CPUX86State *env, struct x86_decode *decode)\n {\n decode->op[0].type = X86_VAR_REG;\n decode->op[0].reg = decode->opcode[0] - 0xb8;\n- decode->op[0].ptr = get_reg_ref(cpu, decode->op[0].reg, decode->rex.b,\n+ decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.b,\n decode->operand_size);\n- decode_immediate(cpu, decode, &decode->op[1], decode->operand_size);\n+ decode_immediate(env, decode, &decode->op[1], decode->operand_size);\n }\n \n-static void fetch_moffs(CPUState *cpu, struct x86_decode *decode,\n+static void fetch_moffs(CPUX86State *env, struct x86_decode *decode,\n struct x86_decode_op *op)\n {\n op->type = X86_VAR_OFFSET;\n- op->ptr = decode_bytes(cpu, decode, decode->addressing_size);\n+ op->ptr = decode_bytes(env, decode, decode->addressing_size);\n }\n \n-static void decode_movgroup8(CPUState *cpu, struct x86_decode *decode)\n+static void decode_movgroup8(CPUX86State *env, struct x86_decode *decode)\n {\n decode->op[0].type = X86_VAR_REG;\n decode->op[0].reg = decode->opcode[0] - 0xb0;\n- decode->op[0].ptr = get_reg_ref(cpu, decode->op[0].reg, decode->rex.b,\n+ decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.b,\n decode->operand_size);\n- decode_immediate(cpu, decode, &decode->op[1], decode->operand_size);\n+ decode_immediate(env, decode, &decode->op[1], decode->operand_size);\n }\n \n-static void decode_rcx(CPUState *cpu, struct x86_decode *decode,\n+static void decode_rcx(CPUX86State *env, struct x86_decode *decode,\n struct x86_decode_op *op)\n {\n op->type = X86_VAR_REG;\n op->reg = REG_RCX;\n- op->ptr = get_reg_ref(cpu, op->reg, decode->rex.b, decode->operand_size);\n+ op->ptr = get_reg_ref(env, op->reg, decode->rex.b, decode->operand_size);\n }\n \n struct decode_tbl {\n@@ -420,15 +420,15 @@ struct decode_tbl {\n enum x86_decode_cmd cmd;\n uint8_t operand_size;\n bool is_modrm;\n- void (*decode_op1)(CPUState *cpu, 
struct x86_decode *decode,\n+ void (*decode_op1)(CPUX86State *env, struct x86_decode *decode,\n struct x86_decode_op *op1);\n- void (*decode_op2)(CPUState *cpu, struct x86_decode *decode,\n+ void (*decode_op2)(CPUX86State *env, struct x86_decode *decode,\n struct x86_decode_op *op2);\n- void (*decode_op3)(CPUState *cpu, struct x86_decode *decode,\n+ void (*decode_op3)(CPUX86State *env, struct x86_decode *decode,\n struct x86_decode_op *op3);\n- void (*decode_op4)(CPUState *cpu, struct x86_decode *decode,\n+ void (*decode_op4)(CPUX86State *env, struct x86_decode *decode,\n struct x86_decode_op *op4);\n- void (*decode_postfix)(CPUState *cpu, struct x86_decode *decode);\n+ void (*decode_postfix)(CPUX86State *env, struct x86_decode *decode);\n addr_t flags_mask;\n };\n \n@@ -440,11 +440,11 @@ struct decode_x87_tbl {\n uint8_t operand_size;\n bool rev;\n bool pop;\n- void (*decode_op1)(CPUState *cpu, struct x86_decode *decode,\n+ void (*decode_op1)(CPUX86State *env, struct x86_decode *decode,\n struct x86_decode_op *op1);\n- void (*decode_op2)(CPUState *cpu, struct x86_decode *decode,\n+ void (*decode_op2)(CPUX86State *env, struct x86_decode *decode,\n struct x86_decode_op *op2);\n- void (*decode_postfix)(CPUState *cpu, struct x86_decode *decode);\n+ void (*decode_postfix)(CPUX86State *env, struct x86_decode *decode);\n addr_t flags_mask;\n };\n \n@@ -455,7 +455,7 @@ struct decode_tbl _decode_tbl1[255];\n struct decode_tbl _decode_tbl2[255];\n struct decode_x87_tbl _decode_tbl3[255];\n \n-static void decode_x87_ins(CPUState *cpu, struct x86_decode *decode)\n+static void decode_x87_ins(CPUX86State *env, struct x86_decode *decode)\n {\n struct decode_x87_tbl *decoder;\n \n@@ -475,13 +475,13 @@ static void decode_x87_ins(CPUState *cpu, struct x86_decode *decode)\n decode->frev = decoder->rev;\n \n if (decoder->decode_op1) {\n- decoder->decode_op1(cpu, decode, &decode->op[0]);\n+ decoder->decode_op1(env, decode, &decode->op[0]);\n }\n if (decoder->decode_op2) {\n- decoder->decode_op2(cpu, decode, &decode->op[1]);\n+ decoder->decode_op2(env, decode, &decode->op[1]);\n }\n if (decoder->decode_postfix) {\n- decoder->decode_postfix(cpu, decode);\n+ decoder->decode_postfix(env, decode);\n }\n \n VM_PANIC_ON_EX(!decode->cmd, \"x87 opcode %x %x (%x %x) not decoded\\n\",\n@@ -489,7 +489,7 @@ static void decode_x87_ins(CPUState *cpu, struct x86_decode *decode)\n decoder->modrm_mod);\n }\n \n-static void decode_ffgroup(CPUState *cpu, struct x86_decode *decode)\n+static void decode_ffgroup(CPUX86State *env, struct x86_decode *decode)\n {\n enum x86_decode_cmd group[] = {\n X86_DECODE_CMD_INC,\n@@ -508,8 +508,9 @@ static void decode_ffgroup(CPUState *cpu, struct x86_decode *decode)\n }\n }\n \n-static void decode_sldtgroup(CPUState *cpu, struct x86_decode *decode)\n+static void decode_sldtgroup(CPUX86State *env, struct x86_decode *decode)\n {\n+\n enum x86_decode_cmd group[] = {\n X86_DECODE_CMD_SLDT,\n X86_DECODE_CMD_STR,\n@@ -521,11 +522,11 @@ static void decode_sldtgroup(CPUState *cpu, struct x86_decode *decode)\n X86_DECODE_CMD_INVL\n };\n decode->cmd = group[decode->modrm.reg];\n- printf(\"%llx: decode_sldtgroup: %d\\n\", cpu->hvf_x86->fetch_rip,\n+ printf(\"%llx: decode_sldtgroup: %d\\n\", env->hvf_emul->fetch_rip,\n decode->modrm.reg);\n }\n \n-static void decode_lidtgroup(CPUState *cpu, struct x86_decode *decode)\n+static void decode_lidtgroup(CPUX86State *env, struct x86_decode *decode)\n {\n enum x86_decode_cmd group[] = {\n X86_DECODE_CMD_SGDT,\n@@ -544,7 +545,7 @@ static void 
decode_lidtgroup(CPUState *cpu, struct x86_decode *decode)\n }\n }\n \n-static void decode_btgroup(CPUState *cpu, struct x86_decode *decode)\n+static void decode_btgroup(CPUX86State *env, struct x86_decode *decode)\n {\n enum x86_decode_cmd group[] = {\n X86_DECODE_CMD_INVL,\n@@ -559,37 +560,37 @@ static void decode_btgroup(CPUState *cpu, struct x86_decode *decode)\n decode->cmd = group[decode->modrm.reg];\n }\n \n-static void decode_x87_general(CPUState *cpu, struct x86_decode *decode)\n+static void decode_x87_general(CPUX86State *env, struct x86_decode *decode)\n {\n decode->is_fpu = true;\n }\n \n-static void decode_x87_modrm_floatp(CPUState *cpu, struct x86_decode *decode,\n+static void decode_x87_modrm_floatp(CPUX86State *env, struct x86_decode *decode,\n struct x86_decode_op *op)\n {\n op->type = X87_VAR_FLOATP;\n }\n \n-static void decode_x87_modrm_intp(CPUState *cpu, struct x86_decode *decode,\n+static void decode_x87_modrm_intp(CPUX86State *env, struct x86_decode *decode,\n struct x86_decode_op *op)\n {\n op->type = X87_VAR_INTP;\n }\n \n-static void decode_x87_modrm_bytep(CPUState *cpu, struct x86_decode *decode,\n+static void decode_x87_modrm_bytep(CPUX86State *env, struct x86_decode *decode,\n struct x86_decode_op *op)\n {\n op->type = X87_VAR_BYTEP;\n }\n \n-static void decode_x87_modrm_st0(CPUState *cpu, struct x86_decode *decode,\n+static void decode_x87_modrm_st0(CPUX86State *env, struct x86_decode *decode,\n struct x86_decode_op *op)\n {\n op->type = X87_VAR_REG;\n op->reg = 0;\n }\n \n-static void decode_decode_x87_modrm_st0(CPUState *cpu,\n+static void decode_decode_x87_modrm_st0(CPUX86State *env,\n struct x86_decode *decode,\n struct x86_decode_op *op)\n {\n@@ -598,16 +599,16 @@ static void decode_decode_x87_modrm_st0(CPUState *cpu,\n }\n \n \n-static void decode_aegroup(CPUState *cpu, struct x86_decode *decode)\n+static void decode_aegroup(CPUX86State *env, struct x86_decode *decode)\n {\n decode->is_fpu = true;\n switch (decode->modrm.reg) {\n case 0:\n decode->cmd = X86_DECODE_CMD_FXSAVE;\n- decode_x87_modrm_bytep(cpu, decode, &decode->op[0]);\n+ decode_x87_modrm_bytep(env, decode, &decode->op[0]);\n break;\n case 1:\n- decode_x87_modrm_bytep(cpu, decode, &decode->op[0]);\n+ decode_x87_modrm_bytep(env, decode, &decode->op[0]);\n decode->cmd = X86_DECODE_CMD_FXRSTOR;\n break;\n case 5:\n@@ -634,15 +635,15 @@ static void decode_aegroup(CPUState *cpu, struct x86_decode *decode)\n }\n }\n \n-static void decode_bswap(CPUState *cpu, struct x86_decode *decode)\n+static void decode_bswap(CPUX86State *env, struct x86_decode *decode)\n {\n decode->op[0].type = X86_VAR_REG;\n decode->op[0].reg = decode->opcode[1] - 0xc8;\n- decode->op[0].ptr = get_reg_ref(cpu, decode->op[0].reg, decode->rex.b,\n+ decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.b,\n decode->operand_size);\n }\n \n-static void decode_d9_4(CPUState *cpu, struct x86_decode *decode)\n+static void decode_d9_4(CPUX86State *env, struct x86_decode *decode)\n {\n switch (decode->modrm.modrm) {\n case 0xe0:\n@@ -665,7 +666,7 @@ static void decode_d9_4(CPUState *cpu, struct x86_decode *decode)\n }\n }\n \n-static void decode_db_4(CPUState *cpu, struct x86_decode *decode)\n+static void decode_db_4(CPUX86State *env, struct x86_decode *decode)\n {\n switch (decode->modrm.modrm) {\n case 0xe0:\n@@ -1633,7 +1634,7 @@ struct decode_x87_tbl _x87_inst[] = {\n decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},\n };\n \n-void calc_modrm_operand16(CPUState *cpu, struct x86_decode *decode,\n+void 
calc_modrm_operand16(CPUX86State *env, struct x86_decode *decode,\n struct x86_decode_op *op)\n {\n addr_t ptr = 0;\n@@ -1650,42 +1651,42 @@ void calc_modrm_operand16(CPUState *cpu, struct x86_decode *decode,\n \n switch (decode->modrm.rm) {\n case 0:\n- ptr += BX(cpu) + SI(cpu);\n+ ptr += BX(env) + SI(env);\n break;\n case 1:\n- ptr += BX(cpu) + DI(cpu);\n+ ptr += BX(env) + DI(env);\n break;\n case 2:\n- ptr += BP(cpu) + SI(cpu);\n+ ptr += BP(env) + SI(env);\n seg = REG_SEG_SS;\n break;\n case 3:\n- ptr += BP(cpu) + DI(cpu);\n+ ptr += BP(env) + DI(env);\n seg = REG_SEG_SS;\n break;\n case 4:\n- ptr += SI(cpu);\n+ ptr += SI(env);\n break;\n case 5:\n- ptr += DI(cpu);\n+ ptr += DI(env);\n break;\n case 6:\n- ptr += BP(cpu);\n+ ptr += BP(env);\n seg = REG_SEG_SS;\n break;\n case 7:\n- ptr += BX(cpu);\n+ ptr += BX(env);\n break;\n }\n calc_addr:\n if (X86_DECODE_CMD_LEA == decode->cmd) {\n op->ptr = (uint16_t)ptr;\n } else {\n- op->ptr = decode_linear_addr(cpu, decode, (uint16_t)ptr, seg);\n+ op->ptr = decode_linear_addr(env, decode, (uint16_t)ptr, seg);\n }\n }\n \n-addr_t get_reg_ref(CPUState *cpu, int reg, int is_extended, int size)\n+addr_t get_reg_ref(CPUX86State *env, int reg, int is_extended, int size)\n {\n addr_t ptr = 0;\n int which = 0;\n@@ -1699,28 +1700,28 @@ addr_t get_reg_ref(CPUState *cpu, int reg, int is_extended, int size)\n case 1:\n if (is_extended || reg < 4) {\n which = 1;\n- ptr = (addr_t)&RL(cpu, reg);\n+ ptr = (addr_t)&RL(env, reg);\n } else {\n which = 2;\n- ptr = (addr_t)&RH(cpu, reg - 4);\n+ ptr = (addr_t)&RH(env, reg - 4);\n }\n break;\n default:\n which = 3;\n- ptr = (addr_t)&RRX(cpu, reg);\n+ ptr = (addr_t)&RRX(env, reg);\n break;\n }\n return ptr;\n }\n \n-addr_t get_reg_val(CPUState *cpu, int reg, int is_extended, int size)\n+addr_t get_reg_val(CPUX86State *env, int reg, int is_extended, int size)\n {\n addr_t val = 0;\n- memcpy(&val, (void *)get_reg_ref(cpu, reg, is_extended, size), size);\n+ memcpy(&val, (void *)get_reg_ref(env, reg, is_extended, size), size);\n return val;\n }\n \n-static addr_t get_sib_val(CPUState *cpu, struct x86_decode *decode,\n+static addr_t get_sib_val(CPUX86State *env, struct x86_decode *decode,\n x86_reg_segment *sel)\n {\n addr_t base = 0;\n@@ -1738,7 +1739,7 @@ static addr_t get_sib_val(CPUState *cpu, struct x86_decode *decode,\n if (REG_RSP == base_reg || REG_RBP == base_reg) {\n *sel = REG_SEG_SS;\n }\n- base = get_reg_val(cpu, decode->sib.base, decode->rex.b, addr_size);\n+ base = get_reg_val(env, decode->sib.base, decode->rex.b, addr_size);\n }\n \n if (decode->rex.x) {\n@@ -1746,13 +1747,13 @@ static addr_t get_sib_val(CPUState *cpu, struct x86_decode *decode,\n }\n \n if (index_reg != REG_RSP) {\n- scaled_index = get_reg_val(cpu, index_reg, decode->rex.x, addr_size) <<\n+ scaled_index = get_reg_val(env, index_reg, decode->rex.x, addr_size) <<\n decode->sib.scale;\n }\n return base + scaled_index;\n }\n \n-void calc_modrm_operand32(CPUState *cpu, struct x86_decode *decode,\n+void calc_modrm_operand32(CPUX86State *env, struct x86_decode *decode,\n struct x86_decode_op *op)\n {\n x86_reg_segment seg = REG_SEG_DS;\n@@ -1764,10 +1765,10 @@ void calc_modrm_operand32(CPUState *cpu, struct x86_decode *decode,\n }\n \n if (4 == decode->modrm.rm) {\n- ptr += get_sib_val(cpu, decode, &seg);\n+ ptr += get_sib_val(env, decode, &seg);\n } else if (!decode->modrm.mod && 5 == decode->modrm.rm) {\n- if (x86_is_long_mode(cpu)) {\n- ptr += RIP(cpu) + decode->len;\n+ if (x86_is_long_mode(ENV_GET_CPU(env))) {\n+ ptr += RIP(env) + decode->len;\n 
} else {\n ptr = decode->displacement;\n }\n@@ -1775,17 +1776,17 @@ void calc_modrm_operand32(CPUState *cpu, struct x86_decode *decode,\n if (REG_RBP == decode->modrm.rm || REG_RSP == decode->modrm.rm) {\n seg = REG_SEG_SS;\n }\n- ptr += get_reg_val(cpu, decode->modrm.rm, decode->rex.b, addr_size);\n+ ptr += get_reg_val(env, decode->modrm.rm, decode->rex.b, addr_size);\n }\n \n if (X86_DECODE_CMD_LEA == decode->cmd) {\n op->ptr = (uint32_t)ptr;\n } else {\n- op->ptr = decode_linear_addr(cpu, decode, (uint32_t)ptr, seg);\n+ op->ptr = decode_linear_addr(env, decode, (uint32_t)ptr, seg);\n }\n }\n \n-void calc_modrm_operand64(CPUState *cpu, struct x86_decode *decode,\n+void calc_modrm_operand64(CPUX86State *env, struct x86_decode *decode,\n struct x86_decode_op *op)\n {\n x86_reg_segment seg = REG_SEG_DS;\n@@ -1800,41 +1801,41 @@ void calc_modrm_operand64(CPUState *cpu, struct x86_decode *decode,\n }\n \n if (4 == rm) {\n- ptr = get_sib_val(cpu, decode, &seg) + offset;\n+ ptr = get_sib_val(env, decode, &seg) + offset;\n } else if (0 == mod && 5 == rm) {\n- ptr = RIP(cpu) + decode->len + (int32_t) offset;\n+ ptr = RIP(env) + decode->len + (int32_t) offset;\n } else {\n- ptr = get_reg_val(cpu, src, decode->rex.b, 8) + (int64_t) offset;\n+ ptr = get_reg_val(env, src, decode->rex.b, 8) + (int64_t) offset;\n }\n \n if (X86_DECODE_CMD_LEA == decode->cmd) {\n op->ptr = ptr;\n } else {\n- op->ptr = decode_linear_addr(cpu, decode, ptr, seg);\n+ op->ptr = decode_linear_addr(env, decode, ptr, seg);\n }\n }\n \n \n-void calc_modrm_operand(CPUState *cpu, struct x86_decode *decode,\n+void calc_modrm_operand(CPUX86State *env, struct x86_decode *decode,\n struct x86_decode_op *op)\n {\n if (3 == decode->modrm.mod) {\n op->reg = decode->modrm.reg;\n op->type = X86_VAR_REG;\n- op->ptr = get_reg_ref(cpu, decode->modrm.rm, decode->rex.b,\n+ op->ptr = get_reg_ref(env, decode->modrm.rm, decode->rex.b,\n decode->operand_size);\n return;\n }\n \n switch (decode->addressing_size) {\n case 2:\n- calc_modrm_operand16(cpu, decode, op);\n+ calc_modrm_operand16(env, decode, op);\n break;\n case 4:\n- calc_modrm_operand32(cpu, decode, op);\n+ calc_modrm_operand32(env, decode, op);\n break;\n case 8:\n- calc_modrm_operand64(cpu, decode, op);\n+ calc_modrm_operand64(env, decode, op);\n break;\n default:\n VM_PANIC_EX(\"unsupported address size %d\\n\", decode->addressing_size);\n@@ -1842,10 +1843,10 @@ void calc_modrm_operand(CPUState *cpu, struct x86_decode *decode,\n }\n }\n \n-static void decode_prefix(CPUState *cpu, struct x86_decode *decode)\n+static void decode_prefix(CPUX86State *env, struct x86_decode *decode)\n {\n while (1) {\n- uint8_t byte = decode_byte(cpu, decode);\n+ uint8_t byte = decode_byte(env, decode);\n switch (byte) {\n case PREFIX_LOCK:\n decode->lock = byte;\n@@ -1869,7 +1870,7 @@ static void decode_prefix(CPUState *cpu, struct x86_decode *decode)\n decode->addr_size_override = byte;\n break;\n case PREFIX_REX ... 
(PREFIX_REX + 0xf):\n- if (x86_is_long_mode(cpu)) {\n+ if (x86_is_long_mode(ENV_GET_CPU(env))) {\n decode->rex.rex = byte;\n break;\n }\n@@ -1881,19 +1882,19 @@ static void decode_prefix(CPUState *cpu, struct x86_decode *decode)\n }\n }\n \n-void set_addressing_size(CPUState *cpu, struct x86_decode *decode)\n+void set_addressing_size(CPUX86State *env, struct x86_decode *decode)\n {\n decode->addressing_size = -1;\n- if (x86_is_real(cpu) || x86_is_v8086(cpu)) {\n+ if (x86_is_real(ENV_GET_CPU(env)) || x86_is_v8086(ENV_GET_CPU(env))) {\n if (decode->addr_size_override) {\n decode->addressing_size = 4;\n } else {\n decode->addressing_size = 2;\n }\n- } else if (!x86_is_long_mode(cpu)) {\n+ } else if (!x86_is_long_mode(ENV_GET_CPU(env))) {\n /* protected */\n struct vmx_segment cs;\n- vmx_read_segment_descriptor(cpu, &cs, REG_SEG_CS);\n+ vmx_read_segment_descriptor(ENV_GET_CPU(env), &cs, REG_SEG_CS);\n /* check db */\n if ((cs.ar >> 14) & 1) {\n if (decode->addr_size_override) {\n@@ -1918,19 +1919,19 @@ void set_addressing_size(CPUState *cpu, struct x86_decode *decode)\n }\n }\n \n-void set_operand_size(CPUState *cpu, struct x86_decode *decode)\n+void set_operand_size(CPUX86State *env, struct x86_decode *decode)\n {\n decode->operand_size = -1;\n- if (x86_is_real(cpu) || x86_is_v8086(cpu)) {\n+ if (x86_is_real(ENV_GET_CPU(env)) || x86_is_v8086(ENV_GET_CPU(env))) {\n if (decode->op_size_override) {\n decode->operand_size = 4;\n } else {\n decode->operand_size = 2;\n }\n- } else if (!x86_is_long_mode(cpu)) {\n+ } else if (!x86_is_long_mode(ENV_GET_CPU(env))) {\n /* protected */\n struct vmx_segment cs;\n- vmx_read_segment_descriptor(cpu, &cs, REG_SEG_CS);\n+ vmx_read_segment_descriptor(ENV_GET_CPU(env), &cs, REG_SEG_CS);\n /* check db */\n if ((cs.ar >> 14) & 1) {\n if (decode->op_size_override) {\n@@ -1959,11 +1960,11 @@ void set_operand_size(CPUState *cpu, struct x86_decode *decode)\n }\n }\n \n-static void decode_sib(CPUState *cpu, struct x86_decode *decode)\n+static void decode_sib(CPUX86State *env, struct x86_decode *decode)\n {\n if ((decode->modrm.mod != 3) && (4 == decode->modrm.rm) &&\n (decode->addressing_size != 2)) {\n- decode->sib.sib = decode_byte(cpu, decode);\n+ decode->sib.sib = decode_byte(env, decode);\n decode->sib_present = true;\n }\n }\n@@ -1984,7 +1985,7 @@ int disp32_tbl[4][8] = {\n {0, 0, 0, 0, 0, 0, 0, 0}\n };\n \n-static inline void decode_displacement(CPUState *cpu, struct x86_decode *decode)\n+static inline void decode_displacement(CPUX86State *env, struct x86_decode *decode)\n {\n int addressing_size = decode->addressing_size;\n int mod = decode->modrm.mod;\n@@ -1995,7 +1996,7 @@ static inline void decode_displacement(CPUState *cpu, struct x86_decode *decode)\n case 2:\n decode->displacement_size = disp16_tbl[mod][rm];\n if (decode->displacement_size) {\n- decode->displacement = (uint16_t)decode_bytes(cpu, decode,\n+ decode->displacement = (uint16_t)decode_bytes(env, decode,\n decode->displacement_size);\n }\n break;\n@@ -2010,23 +2011,23 @@ static inline void decode_displacement(CPUState *cpu, struct x86_decode *decode)\n }\n \n if (decode->displacement_size) {\n- decode->displacement = (uint32_t)decode_bytes(cpu, decode,\n+ decode->displacement = (uint32_t)decode_bytes(env, decode,\n decode->displacement_size);\n }\n break;\n }\n }\n \n-static inline void decode_modrm(CPUState *cpu, struct x86_decode *decode)\n+static inline void decode_modrm(CPUX86State *env, struct x86_decode *decode)\n {\n- decode->modrm.modrm = decode_byte(cpu, decode);\n+ decode->modrm.modrm 
= decode_byte(env, decode);\n decode->is_modrm = true;\n \n- decode_sib(cpu, decode);\n- decode_displacement(cpu, decode);\n+ decode_sib(env, decode);\n+ decode_displacement(env, decode);\n }\n \n-static inline void decode_opcode_general(CPUState *cpu,\n+static inline void decode_opcode_general(CPUX86State *env,\n struct x86_decode *decode,\n uint8_t opcode,\n struct decode_tbl *inst_decoder)\n@@ -2038,69 +2039,69 @@ static inline void decode_opcode_general(CPUState *cpu,\n decode->flags_mask = inst_decoder->flags_mask;\n \n if (inst_decoder->is_modrm) {\n- decode_modrm(cpu, decode);\n+ decode_modrm(env, decode);\n }\n if (inst_decoder->decode_op1) {\n- inst_decoder->decode_op1(cpu, decode, &decode->op[0]);\n+ inst_decoder->decode_op1(env, decode, &decode->op[0]);\n }\n if (inst_decoder->decode_op2) {\n- inst_decoder->decode_op2(cpu, decode, &decode->op[1]);\n+ inst_decoder->decode_op2(env, decode, &decode->op[1]);\n }\n if (inst_decoder->decode_op3) {\n- inst_decoder->decode_op3(cpu, decode, &decode->op[2]);\n+ inst_decoder->decode_op3(env, decode, &decode->op[2]);\n }\n if (inst_decoder->decode_op4) {\n- inst_decoder->decode_op4(cpu, decode, &decode->op[3]);\n+ inst_decoder->decode_op4(env, decode, &decode->op[3]);\n }\n if (inst_decoder->decode_postfix) {\n- inst_decoder->decode_postfix(cpu, decode);\n+ inst_decoder->decode_postfix(env, decode);\n }\n }\n \n-static inline void decode_opcode_1(CPUState *cpu, struct x86_decode *decode,\n+static inline void decode_opcode_1(CPUX86State *env, struct x86_decode *decode,\n uint8_t opcode)\n {\n struct decode_tbl *inst_decoder = &_decode_tbl1[opcode];\n- decode_opcode_general(cpu, decode, opcode, inst_decoder);\n+ decode_opcode_general(env, decode, opcode, inst_decoder);\n }\n \n \n-static inline void decode_opcode_2(CPUState *cpu, struct x86_decode *decode,\n+static inline void decode_opcode_2(CPUX86State *env, struct x86_decode *decode,\n uint8_t opcode)\n {\n struct decode_tbl *inst_decoder = &_decode_tbl2[opcode];\n- decode_opcode_general(cpu, decode, opcode, inst_decoder);\n+ decode_opcode_general(env, decode, opcode, inst_decoder);\n }\n \n-static void decode_opcodes(CPUState *cpu, struct x86_decode *decode)\n+static void decode_opcodes(CPUX86State *env, struct x86_decode *decode)\n {\n uint8_t opcode;\n \n- opcode = decode_byte(cpu, decode);\n+ opcode = decode_byte(env, decode);\n decode->opcode[decode->opcode_len++] = opcode;\n if (opcode != OPCODE_ESCAPE) {\n- decode_opcode_1(cpu, decode, opcode);\n+ decode_opcode_1(env, decode, opcode);\n } else {\n- opcode = decode_byte(cpu, decode);\n+ opcode = decode_byte(env, decode);\n decode->opcode[decode->opcode_len++] = opcode;\n- decode_opcode_2(cpu, decode, opcode);\n+ decode_opcode_2(env, decode, opcode);\n }\n }\n \n-uint32_t decode_instruction(CPUState *cpu, struct x86_decode *decode)\n+uint32_t decode_instruction(CPUX86State *env, struct x86_decode *decode)\n {\n ZERO_INIT(*decode);\n \n- decode_prefix(cpu, decode);\n- set_addressing_size(cpu, decode);\n- set_operand_size(cpu, decode);\n+ decode_prefix(env, decode);\n+ set_addressing_size(env, decode);\n+ set_operand_size(env, decode);\n \n- decode_opcodes(cpu, decode);\n+ decode_opcodes(env, decode);\n \n return decode->len;\n }\n \n-void init_decoder(CPUState *cpu)\n+void init_decoder()\n {\n int i;\n \n@@ -2156,7 +2157,7 @@ const char *decode_cmd_to_string(enum x86_decode_cmd cmd)\n return cmds[cmd];\n }\n \n-addr_t decode_linear_addr(struct CPUState *cpu, struct x86_decode *decode,\n+addr_t decode_linear_addr(CPUX86State *env, 
struct x86_decode *decode,\n addr_t addr, x86_reg_segment seg)\n {\n switch (decode->segment_override) {\n@@ -2181,5 +2182,5 @@ addr_t decode_linear_addr(struct CPUState *cpu, struct x86_decode *decode,\n default:\n break;\n }\n- return linear_addr_size(cpu, addr, decode->addressing_size, seg);\n+ return linear_addr_size(ENV_GET_CPU(env), addr, decode->addressing_size, seg);\n }\ndiff --git a/target/i386/hvf-utils/x86_decode.h b/target/i386/hvf-utils/x86_decode.h\nindex b6763e1ba1..1c41cc9456 100644\n--- a/target/i386/hvf-utils/x86_decode.h\n+++ b/target/i386/hvf-utils/x86_decode.h\n@@ -23,6 +23,7 @@\n #include <stdarg.h>\n #include \"qemu-common.h\"\n #include \"x86.h\"\n+#include \"cpu.h\"\n \n typedef enum x86_prefix {\n /* group 1 */\n@@ -304,21 +305,21 @@ typedef struct x86_decode {\n \n uint64_t sign(uint64_t val, int size);\n \n-uint32_t decode_instruction(CPUState *cpu, struct x86_decode *decode);\n+uint32_t decode_instruction(CPUX86State *env, struct x86_decode *decode);\n \n-addr_t get_reg_ref(CPUState *cpu, int reg, int is_extended, int size);\n-addr_t get_reg_val(CPUState *cpu, int reg, int is_extended, int size);\n-void calc_modrm_operand(CPUState *cpu, struct x86_decode *decode,\n+addr_t get_reg_ref(CPUX86State *env, int reg, int is_extended, int size);\n+addr_t get_reg_val(CPUX86State *env, int reg, int is_extended, int size);\n+void calc_modrm_operand(CPUX86State *env, struct x86_decode *decode,\n struct x86_decode_op *op);\n-addr_t decode_linear_addr(struct CPUState *cpu, struct x86_decode *decode,\n+addr_t decode_linear_addr(CPUX86State *env, struct x86_decode *decode,\n addr_t addr, x86_reg_segment seg);\n \n-void init_decoder(CPUState *cpu);\n-void calc_modrm_operand16(CPUState *cpu, struct x86_decode *decode,\n+void init_decoder(void);\n+void calc_modrm_operand16(CPUX86State *env, struct x86_decode *decode,\n struct x86_decode_op *op);\n-void calc_modrm_operand32(CPUState *cpu, struct x86_decode *decode,\n+void calc_modrm_operand32(CPUX86State *env, struct x86_decode *decode,\n struct x86_decode_op *op);\n-void calc_modrm_operand64(CPUState *cpu, struct x86_decode *decode,\n+void calc_modrm_operand64(CPUX86State *env, struct x86_decode *decode,\n struct x86_decode_op *op);\n-void set_addressing_size(CPUState *cpu, struct x86_decode *decode);\n-void set_operand_size(CPUState *cpu, struct x86_decode *decode);\n+void set_addressing_size(CPUX86State *env, struct x86_decode *decode);\n+void set_operand_size(CPUX86State *env, struct x86_decode *decode);\ndiff --git a/target/i386/hvf-utils/x86_emu.c b/target/i386/hvf-utils/x86_emu.c\nindex dc33cd2576..10eed3b606 100644\n--- a/target/i386/hvf-utils/x86_emu.c\n+++ b/target/i386/hvf-utils/x86_emu.c\n@@ -42,15 +42,16 @@\n #include \"x86.h\"\n #include \"x86_emu.h\"\n #include \"x86_mmu.h\"\n+#include \"x86_flags.h\"\n #include \"vmcs.h\"\n #include \"vmx.h\"\n \n void hvf_handle_io(struct CPUState *cpu, uint16_t port, void *data,\n int direction, int size, uint32_t count);\n \n-#define EXEC_2OP_LOGIC_CMD(cpu, decode, cmd, FLAGS_FUNC, save_res) \\\n+#define EXEC_2OP_LOGIC_CMD(env, decode, cmd, FLAGS_FUNC, save_res) \\\n { \\\n- fetch_operands(cpu, decode, 2, true, true, false); \\\n+ fetch_operands(env, decode, 2, true, true, false); \\\n switch (decode->operand_size) { \\\n case 1: \\\n { \\\n@@ -58,7 +59,7 @@ void hvf_handle_io(struct CPUState *cpu, uint16_t port, void *data,\n uint8_t v2 = (uint8_t)decode->op[1].val; \\\n uint8_t diff = v1 cmd v2; \\\n if (save_res) { \\\n- write_val_ext(cpu, decode->op[0].ptr, diff, 1); \\\n+ 
write_val_ext(env, decode->op[0].ptr, diff, 1); \\\n } \\\n FLAGS_FUNC##_8(diff); \\\n break; \\\n@@ -69,7 +70,7 @@ void hvf_handle_io(struct CPUState *cpu, uint16_t port, void *data,\n uint16_t v2 = (uint16_t)decode->op[1].val; \\\n uint16_t diff = v1 cmd v2; \\\n if (save_res) { \\\n- write_val_ext(cpu, decode->op[0].ptr, diff, 2); \\\n+ write_val_ext(env, decode->op[0].ptr, diff, 2); \\\n } \\\n FLAGS_FUNC##_16(diff); \\\n break; \\\n@@ -80,7 +81,7 @@ void hvf_handle_io(struct CPUState *cpu, uint16_t port, void *data,\n uint32_t v2 = (uint32_t)decode->op[1].val; \\\n uint32_t diff = v1 cmd v2; \\\n if (save_res) { \\\n- write_val_ext(cpu, decode->op[0].ptr, diff, 4); \\\n+ write_val_ext(env, decode->op[0].ptr, diff, 4); \\\n } \\\n FLAGS_FUNC##_32(diff); \\\n break; \\\n@@ -91,9 +92,9 @@ void hvf_handle_io(struct CPUState *cpu, uint16_t port, void *data,\n } \\\n \n \n-#define EXEC_2OP_ARITH_CMD(cpu, decode, cmd, FLAGS_FUNC, save_res) \\\n+#define EXEC_2OP_ARITH_CMD(env, decode, cmd, FLAGS_FUNC, save_res) \\\n { \\\n- fetch_operands(cpu, decode, 2, true, true, false); \\\n+ fetch_operands(env, decode, 2, true, true, false); \\\n switch (decode->operand_size) { \\\n case 1: \\\n { \\\n@@ -101,7 +102,7 @@ void hvf_handle_io(struct CPUState *cpu, uint16_t port, void *data,\n uint8_t v2 = (uint8_t)decode->op[1].val; \\\n uint8_t diff = v1 cmd v2; \\\n if (save_res) { \\\n- write_val_ext(cpu, decode->op[0].ptr, diff, 1); \\\n+ write_val_ext(env, decode->op[0].ptr, diff, 1); \\\n } \\\n FLAGS_FUNC##_8(v1, v2, diff); \\\n break; \\\n@@ -112,7 +113,7 @@ void hvf_handle_io(struct CPUState *cpu, uint16_t port, void *data,\n uint16_t v2 = (uint16_t)decode->op[1].val; \\\n uint16_t diff = v1 cmd v2; \\\n if (save_res) { \\\n- write_val_ext(cpu, decode->op[0].ptr, diff, 2); \\\n+ write_val_ext(env, decode->op[0].ptr, diff, 2); \\\n } \\\n FLAGS_FUNC##_16(v1, v2, diff); \\\n break; \\\n@@ -123,7 +124,7 @@ void hvf_handle_io(struct CPUState *cpu, uint16_t port, void *data,\n uint32_t v2 = (uint32_t)decode->op[1].val; \\\n uint32_t diff = v1 cmd v2; \\\n if (save_res) { \\\n- write_val_ext(cpu, decode->op[0].ptr, diff, 4); \\\n+ write_val_ext(env, decode->op[0].ptr, diff, 4); \\\n } \\\n FLAGS_FUNC##_32(v1, v2, diff); \\\n break; \\\n@@ -133,37 +134,37 @@ void hvf_handle_io(struct CPUState *cpu, uint16_t port, void *data,\n } \\\n }\n \n-addr_t read_reg(struct CPUState *cpu, int reg, int size)\n+addr_t read_reg(CPUX86State *env, int reg, int size)\n {\n switch (size) {\n case 1:\n- return cpu->hvf_x86->regs[reg].lx;\n+ return env->hvf_emul->regs[reg].lx;\n case 2:\n- return cpu->hvf_x86->regs[reg].rx;\n+ return env->hvf_emul->regs[reg].rx;\n case 4:\n- return cpu->hvf_x86->regs[reg].erx;\n+ return env->hvf_emul->regs[reg].erx;\n case 8:\n- return cpu->hvf_x86->regs[reg].rrx;\n+ return env->hvf_emul->regs[reg].rrx;\n default:\n VM_PANIC_ON(\"read_reg size\");\n }\n return 0;\n }\n \n-void write_reg(struct CPUState *cpu, int reg, addr_t val, int size)\n+void write_reg(CPUX86State *env, int reg, addr_t val, int size)\n {\n switch (size) {\n case 1:\n- cpu->hvf_x86->regs[reg].lx = val;\n+ env->hvf_emul->regs[reg].lx = val;\n break;\n case 2:\n- cpu->hvf_x86->regs[reg].rx = val;\n+ env->hvf_emul->regs[reg].rx = val;\n break;\n case 4:\n- cpu->hvf_x86->regs[reg].rrx = (uint32_t)val;\n+ env->hvf_emul->regs[reg].rrx = (uint32_t)val;\n break;\n case 8:\n- cpu->hvf_x86->regs[reg].rrx = val;\n+ env->hvf_emul->regs[reg].rrx = val;\n break;\n default:\n VM_PANIC_ON(\"write_reg size\");\n@@ -215,38 +216,36 @@ void 
write_val_to_reg(addr_t reg_ptr, addr_t val, int size)\n }\n }\n \n-static bool is_host_reg(struct CPUState *cpu, addr_t ptr)\n+static bool is_host_reg(struct CPUX86State *env, addr_t ptr)\n {\n- return (ptr > (addr_t)cpu && ptr < (addr_t)cpu + sizeof(struct CPUState)) ||\n- (ptr > (addr_t)cpu->hvf_x86 && ptr <\n- (addr_t)(cpu->hvf_x86 + sizeof(struct hvf_x86_state)));\n+ return (ptr - (addr_t)&env->hvf_emul->regs[0]) < sizeof(env->hvf_emul->regs);\n }\n \n-void write_val_ext(struct CPUState *cpu, addr_t ptr, addr_t val, int size)\n+void write_val_ext(struct CPUX86State *env, addr_t ptr, addr_t val, int size)\n {\n- if (is_host_reg(cpu, ptr)) {\n+ if (is_host_reg(env, ptr)) {\n write_val_to_reg(ptr, val, size);\n return;\n }\n- vmx_write_mem(cpu, ptr, &val, size);\n+ vmx_write_mem(ENV_GET_CPU(env), ptr, &val, size);\n }\n \n-uint8_t *read_mmio(struct CPUState *cpu, addr_t ptr, int bytes)\n+uint8_t *read_mmio(struct CPUX86State *env, addr_t ptr, int bytes)\n {\n- vmx_read_mem(cpu, cpu->hvf_x86->mmio_buf, ptr, bytes);\n- return cpu->hvf_x86->mmio_buf;\n+ vmx_read_mem(ENV_GET_CPU(env), env->hvf_emul->mmio_buf, ptr, bytes);\n+ return env->hvf_emul->mmio_buf;\n }\n \n-addr_t read_val_ext(struct CPUState *cpu, addr_t ptr, int size)\n+addr_t read_val_ext(struct CPUX86State *env, addr_t ptr, int size)\n {\n addr_t val;\n uint8_t *mmio_ptr;\n \n- if (is_host_reg(cpu, ptr)) {\n+ if (is_host_reg(env, ptr)) {\n return read_val_from_reg(ptr, size);\n }\n \n- mmio_ptr = read_mmio(cpu, ptr, size);\n+ mmio_ptr = read_mmio(env, ptr, size);\n switch (size) {\n case 1:\n val = *(uint8_t *)mmio_ptr;\n@@ -267,7 +266,7 @@ addr_t read_val_ext(struct CPUState *cpu, addr_t ptr, int size)\n return val;\n }\n \n-static void fetch_operands(struct CPUState *cpu, struct x86_decode *decode,\n+static void fetch_operands(struct CPUX86State *env, struct x86_decode *decode,\n int n, bool val_op0, bool val_op1, bool val_op2)\n {\n int i;\n@@ -285,18 +284,18 @@ static void fetch_operands(struct CPUState *cpu, struct x86_decode *decode,\n }\n break;\n case X86_VAR_RM:\n- calc_modrm_operand(cpu, decode, &decode->op[i]);\n+ calc_modrm_operand(env, decode, &decode->op[i]);\n if (calc_val[i]) {\n- decode->op[i].val = read_val_ext(cpu, decode->op[i].ptr,\n+ decode->op[i].val = read_val_ext(env, decode->op[i].ptr,\n decode->operand_size);\n }\n break;\n case X86_VAR_OFFSET:\n- decode->op[i].ptr = decode_linear_addr(cpu, decode,\n+ decode->op[i].ptr = decode_linear_addr(env, decode,\n decode->op[i].ptr,\n REG_SEG_DS);\n if (calc_val[i]) {\n- decode->op[i].val = read_val_ext(cpu, decode->op[i].ptr,\n+ decode->op[i].val = read_val_ext(env, decode->op[i].ptr,\n decode->operand_size);\n }\n break;\n@@ -306,65 +305,65 @@ static void fetch_operands(struct CPUState *cpu, struct x86_decode *decode,\n }\n }\n \n-static void exec_mov(struct CPUState *cpu, struct x86_decode *decode)\n+static void exec_mov(struct CPUX86State *env, struct x86_decode *decode)\n {\n- fetch_operands(cpu, decode, 2, false, true, false);\n- write_val_ext(cpu, decode->op[0].ptr, decode->op[1].val,\n+ fetch_operands(env, decode, 2, false, true, false);\n+ write_val_ext(env, decode->op[0].ptr, decode->op[1].val,\n decode->operand_size);\n \n- RIP(cpu) += decode->len;\n+ RIP(env) += decode->len;\n }\n \n-static void exec_add(struct CPUState *cpu, struct x86_decode *decode)\n+static void exec_add(struct CPUX86State *env, struct x86_decode *decode)\n {\n- EXEC_2OP_ARITH_CMD(cpu, decode, +, SET_FLAGS_OSZAPC_ADD, true);\n- RIP(cpu) += decode->len;\n+ EXEC_2OP_ARITH_CMD(env, 
decode, +, SET_FLAGS_OSZAPC_ADD, true);\n+ RIP(env) += decode->len;\n }\n \n-static void exec_or(struct CPUState *cpu, struct x86_decode *decode)\n+static void exec_or(struct CPUX86State *env, struct x86_decode *decode)\n {\n- EXEC_2OP_LOGIC_CMD(cpu, decode, |, SET_FLAGS_OSZAPC_LOGIC, true);\n- RIP(cpu) += decode->len;\n+ EXEC_2OP_LOGIC_CMD(env, decode, |, SET_FLAGS_OSZAPC_LOGIC, true);\n+ RIP(env) += decode->len;\n }\n \n-static void exec_adc(struct CPUState *cpu, struct x86_decode *decode)\n+static void exec_adc(struct CPUX86State *env, struct x86_decode *decode)\n {\n- EXEC_2OP_ARITH_CMD(cpu, decode, +get_CF(cpu)+, SET_FLAGS_OSZAPC_ADD, true);\n- RIP(cpu) += decode->len;\n+ EXEC_2OP_ARITH_CMD(env, decode, +get_CF(env)+, SET_FLAGS_OSZAPC_ADD, true);\n+ RIP(env) += decode->len;\n }\n \n-static void exec_sbb(struct CPUState *cpu, struct x86_decode *decode)\n+static void exec_sbb(struct CPUX86State *env, struct x86_decode *decode)\n {\n- EXEC_2OP_ARITH_CMD(cpu, decode, -get_CF(cpu)-, SET_FLAGS_OSZAPC_SUB, true);\n- RIP(cpu) += decode->len;\n+ EXEC_2OP_ARITH_CMD(env, decode, -get_CF(env)-, SET_FLAGS_OSZAPC_SUB, true);\n+ RIP(env) += decode->len;\n }\n \n-static void exec_and(struct CPUState *cpu, struct x86_decode *decode)\n+static void exec_and(struct CPUX86State *env, struct x86_decode *decode)\n {\n- EXEC_2OP_LOGIC_CMD(cpu, decode, &, SET_FLAGS_OSZAPC_LOGIC, true);\n- RIP(cpu) += decode->len;\n+ EXEC_2OP_LOGIC_CMD(env, decode, &, SET_FLAGS_OSZAPC_LOGIC, true);\n+ RIP(env) += decode->len;\n }\n \n-static void exec_sub(struct CPUState *cpu, struct x86_decode *decode)\n+static void exec_sub(struct CPUX86State *env, struct x86_decode *decode)\n {\n- EXEC_2OP_ARITH_CMD(cpu, decode, -, SET_FLAGS_OSZAPC_SUB, true);\n- RIP(cpu) += decode->len;\n+ EXEC_2OP_ARITH_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, true);\n+ RIP(env) += decode->len;\n }\n \n-static void exec_xor(struct CPUState *cpu, struct x86_decode *decode)\n+static void exec_xor(struct CPUX86State *env, struct x86_decode *decode)\n {\n- EXEC_2OP_LOGIC_CMD(cpu, decode, ^, SET_FLAGS_OSZAPC_LOGIC, true);\n- RIP(cpu) += decode->len;\n+ EXEC_2OP_LOGIC_CMD(env, decode, ^, SET_FLAGS_OSZAPC_LOGIC, true);\n+ RIP(env) += decode->len;\n }\n \n-static void exec_neg(struct CPUState *cpu, struct x86_decode *decode)\n+static void exec_neg(struct CPUX86State *env, struct x86_decode *decode)\n {\n- /*EXEC_2OP_ARITH_CMD(cpu, decode, -, SET_FLAGS_OSZAPC_SUB, false);*/\n+ /*EXEC_2OP_ARITH_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);*/\n int32_t val;\n- fetch_operands(cpu, decode, 2, true, true, false);\n+ fetch_operands(env, decode, 2, true, true, false);\n \n val = 0 - sign(decode->op[1].val, decode->operand_size);\n- write_val_ext(cpu, decode->op[1].ptr, val, decode->operand_size);\n+ write_val_ext(env, decode->op[1].ptr, val, decode->operand_size);\n \n if (4 == decode->operand_size) {\n SET_FLAGS_OSZAPC_SUB_32(0, 0 - val, val);\n@@ -376,56 +375,56 @@ static void exec_neg(struct CPUState *cpu, struct x86_decode *decode)\n VM_PANIC(\"bad op size\\n\");\n }\n \n- /*lflags_to_rflags(cpu);*/\n- RIP(cpu) += decode->len;\n+ /*lflags_to_rflags(env);*/\n+ RIP(env) += decode->len;\n }\n \n-static void exec_cmp(struct CPUState *cpu, struct x86_decode *decode)\n+static void exec_cmp(struct CPUX86State *env, struct x86_decode *decode)\n {\n- EXEC_2OP_ARITH_CMD(cpu, decode, -, SET_FLAGS_OSZAPC_SUB, false);\n- RIP(cpu) += decode->len;\n+ EXEC_2OP_ARITH_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);\n+ RIP(env) += decode->len;\n }\n \n-static void 
exec_inc(struct CPUState *cpu, struct x86_decode *decode)\n+static void exec_inc(struct CPUX86State *env, struct x86_decode *decode)\n {\n decode->op[1].type = X86_VAR_IMMEDIATE;\n decode->op[1].val = 0;\n \n- EXEC_2OP_ARITH_CMD(cpu, decode, +1+, SET_FLAGS_OSZAP_ADD, true);\n+ EXEC_2OP_ARITH_CMD(env, decode, +1+, SET_FLAGS_OSZAP_ADD, true);\n \n- RIP(cpu) += decode->len;\n+ RIP(env) += decode->len;\n }\n \n-static void exec_dec(struct CPUState *cpu, struct x86_decode *decode)\n+static void exec_dec(struct CPUX86State *env, struct x86_decode *decode)\n {\n decode->op[1].type = X86_VAR_IMMEDIATE;\n decode->op[1].val = 0;\n \n- EXEC_2OP_ARITH_CMD(cpu, decode, -1-, SET_FLAGS_OSZAP_SUB, true);\n- RIP(cpu) += decode->len;\n+ EXEC_2OP_ARITH_CMD(env, decode, -1-, SET_FLAGS_OSZAP_SUB, true);\n+ RIP(env) += decode->len;\n }\n \n-static void exec_tst(struct CPUState *cpu, struct x86_decode *decode)\n+static void exec_tst(struct CPUX86State *env, struct x86_decode *decode)\n {\n- EXEC_2OP_LOGIC_CMD(cpu, decode, &, SET_FLAGS_OSZAPC_LOGIC, false);\n- RIP(cpu) += decode->len;\n+ EXEC_2OP_LOGIC_CMD(env, decode, &, SET_FLAGS_OSZAPC_LOGIC, false);\n+ RIP(env) += decode->len;\n }\n \n-static void exec_not(struct CPUState *cpu, struct x86_decode *decode)\n+static void exec_not(struct CPUX86State *env, struct x86_decode *decode)\n {\n- fetch_operands(cpu, decode, 1, true, false, false);\n+ fetch_operands(env, decode, 1, true, false, false);\n \n- write_val_ext(cpu, decode->op[0].ptr, ~decode->op[0].val,\n+ write_val_ext(env, decode->op[0].ptr, ~decode->op[0].val,\n decode->operand_size);\n- RIP(cpu) += decode->len;\n+ RIP(env) += decode->len;\n }\n \n-void exec_movzx(struct CPUState *cpu, struct x86_decode *decode)\n+void exec_movzx(struct CPUX86State *env, struct x86_decode *decode)\n {\n int src_op_size;\n int op_size = decode->operand_size;\n \n- fetch_operands(cpu, decode, 1, false, false, false);\n+ fetch_operands(env, decode, 1, false, false, false);\n \n if (0xb6 == decode->opcode[1]) {\n src_op_size = 1;\n@@ -433,60 +432,60 @@ void exec_movzx(struct CPUState *cpu, struct x86_decode *decode)\n src_op_size = 2;\n }\n decode->operand_size = src_op_size;\n- calc_modrm_operand(cpu, decode, &decode->op[1]);\n- decode->op[1].val = read_val_ext(cpu, decode->op[1].ptr, src_op_size);\n- write_val_ext(cpu, decode->op[0].ptr, decode->op[1].val, op_size);\n+ calc_modrm_operand(env, decode, &decode->op[1]);\n+ decode->op[1].val = read_val_ext(env, decode->op[1].ptr, src_op_size);\n+ write_val_ext(env, decode->op[0].ptr, decode->op[1].val, op_size);\n \n- RIP(cpu) += decode->len;\n+ RIP(env) += decode->len;\n }\n \n-static void exec_out(struct CPUState *cpu, struct x86_decode *decode)\n+static void exec_out(struct CPUX86State *env, struct x86_decode *decode)\n {\n switch (decode->opcode[0]) {\n case 0xe6:\n- hvf_handle_io(cpu, decode->op[0].val, &AL(cpu), 1, 1, 1);\n+ hvf_handle_io(ENV_GET_CPU(env), decode->op[0].val, &AL(env), 1, 1, 1);\n break;\n case 0xe7:\n- hvf_handle_io(cpu, decode->op[0].val, &RAX(cpu), 1,\n+ hvf_handle_io(ENV_GET_CPU(env), decode->op[0].val, &RAX(env), 1,\n decode->operand_size, 1);\n break;\n case 0xee:\n- hvf_handle_io(cpu, DX(cpu), &AL(cpu), 1, 1, 1);\n+ hvf_handle_io(ENV_GET_CPU(env), DX(env), &AL(env), 1, 1, 1);\n break;\n case 0xef:\n- hvf_handle_io(cpu, DX(cpu), &RAX(cpu), 1, decode->operand_size, 1);\n+ hvf_handle_io(ENV_GET_CPU(env), DX(env), &RAX(env), 1, decode->operand_size, 1);\n break;\n default:\n VM_PANIC(\"Bad out opcode\\n\");\n break;\n }\n- RIP(cpu) += decode->len;\n+ 
RIP(env) += decode->len;\n }\n \n-static void exec_in(struct CPUState *cpu, struct x86_decode *decode)\n+static void exec_in(struct CPUX86State *env, struct x86_decode *decode)\n {\n addr_t val = 0;\n switch (decode->opcode[0]) {\n case 0xe4:\n- hvf_handle_io(cpu, decode->op[0].val, &AL(cpu), 0, 1, 1);\n+ hvf_handle_io(ENV_GET_CPU(env), decode->op[0].val, &AL(env), 0, 1, 1);\n break;\n case 0xe5:\n- hvf_handle_io(cpu, decode->op[0].val, &val, 0, decode->operand_size, 1);\n+ hvf_handle_io(ENV_GET_CPU(env), decode->op[0].val, &val, 0, decode->operand_size, 1);\n if (decode->operand_size == 2) {\n- AX(cpu) = val;\n+ AX(env) = val;\n } else {\n- RAX(cpu) = (uint32_t)val;\n+ RAX(env) = (uint32_t)val;\n }\n break;\n case 0xec:\n- hvf_handle_io(cpu, DX(cpu), &AL(cpu), 0, 1, 1);\n+ hvf_handle_io(ENV_GET_CPU(env), DX(env), &AL(env), 0, 1, 1);\n break;\n case 0xed:\n- hvf_handle_io(cpu, DX(cpu), &val, 0, decode->operand_size, 1);\n+ hvf_handle_io(ENV_GET_CPU(env), DX(env), &val, 0, decode->operand_size, 1);\n if (decode->operand_size == 2) {\n- AX(cpu) = val;\n+ AX(env) = val;\n } else {\n- RAX(cpu) = (uint32_t)val;\n+ RAX(env) = (uint32_t)val;\n }\n \n break;\n@@ -495,212 +494,212 @@ static void exec_in(struct CPUState *cpu, struct x86_decode *decode)\n break;\n }\n \n- RIP(cpu) += decode->len;\n+ RIP(env) += decode->len;\n }\n \n-static inline void string_increment_reg(struct CPUState *cpu, int reg,\n+static inline void string_increment_reg(struct CPUX86State *env, int reg,\n struct x86_decode *decode)\n {\n- addr_t val = read_reg(cpu, reg, decode->addressing_size);\n- if (cpu->hvf_x86->rflags.df) {\n+ addr_t val = read_reg(env, reg, decode->addressing_size);\n+ if (env->hvf_emul->rflags.df) {\n val -= decode->operand_size;\n } else {\n val += decode->operand_size;\n }\n- write_reg(cpu, reg, val, decode->addressing_size);\n+ write_reg(env, reg, val, decode->addressing_size);\n }\n \n-static inline void string_rep(struct CPUState *cpu, struct x86_decode *decode,\n- void (*func)(struct CPUState *cpu,\n+static inline void string_rep(struct CPUX86State *env, struct x86_decode *decode,\n+ void (*func)(struct CPUX86State *env,\n struct x86_decode *ins), int rep)\n {\n- addr_t rcx = read_reg(cpu, REG_RCX, decode->addressing_size);\n+ addr_t rcx = read_reg(env, REG_RCX, decode->addressing_size);\n while (rcx--) {\n- func(cpu, decode);\n- write_reg(cpu, REG_RCX, rcx, decode->addressing_size);\n- if ((PREFIX_REP == rep) && !get_ZF(cpu)) {\n+ func(env, decode);\n+ write_reg(env, REG_RCX, rcx, decode->addressing_size);\n+ if ((PREFIX_REP == rep) && !get_ZF(env)) {\n break;\n }\n- if ((PREFIX_REPN == rep) && get_ZF(cpu)) {\n+ if ((PREFIX_REPN == rep) && get_ZF(env)) {\n break;\n }\n }\n }\n \n-static void exec_ins_single(struct CPUState *cpu, struct x86_decode *decode)\n+static void exec_ins_single(struct CPUX86State *env, struct x86_decode *decode)\n {\n- addr_t addr = linear_addr_size(cpu, RDI(cpu), decode->addressing_size,\n+ addr_t addr = linear_addr_size(ENV_GET_CPU(env), RDI(env), decode->addressing_size,\n REG_SEG_ES);\n \n- hvf_handle_io(cpu, DX(cpu), cpu->hvf_x86->mmio_buf, 0,\n+ hvf_handle_io(ENV_GET_CPU(env), DX(env), env->hvf_emul->mmio_buf, 0,\n decode->operand_size, 1);\n- vmx_write_mem(cpu, addr, cpu->hvf_x86->mmio_buf, decode->operand_size);\n+ vmx_write_mem(ENV_GET_CPU(env), addr, env->hvf_emul->mmio_buf, decode->operand_size);\n \n- string_increment_reg(cpu, REG_RDI, decode);\n+ string_increment_reg(env, REG_RDI, decode);\n }\n \n-static void exec_ins(struct CPUState *cpu, struct x86_decode 
*decode)\n+static void exec_ins(struct CPUX86State *env, struct x86_decode *decode)\n {\n if (decode->rep) {\n- string_rep(cpu, decode, exec_ins_single, 0);\n+ string_rep(env, decode, exec_ins_single, 0);\n } else {\n- exec_ins_single(cpu, decode);\n+ exec_ins_single(env, decode);\n }\n \n- RIP(cpu) += decode->len;\n+ RIP(env) += decode->len;\n }\n \n-static void exec_outs_single(struct CPUState *cpu, struct x86_decode *decode)\n+static void exec_outs_single(struct CPUX86State *env, struct x86_decode *decode)\n {\n- addr_t addr = decode_linear_addr(cpu, decode, RSI(cpu), REG_SEG_DS);\n+ addr_t addr = decode_linear_addr(env, decode, RSI(env), REG_SEG_DS);\n \n- vmx_read_mem(cpu, cpu->hvf_x86->mmio_buf, addr, decode->operand_size);\n- hvf_handle_io(cpu, DX(cpu), cpu->hvf_x86->mmio_buf, 1,\n+ vmx_read_mem(ENV_GET_CPU(env), env->hvf_emul->mmio_buf, addr, decode->operand_size);\n+ hvf_handle_io(ENV_GET_CPU(env), DX(env), env->hvf_emul->mmio_buf, 1,\n decode->operand_size, 1);\n \n- string_increment_reg(cpu, REG_RSI, decode);\n+ string_increment_reg(env, REG_RSI, decode);\n }\n \n-static void exec_outs(struct CPUState *cpu, struct x86_decode *decode)\n+static void exec_outs(struct CPUX86State *env, struct x86_decode *decode)\n {\n if (decode->rep) {\n- string_rep(cpu, decode, exec_outs_single, 0);\n+ string_rep(env, decode, exec_outs_single, 0);\n } else {\n- exec_outs_single(cpu, decode);\n+ exec_outs_single(env, decode);\n }\n \n- RIP(cpu) += decode->len;\n+ RIP(env) += decode->len;\n }\n \n-static void exec_movs_single(struct CPUState *cpu, struct x86_decode *decode)\n+static void exec_movs_single(struct CPUX86State *env, struct x86_decode *decode)\n {\n addr_t src_addr;\n addr_t dst_addr;\n addr_t val;\n \n- src_addr = decode_linear_addr(cpu, decode, RSI(cpu), REG_SEG_DS);\n- dst_addr = linear_addr_size(cpu, RDI(cpu), decode->addressing_size,\n+ src_addr = decode_linear_addr(env, decode, RSI(env), REG_SEG_DS);\n+ dst_addr = linear_addr_size(ENV_GET_CPU(env), RDI(env), decode->addressing_size,\n REG_SEG_ES);\n \n- val = read_val_ext(cpu, src_addr, decode->operand_size);\n- write_val_ext(cpu, dst_addr, val, decode->operand_size);\n+ val = read_val_ext(env, src_addr, decode->operand_size);\n+ write_val_ext(env, dst_addr, val, decode->operand_size);\n \n- string_increment_reg(cpu, REG_RSI, decode);\n- string_increment_reg(cpu, REG_RDI, decode);\n+ string_increment_reg(env, REG_RSI, decode);\n+ string_increment_reg(env, REG_RDI, decode);\n }\n \n-static void exec_movs(struct CPUState *cpu, struct x86_decode *decode)\n+static void exec_movs(struct CPUX86State *env, struct x86_decode *decode)\n {\n if (decode->rep) {\n- string_rep(cpu, decode, exec_movs_single, 0);\n+ string_rep(env, decode, exec_movs_single, 0);\n } else {\n- exec_movs_single(cpu, decode);\n+ exec_movs_single(env, decode);\n }\n \n- RIP(cpu) += decode->len;\n+ RIP(env) += decode->len;\n }\n \n-static void exec_cmps_single(struct CPUState *cpu, struct x86_decode *decode)\n+static void exec_cmps_single(struct CPUX86State *env, struct x86_decode *decode)\n {\n addr_t src_addr;\n addr_t dst_addr;\n \n- src_addr = decode_linear_addr(cpu, decode, RSI(cpu), REG_SEG_DS);\n- dst_addr = linear_addr_size(cpu, RDI(cpu), decode->addressing_size,\n+ src_addr = decode_linear_addr(env, decode, RSI(env), REG_SEG_DS);\n+ dst_addr = linear_addr_size(ENV_GET_CPU(env), RDI(env), decode->addressing_size,\n REG_SEG_ES);\n \n decode->op[0].type = X86_VAR_IMMEDIATE;\n- decode->op[0].val = read_val_ext(cpu, src_addr, decode->operand_size);\n+ 
decode->op[0].val = read_val_ext(env, src_addr, decode->operand_size);\n decode->op[1].type = X86_VAR_IMMEDIATE;\n- decode->op[1].val = read_val_ext(cpu, dst_addr, decode->operand_size);\n+ decode->op[1].val = read_val_ext(env, dst_addr, decode->operand_size);\n \n- EXEC_2OP_ARITH_CMD(cpu, decode, -, SET_FLAGS_OSZAPC_SUB, false);\n+ EXEC_2OP_ARITH_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);\n \n- string_increment_reg(cpu, REG_RSI, decode);\n- string_increment_reg(cpu, REG_RDI, decode);\n+ string_increment_reg(env, REG_RSI, decode);\n+ string_increment_reg(env, REG_RDI, decode);\n }\n \n-static void exec_cmps(struct CPUState *cpu, struct x86_decode *decode)\n+static void exec_cmps(struct CPUX86State *env, struct x86_decode *decode)\n {\n if (decode->rep) {\n- string_rep(cpu, decode, exec_cmps_single, decode->rep);\n+ string_rep(env, decode, exec_cmps_single, decode->rep);\n } else {\n- exec_cmps_single(cpu, decode);\n+ exec_cmps_single(env, decode);\n }\n- RIP(cpu) += decode->len;\n+ RIP(env) += decode->len;\n }\n \n \n-static void exec_stos_single(struct CPUState *cpu, struct x86_decode *decode)\n+static void exec_stos_single(struct CPUX86State *env, struct x86_decode *decode)\n {\n addr_t addr;\n addr_t val;\n \n- addr = linear_addr_size(cpu, RDI(cpu), decode->addressing_size, REG_SEG_ES);\n- val = read_reg(cpu, REG_RAX, decode->operand_size);\n- vmx_write_mem(cpu, addr, &val, decode->operand_size);\n+ addr = linear_addr_size(ENV_GET_CPU(env), RDI(env), decode->addressing_size, REG_SEG_ES);\n+ val = read_reg(env, REG_RAX, decode->operand_size);\n+ vmx_write_mem(ENV_GET_CPU(env), addr, &val, decode->operand_size);\n \n- string_increment_reg(cpu, REG_RDI, decode);\n+ string_increment_reg(env, REG_RDI, decode);\n }\n \n \n-static void exec_stos(struct CPUState *cpu, struct x86_decode *decode)\n+static void exec_stos(struct CPUX86State *env, struct x86_decode *decode)\n {\n if (decode->rep) {\n- string_rep(cpu, decode, exec_stos_single, 0);\n+ string_rep(env, decode, exec_stos_single, 0);\n } else {\n- exec_stos_single(cpu, decode);\n+ exec_stos_single(env, decode);\n }\n \n- RIP(cpu) += decode->len;\n+ RIP(env) += decode->len;\n }\n \n-static void exec_scas_single(struct CPUState *cpu, struct x86_decode *decode)\n+static void exec_scas_single(struct CPUX86State *env, struct x86_decode *decode)\n {\n addr_t addr;\n \n- addr = linear_addr_size(cpu, RDI(cpu), decode->addressing_size, REG_SEG_ES);\n+ addr = linear_addr_size(ENV_GET_CPU(env), RDI(env), decode->addressing_size, REG_SEG_ES);\n decode->op[1].type = X86_VAR_IMMEDIATE;\n- vmx_read_mem(cpu, &decode->op[1].val, addr, decode->operand_size);\n+ vmx_read_mem(ENV_GET_CPU(env), &decode->op[1].val, addr, decode->operand_size);\n \n- EXEC_2OP_ARITH_CMD(cpu, decode, -, SET_FLAGS_OSZAPC_SUB, false);\n- string_increment_reg(cpu, REG_RDI, decode);\n+ EXEC_2OP_ARITH_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);\n+ string_increment_reg(env, REG_RDI, decode);\n }\n \n-static void exec_scas(struct CPUState *cpu, struct x86_decode *decode)\n+static void exec_scas(struct CPUX86State *env, struct x86_decode *decode)\n {\n decode->op[0].type = X86_VAR_REG;\n decode->op[0].reg = REG_RAX;\n if (decode->rep) {\n- string_rep(cpu, decode, exec_scas_single, decode->rep);\n+ string_rep(env, decode, exec_scas_single, decode->rep);\n } else {\n- exec_scas_single(cpu, decode);\n+ exec_scas_single(env, decode);\n }\n \n- RIP(cpu) += decode->len;\n+ RIP(env) += decode->len;\n }\n \n-static void exec_lods_single(struct CPUState *cpu, struct x86_decode 
*decode)\n+static void exec_lods_single(struct CPUX86State *env, struct x86_decode *decode)\n {\n addr_t addr;\n addr_t val = 0;\n \n- addr = decode_linear_addr(cpu, decode, RSI(cpu), REG_SEG_DS);\n- vmx_read_mem(cpu, &val, addr, decode->operand_size);\n- write_reg(cpu, REG_RAX, val, decode->operand_size);\n+ addr = decode_linear_addr(env, decode, RSI(env), REG_SEG_DS);\n+ vmx_read_mem(ENV_GET_CPU(env), &val, addr, decode->operand_size);\n+ write_reg(env, REG_RAX, val, decode->operand_size);\n \n- string_increment_reg(cpu, REG_RSI, decode);\n+ string_increment_reg(env, REG_RSI, decode);\n }\n \n-static void exec_lods(struct CPUState *cpu, struct x86_decode *decode)\n+static void exec_lods(struct CPUX86State *env, struct x86_decode *decode)\n {\n if (decode->rep) {\n- string_rep(cpu, decode, exec_lods_single, 0);\n+ string_rep(env, decode, exec_lods_single, 0);\n } else {\n- exec_lods_single(cpu, decode);\n+ exec_lods_single(env, decode);\n }\n \n- RIP(cpu) += decode->len;\n+ RIP(env) += decode->len;\n }\n \n #define MSR_IA32_UCODE_REV 0x00000017\n@@ -709,7 +708,7 @@ void simulate_rdmsr(struct CPUState *cpu)\n {\n X86CPU *x86_cpu = X86_CPU(cpu);\n CPUX86State *env = &x86_cpu->env;\n- uint32_t msr = ECX(cpu);\n+ uint32_t msr = ECX(env);\n uint64_t val = 0;\n \n switch (msr) {\n@@ -754,7 +753,7 @@ void simulate_rdmsr(struct CPUState *cpu)\n case MSR_MTRRphysBase(5):\n case MSR_MTRRphysBase(6):\n case MSR_MTRRphysBase(7):\n- val = env->mtrr_var[(ECX(cpu) - MSR_MTRRphysBase(0)) / 2].base;\n+ val = env->mtrr_var[(ECX(env) - MSR_MTRRphysBase(0)) / 2].base;\n break;\n case MSR_MTRRphysMask(0):\n case MSR_MTRRphysMask(1):\n@@ -764,14 +763,14 @@ void simulate_rdmsr(struct CPUState *cpu)\n case MSR_MTRRphysMask(5):\n case MSR_MTRRphysMask(6):\n case MSR_MTRRphysMask(7):\n- val = env->mtrr_var[(ECX(cpu) - MSR_MTRRphysMask(0)) / 2].mask;\n+ val = env->mtrr_var[(ECX(env) - MSR_MTRRphysMask(0)) / 2].mask;\n break;\n case MSR_MTRRfix64K_00000:\n val = env->mtrr_fixed[0];\n break;\n case MSR_MTRRfix16K_80000:\n case MSR_MTRRfix16K_A0000:\n- val = env->mtrr_fixed[ECX(cpu) - MSR_MTRRfix16K_80000 + 1];\n+ val = env->mtrr_fixed[ECX(env) - MSR_MTRRfix16K_80000 + 1];\n break;\n case MSR_MTRRfix4K_C0000:\n case MSR_MTRRfix4K_C8000:\n@@ -781,7 +780,7 @@ void simulate_rdmsr(struct CPUState *cpu)\n case MSR_MTRRfix4K_E8000:\n case MSR_MTRRfix4K_F0000:\n case MSR_MTRRfix4K_F8000:\n- val = env->mtrr_fixed[ECX(cpu) - MSR_MTRRfix4K_C0000 + 3];\n+ val = env->mtrr_fixed[ECX(env) - MSR_MTRRfix4K_C0000 + 3];\n break;\n case MSR_MTRRdefType:\n val = env->mtrr_deftype;\n@@ -792,22 +791,22 @@ void simulate_rdmsr(struct CPUState *cpu)\n break;\n }\n \n- RAX(cpu) = (uint32_t)val;\n- RDX(cpu) = (uint32_t)(val >> 32);\n+ RAX(env) = (uint32_t)val;\n+ RDX(env) = (uint32_t)(val >> 32);\n }\n \n-static void exec_rdmsr(struct CPUState *cpu, struct x86_decode *decode)\n+static void exec_rdmsr(struct CPUX86State *env, struct x86_decode *decode)\n {\n- simulate_rdmsr(cpu);\n- RIP(cpu) += decode->len;\n+ simulate_rdmsr(ENV_GET_CPU(env));\n+ RIP(env) += decode->len;\n }\n \n void simulate_wrmsr(struct CPUState *cpu)\n {\n X86CPU *x86_cpu = X86_CPU(cpu);\n CPUX86State *env = &x86_cpu->env;\n- uint32_t msr = ECX(cpu);\n- uint64_t data = ((uint64_t)EDX(cpu) << 32) | EAX(cpu);\n+ uint32_t msr = ECX(env);\n+ uint64_t data = ((uint64_t)EDX(env) << 32) | EAX(env);\n \n switch (msr) {\n case MSR_IA32_TSC:\n@@ -837,7 +836,7 @@ void simulate_wrmsr(struct CPUState *cpu)\n abort();\n break;\n case MSR_EFER:\n- cpu->hvf_x86->efer.efer = data;\n+ 
env->hvf_emul->efer.efer = data;\n /*printf(\"new efer %llx\\n\", EFER(cpu));*/\n wvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER, data);\n if (data & EFER_NXE) {\n@@ -852,7 +851,7 @@ void simulate_wrmsr(struct CPUState *cpu)\n case MSR_MTRRphysBase(5):\n case MSR_MTRRphysBase(6):\n case MSR_MTRRphysBase(7):\n- env->mtrr_var[(ECX(cpu) - MSR_MTRRphysBase(0)) / 2].base = data;\n+ env->mtrr_var[(ECX(env) - MSR_MTRRphysBase(0)) / 2].base = data;\n break;\n case MSR_MTRRphysMask(0):\n case MSR_MTRRphysMask(1):\n@@ -862,14 +861,14 @@ void simulate_wrmsr(struct CPUState *cpu)\n case MSR_MTRRphysMask(5):\n case MSR_MTRRphysMask(6):\n case MSR_MTRRphysMask(7):\n- env->mtrr_var[(ECX(cpu) - MSR_MTRRphysMask(0)) / 2].mask = data;\n+ env->mtrr_var[(ECX(env) - MSR_MTRRphysMask(0)) / 2].mask = data;\n break;\n case MSR_MTRRfix64K_00000:\n- env->mtrr_fixed[ECX(cpu) - MSR_MTRRfix64K_00000] = data;\n+ env->mtrr_fixed[ECX(env) - MSR_MTRRfix64K_00000] = data;\n break;\n case MSR_MTRRfix16K_80000:\n case MSR_MTRRfix16K_A0000:\n- env->mtrr_fixed[ECX(cpu) - MSR_MTRRfix16K_80000 + 1] = data;\n+ env->mtrr_fixed[ECX(env) - MSR_MTRRfix16K_80000 + 1] = data;\n break;\n case MSR_MTRRfix4K_C0000:\n case MSR_MTRRfix4K_C8000:\n@@ -879,7 +878,7 @@ void simulate_wrmsr(struct CPUState *cpu)\n case MSR_MTRRfix4K_E8000:\n case MSR_MTRRfix4K_F0000:\n case MSR_MTRRfix4K_F8000:\n- env->mtrr_fixed[ECX(cpu) - MSR_MTRRfix4K_C0000 + 3] = data;\n+ env->mtrr_fixed[ECX(env) - MSR_MTRRfix4K_C0000 + 3] = data;\n break;\n case MSR_MTRRdefType:\n env->mtrr_deftype = data;\n@@ -895,17 +894,17 @@ void simulate_wrmsr(struct CPUState *cpu)\n printf(\"write msr %llx\\n\", RCX(cpu));*/\n }\n \n-static void exec_wrmsr(struct CPUState *cpu, struct x86_decode *decode)\n+static void exec_wrmsr(struct CPUX86State *env, struct x86_decode *decode)\n {\n- simulate_wrmsr(cpu);\n- RIP(cpu) += decode->len;\n+ simulate_wrmsr(ENV_GET_CPU(env));\n+ RIP(env) += decode->len;\n }\n \n /*\n * flag:\n * 0 - bt, 1 - btc, 2 - bts, 3 - btr\n */\n-static void do_bt(struct CPUState *cpu, struct x86_decode *decode, int flag)\n+static void do_bt(struct CPUX86State *env, struct x86_decode *decode, int flag)\n {\n int32_t displacement;\n uint8_t index;\n@@ -914,7 +913,7 @@ static void do_bt(struct CPUState *cpu, struct x86_decode *decode, int flag)\n \n VM_PANIC_ON(decode->rex.rex);\n \n- fetch_operands(cpu, decode, 2, false, true, false);\n+ fetch_operands(env, decode, 2, false, true, false);\n index = decode->op[1].val & mask;\n \n if (decode->op[0].type != X86_VAR_REG) {\n@@ -928,13 +927,13 @@ static void do_bt(struct CPUState *cpu, struct x86_decode *decode, int flag)\n VM_PANIC(\"bt 64bit\\n\");\n }\n }\n- decode->op[0].val = read_val_ext(cpu, decode->op[0].ptr,\n+ decode->op[0].val = read_val_ext(env, decode->op[0].ptr,\n decode->operand_size);\n cf = (decode->op[0].val >> index) & 0x01;\n \n switch (flag) {\n case 0:\n- set_CF(cpu, cf);\n+ set_CF(env, cf);\n return;\n case 1:\n decode->op[0].val ^= (1u << index);\n@@ -946,41 +945,41 @@ static void do_bt(struct CPUState *cpu, struct x86_decode *decode, int flag)\n decode->op[0].val &= ~(1u << index);\n break;\n }\n- write_val_ext(cpu, decode->op[0].ptr, decode->op[0].val,\n+ write_val_ext(env, decode->op[0].ptr, decode->op[0].val,\n decode->operand_size);\n- set_CF(cpu, cf);\n+ set_CF(env, cf);\n }\n \n-static void exec_bt(struct CPUState *cpu, struct x86_decode *decode)\n+static void exec_bt(struct CPUX86State *env, struct x86_decode *decode)\n {\n- do_bt(cpu, decode, 0);\n- RIP(cpu) += decode->len;\n+ do_bt(env, decode, 
0);\n+ RIP(env) += decode->len;\n }\n \n-static void exec_btc(struct CPUState *cpu, struct x86_decode *decode)\n+static void exec_btc(struct CPUX86State *env, struct x86_decode *decode)\n {\n- do_bt(cpu, decode, 1);\n- RIP(cpu) += decode->len;\n+ do_bt(env, decode, 1);\n+ RIP(env) += decode->len;\n }\n \n-static void exec_btr(struct CPUState *cpu, struct x86_decode *decode)\n+static void exec_btr(struct CPUX86State *env, struct x86_decode *decode)\n {\n- do_bt(cpu, decode, 3);\n- RIP(cpu) += decode->len;\n+ do_bt(env, decode, 3);\n+ RIP(env) += decode->len;\n }\n \n-static void exec_bts(struct CPUState *cpu, struct x86_decode *decode)\n+static void exec_bts(struct CPUX86State *env, struct x86_decode *decode)\n {\n- do_bt(cpu, decode, 2);\n- RIP(cpu) += decode->len;\n+ do_bt(env, decode, 2);\n+ RIP(env) += decode->len;\n }\n \n-void exec_shl(struct CPUState *cpu, struct x86_decode *decode)\n+void exec_shl(struct CPUX86State *env, struct x86_decode *decode)\n {\n uint8_t count;\n int of = 0, cf = 0;\n \n- fetch_operands(cpu, decode, 2, true, true, false);\n+ fetch_operands(env, decode, 2, true, true, false);\n \n count = decode->op[1].val;\n count &= 0x1f; /* count is masked to 5 bits*/\n@@ -998,9 +997,9 @@ void exec_shl(struct CPUState *cpu, struct x86_decode *decode)\n of = cf ^ (res >> 7);\n }\n \n- write_val_ext(cpu, decode->op[0].ptr, res, 1);\n+ write_val_ext(env, decode->op[0].ptr, res, 1);\n SET_FLAGS_OSZAPC_LOGIC_8(res);\n- SET_FLAGS_OxxxxC(cpu, of, cf);\n+ SET_FLAGS_OxxxxC(env, of, cf);\n break;\n }\n case 2:\n@@ -1014,20 +1013,20 @@ void exec_shl(struct CPUState *cpu, struct x86_decode *decode)\n of = cf ^ (res >> 15); /* of = cf ^ result15 */\n }\n \n- write_val_ext(cpu, decode->op[0].ptr, res, 2);\n+ write_val_ext(env, decode->op[0].ptr, res, 2);\n SET_FLAGS_OSZAPC_LOGIC_16(res);\n- SET_FLAGS_OxxxxC(cpu, of, cf);\n+ SET_FLAGS_OxxxxC(env, of, cf);\n break;\n }\n case 4:\n {\n uint32_t res = decode->op[0].val << count;\n \n- write_val_ext(cpu, decode->op[0].ptr, res, 4);\n+ write_val_ext(env, decode->op[0].ptr, res, 4);\n SET_FLAGS_OSZAPC_LOGIC_32(res);\n cf = (decode->op[0].val >> (32 - count)) & 0x1;\n of = cf ^ (res >> 31); /* of = cf ^ result31 */\n- SET_FLAGS_OxxxxC(cpu, of, cf);\n+ SET_FLAGS_OxxxxC(env, of, cf);\n break;\n }\n default:\n@@ -1035,16 +1034,16 @@ void exec_shl(struct CPUState *cpu, struct x86_decode *decode)\n }\n \n exit:\n- /* lflags_to_rflags(cpu); */\n- RIP(cpu) += decode->len;\n+ /* lflags_to_rflags(env); */\n+ RIP(env) += decode->len;\n }\n \n-void exec_movsx(struct CPUState *cpu, struct x86_decode *decode)\n+void exec_movsx(CPUX86State *env, struct x86_decode *decode)\n {\n int src_op_size;\n int op_size = decode->operand_size;\n \n- fetch_operands(cpu, decode, 2, false, false, false);\n+ fetch_operands(env, decode, 2, false, false, false);\n \n if (0xbe == decode->opcode[1]) {\n src_op_size = 1;\n@@ -1053,20 +1052,20 @@ void exec_movsx(struct CPUState *cpu, struct x86_decode *decode)\n }\n \n decode->operand_size = src_op_size;\n- calc_modrm_operand(cpu, decode, &decode->op[1]);\n- decode->op[1].val = sign(read_val_ext(cpu, decode->op[1].ptr, src_op_size),\n+ calc_modrm_operand(env, decode, &decode->op[1]);\n+ decode->op[1].val = sign(read_val_ext(env, decode->op[1].ptr, src_op_size),\n src_op_size);\n \n- write_val_ext(cpu, decode->op[0].ptr, decode->op[1].val, op_size);\n+ write_val_ext(env, decode->op[0].ptr, decode->op[1].val, op_size);\n \n- RIP(cpu) += decode->len;\n+ RIP(env) += decode->len;\n }\n \n-void exec_ror(struct CPUState *cpu, struct 
x86_decode *decode)\n+void exec_ror(struct CPUX86State *env, struct x86_decode *decode)\n {\n uint8_t count;\n \n- fetch_operands(cpu, decode, 2, true, true, false);\n+ fetch_operands(env, decode, 2, true, true, false);\n count = decode->op[1].val;\n \n switch (decode->operand_size) {\n@@ -1079,17 +1078,17 @@ void exec_ror(struct CPUState *cpu, struct x86_decode *decode)\n if (count & 0x18) {\n bit6 = ((uint8_t)decode->op[0].val >> 6) & 1;\n bit7 = ((uint8_t)decode->op[0].val >> 7) & 1;\n- SET_FLAGS_OxxxxC(cpu, bit6 ^ bit7, bit7);\n+ SET_FLAGS_OxxxxC(env, bit6 ^ bit7, bit7);\n }\n } else {\n count &= 0x7; /* use only bottom 3 bits */\n res = ((uint8_t)decode->op[0].val >> count) |\n ((uint8_t)decode->op[0].val << (8 - count));\n- write_val_ext(cpu, decode->op[0].ptr, res, 1);\n+ write_val_ext(env, decode->op[0].ptr, res, 1);\n bit6 = (res >> 6) & 1;\n bit7 = (res >> 7) & 1;\n /* set eflags: ROR count affects the following flags: C, O */\n- SET_FLAGS_OxxxxC(cpu, bit6 ^ bit7, bit7);\n+ SET_FLAGS_OxxxxC(env, bit6 ^ bit7, bit7);\n }\n break;\n }\n@@ -1103,18 +1102,18 @@ void exec_ror(struct CPUState *cpu, struct x86_decode *decode)\n bit14 = ((uint16_t)decode->op[0].val >> 14) & 1;\n bit15 = ((uint16_t)decode->op[0].val >> 15) & 1;\n /* of = result14 ^ result15 */\n- SET_FLAGS_OxxxxC(cpu, bit14 ^ bit15, bit15);\n+ SET_FLAGS_OxxxxC(env, bit14 ^ bit15, bit15);\n }\n } else {\n count &= 0x0f; /* use only 4 LSB's */\n res = ((uint16_t)decode->op[0].val >> count) |\n ((uint16_t)decode->op[0].val << (16 - count));\n- write_val_ext(cpu, decode->op[0].ptr, res, 2);\n+ write_val_ext(env, decode->op[0].ptr, res, 2);\n \n bit14 = (res >> 14) & 1;\n bit15 = (res >> 15) & 1;\n /* of = result14 ^ result15 */\n- SET_FLAGS_OxxxxC(cpu, bit14 ^ bit15, bit15);\n+ SET_FLAGS_OxxxxC(env, bit14 ^ bit15, bit15);\n }\n break;\n }\n@@ -1127,24 +1126,24 @@ void exec_ror(struct CPUState *cpu, struct x86_decode *decode)\n if (count) {\n res = ((uint32_t)decode->op[0].val >> count) |\n ((uint32_t)decode->op[0].val << (32 - count));\n- write_val_ext(cpu, decode->op[0].ptr, res, 4);\n+ write_val_ext(env, decode->op[0].ptr, res, 4);\n \n bit31 = (res >> 31) & 1;\n bit30 = (res >> 30) & 1;\n /* of = result30 ^ result31 */\n- SET_FLAGS_OxxxxC(cpu, bit30 ^ bit31, bit31);\n+ SET_FLAGS_OxxxxC(env, bit30 ^ bit31, bit31);\n }\n break;\n }\n }\n- RIP(cpu) += decode->len;\n+ RIP(env) += decode->len;\n }\n \n-void exec_rol(struct CPUState *cpu, struct x86_decode *decode)\n+void exec_rol(struct CPUX86State *env, struct x86_decode *decode)\n {\n uint8_t count;\n \n- fetch_operands(cpu, decode, 2, true, true, false);\n+ fetch_operands(env, decode, 2, true, true, false);\n count = decode->op[1].val;\n \n switch (decode->operand_size) {\n@@ -1157,20 +1156,20 @@ void exec_rol(struct CPUState *cpu, struct x86_decode *decode)\n if (count & 0x18) {\n bit0 = ((uint8_t)decode->op[0].val & 1);\n bit7 = ((uint8_t)decode->op[0].val >> 7);\n- SET_FLAGS_OxxxxC(cpu, bit0 ^ bit7, bit0);\n+ SET_FLAGS_OxxxxC(env, bit0 ^ bit7, bit0);\n }\n } else {\n count &= 0x7; /* use only lowest 3 bits */\n res = ((uint8_t)decode->op[0].val << count) |\n ((uint8_t)decode->op[0].val >> (8 - count));\n \n- write_val_ext(cpu, decode->op[0].ptr, res, 1);\n+ write_val_ext(env, decode->op[0].ptr, res, 1);\n /* set eflags:\n * ROL count affects the following flags: C, O\n */\n bit0 = (res & 1);\n bit7 = (res >> 7);\n- SET_FLAGS_OxxxxC(cpu, bit0 ^ bit7, bit0);\n+ SET_FLAGS_OxxxxC(env, bit0 ^ bit7, bit0);\n }\n break;\n }\n@@ -1184,18 +1183,18 @@ void exec_rol(struct CPUState 
*cpu, struct x86_decode *decode)\n bit0 = ((uint16_t)decode->op[0].val & 0x1);\n bit15 = ((uint16_t)decode->op[0].val >> 15);\n /* of = cf ^ result15 */\n- SET_FLAGS_OxxxxC(cpu, bit0 ^ bit15, bit0);\n+ SET_FLAGS_OxxxxC(env, bit0 ^ bit15, bit0);\n }\n } else {\n count &= 0x0f; /* only use bottom 4 bits */\n res = ((uint16_t)decode->op[0].val << count) |\n ((uint16_t)decode->op[0].val >> (16 - count));\n \n- write_val_ext(cpu, decode->op[0].ptr, res, 2);\n+ write_val_ext(env, decode->op[0].ptr, res, 2);\n bit0 = (res & 0x1);\n bit15 = (res >> 15);\n /* of = cf ^ result15 */\n- SET_FLAGS_OxxxxC(cpu, bit0 ^ bit15, bit0);\n+ SET_FLAGS_OxxxxC(env, bit0 ^ bit15, bit0);\n }\n break;\n }\n@@ -1209,25 +1208,25 @@ void exec_rol(struct CPUState *cpu, struct x86_decode *decode)\n res = ((uint32_t)decode->op[0].val << count) |\n ((uint32_t)decode->op[0].val >> (32 - count));\n \n- write_val_ext(cpu, decode->op[0].ptr, res, 4);\n+ write_val_ext(env, decode->op[0].ptr, res, 4);\n bit0 = (res & 0x1);\n bit31 = (res >> 31);\n /* of = cf ^ result31 */\n- SET_FLAGS_OxxxxC(cpu, bit0 ^ bit31, bit0);\n+ SET_FLAGS_OxxxxC(env, bit0 ^ bit31, bit0);\n }\n break;\n }\n }\n- RIP(cpu) += decode->len;\n+ RIP(env) += decode->len;\n }\n \n \n-void exec_rcl(struct CPUState *cpu, struct x86_decode *decode)\n+void exec_rcl(struct CPUX86State *env, struct x86_decode *decode)\n {\n uint8_t count;\n int of = 0, cf = 0;\n \n- fetch_operands(cpu, decode, 2, true, true, false);\n+ fetch_operands(env, decode, 2, true, true, false);\n count = decode->op[1].val & 0x1f;\n \n switch (decode->operand_size) {\n@@ -1241,17 +1240,17 @@ void exec_rcl(struct CPUState *cpu, struct x86_decode *decode)\n }\n \n if (1 == count) {\n- res = (op1_8 << 1) | get_CF(cpu);\n+ res = (op1_8 << 1) | get_CF(env);\n } else {\n- res = (op1_8 << count) | (get_CF(cpu) << (count - 1)) |\n+ res = (op1_8 << count) | (get_CF(env) << (count - 1)) |\n (op1_8 >> (9 - count));\n }\n \n- write_val_ext(cpu, decode->op[0].ptr, res, 1);\n+ write_val_ext(env, decode->op[0].ptr, res, 1);\n \n cf = (op1_8 >> (8 - count)) & 0x01;\n of = cf ^ (res >> 7); /* of = cf ^ result7 */\n- SET_FLAGS_OxxxxC(cpu, of, cf);\n+ SET_FLAGS_OxxxxC(env, of, cf);\n break;\n }\n case 2:\n@@ -1265,19 +1264,19 @@ void exec_rcl(struct CPUState *cpu, struct x86_decode *decode)\n }\n \n if (1 == count) {\n- res = (op1_16 << 1) | get_CF(cpu);\n+ res = (op1_16 << 1) | get_CF(env);\n } else if (count == 16) {\n- res = (get_CF(cpu) << 15) | (op1_16 >> 1);\n+ res = (get_CF(env) << 15) | (op1_16 >> 1);\n } else { /* 2..15 */\n- res = (op1_16 << count) | (get_CF(cpu) << (count - 1)) |\n+ res = (op1_16 << count) | (get_CF(env) << (count - 1)) |\n (op1_16 >> (17 - count));\n }\n \n- write_val_ext(cpu, decode->op[0].ptr, res, 2);\n+ write_val_ext(env, decode->op[0].ptr, res, 2);\n \n cf = (op1_16 >> (16 - count)) & 0x1;\n of = cf ^ (res >> 15); /* of = cf ^ result15 */\n- SET_FLAGS_OxxxxC(cpu, of, cf);\n+ SET_FLAGS_OxxxxC(env, of, cf);\n break;\n }\n case 4:\n@@ -1290,29 +1289,29 @@ void exec_rcl(struct CPUState *cpu, struct x86_decode *decode)\n }\n \n if (1 == count) {\n- res = (op1_32 << 1) | get_CF(cpu);\n+ res = (op1_32 << 1) | get_CF(env);\n } else {\n- res = (op1_32 << count) | (get_CF(cpu) << (count - 1)) |\n+ res = (op1_32 << count) | (get_CF(env) << (count - 1)) |\n (op1_32 >> (33 - count));\n }\n \n- write_val_ext(cpu, decode->op[0].ptr, res, 4);\n+ write_val_ext(env, decode->op[0].ptr, res, 4);\n \n cf = (op1_32 >> (32 - count)) & 0x1;\n of = cf ^ (res >> 31); /* of = cf ^ result31 */\n- 
SET_FLAGS_OxxxxC(cpu, of, cf);\n+ SET_FLAGS_OxxxxC(env, of, cf);\n break;\n }\n }\n- RIP(cpu) += decode->len;\n+ RIP(env) += decode->len;\n }\n \n-void exec_rcr(struct CPUState *cpu, struct x86_decode *decode)\n+void exec_rcr(struct CPUX86State *env, struct x86_decode *decode)\n {\n uint8_t count;\n int of = 0, cf = 0;\n \n- fetch_operands(cpu, decode, 2, true, true, false);\n+ fetch_operands(env, decode, 2, true, true, false);\n count = decode->op[1].val & 0x1f;\n \n switch (decode->operand_size) {\n@@ -1325,14 +1324,14 @@ void exec_rcr(struct CPUState *cpu, struct x86_decode *decode)\n if (!count) {\n break;\n }\n- res = (op1_8 >> count) | (get_CF(cpu) << (8 - count)) |\n+ res = (op1_8 >> count) | (get_CF(env) << (8 - count)) |\n (op1_8 << (9 - count));\n \n- write_val_ext(cpu, decode->op[0].ptr, res, 1);\n+ write_val_ext(env, decode->op[0].ptr, res, 1);\n \n cf = (op1_8 >> (count - 1)) & 0x1;\n of = (((res << 1) ^ res) >> 7) & 0x1; /* of = result6 ^ result7 */\n- SET_FLAGS_OxxxxC(cpu, of, cf);\n+ SET_FLAGS_OxxxxC(env, of, cf);\n break;\n }\n case 2:\n@@ -1344,15 +1343,15 @@ void exec_rcr(struct CPUState *cpu, struct x86_decode *decode)\n if (!count) {\n break;\n }\n- res = (op1_16 >> count) | (get_CF(cpu) << (16 - count)) |\n+ res = (op1_16 >> count) | (get_CF(env) << (16 - count)) |\n (op1_16 << (17 - count));\n \n- write_val_ext(cpu, decode->op[0].ptr, res, 2);\n+ write_val_ext(env, decode->op[0].ptr, res, 2);\n \n cf = (op1_16 >> (count - 1)) & 0x1;\n of = ((uint16_t)((res << 1) ^ res) >> 15) & 0x1; /* of = result15 ^\n result14 */\n- SET_FLAGS_OxxxxC(cpu, of, cf);\n+ SET_FLAGS_OxxxxC(env, of, cf);\n break;\n }\n case 4:\n@@ -1365,47 +1364,47 @@ void exec_rcr(struct CPUState *cpu, struct x86_decode *decode)\n }\n \n if (1 == count) {\n- res = (op1_32 >> 1) | (get_CF(cpu) << 31);\n+ res = (op1_32 >> 1) | (get_CF(env) << 31);\n } else {\n- res = (op1_32 >> count) | (get_CF(cpu) << (32 - count)) |\n+ res = (op1_32 >> count) | (get_CF(env) << (32 - count)) |\n (op1_32 << (33 - count));\n }\n \n- write_val_ext(cpu, decode->op[0].ptr, res, 4);\n+ write_val_ext(env, decode->op[0].ptr, res, 4);\n \n cf = (op1_32 >> (count - 1)) & 0x1;\n of = ((res << 1) ^ res) >> 31; /* of = result30 ^ result31 */\n- SET_FLAGS_OxxxxC(cpu, of, cf);\n+ SET_FLAGS_OxxxxC(env, of, cf);\n break;\n }\n }\n- RIP(cpu) += decode->len;\n+ RIP(env) += decode->len;\n }\n \n-static void exec_xchg(struct CPUState *cpu, struct x86_decode *decode)\n+static void exec_xchg(struct CPUX86State *env, struct x86_decode *decode)\n {\n- fetch_operands(cpu, decode, 2, true, true, false);\n+ fetch_operands(env, decode, 2, true, true, false);\n \n- write_val_ext(cpu, decode->op[0].ptr, decode->op[1].val,\n+ write_val_ext(env, decode->op[0].ptr, decode->op[1].val,\n decode->operand_size);\n- write_val_ext(cpu, decode->op[1].ptr, decode->op[0].val,\n+ write_val_ext(env, decode->op[1].ptr, decode->op[0].val,\n decode->operand_size);\n \n- RIP(cpu) += decode->len;\n+ RIP(env) += decode->len;\n }\n \n-static void exec_xadd(struct CPUState *cpu, struct x86_decode *decode)\n+static void exec_xadd(struct CPUX86State *env, struct x86_decode *decode)\n {\n- EXEC_2OP_ARITH_CMD(cpu, decode, +, SET_FLAGS_OSZAPC_ADD, true);\n- write_val_ext(cpu, decode->op[1].ptr, decode->op[0].val,\n+ EXEC_2OP_ARITH_CMD(env, decode, +, SET_FLAGS_OSZAPC_ADD, true);\n+ write_val_ext(env, decode->op[1].ptr, decode->op[0].val,\n decode->operand_size);\n \n- RIP(cpu) += decode->len;\n+ RIP(env) += decode->len;\n }\n \n static struct cmd_handler {\n enum x86_decode_cmd 
cmd;\n- void (*handler)(struct CPUState *cpu, struct x86_decode *ins);\n+ void (*handler)(struct CPUX86State *env, struct x86_decode *ins);\n } handlers[] = {\n {X86_DECODE_CMD_INVL, NULL,},\n {X86_DECODE_CMD_MOV, exec_mov},\n@@ -1451,7 +1450,7 @@ static struct cmd_handler {\n \n static struct cmd_handler _cmd_handler[X86_DECODE_CMD_LAST];\n \n-static void init_cmd_handler(CPUState *cpu)\n+static void init_cmd_handler()\n {\n int i;\n for (i = 0; i < ARRAY_SIZE(handlers); i++) {\n@@ -1461,45 +1460,51 @@ static void init_cmd_handler(CPUState *cpu)\n \n void load_regs(struct CPUState *cpu)\n {\n+ X86CPU *x86_cpu = X86_CPU(cpu);\n+ CPUX86State *env = &x86_cpu->env;\n+\n int i = 0;\n- RRX(cpu, REG_RAX) = rreg(cpu->hvf_fd, HV_X86_RAX);\n- RRX(cpu, REG_RBX) = rreg(cpu->hvf_fd, HV_X86_RBX);\n- RRX(cpu, REG_RCX) = rreg(cpu->hvf_fd, HV_X86_RCX);\n- RRX(cpu, REG_RDX) = rreg(cpu->hvf_fd, HV_X86_RDX);\n- RRX(cpu, REG_RSI) = rreg(cpu->hvf_fd, HV_X86_RSI);\n- RRX(cpu, REG_RDI) = rreg(cpu->hvf_fd, HV_X86_RDI);\n- RRX(cpu, REG_RSP) = rreg(cpu->hvf_fd, HV_X86_RSP);\n- RRX(cpu, REG_RBP) = rreg(cpu->hvf_fd, HV_X86_RBP);\n+ RRX(env, REG_RAX) = rreg(cpu->hvf_fd, HV_X86_RAX);\n+ RRX(env, REG_RBX) = rreg(cpu->hvf_fd, HV_X86_RBX);\n+ RRX(env, REG_RCX) = rreg(cpu->hvf_fd, HV_X86_RCX);\n+ RRX(env, REG_RDX) = rreg(cpu->hvf_fd, HV_X86_RDX);\n+ RRX(env, REG_RSI) = rreg(cpu->hvf_fd, HV_X86_RSI);\n+ RRX(env, REG_RDI) = rreg(cpu->hvf_fd, HV_X86_RDI);\n+ RRX(env, REG_RSP) = rreg(cpu->hvf_fd, HV_X86_RSP);\n+ RRX(env, REG_RBP) = rreg(cpu->hvf_fd, HV_X86_RBP);\n for (i = 8; i < 16; i++) {\n- RRX(cpu, i) = rreg(cpu->hvf_fd, HV_X86_RAX + i);\n+ RRX(env, i) = rreg(cpu->hvf_fd, HV_X86_RAX + i);\n }\n \n- RFLAGS(cpu) = rreg(cpu->hvf_fd, HV_X86_RFLAGS);\n- rflags_to_lflags(cpu);\n- RIP(cpu) = rreg(cpu->hvf_fd, HV_X86_RIP);\n+ RFLAGS(env) = rreg(cpu->hvf_fd, HV_X86_RFLAGS);\n+ rflags_to_lflags(env);\n+ RIP(env) = rreg(cpu->hvf_fd, HV_X86_RIP);\n }\n \n void store_regs(struct CPUState *cpu)\n {\n+ X86CPU *x86_cpu = X86_CPU(cpu);\n+ CPUX86State *env = &x86_cpu->env;\n+\n int i = 0;\n- wreg(cpu->hvf_fd, HV_X86_RAX, RAX(cpu));\n- wreg(cpu->hvf_fd, HV_X86_RBX, RBX(cpu));\n- wreg(cpu->hvf_fd, HV_X86_RCX, RCX(cpu));\n- wreg(cpu->hvf_fd, HV_X86_RDX, RDX(cpu));\n- wreg(cpu->hvf_fd, HV_X86_RSI, RSI(cpu));\n- wreg(cpu->hvf_fd, HV_X86_RDI, RDI(cpu));\n- wreg(cpu->hvf_fd, HV_X86_RBP, RBP(cpu));\n- wreg(cpu->hvf_fd, HV_X86_RSP, RSP(cpu));\n+ wreg(cpu->hvf_fd, HV_X86_RAX, RAX(env));\n+ wreg(cpu->hvf_fd, HV_X86_RBX, RBX(env));\n+ wreg(cpu->hvf_fd, HV_X86_RCX, RCX(env));\n+ wreg(cpu->hvf_fd, HV_X86_RDX, RDX(env));\n+ wreg(cpu->hvf_fd, HV_X86_RSI, RSI(env));\n+ wreg(cpu->hvf_fd, HV_X86_RDI, RDI(env));\n+ wreg(cpu->hvf_fd, HV_X86_RBP, RBP(env));\n+ wreg(cpu->hvf_fd, HV_X86_RSP, RSP(env));\n for (i = 8; i < 16; i++) {\n- wreg(cpu->hvf_fd, HV_X86_RAX + i, RRX(cpu, i));\n+ wreg(cpu->hvf_fd, HV_X86_RAX + i, RRX(env, i));\n }\n \n- lflags_to_rflags(cpu);\n- wreg(cpu->hvf_fd, HV_X86_RFLAGS, RFLAGS(cpu));\n- macvm_set_rip(cpu, RIP(cpu));\n+ lflags_to_rflags(env);\n+ wreg(cpu->hvf_fd, HV_X86_RFLAGS, RFLAGS(env));\n+ macvm_set_rip(cpu, RIP(env));\n }\n \n-bool exec_instruction(struct CPUState *cpu, struct x86_decode *ins)\n+bool exec_instruction(struct CPUX86State *env, struct x86_decode *ins)\n {\n /*if (hvf_vcpu_id(cpu))\n printf(\"%d, %llx: exec_instruction %s\\n\", hvf_vcpu_id(cpu), RIP(cpu),\n@@ -1509,23 +1514,23 @@ bool exec_instruction(struct CPUState *cpu, struct x86_decode *ins)\n VM_PANIC(\"emulate fpu\\n\");\n } else {\n if 
(!_cmd_handler[ins->cmd].handler) {\n- printf(\"Unimplemented handler (%llx) for %d (%x %x) \\n\", RIP(cpu),\n+ printf(\"Unimplemented handler (%llx) for %d (%x %x) \\n\", RIP(env),\n ins->cmd, ins->opcode[0],\n ins->opcode_len > 1 ? ins->opcode[1] : 0);\n- RIP(cpu) += ins->len;\n+ RIP(env) += ins->len;\n return true;\n }\n \n VM_PANIC_ON_EX(!_cmd_handler[ins->cmd].handler,\n- \"Unimplemented handler (%llx) for %d (%x %x) \\n\", RIP(cpu),\n+ \"Unimplemented handler (%llx) for %d (%x %x) \\n\", RIP(env),\n ins->cmd, ins->opcode[0],\n ins->opcode_len > 1 ? ins->opcode[1] : 0);\n- _cmd_handler[ins->cmd].handler(cpu, ins);\n+ _cmd_handler[ins->cmd].handler(env, ins);\n }\n return true;\n }\n \n-void init_emu(struct CPUState *cpu)\n+void init_emu()\n {\n- init_cmd_handler(cpu);\n+ init_cmd_handler();\n }\ndiff --git a/target/i386/hvf-utils/x86_emu.h b/target/i386/hvf-utils/x86_emu.h\nindex f7a739bb0a..27d1f5b4d4 100644\n--- a/target/i386/hvf-utils/x86_emu.h\n+++ b/target/i386/hvf-utils/x86_emu.h\n@@ -3,9 +3,10 @@\n \n #include \"x86.h\"\n #include \"x86_decode.h\"\n+#include \"cpu.h\"\n \n-void init_emu(struct CPUState *cpu);\n-bool exec_instruction(struct CPUState *cpu, struct x86_decode *ins);\n+void init_emu(void);\n+bool exec_instruction(struct CPUX86State *env, struct x86_decode *ins);\n \n void load_regs(struct CPUState *cpu);\n void store_regs(struct CPUState *cpu);\n@@ -13,19 +14,19 @@ void store_regs(struct CPUState *cpu);\n void simulate_rdmsr(struct CPUState *cpu);\n void simulate_wrmsr(struct CPUState *cpu);\n \n-addr_t read_reg(struct CPUState *cpu, int reg, int size);\n-void write_reg(struct CPUState *cpu, int reg, addr_t val, int size);\n+addr_t read_reg(CPUX86State *env, int reg, int size);\n+void write_reg(CPUX86State *env, int reg, addr_t val, int size);\n addr_t read_val_from_reg(addr_t reg_ptr, int size);\n void write_val_to_reg(addr_t reg_ptr, addr_t val, int size);\n-void write_val_ext(struct CPUState *cpu, addr_t ptr, addr_t val, int size);\n-uint8_t *read_mmio(struct CPUState *cpu, addr_t ptr, int bytes);\n-addr_t read_val_ext(struct CPUState *cpu, addr_t ptr, int size);\n+void write_val_ext(struct CPUX86State *env, addr_t ptr, addr_t val, int size);\n+uint8_t *read_mmio(struct CPUX86State *env, addr_t ptr, int bytes);\n+addr_t read_val_ext(struct CPUX86State *env, addr_t ptr, int size);\n \n-void exec_movzx(struct CPUState *cpu, struct x86_decode *decode);\n-void exec_shl(struct CPUState *cpu, struct x86_decode *decode);\n-void exec_movsx(struct CPUState *cpu, struct x86_decode *decode);\n-void exec_ror(struct CPUState *cpu, struct x86_decode *decode);\n-void exec_rol(struct CPUState *cpu, struct x86_decode *decode);\n-void exec_rcl(struct CPUState *cpu, struct x86_decode *decode);\n-void exec_rcr(struct CPUState *cpu, struct x86_decode *decode);\n+void exec_movzx(struct CPUX86State *env, struct x86_decode *decode);\n+void exec_shl(struct CPUX86State *env, struct x86_decode *decode);\n+void exec_movsx(struct CPUX86State *env, struct x86_decode *decode);\n+void exec_ror(struct CPUX86State *env, struct x86_decode *decode);\n+void exec_rol(struct CPUX86State *env, struct x86_decode *decode);\n+void exec_rcl(struct CPUX86State *env, struct x86_decode *decode);\n+void exec_rcr(struct CPUX86State *env, struct x86_decode *decode);\n #endif\ndiff --git a/target/i386/hvf-utils/x86_flags.c b/target/i386/hvf-utils/x86_flags.c\nindex 187ab9b56b..c833774485 100644\n--- a/target/i386/hvf-utils/x86_flags.c\n+++ b/target/i386/hvf-utils/x86_flags.c\n@@ -28,155 +28,155 @@\n #include 
\"x86_flags.h\"\n #include \"x86.h\"\n \n-void SET_FLAGS_OxxxxC(struct CPUState *cpu, uint32_t new_of, uint32_t new_cf)\n+void SET_FLAGS_OxxxxC(CPUX86State *env, uint32_t new_of, uint32_t new_cf)\n {\n uint32_t temp_po = new_of ^ new_cf;\n- cpu->hvf_x86->lflags.auxbits &= ~(LF_MASK_PO | LF_MASK_CF);\n- cpu->hvf_x86->lflags.auxbits |= (temp_po << LF_BIT_PO) |\n+ env->hvf_emul->lflags.auxbits &= ~(LF_MASK_PO | LF_MASK_CF);\n+ env->hvf_emul->lflags.auxbits |= (temp_po << LF_BIT_PO) |\n (new_cf << LF_BIT_CF);\n }\n \n-void SET_FLAGS_OSZAPC_SUB32(struct CPUState *cpu, uint32_t v1, uint32_t v2,\n+void SET_FLAGS_OSZAPC_SUB32(CPUX86State *env, uint32_t v1, uint32_t v2,\n uint32_t diff)\n {\n SET_FLAGS_OSZAPC_SUB_32(v1, v2, diff);\n }\n \n-void SET_FLAGS_OSZAPC_SUB16(struct CPUState *cpu, uint16_t v1, uint16_t v2,\n+void SET_FLAGS_OSZAPC_SUB16(CPUX86State *env, uint16_t v1, uint16_t v2,\n uint16_t diff)\n {\n SET_FLAGS_OSZAPC_SUB_16(v1, v2, diff);\n }\n \n-void SET_FLAGS_OSZAPC_SUB8(struct CPUState *cpu, uint8_t v1, uint8_t v2,\n+void SET_FLAGS_OSZAPC_SUB8(CPUX86State *env, uint8_t v1, uint8_t v2,\n uint8_t diff)\n {\n SET_FLAGS_OSZAPC_SUB_8(v1, v2, diff);\n }\n \n-void SET_FLAGS_OSZAPC_ADD32(struct CPUState *cpu, uint32_t v1, uint32_t v2,\n+void SET_FLAGS_OSZAPC_ADD32(CPUX86State *env, uint32_t v1, uint32_t v2,\n uint32_t diff)\n {\n SET_FLAGS_OSZAPC_ADD_32(v1, v2, diff);\n }\n \n-void SET_FLAGS_OSZAPC_ADD16(struct CPUState *cpu, uint16_t v1, uint16_t v2,\n+void SET_FLAGS_OSZAPC_ADD16(CPUX86State *env, uint16_t v1, uint16_t v2,\n uint16_t diff)\n {\n SET_FLAGS_OSZAPC_ADD_16(v1, v2, diff);\n }\n \n-void SET_FLAGS_OSZAPC_ADD8(struct CPUState *cpu, uint8_t v1, uint8_t v2,\n+void SET_FLAGS_OSZAPC_ADD8(CPUX86State *env, uint8_t v1, uint8_t v2,\n uint8_t diff)\n {\n SET_FLAGS_OSZAPC_ADD_8(v1, v2, diff);\n }\n \n-void SET_FLAGS_OSZAP_SUB32(struct CPUState *cpu, uint32_t v1, uint32_t v2,\n+void SET_FLAGS_OSZAP_SUB32(CPUX86State *env, uint32_t v1, uint32_t v2,\n uint32_t diff)\n {\n SET_FLAGS_OSZAP_SUB_32(v1, v2, diff);\n }\n \n-void SET_FLAGS_OSZAP_SUB16(struct CPUState *cpu, uint16_t v1, uint16_t v2,\n+void SET_FLAGS_OSZAP_SUB16(CPUX86State *env, uint16_t v1, uint16_t v2,\n uint16_t diff)\n {\n SET_FLAGS_OSZAP_SUB_16(v1, v2, diff);\n }\n \n-void SET_FLAGS_OSZAP_SUB8(struct CPUState *cpu, uint8_t v1, uint8_t v2,\n+void SET_FLAGS_OSZAP_SUB8(CPUX86State *env, uint8_t v1, uint8_t v2,\n uint8_t diff)\n {\n SET_FLAGS_OSZAP_SUB_8(v1, v2, diff);\n }\n \n-void SET_FLAGS_OSZAP_ADD32(struct CPUState *cpu, uint32_t v1, uint32_t v2,\n+void SET_FLAGS_OSZAP_ADD32(CPUX86State *env, uint32_t v1, uint32_t v2,\n uint32_t diff)\n {\n SET_FLAGS_OSZAP_ADD_32(v1, v2, diff);\n }\n \n-void SET_FLAGS_OSZAP_ADD16(struct CPUState *cpu, uint16_t v1, uint16_t v2,\n+void SET_FLAGS_OSZAP_ADD16(CPUX86State *env, uint16_t v1, uint16_t v2,\n uint16_t diff)\n {\n SET_FLAGS_OSZAP_ADD_16(v1, v2, diff);\n }\n \n-void SET_FLAGS_OSZAP_ADD8(struct CPUState *cpu, uint8_t v1, uint8_t v2,\n+void SET_FLAGS_OSZAP_ADD8(CPUX86State *env, uint8_t v1, uint8_t v2,\n uint8_t diff)\n {\n SET_FLAGS_OSZAP_ADD_8(v1, v2, diff);\n }\n \n \n-void SET_FLAGS_OSZAPC_LOGIC32(struct CPUState *cpu, uint32_t diff)\n+void SET_FLAGS_OSZAPC_LOGIC32(CPUX86State *env, uint32_t diff)\n {\n SET_FLAGS_OSZAPC_LOGIC_32(diff);\n }\n \n-void SET_FLAGS_OSZAPC_LOGIC16(struct CPUState *cpu, uint16_t diff)\n+void SET_FLAGS_OSZAPC_LOGIC16(CPUX86State *env, uint16_t diff)\n {\n SET_FLAGS_OSZAPC_LOGIC_16(diff);\n }\n \n-void SET_FLAGS_OSZAPC_LOGIC8(struct CPUState *cpu, uint8_t 
diff)\n+void SET_FLAGS_OSZAPC_LOGIC8(CPUX86State *env, uint8_t diff)\n {\n SET_FLAGS_OSZAPC_LOGIC_8(diff);\n }\n \n-void SET_FLAGS_SHR32(struct CPUState *cpu, uint32_t v, int count, uint32_t res)\n+void SET_FLAGS_SHR32(CPUX86State *env, uint32_t v, int count, uint32_t res)\n {\n int cf = (v >> (count - 1)) & 0x1;\n int of = (((res << 1) ^ res) >> 31);\n \n SET_FLAGS_OSZAPC_LOGIC_32(res);\n- SET_FLAGS_OxxxxC(cpu, of, cf);\n+ SET_FLAGS_OxxxxC(env, of, cf);\n }\n \n-void SET_FLAGS_SHR16(struct CPUState *cpu, uint16_t v, int count, uint16_t res)\n+void SET_FLAGS_SHR16(CPUX86State *env, uint16_t v, int count, uint16_t res)\n {\n int cf = (v >> (count - 1)) & 0x1;\n int of = (((res << 1) ^ res) >> 15);\n \n SET_FLAGS_OSZAPC_LOGIC_16(res);\n- SET_FLAGS_OxxxxC(cpu, of, cf);\n+ SET_FLAGS_OxxxxC(env, of, cf);\n }\n \n-void SET_FLAGS_SHR8(struct CPUState *cpu, uint8_t v, int count, uint8_t res)\n+void SET_FLAGS_SHR8(CPUX86State *env, uint8_t v, int count, uint8_t res)\n {\n int cf = (v >> (count - 1)) & 0x1;\n int of = (((res << 1) ^ res) >> 7);\n \n SET_FLAGS_OSZAPC_LOGIC_8(res);\n- SET_FLAGS_OxxxxC(cpu, of, cf);\n+ SET_FLAGS_OxxxxC(env, of, cf);\n }\n \n-void SET_FLAGS_SAR32(struct CPUState *cpu, int32_t v, int count, uint32_t res)\n+void SET_FLAGS_SAR32(CPUX86State *env, int32_t v, int count, uint32_t res)\n {\n int cf = (v >> (count - 1)) & 0x1;\n \n SET_FLAGS_OSZAPC_LOGIC_32(res);\n- SET_FLAGS_OxxxxC(cpu, 0, cf);\n+ SET_FLAGS_OxxxxC(env, 0, cf);\n }\n \n-void SET_FLAGS_SAR16(struct CPUState *cpu, int16_t v, int count, uint16_t res)\n+void SET_FLAGS_SAR16(CPUX86State *env, int16_t v, int count, uint16_t res)\n {\n int cf = (v >> (count - 1)) & 0x1;\n \n SET_FLAGS_OSZAPC_LOGIC_16(res);\n- SET_FLAGS_OxxxxC(cpu, 0, cf);\n+ SET_FLAGS_OxxxxC(env, 0, cf);\n }\n \n-void SET_FLAGS_SAR8(struct CPUState *cpu, int8_t v, int count, uint8_t res)\n+void SET_FLAGS_SAR8(CPUX86State *env, int8_t v, int count, uint8_t res)\n {\n int cf = (v >> (count - 1)) & 0x1;\n \n SET_FLAGS_OSZAPC_LOGIC_8(res);\n- SET_FLAGS_OxxxxC(cpu, 0, cf);\n+ SET_FLAGS_OxxxxC(env, 0, cf);\n }\n \n \n-void SET_FLAGS_SHL32(struct CPUState *cpu, uint32_t v, int count, uint32_t res)\n+void SET_FLAGS_SHL32(CPUX86State *env, uint32_t v, int count, uint32_t res)\n {\n int of, cf;\n \n@@ -184,10 +184,10 @@ void SET_FLAGS_SHL32(struct CPUState *cpu, uint32_t v, int count, uint32_t res)\n of = cf ^ (res >> 31);\n \n SET_FLAGS_OSZAPC_LOGIC_32(res);\n- SET_FLAGS_OxxxxC(cpu, of, cf);\n+ SET_FLAGS_OxxxxC(env, of, cf);\n }\n \n-void SET_FLAGS_SHL16(struct CPUState *cpu, uint16_t v, int count, uint16_t res)\n+void SET_FLAGS_SHL16(CPUX86State *env, uint16_t v, int count, uint16_t res)\n {\n int of = 0, cf = 0;\n \n@@ -197,10 +197,10 @@ void SET_FLAGS_SHL16(struct CPUState *cpu, uint16_t v, int count, uint16_t res)\n }\n \n SET_FLAGS_OSZAPC_LOGIC_16(res);\n- SET_FLAGS_OxxxxC(cpu, of, cf);\n+ SET_FLAGS_OxxxxC(env, of, cf);\n }\n \n-void SET_FLAGS_SHL8(struct CPUState *cpu, uint8_t v, int count, uint8_t res)\n+void SET_FLAGS_SHL8(CPUX86State *env, uint8_t v, int count, uint8_t res)\n {\n int of = 0, cf = 0;\n \n@@ -210,124 +210,124 @@ void SET_FLAGS_SHL8(struct CPUState *cpu, uint8_t v, int count, uint8_t res)\n }\n \n SET_FLAGS_OSZAPC_LOGIC_8(res);\n- SET_FLAGS_OxxxxC(cpu, of, cf);\n+ SET_FLAGS_OxxxxC(env, of, cf);\n }\n \n-bool get_PF(struct CPUState *cpu)\n+bool get_PF(CPUX86State *env)\n {\n- uint32_t temp = (255 & cpu->hvf_x86->lflags.result);\n- temp = temp ^ (255 & (cpu->hvf_x86->lflags.auxbits >> LF_BIT_PDB));\n+ uint32_t temp = (255 & 
env->hvf_emul->lflags.result);\n+ temp = temp ^ (255 & (env->hvf_emul->lflags.auxbits >> LF_BIT_PDB));\n temp = (temp ^ (temp >> 4)) & 0x0F;\n return (0x9669U >> temp) & 1;\n }\n \n-void set_PF(struct CPUState *cpu, bool val)\n+void set_PF(CPUX86State *env, bool val)\n {\n- uint32_t temp = (255 & cpu->hvf_x86->lflags.result) ^ (!val);\n- cpu->hvf_x86->lflags.auxbits &= ~(LF_MASK_PDB);\n- cpu->hvf_x86->lflags.auxbits |= (temp << LF_BIT_PDB);\n+ uint32_t temp = (255 & env->hvf_emul->lflags.result) ^ (!val);\n+ env->hvf_emul->lflags.auxbits &= ~(LF_MASK_PDB);\n+ env->hvf_emul->lflags.auxbits |= (temp << LF_BIT_PDB);\n }\n \n-bool _get_OF(struct CPUState *cpu)\n+bool _get_OF(CPUX86State *env)\n {\n- return ((cpu->hvf_x86->lflags.auxbits + (1U << LF_BIT_PO)) >> LF_BIT_CF) & 1;\n+ return ((env->hvf_emul->lflags.auxbits + (1U << LF_BIT_PO)) >> LF_BIT_CF) & 1;\n }\n \n-bool get_OF(struct CPUState *cpu)\n+bool get_OF(CPUX86State *env)\n {\n- return _get_OF(cpu);\n+ return _get_OF(env);\n }\n \n-bool _get_CF(struct CPUState *cpu)\n+bool _get_CF(CPUX86State *env)\n {\n- return (cpu->hvf_x86->lflags.auxbits >> LF_BIT_CF) & 1;\n+ return (env->hvf_emul->lflags.auxbits >> LF_BIT_CF) & 1;\n }\n \n-bool get_CF(struct CPUState *cpu)\n+bool get_CF(CPUX86State *env)\n {\n- return _get_CF(cpu);\n+ return _get_CF(env);\n }\n \n-void set_OF(struct CPUState *cpu, bool val)\n+void set_OF(CPUX86State *env, bool val)\n {\n- SET_FLAGS_OxxxxC(cpu, val, _get_CF(cpu));\n+ SET_FLAGS_OxxxxC(env, val, _get_CF(env));\n }\n \n-void set_CF(struct CPUState *cpu, bool val)\n+void set_CF(CPUX86State *env, bool val)\n {\n- SET_FLAGS_OxxxxC(cpu, _get_OF(cpu), (val));\n+ SET_FLAGS_OxxxxC(env, _get_OF(env), (val));\n }\n \n-bool get_AF(struct CPUState *cpu)\n+bool get_AF(CPUX86State *env)\n {\n- return (cpu->hvf_x86->lflags.auxbits >> LF_BIT_AF) & 1;\n+ return (env->hvf_emul->lflags.auxbits >> LF_BIT_AF) & 1;\n }\n \n-void set_AF(struct CPUState *cpu, bool val)\n+void set_AF(CPUX86State *env, bool val)\n {\n- cpu->hvf_x86->lflags.auxbits &= ~(LF_MASK_AF);\n- cpu->hvf_x86->lflags.auxbits |= (val) << LF_BIT_AF;\n+ env->hvf_emul->lflags.auxbits &= ~(LF_MASK_AF);\n+ env->hvf_emul->lflags.auxbits |= (val) << LF_BIT_AF;\n }\n \n-bool get_ZF(struct CPUState *cpu)\n+bool get_ZF(CPUX86State *env)\n {\n- return !cpu->hvf_x86->lflags.result;\n+ return !env->hvf_emul->lflags.result;\n }\n \n-void set_ZF(struct CPUState *cpu, bool val)\n+void set_ZF(CPUX86State *env, bool val)\n {\n if (val) {\n- cpu->hvf_x86->lflags.auxbits ^=\n- (((cpu->hvf_x86->lflags.result >> LF_SIGN_BIT) & 1) << LF_BIT_SD);\n+ env->hvf_emul->lflags.auxbits ^=\n+ (((env->hvf_emul->lflags.result >> LF_SIGN_BIT) & 1) << LF_BIT_SD);\n /* merge the parity bits into the Parity Delta Byte */\n- uint32_t temp_pdb = (255 & cpu->hvf_x86->lflags.result);\n- cpu->hvf_x86->lflags.auxbits ^= (temp_pdb << LF_BIT_PDB);\n+ uint32_t temp_pdb = (255 & env->hvf_emul->lflags.result);\n+ env->hvf_emul->lflags.auxbits ^= (temp_pdb << LF_BIT_PDB);\n /* now zero the .result value */\n- cpu->hvf_x86->lflags.result = 0;\n+ env->hvf_emul->lflags.result = 0;\n } else {\n- cpu->hvf_x86->lflags.result |= (1 << 8);\n+ env->hvf_emul->lflags.result |= (1 << 8);\n }\n }\n \n-bool get_SF(struct CPUState *cpu)\n+bool get_SF(CPUX86State *env)\n {\n- return ((cpu->hvf_x86->lflags.result >> LF_SIGN_BIT) ^\n- (cpu->hvf_x86->lflags.auxbits >> LF_BIT_SD)) & 1;\n+ return ((env->hvf_emul->lflags.result >> LF_SIGN_BIT) ^\n+ (env->hvf_emul->lflags.auxbits >> LF_BIT_SD)) & 1;\n }\n \n-void set_SF(struct CPUState *cpu, 
bool val)\n+void set_SF(CPUX86State *env, bool val)\n {\n- bool temp_sf = get_SF(cpu);\n- cpu->hvf_x86->lflags.auxbits ^= (temp_sf ^ val) << LF_BIT_SD;\n+ bool temp_sf = get_SF(env);\n+ env->hvf_emul->lflags.auxbits ^= (temp_sf ^ val) << LF_BIT_SD;\n }\n \n-void set_OSZAPC(struct CPUState *cpu, uint32_t flags32)\n+void set_OSZAPC(CPUX86State *env, uint32_t flags32)\n {\n- set_OF(cpu, cpu->hvf_x86->rflags.of);\n- set_SF(cpu, cpu->hvf_x86->rflags.sf);\n- set_ZF(cpu, cpu->hvf_x86->rflags.zf);\n- set_AF(cpu, cpu->hvf_x86->rflags.af);\n- set_PF(cpu, cpu->hvf_x86->rflags.pf);\n- set_CF(cpu, cpu->hvf_x86->rflags.cf);\n+ set_OF(env, env->hvf_emul->rflags.of);\n+ set_SF(env, env->hvf_emul->rflags.sf);\n+ set_ZF(env, env->hvf_emul->rflags.zf);\n+ set_AF(env, env->hvf_emul->rflags.af);\n+ set_PF(env, env->hvf_emul->rflags.pf);\n+ set_CF(env, env->hvf_emul->rflags.cf);\n }\n \n-void lflags_to_rflags(struct CPUState *cpu)\n+void lflags_to_rflags(CPUX86State *env)\n {\n- cpu->hvf_x86->rflags.cf = get_CF(cpu);\n- cpu->hvf_x86->rflags.pf = get_PF(cpu);\n- cpu->hvf_x86->rflags.af = get_AF(cpu);\n- cpu->hvf_x86->rflags.zf = get_ZF(cpu);\n- cpu->hvf_x86->rflags.sf = get_SF(cpu);\n- cpu->hvf_x86->rflags.of = get_OF(cpu);\n+ env->hvf_emul->rflags.cf = get_CF(env);\n+ env->hvf_emul->rflags.pf = get_PF(env);\n+ env->hvf_emul->rflags.af = get_AF(env);\n+ env->hvf_emul->rflags.zf = get_ZF(env);\n+ env->hvf_emul->rflags.sf = get_SF(env);\n+ env->hvf_emul->rflags.of = get_OF(env);\n }\n \n-void rflags_to_lflags(struct CPUState *cpu)\n+void rflags_to_lflags(CPUX86State *env)\n {\n- cpu->hvf_x86->lflags.auxbits = cpu->hvf_x86->lflags.result = 0;\n- set_OF(cpu, cpu->hvf_x86->rflags.of);\n- set_SF(cpu, cpu->hvf_x86->rflags.sf);\n- set_ZF(cpu, cpu->hvf_x86->rflags.zf);\n- set_AF(cpu, cpu->hvf_x86->rflags.af);\n- set_PF(cpu, cpu->hvf_x86->rflags.pf);\n- set_CF(cpu, cpu->hvf_x86->rflags.cf);\n+ env->hvf_emul->lflags.auxbits = env->hvf_emul->lflags.result = 0;\n+ set_OF(env, env->hvf_emul->rflags.of);\n+ set_SF(env, env->hvf_emul->rflags.sf);\n+ set_ZF(env, env->hvf_emul->rflags.zf);\n+ set_AF(env, env->hvf_emul->rflags.af);\n+ set_PF(env, env->hvf_emul->rflags.pf);\n+ set_CF(env, env->hvf_emul->rflags.cf);\n }\ndiff --git a/target/i386/hvf-utils/x86_flags.h b/target/i386/hvf-utils/x86_flags.h\nindex 68a0c10b90..57a524240c 100644\n--- a/target/i386/hvf-utils/x86_flags.h\n+++ b/target/i386/hvf-utils/x86_flags.h\n@@ -24,14 +24,10 @@\n #define __X86_FLAGS_H__\n \n #include \"x86_gen.h\"\n+#include \"cpu.h\"\n \n /* this is basically bocsh code */\n \n-typedef struct lazy_flags {\n- addr_t result;\n- addr_t auxbits;\n-} lazy_flags;\n-\n #define LF_SIGN_BIT 31\n \n #define LF_BIT_SD (0) /* lazy Sign Flag Delta */\n@@ -63,7 +59,7 @@ typedef struct lazy_flags {\n #define SET_FLAGS_OSZAPC_SIZE(size, lf_carries, lf_result) { \\\n addr_t temp = ((lf_carries) & (LF_MASK_AF)) | \\\n (((lf_carries) >> (size - 2)) << LF_BIT_PO); \\\n- cpu->hvf_x86->lflags.result = (addr_t)(int##size##_t)(lf_result); \\\n+ env->hvf_emul->lflags.result = (addr_t)(int##size##_t)(lf_result); \\\n if ((size) == 32) { \\\n temp = ((lf_carries) & ~(LF_MASK_PDB | LF_MASK_SD)); \\\n } else if ((size) == 16) { \\\n@@ -73,7 +69,7 @@ typedef struct lazy_flags {\n } else { \\\n VM_PANIC(\"unimplemented\"); \\\n } \\\n- cpu->hvf_x86->lflags.auxbits = (addr_t)(uint32_t)temp; \\\n+ env->hvf_emul->lflags.auxbits = (addr_t)(uint32_t)temp; \\\n }\n \n /* carries, result */\n@@ -135,10 +131,10 @@ typedef struct lazy_flags {\n } else { \\\n VM_PANIC(\"unimplemented\"); \\\n 
} \\\n- cpu->hvf_x86->lflags.result = (addr_t)(int##size##_t)(lf_result); \\\n- addr_t delta_c = (cpu->hvf_x86->lflags.auxbits ^ temp) & LF_MASK_CF; \\\n+ env->hvf_emul->lflags.result = (addr_t)(int##size##_t)(lf_result); \\\n+ addr_t delta_c = (env->hvf_emul->lflags.auxbits ^ temp) & LF_MASK_CF; \\\n delta_c ^= (delta_c >> 1); \\\n- cpu->hvf_x86->lflags.auxbits = (addr_t)(uint32_t)(temp ^ delta_c); \\\n+ env->hvf_emul->lflags.auxbits = (addr_t)(uint32_t)(temp ^ delta_c); \\\n }\n \n /* carries, result */\n@@ -179,69 +175,69 @@ typedef struct lazy_flags {\n #define SET_FLAGS_OSZAxC_LOGIC_32(result_32) \\\n SET_FLAGS_OSZAxC_LOGIC_SIZE(32, (result_32))\n \n-void lflags_to_rflags(struct CPUState *cpu);\n-void rflags_to_lflags(struct CPUState *cpu);\n-\n-bool get_PF(struct CPUState *cpu);\n-void set_PF(struct CPUState *cpu, bool val);\n-bool get_CF(struct CPUState *cpu);\n-void set_CF(struct CPUState *cpu, bool val);\n-bool get_AF(struct CPUState *cpu);\n-void set_AF(struct CPUState *cpu, bool val);\n-bool get_ZF(struct CPUState *cpu);\n-void set_ZF(struct CPUState *cpu, bool val);\n-bool get_SF(struct CPUState *cpu);\n-void set_SF(struct CPUState *cpu, bool val);\n-bool get_OF(struct CPUState *cpu);\n-void set_OF(struct CPUState *cpu, bool val);\n-void set_OSZAPC(struct CPUState *cpu, uint32_t flags32);\n-\n-void SET_FLAGS_OxxxxC(struct CPUState *cpu, uint32_t new_of, uint32_t new_cf);\n-\n-void SET_FLAGS_OSZAPC_SUB32(struct CPUState *cpu, uint32_t v1, uint32_t v2,\n+void lflags_to_rflags(CPUX86State *env);\n+void rflags_to_lflags(CPUX86State *env);\n+\n+bool get_PF(CPUX86State *env);\n+void set_PF(CPUX86State *env, bool val);\n+bool get_CF(CPUX86State *env);\n+void set_CF(CPUX86State *env, bool val);\n+bool get_AF(CPUX86State *env);\n+void set_AF(CPUX86State *env, bool val);\n+bool get_ZF(CPUX86State *env);\n+void set_ZF(CPUX86State *env, bool val);\n+bool get_SF(CPUX86State *env);\n+void set_SF(CPUX86State *env, bool val);\n+bool get_OF(CPUX86State *env);\n+void set_OF(CPUX86State *env, bool val);\n+void set_OSZAPC(CPUX86State *env, uint32_t flags32);\n+\n+void SET_FLAGS_OxxxxC(CPUX86State *env, uint32_t new_of, uint32_t new_cf);\n+\n+void SET_FLAGS_OSZAPC_SUB32(CPUX86State *env, uint32_t v1, uint32_t v2,\n uint32_t diff);\n-void SET_FLAGS_OSZAPC_SUB16(struct CPUState *cpu, uint16_t v1, uint16_t v2,\n+void SET_FLAGS_OSZAPC_SUB16(CPUX86State *env, uint16_t v1, uint16_t v2,\n uint16_t diff);\n-void SET_FLAGS_OSZAPC_SUB8(struct CPUState *cpu, uint8_t v1, uint8_t v2,\n+void SET_FLAGS_OSZAPC_SUB8(CPUX86State *env, uint8_t v1, uint8_t v2,\n uint8_t diff);\n \n-void SET_FLAGS_OSZAPC_ADD32(struct CPUState *cpu, uint32_t v1, uint32_t v2,\n+void SET_FLAGS_OSZAPC_ADD32(CPUX86State *env, uint32_t v1, uint32_t v2,\n uint32_t diff);\n-void SET_FLAGS_OSZAPC_ADD16(struct CPUState *cpu, uint16_t v1, uint16_t v2,\n+void SET_FLAGS_OSZAPC_ADD16(CPUX86State *env, uint16_t v1, uint16_t v2,\n uint16_t diff);\n-void SET_FLAGS_OSZAPC_ADD8(struct CPUState *cpu, uint8_t v1, uint8_t v2,\n+void SET_FLAGS_OSZAPC_ADD8(CPUX86State *env, uint8_t v1, uint8_t v2,\n uint8_t diff);\n \n-void SET_FLAGS_OSZAP_SUB32(struct CPUState *cpu, uint32_t v1, uint32_t v2,\n+void SET_FLAGS_OSZAP_SUB32(CPUX86State *env, uint32_t v1, uint32_t v2,\n uint32_t diff);\n-void SET_FLAGS_OSZAP_SUB16(struct CPUState *cpu, uint16_t v1, uint16_t v2,\n+void SET_FLAGS_OSZAP_SUB16(CPUX86State *env, uint16_t v1, uint16_t v2,\n uint16_t diff);\n-void SET_FLAGS_OSZAP_SUB8(struct CPUState *cpu, uint8_t v1, uint8_t v2,\n+void SET_FLAGS_OSZAP_SUB8(CPUX86State 
*env, uint8_t v1, uint8_t v2,\n uint8_t diff);\n \n-void SET_FLAGS_OSZAP_ADD32(struct CPUState *cpu, uint32_t v1, uint32_t v2,\n+void SET_FLAGS_OSZAP_ADD32(CPUX86State *env, uint32_t v1, uint32_t v2,\n uint32_t diff);\n-void SET_FLAGS_OSZAP_ADD16(struct CPUState *cpu, uint16_t v1, uint16_t v2,\n+void SET_FLAGS_OSZAP_ADD16(CPUX86State *env, uint16_t v1, uint16_t v2,\n uint16_t diff);\n-void SET_FLAGS_OSZAP_ADD8(struct CPUState *cpu, uint8_t v1, uint8_t v2,\n+void SET_FLAGS_OSZAP_ADD8(CPUX86State *env, uint8_t v1, uint8_t v2,\n uint8_t diff);\n \n-void SET_FLAGS_OSZAPC_LOGIC32(struct CPUState *cpu, uint32_t diff);\n-void SET_FLAGS_OSZAPC_LOGIC16(struct CPUState *cpu, uint16_t diff);\n-void SET_FLAGS_OSZAPC_LOGIC8(struct CPUState *cpu, uint8_t diff);\n+void SET_FLAGS_OSZAPC_LOGIC32(CPUX86State *env, uint32_t diff);\n+void SET_FLAGS_OSZAPC_LOGIC16(CPUX86State *env, uint16_t diff);\n+void SET_FLAGS_OSZAPC_LOGIC8(CPUX86State *env, uint8_t diff);\n \n-void SET_FLAGS_SHR32(struct CPUState *cpu, uint32_t v, int count, uint32_t res);\n-void SET_FLAGS_SHR16(struct CPUState *cpu, uint16_t v, int count, uint16_t res);\n-void SET_FLAGS_SHR8(struct CPUState *cpu, uint8_t v, int count, uint8_t res);\n+void SET_FLAGS_SHR32(CPUX86State *env, uint32_t v, int count, uint32_t res);\n+void SET_FLAGS_SHR16(CPUX86State *env, uint16_t v, int count, uint16_t res);\n+void SET_FLAGS_SHR8(CPUX86State *env, uint8_t v, int count, uint8_t res);\n \n-void SET_FLAGS_SAR32(struct CPUState *cpu, int32_t v, int count, uint32_t res);\n-void SET_FLAGS_SAR16(struct CPUState *cpu, int16_t v, int count, uint16_t res);\n-void SET_FLAGS_SAR8(struct CPUState *cpu, int8_t v, int count, uint8_t res);\n+void SET_FLAGS_SAR32(CPUX86State *env, int32_t v, int count, uint32_t res);\n+void SET_FLAGS_SAR16(CPUX86State *env, int16_t v, int count, uint16_t res);\n+void SET_FLAGS_SAR8(CPUX86State *env, int8_t v, int count, uint8_t res);\n \n-void SET_FLAGS_SHL32(struct CPUState *cpu, uint32_t v, int count, uint32_t res);\n-void SET_FLAGS_SHL16(struct CPUState *cpu, uint16_t v, int count, uint16_t res);\n-void SET_FLAGS_SHL8(struct CPUState *cpu, uint8_t v, int count, uint8_t res);\n+void SET_FLAGS_SHL32(CPUX86State *env, uint32_t v, int count, uint32_t res);\n+void SET_FLAGS_SHL16(CPUX86State *env, uint16_t v, int count, uint16_t res);\n+void SET_FLAGS_SHL8(CPUX86State *env, uint8_t v, int count, uint8_t res);\n \n-bool _get_OF(struct CPUState *cpu);\n-bool _get_CF(struct CPUState *cpu);\n+bool _get_OF(CPUX86State *env);\n+bool _get_CF(CPUX86State *env);\n #endif /* __X86_FLAGS_H__ */\ndiff --git a/target/i386/hvf-utils/x86hvf.c b/target/i386/hvf-utils/x86hvf.c\nindex 819d760624..8986b4e5e5 100644\n--- a/target/i386/hvf-utils/x86hvf.c\n+++ b/target/i386/hvf-utils/x86hvf.c\n@@ -356,9 +356,10 @@ void vmx_clear_int_window_exiting(CPUState *cpu)\n \n void hvf_inject_interrupts(CPUState *cpu_state)\n {\n- X86CPU *x86cpu = X86_CPU(cpu_state);\n int allow_nmi = !(rvmcs(cpu_state->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY) &\n- VMCS_INTERRUPTIBILITY_NMI_BLOCKING);\n+ VMCS_INTERRUPTIBILITY_NMI_BLOCKING);\n+ X86CPU *x86cpu = X86_CPU(cpu_state);\n+ CPUX86State *env = &x86cpu->env;\n \n uint64_t idt_info = rvmcs(cpu_state->hvf_fd, VMCS_IDT_VECTORING_INFO);\n uint64_t info = 0;\n@@ -415,9 +416,9 @@ void hvf_inject_interrupts(CPUState *cpu_state)\n }\n }\n \n- if (cpu_state->hvf_x86->interruptable &&\n+ if (env->hvf_emul->interruptable &&\n (cpu_state->interrupt_request & CPU_INTERRUPT_HARD) &&\n- (EFLAGS(cpu_state) & IF_MASK) && !(info & VMCS_INTR_VALID)) {\n+ 
(EFLAGS(env) & IF_MASK) && !(info & VMCS_INTR_VALID)) {\n int line = cpu_get_pic_interrupt(&x86cpu->env);\n cpu_state->interrupt_request &= ~CPU_INTERRUPT_HARD;\n if (line >= 0) {\n@@ -435,7 +436,7 @@ int hvf_process_events(CPUState *cpu_state)\n X86CPU *cpu = X86_CPU(cpu_state);\n CPUX86State *env = &cpu->env;\n \n- EFLAGS(cpu_state) = rreg(cpu_state->hvf_fd, HV_X86_RFLAGS);\n+ EFLAGS(env) = rreg(cpu_state->hvf_fd, HV_X86_RFLAGS);\n \n if (cpu_state->interrupt_request & CPU_INTERRUPT_INIT) {\n hvf_cpu_synchronize_state(cpu_state);\n@@ -447,7 +448,7 @@ int hvf_process_events(CPUState *cpu_state)\n apic_poll_irq(cpu->apic_state);\n }\n if (((cpu_state->interrupt_request & CPU_INTERRUPT_HARD) &&\n- (EFLAGS(cpu_state) & IF_MASK)) ||\n+ (EFLAGS(env) & IF_MASK)) ||\n (cpu_state->interrupt_request & CPU_INTERRUPT_NMI)) {\n cpu_state->halted = 0;\n }\n@@ -463,4 +464,3 @@ int hvf_process_events(CPUState *cpu_state)\n }\n return cpu_state->halted;\n }\n-\n", "prefixes": [ "12/14" ] }
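
The hunks above switch the Bochs-derived lazy-flags helpers from taking a struct CPUState to taking a CPUX86State, with the lazy state now reached through env->hvf_emul instead of cpu->hvf_x86. As a minimal standalone sketch of the technique those helpers implement (this is not QEMU code; the LF_BIT_PDB position and the struct layout are assumptions for illustration only), the parity-flag computation seen in get_PF() can be reproduced like this:

    /*
     * Standalone illustration of the lazy parity-flag scheme used in the
     * patch above: PF is not stored as a bit, but derived on demand from
     * the low byte of the last result XORed with a "parity delta byte"
     * kept in the auxiliary bits. Names and constants are hypothetical.
     */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define LF_BIT_PDB 8   /* assumed position of the parity delta byte */

    struct lazy_flags {
        uint32_t result;   /* result of the last flag-setting operation */
        uint32_t auxbits;  /* packed carry/overflow/delta auxiliary bits */
    };

    /* Mirrors get_PF(): parity of (result ^ parity-delta-byte), low byte. */
    static bool lazy_get_pf(const struct lazy_flags *lf)
    {
        uint32_t temp = (255 & lf->result) ^
                        (255 & (lf->auxbits >> LF_BIT_PDB));
        temp = (temp ^ (temp >> 4)) & 0x0F;   /* fold 8 bits down to 4 */
        return (0x9669U >> temp) & 1;         /* 16-entry parity lookup */
    }

    int main(void)
    {
        struct lazy_flags lf = { .result = 0x03, .auxbits = 0 };
        /* 0x03 has two set bits in its low byte, so x86 PF should be 1. */
        printf("PF = %d\n", lazy_get_pf(&lf));
        return 0;
    }

The point of the scheme is that the arithmetic helpers only record the result and the carry/overflow auxiliary bits; individual EFLAGS bits such as PF, ZF, or SF are reconstructed lazily when read (lflags_to_rflags), rather than being recomputed after every emulated instruction.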