get:
Show a patch.

patch:
Partially update a patch (only the supplied fields change).

put:
Update a patch.
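
Reads are unauthenticated. As a minimal sketch (assuming the Python
requests library, and that the endpoint serves JSON by default), the
exchange shown below can be reproduced programmatically:

    import requests

    # Fetch the patch object shown below; no API token is needed for GET.
    resp = requests.get("http://patchwork.ozlabs.org/api/patches/1429471/")
    resp.raise_for_status()
    patch = resp.json()

    print(patch["name"])   # "[v6,04/11] hvf: Introduce hvf vcpu struct"
    print(patch["state"])  # "new"
    print(patch["mbox"])   # URL of the raw mbox for this patch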

GET /api/patches/1429471/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 1429471,
    "url": "http://patchwork.ozlabs.org/api/patches/1429471/?format=api",
    "web_url": "http://patchwork.ozlabs.org/project/qemu-devel/patch/20210120224444.71840-5-agraf@csgraf.de/",
    "project": {
        "id": 14,
        "url": "http://patchwork.ozlabs.org/api/projects/14/?format=api",
        "name": "QEMU Development",
        "link_name": "qemu-devel",
        "list_id": "qemu-devel.nongnu.org",
        "list_email": "qemu-devel@nongnu.org",
        "web_url": "",
        "scm_url": "",
        "webscm_url": "",
        "list_archive_url": "",
        "list_archive_url_format": "",
        "commit_url_format": ""
    },
    "msgid": "<20210120224444.71840-5-agraf@csgraf.de>",
    "list_archive_url": null,
    "date": "2021-01-20T22:44:37",
    "name": "[v6,04/11] hvf: Introduce hvf vcpu struct",
    "commit_ref": null,
    "pull_url": null,
    "state": "new",
    "archived": false,
    "hash": "4a601e4454108a33c935cce2d2c0f6cf6049fa87",
    "submitter": {
        "id": 65661,
        "url": "http://patchwork.ozlabs.org/api/people/65661/?format=api",
        "name": "Alexander Graf",
        "email": "agraf@csgraf.de"
    },
    "delegate": null,
    "mbox": "http://patchwork.ozlabs.org/project/qemu-devel/patch/20210120224444.71840-5-agraf@csgraf.de/mbox/",
    "series": [
        {
            "id": 225556,
            "url": "http://patchwork.ozlabs.org/api/series/225556/?format=api",
            "web_url": "http://patchwork.ozlabs.org/project/qemu-devel/list/?series=225556",
            "date": "2021-01-20T22:44:33",
            "name": "hvf: Implement Apple Silicon Support",
            "version": 6,
            "mbox": "http://patchwork.ozlabs.org/series/225556/mbox/"
        }
    ],
    "comments": "http://patchwork.ozlabs.org/api/patches/1429471/comments/",
    "check": "pending",
    "checks": "http://patchwork.ozlabs.org/api/patches/1429471/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org>",
        "X-Original-To": "incoming@patchwork.ozlabs.org",
        "Delivered-To": "patchwork-incoming@bilbo.ozlabs.org",
        "Authentication-Results": "ozlabs.org;\n spf=pass (sender SPF authorized) smtp.mailfrom=nongnu.org\n (client-ip=209.51.188.17; helo=lists.gnu.org;\n envelope-from=qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org;\n receiver=<UNKNOWN>)",
        "Received": [
            "from lists.gnu.org (lists.gnu.org [209.51.188.17])\n\t(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))\n\t(No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 4DLgh80Dnsz9sT6\n\tfor <incoming@patchwork.ozlabs.org>; Thu, 21 Jan 2021 09:49:44 +1100 (AEDT)",
            "from localhost ([::1]:45934 helo=lists1p.gnu.org)\n\tby lists.gnu.org with esmtp (Exim 4.90_1)\n\t(envelope-from <qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org>)\n\tid 1l2MIH-0000QA-Vv\n\tfor incoming@patchwork.ozlabs.org; Wed, 20 Jan 2021 17:49:42 -0500",
            "from eggs.gnu.org ([2001:470:142:3::10]:56596)\n by lists.gnu.org with esmtps (TLS1.2:ECDHE_RSA_AES_256_GCM_SHA384:256)\n (Exim 4.90_1) (envelope-from <agraf@csgraf.de>)\n id 1l2MDh-0003tr-SC; Wed, 20 Jan 2021 17:44:58 -0500",
            "from mail.csgraf.de ([188.138.100.120]:45132\n helo=zulu616.server4you.de) by eggs.gnu.org with esmtp (Exim 4.90_1)\n (envelope-from <agraf@csgraf.de>)\n id 1l2MDb-0001Tj-7b; Wed, 20 Jan 2021 17:44:57 -0500",
            "from localhost.localdomain\n (dynamic-077-002-091-253.77.2.pool.telefonica.de [77.2.91.253])\n by csgraf.de (Postfix) with ESMTPSA id 862FC3900545;\n Wed, 20 Jan 2021 23:44:48 +0100 (CET)"
        ],
        "From": "Alexander Graf <agraf@csgraf.de>",
        "To": "qemu-devel@nongnu.org",
        "Subject": "[PATCH v6 04/11] hvf: Introduce hvf vcpu struct",
        "Date": "Wed, 20 Jan 2021 23:44:37 +0100",
        "Message-Id": "<20210120224444.71840-5-agraf@csgraf.de>",
        "X-Mailer": "git-send-email 2.24.3 (Apple Git-128)",
        "In-Reply-To": "<20210120224444.71840-1-agraf@csgraf.de>",
        "References": "<20210120224444.71840-1-agraf@csgraf.de>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain; charset=UTF-8",
        "Content-Transfer-Encoding": "8bit",
        "Received-SPF": "pass client-ip=188.138.100.120; envelope-from=agraf@csgraf.de;\n helo=zulu616.server4you.de",
        "X-Spam_score_int": "-18",
        "X-Spam_score": "-1.9",
        "X-Spam_bar": "-",
        "X-Spam_report": "(-1.9 / 5.0 requ) BAYES_00=-1.9, SPF_HELO_NONE=0.001,\n SPF_PASS=-0.001 autolearn=ham autolearn_force=no",
        "X-Spam_action": "no action",
        "X-BeenThere": "qemu-devel@nongnu.org",
        "X-Mailman-Version": "2.1.23",
        "Precedence": "list",
        "List-Id": "<qemu-devel.nongnu.org>",
        "List-Unsubscribe": "<https://lists.nongnu.org/mailman/options/qemu-devel>,\n <mailto:qemu-devel-request@nongnu.org?subject=unsubscribe>",
        "List-Archive": "<https://lists.nongnu.org/archive/html/qemu-devel>",
        "List-Post": "<mailto:qemu-devel@nongnu.org>",
        "List-Help": "<mailto:qemu-devel-request@nongnu.org?subject=help>",
        "List-Subscribe": "<https://lists.nongnu.org/mailman/listinfo/qemu-devel>,\n <mailto:qemu-devel-request@nongnu.org?subject=subscribe>",
        "Cc": "Peter Maydell <peter.maydell@linaro.org>,\n Eduardo Habkost <ehabkost@redhat.com>,\n =?utf-8?q?Alex_Benn=C3=A9e?= <alex.bennee@linaro.org>,\n Richard Henderson <richard.henderson@linaro.org>,\n Cameron Esfahani <dirty@apple.com>, Roman Bolshakov <r.bolshakov@yadro.com>,\n qemu-arm@nongnu.org, Frank Yang <lfy@google.com>,\n Paolo Bonzini <pbonzini@redhat.com>, Peter Collingbourne <pcc@google.com>",
        "Errors-To": "qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org",
        "Sender": "\"Qemu-devel\"\n <qemu-devel-bounces+incoming=patchwork.ozlabs.org@nongnu.org>"
    },
    "content": "We will need more than a single field for hvf going forward. To keep\nthe global vcpu struct uncluttered, let's allocate a special hvf vcpu\nstruct, similar to how hax does it.\n\nSigned-off-by: Alexander Graf <agraf@csgraf.de>\nReviewed-by: Roman Bolshakov <r.bolshakov@yadro.com>\nTested-by: Roman Bolshakov <r.bolshakov@yadro.com>\nReviewed-by: Alex Bennée <alex.bennee@linaro.org>\n\n---\n\nv4 -> v5:\n\n  - Use g_free() on destroy\n---\n accel/hvf/hvf-cpus.c        |   8 +-\n include/hw/core/cpu.h       |   3 +-\n include/sysemu/hvf_int.h    |   4 +\n target/i386/hvf/hvf.c       | 102 +++++++++---------\n target/i386/hvf/vmx.h       |  24 +++--\n target/i386/hvf/x86.c       |  28 ++---\n target/i386/hvf/x86_descr.c |  26 ++---\n target/i386/hvf/x86_emu.c   |  62 +++++------\n target/i386/hvf/x86_mmu.c   |   4 +-\n target/i386/hvf/x86_task.c  |  12 +--\n target/i386/hvf/x86hvf.c    | 210 ++++++++++++++++++------------------\n 11 files changed, 247 insertions(+), 236 deletions(-)",
    "diff": "diff --git a/accel/hvf/hvf-cpus.c b/accel/hvf/hvf-cpus.c\nindex 60f6d76bf3..2c6796604a 100644\n--- a/accel/hvf/hvf-cpus.c\n+++ b/accel/hvf/hvf-cpus.c\n@@ -312,10 +312,12 @@ static void hvf_cpu_synchronize_pre_loadvm(CPUState *cpu)\n \n static void hvf_vcpu_destroy(CPUState *cpu)\n {\n-    hv_return_t ret = hv_vcpu_destroy(cpu->hvf_fd);\n+    hv_return_t ret = hv_vcpu_destroy(cpu->hvf->fd);\n     assert_hvf_ok(ret);\n \n     hvf_arch_vcpu_destroy(cpu);\n+    g_free(cpu->hvf);\n+    cpu->hvf = NULL;\n }\n \n static void dummy_signal(int sig)\n@@ -326,6 +328,8 @@ static int hvf_init_vcpu(CPUState *cpu)\n {\n     int r;\n \n+    cpu->hvf = g_malloc0(sizeof(*cpu->hvf));\n+\n     /* init cpu signals */\n     sigset_t set;\n     struct sigaction sigact;\n@@ -337,7 +341,7 @@ static int hvf_init_vcpu(CPUState *cpu)\n     pthread_sigmask(SIG_BLOCK, NULL, &set);\n     sigdelset(&set, SIG_IPI);\n \n-    r = hv_vcpu_create((hv_vcpuid_t *)&cpu->hvf_fd, HV_VCPU_DEFAULT);\n+    r = hv_vcpu_create((hv_vcpuid_t *)&cpu->hvf->fd, HV_VCPU_DEFAULT);\n     cpu->vcpu_dirty = 1;\n     assert_hvf_ok(r);\n \ndiff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h\nindex 140fa32a5e..9e1b61f63d 100644\n--- a/include/hw/core/cpu.h\n+++ b/include/hw/core/cpu.h\n@@ -281,6 +281,7 @@ struct KVMState;\n struct kvm_run;\n \n struct hax_vcpu_state;\n+struct hvf_vcpu_state;\n \n #define TB_JMP_CACHE_BITS 12\n #define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)\n@@ -464,7 +465,7 @@ struct CPUState {\n \n     struct hax_vcpu_state *hax_vcpu;\n \n-    int hvf_fd;\n+    struct hvf_vcpu_state *hvf;\n \n     /* track IOMMUs whose translations we've cached in the TCG TLB */\n     GArray *iommu_notifiers;\ndiff --git a/include/sysemu/hvf_int.h b/include/sysemu/hvf_int.h\nindex 69de46db7d..9d3cb53e47 100644\n--- a/include/sysemu/hvf_int.h\n+++ b/include/sysemu/hvf_int.h\n@@ -43,6 +43,10 @@ struct HVFState {\n };\n extern HVFState *hvf_state;\n \n+struct hvf_vcpu_state {\n+    int fd;\n+};\n+\n void assert_hvf_ok(hv_return_t ret);\n int hvf_get_registers(CPUState *cpu);\n int hvf_put_registers(CPUState *cpu);\ndiff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c\nindex 8b96ecd619..08b4adecd9 100644\n--- a/target/i386/hvf/hvf.c\n+++ b/target/i386/hvf/hvf.c\n@@ -80,11 +80,11 @@ void vmx_update_tpr(CPUState *cpu)\n     int tpr = cpu_get_apic_tpr(x86_cpu->apic_state) << 4;\n     int irr = apic_get_highest_priority_irr(x86_cpu->apic_state);\n \n-    wreg(cpu->hvf_fd, HV_X86_TPR, tpr);\n+    wreg(cpu->hvf->fd, HV_X86_TPR, tpr);\n     if (irr == -1) {\n-        wvmcs(cpu->hvf_fd, VMCS_TPR_THRESHOLD, 0);\n+        wvmcs(cpu->hvf->fd, VMCS_TPR_THRESHOLD, 0);\n     } else {\n-        wvmcs(cpu->hvf_fd, VMCS_TPR_THRESHOLD, (irr > tpr) ? tpr >> 4 :\n+        wvmcs(cpu->hvf->fd, VMCS_TPR_THRESHOLD, (irr > tpr) ? 
tpr >> 4 :\n               irr >> 4);\n     }\n }\n@@ -92,7 +92,7 @@ void vmx_update_tpr(CPUState *cpu)\n static void update_apic_tpr(CPUState *cpu)\n {\n     X86CPU *x86_cpu = X86_CPU(cpu);\n-    int tpr = rreg(cpu->hvf_fd, HV_X86_TPR) >> 4;\n+    int tpr = rreg(cpu->hvf->fd, HV_X86_TPR) >> 4;\n     cpu_set_apic_tpr(x86_cpu->apic_state, tpr);\n }\n \n@@ -194,43 +194,43 @@ int hvf_arch_init_vcpu(CPUState *cpu)\n     }\n \n     /* set VMCS control fields */\n-    wvmcs(cpu->hvf_fd, VMCS_PIN_BASED_CTLS,\n+    wvmcs(cpu->hvf->fd, VMCS_PIN_BASED_CTLS,\n           cap2ctrl(hvf_state->hvf_caps->vmx_cap_pinbased,\n           VMCS_PIN_BASED_CTLS_EXTINT |\n           VMCS_PIN_BASED_CTLS_NMI |\n           VMCS_PIN_BASED_CTLS_VNMI));\n-    wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS,\n+    wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS,\n           cap2ctrl(hvf_state->hvf_caps->vmx_cap_procbased,\n           VMCS_PRI_PROC_BASED_CTLS_HLT |\n           VMCS_PRI_PROC_BASED_CTLS_MWAIT |\n           VMCS_PRI_PROC_BASED_CTLS_TSC_OFFSET |\n           VMCS_PRI_PROC_BASED_CTLS_TPR_SHADOW) |\n           VMCS_PRI_PROC_BASED_CTLS_SEC_CONTROL);\n-    wvmcs(cpu->hvf_fd, VMCS_SEC_PROC_BASED_CTLS,\n+    wvmcs(cpu->hvf->fd, VMCS_SEC_PROC_BASED_CTLS,\n           cap2ctrl(hvf_state->hvf_caps->vmx_cap_procbased2,\n                    VMCS_PRI_PROC_BASED2_CTLS_APIC_ACCESSES));\n \n-    wvmcs(cpu->hvf_fd, VMCS_ENTRY_CTLS, cap2ctrl(hvf_state->hvf_caps->vmx_cap_entry,\n+    wvmcs(cpu->hvf->fd, VMCS_ENTRY_CTLS, cap2ctrl(hvf_state->hvf_caps->vmx_cap_entry,\n           0));\n-    wvmcs(cpu->hvf_fd, VMCS_EXCEPTION_BITMAP, 0); /* Double fault */\n+    wvmcs(cpu->hvf->fd, VMCS_EXCEPTION_BITMAP, 0); /* Double fault */\n \n-    wvmcs(cpu->hvf_fd, VMCS_TPR_THRESHOLD, 0);\n+    wvmcs(cpu->hvf->fd, VMCS_TPR_THRESHOLD, 0);\n \n     x86cpu = X86_CPU(cpu);\n     x86cpu->env.xsave_buf = qemu_memalign(4096, 4096);\n \n-    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_STAR, 1);\n-    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_LSTAR, 1);\n-    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_CSTAR, 1);\n-    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_FMASK, 1);\n-    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_FSBASE, 1);\n-    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_GSBASE, 1);\n-    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_KERNELGSBASE, 1);\n-    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_TSC_AUX, 1);\n-    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_TSC, 1);\n-    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_CS, 1);\n-    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_EIP, 1);\n-    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_ESP, 1);\n+    hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_STAR, 1);\n+    hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_LSTAR, 1);\n+    hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_CSTAR, 1);\n+    hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_FMASK, 1);\n+    hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_FSBASE, 1);\n+    hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_GSBASE, 1);\n+    hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_KERNELGSBASE, 1);\n+    hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_TSC_AUX, 1);\n+    hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_IA32_TSC, 1);\n+    hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_IA32_SYSENTER_CS, 1);\n+    hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_IA32_SYSENTER_EIP, 1);\n+    hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_IA32_SYSENTER_ESP, 1);\n \n     return 0;\n }\n@@ -271,16 +271,16 @@ static void hvf_store_events(CPUState *cpu, uint32_t ins_len, 
uint64_t idtvec_in\n         }\n         if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {\n             env->has_error_code = true;\n-            env->error_code = rvmcs(cpu->hvf_fd, VMCS_IDT_VECTORING_ERROR);\n+            env->error_code = rvmcs(cpu->hvf->fd, VMCS_IDT_VECTORING_ERROR);\n         }\n     }\n-    if ((rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY) &\n+    if ((rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY) &\n         VMCS_INTERRUPTIBILITY_NMI_BLOCKING)) {\n         env->hflags2 |= HF2_NMI_MASK;\n     } else {\n         env->hflags2 &= ~HF2_NMI_MASK;\n     }\n-    if (rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY) &\n+    if (rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY) &\n          (VMCS_INTERRUPTIBILITY_STI_BLOCKING |\n          VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)) {\n         env->hflags |= HF_INHIBIT_IRQ_MASK;\n@@ -317,20 +317,20 @@ int hvf_vcpu_exec(CPUState *cpu)\n             return EXCP_HLT;\n         }\n \n-        hv_return_t r  = hv_vcpu_run(cpu->hvf_fd);\n+        hv_return_t r  = hv_vcpu_run(cpu->hvf->fd);\n         assert_hvf_ok(r);\n \n         /* handle VMEXIT */\n-        uint64_t exit_reason = rvmcs(cpu->hvf_fd, VMCS_EXIT_REASON);\n-        uint64_t exit_qual = rvmcs(cpu->hvf_fd, VMCS_EXIT_QUALIFICATION);\n-        uint32_t ins_len = (uint32_t)rvmcs(cpu->hvf_fd,\n+        uint64_t exit_reason = rvmcs(cpu->hvf->fd, VMCS_EXIT_REASON);\n+        uint64_t exit_qual = rvmcs(cpu->hvf->fd, VMCS_EXIT_QUALIFICATION);\n+        uint32_t ins_len = (uint32_t)rvmcs(cpu->hvf->fd,\n                                            VMCS_EXIT_INSTRUCTION_LENGTH);\n \n-        uint64_t idtvec_info = rvmcs(cpu->hvf_fd, VMCS_IDT_VECTORING_INFO);\n+        uint64_t idtvec_info = rvmcs(cpu->hvf->fd, VMCS_IDT_VECTORING_INFO);\n \n         hvf_store_events(cpu, ins_len, idtvec_info);\n-        rip = rreg(cpu->hvf_fd, HV_X86_RIP);\n-        env->eflags = rreg(cpu->hvf_fd, HV_X86_RFLAGS);\n+        rip = rreg(cpu->hvf->fd, HV_X86_RIP);\n+        env->eflags = rreg(cpu->hvf->fd, HV_X86_RFLAGS);\n \n         qemu_mutex_lock_iothread();\n \n@@ -360,7 +360,7 @@ int hvf_vcpu_exec(CPUState *cpu)\n         case EXIT_REASON_EPT_FAULT:\n         {\n             hvf_slot *slot;\n-            uint64_t gpa = rvmcs(cpu->hvf_fd, VMCS_GUEST_PHYSICAL_ADDRESS);\n+            uint64_t gpa = rvmcs(cpu->hvf->fd, VMCS_GUEST_PHYSICAL_ADDRESS);\n \n             if (((idtvec_info & VMCS_IDT_VEC_VALID) == 0) &&\n                 ((exit_qual & EXIT_QUAL_NMIUDTI) != 0)) {\n@@ -405,7 +405,7 @@ int hvf_vcpu_exec(CPUState *cpu)\n                 store_regs(cpu);\n                 break;\n             } else if (!string && !in) {\n-                RAX(env) = rreg(cpu->hvf_fd, HV_X86_RAX);\n+                RAX(env) = rreg(cpu->hvf->fd, HV_X86_RAX);\n                 hvf_handle_io(env, port, &RAX(env), 1, size, 1);\n                 macvm_set_rip(cpu, rip + ins_len);\n                 break;\n@@ -421,17 +421,17 @@ int hvf_vcpu_exec(CPUState *cpu)\n             break;\n         }\n         case EXIT_REASON_CPUID: {\n-            uint32_t rax = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RAX);\n-            uint32_t rbx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RBX);\n-            uint32_t rcx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RCX);\n-            uint32_t rdx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RDX);\n+            uint32_t rax = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RAX);\n+            uint32_t rbx = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RBX);\n+            uint32_t rcx = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RCX);\n+          
  uint32_t rdx = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RDX);\n \n             cpu_x86_cpuid(env, rax, rcx, &rax, &rbx, &rcx, &rdx);\n \n-            wreg(cpu->hvf_fd, HV_X86_RAX, rax);\n-            wreg(cpu->hvf_fd, HV_X86_RBX, rbx);\n-            wreg(cpu->hvf_fd, HV_X86_RCX, rcx);\n-            wreg(cpu->hvf_fd, HV_X86_RDX, rdx);\n+            wreg(cpu->hvf->fd, HV_X86_RAX, rax);\n+            wreg(cpu->hvf->fd, HV_X86_RBX, rbx);\n+            wreg(cpu->hvf->fd, HV_X86_RCX, rcx);\n+            wreg(cpu->hvf->fd, HV_X86_RDX, rdx);\n \n             macvm_set_rip(cpu, rip + ins_len);\n             break;\n@@ -439,16 +439,16 @@ int hvf_vcpu_exec(CPUState *cpu)\n         case EXIT_REASON_XSETBV: {\n             X86CPU *x86_cpu = X86_CPU(cpu);\n             CPUX86State *env = &x86_cpu->env;\n-            uint32_t eax = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RAX);\n-            uint32_t ecx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RCX);\n-            uint32_t edx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RDX);\n+            uint32_t eax = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RAX);\n+            uint32_t ecx = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RCX);\n+            uint32_t edx = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RDX);\n \n             if (ecx) {\n                 macvm_set_rip(cpu, rip + ins_len);\n                 break;\n             }\n             env->xcr0 = ((uint64_t)edx << 32) | eax;\n-            wreg(cpu->hvf_fd, HV_X86_XCR0, env->xcr0 | 1);\n+            wreg(cpu->hvf->fd, HV_X86_XCR0, env->xcr0 | 1);\n             macvm_set_rip(cpu, rip + ins_len);\n             break;\n         }\n@@ -487,11 +487,11 @@ int hvf_vcpu_exec(CPUState *cpu)\n \n             switch (cr) {\n             case 0x0: {\n-                macvm_set_cr0(cpu->hvf_fd, RRX(env, reg));\n+                macvm_set_cr0(cpu->hvf->fd, RRX(env, reg));\n                 break;\n             }\n             case 4: {\n-                macvm_set_cr4(cpu->hvf_fd, RRX(env, reg));\n+                macvm_set_cr4(cpu->hvf->fd, RRX(env, reg));\n                 break;\n             }\n             case 8: {\n@@ -527,7 +527,7 @@ int hvf_vcpu_exec(CPUState *cpu)\n             break;\n         }\n         case EXIT_REASON_TASK_SWITCH: {\n-            uint64_t vinfo = rvmcs(cpu->hvf_fd, VMCS_IDT_VECTORING_INFO);\n+            uint64_t vinfo = rvmcs(cpu->hvf->fd, VMCS_IDT_VECTORING_INFO);\n             x68_segment_selector sel = {.sel = exit_qual & 0xffff};\n             vmx_handle_task_switch(cpu, sel, (exit_qual >> 30) & 0x3,\n              vinfo & VMCS_INTR_VALID, vinfo & VECTORING_INFO_VECTOR_MASK, vinfo\n@@ -540,8 +540,8 @@ int hvf_vcpu_exec(CPUState *cpu)\n             break;\n         }\n         case EXIT_REASON_RDPMC:\n-            wreg(cpu->hvf_fd, HV_X86_RAX, 0);\n-            wreg(cpu->hvf_fd, HV_X86_RDX, 0);\n+            wreg(cpu->hvf->fd, HV_X86_RAX, 0);\n+            wreg(cpu->hvf->fd, HV_X86_RDX, 0);\n             macvm_set_rip(cpu, rip + ins_len);\n             break;\n         case VMX_REASON_VMCALL:\ndiff --git a/target/i386/hvf/vmx.h b/target/i386/hvf/vmx.h\nindex 24c4cdf0be..6df87116f6 100644\n--- a/target/i386/hvf/vmx.h\n+++ b/target/i386/hvf/vmx.h\n@@ -30,6 +30,8 @@\n #include \"vmcs.h\"\n #include \"cpu.h\"\n #include \"x86.h\"\n+#include \"sysemu/hvf.h\"\n+#include \"sysemu/hvf_int.h\"\n \n #include \"exec/address-spaces.h\"\n \n@@ -179,15 +181,15 @@ static inline void macvm_set_rip(CPUState *cpu, uint64_t rip)\n     uint64_t val;\n \n     /* BUG, should take considering overlap.. 
*/\n-    wreg(cpu->hvf_fd, HV_X86_RIP, rip);\n+    wreg(cpu->hvf->fd, HV_X86_RIP, rip);\n     env->eip = rip;\n \n     /* after moving forward in rip, we need to clean INTERRUPTABILITY */\n-   val = rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);\n+   val = rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY);\n    if (val & (VMCS_INTERRUPTIBILITY_STI_BLOCKING |\n                VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)) {\n         env->hflags &= ~HF_INHIBIT_IRQ_MASK;\n-        wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY,\n+        wvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY,\n                val & ~(VMCS_INTERRUPTIBILITY_STI_BLOCKING |\n                VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING));\n    }\n@@ -199,9 +201,9 @@ static inline void vmx_clear_nmi_blocking(CPUState *cpu)\n     CPUX86State *env = &x86_cpu->env;\n \n     env->hflags2 &= ~HF2_NMI_MASK;\n-    uint32_t gi = (uint32_t) rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);\n+    uint32_t gi = (uint32_t) rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY);\n     gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;\n-    wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY, gi);\n+    wvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY, gi);\n }\n \n static inline void vmx_set_nmi_blocking(CPUState *cpu)\n@@ -210,16 +212,16 @@ static inline void vmx_set_nmi_blocking(CPUState *cpu)\n     CPUX86State *env = &x86_cpu->env;\n \n     env->hflags2 |= HF2_NMI_MASK;\n-    uint32_t gi = (uint32_t)rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);\n+    uint32_t gi = (uint32_t)rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY);\n     gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;\n-    wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY, gi);\n+    wvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY, gi);\n }\n \n static inline void vmx_set_nmi_window_exiting(CPUState *cpu)\n {\n     uint64_t val;\n-    val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);\n-    wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val |\n+    val = rvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS);\n+    wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, val |\n           VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING);\n \n }\n@@ -228,8 +230,8 @@ static inline void vmx_clear_nmi_window_exiting(CPUState *cpu)\n {\n \n     uint64_t val;\n-    val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);\n-    wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val &\n+    val = rvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS);\n+    wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, val &\n           ~VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING);\n }\n \ndiff --git a/target/i386/hvf/x86.c b/target/i386/hvf/x86.c\nindex cd045183a8..2898bb70a8 100644\n--- a/target/i386/hvf/x86.c\n+++ b/target/i386/hvf/x86.c\n@@ -62,11 +62,11 @@ bool x86_read_segment_descriptor(struct CPUState *cpu,\n     }\n \n     if (GDT_SEL == sel.ti) {\n-        base  = rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_BASE);\n-        limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_LIMIT);\n+        base  = rvmcs(cpu->hvf->fd, VMCS_GUEST_GDTR_BASE);\n+        limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_GDTR_LIMIT);\n     } else {\n-        base  = rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_BASE);\n-        limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_LIMIT);\n+        base  = rvmcs(cpu->hvf->fd, VMCS_GUEST_LDTR_BASE);\n+        limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_LDTR_LIMIT);\n     }\n \n     if (sel.index * 8 >= limit) {\n@@ -85,11 +85,11 @@ bool x86_write_segment_descriptor(struct CPUState *cpu,\n     uint32_t limit;\n     \n     if (GDT_SEL == sel.ti) {\n-        base  = rvmcs(cpu->hvf_fd, 
VMCS_GUEST_GDTR_BASE);\n-        limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_LIMIT);\n+        base  = rvmcs(cpu->hvf->fd, VMCS_GUEST_GDTR_BASE);\n+        limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_GDTR_LIMIT);\n     } else {\n-        base  = rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_BASE);\n-        limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_LIMIT);\n+        base  = rvmcs(cpu->hvf->fd, VMCS_GUEST_LDTR_BASE);\n+        limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_LDTR_LIMIT);\n     }\n     \n     if (sel.index * 8 >= limit) {\n@@ -103,8 +103,8 @@ bool x86_write_segment_descriptor(struct CPUState *cpu,\n bool x86_read_call_gate(struct CPUState *cpu, struct x86_call_gate *idt_desc,\n                         int gate)\n {\n-    target_ulong base  = rvmcs(cpu->hvf_fd, VMCS_GUEST_IDTR_BASE);\n-    uint32_t limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_IDTR_LIMIT);\n+    target_ulong base  = rvmcs(cpu->hvf->fd, VMCS_GUEST_IDTR_BASE);\n+    uint32_t limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_IDTR_LIMIT);\n \n     memset(idt_desc, 0, sizeof(*idt_desc));\n     if (gate * 8 >= limit) {\n@@ -118,7 +118,7 @@ bool x86_read_call_gate(struct CPUState *cpu, struct x86_call_gate *idt_desc,\n \n bool x86_is_protected(struct CPUState *cpu)\n {\n-    uint64_t cr0 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0);\n+    uint64_t cr0 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0);\n     return cr0 & CR0_PE;\n }\n \n@@ -136,7 +136,7 @@ bool x86_is_v8086(struct CPUState *cpu)\n \n bool x86_is_long_mode(struct CPUState *cpu)\n {\n-    return rvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER) & MSR_EFER_LMA;\n+    return rvmcs(cpu->hvf->fd, VMCS_GUEST_IA32_EFER) & MSR_EFER_LMA;\n }\n \n bool x86_is_long64_mode(struct CPUState *cpu)\n@@ -149,13 +149,13 @@ bool x86_is_long64_mode(struct CPUState *cpu)\n \n bool x86_is_paging_mode(struct CPUState *cpu)\n {\n-    uint64_t cr0 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0);\n+    uint64_t cr0 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0);\n     return cr0 & CR0_PG;\n }\n \n bool x86_is_pae_enabled(struct CPUState *cpu)\n {\n-    uint64_t cr4 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR4);\n+    uint64_t cr4 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR4);\n     return cr4 & CR4_PAE;\n }\n \ndiff --git a/target/i386/hvf/x86_descr.c b/target/i386/hvf/x86_descr.c\nindex 9f539e73f6..af15c06ac5 100644\n--- a/target/i386/hvf/x86_descr.c\n+++ b/target/i386/hvf/x86_descr.c\n@@ -48,47 +48,47 @@ static const struct vmx_segment_field {\n \n uint32_t vmx_read_segment_limit(CPUState *cpu, X86Seg seg)\n {\n-    return (uint32_t)rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].limit);\n+    return (uint32_t)rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].limit);\n }\n \n uint32_t vmx_read_segment_ar(CPUState *cpu, X86Seg seg)\n {\n-    return (uint32_t)rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].ar_bytes);\n+    return (uint32_t)rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].ar_bytes);\n }\n \n uint64_t vmx_read_segment_base(CPUState *cpu, X86Seg seg)\n {\n-    return rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].base);\n+    return rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].base);\n }\n \n x68_segment_selector vmx_read_segment_selector(CPUState *cpu, X86Seg seg)\n {\n     x68_segment_selector sel;\n-    sel.sel = rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].selector);\n+    sel.sel = rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].selector);\n     return sel;\n }\n \n void vmx_write_segment_selector(struct CPUState *cpu, x68_segment_selector selector, X86Seg seg)\n {\n-    wvmcs(cpu->hvf_fd, vmx_segment_fields[seg].selector, selector.sel);\n+    wvmcs(cpu->hvf->fd, vmx_segment_fields[seg].selector, 
selector.sel);\n }\n \n void vmx_read_segment_descriptor(struct CPUState *cpu, struct vmx_segment *desc, X86Seg seg)\n {\n-    desc->sel = rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].selector);\n-    desc->base = rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].base);\n-    desc->limit = rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].limit);\n-    desc->ar = rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].ar_bytes);\n+    desc->sel = rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].selector);\n+    desc->base = rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].base);\n+    desc->limit = rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].limit);\n+    desc->ar = rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].ar_bytes);\n }\n \n void vmx_write_segment_descriptor(CPUState *cpu, struct vmx_segment *desc, X86Seg seg)\n {\n     const struct vmx_segment_field *sf = &vmx_segment_fields[seg];\n \n-    wvmcs(cpu->hvf_fd, sf->base, desc->base);\n-    wvmcs(cpu->hvf_fd, sf->limit, desc->limit);\n-    wvmcs(cpu->hvf_fd, sf->selector, desc->sel);\n-    wvmcs(cpu->hvf_fd, sf->ar_bytes, desc->ar);\n+    wvmcs(cpu->hvf->fd, sf->base, desc->base);\n+    wvmcs(cpu->hvf->fd, sf->limit, desc->limit);\n+    wvmcs(cpu->hvf->fd, sf->selector, desc->sel);\n+    wvmcs(cpu->hvf->fd, sf->ar_bytes, desc->ar);\n }\n \n void x86_segment_descriptor_to_vmx(struct CPUState *cpu, x68_segment_selector selector, struct x86_segment_descriptor *desc, struct vmx_segment *vmx_desc)\ndiff --git a/target/i386/hvf/x86_emu.c b/target/i386/hvf/x86_emu.c\nindex da570e352b..5a512f6768 100644\n--- a/target/i386/hvf/x86_emu.c\n+++ b/target/i386/hvf/x86_emu.c\n@@ -673,7 +673,7 @@ void simulate_rdmsr(struct CPUState *cpu)\n \n     switch (msr) {\n     case MSR_IA32_TSC:\n-        val = rdtscp() + rvmcs(cpu->hvf_fd, VMCS_TSC_OFFSET);\n+        val = rdtscp() + rvmcs(cpu->hvf->fd, VMCS_TSC_OFFSET);\n         break;\n     case MSR_IA32_APICBASE:\n         val = cpu_get_apic_base(X86_CPU(cpu)->apic_state);\n@@ -682,16 +682,16 @@ void simulate_rdmsr(struct CPUState *cpu)\n         val = x86_cpu->ucode_rev;\n         break;\n     case MSR_EFER:\n-        val = rvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER);\n+        val = rvmcs(cpu->hvf->fd, VMCS_GUEST_IA32_EFER);\n         break;\n     case MSR_FSBASE:\n-        val = rvmcs(cpu->hvf_fd, VMCS_GUEST_FS_BASE);\n+        val = rvmcs(cpu->hvf->fd, VMCS_GUEST_FS_BASE);\n         break;\n     case MSR_GSBASE:\n-        val = rvmcs(cpu->hvf_fd, VMCS_GUEST_GS_BASE);\n+        val = rvmcs(cpu->hvf->fd, VMCS_GUEST_GS_BASE);\n         break;\n     case MSR_KERNELGSBASE:\n-        val = rvmcs(cpu->hvf_fd, VMCS_HOST_FS_BASE);\n+        val = rvmcs(cpu->hvf->fd, VMCS_HOST_FS_BASE);\n         break;\n     case MSR_STAR:\n         abort();\n@@ -775,13 +775,13 @@ void simulate_wrmsr(struct CPUState *cpu)\n         cpu_set_apic_base(X86_CPU(cpu)->apic_state, data);\n         break;\n     case MSR_FSBASE:\n-        wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_BASE, data);\n+        wvmcs(cpu->hvf->fd, VMCS_GUEST_FS_BASE, data);\n         break;\n     case MSR_GSBASE:\n-        wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_BASE, data);\n+        wvmcs(cpu->hvf->fd, VMCS_GUEST_GS_BASE, data);\n         break;\n     case MSR_KERNELGSBASE:\n-        wvmcs(cpu->hvf_fd, VMCS_HOST_FS_BASE, data);\n+        wvmcs(cpu->hvf->fd, VMCS_HOST_FS_BASE, data);\n         break;\n     case MSR_STAR:\n         abort();\n@@ -794,9 +794,9 @@ void simulate_wrmsr(struct CPUState *cpu)\n         break;\n     case MSR_EFER:\n         /*printf(\"new efer %llx\\n\", EFER(cpu));*/\n-        
wvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER, data);\n+        wvmcs(cpu->hvf->fd, VMCS_GUEST_IA32_EFER, data);\n         if (data & MSR_EFER_NXE) {\n-            hv_vcpu_invalidate_tlb(cpu->hvf_fd);\n+            hv_vcpu_invalidate_tlb(cpu->hvf->fd);\n         }\n         break;\n     case MSR_MTRRphysBase(0):\n@@ -1420,21 +1420,21 @@ void load_regs(struct CPUState *cpu)\n     CPUX86State *env = &x86_cpu->env;\n \n     int i = 0;\n-    RRX(env, R_EAX) = rreg(cpu->hvf_fd, HV_X86_RAX);\n-    RRX(env, R_EBX) = rreg(cpu->hvf_fd, HV_X86_RBX);\n-    RRX(env, R_ECX) = rreg(cpu->hvf_fd, HV_X86_RCX);\n-    RRX(env, R_EDX) = rreg(cpu->hvf_fd, HV_X86_RDX);\n-    RRX(env, R_ESI) = rreg(cpu->hvf_fd, HV_X86_RSI);\n-    RRX(env, R_EDI) = rreg(cpu->hvf_fd, HV_X86_RDI);\n-    RRX(env, R_ESP) = rreg(cpu->hvf_fd, HV_X86_RSP);\n-    RRX(env, R_EBP) = rreg(cpu->hvf_fd, HV_X86_RBP);\n+    RRX(env, R_EAX) = rreg(cpu->hvf->fd, HV_X86_RAX);\n+    RRX(env, R_EBX) = rreg(cpu->hvf->fd, HV_X86_RBX);\n+    RRX(env, R_ECX) = rreg(cpu->hvf->fd, HV_X86_RCX);\n+    RRX(env, R_EDX) = rreg(cpu->hvf->fd, HV_X86_RDX);\n+    RRX(env, R_ESI) = rreg(cpu->hvf->fd, HV_X86_RSI);\n+    RRX(env, R_EDI) = rreg(cpu->hvf->fd, HV_X86_RDI);\n+    RRX(env, R_ESP) = rreg(cpu->hvf->fd, HV_X86_RSP);\n+    RRX(env, R_EBP) = rreg(cpu->hvf->fd, HV_X86_RBP);\n     for (i = 8; i < 16; i++) {\n-        RRX(env, i) = rreg(cpu->hvf_fd, HV_X86_RAX + i);\n+        RRX(env, i) = rreg(cpu->hvf->fd, HV_X86_RAX + i);\n     }\n \n-    env->eflags = rreg(cpu->hvf_fd, HV_X86_RFLAGS);\n+    env->eflags = rreg(cpu->hvf->fd, HV_X86_RFLAGS);\n     rflags_to_lflags(env);\n-    env->eip = rreg(cpu->hvf_fd, HV_X86_RIP);\n+    env->eip = rreg(cpu->hvf->fd, HV_X86_RIP);\n }\n \n void store_regs(struct CPUState *cpu)\n@@ -1443,20 +1443,20 @@ void store_regs(struct CPUState *cpu)\n     CPUX86State *env = &x86_cpu->env;\n \n     int i = 0;\n-    wreg(cpu->hvf_fd, HV_X86_RAX, RAX(env));\n-    wreg(cpu->hvf_fd, HV_X86_RBX, RBX(env));\n-    wreg(cpu->hvf_fd, HV_X86_RCX, RCX(env));\n-    wreg(cpu->hvf_fd, HV_X86_RDX, RDX(env));\n-    wreg(cpu->hvf_fd, HV_X86_RSI, RSI(env));\n-    wreg(cpu->hvf_fd, HV_X86_RDI, RDI(env));\n-    wreg(cpu->hvf_fd, HV_X86_RBP, RBP(env));\n-    wreg(cpu->hvf_fd, HV_X86_RSP, RSP(env));\n+    wreg(cpu->hvf->fd, HV_X86_RAX, RAX(env));\n+    wreg(cpu->hvf->fd, HV_X86_RBX, RBX(env));\n+    wreg(cpu->hvf->fd, HV_X86_RCX, RCX(env));\n+    wreg(cpu->hvf->fd, HV_X86_RDX, RDX(env));\n+    wreg(cpu->hvf->fd, HV_X86_RSI, RSI(env));\n+    wreg(cpu->hvf->fd, HV_X86_RDI, RDI(env));\n+    wreg(cpu->hvf->fd, HV_X86_RBP, RBP(env));\n+    wreg(cpu->hvf->fd, HV_X86_RSP, RSP(env));\n     for (i = 8; i < 16; i++) {\n-        wreg(cpu->hvf_fd, HV_X86_RAX + i, RRX(env, i));\n+        wreg(cpu->hvf->fd, HV_X86_RAX + i, RRX(env, i));\n     }\n \n     lflags_to_rflags(env);\n-    wreg(cpu->hvf_fd, HV_X86_RFLAGS, env->eflags);\n+    wreg(cpu->hvf->fd, HV_X86_RFLAGS, env->eflags);\n     macvm_set_rip(cpu, env->eip);\n }\n \ndiff --git a/target/i386/hvf/x86_mmu.c b/target/i386/hvf/x86_mmu.c\nindex 882a6237ee..b7e3f8568f 100644\n--- a/target/i386/hvf/x86_mmu.c\n+++ b/target/i386/hvf/x86_mmu.c\n@@ -128,7 +128,7 @@ static bool test_pt_entry(struct CPUState *cpu, struct gpt_translation *pt,\n         pt->err_code |= MMU_PAGE_PT;\n     }\n \n-    uint32_t cr0 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0);\n+    uint32_t cr0 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0);\n     /* check protection */\n     if (cr0 & CR0_WP) {\n         if (pt->write_access && !pte_write_access(pte)) {\n@@ -173,7 +173,7 @@ 
static bool walk_gpt(struct CPUState *cpu, target_ulong addr, int err_code,\n {\n     int top_level, level;\n     bool is_large = false;\n-    target_ulong cr3 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR3);\n+    target_ulong cr3 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR3);\n     uint64_t page_mask = pae ? PAE_PTE_PAGE_MASK : LEGACY_PTE_PAGE_MASK;\n     \n     memset(pt, 0, sizeof(*pt));\ndiff --git a/target/i386/hvf/x86_task.c b/target/i386/hvf/x86_task.c\nindex 6f04478b3a..c25c8ec88f 100644\n--- a/target/i386/hvf/x86_task.c\n+++ b/target/i386/hvf/x86_task.c\n@@ -62,7 +62,7 @@ static void load_state_from_tss32(CPUState *cpu, struct x86_tss_segment32 *tss)\n     X86CPU *x86_cpu = X86_CPU(cpu);\n     CPUX86State *env = &x86_cpu->env;\n \n-    wvmcs(cpu->hvf_fd, VMCS_GUEST_CR3, tss->cr3);\n+    wvmcs(cpu->hvf->fd, VMCS_GUEST_CR3, tss->cr3);\n \n     env->eip = tss->eip;\n     env->eflags = tss->eflags | 2;\n@@ -111,11 +111,11 @@ static int task_switch_32(CPUState *cpu, x68_segment_selector tss_sel, x68_segme\n \n void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, int reason, bool gate_valid, uint8_t gate, uint64_t gate_type)\n {\n-    uint64_t rip = rreg(cpu->hvf_fd, HV_X86_RIP);\n+    uint64_t rip = rreg(cpu->hvf->fd, HV_X86_RIP);\n     if (!gate_valid || (gate_type != VMCS_INTR_T_HWEXCEPTION &&\n                         gate_type != VMCS_INTR_T_HWINTR &&\n                         gate_type != VMCS_INTR_T_NMI)) {\n-        int ins_len = rvmcs(cpu->hvf_fd, VMCS_EXIT_INSTRUCTION_LENGTH);\n+        int ins_len = rvmcs(cpu->hvf->fd, VMCS_EXIT_INSTRUCTION_LENGTH);\n         macvm_set_rip(cpu, rip + ins_len);\n         return;\n     }\n@@ -174,12 +174,12 @@ void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, int rea\n         //ret = task_switch_16(cpu, tss_sel, old_tss_sel, old_tss_base, &next_tss_desc);\n         VM_PANIC(\"task_switch_16\");\n \n-    macvm_set_cr0(cpu->hvf_fd, rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0) | CR0_TS);\n+    macvm_set_cr0(cpu->hvf->fd, rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0) | CR0_TS);\n     x86_segment_descriptor_to_vmx(cpu, tss_sel, &next_tss_desc, &vmx_seg);\n     vmx_write_segment_descriptor(cpu, &vmx_seg, R_TR);\n \n     store_regs(cpu);\n \n-    hv_vcpu_invalidate_tlb(cpu->hvf_fd);\n-    hv_vcpu_flush(cpu->hvf_fd);\n+    hv_vcpu_invalidate_tlb(cpu->hvf->fd);\n+    hv_vcpu_flush(cpu->hvf->fd);\n }\ndiff --git a/target/i386/hvf/x86hvf.c b/target/i386/hvf/x86hvf.c\nindex 89b8e9d87a..0f2aeb1cf8 100644\n--- a/target/i386/hvf/x86hvf.c\n+++ b/target/i386/hvf/x86hvf.c\n@@ -82,7 +82,7 @@ void hvf_put_xsave(CPUState *cpu_state)\n \n     x86_cpu_xsave_all_areas(X86_CPU(cpu_state), xsave);\n \n-    if (hv_vcpu_write_fpstate(cpu_state->hvf_fd, (void*)xsave, 4096)) {\n+    if (hv_vcpu_write_fpstate(cpu_state->hvf->fd, (void*)xsave, 4096)) {\n         abort();\n     }\n }\n@@ -92,19 +92,19 @@ void hvf_put_segments(CPUState *cpu_state)\n     CPUX86State *env = &X86_CPU(cpu_state)->env;\n     struct vmx_segment seg;\n     \n-    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_LIMIT, env->idt.limit);\n-    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_BASE, env->idt.base);\n+    wvmcs(cpu_state->hvf->fd, VMCS_GUEST_IDTR_LIMIT, env->idt.limit);\n+    wvmcs(cpu_state->hvf->fd, VMCS_GUEST_IDTR_BASE, env->idt.base);\n \n-    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_LIMIT, env->gdt.limit);\n-    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_BASE, env->gdt.base);\n+    wvmcs(cpu_state->hvf->fd, VMCS_GUEST_GDTR_LIMIT, env->gdt.limit);\n+    wvmcs(cpu_state->hvf->fd, 
VMCS_GUEST_GDTR_BASE, env->gdt.base);\n \n-    /* wvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR2, env->cr[2]); */\n-    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR3, env->cr[3]);\n+    /* wvmcs(cpu_state->hvf->fd, VMCS_GUEST_CR2, env->cr[2]); */\n+    wvmcs(cpu_state->hvf->fd, VMCS_GUEST_CR3, env->cr[3]);\n     vmx_update_tpr(cpu_state);\n-    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IA32_EFER, env->efer);\n+    wvmcs(cpu_state->hvf->fd, VMCS_GUEST_IA32_EFER, env->efer);\n \n-    macvm_set_cr4(cpu_state->hvf_fd, env->cr[4]);\n-    macvm_set_cr0(cpu_state->hvf_fd, env->cr[0]);\n+    macvm_set_cr4(cpu_state->hvf->fd, env->cr[4]);\n+    macvm_set_cr0(cpu_state->hvf->fd, env->cr[0]);\n \n     hvf_set_segment(cpu_state, &seg, &env->segs[R_CS], false);\n     vmx_write_segment_descriptor(cpu_state, &seg, R_CS);\n@@ -130,31 +130,31 @@ void hvf_put_segments(CPUState *cpu_state)\n     hvf_set_segment(cpu_state, &seg, &env->ldt, false);\n     vmx_write_segment_descriptor(cpu_state, &seg, R_LDTR);\n     \n-    hv_vcpu_flush(cpu_state->hvf_fd);\n+    hv_vcpu_flush(cpu_state->hvf->fd);\n }\n     \n void hvf_put_msrs(CPUState *cpu_state)\n {\n     CPUX86State *env = &X86_CPU(cpu_state)->env;\n \n-    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_CS,\n+    hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_CS,\n                       env->sysenter_cs);\n-    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_ESP,\n+    hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_ESP,\n                       env->sysenter_esp);\n-    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_EIP,\n+    hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_EIP,\n                       env->sysenter_eip);\n \n-    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_STAR, env->star);\n+    hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_STAR, env->star);\n \n #ifdef TARGET_X86_64\n-    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_CSTAR, env->cstar);\n-    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_KERNELGSBASE, env->kernelgsbase);\n-    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_FMASK, env->fmask);\n-    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_LSTAR, env->lstar);\n+    hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_CSTAR, env->cstar);\n+    hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_KERNELGSBASE, env->kernelgsbase);\n+    hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_FMASK, env->fmask);\n+    hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_LSTAR, env->lstar);\n #endif\n \n-    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_GSBASE, env->segs[R_GS].base);\n-    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_FSBASE, env->segs[R_FS].base);\n+    hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_GSBASE, env->segs[R_GS].base);\n+    hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_FSBASE, env->segs[R_FS].base);\n }\n \n \n@@ -164,7 +164,7 @@ void hvf_get_xsave(CPUState *cpu_state)\n \n     xsave = X86_CPU(cpu_state)->env.xsave_buf;\n \n-    if (hv_vcpu_read_fpstate(cpu_state->hvf_fd, (void*)xsave, 4096)) {\n+    if (hv_vcpu_read_fpstate(cpu_state->hvf->fd, (void*)xsave, 4096)) {\n         abort();\n     }\n \n@@ -203,17 +203,17 @@ void hvf_get_segments(CPUState *cpu_state)\n     vmx_read_segment_descriptor(cpu_state, &seg, R_LDTR);\n     hvf_get_segment(&env->ldt, &seg);\n \n-    env->idt.limit = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_LIMIT);\n-    env->idt.base = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_BASE);\n-    env->gdt.limit = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_LIMIT);\n-    env->gdt.base = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_BASE);\n+    env->idt.limit = 
rvmcs(cpu_state->hvf->fd, VMCS_GUEST_IDTR_LIMIT);\n+    env->idt.base = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_IDTR_BASE);\n+    env->gdt.limit = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_GDTR_LIMIT);\n+    env->gdt.base = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_GDTR_BASE);\n \n-    env->cr[0] = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR0);\n+    env->cr[0] = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_CR0);\n     env->cr[2] = 0;\n-    env->cr[3] = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR3);\n-    env->cr[4] = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR4);\n+    env->cr[3] = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_CR3);\n+    env->cr[4] = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_CR4);\n     \n-    env->efer = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_IA32_EFER);\n+    env->efer = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_IA32_EFER);\n }\n \n void hvf_get_msrs(CPUState *cpu_state)\n@@ -221,27 +221,27 @@ void hvf_get_msrs(CPUState *cpu_state)\n     CPUX86State *env = &X86_CPU(cpu_state)->env;\n     uint64_t tmp;\n     \n-    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_CS, &tmp);\n+    hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_CS, &tmp);\n     env->sysenter_cs = tmp;\n     \n-    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_ESP, &tmp);\n+    hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_ESP, &tmp);\n     env->sysenter_esp = tmp;\n \n-    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_EIP, &tmp);\n+    hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_EIP, &tmp);\n     env->sysenter_eip = tmp;\n \n-    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_STAR, &env->star);\n+    hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_STAR, &env->star);\n \n #ifdef TARGET_X86_64\n-    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_CSTAR, &env->cstar);\n-    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_KERNELGSBASE, &env->kernelgsbase);\n-    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_FMASK, &env->fmask);\n-    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_LSTAR, &env->lstar);\n+    hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_CSTAR, &env->cstar);\n+    hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_KERNELGSBASE, &env->kernelgsbase);\n+    hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_FMASK, &env->fmask);\n+    hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_LSTAR, &env->lstar);\n #endif\n \n-    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_APICBASE, &tmp);\n+    hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_IA32_APICBASE, &tmp);\n     \n-    env->tsc = rdtscp() + rvmcs(cpu_state->hvf_fd, VMCS_TSC_OFFSET);\n+    env->tsc = rdtscp() + rvmcs(cpu_state->hvf->fd, VMCS_TSC_OFFSET);\n }\n \n int hvf_put_registers(CPUState *cpu_state)\n@@ -249,26 +249,26 @@ int hvf_put_registers(CPUState *cpu_state)\n     X86CPU *x86cpu = X86_CPU(cpu_state);\n     CPUX86State *env = &x86cpu->env;\n \n-    wreg(cpu_state->hvf_fd, HV_X86_RAX, env->regs[R_EAX]);\n-    wreg(cpu_state->hvf_fd, HV_X86_RBX, env->regs[R_EBX]);\n-    wreg(cpu_state->hvf_fd, HV_X86_RCX, env->regs[R_ECX]);\n-    wreg(cpu_state->hvf_fd, HV_X86_RDX, env->regs[R_EDX]);\n-    wreg(cpu_state->hvf_fd, HV_X86_RBP, env->regs[R_EBP]);\n-    wreg(cpu_state->hvf_fd, HV_X86_RSP, env->regs[R_ESP]);\n-    wreg(cpu_state->hvf_fd, HV_X86_RSI, env->regs[R_ESI]);\n-    wreg(cpu_state->hvf_fd, HV_X86_RDI, env->regs[R_EDI]);\n-    wreg(cpu_state->hvf_fd, HV_X86_R8, env->regs[8]);\n-    wreg(cpu_state->hvf_fd, HV_X86_R9, env->regs[9]);\n-    wreg(cpu_state->hvf_fd, HV_X86_R10, env->regs[10]);\n-    wreg(cpu_state->hvf_fd, HV_X86_R11, env->regs[11]);\n-    wreg(cpu_state->hvf_fd, HV_X86_R12, env->regs[12]);\n-    
wreg(cpu_state->hvf_fd, HV_X86_R13, env->regs[13]);\n-    wreg(cpu_state->hvf_fd, HV_X86_R14, env->regs[14]);\n-    wreg(cpu_state->hvf_fd, HV_X86_R15, env->regs[15]);\n-    wreg(cpu_state->hvf_fd, HV_X86_RFLAGS, env->eflags);\n-    wreg(cpu_state->hvf_fd, HV_X86_RIP, env->eip);\n+    wreg(cpu_state->hvf->fd, HV_X86_RAX, env->regs[R_EAX]);\n+    wreg(cpu_state->hvf->fd, HV_X86_RBX, env->regs[R_EBX]);\n+    wreg(cpu_state->hvf->fd, HV_X86_RCX, env->regs[R_ECX]);\n+    wreg(cpu_state->hvf->fd, HV_X86_RDX, env->regs[R_EDX]);\n+    wreg(cpu_state->hvf->fd, HV_X86_RBP, env->regs[R_EBP]);\n+    wreg(cpu_state->hvf->fd, HV_X86_RSP, env->regs[R_ESP]);\n+    wreg(cpu_state->hvf->fd, HV_X86_RSI, env->regs[R_ESI]);\n+    wreg(cpu_state->hvf->fd, HV_X86_RDI, env->regs[R_EDI]);\n+    wreg(cpu_state->hvf->fd, HV_X86_R8, env->regs[8]);\n+    wreg(cpu_state->hvf->fd, HV_X86_R9, env->regs[9]);\n+    wreg(cpu_state->hvf->fd, HV_X86_R10, env->regs[10]);\n+    wreg(cpu_state->hvf->fd, HV_X86_R11, env->regs[11]);\n+    wreg(cpu_state->hvf->fd, HV_X86_R12, env->regs[12]);\n+    wreg(cpu_state->hvf->fd, HV_X86_R13, env->regs[13]);\n+    wreg(cpu_state->hvf->fd, HV_X86_R14, env->regs[14]);\n+    wreg(cpu_state->hvf->fd, HV_X86_R15, env->regs[15]);\n+    wreg(cpu_state->hvf->fd, HV_X86_RFLAGS, env->eflags);\n+    wreg(cpu_state->hvf->fd, HV_X86_RIP, env->eip);\n    \n-    wreg(cpu_state->hvf_fd, HV_X86_XCR0, env->xcr0);\n+    wreg(cpu_state->hvf->fd, HV_X86_XCR0, env->xcr0);\n     \n     hvf_put_xsave(cpu_state);\n     \n@@ -276,14 +276,14 @@ int hvf_put_registers(CPUState *cpu_state)\n     \n     hvf_put_msrs(cpu_state);\n     \n-    wreg(cpu_state->hvf_fd, HV_X86_DR0, env->dr[0]);\n-    wreg(cpu_state->hvf_fd, HV_X86_DR1, env->dr[1]);\n-    wreg(cpu_state->hvf_fd, HV_X86_DR2, env->dr[2]);\n-    wreg(cpu_state->hvf_fd, HV_X86_DR3, env->dr[3]);\n-    wreg(cpu_state->hvf_fd, HV_X86_DR4, env->dr[4]);\n-    wreg(cpu_state->hvf_fd, HV_X86_DR5, env->dr[5]);\n-    wreg(cpu_state->hvf_fd, HV_X86_DR6, env->dr[6]);\n-    wreg(cpu_state->hvf_fd, HV_X86_DR7, env->dr[7]);\n+    wreg(cpu_state->hvf->fd, HV_X86_DR0, env->dr[0]);\n+    wreg(cpu_state->hvf->fd, HV_X86_DR1, env->dr[1]);\n+    wreg(cpu_state->hvf->fd, HV_X86_DR2, env->dr[2]);\n+    wreg(cpu_state->hvf->fd, HV_X86_DR3, env->dr[3]);\n+    wreg(cpu_state->hvf->fd, HV_X86_DR4, env->dr[4]);\n+    wreg(cpu_state->hvf->fd, HV_X86_DR5, env->dr[5]);\n+    wreg(cpu_state->hvf->fd, HV_X86_DR6, env->dr[6]);\n+    wreg(cpu_state->hvf->fd, HV_X86_DR7, env->dr[7]);\n     \n     return 0;\n }\n@@ -293,40 +293,40 @@ int hvf_get_registers(CPUState *cpu_state)\n     X86CPU *x86cpu = X86_CPU(cpu_state);\n     CPUX86State *env = &x86cpu->env;\n \n-    env->regs[R_EAX] = rreg(cpu_state->hvf_fd, HV_X86_RAX);\n-    env->regs[R_EBX] = rreg(cpu_state->hvf_fd, HV_X86_RBX);\n-    env->regs[R_ECX] = rreg(cpu_state->hvf_fd, HV_X86_RCX);\n-    env->regs[R_EDX] = rreg(cpu_state->hvf_fd, HV_X86_RDX);\n-    env->regs[R_EBP] = rreg(cpu_state->hvf_fd, HV_X86_RBP);\n-    env->regs[R_ESP] = rreg(cpu_state->hvf_fd, HV_X86_RSP);\n-    env->regs[R_ESI] = rreg(cpu_state->hvf_fd, HV_X86_RSI);\n-    env->regs[R_EDI] = rreg(cpu_state->hvf_fd, HV_X86_RDI);\n-    env->regs[8] = rreg(cpu_state->hvf_fd, HV_X86_R8);\n-    env->regs[9] = rreg(cpu_state->hvf_fd, HV_X86_R9);\n-    env->regs[10] = rreg(cpu_state->hvf_fd, HV_X86_R10);\n-    env->regs[11] = rreg(cpu_state->hvf_fd, HV_X86_R11);\n-    env->regs[12] = rreg(cpu_state->hvf_fd, HV_X86_R12);\n-    env->regs[13] = rreg(cpu_state->hvf_fd, HV_X86_R13);\n-    
env->regs[14] = rreg(cpu_state->hvf_fd, HV_X86_R14);\n-    env->regs[15] = rreg(cpu_state->hvf_fd, HV_X86_R15);\n+    env->regs[R_EAX] = rreg(cpu_state->hvf->fd, HV_X86_RAX);\n+    env->regs[R_EBX] = rreg(cpu_state->hvf->fd, HV_X86_RBX);\n+    env->regs[R_ECX] = rreg(cpu_state->hvf->fd, HV_X86_RCX);\n+    env->regs[R_EDX] = rreg(cpu_state->hvf->fd, HV_X86_RDX);\n+    env->regs[R_EBP] = rreg(cpu_state->hvf->fd, HV_X86_RBP);\n+    env->regs[R_ESP] = rreg(cpu_state->hvf->fd, HV_X86_RSP);\n+    env->regs[R_ESI] = rreg(cpu_state->hvf->fd, HV_X86_RSI);\n+    env->regs[R_EDI] = rreg(cpu_state->hvf->fd, HV_X86_RDI);\n+    env->regs[8] = rreg(cpu_state->hvf->fd, HV_X86_R8);\n+    env->regs[9] = rreg(cpu_state->hvf->fd, HV_X86_R9);\n+    env->regs[10] = rreg(cpu_state->hvf->fd, HV_X86_R10);\n+    env->regs[11] = rreg(cpu_state->hvf->fd, HV_X86_R11);\n+    env->regs[12] = rreg(cpu_state->hvf->fd, HV_X86_R12);\n+    env->regs[13] = rreg(cpu_state->hvf->fd, HV_X86_R13);\n+    env->regs[14] = rreg(cpu_state->hvf->fd, HV_X86_R14);\n+    env->regs[15] = rreg(cpu_state->hvf->fd, HV_X86_R15);\n     \n-    env->eflags = rreg(cpu_state->hvf_fd, HV_X86_RFLAGS);\n-    env->eip = rreg(cpu_state->hvf_fd, HV_X86_RIP);\n+    env->eflags = rreg(cpu_state->hvf->fd, HV_X86_RFLAGS);\n+    env->eip = rreg(cpu_state->hvf->fd, HV_X86_RIP);\n    \n     hvf_get_xsave(cpu_state);\n-    env->xcr0 = rreg(cpu_state->hvf_fd, HV_X86_XCR0);\n+    env->xcr0 = rreg(cpu_state->hvf->fd, HV_X86_XCR0);\n     \n     hvf_get_segments(cpu_state);\n     hvf_get_msrs(cpu_state);\n     \n-    env->dr[0] = rreg(cpu_state->hvf_fd, HV_X86_DR0);\n-    env->dr[1] = rreg(cpu_state->hvf_fd, HV_X86_DR1);\n-    env->dr[2] = rreg(cpu_state->hvf_fd, HV_X86_DR2);\n-    env->dr[3] = rreg(cpu_state->hvf_fd, HV_X86_DR3);\n-    env->dr[4] = rreg(cpu_state->hvf_fd, HV_X86_DR4);\n-    env->dr[5] = rreg(cpu_state->hvf_fd, HV_X86_DR5);\n-    env->dr[6] = rreg(cpu_state->hvf_fd, HV_X86_DR6);\n-    env->dr[7] = rreg(cpu_state->hvf_fd, HV_X86_DR7);\n+    env->dr[0] = rreg(cpu_state->hvf->fd, HV_X86_DR0);\n+    env->dr[1] = rreg(cpu_state->hvf->fd, HV_X86_DR1);\n+    env->dr[2] = rreg(cpu_state->hvf->fd, HV_X86_DR2);\n+    env->dr[3] = rreg(cpu_state->hvf->fd, HV_X86_DR3);\n+    env->dr[4] = rreg(cpu_state->hvf->fd, HV_X86_DR4);\n+    env->dr[5] = rreg(cpu_state->hvf->fd, HV_X86_DR5);\n+    env->dr[6] = rreg(cpu_state->hvf->fd, HV_X86_DR6);\n+    env->dr[7] = rreg(cpu_state->hvf->fd, HV_X86_DR7);\n     \n     x86_update_hflags(env);\n     return 0;\n@@ -335,16 +335,16 @@ int hvf_get_registers(CPUState *cpu_state)\n static void vmx_set_int_window_exiting(CPUState *cpu)\n {\n      uint64_t val;\n-     val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);\n-     wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val |\n+     val = rvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS);\n+     wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, val |\n              VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING);\n }\n \n void vmx_clear_int_window_exiting(CPUState *cpu)\n {\n      uint64_t val;\n-     val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);\n-     wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val &\n+     val = rvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS);\n+     wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, val &\n              ~VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING);\n }\n \n@@ -380,7 +380,7 @@ bool hvf_inject_interrupts(CPUState *cpu_state)\n     uint64_t info = 0;\n     if (have_event) {\n         info = vector | intr_type | VMCS_INTR_VALID;\n-        uint64_t reason = 
rvmcs(cpu_state->hvf_fd, VMCS_EXIT_REASON);\n+        uint64_t reason = rvmcs(cpu_state->hvf->fd, VMCS_EXIT_REASON);\n         if (env->nmi_injected && reason != EXIT_REASON_TASK_SWITCH) {\n             vmx_clear_nmi_blocking(cpu_state);\n         }\n@@ -389,17 +389,17 @@ bool hvf_inject_interrupts(CPUState *cpu_state)\n             info &= ~(1 << 12); /* clear undefined bit */\n             if (intr_type == VMCS_INTR_T_SWINTR ||\n                 intr_type == VMCS_INTR_T_SWEXCEPTION) {\n-                wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INST_LENGTH, env->ins_len);\n+                wvmcs(cpu_state->hvf->fd, VMCS_ENTRY_INST_LENGTH, env->ins_len);\n             }\n             \n             if (env->has_error_code) {\n-                wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_EXCEPTION_ERROR,\n+                wvmcs(cpu_state->hvf->fd, VMCS_ENTRY_EXCEPTION_ERROR,\n                       env->error_code);\n                 /* Indicate that VMCS_ENTRY_EXCEPTION_ERROR is valid */\n                 info |= VMCS_INTR_DEL_ERRCODE;\n             }\n             /*printf(\"reinject  %lx err %d\\n\", info, err);*/\n-            wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, info);\n+            wvmcs(cpu_state->hvf->fd, VMCS_ENTRY_INTR_INFO, info);\n         };\n     }\n \n@@ -407,7 +407,7 @@ bool hvf_inject_interrupts(CPUState *cpu_state)\n         if (!(env->hflags2 & HF2_NMI_MASK) && !(info & VMCS_INTR_VALID)) {\n             cpu_state->interrupt_request &= ~CPU_INTERRUPT_NMI;\n             info = VMCS_INTR_VALID | VMCS_INTR_T_NMI | EXCP02_NMI;\n-            wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, info);\n+            wvmcs(cpu_state->hvf->fd, VMCS_ENTRY_INTR_INFO, info);\n         } else {\n             vmx_set_nmi_window_exiting(cpu_state);\n         }\n@@ -419,7 +419,7 @@ bool hvf_inject_interrupts(CPUState *cpu_state)\n         int line = cpu_get_pic_interrupt(&x86cpu->env);\n         cpu_state->interrupt_request &= ~CPU_INTERRUPT_HARD;\n         if (line >= 0) {\n-            wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, line |\n+            wvmcs(cpu_state->hvf->fd, VMCS_ENTRY_INTR_INFO, line |\n                   VMCS_INTR_VALID | VMCS_INTR_T_HWINTR);\n         }\n     }\n@@ -435,7 +435,7 @@ int hvf_process_events(CPUState *cpu_state)\n     X86CPU *cpu = X86_CPU(cpu_state);\n     CPUX86State *env = &cpu->env;\n \n-    env->eflags = rreg(cpu_state->hvf_fd, HV_X86_RFLAGS);\n+    env->eflags = rreg(cpu_state->hvf->fd, HV_X86_RFLAGS);\n \n     if (cpu_state->interrupt_request & CPU_INTERRUPT_INIT) {\n         cpu_synchronize_state(cpu_state);\n",
    "prefixes": [
        "v6",
        "04/11"
    ]
}
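
Writes go through PUT or PATCH and require an API token for an account
with maintainer rights on the project. Below is a minimal, hypothetical
sketch of a partial update with PATCH; the token and the new state value
are placeholders, and typically only a handful of fields (state,
delegate, archived, commit_ref) are writable:

    import requests

    # Placeholder token: replace with a real Patchwork API token.
    headers = {"Authorization": "Token 0123456789abcdef"}
    update = {"state": "accepted"}  # hypothetical state change

    resp = requests.patch("http://patchwork.ozlabs.org/api/patches/1429471/",
                          headers=headers, json=update)
    resp.raise_for_status()
    print(resp.json()["state"])  # "accepted" on success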